xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 6ecd284e5a94a1c96e26d571dd47419ac305990d)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_api.h>
25 #include <hif.h>
26 #include <htt.h>
27 #include <wdi_event.h>
28 #include <queue.h>
29 #include "dp_htt.h"
30 #include "dp_types.h"
31 #include "dp_internal.h"
32 #include "dp_tx.h"
33 #include "dp_tx_desc.h"
34 #include "dp_rx.h"
35 #include <cdp_txrx_handle.h>
36 #include <wlan_cfg.h>
37 #include "cdp_txrx_cmn_struct.h"
38 #include "cdp_txrx_stats_struct.h"
39 #include <qdf_util.h>
40 #include "dp_peer.h"
41 #include "dp_rx_mon.h"
42 #include "htt_stats.h"
43 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
44 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
45 #include "cdp_txrx_flow_ctrl_v2.h"
46 #else
47 static inline void
48 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
49 {
50 	return;
51 }
52 #endif
53 #include "dp_ipa.h"
54 
55 #ifdef CONFIG_MCL
56 static void dp_service_mon_rings(void *arg);
57 #ifndef REMOVE_PKT_LOG
58 #include <pktlog_ac_api.h>
59 #include <pktlog_ac.h>
60 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn);
61 #endif
62 #endif
63 static void dp_pktlogmod_exit(struct dp_pdev *handle);
64 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
65 					uint8_t *peer_mac_addr);
66 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
67 
68 #define DP_INTR_POLL_TIMER_MS	10
69 #define DP_WDS_AGING_TIMER_DEFAULT_MS	120000
70 #define DP_MCS_LENGTH (6*MAX_MCS)
71 #define DP_NSS_LENGTH (6*SS_COUNT)
72 #define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
73 #define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
74 #define DP_MAX_MCS_STRING_LEN 30
75 #define DP_CURR_FW_STATS_AVAIL 19
76 #define DP_HTT_DBG_EXT_STATS_MAX 256
77 #define DP_MAX_SLEEP_TIME 100
78 
79 #ifdef IPA_OFFLOAD
80 /* Exclude IPA rings from the interrupt context */
81 #define TX_RING_MASK_VAL	0xb
82 #define RX_RING_MASK_VAL	0x7
83 #else
84 #define TX_RING_MASK_VAL	0xF
85 #define RX_RING_MASK_VAL	0xF
86 #endif
87 
88 bool rx_hash = 1;
89 qdf_declare_param(rx_hash, bool);
90 
91 #define STR_MAXLEN	64
92 
93 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
94 
95 /* PPDU stats mask sent to FW to enable enhanced stats */
96 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
97 /* PPDU stats mask sent to FW to support debug sniffer feature */
98 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
99 /**
100  * default_dscp_tid_map - Default DSCP-TID mapping
101  *
102  * DSCP        TID
103  * 000000      0
104  * 001000      1
105  * 010000      2
106  * 011000      3
107  * 100000      4
108  * 101000      5
109  * 110000      6
110  * 111000      7
111  */
112 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
113 	0, 0, 0, 0, 0, 0, 0, 0,
114 	1, 1, 1, 1, 1, 1, 1, 1,
115 	2, 2, 2, 2, 2, 2, 2, 2,
116 	3, 3, 3, 3, 3, 3, 3, 3,
117 	4, 4, 4, 4, 4, 4, 4, 4,
118 	5, 5, 5, 5, 5, 5, 5, 5,
119 	6, 6, 6, 6, 6, 6, 6, 6,
120 	7, 7, 7, 7, 7, 7, 7, 7,
121 };
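/*
 * Illustrative lookup (hypothetical value): the table is indexed directly by
 * the 6-bit DSCP value, so
 *
 *	uint8_t tid = default_dscp_tid_map[46];
 *
 * yields TID 5 for DSCP 46 (Expedited Forwarding), i.e. by default only the
 * three most-significant DSCP bits select the TID.
 */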
122 
123 /*
124  * struct dp_rate_debug
125  *
126  * @mcs_type: print string for a given MCS index
127  * @valid: whether this MCS index is valid for the preamble type
128  */
129 struct dp_rate_debug {
130 	char mcs_type[DP_MAX_MCS_STRING_LEN];
131 	uint8_t valid;
132 };
133 
134 #define MCS_VALID 1
135 #define MCS_INVALID 0
136 
137 static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
138 
139 	{
140 		{"OFDM 48 Mbps", MCS_VALID},
141 		{"OFDM 24 Mbps", MCS_VALID},
142 		{"OFDM 12 Mbps", MCS_VALID},
143 		{"OFDM 6 Mbps ", MCS_VALID},
144 		{"OFDM 54 Mbps", MCS_VALID},
145 		{"OFDM 36 Mbps", MCS_VALID},
146 		{"OFDM 18 Mbps", MCS_VALID},
147 		{"OFDM 9 Mbps ", MCS_VALID},
148 		{"INVALID ", MCS_INVALID},
149 		{"INVALID ", MCS_INVALID},
150 		{"INVALID ", MCS_INVALID},
151 		{"INVALID ", MCS_INVALID},
152 		{"INVALID ", MCS_VALID},
153 	},
154 	{
155 		{"CCK 11 Mbps Long  ", MCS_VALID},
156 		{"CCK 5.5 Mbps Long ", MCS_VALID},
157 		{"CCK 2 Mbps Long   ", MCS_VALID},
158 		{"CCK 1 Mbps Long   ", MCS_VALID},
159 		{"CCK 11 Mbps Short ", MCS_VALID},
160 		{"CCK 5.5 Mbps Short", MCS_VALID},
161 		{"CCK 2 Mbps Short  ", MCS_VALID},
162 		{"INVALID ", MCS_INVALID},
163 		{"INVALID ", MCS_INVALID},
164 		{"INVALID ", MCS_INVALID},
165 		{"INVALID ", MCS_INVALID},
166 		{"INVALID ", MCS_INVALID},
167 		{"INVALID ", MCS_VALID},
168 	},
169 	{
170 		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
171 		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
172 		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
173 		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
174 		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
175 		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
176 		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
177 		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
178 		{"INVALID ", MCS_INVALID},
179 		{"INVALID ", MCS_INVALID},
180 		{"INVALID ", MCS_INVALID},
181 		{"INVALID ", MCS_INVALID},
182 		{"INVALID ", MCS_VALID},
183 	},
184 	{
185 		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
186 		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
187 		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
188 		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
189 		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
190 		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
191 		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
192 		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
193 		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
194 		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
195 		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
196 		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
197 		{"INVALID ", MCS_VALID},
198 	},
199 	{
200 		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
201 		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
202 		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
203 		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
204 		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
205 		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
206 		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
207 		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
208 		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
209 		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
210 		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
211 		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
212 		{"INVALID ", MCS_VALID},
213 	}
214 };
215 
216 /**
217  * @brief Cpu ring map types
218  */
219 enum dp_cpu_ring_map_types {
220 	DP_DEFAULT_MAP,
221 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
222 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
223 	DP_NSS_ALL_RADIO_OFFLOADED_MAP,
224 	DP_CPU_RING_MAP_MAX
225 };
226 
227 /**
228  * @brief Cpu to tx ring map
229  */
230 static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
231 	{0x0, 0x1, 0x2, 0x0},
232 	{0x1, 0x2, 0x1, 0x2},
233 	{0x0, 0x2, 0x0, 0x2},
234 	{0x2, 0x2, 0x2, 0x2}
235 };
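/*
 * Illustrative reading of the table (see dp_soc_reset_cpu_ring_map() below):
 * rows are indexed by enum dp_cpu_ring_map_types and columns by interrupt
 * context. For example, with one NSS-offloaded radio
 * (DP_NSS_FIRST_RADIO_OFFLOADED_MAP) interrupt context 0 uses Tx ring 0x1,
 * presumably leaving ring 0 to the offloaded radio. The per-context value is
 * copied into soc->tx_ring_map[].
 */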
236 
237 /**
238  * @brief Select the type of statistics
239  */
240 enum dp_stats_type {
241 	STATS_FW = 0,
242 	STATS_HOST = 1,
243 	STATS_TYPE_MAX = 2,
244 };
245 
246 /**
247  * @brief General Firmware statistics options
248  *
249  */
250 enum dp_fw_stats {
251 	TXRX_FW_STATS_INVALID	= -1,
252 };
253 
254 /**
255  * dp_stats_mapping_table - Firmware and Host statistics
256  * currently supported
257  */
258 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
259 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
260 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
261 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
262 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
263 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
264 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
265 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
266 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
267 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
268 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
269 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
270 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
271 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
272 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
273 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
274 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
275 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
276 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
277 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
278 	/* Last ENUM for HTT FW STATS */
279 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
280 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
281 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
282 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
283 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
284 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
285 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
286 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
287 };
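/*
 * Each row pairs an HTT firmware stats id (column STATS_FW) with a host stats
 * id (column STATS_HOST); at most one of the two is valid for a given
 * user-requested stats index. A sketch of how a caller might consume a row
 * (the surrounding request logic is illustrative only):
 *
 *	fw_id   = dp_stats_mapping_table[stats_index][STATS_FW];
 *	host_id = dp_stats_mapping_table[stats_index][STATS_HOST];
 *	if (fw_id != TXRX_FW_STATS_INVALID)
 *		request the HTT extended stats from firmware;
 *	else if (host_id != TXRX_HOST_STATS_INVALID)
 *		print the corresponding host statistics;
 */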
288 
289 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
290 					struct cdp_peer *peer_hdl,
291 					uint8_t *mac_addr,
292 					enum cdp_txrx_ast_entry_type type,
293 					uint32_t flags)
294 {
295 
296 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
297 				(struct dp_peer *)peer_hdl,
298 				mac_addr,
299 				type,
300 				flags);
301 }
302 
303 static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
304 					 void *ast_entry_hdl)
305 {
306 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
307 	qdf_spin_lock_bh(&soc->ast_lock);
308 	dp_peer_del_ast((struct dp_soc *)soc_hdl,
309 			(struct dp_ast_entry *)ast_entry_hdl);
310 	qdf_spin_unlock_bh(&soc->ast_lock);
311 }
312 
313 
314 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
315 						struct cdp_peer *peer_hdl,
316 						uint8_t *wds_macaddr,
317 						uint32_t flags)
318 {
319 	int status;
320 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
321 	struct dp_ast_entry  *ast_entry = NULL;
322 
323 	qdf_spin_lock_bh(&soc->ast_lock);
324 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
325 
326 	status = dp_peer_update_ast(soc,
327 					(struct dp_peer *)peer_hdl,
328 					ast_entry,
329 					flags);
330 	qdf_spin_unlock_bh(&soc->ast_lock);
331 
332 	return status;
333 }
334 
335 /*
336  * dp_wds_reset_ast_wifi3() - Reset the is_active param for an AST entry
337  * @soc_hdl:		Datapath SOC handle
338  * @wds_macaddr:	MAC address of the WDS AST entry to reset
339  * Return: None
340  */
341 static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
342 						uint8_t *wds_macaddr)
343 {
344 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
345 	struct dp_ast_entry *ast_entry = NULL;
346 
347 	qdf_spin_lock_bh(&soc->ast_lock);
348 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
349 
350 	if (ast_entry && ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) {
351 		ast_entry->is_active = TRUE;
352 	}
353 	qdf_spin_unlock_bh(&soc->ast_lock);
354 }
355 
356 /*
357  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all AST entries
358  * @soc_hdl:		Datapath SOC handle
359  *
360  * Return: None
361  */
362 static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
363 {
364 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
365 	struct dp_pdev *pdev;
366 	struct dp_vdev *vdev;
367 	struct dp_peer *peer;
368 	struct dp_ast_entry *ase, *temp_ase;
369 	int i;
370 
371 	qdf_spin_lock_bh(&soc->ast_lock);
372 
373 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
374 		pdev = soc->pdev_list[i];
375 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
376 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
377 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
378 					if (ase->type ==
379 						CDP_TXRX_AST_TYPE_STATIC)
380 						continue;
381 					ase->is_active = TRUE;
382 				}
383 			}
384 		}
385 	}
386 
387 	qdf_spin_unlock_bh(&soc->ast_lock);
388 }
389 
390 /*
391  * dp_wds_flush_ast_table_wifi3() - Delete all WDS and HMWDS AST entries
392  * @soc_hdl:		Datapath SOC handle
393  *
394  * Return: None
395  */
396 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
397 {
398 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
399 	struct dp_pdev *pdev;
400 	struct dp_vdev *vdev;
401 	struct dp_peer *peer;
402 	struct dp_ast_entry *ase, *temp_ase;
403 	int i;
404 
405 	qdf_spin_lock_bh(&soc->ast_lock);
406 
407 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
408 		pdev = soc->pdev_list[i];
409 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
410 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
411 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
412 					if (ase->type ==
413 						CDP_TXRX_AST_TYPE_STATIC)
414 						continue;
415 					dp_peer_del_ast(soc, ase);
416 				}
417 			}
418 		}
419 	}
420 
421 	qdf_spin_unlock_bh(&soc->ast_lock);
422 }
423 
424 static void *dp_peer_ast_hash_find_wifi3(struct cdp_soc_t *soc_hdl,
425 						uint8_t *ast_mac_addr)
426 {
427 	struct dp_ast_entry *ast_entry;
428 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
429 	qdf_spin_lock_bh(&soc->ast_lock);
430 	ast_entry = dp_peer_ast_hash_find(soc, ast_mac_addr);
431 	qdf_spin_unlock_bh(&soc->ast_lock);
432 	return (void *)ast_entry;
433 }
434 
435 static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
436 							void *ast_entry_hdl)
437 {
438 	return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
439 					(struct dp_ast_entry *)ast_entry_hdl);
440 }
441 
442 static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
443 							void *ast_entry_hdl)
444 {
445 	return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
446 					(struct dp_ast_entry *)ast_entry_hdl);
447 }
448 
449 static void dp_peer_ast_set_type_wifi3(
450 					struct cdp_soc_t *soc_hdl,
451 					void *ast_entry_hdl,
452 					enum cdp_txrx_ast_entry_type type)
453 {
454 	dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
455 				(struct dp_ast_entry *)ast_entry_hdl,
456 				type);
457 }
458 
459 
460 
461 /**
462  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
463  * @ring_num: ring num of the ring being queried
464  * @grp_mask: the grp_mask array for the ring type in question.
465  *
466  * The grp_mask array is indexed by group number and the bit fields correspond
467  * to ring numbers.  We are finding which interrupt group a ring belongs to.
468  *
469  * Return: the index in the grp_mask array whose mask contains the ring number,
470  * or -QDF_STATUS_E_NOENT if no entry is found
471  */
472 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
473 {
474 	int ext_group_num;
475 	int mask = 1 << ring_num;
476 
477 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
478 	     ext_group_num++) {
479 		if (mask & grp_mask[ext_group_num])
480 			return ext_group_num;
481 	}
482 
483 	return -QDF_STATUS_E_NOENT;
484 }
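/*
 * Example with hypothetical masks: if grp_mask[] = { 0x3, 0xC, 0x0, 0x0 },
 * ring 2 has mask (1 << 2) = 0x4, which is set only in grp_mask[1], so the
 * function returns ext_group 1; ring 5 is in no group and the function
 * returns -QDF_STATUS_E_NOENT.
 */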
485 
486 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
487 				       enum hal_ring_type ring_type,
488 				       int ring_num)
489 {
490 	int *grp_mask;
491 
492 	switch (ring_type) {
493 	case WBM2SW_RELEASE:
494 		/* dp_tx_comp_handler - soc->tx_comp_ring */
495 		if (ring_num < 3)
496 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
497 
498 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
499 		else if (ring_num == 3) {
500 			/* sw treats this as a separate ring type */
501 			grp_mask = &soc->wlan_cfg_ctx->
502 				int_rx_wbm_rel_ring_mask[0];
503 			ring_num = 0;
504 		} else {
505 			qdf_assert(0);
506 			return -QDF_STATUS_E_NOENT;
507 		}
508 	break;
509 
510 	case REO_EXCEPTION:
511 		/* dp_rx_err_process - &soc->reo_exception_ring */
512 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
513 	break;
514 
515 	case REO_DST:
516 		/* dp_rx_process - soc->reo_dest_ring */
517 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
518 	break;
519 
520 	case REO_STATUS:
521 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
522 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
523 	break;
524 
525 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
526 	case RXDMA_MONITOR_STATUS:
527 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
528 	case RXDMA_MONITOR_DST:
529 		/* dp_mon_process */
530 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
531 	break;
532 	case RXDMA_DST:
533 		/* dp_rxdma_err_process */
534 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
535 	break;
536 
537 	case RXDMA_BUF:
538 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
539 	break;
540 
541 	case RXDMA_MONITOR_BUF:
542 		/* TODO: support low_thresh interrupt */
543 		return -QDF_STATUS_E_NOENT;
544 	break;
545 
546 	case TCL_DATA:
547 	case TCL_CMD:
548 	case REO_CMD:
549 	case SW2WBM_RELEASE:
550 	case WBM_IDLE_LINK:
551 		/* normally empty SW_TO_HW rings */
552 		return -QDF_STATUS_E_NOENT;
553 	break;
554 
555 	case TCL_STATUS:
556 	case REO_REINJECT:
557 		/* misc unused rings */
558 		return -QDF_STATUS_E_NOENT;
559 	break;
560 
561 	case CE_SRC:
562 	case CE_DST:
563 	case CE_DST_STATUS:
564 		/* CE_rings - currently handled by hif */
565 	default:
566 		return -QDF_STATUS_E_NOENT;
567 	break;
568 	}
569 
570 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
571 }
572 
573 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
574 			      *ring_params, int ring_type, int ring_num)
575 {
576 	int msi_group_number;
577 	int msi_data_count;
578 	int ret;
579 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
580 
581 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
582 					    &msi_data_count, &msi_data_start,
583 					    &msi_irq_start);
584 
585 	if (ret)
586 		return;
587 
588 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
589 						       ring_num);
590 	if (msi_group_number < 0) {
591 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
592 			FL("ring not part of an ext_group; ring_type: %d, ring_num %d"),
593 			ring_type, ring_num);
594 		ring_params->msi_addr = 0;
595 		ring_params->msi_data = 0;
596 		return;
597 	}
598 
599 	if (msi_group_number > msi_data_count) {
600 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
601 			FL("2 msi_groups will share an msi; msi_group_num %d"),
602 			msi_group_number);
603 
604 		QDF_ASSERT(0);
605 	}
606 
607 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
608 
609 	ring_params->msi_addr = addr_low;
610 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
611 	ring_params->msi_data = (msi_group_number % msi_data_count)
612 		+ msi_data_start;
613 	ring_params->flags |= HAL_SRNG_MSI_INTR;
614 }
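/*
 * Example of the MSI data computation above, assuming pld reports
 * msi_data_count = 3 and msi_data_start = 1 (hypothetical platform values):
 * a ring mapped to interrupt group 4 gets
 *	msi_data = (4 % 3) + 1 = 2
 * i.e. groups beyond the available vector count wrap around and share
 * vectors, which is what the warning above flags.
 */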
615 
616 /**
617  * dp_print_ast_stats() - Dump AST table contents
618  * @soc: Datapath soc handle
619  *
620  * Return: None
621  */
622 #ifdef FEATURE_AST
623 static void dp_print_ast_stats(struct dp_soc *soc)
624 {
625 	uint8_t i;
626 	uint8_t num_entries = 0;
627 	struct dp_vdev *vdev;
628 	struct dp_pdev *pdev;
629 	struct dp_peer *peer;
630 	struct dp_ast_entry *ase, *tmp_ase;
631 	char type[5][10] = {"NONE", "STATIC", "WDS", "MEC", "HMWDS"};
632 
633 	DP_PRINT_STATS("AST Stats:");
634 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
635 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
636 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
637 	DP_PRINT_STATS("AST Table:");
638 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
639 		pdev = soc->pdev_list[i];
640 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
641 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
642 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
643 					DP_PRINT_STATS("%6d mac_addr = %pM"
644 							" peer_mac_addr = %pM"
645 							" type = %s"
646 							" next_hop = %d"
647 							" is_active = %d"
648 							" is_bss = %d"
649 							" ast_idx = %d"
650 							" pdev_id = %d"
651 							" vdev_id = %d",
652 							++num_entries,
653 							ase->mac_addr.raw,
654 							ase->peer->mac_addr.raw,
655 							type[ase->type],
656 							ase->next_hop,
657 							ase->is_active,
658 							ase->is_bss,
659 							ase->ast_idx,
660 							ase->pdev_id,
661 							ase->vdev_id);
662 				}
663 			}
664 		}
665 	}
666 }
667 #else
668 static void dp_print_ast_stats(struct dp_soc *soc)
669 {
670 	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
671 	return;
672 }
673 #endif
674 
675 static void dp_print_peer_table(struct dp_vdev *vdev)
676 {
677 	struct dp_peer *peer = NULL;
678 
679 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
680 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
681 		if (!peer) {
682 			DP_PRINT_STATS("Invalid Peer");
683 			return;
684 		}
685 		DP_PRINT_STATS("    peer_mac_addr = %pM"
686 			" nawds_enabled = %d"
687 			" bss_peer = %d"
688 			" wapi = %d"
689 			" wds_enabled = %d"
690 			" delete in progress = %d",
691 			peer->mac_addr.raw,
692 			peer->nawds_enabled,
693 			peer->bss_peer,
694 			peer->wapi,
695 			peer->wds_enabled,
696 			peer->delete_in_progress);
697 	}
698 }
699 
700 /*
701  * dp_srng_setup() - Internal function to set up SRNG rings used by the data path
702  */
703 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
704 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
705 {
706 	void *hal_soc = soc->hal_soc;
707 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
708 	/* TODO: See if we should get align size from hal */
709 	uint32_t ring_base_align = 8;
710 	struct hal_srng_params ring_params;
711 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
712 
713 	/* TODO: Currently hal layer takes care of endianness related settings.
714 	 * See if these settings need to passed from DP layer
715 	 */
716 	ring_params.flags = 0;
717 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
718 		  FL("Ring type: %d, num:%d"), ring_type, ring_num);
719 
720 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
721 	srng->hal_srng = NULL;
722 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
723 	srng->num_entries = num_entries;
724 	srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
725 		soc->osdev, soc->osdev->dev, srng->alloc_size,
726 		&(srng->base_paddr_unaligned));
727 
728 	if (!srng->base_vaddr_unaligned) {
729 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
730 			FL("alloc failed - ring_type: %d, ring_num %d"),
731 			ring_type, ring_num);
732 		return QDF_STATUS_E_NOMEM;
733 	}
734 
735 	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
736 		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
737 	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
738 		((unsigned long)(ring_params.ring_base_vaddr) -
739 		(unsigned long)srng->base_vaddr_unaligned);
740 	ring_params.num_entries = num_entries;
741 
742 	if (soc->intr_mode == DP_INTR_MSI) {
743 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
744 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
745 			FL("Using MSI for ring_type: %d, ring_num %d"),
746 			ring_type, ring_num);
747 
748 	} else {
749 		ring_params.msi_data = 0;
750 		ring_params.msi_addr = 0;
751 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
752 			FL("Skipping MSI for ring_type: %d, ring_num %d"),
753 			ring_type, ring_num);
754 	}
755 
756 	/*
757 	 * Setup interrupt timer and batch counter thresholds for
758 	 * interrupt mitigation based on ring type
759 	 */
760 	if (ring_type == REO_DST) {
761 		ring_params.intr_timer_thres_us =
762 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
763 		ring_params.intr_batch_cntr_thres_entries =
764 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
765 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
766 		ring_params.intr_timer_thres_us =
767 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
768 		ring_params.intr_batch_cntr_thres_entries =
769 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
770 	} else {
771 		ring_params.intr_timer_thres_us =
772 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
773 		ring_params.intr_batch_cntr_thres_entries =
774 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
775 	}
776 
777 	/* Enable low threshold interrupts for rx buffer rings (regular and
778 	 * monitor buffer rings).
779 	 * TODO: See if this is required for any other ring
780 	 */
781 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
782 		(ring_type == RXDMA_MONITOR_STATUS)) {
783 		/* TODO: Setting low threshold to 1/8th of ring size
784 		 * see if this needs to be configurable
785 		 */
786 		ring_params.low_threshold = num_entries >> 3;
787 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
788 		ring_params.intr_timer_thres_us =
789 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
790 		ring_params.intr_batch_cntr_thres_entries = 0;
791 	}
792 
793 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
794 		mac_id, &ring_params);
795 
796 	if (!srng->hal_srng) {
797 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
798 				srng->alloc_size,
799 				srng->base_vaddr_unaligned,
800 				srng->base_paddr_unaligned, 0);
801 	}
802 
803 	return 0;
804 }
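/*
 * Illustrative usage sketch (a hypothetical caller; error handling is
 * caller-specific):
 *
 *	if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST, i, 0,
 *			  REO_DST_RING_SIZE))
 *		goto fail;
 *
 * A non-zero return (QDF_STATUS_E_NOMEM) indicates the ring descriptor
 * memory could not be allocated.
 */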
805 
806 /**
807  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
808  * Any buffers allocated and attached to ring entries are expected to be freed
809  * before calling this function.
810  */
811 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
812 	int ring_type, int ring_num)
813 {
814 	if (!srng->hal_srng) {
815 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
816 			FL("Ring type: %d, num:%d not setup"),
817 			ring_type, ring_num);
818 		return;
819 	}
820 
821 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
822 
823 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
824 				srng->alloc_size,
825 				srng->base_vaddr_unaligned,
826 				srng->base_paddr_unaligned, 0);
827 	srng->hal_srng = NULL;
828 }
829 
830 /* TODO: Need this interface from HIF */
831 void *hif_get_hal_handle(void *hif_handle);
832 
833 /*
834  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
835  * @dp_ctx: DP interrupt context handle (struct dp_intr *)
836  * @budget: Number of frames/descriptors that can be processed in one shot
837  *
838  * Return: remaining budget/quota for the soc device
839  */
840 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
841 {
842 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
843 	struct dp_soc *soc = int_ctx->soc;
844 	int ring = 0;
845 	uint32_t work_done  = 0;
846 	int budget = dp_budget;
847 	uint8_t tx_mask = int_ctx->tx_ring_mask;
848 	uint8_t rx_mask = int_ctx->rx_ring_mask;
849 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
850 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
851 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
852 	uint32_t remaining_quota = dp_budget;
853 	struct dp_pdev *pdev = NULL;
854 	int mac_id;
855 
856 	/* Process Tx completion interrupts first to return back buffers */
857 	while (tx_mask) {
858 		if (tx_mask & 0x1) {
859 			work_done = dp_tx_comp_handler(soc,
860 					soc->tx_comp_ring[ring].hal_srng,
861 					remaining_quota);
862 
863 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
864 				"tx mask 0x%x ring %d, budget %d, work_done %d",
865 				tx_mask, ring, budget, work_done);
866 
867 			budget -= work_done;
868 			if (budget <= 0)
869 				goto budget_done;
870 
871 			remaining_quota = budget;
872 		}
873 		tx_mask = tx_mask >> 1;
874 		ring++;
875 	}
876 
877 
878 	/* Process REO Exception ring interrupt */
879 	if (rx_err_mask) {
880 		work_done = dp_rx_err_process(soc,
881 				soc->reo_exception_ring.hal_srng,
882 				remaining_quota);
883 
884 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
885 			"REO Exception Ring: work_done %d budget %d",
886 			work_done, budget);
887 
888 		budget -=  work_done;
889 		if (budget <= 0) {
890 			goto budget_done;
891 		}
892 		remaining_quota = budget;
893 	}
894 
895 	/* Process Rx WBM release ring interrupt */
896 	if (rx_wbm_rel_mask) {
897 		work_done = dp_rx_wbm_err_process(soc,
898 				soc->rx_rel_ring.hal_srng, remaining_quota);
899 
900 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
901 			"WBM Release Ring: work_done %d budget %d",
902 			work_done, budget);
903 
904 		budget -=  work_done;
905 		if (budget <= 0) {
906 			goto budget_done;
907 		}
908 		remaining_quota = budget;
909 	}
910 
911 	/* Process Rx interrupts */
912 	if (rx_mask) {
913 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
914 			if (rx_mask & (1 << ring)) {
915 				work_done = dp_rx_process(int_ctx,
916 					    soc->reo_dest_ring[ring].hal_srng,
917 					    remaining_quota);
918 
919 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
920 					"rx mask 0x%x ring %d, work_done %d budget %d",
921 					rx_mask, ring, work_done, budget);
922 
923 				budget -=  work_done;
924 				if (budget <= 0)
925 					goto budget_done;
926 				remaining_quota = budget;
927 			}
928 		}
929 		for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
930 			work_done = dp_rxdma_err_process(soc, ring,
931 						remaining_quota);
932 			budget -= work_done;
933 		}
934 	}
935 
936 	if (reo_status_mask)
937 		dp_reo_status_ring_handler(soc);
938 
939 	/* Process LMAC interrupts */
940 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
941 		pdev = soc->pdev_list[ring];
942 		if (pdev == NULL)
943 			continue;
944 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
945 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
946 								pdev->pdev_id);
947 
948 			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
949 				work_done = dp_mon_process(soc, mac_for_pdev,
950 						remaining_quota);
951 				budget -= work_done;
952 				if (budget <= 0)
953 					goto budget_done;
954 				remaining_quota = budget;
955 			}
956 
957 			if (int_ctx->rxdma2host_ring_mask &
958 					(1 << mac_for_pdev)) {
959 				work_done = dp_rxdma_err_process(soc,
960 							mac_for_pdev,
961 							remaining_quota);
962 				budget -=  work_done;
963 				if (budget <= 0)
964 					goto budget_done;
965 				remaining_quota = budget;
966 			}
967 
968 			if (int_ctx->host2rxdma_ring_mask &
969 						(1 << mac_for_pdev)) {
970 				union dp_rx_desc_list_elem_t *desc_list = NULL;
971 				union dp_rx_desc_list_elem_t *tail = NULL;
972 				struct dp_srng *rx_refill_buf_ring =
973 					&pdev->rx_refill_buf_ring;
974 
975 				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
976 						1);
977 				dp_rx_buffers_replenish(soc, mac_for_pdev,
978 					rx_refill_buf_ring,
979 					&soc->rx_desc_buf[mac_for_pdev], 0,
980 					&desc_list, &tail);
981 			}
982 		}
983 	}
984 
985 	qdf_lro_flush(int_ctx->lro_ctx);
986 
987 budget_done:
988 	return dp_budget - budget;
989 }
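/*
 * Budget accounting example for dp_service_srngs() (hypothetical numbers):
 * with dp_budget = 32, if Tx completion processing reports work_done = 10,
 * the remaining 22 becomes the quota passed to the REO exception ring, and so
 * on down the handler; the function returns dp_budget - budget, i.e. the
 * total work actually done in this invocation.
 */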
990 
991 #ifdef DP_INTR_POLL_BASED
992 /* dp_interrupt_timer() - timer poll for interrupts
993  *
994  * @arg: SoC Handle
995  *
996  * Return: None
997  *
998  */
999 static void dp_interrupt_timer(void *arg)
1000 {
1001 	struct dp_soc *soc = (struct dp_soc *) arg;
1002 	int i;
1003 
1004 	if (qdf_atomic_read(&soc->cmn_init_done)) {
1005 		for (i = 0;
1006 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1007 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
1008 
1009 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1010 	}
1011 }
1012 
1013 /*
1014  * dp_soc_interrupt_attach_poll() - Register handlers for DP interrupts
1015  * @txrx_soc: DP SOC handle
1016  *
1017  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1018  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask and
1019  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1020  *
1021  * Return: 0 for success. nonzero for failure.
1022  */
1023 static QDF_STATUS dp_soc_interrupt_attach_poll(void *txrx_soc)
1024 {
1025 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1026 	int i;
1027 
1028 	soc->intr_mode = DP_INTR_POLL;
1029 
1030 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1031 		soc->intr_ctx[i].dp_intr_id = i;
1032 		soc->intr_ctx[i].tx_ring_mask =
1033 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1034 		soc->intr_ctx[i].rx_ring_mask =
1035 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1036 		soc->intr_ctx[i].rx_mon_ring_mask =
1037 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1038 		soc->intr_ctx[i].rx_err_ring_mask =
1039 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1040 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1041 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1042 		soc->intr_ctx[i].reo_status_ring_mask =
1043 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1044 		soc->intr_ctx[i].rxdma2host_ring_mask =
1045 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1046 		soc->intr_ctx[i].soc = soc;
1047 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1048 	}
1049 
1050 	qdf_timer_init(soc->osdev, &soc->int_timer,
1051 			dp_interrupt_timer, (void *)soc,
1052 			QDF_TIMER_TYPE_WAKE_APPS);
1053 
1054 	return QDF_STATUS_SUCCESS;
1055 }
1056 
1057 #if defined(CONFIG_MCL)
1058 extern int con_mode_monitor;
1059 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
1060 /*
1061  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1062  * @txrx_soc: DP SOC handle
1063  *
1064  * Call the appropriate attach function based on the mode of operation.
1065  * This is a WAR for enabling monitor mode.
1066  *
1067  * Return: 0 for success. nonzero for failure.
1068  */
1069 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1070 {
1071 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1072 
1073 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1074 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
1075 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1076 				  "%s: Poll mode", __func__);
1077 		return dp_soc_interrupt_attach_poll(txrx_soc);
1078 	} else {
1079 
1080 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1081 				  "%s: Interrupt  mode", __func__);
1082 		return dp_soc_interrupt_attach(txrx_soc);
1083 	}
1084 }
1085 #else
1086 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1087 {
1088 	return dp_soc_interrupt_attach_poll(txrx_soc);
1089 }
1090 #endif
1091 #endif
1092 
1093 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1094 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1095 {
1096 	int j;
1097 	int num_irq = 0;
1098 
1099 	int tx_mask =
1100 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1101 	int rx_mask =
1102 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1103 	int rx_mon_mask =
1104 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1105 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1106 					soc->wlan_cfg_ctx, intr_ctx_num);
1107 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1108 					soc->wlan_cfg_ctx, intr_ctx_num);
1109 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1110 					soc->wlan_cfg_ctx, intr_ctx_num);
1111 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1112 					soc->wlan_cfg_ctx, intr_ctx_num);
1113 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1114 					soc->wlan_cfg_ctx, intr_ctx_num);
1115 
1116 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1117 
1118 		if (tx_mask & (1 << j)) {
1119 			irq_id_map[num_irq++] =
1120 				(wbm2host_tx_completions_ring1 - j);
1121 		}
1122 
1123 		if (rx_mask & (1 << j)) {
1124 			irq_id_map[num_irq++] =
1125 				(reo2host_destination_ring1 - j);
1126 		}
1127 
1128 		if (rxdma2host_ring_mask & (1 << j)) {
1129 			irq_id_map[num_irq++] =
1130 				rxdma2host_destination_ring_mac1 -
1131 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1132 		}
1133 
1134 		if (host2rxdma_ring_mask & (1 << j)) {
1135 			irq_id_map[num_irq++] =
1136 				host2rxdma_host_buf_ring_mac1 -
1137 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1138 		}
1139 
1140 		if (rx_mon_mask & (1 << j)) {
1141 			irq_id_map[num_irq++] =
1142 				ppdu_end_interrupts_mac1 -
1143 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1144 			irq_id_map[num_irq++] =
1145 				rxdma2host_monitor_status_ring_mac1 -
1146 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1147 		}
1148 
1149 		if (rx_wbm_rel_ring_mask & (1 << j))
1150 			irq_id_map[num_irq++] = wbm2host_rx_release;
1151 
1152 		if (rx_err_ring_mask & (1 << j))
1153 			irq_id_map[num_irq++] = reo2host_exception;
1154 
1155 		if (reo_status_ring_mask & (1 << j))
1156 			irq_id_map[num_irq++] = reo2host_status;
1157 
1158 	}
1159 	*num_irq_r = num_irq;
1160 }
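/*
 * Example: for an interrupt context whose tx_mask and rx_mask both have bit 0
 * set (all other masks clear), the loop above produces
 *	irq_id_map[] = { wbm2host_tx_completions_ring1,
 *			 reo2host_destination_ring1 }
 * with *num_irq_r = 2, i.e. one legacy IRQ line per enabled ring.
 */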
1161 
1162 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1163 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1164 		int msi_vector_count, int msi_vector_start)
1165 {
1166 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1167 					soc->wlan_cfg_ctx, intr_ctx_num);
1168 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1169 					soc->wlan_cfg_ctx, intr_ctx_num);
1170 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1171 					soc->wlan_cfg_ctx, intr_ctx_num);
1172 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1173 					soc->wlan_cfg_ctx, intr_ctx_num);
1174 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1175 					soc->wlan_cfg_ctx, intr_ctx_num);
1176 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1177 					soc->wlan_cfg_ctx, intr_ctx_num);
1178 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1179 					soc->wlan_cfg_ctx, intr_ctx_num);
1180 
1181 	unsigned int vector =
1182 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1183 	int num_irq = 0;
1184 
1185 	soc->intr_mode = DP_INTR_MSI;
1186 
1187 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1188 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1189 		irq_id_map[num_irq++] =
1190 			pld_get_msi_irq(soc->osdev->dev, vector);
1191 
1192 	*num_irq_r = num_irq;
1193 }
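/*
 * Example: with msi_vector_count = 3 and msi_vector_start = 1 (hypothetical
 * platform values), interrupt context 5 maps to vector (5 % 3) + 1 = 3 and
 * the whole context is serviced by that single MSI, so *num_irq_r is at
 * most 1.
 */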
1194 
1195 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1196 				    int *irq_id_map, int *num_irq)
1197 {
1198 	int msi_vector_count, ret;
1199 	uint32_t msi_base_data, msi_vector_start;
1200 
1201 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1202 					    &msi_vector_count,
1203 					    &msi_base_data,
1204 					    &msi_vector_start);
1205 	if (ret)
1206 		return dp_soc_interrupt_map_calculate_integrated(soc,
1207 				intr_ctx_num, irq_id_map, num_irq);
1208 
1209 	else
1210 		dp_soc_interrupt_map_calculate_msi(soc,
1211 				intr_ctx_num, irq_id_map, num_irq,
1212 				msi_vector_count, msi_vector_start);
1213 }
1214 
1215 /*
1216  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1217  * @txrx_soc: DP SOC handle
1218  *
1219  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1220  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask and
1221  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1222  *
1223  * Return: 0 for success. nonzero for failure.
1224  */
1225 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1226 {
1227 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1228 
1229 	int i = 0;
1230 	int num_irq = 0;
1231 
1232 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1233 		int ret = 0;
1234 
1235 		/* Map of IRQ ids registered with one interrupt context */
1236 		int irq_id_map[HIF_MAX_GRP_IRQ];
1237 
1238 		int tx_mask =
1239 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1240 		int rx_mask =
1241 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1242 		int rx_mon_mask =
1243 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1244 		int rx_err_ring_mask =
1245 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1246 		int rx_wbm_rel_ring_mask =
1247 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1248 		int reo_status_ring_mask =
1249 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1250 		int rxdma2host_ring_mask =
1251 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1252 		int host2rxdma_ring_mask =
1253 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1254 
1255 
1256 		soc->intr_ctx[i].dp_intr_id = i;
1257 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1258 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1259 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1260 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1261 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1262 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1263 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1264 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1265 
1266 		soc->intr_ctx[i].soc = soc;
1267 
1268 		num_irq = 0;
1269 
1270 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1271 					       &num_irq);
1272 
1273 		ret = hif_register_ext_group(soc->hif_handle,
1274 				num_irq, irq_id_map, dp_service_srngs,
1275 				&soc->intr_ctx[i], "dp_intr",
1276 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1277 
1278 		if (ret) {
1279 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1280 			FL("failed, ret = %d"), ret);
1281 
1282 			return QDF_STATUS_E_FAILURE;
1283 		}
1284 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1285 	}
1286 
1287 	hif_configure_ext_group_interrupts(soc->hif_handle);
1288 
1289 	return QDF_STATUS_SUCCESS;
1290 }
1291 
1292 /*
1293  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1294  * @txrx_soc: DP SOC handle
1295  *
1296  * Return: void
1297  */
1298 static void dp_soc_interrupt_detach(void *txrx_soc)
1299 {
1300 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1301 	int i;
1302 
1303 	if (soc->intr_mode == DP_INTR_POLL) {
1304 		qdf_timer_stop(&soc->int_timer);
1305 		qdf_timer_free(&soc->int_timer);
1306 	} else {
1307 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1308 	}
1309 
1310 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1311 		soc->intr_ctx[i].tx_ring_mask = 0;
1312 		soc->intr_ctx[i].rx_ring_mask = 0;
1313 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1314 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1315 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1316 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1317 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1318 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1319 
1320 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1321 	}
1322 }
1323 
1324 #define AVG_MAX_MPDUS_PER_TID 128
1325 #define AVG_TIDS_PER_CLIENT 2
1326 #define AVG_FLOWS_PER_TID 2
1327 #define AVG_MSDUS_PER_FLOW 128
1328 #define AVG_MSDUS_PER_MPDU 4
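/*
 * Sizing example for dp_hw_link_desc_pool_setup() below, assuming
 * max_clients = 64 and (hypothetically) 2 MPDUs per link descriptor:
 *	num_mpdu_link_descs = (64 * 2 * 128) / 2 = 8192
 * The other terms are computed the same way and the sum is rounded up to the
 * next power of two before the descriptor banks are allocated.
 */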
1329 
1330 /*
1331  * Allocate and setup link descriptor pool that will be used by HW for
1332  * various link and queue descriptors and managed by WBM
1333  */
1334 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1335 {
1336 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1337 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1338 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1339 	uint32_t num_mpdus_per_link_desc =
1340 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1341 	uint32_t num_msdus_per_link_desc =
1342 		hal_num_msdus_per_link_desc(soc->hal_soc);
1343 	uint32_t num_mpdu_links_per_queue_desc =
1344 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1345 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1346 	uint32_t total_link_descs, total_mem_size;
1347 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1348 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1349 	uint32_t num_link_desc_banks;
1350 	uint32_t last_bank_size = 0;
1351 	uint32_t entry_size, num_entries;
1352 	int i;
1353 	uint32_t desc_id = 0;
1354 
1355 	/* Only Tx queue descriptors are allocated from the common link descriptor
1356 	 * pool. Rx queue descriptors (REO queue extension descriptors) are not
1357 	 * included here because they are expected to be allocated contiguously
1358 	 * with the REO queue descriptors.
1359 	 */
1360 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1361 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1362 
1363 	num_mpdu_queue_descs = num_mpdu_link_descs /
1364 		num_mpdu_links_per_queue_desc;
1365 
1366 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1367 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1368 		num_msdus_per_link_desc;
1369 
1370 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1371 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1372 
1373 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1374 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1375 
1376 	/* Round up to power of 2 */
1377 	total_link_descs = 1;
1378 	while (total_link_descs < num_entries)
1379 		total_link_descs <<= 1;
1380 
1381 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1382 		FL("total_link_descs: %u, link_desc_size: %d"),
1383 		total_link_descs, link_desc_size);
1384 	total_mem_size =  total_link_descs * link_desc_size;
1385 
1386 	total_mem_size += link_desc_align;
1387 
1388 	if (total_mem_size <= max_alloc_size) {
1389 		num_link_desc_banks = 0;
1390 		last_bank_size = total_mem_size;
1391 	} else {
1392 		num_link_desc_banks = (total_mem_size) /
1393 			(max_alloc_size - link_desc_align);
1394 		last_bank_size = total_mem_size %
1395 			(max_alloc_size - link_desc_align);
1396 	}
1397 
1398 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1399 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1400 		total_mem_size, num_link_desc_banks);
1401 
1402 	for (i = 0; i < num_link_desc_banks; i++) {
1403 		soc->link_desc_banks[i].base_vaddr_unaligned =
1404 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1405 			max_alloc_size,
1406 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1407 		soc->link_desc_banks[i].size = max_alloc_size;
1408 
1409 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1410 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1411 			((unsigned long)(
1412 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1413 			link_desc_align));
1414 
1415 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1416 			soc->link_desc_banks[i].base_paddr_unaligned) +
1417 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1418 			(unsigned long)(
1419 			soc->link_desc_banks[i].base_vaddr_unaligned));
1420 
1421 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1422 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1423 				FL("Link descriptor memory alloc failed"));
1424 			goto fail;
1425 		}
1426 	}
1427 
1428 	if (last_bank_size) {
1429 		/* Allocate last bank in case total memory required is not exact
1430 		 * multiple of max_alloc_size
1431 		 */
1432 		soc->link_desc_banks[i].base_vaddr_unaligned =
1433 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1434 			last_bank_size,
1435 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1436 		soc->link_desc_banks[i].size = last_bank_size;
1437 
1438 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1439 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1440 			((unsigned long)(
1441 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1442 			link_desc_align));
1443 
1444 		soc->link_desc_banks[i].base_paddr =
1445 			(unsigned long)(
1446 			soc->link_desc_banks[i].base_paddr_unaligned) +
1447 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1448 			(unsigned long)(
1449 			soc->link_desc_banks[i].base_vaddr_unaligned));
1450 	}
1451 
1452 
1453 	/* Allocate and setup link descriptor idle list for HW internal use */
1454 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1455 	total_mem_size = entry_size * total_link_descs;
1456 
1457 	if (total_mem_size <= max_alloc_size) {
1458 		void *desc;
1459 
1460 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1461 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1462 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1463 				FL("Link desc idle ring setup failed"));
1464 			goto fail;
1465 		}
1466 
1467 		hal_srng_access_start_unlocked(soc->hal_soc,
1468 			soc->wbm_idle_link_ring.hal_srng);
1469 
1470 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1471 			soc->link_desc_banks[i].base_paddr; i++) {
1472 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1473 				((unsigned long)(
1474 				soc->link_desc_banks[i].base_vaddr) -
1475 				(unsigned long)(
1476 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1477 				/ link_desc_size;
1478 			unsigned long paddr = (unsigned long)(
1479 				soc->link_desc_banks[i].base_paddr);
1480 
1481 			while (num_entries && (desc = hal_srng_src_get_next(
1482 				soc->hal_soc,
1483 				soc->wbm_idle_link_ring.hal_srng))) {
1484 				hal_set_link_desc_addr(desc,
1485 					LINK_DESC_COOKIE(desc_id, i), paddr);
1486 				num_entries--;
1487 				desc_id++;
1488 				paddr += link_desc_size;
1489 			}
1490 		}
1491 		hal_srng_access_end_unlocked(soc->hal_soc,
1492 			soc->wbm_idle_link_ring.hal_srng);
1493 	} else {
1494 		uint32_t num_scatter_bufs;
1495 		uint32_t num_entries_per_buf;
1496 		uint32_t rem_entries;
1497 		uint8_t *scatter_buf_ptr;
1498 		uint16_t scatter_buf_num;
1499 
1500 		soc->wbm_idle_scatter_buf_size =
1501 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1502 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1503 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1504 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1505 					soc->hal_soc, total_mem_size,
1506 					soc->wbm_idle_scatter_buf_size);
1507 
1508 		for (i = 0; i < num_scatter_bufs; i++) {
1509 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
1510 				qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1511 				soc->wbm_idle_scatter_buf_size,
1512 				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
1513 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
1514 				QDF_TRACE(QDF_MODULE_ID_DP,
1515 					QDF_TRACE_LEVEL_ERROR,
1516 					FL("Scatter list memory alloc failed"));
1517 				goto fail;
1518 			}
1519 		}
1520 
1521 		/* Populate idle list scatter buffers with link descriptor
1522 		 * pointers
1523 		 */
1524 		scatter_buf_num = 0;
1525 		scatter_buf_ptr = (uint8_t *)(
1526 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
1527 		rem_entries = num_entries_per_buf;
1528 
1529 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1530 			soc->link_desc_banks[i].base_paddr; i++) {
1531 			uint32_t num_link_descs =
1532 				(soc->link_desc_banks[i].size -
1533 				((unsigned long)(
1534 				soc->link_desc_banks[i].base_vaddr) -
1535 				(unsigned long)(
1536 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1537 				/ link_desc_size;
1538 			unsigned long paddr = (unsigned long)(
1539 				soc->link_desc_banks[i].base_paddr);
1540 
1541 			while (num_link_descs) {
1542 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
1543 					LINK_DESC_COOKIE(desc_id, i), paddr);
1544 				num_link_descs--;
1545 				desc_id++;
1546 				paddr += link_desc_size;
1547 				rem_entries--;
1548 				if (rem_entries) {
1549 					scatter_buf_ptr += entry_size;
1550 				} else {
1551 					rem_entries = num_entries_per_buf;
1552 					scatter_buf_num++;
1553 
1554 					if (scatter_buf_num >= num_scatter_bufs)
1555 						break;
1556 
1557 					scatter_buf_ptr = (uint8_t *)(
1558 						soc->wbm_idle_scatter_buf_base_vaddr[
1559 						scatter_buf_num]);
1560 				}
1561 			}
1562 		}
1563 		/* Setup link descriptor idle list in HW */
1564 		hal_setup_link_idle_list(soc->hal_soc,
1565 			soc->wbm_idle_scatter_buf_base_paddr,
1566 			soc->wbm_idle_scatter_buf_base_vaddr,
1567 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
1568 			(uint32_t)(scatter_buf_ptr -
1569 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
1570 			scatter_buf_num-1])), total_link_descs);
1571 	}
1572 	return 0;
1573 
1574 fail:
1575 	if (soc->wbm_idle_link_ring.hal_srng) {
1576 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1577 			WBM_IDLE_LINK, 0);
1578 	}
1579 
1580 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1581 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1582 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1583 				soc->wbm_idle_scatter_buf_size,
1584 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1585 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1586 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1587 		}
1588 	}
1589 
1590 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1591 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1592 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1593 				soc->link_desc_banks[i].size,
1594 				soc->link_desc_banks[i].base_vaddr_unaligned,
1595 				soc->link_desc_banks[i].base_paddr_unaligned,
1596 				0);
1597 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1598 		}
1599 	}
1600 	return QDF_STATUS_E_FAILURE;
1601 }
1602 
1603 /*
1604  * Free link descriptor pool that was set up for HW use
1605  */
1606 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
1607 {
1608 	int i;
1609 
1610 	if (soc->wbm_idle_link_ring.hal_srng) {
1611 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1612 			WBM_IDLE_LINK, 0);
1613 	}
1614 
1615 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1616 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1617 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1618 				soc->wbm_idle_scatter_buf_size,
1619 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1620 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1621 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1622 		}
1623 	}
1624 
1625 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1626 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1627 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1628 				soc->link_desc_banks[i].size,
1629 				soc->link_desc_banks[i].base_vaddr_unaligned,
1630 				soc->link_desc_banks[i].base_paddr_unaligned,
1631 				0);
1632 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1633 		}
1634 	}
1635 }
1636 
1637 /* TODO: Following should be configurable */
1638 #define WBM_RELEASE_RING_SIZE 64
1639 #define TCL_CMD_RING_SIZE 32
1640 #define TCL_STATUS_RING_SIZE 32
1641 #if defined(QCA_WIFI_QCA6290)
1642 #define REO_DST_RING_SIZE 1024
1643 #else
1644 #define REO_DST_RING_SIZE 2048
1645 #endif
1646 #define REO_REINJECT_RING_SIZE 32
1647 #define RX_RELEASE_RING_SIZE 1024
1648 #define REO_EXCEPTION_RING_SIZE 128
1649 #define REO_CMD_RING_SIZE 64
1650 #define REO_STATUS_RING_SIZE 128
1651 #define RXDMA_BUF_RING_SIZE 1024
1652 #define RXDMA_REFILL_RING_SIZE 4096
1653 #define RXDMA_MONITOR_BUF_RING_SIZE 4096
1654 #define RXDMA_MONITOR_DST_RING_SIZE 2048
1655 #define RXDMA_MONITOR_STATUS_RING_SIZE 1024
1656 #define RXDMA_MONITOR_DESC_RING_SIZE 4096
1657 #define RXDMA_ERR_DST_RING_SIZE 1024
1658 
1659 /*
1660  * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
1661  * @soc: Datapath SOC handle
1662  *
1663  * This is a timer function used to age out stale AST nodes from
1664  * AST table
1665  */
1666 #ifdef FEATURE_WDS
1667 static void dp_wds_aging_timer_fn(void *soc_hdl)
1668 {
1669 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
1670 	struct dp_pdev *pdev;
1671 	struct dp_vdev *vdev;
1672 	struct dp_peer *peer;
1673 	struct dp_ast_entry *ase, *temp_ase;
1674 	int i;
1675 
1676 	qdf_spin_lock_bh(&soc->ast_lock);
1677 
1678 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1679 		pdev = soc->pdev_list[i];
1680 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1681 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1682 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
1683 					/*
1684 					 * Do not expire static ast entries
1685 					 * and HM WDS entries
1686 					 */
1687 					if (ase->type ==
1688 						CDP_TXRX_AST_TYPE_STATIC ||
1689 						ase->type ==
1690 						CDP_TXRX_AST_TYPE_WDS_HM)
1691 						continue;
1692 
1693 					if (ase->is_active) {
1694 						ase->is_active = FALSE;
1695 						continue;
1696 					}
1697 
1698 					DP_STATS_INC(soc, ast.aged_out, 1);
1699 					dp_peer_del_ast(soc, ase);
1700 				}
1701 			}
1702 		}
1703 
1704 	}
1705 
1706 	qdf_spin_unlock_bh(&soc->ast_lock);
1707 
1708 	if (qdf_atomic_read(&soc->cmn_init_done))
1709 		qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1710 }
1711 
1712 
1713 /*
1714  * dp_soc_wds_attach() - Setup WDS timer and AST table
1715  * @soc:		Datapath SOC handle
1716  *
1717  * Return: None
1718  */
1719 static void dp_soc_wds_attach(struct dp_soc *soc)
1720 {
1721 	qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
1722 			dp_wds_aging_timer_fn, (void *)soc,
1723 			QDF_TIMER_TYPE_WAKE_APPS);
1724 
1725 	qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1726 }
1727 
1728 /*
1729  * dp_soc_wds_detach() - Detach WDS data structures and timers
1730  * @txrx_soc: DP SOC handle
1731  *
1732  * Return: None
1733  */
1734 static void dp_soc_wds_detach(struct dp_soc *soc)
1735 {
1736 	qdf_timer_stop(&soc->wds_aging_timer);
1737 	qdf_timer_free(&soc->wds_aging_timer);
1738 }
1739 #else
1740 static void dp_soc_wds_attach(struct dp_soc *soc)
1741 {
1742 }
1743 
1744 static void dp_soc_wds_detach(struct dp_soc *soc)
1745 {
1746 }
1747 #endif
1748 
1749 /*
1750  * dp_soc_reset_cpu_ring_map() - Reset the CPU ring map
1751  * @soc: Datapath SOC handle
1752  *
1753  * This API resets the default CPU ring map
1754  */
1755 
1756 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
1757 {
1758 	uint8_t i;
1759 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1760 
1761 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
1762 		if (nss_config == 1) {
1763 			/*
1764 			 * Setting Tx ring map for one nss offloaded radio
1765 			 */
1766 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
1767 		} else if (nss_config == 2) {
1768 			/*
1769 			 * Setting Tx ring for two nss offloaded radios
1770 			 */
1771 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
1772 		} else {
1773 			/*
1774 			 * Setting Tx ring map for all nss offloaded radios
1775 			 */
1776 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
1777 		}
1778 	}
1779 }
1780 
1781 /*
1782  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
1783  * @soc - DP SOC handle
1784  * @ring_type - ring type
1785  * @ring_num - ring number
1786  *
1787  * Return: non-zero if the ring is offloaded to NSS, 0 otherwise
1788  */
1789 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
1790 {
1791 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1792 	uint8_t status = 0;
1793 
1794 	switch (ring_type) {
1795 	case WBM2SW_RELEASE:
1796 	case REO_DST:
1797 	case RXDMA_BUF:
1798 		status = ((nss_config) & (1 << ring_num));
1799 		break;
1800 	default:
1801 		break;
1802 	}
1803 
1804 	return status;
1805 }
1806 
1807 /*
1808  * dp_soc_reset_intr_mask() - reset interrupt mask
1809  * @soc - DP SOC handle
1810  *
1811  * Return: void
1812  */
1813 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
1814 {
1815 	uint8_t j;
1816 	int *grp_mask = NULL;
1817 	int group_number, mask, num_ring;
1818 
1819 	/* number of tx ring */
1820 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
1821 
1822 	/*
1823 	 * group mask for tx completion  ring.
1824 	 */
1825 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1826 
1827 	/* loop and reset the mask for only offloaded ring */
1828 	for (j = 0; j < num_ring; j++) {
1829 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
1830 			continue;
1831 		}
1832 
1833 		/*
1834 		 * Group number corresponding to tx offloaded ring.
1835 		 */
1836 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1837 		if (group_number < 0) {
1838 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1839 					FL("ring not part of any group; ring_type: %d, ring_num %d"),
1840 					WBM2SW_RELEASE, j);
1841 			return;
1842 		}
1843 
1844 		/* reset the tx mask for offloaded ring */
1845 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
1846 		mask &= (~(1 << j));
1847 
1848 		/*
1849 		 * reset the interrupt mask for offloaded ring.
1850 		 */
1851 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1852 	}
1853 
1854 	/* number of rx rings */
1855 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
1856 
1857 	/*
1858 	 * group mask for reo destination ring.
1859 	 */
1860 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1861 
1862 	/* loop and reset the mask for only offloaded ring */
1863 	for (j = 0; j < num_ring; j++) {
1864 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
1865 			continue;
1866 		}
1867 
1868 		/*
1869 		 * Group number corresponding to rx offloaded ring.
1870 		 */
1871 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1872 		if (group_number < 0) {
1873 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1874 					FL("ring not part of any group; ring_type: %d, ring_num %d"),
1875 					REO_DST, j);
1876 			return;
1877 		}
1878 
1879 		/* reset the rx mask for the offloaded ring */
1880 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
1881 		mask &= (~(1 << j));
1882 
1883 		/*
1884 		 * clear the interrupt mask bit for the rx offloaded ring.
1885 		 */
1886 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1887 	}
1888 
1889 	/*
1890 	 * group mask for Rx buffer refill ring
1891 	 */
1892 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1893 
1894 	/* loop and reset the mask for only offloaded ring */
1895 	for (j = 0; j < MAX_PDEV_CNT; j++) {
1896 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
1897 			continue;
1898 		}
1899 
1900 		/*
1901 		 * Group number corresponding to rx offloaded ring.
1902 		 */
1903 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1904 		if (group_number < 0) {
1905 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1906 					FL("ring not part of any group; ring_type: %d, ring_num %d"),
1907 					RXDMA_BUF, j);
1908 			return;
1909 		}
1910 
1911 		/* reset the host2rxdma mask for the offloaded ring */
1912 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1913 				group_number);
1914 		mask &= (~(1 << j));
1915 
1916 		/*
1917 		 * clear the interrupt mask bit for the offloaded refill ring.
1918 		 */
1919 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1920 			group_number, mask);
1921 	}
1922 }
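
/*
 * Example (editorial note): with nss_config = 0x3 (both radios offloaded),
 * the loops above clear the tx, rx and host2rxdma mask bits for rings 0 and
 * 1 in their interrupt groups, so the host stops servicing those rings.
 */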
1923 
1924 #ifdef IPA_OFFLOAD
1925 /**
1926  * dp_reo_remap_config() - configure reo remap register value based
1927  *                         on nss configuration.
1928  *		based on the offload_radio value, one of the remap
1929  *		configurations below gets applied.
1930  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
1931  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
1932  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
1933  *		3 - both Radios handled by NSS (remap not required)
1934  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
1935  *
1936  * @remap1: output parameter indicates reo remap 1 register value
1937  * @remap2: output parameter indicates reo remap 2 register value
1938  * Return: bool type, true if remap is configured else false.
1939  */
1940 static bool dp_reo_remap_config(struct dp_soc *soc,
1941 				uint32_t *remap1,
1942 				uint32_t *remap2)
1943 {
1944 
1945 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
1946 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
1947 
1948 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
1949 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
1950 
1951 	return true;
1952 }
1953 #else
1954 static bool dp_reo_remap_config(struct dp_soc *soc,
1955 				uint32_t *remap1,
1956 				uint32_t *remap2)
1957 {
1958 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1959 
1960 	switch (offload_radio) {
1961 	case 0:
1962 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
1963 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
1964 			(0x3 << 18) | (0x4 << 21)) << 8;
1965 
1966 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
1967 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
1968 			(0x3 << 18) | (0x4 << 21)) << 8;
1969 		break;
1970 
1971 	case 1:
1972 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
1973 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
1974 			(0x2 << 18) | (0x3 << 21)) << 8;
1975 
1976 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
1977 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
1978 			(0x4 << 18) | (0x2 << 21)) << 8;
1979 		break;
1980 
1981 	case 2:
1982 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
1983 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
1984 			(0x1 << 18) | (0x3 << 21)) << 8;
1985 
1986 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
1987 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
1988 			(0x4 << 18) | (0x1 << 21)) << 8;
1989 		break;
1990 
1991 	case 3:
1992 		/* return false if both radios are offloaded to NSS */
1993 		return false;
1994 	}
1995 	return true;
1996 }
1997 #endif
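
/*
 * Note (editorial): each REO destination in the remap values above is a
 * 3-bit field packed at bit offsets 0, 3, ..., 21, and the packed pattern is
 * shifted left by 8 before being passed to hal_reo_setup(). For example, the
 * offload_radio == 0 case spreads flows round-robin over SW rings 1,2,3,4,
 * while offload_radio == 1 avoids ring 1 (presumably because that ring is
 * serviced by NSS).
 */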
1998 
1999 /*
2000  * dp_reo_frag_dst_set() - configure reo register to set the
2001  *                        fragment destination ring
2002  * @soc : Datapath soc
2003  * @frag_dst_ring : output parameter to set fragment destination ring
2004  *
2005  * Based on offload_radio, the fragment destination ring is selected as below:
2006  * 0 - TCL
2007  * 1 - SW1
2008  * 2 - SW2
2009  * 3 - SW3
2010  * 4 - SW4
2011  * 5 - Release
2012  * 6 - FW
2013  * 7 - alternate select
2014  *
2015  * return: void
2016  */
2017 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2018 {
2019 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2020 
2021 	switch (offload_radio) {
2022 	case 0:
2023 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2024 		break;
2025 	case 3:
2026 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2027 		break;
2028 	default:
2029 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2030 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2031 		break;
2032 	}
2033 }
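
/*
 * Note (editorial assumption): fragmented frames need host reassembly, which
 * is presumably why the host-owned case (offload_radio == 0) steers them to
 * the REO exception ring handled by the rx error/defrag path.
 */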
2034 
2035 /*
2036  * dp_soc_cmn_setup() - Common SoC level initialization
2037  * @soc:		Datapath SOC handle
2038  *
2039  * This is an internal function used to set up common SOC data structures,
2040  * to be called from PDEV attach after receiving HW mode capabilities from FW
2041  */
2042 static int dp_soc_cmn_setup(struct dp_soc *soc)
2043 {
2044 	int i;
2045 	struct hal_reo_params reo_params;
2046 	int tx_ring_size;
2047 	int tx_comp_ring_size;
2048 
2049 	if (qdf_atomic_read(&soc->cmn_init_done))
2050 		return 0;
2051 
2052 	if (dp_peer_find_attach(soc))
2053 		goto fail0;
2054 
2055 	if (dp_hw_link_desc_pool_setup(soc))
2056 		goto fail1;
2057 
2058 	/* Setup SRNG rings */
2059 	/* Common rings */
2060 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2061 		WBM_RELEASE_RING_SIZE)) {
2062 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2063 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2064 		goto fail1;
2065 	}
2066 
2067 
2068 	soc->num_tcl_data_rings = 0;
2069 	/* Tx data rings */
2070 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2071 		soc->num_tcl_data_rings =
2072 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2073 		tx_comp_ring_size =
2074 			wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx);
2075 		tx_ring_size =
2076 			wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
2077 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2078 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2079 				TCL_DATA, i, 0, tx_ring_size)) {
2080 				QDF_TRACE(QDF_MODULE_ID_DP,
2081 					QDF_TRACE_LEVEL_ERROR,
2082 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2083 				goto fail1;
2084 			}
2085 			/*
2086 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2087 			 * count
2088 			 */
2089 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2090 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
2091 				QDF_TRACE(QDF_MODULE_ID_DP,
2092 					QDF_TRACE_LEVEL_ERROR,
2093 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2094 				goto fail1;
2095 			}
2096 		}
2097 	} else {
2098 		/* This will be incremented during per pdev ring setup */
2099 		soc->num_tcl_data_rings = 0;
2100 	}
2101 
2102 	if (dp_tx_soc_attach(soc)) {
2103 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2104 				FL("dp_tx_soc_attach failed"));
2105 		goto fail1;
2106 	}
2107 
2108 	/* TCL command and status rings */
2109 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2110 		TCL_CMD_RING_SIZE)) {
2111 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2112 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2113 		goto fail1;
2114 	}
2115 
2116 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2117 		TCL_STATUS_RING_SIZE)) {
2118 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2119 			FL("dp_srng_setup failed for tcl_status_ring"));
2120 		goto fail1;
2121 	}
2122 
2123 
2124 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2125 	 * descriptors
2126 	 */
2127 
2128 	/* Rx data rings */
2129 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2130 		soc->num_reo_dest_rings =
2131 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2132 		QDF_TRACE(QDF_MODULE_ID_DP,
2133 			QDF_TRACE_LEVEL_ERROR,
2134 			FL("num_reo_dest_rings %d\n"), soc->num_reo_dest_rings);
2135 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2136 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2137 				i, 0, REO_DST_RING_SIZE)) {
2138 				QDF_TRACE(QDF_MODULE_ID_DP,
2139 					QDF_TRACE_LEVEL_ERROR,
2140 					FL("dp_srng_setup failed for reo_dest_ring[%d]"), i);
2141 				goto fail1;
2142 			}
2143 		}
2144 	} else {
2145 		/* This will be incremented during per pdev ring setup */
2146 		soc->num_reo_dest_rings = 0;
2147 	}
2148 
2149 	/* LMAC RxDMA to SW Rings configuration */
2150 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2151 		/* Only valid for MCL */
2152 		struct dp_pdev *pdev = soc->pdev_list[0];
2153 
2154 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2155 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2156 				RXDMA_DST, 0, i, RXDMA_ERR_DST_RING_SIZE)) {
2157 				QDF_TRACE(QDF_MODULE_ID_DP,
2158 					QDF_TRACE_LEVEL_ERROR,
2159 					FL("dp_srng_setup failed for rxdma_err_dst_ring"));
2160 				goto fail1;
2161 			}
2162 		}
2163 	}
2164 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2165 
2166 	/* REO reinjection ring */
2167 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2168 		REO_REINJECT_RING_SIZE)) {
2169 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2170 			FL("dp_srng_setup failed for reo_reinject_ring"));
2171 		goto fail1;
2172 	}
2173 
2174 
2175 	/* Rx release ring */
2176 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2177 		RX_RELEASE_RING_SIZE)) {
2178 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2179 			FL("dp_srng_setup failed for rx_rel_ring"));
2180 		goto fail1;
2181 	}
2182 
2183 
2184 	/* Rx exception ring */
2185 	if (dp_srng_setup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
2186 		MAX_REO_DEST_RINGS, REO_EXCEPTION_RING_SIZE)) {
2187 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2188 			FL("dp_srng_setup failed for reo_exception_ring"));
2189 		goto fail1;
2190 	}
2191 
2192 
2193 	/* REO command and status rings */
2194 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2195 		REO_CMD_RING_SIZE)) {
2196 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2197 			FL("dp_srng_setup failed for reo_cmd_ring"));
2198 		goto fail1;
2199 	}
2200 
2201 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2202 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2203 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2204 
2205 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2206 		REO_STATUS_RING_SIZE)) {
2207 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2208 			FL("dp_srng_setup failed for reo_status_ring"));
2209 		goto fail1;
2210 	}
2211 
2212 	qdf_spinlock_create(&soc->ast_lock);
2213 	dp_soc_wds_attach(soc);
2214 
2215 	/* Reset the cpu ring map if radio is NSS offloaded */
2216 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2217 		dp_soc_reset_cpu_ring_map(soc);
2218 		dp_soc_reset_intr_mask(soc);
2219 	}
2220 
2221 	/* Setup HW REO */
2222 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2223 
2224 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2225 
2226 		/*
2227 		 * Reo ring remap is not required if both radios
2228 		 * are offloaded to NSS
2229 		 */
2230 		if (!dp_reo_remap_config(soc,
2231 					&reo_params.remap1,
2232 					&reo_params.remap2))
2233 			goto out;
2234 
2235 		reo_params.rx_hash_enabled = true;
2236 	}
2237 
2238 	/* setup the global rx defrag waitlist */
2239 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2240 	soc->rx.defrag.timeout_ms =
2241 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
2242 	soc->rx.flags.defrag_timeout_check =
2243 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
2244 
2245 out:
2246 	/*
2247 	 * set the fragment destination ring
2248 	 */
2249 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2250 
2251 	hal_reo_setup(soc->hal_soc, &reo_params);
2252 
2253 	qdf_atomic_set(&soc->cmn_init_done, 1);
2254 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2255 	return 0;
2256 fail1:
2257 	/*
2258 	 * Cleanup will be done as part of soc_detach, which will
2259 	 * be called on pdev attach failure
2260 	 */
2261 fail0:
2262 	return QDF_STATUS_E_FAILURE;
2263 }
2264 
2265 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2266 
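/*
 * dp_lro_hash_setup() - send LRO/RX-hash toeplitz configuration to the target
 * @soc: Datapath SOC handle
 *
 * Generates random toeplitz seeds and, when LRO or RX hash is enabled,
 * pushes the configuration to the control path via the lro_hash_config
 * callback.
 */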
2267 static void dp_lro_hash_setup(struct dp_soc *soc)
2268 {
2269 	struct cdp_lro_hash_config lro_hash;
2270 
2271 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2272 		!wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2273 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2274 			 FL("LRO and RX hash are both disabled"));
2275 		return;
2276 	}
2277 
2278 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2279 
2280 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2281 		lro_hash.lro_enable = 1;
2282 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2283 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2284 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2285 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2286 	}
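
	/*
	 * Note (editorial): with tcp_flag set to ACK and tcp_flag_mask
	 * covering every TCP flag, only plain ACK segments are presumably
	 * treated as LRO eligible.
	 */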
2287 
2288 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
2289 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2290 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2291 		 LRO_IPV4_SEED_ARR_SZ));
2292 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2293 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2294 		 LRO_IPV6_SEED_ARR_SZ));
2295 
2296 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2297 		 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2298 		 lro_hash.lro_enable, lro_hash.tcp_flag,
2299 		 lro_hash.tcp_flag_mask);
2300 
2301 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2302 		 QDF_TRACE_LEVEL_ERROR,
2303 		 (void *)lro_hash.toeplitz_hash_ipv4,
2304 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2305 		 LRO_IPV4_SEED_ARR_SZ));
2306 
2307 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2308 		 QDF_TRACE_LEVEL_ERROR,
2309 		 (void *)lro_hash.toeplitz_hash_ipv6,
2310 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2311 		 LRO_IPV6_SEED_ARR_SZ));
2312 
2313 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2314 
2315 	if (soc->cdp_soc.ol_ops->lro_hash_config)
2316 		(void)soc->cdp_soc.ol_ops->lro_hash_config
2317 			(soc->ctrl_psoc, &lro_hash);
2318 }
2319 
2320 /*
2321 * dp_rxdma_ring_setup() - configure the RX DMA rings
2322 * @soc: data path SoC handle
2323 * @pdev: Physical device handle
2324 *
2325 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
2326 */
2327 #ifdef QCA_HOST2FW_RXBUF_RING
2328 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2329 	 struct dp_pdev *pdev)
2330 {
2331 	int max_mac_rings =
2332 		 wlan_cfg_get_num_mac_rings
2333 			(pdev->wlan_cfg_ctx);
2334 	int i;
2335 
2336 	for (i = 0; i < max_mac_rings; i++) {
2337 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2338 			 "%s: pdev_id %d mac_id %d\n",
2339 			 __func__, pdev->pdev_id, i);
2340 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2341 			 RXDMA_BUF, 1, i, RXDMA_BUF_RING_SIZE)) {
2342 			QDF_TRACE(QDF_MODULE_ID_DP,
2343 				 QDF_TRACE_LEVEL_ERROR,
2344 				 FL("failed rx mac ring setup"));
2345 			return QDF_STATUS_E_FAILURE;
2346 		}
2347 	}
2348 	return QDF_STATUS_SUCCESS;
2349 }
2350 #else
2351 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2352 	 struct dp_pdev *pdev)
2353 {
2354 	return QDF_STATUS_SUCCESS;
2355 }
2356 #endif
2357 
2358 /**
2359  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2360  * @pdev - DP_PDEV handle
2361  *
2362  * Return: void
2363  */
2364 static inline void
2365 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2366 {
2367 	uint8_t map_id;
2368 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2369 		qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
2370 				sizeof(default_dscp_tid_map));
2371 	}
2372 	for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
2373 		hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
2374 				pdev->dscp_tid_map[map_id],
2375 				map_id);
2376 	}
2377 }
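
/*
 * Note (editorial): every software map starts as a copy of
 * default_dscp_tid_map; only the first HAL_MAX_HW_DSCP_TID_MAPS maps are
 * programmed into hardware here.
 */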
2378 
2379 #ifdef QCA_SUPPORT_SON
2380 /**
2381  * dp_mark_peer_inact(): Update peer inactivity status
2382  * @peer_handle - datapath peer handle
2383  * @inactive - true to mark the peer inactive, false otherwise
2384  * Return: void
2385  */
2386 void dp_mark_peer_inact(void *peer_handle, bool inactive)
2387 {
2388 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2389 	struct dp_pdev *pdev;
2390 	struct dp_soc *soc;
2391 	bool inactive_old;
2392 
2393 	if (!peer)
2394 		return;
2395 
2396 	pdev = peer->vdev->pdev;
2397 	soc = pdev->soc;
2398 
2399 	inactive_old = peer->peer_bs_inact_flag == 1;
2400 	if (!inactive)
2401 		peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2402 	peer->peer_bs_inact_flag = inactive ? 1 : 0;
2403 
2404 	if (inactive_old != inactive) {
2405 		/**
2406 		 * Note: a node lookup can happen in RX datapath context
2407 		 * when a node changes from inactive to active (at most once
2408 		 * per inactivity timeout threshold)
2409 		 */
2410 		if (soc->cdp_soc.ol_ops->record_act_change) {
2411 			soc->cdp_soc.ol_ops->record_act_change(pdev->osif_pdev,
2412 					peer->mac_addr.raw, !inactive);
2413 		}
2414 	}
2415 }
2416 
2417 /**
2418  * dp_txrx_peer_find_inact_timeout_handler(): Inactivity timeout function
2419  *
2420  * Periodically checks the inactivity status
2421  */
2422 static os_timer_func(dp_txrx_peer_find_inact_timeout_handler)
2423 {
2424 	struct dp_pdev *pdev;
2425 	struct dp_vdev *vdev;
2426 	struct dp_peer *peer;
2427 	struct dp_soc *soc;
2428 	int i;
2429 
2430 	OS_GET_TIMER_ARG(soc, struct dp_soc *);
2431 
2432 	qdf_spin_lock(&soc->peer_ref_mutex);
2433 
2434 	for (i = 0; i < soc->pdev_count; i++) {
2435 	pdev = soc->pdev_list[i];
2436 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2437 		if (vdev->opmode != wlan_op_mode_ap)
2438 			continue;
2439 
2440 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2441 			if (!peer->authorize) {
2442 				/**
2443 				 * Inactivity check only interested in
2444 				 * The inactivity check is only interested
2445 				 * in connected (authorized) nodes
2446 				continue;
2447 			}
2448 			if (peer->peer_bs_inact > soc->pdev_bs_inact_reload) {
2449 				/**
2450 				 * This check ensures we do not wait extra long
2451 				 * due to the potential race condition
2452 				 */
2453 				peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2454 			}
2455 			if (peer->peer_bs_inact > 0) {
2456 				/* Do not let it wrap around */
2457 				peer->peer_bs_inact--;
2458 			}
2459 			if (peer->peer_bs_inact == 0)
2460 				dp_mark_peer_inact(peer, true);
2461 		}
2462 	}
2463 	}
2464 
2465 	qdf_spin_unlock(&soc->peer_ref_mutex);
2466 	qdf_timer_mod(&soc->pdev_bs_inact_timer,
2467 		      soc->pdev_bs_inact_interval * 1000);
2468 }
2469 
2470 
2471 /**
2472  * dp_free_inact_timer(): free inact timer
2473  * @soc - datapath SOC handle
2474  *
2475  * Return: void
2476  */
2477 void dp_free_inact_timer(struct dp_soc *soc)
2478 {
2479 	qdf_timer_free(&soc->pdev_bs_inact_timer);
2480 }
2481 #else
2482 
2483 void dp_mark_peer_inact(void *peer, bool inactive)
2484 {
2485 	return;
2486 }
2487 
2488 void dp_free_inact_timer(struct dp_soc *soc)
2489 {
2490 	return;
2491 }
2492 
2493 #endif
2494 
2495 #ifdef IPA_OFFLOAD
2496 /**
2497  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2498  * @soc: data path instance
2499  * @pdev: core txrx pdev context
2500  *
2501  * Return: QDF_STATUS_SUCCESS: success
2502  *         QDF_STATUS_E_RESOURCES: Error return
2503  */
2504 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2505 					   struct dp_pdev *pdev)
2506 {
2507 	/* Setup second Rx refill buffer ring */
2508 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2509 			  IPA_RX_REFILL_BUF_RING_IDX,
2510 			  pdev->pdev_id, RXDMA_REFILL_RING_SIZE)) {
2511 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2512 			FL("dp_srng_setup failed second rx refill ring"));
2513 		return QDF_STATUS_E_FAILURE;
2514 	}
2515 	return QDF_STATUS_SUCCESS;
2516 }
2517 
2518 /**
2519  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2520  * @soc: data path instance
2521  * @pdev: core txrx pdev context
2522  *
2523  * Return: void
2524  */
2525 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2526 					      struct dp_pdev *pdev)
2527 {
2528 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2529 			IPA_RX_REFILL_BUF_RING_IDX);
2530 }
2531 
2532 #else
2533 
2534 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2535 					   struct dp_pdev *pdev)
2536 {
2537 	return QDF_STATUS_SUCCESS;
2538 }
2539 
2540 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2541 					      struct dp_pdev *pdev)
2542 {
2543 }
2544 
2545 #endif
2546 
2547 /*
2548 * dp_pdev_attach_wifi3() - attach txrx pdev
2549 * @txrx_soc: Datapath SOC handle
2550 * @ctrl_pdev: Opaque PDEV object
2551 * @htc_handle: HTC handle for host-target interface
2552 * @qdf_osdev: QDF OS device
2553 * @pdev_id: PDEV ID
2554 *
2555 * Return: DP PDEV handle on success, NULL on failure
2556 */
2557 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
2558 	struct cdp_cfg *ctrl_pdev,
2559 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
2560 {
2561 	int tx_ring_size;
2562 	int tx_comp_ring_size;
2563 
2564 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2565 	struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2566 	int mac_id;
2567 
2568 	if (!pdev) {
2569 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2570 			FL("DP PDEV memory allocation failed"));
2571 		goto fail0;
2572 	}
2573 
2574 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach();
2575 
2576 	if (!pdev->wlan_cfg_ctx) {
2577 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2578 			FL("pdev cfg_attach failed"));
2579 
2580 		qdf_mem_free(pdev);
2581 		goto fail0;
2582 	}
2583 
2584 	/*
2585 	 * set nss pdev config based on soc config
2586 	 */
2587 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
2588 			(wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx) & (1 << pdev_id)));
2589 
2590 	pdev->soc = soc;
2591 	pdev->osif_pdev = ctrl_pdev;
2592 	pdev->pdev_id = pdev_id;
2593 	soc->pdev_list[pdev_id] = pdev;
2594 	soc->pdev_count++;
2595 
2596 	TAILQ_INIT(&pdev->vdev_list);
2597 	pdev->vdev_count = 0;
2598 
2599 	qdf_spinlock_create(&pdev->tx_mutex);
2600 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2601 	TAILQ_INIT(&pdev->neighbour_peers_list);
2602 
2603 	if (dp_soc_cmn_setup(soc)) {
2604 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2605 			FL("dp_soc_cmn_setup failed"));
2606 		goto fail1;
2607 	}
2608 
2609 	/* Setup per PDEV TCL rings if configured */
2610 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2611 		tx_ring_size =
2612 			wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
2613 		tx_comp_ring_size =
2614 			wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx);
2615 
2616 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
2617 			pdev_id, pdev_id, tx_ring_size)) {
2618 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2619 				FL("dp_srng_setup failed for tcl_data_ring"));
2620 			goto fail1;
2621 		}
2622 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
2623 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
2624 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2625 				FL("dp_srng_setup failed for tx_comp_ring"));
2626 			goto fail1;
2627 		}
2628 		soc->num_tcl_data_rings++;
2629 	}
2630 
2631 	/* Tx specific init */
2632 	if (dp_tx_pdev_attach(pdev)) {
2633 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2634 			FL("dp_tx_pdev_attach failed"));
2635 		goto fail1;
2636 	}
2637 
2638 	/* Setup per PDEV REO rings if configured */
2639 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2640 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
2641 			pdev_id, pdev_id, REO_DST_RING_SIZE)) {
2642 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2643 				FL("dp_srng_setup failed for reo_dest_ring"));
2644 			goto fail1;
2645 		}
2646 		soc->num_reo_dest_rings++;
2647 
2648 	}
2649 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
2650 		RXDMA_REFILL_RING_SIZE)) {
2651 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2652 			 FL("dp_srng_setup failed rx refill ring"));
2653 		goto fail1;
2654 	}
2655 
2656 	if (dp_rxdma_ring_setup(soc, pdev)) {
2657 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2658 			 FL("RXDMA ring config failed"));
2659 		goto fail1;
2660 	}
2661 
2662 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2663 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
2664 
2665 		if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2666 			RXDMA_MONITOR_BUF, 0, mac_for_pdev,
2667 			RXDMA_MONITOR_BUF_RING_SIZE)) {
2668 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2669 			  FL("dp_srng_setup failed for rxdma_mon_buf_ring"));
2670 			goto fail1;
2671 		}
2672 
2673 		if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2674 			RXDMA_MONITOR_DST, 0, mac_for_pdev,
2675 			RXDMA_MONITOR_DST_RING_SIZE)) {
2676 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2677 			  FL("dp_srng_setup failed for rxdma_mon_dst_ring"));
2678 			goto fail1;
2679 		}
2680 
2681 
2682 		if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2683 			RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
2684 			RXDMA_MONITOR_STATUS_RING_SIZE)) {
2685 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2686 			 FL("dp_srng_setup failed for rxdma_mon_status_ring"));
2687 			goto fail1;
2688 		}
2689 
2690 		if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2691 			RXDMA_MONITOR_DESC, 0, mac_for_pdev,
2692 			RXDMA_MONITOR_DESC_RING_SIZE)) {
2693 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2694 			  "dp_srng_setup failed for rxdma_mon_desc_ring\n");
2695 			goto fail1;
2696 		}
2697 	}
2698 
2699 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2700 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
2701 				  0, pdev_id, RXDMA_ERR_DST_RING_SIZE)) {
2702 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2703 				FL("dp_srng_setup failed for rxdma_err_dst_ring"));
2704 			goto fail1;
2705 		}
2706 	}
2707 
2708 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
2709 		goto fail1;
2710 
2711 	if (dp_ipa_ring_resource_setup(soc, pdev))
2712 		goto fail1;
2713 
2714 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
2715 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2716 			FL("dp_ipa_uc_attach failed"));
2717 		goto fail1;
2718 	}
2719 
2720 	/* Rx specific init */
2721 	if (dp_rx_pdev_attach(pdev)) {
2722 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2723 			FL("dp_rx_pdev_attach failed"));
2724 		goto fail0;
2725 	}
2726 	DP_STATS_INIT(pdev);
2727 
2728 	/* Monitor filter init */
2729 	pdev->mon_filter_mode = MON_FILTER_ALL;
2730 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
2731 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
2732 	pdev->fp_data_filter = FILTER_DATA_ALL;
2733 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
2734 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
2735 	pdev->mo_data_filter = FILTER_DATA_ALL;
2736 
2737 #ifndef CONFIG_WIN
2738 	/* MCL */
2739 	dp_local_peer_id_pool_init(pdev);
2740 #endif
2741 	dp_dscp_tid_map_setup(pdev);
2742 
2743 	/* Rx monitor mode specific init */
2744 	if (dp_rx_pdev_mon_attach(pdev)) {
2745 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2746 				"dp_rx_pdev_mon_attach failed\n");
2747 		goto fail1;
2748 	}
2749 
2750 	if (dp_wdi_event_attach(pdev)) {
2751 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2752 				"dp_wdi_event_attach failed\n");
2753 		goto fail1;
2754 	}
2755 
2756 	/* set the reo destination during initialization */
2757 	pdev->reo_dest = pdev->pdev_id + 1;
2758 
2759 	/*
2760 	 * initialize ppdu tlv list
2761 	 */
2762 	TAILQ_INIT(&pdev->ppdu_info_list);
2763 	pdev->tlv_count = 0;
2764 	pdev->list_depth = 0;
2765 
2766 	return (struct cdp_pdev *)pdev;
2767 
2768 fail1:
2769 	dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
2770 
2771 fail0:
2772 	return NULL;
2773 }
2774 
2775 /*
2776 * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
2777 * @soc: data path SoC handle
2778 * @pdev: Physical device handle
2779 *
2780 * Return: void
2781 */
2782 #ifdef QCA_HOST2FW_RXBUF_RING
2783 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
2784 	 struct dp_pdev *pdev)
2785 {
2786 	int max_mac_rings =
2787 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
2788 	int i;
2789 
2790 	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
2791 				max_mac_rings : MAX_RX_MAC_RINGS;
2792 	for (i = 0; i < max_mac_rings; i++)
2793 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
2794 			 RXDMA_BUF, 1);
2795 
2796 	qdf_timer_free(&soc->mon_reap_timer);
2797 }
2798 #else
2799 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
2800 	 struct dp_pdev *pdev)
2801 {
2802 }
2803 #endif
2804 
2805 /*
2806  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
2807  * @pdev: device object
2808  *
2809  * Return: void
2810  */
2811 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
2812 {
2813 	struct dp_neighbour_peer *peer = NULL;
2814 	struct dp_neighbour_peer *temp_peer = NULL;
2815 
2816 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
2817 			neighbour_peer_list_elem, temp_peer) {
2818 		/* delete this peer from the list */
2819 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
2820 				peer, neighbour_peer_list_elem);
2821 		qdf_mem_free(peer);
2822 	}
2823 
2824 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
2825 }
2826 
2827 /**
2828 * dp_htt_ppdu_stats_detach() - detach stats resources
2829 * @pdev: Datapath PDEV handle
2830 *
2831 * Return: void
2832 */
2833 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
2834 {
2835 	struct ppdu_info *ppdu_info, *ppdu_info_next;
2836 
2837 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
2838 			ppdu_info_list_elem, ppdu_info_next) {
2839 		if (!ppdu_info)
2840 			break;
2841 		qdf_assert_always(ppdu_info->nbuf);
2842 		qdf_nbuf_free(ppdu_info->nbuf);
2843 		qdf_mem_free(ppdu_info);
2844 	}
2845 }
2846 
2847 /*
2848 * dp_pdev_detach_wifi3() - detach txrx pdev
2849 * @txrx_pdev: Datapath PDEV handle
2850 * @force: Force detach
2851 *
2852 */
2853 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
2854 {
2855 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
2856 	struct dp_soc *soc = pdev->soc;
2857 	qdf_nbuf_t curr_nbuf, next_nbuf;
2858 	int mac_id;
2859 
2860 	dp_wdi_event_detach(pdev);
2861 
2862 	dp_tx_pdev_detach(pdev);
2863 
2864 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2865 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
2866 			TCL_DATA, pdev->pdev_id);
2867 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
2868 			WBM2SW_RELEASE, pdev->pdev_id);
2869 	}
2870 
2871 	dp_pktlogmod_exit(pdev);
2872 
2873 	dp_rx_pdev_detach(pdev);
2874 
2875 	dp_rx_pdev_mon_detach(pdev);
2876 
2877 	dp_neighbour_peers_detach(pdev);
2878 	qdf_spinlock_destroy(&pdev->tx_mutex);
2879 
2880 	dp_ipa_uc_detach(soc, pdev);
2881 
2882 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
2883 
2884 	/* Cleanup per PDEV REO rings if configured */
2885 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2886 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
2887 			REO_DST, pdev->pdev_id);
2888 	}
2889 
2890 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
2891 
2892 	dp_rxdma_ring_cleanup(soc, pdev);
2893 
2894 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2895 		dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2896 			RXDMA_MONITOR_BUF, 0);
2897 
2898 		dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2899 			RXDMA_MONITOR_DST, 0);
2900 
2901 		dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2902 			RXDMA_MONITOR_STATUS, 0);
2903 
2904 		dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2905 			RXDMA_MONITOR_DESC, 0);
2906 
2907 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
2908 			RXDMA_DST, 0);
2909 	}
2910 
2911 	curr_nbuf = pdev->invalid_peer_head_msdu;
2912 	while (curr_nbuf) {
2913 		next_nbuf = qdf_nbuf_next(curr_nbuf);
2914 		qdf_nbuf_free(curr_nbuf);
2915 		curr_nbuf = next_nbuf;
2916 	}
2917 
2918 	dp_htt_ppdu_stats_detach(pdev);
2919 
2920 	soc->pdev_list[pdev->pdev_id] = NULL;
2921 	soc->pdev_count--;
2922 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
2923 	qdf_mem_free(pdev->dp_txrx_handle);
2924 	qdf_mem_free(pdev);
2925 }
2926 
2927 /*
2928  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
2929  * @soc: DP SOC handle
2930  */
2931 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
2932 {
2933 	struct reo_desc_list_node *desc;
2934 	struct dp_rx_tid *rx_tid;
2935 
2936 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2937 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
2938 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
2939 		rx_tid = &desc->rx_tid;
2940 		qdf_mem_unmap_nbytes_single(soc->osdev,
2941 			rx_tid->hw_qdesc_paddr,
2942 			QDF_DMA_BIDIRECTIONAL,
2943 			rx_tid->hw_qdesc_alloc_size);
2944 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2945 		qdf_mem_free(desc);
2946 	}
2947 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2948 	qdf_list_destroy(&soc->reo_desc_freelist);
2949 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
2950 }
2951 
2952 /*
2953  * dp_soc_detach_wifi3() - Detach txrx SOC
2954  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
2955  */
2956 static void dp_soc_detach_wifi3(void *txrx_soc)
2957 {
2958 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2959 	int i;
2960 
2961 	qdf_atomic_set(&soc->cmn_init_done, 0);
2962 
2963 	qdf_flush_work(&soc->htt_stats.work);
2964 	qdf_disable_work(&soc->htt_stats.work);
2965 
2966 	/* Free pending htt stats messages */
2967 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
2968 
2969 	dp_free_inact_timer(soc);
2970 
2971 	for (i = 0; i < MAX_PDEV_CNT; i++) {
2972 		if (soc->pdev_list[i])
2973 			dp_pdev_detach_wifi3(
2974 				(struct cdp_pdev *)soc->pdev_list[i], 1);
2975 	}
2976 
2977 	dp_peer_find_detach(soc);
2978 
2979 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
2980 	 * SW descriptors
2981 	 */
2982 
2983 	/* Free the ring memories */
2984 	/* Common rings */
2985 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
2986 
2987 	dp_tx_soc_detach(soc);
2988 	/* Tx data rings */
2989 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2990 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2991 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
2992 				TCL_DATA, i);
2993 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
2994 				WBM2SW_RELEASE, i);
2995 		}
2996 	}
2997 
2998 	/* TCL command and status rings */
2999 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3000 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3001 
3002 	/* Rx data rings */
3003 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3004 		soc->num_reo_dest_rings =
3005 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3006 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3007 			/* TODO: Get number of rings and ring sizes
3008 			 * from wlan_cfg
3009 			 */
3010 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
3011 				REO_DST, i);
3012 		}
3013 	}
3014 	/* REO reinjection ring */
3015 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3016 
3017 	/* Rx release ring */
3018 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3019 
3020 	/* Rx exception ring */
3021 	/* TODO: Better to store ring_type and ring_num in
3022 	 * dp_srng during setup
3023 	 */
3024 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3025 
3026 	/* REO command and status rings */
3027 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3028 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
3029 	dp_hw_link_desc_pool_cleanup(soc);
3030 
3031 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
3032 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3033 
3034 	htt_soc_detach(soc->htt_handle);
3035 
3036 	dp_reo_cmdlist_destroy(soc);
3037 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3038 	dp_reo_desc_freelist_destroy(soc);
3039 
3040 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
3041 
3042 	dp_soc_wds_detach(soc);
3043 	qdf_spinlock_destroy(&soc->ast_lock);
3044 
3045 	qdf_mem_free(soc);
3046 }
3047 
3048 /*
3049  * dp_rxdma_ring_config() - configure the RX DMA rings
3050  *
3051  * This function is used to configure the MAC rings.
3052  * On MCL host provides buffers in Host2FW ring
3053  * FW refills (copies) buffers to the ring and updates
3054  * ring_idx in register
3055  *
3056  * @soc: data path SoC handle
3057  *
3058  * Return: void
3059  */
3060 #ifdef QCA_HOST2FW_RXBUF_RING
3061 static void dp_rxdma_ring_config(struct dp_soc *soc)
3062 {
3063 	int i;
3064 
3065 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3066 		struct dp_pdev *pdev = soc->pdev_list[i];
3067 
3068 		if (pdev) {
3069 			int mac_id;
3070 			bool dbs_enable = 0;
3071 			int max_mac_rings =
3072 				 wlan_cfg_get_num_mac_rings
3073 				(pdev->wlan_cfg_ctx);
3074 
3075 			htt_srng_setup(soc->htt_handle, 0,
3076 				 pdev->rx_refill_buf_ring.hal_srng,
3077 				 RXDMA_BUF);
3078 
3079 			if (pdev->rx_refill_buf_ring2.hal_srng)
3080 				htt_srng_setup(soc->htt_handle, 0,
3081 					pdev->rx_refill_buf_ring2.hal_srng,
3082 					RXDMA_BUF);
3083 
3084 			if (soc->cdp_soc.ol_ops->
3085 				is_hw_dbs_2x2_capable) {
3086 				dbs_enable = soc->cdp_soc.ol_ops->
3087 					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
3088 			}
3089 
3090 			if (dbs_enable) {
3091 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3092 				QDF_TRACE_LEVEL_ERROR,
3093 				FL("DBS enabled max_mac_rings %d\n"),
3094 					 max_mac_rings);
3095 			} else {
3096 				max_mac_rings = 1;
3097 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3098 					 QDF_TRACE_LEVEL_ERROR,
3099 					 FL("DBS disabled, max_mac_rings %d\n"),
3100 					 max_mac_rings);
3101 			}
3102 
3103 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3104 					 FL("pdev_id %d max_mac_rings %d\n"),
3105 					 pdev->pdev_id, max_mac_rings);
3106 
3107 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
3108 				int mac_for_pdev = dp_get_mac_id_for_pdev(
3109 							mac_id, pdev->pdev_id);
3110 
3111 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3112 					 QDF_TRACE_LEVEL_ERROR,
3113 					 FL("mac_id %d\n"), mac_for_pdev);
3114 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3115 					 pdev->rx_mac_buf_ring[mac_id]
3116 						.hal_srng,
3117 					 RXDMA_BUF);
3118 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3119 					pdev->rxdma_err_dst_ring[mac_id]
3120 						.hal_srng,
3121 					RXDMA_DST);
3122 
3123 				/* Configure monitor mode rings */
3124 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3125 				   pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3126 				   RXDMA_MONITOR_BUF);
3127 
3128 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3129 				   pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3130 				   RXDMA_MONITOR_DST);
3131 
3132 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3133 				  pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3134 				  RXDMA_MONITOR_STATUS);
3135 
3136 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3137 				  pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3138 				  RXDMA_MONITOR_DESC);
3139 			}
3140 		}
3141 	}
3142 
3143 	/*
3144 	 * Timer to reap rxdma status rings.
3145 	 * Needed until we enable ppdu end interrupts
3146 	 */
3147 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
3148 			dp_service_mon_rings, (void *)soc,
3149 			QDF_TIMER_TYPE_WAKE_APPS);
3150 	soc->reap_timer_init = 1;
3151 }
3152 #else
3153 /* This is only for WIN */
3154 static void dp_rxdma_ring_config(struct dp_soc *soc)
3155 {
3156 	int i;
3157 	int mac_id;
3158 
3159 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3160 		struct dp_pdev *pdev = soc->pdev_list[i];
3161 
3162 		if (pdev == NULL)
3163 			continue;
3164 
3165 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3166 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
3167 
3168 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3169 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
3170 
3171 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3172 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3173 				RXDMA_MONITOR_BUF);
3174 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3175 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3176 				RXDMA_MONITOR_DST);
3177 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3178 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3179 				RXDMA_MONITOR_STATUS);
3180 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3181 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3182 				RXDMA_MONITOR_DESC);
3183 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3184 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
3185 				RXDMA_DST);
3186 		}
3187 	}
3188 }
3189 #endif
3190 
3191 /*
3192  * dp_soc_attach_target_wifi3() - SOC initialization in the target
3193  * @txrx_soc: Datapath SOC handle
3194  */
3195 static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
3196 {
3197 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
3198 
3199 	htt_soc_attach_target(soc->htt_handle);
3200 
3201 	dp_rxdma_ring_config(soc);
3202 
3203 	DP_STATS_INIT(soc);
3204 
3205 	/* initialize work queue for stats processing */
3206 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
3207 
3208 	return 0;
3209 }
3210 
3211 /*
3212  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3213  * @txrx_soc: Datapath SOC handle
3214  */
3215 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
3216 {
3217 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3218 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3219 }
3220 /*
3221  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3222  * @txrx_soc: Datapath SOC handle
3223  * @nss_cfg: nss config
3224  */
3225 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3226 {
3227 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3228 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3229 
3230 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3231 
3232 	/*
3233 	 * TODO: mask these out based on the per-radio offload config
3234 	 */
3235 	if (config == dp_nss_cfg_dbdc) {
3236 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3237 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3238 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3239 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3240 	}
3241 
3242 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3243 				FL("nss-wifi<0> nss config is enabled"));
3244 }
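
/*
 * Note (editorial assumption): for the dbdc case above, the host Tx
 * descriptor pools are sized to zero presumably because transmit is owned
 * entirely by the NSS offload when both radios are offloaded.
 */
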
3245 /*
3246 * dp_vdev_attach_wifi3() - attach txrx vdev
3247 * @txrx_pdev: Datapath PDEV handle
3248 * @vdev_mac_addr: MAC address of the virtual interface
3249 * @vdev_id: VDEV Id
3250 * @wlan_op_mode: VDEV operating mode
3251 *
3252 * Return: DP VDEV handle on success, NULL on failure
3253 */
3254 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
3255 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
3256 {
3257 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3258 	struct dp_soc *soc = pdev->soc;
3259 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
3260 
3261 	if (!vdev) {
3262 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3263 			FL("DP VDEV memory allocation failed"));
3264 		goto fail0;
3265 	}
3266 
3267 	vdev->pdev = pdev;
3268 	vdev->vdev_id = vdev_id;
3269 	vdev->opmode = op_mode;
3270 	vdev->osdev = soc->osdev;
3271 
3272 	vdev->osif_rx = NULL;
3273 	vdev->osif_rsim_rx_decap = NULL;
3274 	vdev->osif_get_key = NULL;
3275 	vdev->osif_rx_mon = NULL;
3276 	vdev->osif_tx_free_ext = NULL;
3277 	vdev->osif_vdev = NULL;
3278 
3279 	vdev->delete.pending = 0;
3280 	vdev->safemode = 0;
3281 	vdev->drop_unenc = 1;
3282 	vdev->sec_type = cdp_sec_type_none;
3283 #ifdef notyet
3284 	vdev->filters_num = 0;
3285 #endif
3286 
3287 	qdf_mem_copy(
3288 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3289 
3290 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3291 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3292 	vdev->dscp_tid_map_id = 0;
3293 	vdev->mcast_enhancement_en = 0;
3294 
3295 	/* TODO: Initialize default HTT meta data that will be used in
3296 	 * TCL descriptors for packets transmitted from this VDEV
3297 	 */
3298 
3299 	TAILQ_INIT(&vdev->peer_list);
3300 
3301 	/* add this vdev into the pdev's list */
3302 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
3303 	pdev->vdev_count++;
3304 
3305 	dp_tx_vdev_attach(vdev);
3306 
3307 
3308 	if ((soc->intr_mode == DP_INTR_POLL) &&
3309 			wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
3310 		if (pdev->vdev_count == 1)
3311 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
3312 	}
3313 
3314 	dp_lro_hash_setup(soc);
3315 
3316 	/* LRO */
3317 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3318 		wlan_op_mode_sta == vdev->opmode)
3319 		vdev->lro_enable = true;
3320 
3321 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3322 		 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
3323 
3324 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3325 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
3326 	DP_STATS_INIT(vdev);
3327 
3328 	if (wlan_op_mode_sta == vdev->opmode)
3329 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
3330 							vdev->mac_addr.raw);
3331 
3332 	return (struct cdp_vdev *)vdev;
3333 
3334 fail0:
3335 	return NULL;
3336 }
3337 
3338 /**
3339  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
3340  * @vdev_handle: Datapath VDEV handle
3341  * @osif_vdev: OSIF vdev handle
3342  * @txrx_ops: Tx and Rx operations
3343  *
3344  * Return: void
3345  */
3346 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
3347 	void *osif_vdev,
3348 	struct ol_txrx_ops *txrx_ops)
3349 {
3350 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3351 	vdev->osif_vdev = osif_vdev;
3352 	vdev->osif_rx = txrx_ops->rx.rx;
3353 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
3354 	vdev->osif_get_key = txrx_ops->get_key;
3355 	vdev->osif_rx_mon = txrx_ops->rx.mon;
3356 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
3357 #ifdef notyet
3358 #if ATH_SUPPORT_WAPI
3359 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
3360 #endif
3361 #endif
3362 #ifdef UMAC_SUPPORT_PROXY_ARP
3363 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
3364 #endif
3365 	vdev->me_convert = txrx_ops->me_convert;
3366 
3367 	/* TODO: Enable the following once Tx code is integrated */
3368 	if (vdev->mesh_vdev)
3369 		txrx_ops->tx.tx = dp_tx_send_mesh;
3370 	else
3371 		txrx_ops->tx.tx = dp_tx_send;
3372 
3373 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
3374 
3375 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
3376 		"DP Vdev Register success");
3377 }
3378 
3379 /**
3380  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
3381  * @vdev: Datapath VDEV handle
3382  *
3383  * Return: void
3384  */
3385 static void dp_vdev_flush_peers(struct dp_vdev *vdev)
3386 {
3387 	struct dp_pdev *pdev = vdev->pdev;
3388 	struct dp_soc *soc = pdev->soc;
3389 	struct dp_peer *peer;
3390 	uint16_t *peer_ids;
3391 	uint8_t i = 0, j = 0;
3392 
3393 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
3394 	if (!peer_ids) {
3395 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3396 			"DP alloc failure - unable to flush peers");
3397 		return;
3398 	}
3399 
3400 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3401 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3402 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3403 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
3404 				if (j < soc->max_peers)
3405 					peer_ids[j++] = peer->peer_ids[i];
3406 	}
3407 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3408 
3409 	for (i = 0; i < j ; i++)
3410 		dp_rx_peer_unmap_handler(soc, peer_ids[i]);
3411 
3412 	qdf_mem_free(peer_ids);
3413 
3414 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3415 		FL("Flushed peers for vdev object %pK "), vdev);
3416 }
3417 
3418 /*
3419  * dp_vdev_detach_wifi3() - Detach txrx vdev
3420  * @txrx_vdev:		Datapath VDEV handle
3421  * @callback:		Callback OL_IF on completion of detach
3422  * @cb_context:	Callback context
3423  *
3424  */
3425 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
3426 	ol_txrx_vdev_delete_cb callback, void *cb_context)
3427 {
3428 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3429 	struct dp_pdev *pdev = vdev->pdev;
3430 	struct dp_soc *soc = pdev->soc;
3431 
3432 	/* preconditions */
3433 	qdf_assert(vdev);
3434 
3435 	/* remove the vdev from its parent pdev's list */
3436 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
3437 
3438 	if (wlan_op_mode_sta == vdev->opmode)
3439 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
3440 
3441 	/*
3442 	 * If Target is hung, flush all peers before detaching vdev
3443 	 * this will free all references held due to missing
3444 	 * unmap commands from Target
3445 	 */
3446 	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
3447 		dp_vdev_flush_peers(vdev);
3448 
3449 	/*
3450 	 * Use peer_ref_mutex while accessing peer_list, in case
3451 	 * a peer is in the process of being removed from the list.
3452 	 */
3453 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3454 	/* check that the vdev has no peers allocated */
3455 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
3456 		/* debug print - will be removed later */
3457 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
3458 			FL("not deleting vdev object %pK (%pM)"
3459 			" until deletion finishes for all its peers"),
3460 			vdev, vdev->mac_addr.raw);
3461 		/* indicate that the vdev needs to be deleted */
3462 		vdev->delete.pending = 1;
3463 		vdev->delete.callback = callback;
3464 		vdev->delete.context = cb_context;
3465 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3466 		return;
3467 	}
3468 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3469 
3470 	dp_tx_vdev_detach(vdev);
3471 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3472 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
3473 
3474 	qdf_mem_free(vdev);
3475 
3476 	if (callback)
3477 		callback(cb_context);
3478 }
3479 
3480 /*
3481  * dp_peer_create_wifi3() - attach txrx peer
3482  * @txrx_vdev: Datapath VDEV handle
3483  * @peer_mac_addr: Peer MAC address
3484  *
3485  * Return: DP peer handle on success, NULL on failure
3486  */
3487 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
3488 		uint8_t *peer_mac_addr)
3489 {
3490 	struct dp_peer *peer;
3491 	int i;
3492 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3493 	struct dp_pdev *pdev;
3494 	struct dp_soc *soc;
3495 
3496 	/* preconditions */
3497 	qdf_assert(vdev);
3498 	qdf_assert(peer_mac_addr);
3499 
3500 	pdev = vdev->pdev;
3501 	soc = pdev->soc;
3502 
3503 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr,
3504 					0, vdev->vdev_id);
3505 
3506 	if (peer) {
3507 		peer->delete_in_progress = false;
3508 		/*
3509 		 * On peer create, the peer ref count decrements since a new
3510 		 * peer is not created and the earlier reference is reused;
3511 		 * peer_unref_delete will take care of incrementing the count.
3512 		 */
3513 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
3514 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->osif_pdev,
3515 				vdev->vdev_id, peer->mac_addr.raw);
3516 		}
3517 
3518 		return (void *)peer;
3519 	}
3520 
3521 #ifdef notyet
3522 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
3523 		soc->mempool_ol_ath_peer);
3524 #else
3525 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
3526 #endif
3527 
3528 	if (!peer)
3529 		return NULL; /* failure */
3530 
3531 	qdf_mem_zero(peer, sizeof(struct dp_peer));
3532 
3533 	TAILQ_INIT(&peer->ast_entry_list);
3534 
3535 	/* store provided params */
3536 	peer->vdev = vdev;
3537 
3538 	dp_peer_add_ast(soc, peer, peer_mac_addr, CDP_TXRX_AST_TYPE_STATIC, 0);
3539 
3540 	qdf_spinlock_create(&peer->peer_info_lock);
3541 
3542 	qdf_mem_copy(
3543 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3544 
3545 	/* TODO: See if rx_opt_proc is really required */
3546 	peer->rx_opt_proc = soc->rx_opt_proc;
3547 
3548 	/* initialize the peer_id */
3549 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3550 		peer->peer_ids[i] = HTT_INVALID_PEER;
3551 
3552 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3553 
3554 	qdf_atomic_init(&peer->ref_cnt);
3555 
3556 	/* keep one reference for attach */
3557 	qdf_atomic_inc(&peer->ref_cnt);
3558 
3559 	/* add this peer into the vdev's list */
3560 	if (wlan_op_mode_sta == vdev->opmode)
3561 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
3562 	else
3563 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
3564 
3565 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3566 
3567 	/* TODO: See if hash based search is required */
3568 	dp_peer_find_hash_add(soc, peer);
3569 
3570 	/* Initialize the peer state */
3571 	peer->state = OL_TXRX_PEER_STATE_DISC;
3572 
3573 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3574 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
3575 		vdev, peer, peer->mac_addr.raw,
3576 		qdf_atomic_read(&peer->ref_cnt));
3577 	/*
3578 	 * For every peer MAP message, search and set if bss_peer
3579 	 */
3580 	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
3581 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3582 			"vdev bss_peer!!!!");
3583 		peer->bss_peer = 1;
3584 		vdev->vap_bss_peer = peer;
3585 	}
3586 
3587 
3588 #ifndef CONFIG_WIN
3589 	dp_local_peer_id_alloc(pdev, peer);
3590 #endif
3591 	DP_STATS_INIT(peer);
3592 	return (void *)peer;
3593 }
3594 
3595 /*
3596  * dp_peer_setup_wifi3() - initialize the peer
3597  * @vdev_hdl: virtual device object
3598  * @peer_hdl: Peer object
3599  *
3600  * Return: void
3601  */
3602 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
3603 {
3604 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
3605 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
3606 	struct dp_pdev *pdev;
3607 	struct dp_soc *soc;
3608 	bool hash_based = 0;
3609 	enum cdp_host_reo_dest_ring reo_dest;
3610 
3611 	/* preconditions */
3612 	qdf_assert(vdev);
3613 	qdf_assert(peer);
3614 
3615 	pdev = vdev->pdev;
3616 	soc = pdev->soc;
3617 
3618 	peer->last_assoc_rcvd = 0;
3619 	peer->last_disassoc_rcvd = 0;
3620 	peer->last_deauth_rcvd = 0;
3621 
3622 	/*
3623 	 * hash based steering is disabled for Radios which are offloaded
3624 	 * to NSS
3625 	 */
3626 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
3627 		hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
3628 
3629 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3630 		FL("hash based steering for pdev: %d is %d\n"),
3631 		pdev->pdev_id, hash_based);
3632 
3633 	/*
3634 	 * The line below ensures the proper reo_dest ring is chosen for
3635 	 * cases where the Toeplitz hash cannot be generated (ex: non TCP/UDP)
3636 	 */
3637 	reo_dest = pdev->reo_dest;
3638 
3639 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
3640 		/* TODO: Check the destination ring number to be passed to FW */
3641 		soc->cdp_soc.ol_ops->peer_set_default_routing(
3642 			pdev->osif_pdev, peer->mac_addr.raw,
3643 			 peer->vdev->vdev_id, hash_based, reo_dest);
3644 	}
3645 
3646 	dp_peer_rx_init(pdev, peer);
3647 	return;
3648 }
3649 
3650 /*
3651  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
3652  * @vdev_handle: virtual device object
3653  * @htt_pkt_type: type of pkt
3654  *
3655  * Return: void
3656  */
3657 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
3658 	 enum htt_cmn_pkt_type val)
3659 {
3660 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3661 	vdev->tx_encap_type = val;
3662 }
3663 
3664 /*
3665  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
3666  * @vdev_handle: virtual device object
3667  * @htt_pkt_type: type of pkt
3668  *
3669  * Return: void
3670  */
3671 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
3672 	 enum htt_cmn_pkt_type val)
3673 {
3674 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3675 	vdev->rx_decap_type = val;
3676 }
3677 
3678 /*
3679  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
3680  * @pdev_handle: physical device object
3681  * @val: reo destination ring index (1 - 4)
3682  *
3683  * Return: void
3684  */
3685 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
3686 	 enum cdp_host_reo_dest_ring val)
3687 {
3688 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3689 
3690 	if (pdev)
3691 		pdev->reo_dest = val;
3692 }
3693 
3694 /*
3695  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
3696  * @pdev_handle: physical device object
3697  *
3698  * Return: reo destination ring index
3699  */
3700 static enum cdp_host_reo_dest_ring
3701 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
3702 {
3703 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3704 
3705 	if (pdev)
3706 		return pdev->reo_dest;
3707 	else
3708 		return cdp_host_reo_dest_ring_unknown;
3709 }
3710 
3711 #ifdef QCA_SUPPORT_SON
3712 static void dp_son_peer_authorize(struct dp_peer *peer)
3713 {
3714 	struct dp_soc *soc;
3715 	soc = peer->vdev->pdev->soc;
3716 	peer->peer_bs_inact_flag = 0;
3717 	peer->peer_bs_inact = soc->pdev_bs_inact_reload;
3718 	return;
3719 }
3720 #else
3721 static void dp_son_peer_authorize(struct dp_peer *peer)
3722 {
3723 	return;
3724 }
3725 #endif
3726 /*
3727  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
3728  * @pdev_handle: device object
3729  * @val: value to be set
3730  *
3731  * Return: 0 on success
3732  */
3733 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
3734 	 uint32_t val)
3735 {
3736 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3737 
3738 	/* Enable/Disable smart mesh filtering. This flag will be checked
3739 	 * during rx processing to check if packets are from NAC clients.
3740 	 */
3741 	pdev->filter_neighbour_peers = val;
3742 	return 0;
3743 }
3744 
3745 /*
3746  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
3747  * address for smart mesh filtering
3748  * @pdev_handle: device object
3749  * @cmd: Add/Del command
3750  * @macaddr: nac client mac address
3751  *
3752  * Return: 1 on success, 0 on failure
3753  */
3754 static int dp_update_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
3755 	 uint32_t cmd, uint8_t *macaddr)
3756 {
3757 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3758 	struct dp_neighbour_peer *peer = NULL;
3759 
3760 	if (!macaddr)
3761 		goto fail0;
3762 
3763 	/* Store address of NAC (neighbour peer) which will be checked
3764 	 * against TA of received packets.
3765 	 */
3766 	if (cmd == DP_NAC_PARAM_ADD) {
3767 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
3768 				sizeof(*peer));
3769 
3770 		if (!peer) {
3771 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3772 				FL("DP neighbour peer node memory allocation failed"));
3773 			goto fail0;
3774 		}
3775 
3776 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
3777 			macaddr, DP_MAC_ADDR_LEN);
3778 
3779 
3780 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3781 		/* add this neighbour peer into the list */
3782 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
3783 				neighbour_peer_list_elem);
3784 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3785 
3786 		return 1;
3787 
3788 	} else if (cmd == DP_NAC_PARAM_DEL) {
3789 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3790 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
3791 				neighbour_peer_list_elem) {
3792 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
3793 				macaddr, DP_MAC_ADDR_LEN)) {
3794 				/* delete this peer from the list */
3795 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
3796 					peer, neighbour_peer_list_elem);
3797 				qdf_mem_free(peer);
3798 				break;
3799 			}
3800 		}
3801 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3802 
3803 		return 1;
3804 
3805 	}
3806 
3807 fail0:
3808 	return 0;
3809 }
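
/*
 * Usage sketch (illustrative only; pdev_handle and nac_mac are caller
 * supplied placeholders): enable smart mesh filtering on the pdev, then
 * register/unregister the NAC client whose TA should be matched:
 *
 *	uint8_t nac_mac[DP_MAC_ADDR_LEN];
 *
 *	dp_set_filter_neighbour_peers(pdev_handle, 1);
 *	dp_update_filter_neighbour_peers(pdev_handle, DP_NAC_PARAM_ADD, nac_mac);
 *	...
 *	dp_update_filter_neighbour_peers(pdev_handle, DP_NAC_PARAM_DEL, nac_mac);
 *	dp_set_filter_neighbour_peers(pdev_handle, 0);
 */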
3810 
3811 /*
3812  * dp_get_sec_type() - Get the security type
3813  * @peer:		Datapath peer handle
3814  * @sec_idx:    Security id (mcast, ucast)
3815  *
3816  * Return: sec_type: Security type
3817  */
3818 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
3819 {
3820 	struct dp_peer *dpeer = (struct dp_peer *)peer;
3821 
3822 	return dpeer->security[sec_idx].sec_type;
3823 }
3824 
3825 /*
3826  * dp_peer_authorize() - authorize txrx peer
3827  * @peer_handle:		Datapath peer handle
3828  * @authorize:		Set to 1 to authorize the peer, 0 to de-authorize
3829  *
3830  */
3831 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
3832 {
3833 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
3834 	struct dp_soc *soc;
3835 
3836 	if (peer != NULL) {
3837 		soc = peer->vdev->pdev->soc;
3838 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
3839 		dp_son_peer_authorize(peer);
3840 		peer->authorize = authorize ? 1 : 0;
3841 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3842 	}
3843 }
3844 
3845 #ifdef QCA_SUPPORT_SON
3846 /*
3847  * dp_txrx_update_inact_threshold() - Update inact timer threshold
3848  * @pdev_handle: Device handle
3849  * @new_threshold : updated threshold value
3850  *
3851  */
3852 static void
3853 dp_txrx_update_inact_threshold(struct cdp_pdev *pdev_handle,
3854 			       u_int16_t new_threshold)
3855 {
3856 	struct dp_vdev *vdev;
3857 	struct dp_peer *peer;
3858 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3859 	struct dp_soc *soc = pdev->soc;
3860 	u_int16_t old_threshold = soc->pdev_bs_inact_reload;
3861 
3862 	if (old_threshold == new_threshold)
3863 		return;
3864 
3865 	soc->pdev_bs_inact_reload = new_threshold;
3866 
3867 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3868 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
3869 		if (vdev->opmode != wlan_op_mode_ap)
3870 			continue;
3871 
3872 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3873 			if (!peer->authorize)
3874 				continue;
3875 
3876 			if (old_threshold - peer->peer_bs_inact >=
3877 					new_threshold) {
3878 				dp_mark_peer_inact((void *)peer, true);
3879 				peer->peer_bs_inact = 0;
3880 			} else {
3881 				peer->peer_bs_inact = new_threshold -
3882 					(old_threshold - peer->peer_bs_inact);
3883 			}
3884 		}
3885 	}
3886 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3887 }
3888 
3889 /**
3890  * dp_txrx_reset_inact_count(): Reset inact count
3891  * @pdev_handle - device handle
3892  *
3893  * Return: void
3894  */
3895 static void
3896 dp_txrx_reset_inact_count(struct cdp_pdev *pdev_handle)
3897 {
3898 	struct dp_vdev *vdev = NULL;
3899 	struct dp_peer *peer = NULL;
3900 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3901 	struct dp_soc *soc = pdev->soc;
3902 
3903 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3904 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
3905 		if (vdev->opmode != wlan_op_mode_ap)
3906 			continue;
3907 
3908 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3909 			if (!peer->authorize)
3910 				continue;
3911 
3912 			peer->peer_bs_inact = soc->pdev_bs_inact_reload;
3913 		}
3914 	}
3915 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3916 }
3917 
3918 /**
3919  * dp_set_inact_params(): set inactivity params
3920  * @pdev_handle - device handle
3921  * @inact_check_interval - inactivity interval
3922  * @inact_normal - Inactivity normal
3923  * @inact_overload - Inactivity overload
3924  *
3925  * Return: bool
3926  */
3927 bool dp_set_inact_params(struct cdp_pdev *pdev_handle,
3928 			 u_int16_t inact_check_interval,
3929 			 u_int16_t inact_normal, u_int16_t inact_overload)
3930 {
3931 	struct dp_soc *soc;
3932 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3933 
3934 	if (!pdev)
3935 		return false;
3936 
3937 	soc = pdev->soc;
3938 	if (!soc)
3939 		return false;
3940 
3941 	soc->pdev_bs_inact_interval = inact_check_interval;
3942 	soc->pdev_bs_inact_normal = inact_normal;
3943 	soc->pdev_bs_inact_overload = inact_overload;
3944 
3945 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
3946 					soc->pdev_bs_inact_normal);
3947 
3948 	return true;
3949 }
3950 
3951 /**
3952  * dp_start_inact_timer(): Inactivity timer start
3953  * @pdev_handle - device handle
3954  * @enable - Inactivity timer start/stop
3955  *
3956  * Return: bool
3957  */
3958 bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable)
3959 {
3960 	struct dp_soc *soc;
3961 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3962 
3963 	if (!pdev)
3964 		return false;
3965 
3966 	soc = pdev->soc;
3967 	if (!soc)
3968 		return false;
3969 
3970 	if (enable) {
3971 		dp_txrx_reset_inact_count((struct cdp_pdev *)pdev);
3972 		qdf_timer_mod(&soc->pdev_bs_inact_timer,
3973 			      soc->pdev_bs_inact_interval * 1000);
3974 	} else {
3975 		qdf_timer_stop(&soc->pdev_bs_inact_timer);
3976 	}
3977 
3978 	return true;
3979 }
3980 
3981 /**
3982  * dp_set_overload(): Set inactivity overload
3983  * @pdev_handle - device handle
3984  * @overload - overload status
3985  *
3986  * Return: void
3987  */
3988 void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload)
3989 {
3990 	struct dp_soc *soc;
3991 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3992 
3993 	if (!pdev)
3994 		return;
3995 
3996 	soc = pdev->soc;
3997 	if (!soc)
3998 		return;
3999 
4000 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4001 			overload ? soc->pdev_bs_inact_overload :
4002 			soc->pdev_bs_inact_normal);
4003 }
4004 
4005 /**
4006  * dp_peer_is_inact(): check whether peer is inactive
4007  * @peer_handle - datapath peer handle
4008  *
4009  * Return: bool
4010  */
4011 bool dp_peer_is_inact(void *peer_handle)
4012 {
4013 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4014 
4015 	if (!peer)
4016 		return false;
4017 
4018 	return peer->peer_bs_inact_flag == 1;
4019 }
4020 
4021 /**
4022  * dp_init_inact_timer: initialize the inact timer
4023  * @soc - SOC handle
4024  *
4025  * Return: void
4026  */
4027 void dp_init_inact_timer(struct dp_soc *soc)
4028 {
4029 	qdf_timer_init(soc->osdev, &soc->pdev_bs_inact_timer,
4030 		dp_txrx_peer_find_inact_timeout_handler,
4031 		(void *)soc, QDF_TIMER_TYPE_WAKE_APPS);
4032 }
4033 
4034 #else
4035 
4036 bool dp_set_inact_params(struct cdp_pdev *pdev, u_int16_t inact_check_interval,
4037 			 u_int16_t inact_normal, u_int16_t inact_overload)
4038 {
4039 	return false;
4040 }
4041 
4042 bool dp_start_inact_timer(struct cdp_pdev *pdev, bool enable)
4043 {
4044 	return false;
4045 }
4046 
4047 void dp_set_overload(struct cdp_pdev *pdev, bool overload)
4048 {
4049 	return;
4050 }
4051 
4052 void dp_init_inact_timer(struct dp_soc *soc)
4053 {
4054 	return;
4055 }
4056 
4057 bool dp_peer_is_inact(void *peer)
4058 {
4059 	return false;
4060 }
4061 #endif
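
/*
 * Usage sketch for the inactivity API above (illustrative; soc, pdev_handle,
 * peer_handle and the threshold values are placeholders). When
 * QCA_SUPPORT_SON is not defined the stubs simply report failure:
 *
 *	dp_init_inact_timer(soc);
 *	if (dp_set_inact_params(pdev_handle, 5, 15, 5))
 *		dp_start_inact_timer(pdev_handle, true);
 *	...
 *	dp_set_overload(pdev_handle, true);
 *	if (dp_peer_is_inact(peer_handle))
 *		... evict the inactive client ...
 *	dp_start_inact_timer(pdev_handle, false);
 */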
4062 
4063 /*
4064  * dp_peer_unref_delete() - unref and delete peer
4065  * @peer_handle:		Datapath peer handle
4066  *
4067  */
4068 void dp_peer_unref_delete(void *peer_handle)
4069 {
4070 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4071 	struct dp_peer *bss_peer = NULL;
4072 	struct dp_vdev *vdev = peer->vdev;
4073 	struct dp_pdev *pdev = vdev->pdev;
4074 	struct dp_soc *soc = pdev->soc;
4075 	struct dp_peer *tmppeer;
4076 	int found = 0;
4077 	uint16_t peer_id;
4078 	uint16_t vdev_id;
4079 
4080 	/*
4081 	 * Hold the lock all the way from checking if the peer ref count
4082 	 * is zero until the peer references are removed from the hash
4083 	 * table and vdev list (if the peer ref count is zero).
4084 	 * This protects against a new HL tx operation starting to use the
4085 	 * peer object just after this function concludes it's done being used.
4086 	 * Furthermore, the lock needs to be held while checking whether the
4087 	 * vdev's list of peers is empty, to make sure that list is not modified
4088 	 * concurrently with the empty check.
4089 	 */
4090 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4091 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4092 		  "%s: peer %pK ref_cnt(before decrement): %d\n", __func__,
4093 		  peer, qdf_atomic_read(&peer->ref_cnt));
4094 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
4095 		peer_id = peer->peer_ids[0];
4096 		vdev_id = vdev->vdev_id;
4097 
4098 		/*
4099 		 * Make sure that the reference to the peer in
4100 		 * peer object map is removed
4101 		 */
4102 		if (peer_id != HTT_INVALID_PEER)
4103 			soc->peer_id_to_obj_map[peer_id] = NULL;
4104 
4105 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4106 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
4107 
4108 		/* remove the reference to the peer from the hash table */
4109 		dp_peer_find_hash_remove(soc, peer);
4110 
4111 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
4112 			if (tmppeer == peer) {
4113 				found = 1;
4114 				break;
4115 			}
4116 		}
4117 		if (found) {
4118 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
4119 				peer_list_elem);
4120 		} else {
4121 			/*Ignoring the remove operation as peer not found*/
4122 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
4123 				"peer %pK not found in vdev (%pK)->peer_list:%pK",
4124 				peer, vdev, &peer->vdev->peer_list);
4125 		}
4126 
4127 		/* cleanup the peer data */
4128 		dp_peer_cleanup(vdev, peer);
4129 
4130 		/* check whether the parent vdev has no peers left */
4131 		if (TAILQ_EMPTY(&vdev->peer_list)) {
4132 			/*
4133 			 * Now that there are no references to the peer, we can
4134 			 * release the peer reference lock.
4135 			 */
4136 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4137 			/*
4138 			 * Check if the parent vdev was waiting for its peers
4139 			 * to be deleted, in order for it to be deleted too.
4140 			 */
4141 			if (vdev->delete.pending) {
4142 				ol_txrx_vdev_delete_cb vdev_delete_cb =
4143 					vdev->delete.callback;
4144 				void *vdev_delete_context =
4145 					vdev->delete.context;
4146 
4147 				QDF_TRACE(QDF_MODULE_ID_DP,
4148 					QDF_TRACE_LEVEL_INFO_HIGH,
4149 					FL("deleting vdev object %pK (%pM)"
4150 					" - its last peer is done"),
4151 					vdev, vdev->mac_addr.raw);
4152 				/* all peers are gone, go ahead and delete it */
4153 				dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
4154 								FLOW_TYPE_VDEV,
4155 								vdev_id);
4156 				dp_tx_vdev_detach(vdev);
4157 				QDF_TRACE(QDF_MODULE_ID_DP,
4158 					QDF_TRACE_LEVEL_INFO_HIGH,
4159 					FL("deleting vdev object %pK (%pM)"),
4160 					vdev, vdev->mac_addr.raw);
4161 
4162 				qdf_mem_free(vdev);
4163 				vdev = NULL;
4164 				if (vdev_delete_cb)
4165 					vdev_delete_cb(vdev_delete_context);
4166 			}
4167 		} else {
4168 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4169 		}
4170 
4171 		if (vdev) {
4172 			if (vdev->vap_bss_peer == peer) {
4173 				vdev->vap_bss_peer = NULL;
4174 			}
4175 		}
4176 
4177 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
4178 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->osif_pdev,
4179 					vdev_id, peer->mac_addr.raw);
4180 		}
4181 
4182 		if (!vdev || !vdev->vap_bss_peer) {
4183 			goto free_peer;
4184 		}
4185 
4186 #ifdef notyet
4187 		qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
4188 #else
4189 		bss_peer = vdev->vap_bss_peer;
4190 		DP_UPDATE_STATS(bss_peer, peer);
4191 
4192 free_peer:
4193 		qdf_mem_free(peer);
4194 
4195 #endif
4196 	} else {
4197 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4198 	}
4199 }
4200 
4201 /*
4202  * dp_peer_delete_wifi3() - Detach txrx peer
4203  * @peer_handle: Datapath peer handle
4204  * @bitmap: bitmap indicating special handling of request.
4205  *
4206  */
4207 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
4208 {
4209 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4210 
4211 	/* redirect the peer's rx delivery function to point to a
4212 	 * discard func
4213 	 */
4214 
4215 	peer->rx_opt_proc = dp_rx_discard;
4216 
4217 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4218 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
4219 
4220 #ifndef CONFIG_WIN
4221 	dp_local_peer_id_free(peer->vdev->pdev, peer);
4222 #endif
4223 	qdf_spinlock_destroy(&peer->peer_info_lock);
4224 
4225 	/*
4226 	 * Remove the reference added during peer_attach.
4227 	 * The peer will still be left allocated until the
4228 	 * PEER_UNMAP message arrives to remove the other
4229 	 * reference, added by the PEER_MAP message.
4230 	 */
4231 	dp_peer_unref_delete(peer_handle);
4232 }
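
/*
 * Peer lifecycle sketch (illustrative; vdev_handle and mac are placeholders).
 * dp_peer_create_wifi3() keeps one reference for the attach and the PEER_MAP
 * message adds another, which is dropped only when PEER_UNMAP arrives:
 *
 *	void *peer = dp_peer_create_wifi3(vdev_handle, mac);
 *
 *	dp_peer_setup_wifi3(vdev_handle, peer);
 *	dp_peer_authorize((struct cdp_peer *)peer, 1);
 *	...
 *	dp_peer_delete_wifi3(peer, 0);	releases the attach reference
 */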
4233 
4234 /*
4235  * dp_get_vdev_mac_addr_wifi3() - get vdev MAC address
4236  * @pvdev:		Datapath VDEV handle
4237  * Return: pointer to the vdev MAC address
4238  */
4239 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
4240 {
4241 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4242 	return vdev->mac_addr.raw;
4243 }
4244 
4245 /*
4246  * dp_vdev_set_wds() - Enable/Disable WDS on the vdev
4247  * @vdev_handle: DP VDEV handle
4248  * @val: 1 to enable WDS, 0 to disable
4249  *
4250  * Return: 0 on success
4251  */
4252 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
4253 {
4254 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4255 
4256 	vdev->wds_enabled = val;
4257 	return 0;
4258 }
4259 
4260 /*
4261  * dp_get_vdev_from_vdev_id_wifi3() - get vdev handle from vdev_id
4262  * @dev:		Datapath PDEV handle
4263  * @vdev_id:		vdev id to look up
4264  */
4265 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
4266 						uint8_t vdev_id)
4267 {
4268 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
4269 	struct dp_vdev *vdev = NULL;
4270 
4271 	if (qdf_unlikely(!pdev))
4272 		return NULL;
4273 
4274 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4275 		if (vdev->vdev_id == vdev_id)
4276 			break;
4277 	}
4278 
4279 	return (struct cdp_vdev *)vdev;
4280 }
4281 
4282 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
4283 {
4284 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4285 
4286 	return vdev->opmode;
4287 }
4288 
4289 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
4290 {
4291 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4292 	struct dp_pdev *pdev = vdev->pdev;
4293 
4294 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
4295 }
4296 
4297 /**
4298  * dp_reset_monitor_mode() - Disable monitor mode
4299  * @pdev_handle: Datapath PDEV handle
4300  *
4301  * Return: 0 on success, not 0 on failure
4302  */
4303 static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
4304 {
4305 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4306 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4307 	struct dp_soc *soc = pdev->soc;
4308 	uint8_t pdev_id;
4309 	int mac_id;
4310 
4311 	pdev_id = pdev->pdev_id;
4312 	soc = pdev->soc;
4313 
4314 	qdf_spin_lock_bh(&pdev->mon_lock);
4315 
4316 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4317 
4318 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4319 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4320 
4321 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4322 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4323 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4324 
4325 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4326 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4327 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4328 	}
4329 
4330 	pdev->monitor_vdev = NULL;
4331 
4332 	qdf_spin_unlock_bh(&pdev->mon_lock);
4333 
4334 	return 0;
4335 }
4336 
4337 /**
4338  * dp_set_nac() - set peer_nac
4339  * @peer_handle: Datapath PEER handle
4340  *
4341  * Return: void
4342  */
4343 static void dp_set_nac(struct cdp_peer *peer_handle)
4344 {
4345 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4346 
4347 	peer->nac = 1;
4348 }
4349 
4350 /**
4351  * dp_get_tx_pending() - read pending tx
4352  * @pdev_handle: Datapath PDEV handle
4353  *
4354  * Return: outstanding tx
4355  */
4356 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
4357 {
4358 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4359 
4360 	return qdf_atomic_read(&pdev->num_tx_outstanding);
4361 }
4362 
4363 /**
4364  * dp_get_peer_mac_from_peer_id() - get peer mac
4365  * @pdev_handle: Datapath PDEV handle
4366  * @peer_id: Peer ID
4367  * @peer_mac: MAC addr of PEER
4368  *
4369  * Return: void
4370  */
4371 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
4372 	uint32_t peer_id, uint8_t *peer_mac)
4373 {
4374 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4375 	struct dp_peer *peer;
4376 
4377 	if (pdev && peer_mac) {
4378 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
4379 		if (peer && peer->mac_addr.raw) {
4380 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
4381 					DP_MAC_ADDR_LEN);
4382 		}
4383 	}
4384 }
4385 
4386 /**
4387  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
4388  * @vdev_handle: Datapath VDEV handle
4389  * @smart_monitor: Flag to denote if its smart monitor mode
4390  *
4391  * Return: 0 on success, not 0 on failure
4392  */
4393 static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
4394 		uint8_t smart_monitor)
4395 {
4396 	/* Many monitor VAPs can exist in a system but only one can be up at
4397 	 * any time
4398 	 */
4399 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4400 	struct dp_pdev *pdev;
4401 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4402 	struct dp_soc *soc;
4403 	uint8_t pdev_id;
4404 	int mac_id;
4405 
4406 	qdf_assert(vdev);
4407 
4408 	pdev = vdev->pdev;
4409 	pdev_id = pdev->pdev_id;
4410 	soc = pdev->soc;
4411 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4412 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
4413 		pdev, pdev_id, soc, vdev);
4414 
4415 	/*Check if current pdev's monitor_vdev exists */
4416 	if (pdev->monitor_vdev) {
4417 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4418 			"vdev=%pK\n", vdev);
4419 		qdf_assert(vdev);
4420 	}
4421 
4422 	pdev->monitor_vdev = vdev;
4423 
4424 	/* If smart monitor mode, do not configure monitor ring */
4425 	if (smart_monitor)
4426 		return QDF_STATUS_SUCCESS;
4427 
4428 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4429 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
4430 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4431 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4432 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4433 		pdev->mo_data_filter);
4434 
4435 	htt_tlv_filter.mpdu_start = 1;
4436 	htt_tlv_filter.msdu_start = 1;
4437 	htt_tlv_filter.packet = 1;
4438 	htt_tlv_filter.msdu_end = 1;
4439 	htt_tlv_filter.mpdu_end = 1;
4440 	htt_tlv_filter.packet_header = 1;
4441 	htt_tlv_filter.attention = 1;
4442 	htt_tlv_filter.ppdu_start = 0;
4443 	htt_tlv_filter.ppdu_end = 0;
4444 	htt_tlv_filter.ppdu_end_user_stats = 0;
4445 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4446 	htt_tlv_filter.ppdu_end_status_done = 0;
4447 	htt_tlv_filter.header_per_msdu = 1;
4448 	htt_tlv_filter.enable_fp =
4449 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4450 	htt_tlv_filter.enable_md = 0;
4451 	htt_tlv_filter.enable_mo =
4452 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4453 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4454 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4455 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4456 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4457 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4458 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4459 
4460 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4461 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4462 
4463 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4464 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4465 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4466 	}
4467 
4468 	htt_tlv_filter.mpdu_start = 1;
4469 	htt_tlv_filter.msdu_start = 1;
4470 	htt_tlv_filter.packet = 0;
4471 	htt_tlv_filter.msdu_end = 1;
4472 	htt_tlv_filter.mpdu_end = 1;
4473 	htt_tlv_filter.packet_header = 1;
4474 	htt_tlv_filter.attention = 1;
4475 	htt_tlv_filter.ppdu_start = 1;
4476 	htt_tlv_filter.ppdu_end = 1;
4477 	htt_tlv_filter.ppdu_end_user_stats = 1;
4478 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4479 	htt_tlv_filter.ppdu_end_status_done = 1;
4480 	htt_tlv_filter.header_per_msdu = 0;
4481 	htt_tlv_filter.enable_fp =
4482 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4483 	htt_tlv_filter.enable_md = 0;
4484 	htt_tlv_filter.enable_mo =
4485 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4486 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4487 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4488 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4489 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4490 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4491 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4492 
4493 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4494 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4495 
4496 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4497 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4498 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4499 	}
4500 
4501 	return QDF_STATUS_SUCCESS;
4502 }
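
/*
 * Usage sketch (illustrative; the handles are placeholders): a monitor VAP
 * is brought up by attaching it to its pdev and programming both monitor
 * rings, and torn down by clearing the ring filters again:
 *
 *	dp_vdev_set_monitor_mode(mon_vdev_handle, 0);
 *	...
 *	dp_reset_monitor_mode(pdev_handle);
 *
 * Passing a non-zero smart_monitor value only records the monitor vdev on
 * the pdev and skips the ring configuration.
 */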
4503 
4504 /**
4505  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
4506  * @pdev_handle: Datapath PDEV handle
4507  * @filter_val: Monitor mode filter settings to apply
4508  * Return: 0 on success, not 0 on failure
4509  */
4510 static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
4511 	struct cdp_monitor_filter *filter_val)
4512 {
4513 	/* Many monitor VAPs can exist in a system but only one can be up at
4514 	 * any time
4515 	 */
4516 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4517 	struct dp_vdev *vdev = pdev->monitor_vdev;
4518 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4519 	struct dp_soc *soc;
4520 	uint8_t pdev_id;
4521 	int mac_id;
4522 
4523 	pdev_id = pdev->pdev_id;
4524 	soc = pdev->soc;
4525 
4526 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4527 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
4528 		pdev, pdev_id, soc, vdev);
4529 
4530 	/*Check if current pdev's monitor_vdev exists */
4531 	if (!pdev->monitor_vdev) {
4532 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4533 			"vdev=%pK\n", vdev);
4534 		qdf_assert(vdev);
4535 	}
4536 
4537 	/* update filter mode, type in pdev structure */
4538 	pdev->mon_filter_mode = filter_val->mode;
4539 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
4540 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
4541 	pdev->fp_data_filter = filter_val->fp_data;
4542 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
4543 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
4544 	pdev->mo_data_filter = filter_val->mo_data;
4545 
4546 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4547 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
4548 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4549 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4550 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4551 		pdev->mo_data_filter);
4552 
4553 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4554 
4555 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4556 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4557 
4558 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4559 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4560 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4561 
4562 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4563 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4564 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4565 	}
4566 
4567 	htt_tlv_filter.mpdu_start = 1;
4568 	htt_tlv_filter.msdu_start = 1;
4569 	htt_tlv_filter.packet = 1;
4570 	htt_tlv_filter.msdu_end = 1;
4571 	htt_tlv_filter.mpdu_end = 1;
4572 	htt_tlv_filter.packet_header = 1;
4573 	htt_tlv_filter.attention = 1;
4574 	htt_tlv_filter.ppdu_start = 0;
4575 	htt_tlv_filter.ppdu_end = 0;
4576 	htt_tlv_filter.ppdu_end_user_stats = 0;
4577 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4578 	htt_tlv_filter.ppdu_end_status_done = 0;
4579 	htt_tlv_filter.header_per_msdu = 1;
4580 	htt_tlv_filter.enable_fp =
4581 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4582 	htt_tlv_filter.enable_md = 0;
4583 	htt_tlv_filter.enable_mo =
4584 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4585 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4586 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4587 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4588 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4589 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4590 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4591 
4592 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4593 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4594 
4595 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4596 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4597 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4598 	}
4599 
4600 	htt_tlv_filter.mpdu_start = 1;
4601 	htt_tlv_filter.msdu_start = 1;
4602 	htt_tlv_filter.packet = 0;
4603 	htt_tlv_filter.msdu_end = 1;
4604 	htt_tlv_filter.mpdu_end = 1;
4605 	htt_tlv_filter.packet_header = 1;
4606 	htt_tlv_filter.attention = 1;
4607 	htt_tlv_filter.ppdu_start = 1;
4608 	htt_tlv_filter.ppdu_end = 1;
4609 	htt_tlv_filter.ppdu_end_user_stats = 1;
4610 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4611 	htt_tlv_filter.ppdu_end_status_done = 1;
4612 	htt_tlv_filter.header_per_msdu = 0;
4613 	htt_tlv_filter.enable_fp =
4614 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4615 	htt_tlv_filter.enable_md = 0;
4616 	htt_tlv_filter.enable_mo =
4617 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4618 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4619 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4620 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4621 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4622 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4623 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4624 
4625 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4626 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4627 
4628 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4629 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4630 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4631 	}
4632 
4633 	return QDF_STATUS_SUCCESS;
4634 }
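
/*
 * Usage sketch (illustrative; the field values are placeholders): the
 * advanced filter is applied to a pdev that already owns a monitor vdev.
 * The fp_ and mo_ fields use the same FILTER_ masks that the
 * dp_vdev_get_filter_xxx() helpers below check against:
 *
 *	struct cdp_monitor_filter filter_val = {0};
 *
 *	filter_val.mode = MON_FILTER_PASS | MON_FILTER_OTHER;
 *	filter_val.fp_mgmt = FILTER_MGMT_ALL;
 *	filter_val.fp_data = FILTER_DATA_UCAST | FILTER_DATA_MCAST;
 *	dp_pdev_set_advance_monitor_filter(pdev_handle, &filter_val);
 */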
4635 
4636 /**
4637  * dp_get_pdev_id_frm_pdev() - get pdev_id
4638  * @pdev_handle: Datapath PDEV handle
4639  *
4640  * Return: pdev_id
4641  */
4642 static
4643 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
4644 {
4645 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4646 
4647 	return pdev->pdev_id;
4648 }
4649 
4650 /**
4651  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
4652  * @vdev_handle: Datapath VDEV handle
4653  * Return: true on ucast filter flag set
4654  */
4655 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
4656 {
4657 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4658 	struct dp_pdev *pdev;
4659 
4660 	pdev = vdev->pdev;
4661 
4662 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
4663 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
4664 		return true;
4665 
4666 	return false;
4667 }
4668 
4669 /**
4670  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
4671  * @vdev_handle: Datapath VDEV handle
4672  * Return: true on mcast filter flag set
4673  */
4674 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
4675 {
4676 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4677 	struct dp_pdev *pdev;
4678 
4679 	pdev = vdev->pdev;
4680 
4681 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
4682 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
4683 		return true;
4684 
4685 	return false;
4686 }
4687 
4688 /**
4689  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
4690  * @vdev_handle: Datapath VDEV handle
4691  * Return: true on non data filter flag set
4692  */
4693 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
4694 {
4695 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4696 	struct dp_pdev *pdev;
4697 
4698 	pdev = vdev->pdev;
4699 
4700 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
4701 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
4702 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
4703 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
4704 			return true;
4705 		}
4706 	}
4707 
4708 	return false;
4709 }
4710 
4711 #ifdef MESH_MODE_SUPPORT
4712 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
4713 {
4714 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4715 
4716 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4717 		FL("val %d"), val);
4718 	vdev->mesh_vdev = val;
4719 }
4720 
4721 /*
4722  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
4723  * @vdev_hdl: virtual device object
4724  * @val: value to be set
4725  *
4726  * Return: void
4727  */
4728 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
4729 {
4730 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4731 
4732 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4733 		FL("val %d"), val);
4734 	vdev->mesh_rx_filter = val;
4735 }
4736 #endif
4737 
4738 /*
4739  * dp_aggregate_pdev_ctrl_frames_stats() - function to aggregate peer stats
4740  * Current scope is BAR received count
4741  *
4742  * @pdev_handle: DP_PDEV handle
4743  *
4744  * Return: void
4745  */
4746 #define STATS_PROC_TIMEOUT        (HZ/1000)
4747 
4748 static void
4749 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
4750 {
4751 	struct dp_vdev *vdev;
4752 	struct dp_peer *peer;
4753 	uint32_t waitcnt;
4754 
4755 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4756 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4757 			if (!peer) {
4758 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4759 					FL("DP Invalid Peer refernce"));
4760 				return;
4761 			}
4762 
4763 			if (peer->delete_in_progress) {
4764 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4765 					FL("DP Peer deletion in progress"));
4766 				continue;
4767 			}
4768 
4769 			qdf_atomic_inc(&peer->ref_cnt);
4770 			waitcnt = 0;
4771 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
4772 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
4773 				&& waitcnt < 10) {
4774 				schedule_timeout_interruptible(
4775 						STATS_PROC_TIMEOUT);
4776 				waitcnt++;
4777 			}
4778 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
4779 			dp_peer_unref_delete(peer);
4780 		}
4781 	}
4782 }
4783 
4784 /**
4785  * dp_rx_bar_stats_cb(): BAR received stats callback
4786  * @soc: SOC handle
4787  * @cb_ctxt: Call back context
4788  * @reo_status: Reo status
4789  *
4790  * return: void
4791  */
4792 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
4793 	union hal_reo_status *reo_status)
4794 {
4795 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
4796 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
4797 
4798 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
4799 		DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
4800 			queue_status->header.status);
4801 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
4802 		return;
4803 	}
4804 
4805 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
4806 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
4807 
4808 }
4809 
4810 /**
4811  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
4812  * @vdev: DP VDEV handle
4813  *
4814  * return: void
4815  */
4816 void dp_aggregate_vdev_stats(struct dp_vdev *vdev)
4817 {
4818 	struct dp_peer *peer = NULL;
4819 	struct dp_soc *soc = vdev->pdev->soc;
4820 
4821 	qdf_mem_set(&(vdev->stats.tx), sizeof(vdev->stats.tx), 0x0);
4822 	qdf_mem_set(&(vdev->stats.rx), sizeof(vdev->stats.rx), 0x0);
4823 
4824 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
4825 		DP_UPDATE_STATS(vdev, peer);
4826 
4827 	if (soc->cdp_soc.ol_ops->update_dp_stats)
4828 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
4829 			&vdev->stats, (uint16_t) vdev->vdev_id,
4830 			UPDATE_VDEV_STATS);
4831 
4832 }
4833 
4834 /**
4835  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
4836  * @pdev: DP PDEV handle
4837  *
4838  * return: void
4839  */
4840 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
4841 {
4842 	struct dp_vdev *vdev = NULL;
4843 	struct dp_soc *soc = pdev->soc;
4844 
4845 	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
4846 	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
4847 	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
4848 
4849 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4850 
4851 		dp_aggregate_vdev_stats(vdev);
4852 		DP_UPDATE_STATS(pdev, vdev);
4853 
4854 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
4855 
4856 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
4857 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
4858 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
4859 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
4860 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
4861 		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
4862 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
4863 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host);
4864 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
4865 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host);
4866 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
4867 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
4868 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
4869 		DP_STATS_AGGR(pdev, vdev,
4870 				tx_i.mcast_en.dropped_map_error);
4871 		DP_STATS_AGGR(pdev, vdev,
4872 				tx_i.mcast_en.dropped_self_mac);
4873 		DP_STATS_AGGR(pdev, vdev,
4874 				tx_i.mcast_en.dropped_send_fail);
4875 		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
4876 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
4877 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
4878 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
4879 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na);
4880 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
4881 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
4882 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
4883 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
4884 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);
4885 
4886 		pdev->stats.tx_i.dropped.dropped_pkt.num =
4887 			pdev->stats.tx_i.dropped.dma_error +
4888 			pdev->stats.tx_i.dropped.ring_full +
4889 			pdev->stats.tx_i.dropped.enqueue_fail +
4890 			pdev->stats.tx_i.dropped.desc_na +
4891 			pdev->stats.tx_i.dropped.res_full;
4892 
4893 		pdev->stats.tx.last_ack_rssi =
4894 			vdev->stats.tx.last_ack_rssi;
4895 		pdev->stats.tx_i.tso.num_seg =
4896 			vdev->stats.tx_i.tso.num_seg;
4897 	}
4898 	if (soc->cdp_soc.ol_ops->update_dp_stats)
4899 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev,
4900 				&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);
4901 
4902 }
4903 
4904 /**
4905  * dp_pdev_getstats() - get pdev packet level stats
4906  * @pdev_handle: Datapath PDEV handle
4907  * @stats: cdp network device stats structure
4908  *
4909  * Return: void
4910  */
4911 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
4912 		struct cdp_dev_stats *stats)
4913 {
4914 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4915 
4916 	dp_aggregate_pdev_stats(pdev);
4917 
4918 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
4919 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
4920 
4921 	stats->tx_errors = pdev->stats.tx.tx_failed +
4922 		pdev->stats.tx_i.dropped.dropped_pkt.num;
4923 	stats->tx_dropped = stats->tx_errors;
4924 
4925 	stats->rx_packets = pdev->stats.rx.unicast.num +
4926 		pdev->stats.rx.multicast.num;
4927 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
4928 		pdev->stats.rx.multicast.bytes;
4929 }
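
/*
 * Usage sketch (illustrative; pdev_handle is a placeholder): net-device
 * style counters can be pulled from the aggregated pdev stats:
 *
 *	struct cdp_dev_stats dev_stats;
 *
 *	dp_pdev_getstats(pdev_handle, &dev_stats);
 *	...
 *	report dev_stats.tx_packets, dev_stats.rx_packets, dev_stats.tx_errors
 */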
4930 
4931 /**
4932  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
4933  * @pdev: DP_PDEV Handle
4934  *
4935  * Return:void
4936  */
4937 static inline void
4938 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
4939 {
4940 	uint8_t index = 0;
4941 	DP_PRINT_STATS("PDEV Tx Stats:\n");
4942 	DP_PRINT_STATS("Received From Stack:");
4943 	DP_PRINT_STATS("	Packets = %d",
4944 			pdev->stats.tx_i.rcvd.num);
4945 	DP_PRINT_STATS("	Bytes = %llu",
4946 			pdev->stats.tx_i.rcvd.bytes);
4947 	DP_PRINT_STATS("Processed:");
4948 	DP_PRINT_STATS("	Packets = %d",
4949 			pdev->stats.tx_i.processed.num);
4950 	DP_PRINT_STATS("	Bytes = %llu",
4951 			pdev->stats.tx_i.processed.bytes);
4952 	DP_PRINT_STATS("Total Completions:");
4953 	DP_PRINT_STATS("	Packets = %u",
4954 			pdev->stats.tx.comp_pkt.num);
4955 	DP_PRINT_STATS("	Bytes = %llu",
4956 			pdev->stats.tx.comp_pkt.bytes);
4957 	DP_PRINT_STATS("Successful Completions:");
4958 	DP_PRINT_STATS("	Packets = %u",
4959 			pdev->stats.tx.tx_success.num);
4960 	DP_PRINT_STATS("	Bytes = %llu",
4961 			pdev->stats.tx.tx_success.bytes);
4962 	DP_PRINT_STATS("Dropped:");
4963 	DP_PRINT_STATS("	Total = %d",
4964 			pdev->stats.tx_i.dropped.dropped_pkt.num);
4965 	DP_PRINT_STATS("	Dma_map_error = %d",
4966 			pdev->stats.tx_i.dropped.dma_error);
4967 	DP_PRINT_STATS("	Ring Full = %d",
4968 			pdev->stats.tx_i.dropped.ring_full);
4969 	DP_PRINT_STATS("	Descriptor Not available = %d",
4970 			pdev->stats.tx_i.dropped.desc_na);
4971 	DP_PRINT_STATS("	HW enqueue failed= %d",
4972 			pdev->stats.tx_i.dropped.enqueue_fail);
4973 	DP_PRINT_STATS("	Resources Full = %d",
4974 			pdev->stats.tx_i.dropped.res_full);
4975 	DP_PRINT_STATS("	FW removed = %d",
4976 			pdev->stats.tx.dropped.fw_rem);
4977 	DP_PRINT_STATS("	FW removed transmitted = %d",
4978 			pdev->stats.tx.dropped.fw_rem_tx);
4979 	DP_PRINT_STATS("	FW removed untransmitted = %d",
4980 			pdev->stats.tx.dropped.fw_rem_notx);
4981 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
4982 			pdev->stats.tx.dropped.fw_reason1);
4983 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
4984 			pdev->stats.tx.dropped.fw_reason2);
4985 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
4986 			pdev->stats.tx.dropped.fw_reason3);
4987 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
4988 			pdev->stats.tx.dropped.age_out);
4989 	DP_PRINT_STATS("Scatter Gather:");
4990 	DP_PRINT_STATS("	Packets = %d",
4991 			pdev->stats.tx_i.sg.sg_pkt.num);
4992 	DP_PRINT_STATS("	Bytes = %llu",
4993 			pdev->stats.tx_i.sg.sg_pkt.bytes);
4994 	DP_PRINT_STATS("	Dropped By Host = %d",
4995 			pdev->stats.tx_i.sg.dropped_host);
4996 	DP_PRINT_STATS("	Dropped By Target = %d",
4997 			pdev->stats.tx_i.sg.dropped_target);
4998 	DP_PRINT_STATS("TSO:");
4999 	DP_PRINT_STATS("	Number of Segments = %d",
5000 			pdev->stats.tx_i.tso.num_seg);
5001 	DP_PRINT_STATS("	Packets = %d",
5002 			pdev->stats.tx_i.tso.tso_pkt.num);
5003 	DP_PRINT_STATS("	Bytes = %llu",
5004 			pdev->stats.tx_i.tso.tso_pkt.bytes);
5005 	DP_PRINT_STATS("	Dropped By Host = %d",
5006 			pdev->stats.tx_i.tso.dropped_host);
5007 	DP_PRINT_STATS("Mcast Enhancement:");
5008 	DP_PRINT_STATS("	Packets = %d",
5009 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
5010 	DP_PRINT_STATS("	Bytes = %llu",
5011 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
5012 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
5013 			pdev->stats.tx_i.mcast_en.dropped_map_error);
5014 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
5015 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
5016 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
5017 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
5018 	DP_PRINT_STATS("	Unicast sent = %d",
5019 			pdev->stats.tx_i.mcast_en.ucast);
5020 	DP_PRINT_STATS("Raw:");
5021 	DP_PRINT_STATS("	Packets = %d",
5022 			pdev->stats.tx_i.raw.raw_pkt.num);
5023 	DP_PRINT_STATS("	Bytes = %llu",
5024 			pdev->stats.tx_i.raw.raw_pkt.bytes);
5025 	DP_PRINT_STATS("	DMA map error = %d",
5026 			pdev->stats.tx_i.raw.dma_map_error);
5027 	DP_PRINT_STATS("Reinjected:");
5028 	DP_PRINT_STATS("	Packets = %d",
5029 			pdev->stats.tx_i.reinject_pkts.num);
5030 	DP_PRINT_STATS("Bytes = %llu\n",
5031 				pdev->stats.tx_i.reinject_pkts.bytes);
5032 	DP_PRINT_STATS("Inspected:");
5033 	DP_PRINT_STATS("	Packets = %d",
5034 			pdev->stats.tx_i.inspect_pkts.num);
5035 	DP_PRINT_STATS("	Bytes = %llu",
5036 			pdev->stats.tx_i.inspect_pkts.bytes);
5037 	DP_PRINT_STATS("Nawds Multicast:");
5038 	DP_PRINT_STATS("	Packets = %d",
5039 			pdev->stats.tx_i.nawds_mcast.num);
5040 	DP_PRINT_STATS("	Bytes = %llu",
5041 			pdev->stats.tx_i.nawds_mcast.bytes);
5042 	DP_PRINT_STATS("CCE Classified:");
5043 	DP_PRINT_STATS("	CCE Classified Packets: %u",
5044 			pdev->stats.tx_i.cce_classified);
5045 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
5046 			pdev->stats.tx_i.cce_classified_raw);
5047 	DP_PRINT_STATS("Mesh stats:");
5048 	DP_PRINT_STATS("	frames to firmware: %u",
5049 			pdev->stats.tx_i.mesh.exception_fw);
5050 	DP_PRINT_STATS("	completions from fw: %u",
5051 			pdev->stats.tx_i.mesh.completion_fw);
5052 	DP_PRINT_STATS("PPDU stats counter");
5053 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
5054 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
5055 				pdev->stats.ppdu_stats_counter[index]);
5056 	}
5057 }
5058 
5059 /**
5060  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
5061  * @pdev: DP_PDEV Handle
5062  *
5063  * Return: void
5064  */
5065 static inline void
5066 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
5067 {
5068 	DP_PRINT_STATS("PDEV Rx Stats:\n");
5069 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
5070 	DP_PRINT_STATS("	Packets = %d %d %d %d",
5071 			pdev->stats.rx.rcvd_reo[0].num,
5072 			pdev->stats.rx.rcvd_reo[1].num,
5073 			pdev->stats.rx.rcvd_reo[2].num,
5074 			pdev->stats.rx.rcvd_reo[3].num);
5075 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
5076 			pdev->stats.rx.rcvd_reo[0].bytes,
5077 			pdev->stats.rx.rcvd_reo[1].bytes,
5078 			pdev->stats.rx.rcvd_reo[2].bytes,
5079 			pdev->stats.rx.rcvd_reo[3].bytes);
5080 	DP_PRINT_STATS("Replenished:");
5081 	DP_PRINT_STATS("	Packets = %d",
5082 			pdev->stats.replenish.pkts.num);
5083 	DP_PRINT_STATS("	Bytes = %llu",
5084 			pdev->stats.replenish.pkts.bytes);
5085 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
5086 			pdev->stats.buf_freelist);
5087 	DP_PRINT_STATS("	Low threshold intr = %d",
5088 			pdev->stats.replenish.low_thresh_intrs);
5089 	DP_PRINT_STATS("Dropped:");
5090 	DP_PRINT_STATS("	msdu_not_done = %d",
5091 			pdev->stats.dropped.msdu_not_done);
5092 	DP_PRINT_STATS("        mon_rx_drop = %d",
5093 			pdev->stats.dropped.mon_rx_drop);
5094 	DP_PRINT_STATS("Sent To Stack:");
5095 	DP_PRINT_STATS("	Packets = %d",
5096 			pdev->stats.rx.to_stack.num);
5097 	DP_PRINT_STATS("	Bytes = %llu",
5098 			pdev->stats.rx.to_stack.bytes);
5099 	DP_PRINT_STATS("Multicast/Broadcast:");
5100 	DP_PRINT_STATS("	Packets = %d",
5101 			pdev->stats.rx.multicast.num);
5102 	DP_PRINT_STATS("	Bytes = %llu",
5103 			pdev->stats.rx.multicast.bytes);
5104 	DP_PRINT_STATS("Errors:");
5105 	DP_PRINT_STATS("	Rxdma Ring Un-inititalized = %d",
5106 			pdev->stats.replenish.rxdma_err);
5107 	DP_PRINT_STATS("	Desc Alloc Failed: = %d",
5108 			pdev->stats.err.desc_alloc_fail);
5109 
5110 	/* Get bar_recv_cnt */
5111 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
5112 	DP_PRINT_STATS("BAR Received Count: = %d",
5113 			pdev->stats.rx.bar_recv_cnt);
5114 
5115 }
5116 
5117 /**
5118  * dp_print_soc_tx_stats(): Print SOC level Tx stats
5119  * @soc: DP_SOC Handle
5120  *
5121  * Return: void
5122  */
5123 static inline void
5124 dp_print_soc_tx_stats(struct dp_soc *soc)
5125 {
5126 	DP_PRINT_STATS("SOC Tx Stats:\n");
5127 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
5128 			soc->stats.tx.desc_in_use);
5129 	DP_PRINT_STATS("Invalid peer:");
5130 	DP_PRINT_STATS("	Packets = %d",
5131 			soc->stats.tx.tx_invalid_peer.num);
5132 	DP_PRINT_STATS("	Bytes = %llu",
5133 			soc->stats.tx.tx_invalid_peer.bytes);
5134 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
5135 			soc->stats.tx.tcl_ring_full[0],
5136 			soc->stats.tx.tcl_ring_full[1],
5137 			soc->stats.tx.tcl_ring_full[2]);
5138 
5139 }
5140 /**
5141  * dp_print_soc_rx_stats: Print SOC level Rx stats
5142  * @soc: DP_SOC Handle
5143  *
5144  * Return:void
5145  */
5146 static inline void
5147 dp_print_soc_rx_stats(struct dp_soc *soc)
5148 {
5149 	uint32_t i;
5150 	char reo_error[DP_REO_ERR_LENGTH];
5151 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
5152 	uint8_t index = 0;
5153 
5154 	DP_PRINT_STATS("SOC Rx Stats:\n");
5155 	DP_PRINT_STATS("Errors:\n");
5156 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
5157 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
5158 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
5159 	DP_PRINT_STATS("Invalid RBM = %d",
5160 			soc->stats.rx.err.invalid_rbm);
5161 	DP_PRINT_STATS("Invalid Vdev = %d",
5162 			soc->stats.rx.err.invalid_vdev);
5163 	DP_PRINT_STATS("Invalid Pdev = %d",
5164 			soc->stats.rx.err.invalid_pdev);
5165 	DP_PRINT_STATS("Invalid Peer = %d",
5166 			soc->stats.rx.err.rx_invalid_peer.num);
5167 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
5168 			soc->stats.rx.err.hal_ring_access_fail);
5169 
5170 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
5171 		index += qdf_snprint(&rxdma_error[index],
5172 				DP_RXDMA_ERR_LENGTH - index,
5173 				" %d", soc->stats.rx.err.rxdma_error[i]);
5174 	}
5175 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
5176 			rxdma_error);
5177 
5178 	index = 0;
5179 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
5180 		index += qdf_snprint(&reo_error[index],
5181 				DP_REO_ERR_LENGTH - index,
5182 				" %d", soc->stats.rx.err.reo_error[i]);
5183 	}
5184 	DP_PRINT_STATS("REO Error(0-14):%s",
5185 			reo_error);
5186 }
5187 
5188 
5189 /**
5190  * dp_print_ring_stat_from_hal(): Print hal level ring stats
5191  * @soc: DP_SOC handle
5192  * @srng: DP_SRNG handle
5193  * @ring_name: SRNG name
5194  *
5195  * Return: void
5196  */
5197 static inline void
5198 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
5199 	char *ring_name)
5200 {
5201 	uint32_t tailp;
5202 	uint32_t headp;
5203 
5204 	if (srng->hal_srng != NULL) {
5205 		hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
5206 		DP_PRINT_STATS("%s : Head pointer = %d  Tail Pointer = %d\n",
5207 				ring_name, headp, tailp);
5208 	}
5209 }
5210 
5211 /**
5212  * dp_print_ring_stats(): Print tail and head pointer
5213  * @pdev: DP_PDEV handle
5214  *
5215  * Return:void
5216  */
5217 static inline void
5218 dp_print_ring_stats(struct dp_pdev *pdev)
5219 {
5220 	uint32_t i;
5221 	char ring_name[STR_MAXLEN + 1];
5222 	int mac_id;
5223 
5224 	dp_print_ring_stat_from_hal(pdev->soc,
5225 			&pdev->soc->reo_exception_ring,
5226 			"Reo Exception Ring");
5227 	dp_print_ring_stat_from_hal(pdev->soc,
5228 			&pdev->soc->reo_reinject_ring,
5229 			"Reo Inject Ring");
5230 	dp_print_ring_stat_from_hal(pdev->soc,
5231 			&pdev->soc->reo_cmd_ring,
5232 			"Reo Command Ring");
5233 	dp_print_ring_stat_from_hal(pdev->soc,
5234 			&pdev->soc->reo_status_ring,
5235 			"Reo Status Ring");
5236 	dp_print_ring_stat_from_hal(pdev->soc,
5237 			&pdev->soc->rx_rel_ring,
5238 			"Rx Release ring");
5239 	dp_print_ring_stat_from_hal(pdev->soc,
5240 			&pdev->soc->tcl_cmd_ring,
5241 			"Tcl command Ring");
5242 	dp_print_ring_stat_from_hal(pdev->soc,
5243 			&pdev->soc->tcl_status_ring,
5244 			"Tcl Status Ring");
5245 	dp_print_ring_stat_from_hal(pdev->soc,
5246 			&pdev->soc->wbm_desc_rel_ring,
5247 			"Wbm Desc Rel Ring");
5248 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5249 		snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
5250 		dp_print_ring_stat_from_hal(pdev->soc,
5251 				&pdev->soc->reo_dest_ring[i],
5252 				ring_name);
5253 	}
5254 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
5255 		snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
5256 		dp_print_ring_stat_from_hal(pdev->soc,
5257 				&pdev->soc->tcl_data_ring[i],
5258 				ring_name);
5259 	}
5260 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
5261 		snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
5262 		dp_print_ring_stat_from_hal(pdev->soc,
5263 				&pdev->soc->tx_comp_ring[i],
5264 				ring_name);
5265 	}
5266 	dp_print_ring_stat_from_hal(pdev->soc,
5267 			&pdev->rx_refill_buf_ring,
5268 			"Rx Refill Buf Ring");
5269 
5270 	dp_print_ring_stat_from_hal(pdev->soc,
5271 			&pdev->rx_refill_buf_ring2,
5272 			"Second Rx Refill Buf Ring");
5273 
5274 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5275 		dp_print_ring_stat_from_hal(pdev->soc,
5276 				&pdev->rxdma_mon_buf_ring[mac_id],
5277 				"Rxdma Mon Buf Ring");
5278 		dp_print_ring_stat_from_hal(pdev->soc,
5279 				&pdev->rxdma_mon_dst_ring[mac_id],
5280 				"Rxdma Mon Dst Ring");
5281 		dp_print_ring_stat_from_hal(pdev->soc,
5282 				&pdev->rxdma_mon_status_ring[mac_id],
5283 				"Rxdma Mon Status Ring");
5284 		dp_print_ring_stat_from_hal(pdev->soc,
5285 				&pdev->rxdma_mon_desc_ring[mac_id],
5286 				"Rxdma mon desc Ring");
5287 	}
5288 
5289 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5290 		snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
5291 		dp_print_ring_stat_from_hal(pdev->soc,
5292 			&pdev->rxdma_err_dst_ring[i],
5293 			ring_name);
5294 	}
5295 
5296 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5297 		snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
5298 		dp_print_ring_stat_from_hal(pdev->soc,
5299 				&pdev->rx_mac_buf_ring[i],
5300 				ring_name);
5301 	}
5302 }
5303 
5304 /**
5305  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
5306  * @vdev: DP_VDEV handle
5307  *
5308  * Return:void
5309  */
5310 static inline void
5311 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
5312 {
5313 	struct dp_peer *peer = NULL;
5314 	struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
5315 
5316 	DP_STATS_CLR(vdev->pdev);
5317 	DP_STATS_CLR(vdev->pdev->soc);
5318 	DP_STATS_CLR(vdev);
5319 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5320 		if (!peer)
5321 			return;
5322 		DP_STATS_CLR(peer);
5323 
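		/*
		 * Push the cleared per-peer stats to the control path so any
		 * cached copy there is reset as well.
		 */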
5324 		if (soc->cdp_soc.ol_ops->update_dp_stats) {
5325 			soc->cdp_soc.ol_ops->update_dp_stats(
5326 					vdev->pdev->osif_pdev,
5327 					&peer->stats,
5328 					peer->peer_ids[0],
5329 					UPDATE_PEER_STATS);
5330 		}
5331 
5332 	}
5333 
5334 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5335 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
5336 				&vdev->stats, (uint16_t)vdev->vdev_id,
5337 				UPDATE_VDEV_STATS);
5338 }
5339 
5340 /**
5341  * dp_print_rx_rates(): Print Rx rate stats
5342  * @vdev: DP_VDEV handle
5343  *
5344  * Return:void
5345  */
5346 static inline void
5347 dp_print_rx_rates(struct dp_vdev *vdev)
5348 {
5349 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5350 	uint8_t i, mcs, pkt_type;
5351 	uint8_t index = 0;
5352 	char nss[DP_NSS_LENGTH];
5353 
5354 	DP_PRINT_STATS("Rx Rate Info:\n");
5355 
5356 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5357 		index = 0;
5358 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5359 			if (!dp_rate_string[pkt_type][mcs].valid)
5360 				continue;
5361 
5362 			DP_PRINT_STATS("	%s = %d",
5363 					dp_rate_string[pkt_type][mcs].mcs_type,
5364 					pdev->stats.rx.pkt_type[pkt_type].
5365 					mcs_count[mcs]);
5366 		}
5367 
5368 		DP_PRINT_STATS("\n");
5369 	}
5370 
5371 	index = 0;
5372 	for (i = 0; i < SS_COUNT; i++) {
5373 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5374 				" %d", pdev->stats.rx.nss[i]);
5375 	}
5376 	DP_PRINT_STATS("NSS(1-8) = %s",
5377 			nss);
5378 
5379 	DP_PRINT_STATS("SGI ="
5380 			" 0.8us %d,"
5381 			" 0.4us %d,"
5382 			" 1.6us %d,"
5383 			" 3.2us %d,",
5384 			pdev->stats.rx.sgi_count[0],
5385 			pdev->stats.rx.sgi_count[1],
5386 			pdev->stats.rx.sgi_count[2],
5387 			pdev->stats.rx.sgi_count[3]);
5388 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
5389 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
5390 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
5391 	DP_PRINT_STATS("Reception Type ="
5392 			" SU: %d,"
5393 			" MU_MIMO:%d,"
5394 			" MU_OFDMA:%d,"
5395 			" MU_OFDMA_MIMO:%d\n",
5396 			pdev->stats.rx.reception_type[0],
5397 			pdev->stats.rx.reception_type[1],
5398 			pdev->stats.rx.reception_type[2],
5399 			pdev->stats.rx.reception_type[3]);
5400 	DP_PRINT_STATS("Aggregation:\n");
5401 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
5402 			pdev->stats.rx.ampdu_cnt);
5403 	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation: %d",
5404 			pdev->stats.rx.non_ampdu_cnt);
5405 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
5406 			pdev->stats.rx.amsdu_cnt);
5407 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
5408 			pdev->stats.rx.non_amsdu_cnt);
5409 }
5410 
5411 /**
5412  * dp_print_tx_rates(): Print tx rates
5413  * @vdev: DP_VDEV handle
5414  *
5415  * Return:void
5416  */
5417 static inline void
5418 dp_print_tx_rates(struct dp_vdev *vdev)
5419 {
5420 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5421 	uint8_t mcs, pkt_type;
5422 	uint32_t index;
5423 
5424 	DP_PRINT_STATS("Tx Rate Info:\n");
5425 
5426 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5427 		index = 0;
5428 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5429 			if (!dp_rate_string[pkt_type][mcs].valid)
5430 				continue;
5431 
5432 			DP_PRINT_STATS("	%s = %d",
5433 					dp_rate_string[pkt_type][mcs].mcs_type,
5434 					pdev->stats.tx.pkt_type[pkt_type].
5435 					mcs_count[mcs]);
5436 		}
5437 
5438 		DP_PRINT_STATS("\n");
5439 	}
5440 
5441 	DP_PRINT_STATS("SGI ="
5442 			" 0.8us %d"
5443 			" 0.4us %d"
5444 			" 1.6us %d"
5445 			" 3.2us %d",
5446 			pdev->stats.tx.sgi_count[0],
5447 			pdev->stats.tx.sgi_count[1],
5448 			pdev->stats.tx.sgi_count[2],
5449 			pdev->stats.tx.sgi_count[3]);
5450 
5451 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
5452 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
5453 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
5454 
5455 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
5456 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
5457 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
5458 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
5459 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
5460 
5461 	DP_PRINT_STATS("Aggregation:\n");
5462 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
5463 			pdev->stats.tx.amsdu_cnt);
5464 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
5465 			pdev->stats.tx.non_amsdu_cnt);
5466 }
5467 
5468 /**
5469  * dp_print_peer_stats():print peer stats
5470  * @peer: DP_PEER handle
5471  *
5472  * return void
5473  */
5474 static inline void dp_print_peer_stats(struct dp_peer *peer)
5475 {
5476 	uint8_t i, mcs, pkt_type;
5477 	uint32_t index;
5478 	char nss[DP_NSS_LENGTH];
5479 	DP_PRINT_STATS("Node Tx Stats:\n");
5480 	DP_PRINT_STATS("Total Packet Completions = %d",
5481 			peer->stats.tx.comp_pkt.num);
5482 	DP_PRINT_STATS("Total Bytes Completions = %llu",
5483 			peer->stats.tx.comp_pkt.bytes);
5484 	DP_PRINT_STATS("Success Packets = %d",
5485 			peer->stats.tx.tx_success.num);
5486 	DP_PRINT_STATS("Success Bytes = %llu",
5487 			peer->stats.tx.tx_success.bytes);
5488 	DP_PRINT_STATS("Unicast Success Packets = %d",
5489 			peer->stats.tx.ucast.num);
5490 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
5491 			peer->stats.tx.ucast.bytes);
5492 	DP_PRINT_STATS("Multicast Success Packets = %d",
5493 			peer->stats.tx.mcast.num);
5494 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
5495 			peer->stats.tx.mcast.bytes);
5496 	DP_PRINT_STATS("Broadcast Success Packets = %d",
5497 			peer->stats.tx.bcast.num);
5498 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
5499 			peer->stats.tx.bcast.bytes);
5500 	DP_PRINT_STATS("Packets Failed = %d",
5501 			peer->stats.tx.tx_failed);
5502 	DP_PRINT_STATS("Packets In OFDMA = %d",
5503 			peer->stats.tx.ofdma);
5504 	DP_PRINT_STATS("Packets In STBC = %d",
5505 			peer->stats.tx.stbc);
5506 	DP_PRINT_STATS("Packets In LDPC = %d",
5507 			peer->stats.tx.ldpc);
5508 	DP_PRINT_STATS("Packet Retries = %d",
5509 			peer->stats.tx.retries);
5510 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
5511 			peer->stats.tx.amsdu_cnt);
5512 	DP_PRINT_STATS("Last Packet RSSI = %d",
5513 			peer->stats.tx.last_ack_rssi);
5514 	DP_PRINT_STATS("Dropped At FW: Removed = %d",
5515 			peer->stats.tx.dropped.fw_rem);
5516 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
5517 			peer->stats.tx.dropped.fw_rem_tx);
5518 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
5519 			peer->stats.tx.dropped.fw_rem_notx);
5520 	DP_PRINT_STATS("Dropped : Age Out = %d",
5521 			peer->stats.tx.dropped.age_out);
5522 	DP_PRINT_STATS("NAWDS : ");
5523 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
5524 			peer->stats.tx.nawds_mcast_drop);
5525 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Count = %d",
5526 			peer->stats.tx.nawds_mcast.num);
5527 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Bytes = %llu",
5528 			peer->stats.tx.nawds_mcast.bytes);
5529 
5530 	DP_PRINT_STATS("Rate Info:");
5531 
5532 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5533 		index = 0;
5534 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5535 			if (!dp_rate_string[pkt_type][mcs].valid)
5536 				continue;
5537 
5538 			DP_PRINT_STATS("	%s = %d",
5539 					dp_rate_string[pkt_type][mcs].mcs_type,
5540 					peer->stats.tx.pkt_type[pkt_type].
5541 					mcs_count[mcs]);
5542 		}
5543 
5544 		DP_PRINT_STATS("\n");
5545 	}
5546 
5547 	DP_PRINT_STATS("SGI = "
5548 			" 0.8us %d"
5549 			" 0.4us %d"
5550 			" 1.6us %d"
5551 			" 3.2us %d",
5552 			peer->stats.tx.sgi_count[0],
5553 			peer->stats.tx.sgi_count[1],
5554 			peer->stats.tx.sgi_count[2],
5555 			peer->stats.tx.sgi_count[3]);
5556 	DP_PRINT_STATS("Excess Retries per AC ");
5557 	DP_PRINT_STATS("	 Best effort = %d",
5558 			peer->stats.tx.excess_retries_per_ac[0]);
5559 	DP_PRINT_STATS("	 Background= %d",
5560 			peer->stats.tx.excess_retries_per_ac[1]);
5561 	DP_PRINT_STATS("	 Video = %d",
5562 			peer->stats.tx.excess_retries_per_ac[2]);
5563 	DP_PRINT_STATS("	 Voice = %d",
5564 			peer->stats.tx.excess_retries_per_ac[3]);
5565 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
5566 			peer->stats.tx.bw[2], peer->stats.tx.bw[3],
5567 			peer->stats.tx.bw[4], peer->stats.tx.bw[5]);
5568 
5569 	index = 0;
5570 	for (i = 0; i < SS_COUNT; i++) {
5571 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5572 				" %d", peer->stats.tx.nss[i]);
5573 	}
5574 	DP_PRINT_STATS("NSS(1-8) = %s",
5575 			nss);
5576 
5577 	DP_PRINT_STATS("Aggregation:");
5578 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
5579 			peer->stats.tx.amsdu_cnt);
5580 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
5581 			peer->stats.tx.non_amsdu_cnt);
5582 
5583 	DP_PRINT_STATS("Node Rx Stats:");
5584 	DP_PRINT_STATS("Packets Sent To Stack = %d",
5585 			peer->stats.rx.to_stack.num);
5586 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
5587 			peer->stats.rx.to_stack.bytes);
5588 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
5589 		DP_PRINT_STATS("Ring Id = %d", i);
5590 		DP_PRINT_STATS("	Packets Received = %d",
5591 				peer->stats.rx.rcvd_reo[i].num);
5592 		DP_PRINT_STATS("	Bytes Received = %llu",
5593 				peer->stats.rx.rcvd_reo[i].bytes);
5594 	}
5595 	DP_PRINT_STATS("Multicast Packets Received = %d",
5596 			peer->stats.rx.multicast.num);
5597 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
5598 			peer->stats.rx.multicast.bytes);
5599 	DP_PRINT_STATS("Broadcast Packets Received = %d",
5600 			peer->stats.rx.bcast.num);
5601 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
5602 			peer->stats.rx.bcast.bytes);
5603 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
5604 			peer->stats.rx.intra_bss.pkts.num);
5605 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
5606 			peer->stats.rx.intra_bss.pkts.bytes);
5607 	DP_PRINT_STATS("Raw Packets Received = %d",
5608 			peer->stats.rx.raw.num);
5609 	DP_PRINT_STATS("Raw Bytes Received = %llu",
5610 			peer->stats.rx.raw.bytes);
5611 	DP_PRINT_STATS("Errors: MIC Errors = %d",
5612 			peer->stats.rx.err.mic_err);
5613 	DP_PRINT_STATS("Errors: Decryption Errors = %d",
5614 			peer->stats.rx.err.decrypt_err);
5615 	DP_PRINT_STATS("Msdu's Received With No Mpdu Level Aggregation = %d",
5616 			peer->stats.rx.non_ampdu_cnt);
5617 	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
5618 			peer->stats.rx.ampdu_cnt);
5619 	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
5620 			peer->stats.rx.non_amsdu_cnt);
5621 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
5622 			peer->stats.rx.amsdu_cnt);
5623 	DP_PRINT_STATS("NAWDS : ");
5624 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
5625 			peer->stats.rx.nawds_mcast_drop.num);
5626 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet Bytes = %llu",
5627 			peer->stats.rx.nawds_mcast_drop.bytes);
5628 	DP_PRINT_STATS("SGI ="
5629 			" 0.8us %d"
5630 			" 0.4us %d"
5631 			" 1.6us %d"
5632 			" 3.2us %d",
5633 			peer->stats.rx.sgi_count[0],
5634 			peer->stats.rx.sgi_count[1],
5635 			peer->stats.rx.sgi_count[2],
5636 			peer->stats.rx.sgi_count[3]);
5637 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
5638 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
5639 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
5640 	DP_PRINT_STATS("Reception Type ="
5641 			" SU %d,"
5642 			" MU_MIMO %d,"
5643 			" MU_OFDMA %d,"
5644 			" MU_OFDMA_MIMO %d",
5645 			peer->stats.rx.reception_type[0],
5646 			peer->stats.rx.reception_type[1],
5647 			peer->stats.rx.reception_type[2],
5648 			peer->stats.rx.reception_type[3]);
5649 
5650 
5651 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5652 		index = 0;
5653 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5654 			if (!dp_rate_string[pkt_type][mcs].valid)
5655 				continue;
5656 
5657 			DP_PRINT_STATS("	%s = %d",
5658 					dp_rate_string[pkt_type][mcs].mcs_type,
5659 					peer->stats.rx.pkt_type[pkt_type].
5660 					mcs_count[mcs]);
5661 		}
5662 
5663 		DP_PRINT_STATS("\n");
5664 	}
5665 
5666 	index = 0;
5667 	for (i = 0; i < SS_COUNT; i++) {
5668 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5669 				" %d", peer->stats.rx.nss[i]);
5670 	}
5671 	DP_PRINT_STATS("NSS(1-8) = %s",
5672 			nss);
5673 
5674 	DP_PRINT_STATS("Aggregation:");
5675 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
5676 			peer->stats.rx.ampdu_cnt);
5677 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
5678 			peer->stats.rx.non_ampdu_cnt);
5679 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
5680 			peer->stats.rx.amsdu_cnt);
5681 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
5682 			peer->stats.rx.non_amsdu_cnt);
5683 }
5684 
5685 /**
5686  * dp_print_host_stats()- Function to print the stats aggregated at host
5687  * @vdev_handle: DP_VDEV handle
5688  * @type: host stats type
5689  *
5690  * Available Stat types
5691  * TXRX_CLEAR_STATS  : Clear the stats
5692  * TXRX_RX_RATE_STATS: Print Rx Rate Info
5693  * TXRX_TX_RATE_STATS: Print Tx Rate Info
5694  * TXRX_TX_HOST_STATS: Print Tx Stats
5695  * TXRX_RX_HOST_STATS: Print Rx Stats
5696  * TXRX_AST_STATS: Print AST Stats
5697  * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
5698  *
5699  * Return: 0 on success, print error message in case of failure
5700  */
5701 static int
5702 dp_print_host_stats(struct cdp_vdev *vdev_handle, enum cdp_host_txrx_stats type)
5703 {
5704 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5705 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5706 
5707 	dp_aggregate_pdev_stats(pdev);
5708 
5709 	switch (type) {
5710 	case TXRX_CLEAR_STATS:
5711 		dp_txrx_host_stats_clr(vdev);
5712 		break;
5713 	case TXRX_RX_RATE_STATS:
5714 		dp_print_rx_rates(vdev);
5715 		break;
5716 	case TXRX_TX_RATE_STATS:
5717 		dp_print_tx_rates(vdev);
5718 		break;
5719 	case TXRX_TX_HOST_STATS:
5720 		dp_print_pdev_tx_stats(pdev);
5721 		dp_print_soc_tx_stats(pdev->soc);
5722 		break;
5723 	case TXRX_RX_HOST_STATS:
5724 		dp_print_pdev_rx_stats(pdev);
5725 		dp_print_soc_rx_stats(pdev->soc);
5726 		break;
5727 	case TXRX_AST_STATS:
5728 		dp_print_ast_stats(pdev->soc);
5729 		dp_print_peer_table(vdev);
5730 		break;
5731 	case TXRX_SRNG_PTR_STATS:
5732 		 dp_print_ring_stats(pdev);
5733 		 break;
5734 	default:
5735 		DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
5736 		break;
5737 	}
5738 	return 0;
5739 }
5740 
5741 /*
5742  * dp_get_host_peer_stats()- function to print peer stats
5743  * @pdev_handle: DP_PDEV handle
5744  * @mac_addr: mac address of the peer
5745  *
5746  * Return: void
5747  */
5748 static void
5749 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
5750 {
5751 	struct dp_peer *peer;
5752 	uint8_t local_id;
5753 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
5754 			&local_id);
5755 
5756 	if (!peer) {
5757 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5758 			"%s: Invalid peer\n", __func__);
5759 		return;
5760 	}
5761 
5762 	dp_print_peer_stats(peer);
5763 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
5764 	return;
5765 }
5766 
5767 /*
5768  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
5769  * @pdev: DP_PDEV handle
5770  *
5771  * Return: void
5772  */
5773 static void
5774 dp_ppdu_ring_reset(struct dp_pdev *pdev)
5775 {
5776 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5777 	int mac_id;
5778 
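	/*
	 * A zeroed TLV filter unsubscribes every TLV on the monitor status
	 * ring, effectively turning off PPDU stats delivery.
	 */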
5779 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5780 
5781 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5782 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5783 							pdev->pdev_id);
5784 
5785 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
5786 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5787 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5788 	}
5789 }
5790 
5791 /*
5792  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
5793  * @pdev: DP_PDEV handle
5794  *
5795  * Return: void
5796  */
5797 static void
5798 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
5799 {
5800 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
5801 	int mac_id;
5802 
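	/*
	 * Subscribe only to the TLVs needed for PPDU stats (MPDU start,
	 * packet header, attention and the PPDU start/end/user-stats TLVs);
	 * MSDU and payload TLVs stay disabled. Monitor-other (MO) frames are
	 * enabled only in M-copy mode.
	 */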
5803 	htt_tlv_filter.mpdu_start = 1;
5804 	htt_tlv_filter.msdu_start = 0;
5805 	htt_tlv_filter.packet = 0;
5806 	htt_tlv_filter.msdu_end = 0;
5807 	htt_tlv_filter.mpdu_end = 0;
5808 	htt_tlv_filter.packet_header = 1;
5809 	htt_tlv_filter.attention = 1;
5810 	htt_tlv_filter.ppdu_start = 1;
5811 	htt_tlv_filter.ppdu_end = 1;
5812 	htt_tlv_filter.ppdu_end_user_stats = 1;
5813 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5814 	htt_tlv_filter.ppdu_end_status_done = 1;
5815 	htt_tlv_filter.enable_fp = 1;
5816 	htt_tlv_filter.enable_md = 0;
5817 	if (pdev->mcopy_mode)
5818 		htt_tlv_filter.enable_mo = 1;
5819 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5820 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5821 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5822 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5823 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5824 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
5825 
5826 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5827 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
5828 						pdev->pdev_id);
5829 
5830 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
5831 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5832 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
5833 	}
5834 }
5835 
5836 /*
5837  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
5838  * @pdev_handle: DP_PDEV handle
5839  * @val: user provided value
5840  *
5841  * Return: void
5842  */
5843 static void
5844 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
5845 {
5846 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5847 
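	/*
	 * val: 0 - disable TX sniffer and M-copy mode,
	 *      1 - enable TX sniffer,
	 *      2 - enable M-copy mode
	 */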
5848 	switch (val) {
5849 	case 0:
5850 		pdev->tx_sniffer_enable = 0;
5851 		pdev->mcopy_mode = 0;
5852 
5853 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en) {
5854 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
5855 			dp_ppdu_ring_reset(pdev);
5856 		} else if (pdev->enhanced_stats_en) {
5857 			dp_h2t_cfg_stats_msg_send(pdev,
5858 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
5859 		}
5860 		break;
5861 
5862 	case 1:
5863 		pdev->tx_sniffer_enable = 1;
5864 		pdev->mcopy_mode = 0;
5865 
5866 		if (!pdev->pktlog_ppdu_stats)
5867 			dp_h2t_cfg_stats_msg_send(pdev,
5868 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
5869 		break;
5870 	case 2:
5871 		pdev->mcopy_mode = 1;
5872 		pdev->tx_sniffer_enable = 0;
5873 		if (!pdev->enhanced_stats_en)
5874 			dp_ppdu_ring_cfg(pdev);
5875 
5876 		if (!pdev->pktlog_ppdu_stats)
5877 			dp_h2t_cfg_stats_msg_send(pdev,
5878 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
5879 		break;
5880 	default:
5881 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5882 			"Invalid value\n");
5883 		break;
5884 	}
5885 }
5886 
5887 /*
5888  * dp_enable_enhanced_stats()- API to enable enhanced statistics
5889  * @pdev_handle: DP_PDEV handle
5890  *
5891  * Return: void
5892  */
5893 static void
5894 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
5895 {
5896 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5897 	pdev->enhanced_stats_en = 1;
5898 
5899 	if (!pdev->mcopy_mode)
5900 		dp_ppdu_ring_cfg(pdev);
5901 
5902 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable && !pdev->mcopy_mode)
5903 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
5904 }
5905 
5906 /*
5907  * dp_disable_enhanced_stats()- API to disable enhanced statistics
5908  * @pdev_handle: DP_PDEV handle
5909  *
5910  * Return: void
5911  */
5912 static void
5913 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
5914 {
5915 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5916 
5917 	pdev->enhanced_stats_en = 0;
5918 
5919 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable && !pdev->mcopy_mode)
5920 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
5921 
5922 	if (!pdev->mcopy_mode)
5923 		dp_ppdu_ring_reset(pdev);
5924 }
5925 
5926 /*
5927  * dp_get_fw_peer_stats()- function to print peer stats
5928  * @pdev_handle: DP_PDEV handle
5929  * @mac_addr: mac address of the peer
5930  * @cap: Type of htt stats requested
5931  *
5932  * Currently supports only MAC ID based requests. Supported @cap values:
5933  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
5934  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
5935  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
5936  *
5937  * Return: void
5938  */
5939 static void
5940 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
5941 		uint32_t cap)
5942 {
5943 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5944 	int i;
5945 	uint32_t config_param0 = 0;
5946 	uint32_t config_param1 = 0;
5947 	uint32_t config_param2 = 0;
5948 	uint32_t config_param3 = 0;
5949 
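	/*
	 * Mark the request as MAC-address based and set the request-mode
	 * bit selected by 'cap' (no-query / query-TQM / flush-TQM).
	 */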
5950 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
5951 	config_param0 |= (1 << (cap + 1));
5952 
5953 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
5954 		config_param1 |= (1 << i);
5955 	}
5956 
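	/*
	 * Pack the 6-byte MAC address into config_param2 (bytes 0-3) and
	 * config_param3 (bytes 4-5), least significant byte first.
	 */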
5957 	config_param2 |= (mac_addr[0] & 0x000000ff);
5958 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
5959 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
5960 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
5961 
5962 	config_param3 |= (mac_addr[4] & 0x000000ff);
5963 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
5964 
5965 	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
5966 			config_param0, config_param1, config_param2,
5967 			config_param3, 0, 0, 0);
5968 
5969 }
5970 
5971 /* This struct definition will be removed from here
5972  * once it gets added to the FW headers */
5973 struct httstats_cmd_req {
5974 	uint32_t	config_param0;
5975 	uint32_t	config_param1;
5976 	uint32_t	config_param2;
5977 	uint32_t	config_param3;
5978 	int		cookie;
5979 	u_int8_t	stats_id;
5980 };
5981 
5982 /*
5983  * dp_get_htt_stats: function to process the httstats request
5984  * @pdev_handle: DP pdev handle
5985  * @data: pointer to request data
5986  * @data_len: length for request data
5987  *
5988  * return: void
5989  */
5990 static void
5991 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
5992 {
5993 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5994 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
5995 
5996 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
5997 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
5998 				req->config_param0, req->config_param1,
5999 				req->config_param2, req->config_param3,
6000 				req->cookie, 0, 0);
6001 }
6002 /*
6003  * dp_set_pdev_param: function to set parameters in pdev
6004  * @pdev_handle: DP pdev handle
6005  * @param: parameter type to be set
6006  * @val: value of parameter to be set
6007  *
6008  * return: void
6009  */
6010 static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
6011 		enum cdp_pdev_param_type param, uint8_t val)
6012 {
6013 	switch (param) {
6014 	case CDP_CONFIG_DEBUG_SNIFFER:
6015 		dp_config_debug_sniffer(pdev_handle, val);
6016 		break;
6017 	default:
6018 		break;
6019 	}
6020 }
6021 
6022 /*
6023  * dp_set_vdev_param: function to set parameters in vdev
6024  * @param: parameter type to be set
6025  * @val: value of parameter to be set
6026  *
6027  * return: void
6028  */
6029 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
6030 		enum cdp_vdev_param_type param, uint32_t val)
6031 {
6032 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6033 	switch (param) {
6034 	case CDP_ENABLE_WDS:
6035 		vdev->wds_enabled = val;
6036 		break;
6037 	case CDP_ENABLE_NAWDS:
6038 		vdev->nawds_enabled = val;
6039 		break;
6040 	case CDP_ENABLE_MCAST_EN:
6041 		vdev->mcast_enhancement_en = val;
6042 		break;
6043 	case CDP_ENABLE_PROXYSTA:
6044 		vdev->proxysta_vdev = val;
6045 		break;
6046 	case CDP_UPDATE_TDLS_FLAGS:
6047 		vdev->tdls_link_connected = val;
6048 		break;
6049 	case CDP_CFG_WDS_AGING_TIMER:
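		/*
		 * A value of 0 stops WDS AST aging; any other value re-arms
		 * the aging timer with the new period.
		 */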
6050 		if (val == 0)
6051 			qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
6052 		else if (val != vdev->wds_aging_timer_val)
6053 			qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);
6054 
6055 		vdev->wds_aging_timer_val = val;
6056 		break;
6057 	case CDP_ENABLE_AP_BRIDGE:
6058 		if (wlan_op_mode_sta != vdev->opmode)
6059 			vdev->ap_bridge_enabled = val;
6060 		else
6061 			vdev->ap_bridge_enabled = false;
6062 		break;
6063 	case CDP_ENABLE_CIPHER:
6064 		vdev->sec_type = val;
6065 		break;
6066 	case CDP_ENABLE_QWRAP_ISOLATION:
6067 		vdev->isolation_vdev = val;
6068 		break;
6069 	default:
6070 		break;
6071 	}
6072 
6073 	dp_tx_vdev_update_search_flags(vdev);
6074 }
6075 
6076 /**
6077  * dp_peer_set_nawds: set nawds bit in peer
6078  * @peer_handle: pointer to peer
6079  * @value: enable/disable nawds
6080  *
6081  * return: void
6082  */
6083 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
6084 {
6085 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6086 	peer->nawds_enabled = value;
6087 }
6088 
6089 /*
6090  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
6091  * @vdev_handle: DP_VDEV handle
6092  * @map_id:ID of map that needs to be updated
6093  *
6094  * Return: void
6095  */
6096 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
6097 		uint8_t map_id)
6098 {
6099 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6100 	vdev->dscp_tid_map_id = map_id;
6101 	return;
6102 }
6103 
6104 /*
6105  * dp_txrx_stats_publish(): publish pdev stats into a buffer
6106  * @pdev_handle: DP_PDEV handle
6107  * @buf: to hold pdev_stats
6108  *
6109  * Return: int
6110  */
6111 static int
6112 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
6113 {
6114 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6115 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
6116 	struct cdp_txrx_stats_req req = {0,};
6117 
6118 	dp_aggregate_pdev_stats(pdev);
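	/*
	 * Fetch FW pdev TX stats; the sleep below gives the HTT response
	 * time to arrive and be folded into pdev->stats before the copy-out.
	 */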
6119 	req.stats = HTT_DBG_EXT_STATS_PDEV_TX;
6120 	req.cookie_val = 1;
6121 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6122 				req.param1, req.param2, req.param3, 0,
6123 				req.cookie_val, 0);
6124 
6125 	msleep(DP_MAX_SLEEP_TIME);
6126 
6127 	req.stats = HTT_DBG_EXT_STATS_PDEV_RX;
6128 	req.cookie_val = 1;
6129 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6130 				req.param1, req.param2, req.param3, 0,
6131 				req.cookie_val, 0);
6132 
6133 	msleep(DP_MAX_SLEEP_TIME);
6134 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
6135 
6136 	return TXRX_STATS_LEVEL;
6137 }
6138 
6139 /**
6140  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
6141  * @pdev: DP_PDEV handle
6142  * @pdev_handle: DP_PDEV handle
6143  * @tos: index value in map
6144  * @tid: tid value passed by the user
6145  *
6146  * Return: void
6147  */
6148 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
6149 		uint8_t map_id, uint8_t tos, uint8_t tid)
6150 {
6151 	uint8_t dscp;
6152 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
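	/*
	 * The DSCP code point is carried in the upper six bits of the
	 * IP TOS / traffic-class octet.
	 */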
6153 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
6154 	pdev->dscp_tid_map[map_id][dscp] = tid;
6155 	if (map_id < HAL_MAX_HW_DSCP_TID_MAPS)
6156 		hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
6157 			map_id, dscp);
6158 	return;
6159 }
6160 
6161 /**
6162  * dp_fw_stats_process(): Process TxRX FW stats request
6163  * @vdev_handle: DP VDEV handle
6164  * @req: stats request
6165  *
6166  * return: int
6167  */
6168 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
6169 		struct cdp_txrx_stats_req *req)
6170 {
6171 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6172 	struct dp_pdev *pdev = NULL;
6173 	uint32_t stats = req->stats;
6174 	uint8_t channel = req->channel;
6175 
6176 	if (!vdev) {
6177 		DP_TRACE(NONE, "VDEV not found");
6178 		return 1;
6179 	}
6180 	pdev = vdev->pdev;
6181 
6182 	/*
6183 	 * For the HTT_DBG_EXT_STATS_RESET command, the FW expects
6184 	 * config_param0 to config_param3 to be filled in as follows:
6185 	 *
6186 	 * PARAM:
6187 	 *   - config_param0 : start_offset (stats type)
6188 	 *   - config_param1 : stats bmask from start offset
6189 	 *   - config_param2 : stats bmask from start offset + 32
6190 	 *   - config_param3 : stats bmask from start offset + 64
6191 	 */
6192 	if (req->stats == CDP_TXRX_STATS_0) {
6193 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
6194 		req->param1 = 0xFFFFFFFF;
6195 		req->param2 = 0xFFFFFFFF;
6196 		req->param3 = 0xFFFFFFFF;
6197 	}
6198 
6199 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
6200 				req->param1, req->param2, req->param3,
6201 				0, 0, channel);
6202 }
6203 
6204 /**
6205  * dp_txrx_stats_request - function to map to firmware and host stats
6206  * @vdev: virtual handle
6207  * @req: stats request
6208  *
6209  * Return: integer
6210  */
6211 static int dp_txrx_stats_request(struct cdp_vdev *vdev,
6212 		struct cdp_txrx_stats_req *req)
6213 {
6214 	int host_stats;
6215 	int fw_stats;
6216 	enum cdp_stats stats;
6217 
6218 	if (!vdev || !req) {
6219 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6220 				"Invalid vdev/req instance");
6221 		return 0;
6222 	}
6223 
6224 	stats = req->stats;
6225 	if (stats >= CDP_TXRX_MAX_STATS)
6226 		return 0;
6227 
6228 	/*
6229 	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
6230 	 *			must be updated whenever new FW HTT stats are added
6231 	 */
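	/*
	 * Stat IDs beyond the HTT range are shifted down so that they index
	 * dp_stats_mapping_table correctly, since only DP_CURR_FW_STATS_AVAIL
	 * FW stat entries are present in the table.
	 */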
6232 	if (stats > CDP_TXRX_STATS_HTT_MAX)
6233 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
6234 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
6235 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
6236 
6237 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6238 		 "stats: %u fw_stats_type: %d host_stats_type: %d",
6239 		  stats, fw_stats, host_stats);
6240 
6241 	if (fw_stats != TXRX_FW_STATS_INVALID) {
6242 		/* update request with FW stats type */
6243 		req->stats = fw_stats;
6244 		return dp_fw_stats_process(vdev, req);
6245 	}
6246 
6247 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
6248 			(host_stats <= TXRX_HOST_STATS_MAX))
6249 		return dp_print_host_stats(vdev, host_stats);
6250 	else
6251 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6252 				"Wrong Input for TxRx Stats");
6253 
6254 	return 0;
6255 }
6256 
6257 /*
6258  * dp_print_napi_stats(): NAPI stats
6259  * @soc - soc handle
6260  */
6261 static void dp_print_napi_stats(struct dp_soc *soc)
6262 {
6263 	hif_print_napi_stats(soc->hif_handle);
6264 }
6265 
6266 /*
6267  * dp_print_per_ring_stats(): Packet count per ring
6268  * @soc - soc handle
6269  */
6270 static void dp_print_per_ring_stats(struct dp_soc *soc)
6271 {
6272 	uint8_t ring;
6273 	uint16_t core;
6274 	uint64_t total_packets;
6275 
6276 	DP_TRACE(FATAL, "Reo packets per ring:");
6277 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
6278 		total_packets = 0;
6279 		DP_TRACE(FATAL, "Packets on ring %u:", ring);
6280 		for (core = 0; core < NR_CPUS; core++) {
6281 			DP_TRACE(FATAL, "Packets arriving on core %u: %llu",
6282 				core, soc->stats.rx.ring_packets[core][ring]);
6283 			total_packets += soc->stats.rx.ring_packets[core][ring];
6284 		}
6285 		DP_TRACE(FATAL, "Total packets on ring %u: %llu",
6286 			ring, total_packets);
6287 	}
6288 }
6289 
6290 /*
6291  * dp_txrx_path_stats() - Function to display dump stats
6292  * @soc - soc handle
6293  *
6294  * return: none
6295  */
6296 static void dp_txrx_path_stats(struct dp_soc *soc)
6297 {
6298 	uint8_t error_code;
6299 	uint8_t loop_pdev;
6300 	struct dp_pdev *pdev;
6301 	uint8_t i;
6302 
6303 	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
6304 
6305 		pdev = soc->pdev_list[loop_pdev];
6306 		dp_aggregate_pdev_stats(pdev);
6307 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6308 			"Tx path Statistics:");
6309 
6310 		DP_TRACE(FATAL, "from stack: %u msdus (%llu bytes)",
6311 			pdev->stats.tx_i.rcvd.num,
6312 			pdev->stats.tx_i.rcvd.bytes);
6313 		DP_TRACE(FATAL, "processed from host: %u msdus (%llu bytes)",
6314 			pdev->stats.tx_i.processed.num,
6315 			pdev->stats.tx_i.processed.bytes);
6316 		DP_TRACE(FATAL, "successfully transmitted: %u msdus (%llu bytes)",
6317 			pdev->stats.tx.tx_success.num,
6318 			pdev->stats.tx.tx_success.bytes);
6319 
6320 		DP_TRACE(FATAL, "Dropped in host:");
6321 		DP_TRACE(FATAL, "Total packets dropped: %u,",
6322 			pdev->stats.tx_i.dropped.dropped_pkt.num);
6323 		DP_TRACE(FATAL, "Descriptor not available: %u",
6324 			pdev->stats.tx_i.dropped.desc_na);
6325 		DP_TRACE(FATAL, "Ring full: %u",
6326 			pdev->stats.tx_i.dropped.ring_full);
6327 		DP_TRACE(FATAL, "Enqueue fail: %u",
6328 			pdev->stats.tx_i.dropped.enqueue_fail);
6329 		DP_TRACE(FATAL, "DMA Error: %u",
6330 			pdev->stats.tx_i.dropped.dma_error);
6331 
6332 		DP_TRACE(FATAL, "Dropped in hardware:");
6333 		DP_TRACE(FATAL, "total packets dropped: %u",
6334 			pdev->stats.tx.tx_failed);
6335 		DP_TRACE(FATAL, "mpdu age out: %u",
6336 			pdev->stats.tx.dropped.age_out);
6337 		DP_TRACE(FATAL, "firmware removed: %u",
6338 			pdev->stats.tx.dropped.fw_rem);
6339 		DP_TRACE(FATAL, "firmware removed tx: %u",
6340 			pdev->stats.tx.dropped.fw_rem_tx);
6341 		DP_TRACE(FATAL, "firmware removed notx %u",
6342 			pdev->stats.tx.dropped.fw_rem_notx);
6343 		DP_TRACE(FATAL, "peer_invalid: %u",
6344 			pdev->soc->stats.tx.tx_invalid_peer.num);
6345 
6346 
6347 		DP_TRACE(FATAL, "Tx packets sent per interrupt:");
6348 		DP_TRACE(FATAL, "Single Packet: %u",
6349 			pdev->stats.tx_comp_histogram.pkts_1);
6350 		DP_TRACE(FATAL, "2-20 Packets:  %u",
6351 			pdev->stats.tx_comp_histogram.pkts_2_20);
6352 		DP_TRACE(FATAL, "21-40 Packets: %u",
6353 			pdev->stats.tx_comp_histogram.pkts_21_40);
6354 		DP_TRACE(FATAL, "41-60 Packets: %u",
6355 			pdev->stats.tx_comp_histogram.pkts_41_60);
6356 		DP_TRACE(FATAL, "61-80 Packets: %u",
6357 			pdev->stats.tx_comp_histogram.pkts_61_80);
6358 		DP_TRACE(FATAL, "81-100 Packets: %u",
6359 			pdev->stats.tx_comp_histogram.pkts_81_100);
6360 		DP_TRACE(FATAL, "101-200 Packets: %u",
6361 			pdev->stats.tx_comp_histogram.pkts_101_200);
6362 		DP_TRACE(FATAL, "   201+ Packets: %u",
6363 			pdev->stats.tx_comp_histogram.pkts_201_plus);
6364 
6365 		DP_TRACE(FATAL, "Rx path statistics");
6366 
6367 		DP_TRACE(FATAL, "delivered %u msdus ( %llu bytes),",
6368 			pdev->stats.rx.to_stack.num,
6369 			pdev->stats.rx.to_stack.bytes);
6370 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
6371 			DP_TRACE(FATAL, "received on reo[%d] %u msdus ( %llu bytes),",
6372 					i, pdev->stats.rx.rcvd_reo[i].num,
6373 					pdev->stats.rx.rcvd_reo[i].bytes);
6374 		DP_TRACE(FATAL, "intra-bss packets %u msdus ( %llu bytes),",
6375 			pdev->stats.rx.intra_bss.pkts.num,
6376 			pdev->stats.rx.intra_bss.pkts.bytes);
6377 		DP_TRACE(FATAL, "intra-bss fails %u msdus ( %llu bytes),",
6378 			pdev->stats.rx.intra_bss.fail.num,
6379 			pdev->stats.rx.intra_bss.fail.bytes);
6380 		DP_TRACE(FATAL, "raw packets %u msdus ( %llu bytes),",
6381 			pdev->stats.rx.raw.num,
6382 			pdev->stats.rx.raw.bytes);
6383 		DP_TRACE(FATAL, "dropped: error %u msdus",
6384 			pdev->stats.rx.err.mic_err);
6385 		DP_TRACE(FATAL, "peer invalid %u",
6386 			pdev->soc->stats.rx.err.rx_invalid_peer.num);
6387 
6388 		DP_TRACE(FATAL, "Reo Statistics");
6389 		DP_TRACE(FATAL, "rbm error: %u msdus",
6390 			pdev->soc->stats.rx.err.invalid_rbm);
6391 		DP_TRACE(FATAL, "hal ring access fail: %u msdus",
6392 			pdev->soc->stats.rx.err.hal_ring_access_fail);
6393 
6394 		DP_TRACE(FATAL, "Reo errors");
6395 
6396 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
6397 				error_code++) {
6398 			DP_TRACE(FATAL, "Reo error number (%u): %u msdus",
6399 				error_code,
6400 				pdev->soc->stats.rx.err.reo_error[error_code]);
6401 		}
6402 
6403 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
6404 				error_code++) {
6405 			DP_TRACE(FATAL, "Rxdma error number (%u): %u msdus",
6406 				error_code,
6407 				pdev->soc->stats.rx.err
6408 				.rxdma_error[error_code]);
6409 		}
6410 
6411 		DP_TRACE(FATAL, "Rx packets reaped per interrupt:");
6412 		DP_TRACE(FATAL, "Single Packet: %u",
6413 			 pdev->stats.rx_ind_histogram.pkts_1);
6414 		DP_TRACE(FATAL, "2-20 Packets:  %u",
6415 			 pdev->stats.rx_ind_histogram.pkts_2_20);
6416 		DP_TRACE(FATAL, "21-40 Packets: %u",
6417 			 pdev->stats.rx_ind_histogram.pkts_21_40);
6418 		DP_TRACE(FATAL, "41-60 Packets: %u",
6419 			 pdev->stats.rx_ind_histogram.pkts_41_60);
6420 		DP_TRACE(FATAL, "61-80 Packets: %u",
6421 			 pdev->stats.rx_ind_histogram.pkts_61_80);
6422 		DP_TRACE(FATAL, "81-100 Packets: %u",
6423 			 pdev->stats.rx_ind_histogram.pkts_81_100);
6424 		DP_TRACE(FATAL, "101-200 Packets: %u",
6425 			 pdev->stats.rx_ind_histogram.pkts_101_200);
6426 		DP_TRACE(FATAL, "   201+ Packets: %u",
6427 			 pdev->stats.rx_ind_histogram.pkts_201_plus);
6428 
6429 		DP_TRACE_STATS(ERROR, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
6430 			__func__,
6431 			pdev->soc->wlan_cfg_ctx->tso_enabled,
6432 			pdev->soc->wlan_cfg_ctx->lro_enabled,
6433 			pdev->soc->wlan_cfg_ctx->rx_hash,
6434 			pdev->soc->wlan_cfg_ctx->napi_enabled);
6435 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6436 		DP_TRACE_STATS(ERROR, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
6437 			__func__,
6438 			pdev->soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold,
6439 			pdev->soc->wlan_cfg_ctx->tx_flow_start_queue_offset);
6440 #endif
6441 	}
6442 }
6443 
6444 /*
6445  * dp_txrx_dump_stats() - Dump statistics
6446  * @value - Statistics option
6447  */
6448 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
6449 				     enum qdf_stats_verbosity_level level)
6450 {
6451 	struct dp_soc *soc =
6452 		(struct dp_soc *)psoc;
6453 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6454 
6455 	if (!soc) {
6456 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6457 			"%s: soc is NULL", __func__);
6458 		return QDF_STATUS_E_INVAL;
6459 	}
6460 
6461 	switch (value) {
6462 	case CDP_TXRX_PATH_STATS:
6463 		dp_txrx_path_stats(soc);
6464 		break;
6465 
6466 	case CDP_RX_RING_STATS:
6467 		dp_print_per_ring_stats(soc);
6468 		break;
6469 
6470 	case CDP_TXRX_TSO_STATS:
6471 		/* TODO: NOT IMPLEMENTED */
6472 		break;
6473 
6474 	case CDP_DUMP_TX_FLOW_POOL_INFO:
6475 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
6476 		break;
6477 
6478 	case CDP_DP_NAPI_STATS:
6479 		dp_print_napi_stats(soc);
6480 		break;
6481 
6482 	case CDP_TXRX_DESC_STATS:
6483 		/* TODO: NOT IMPLEMENTED */
6484 		break;
6485 
6486 	default:
6487 		status = QDF_STATUS_E_INVAL;
6488 		break;
6489 	}
6490 
6491 	return status;
6492 
6493 }
6494 
6495 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6496 /**
6497  * dp_update_flow_control_parameters() - API to store datapath
6498  *                            config parameters
6499  * @soc: soc handle
6500  * @params: ini parameter handle
6501  *
6502  * Return: void
6503  */
6504 static inline
6505 void dp_update_flow_control_parameters(struct dp_soc *soc,
6506 				struct cdp_config_params *params)
6507 {
6508 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
6509 					params->tx_flow_stop_queue_threshold;
6510 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
6511 					params->tx_flow_start_queue_offset;
6512 }
6513 #else
6514 static inline
6515 void dp_update_flow_control_parameters(struct dp_soc *soc,
6516 				struct cdp_config_params *params)
6517 {
6518 }
6519 #endif
6520 
6521 /**
6522  * dp_update_config_parameters() - API to store datapath
6523  *                            config parameters
6524  * @psoc: soc handle
6525  * @params: ini parameter handle
6526  *
6527  * Return: status
6528  */
6529 static
6530 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
6531 				struct cdp_config_params *params)
6532 {
6533 	struct dp_soc *soc = (struct dp_soc *)psoc;
6534 
6535 	if (!(soc)) {
6536 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6537 				"%s: Invalid handle", __func__);
6538 		return QDF_STATUS_E_INVAL;
6539 	}
6540 
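	/* Cache the INI-provided datapath settings in the SoC wlan_cfg context */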
6541 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
6542 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
6543 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
6544 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
6545 				params->tcp_udp_checksumoffload;
6546 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
6547 
6548 	dp_update_flow_control_parameters(soc, params);
6549 
6550 	return QDF_STATUS_SUCCESS;
6551 }
6552 
6553 /**
6554  * dp_txrx_set_wds_rx_policy() - API to set the WDS RX policy for a vdev
6555  *
6556  * @vdev_handle: datapath vdev handle
6557  * @val: WDS RX policy flags
6558  *
6559  * Return: void
6560  */
6561 #ifdef WDS_VENDOR_EXTENSION
6562 void
6563 dp_txrx_set_wds_rx_policy(
6564 		struct cdp_vdev *vdev_handle,
6565 		u_int32_t val)
6566 {
6567 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6568 	struct dp_peer *peer;
6569 	if (vdev->opmode == wlan_op_mode_ap) {
6570 		/* for ap, set it on bss_peer */
6571 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6572 			if (peer->bss_peer) {
6573 				peer->wds_ecm.wds_rx_filter = 1;
6574 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
6575 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
6576 				break;
6577 			}
6578 		}
6579 	} else if (vdev->opmode == wlan_op_mode_sta) {
6580 		peer = TAILQ_FIRST(&vdev->peer_list);
		if (!peer)
			return;
6581 		peer->wds_ecm.wds_rx_filter = 1;
6582 		peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
6583 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
6584 	}
6585 }
6586 
6587 /**
6588  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
6589  *
6590  * @peer_handle - datapath peer handle
6591  * @wds_tx_ucast: policy for unicast transmission
6592  * @wds_tx_mcast: policy for multicast transmission
6593  *
6594  * Return: void
6595  */
6596 void
6597 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
6598 		int wds_tx_ucast, int wds_tx_mcast)
6599 {
6600 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6601 	if (wds_tx_ucast || wds_tx_mcast) {
6602 		peer->wds_enabled = 1;
6603 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
6604 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
6605 	} else {
6606 		peer->wds_enabled = 0;
6607 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
6608 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
6609 	}
6610 
6611 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6612 			FL("Policy Update set to:"
6613 			   " peer->wds_enabled %d"
6614 			   " peer->wds_ecm.wds_tx_ucast_4addr %d"
6615 			   " peer->wds_ecm.wds_tx_mcast_4addr %d\n"),
6616 				peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
6617 				peer->wds_ecm.wds_tx_mcast_4addr);
6618 	return;
6619 }
6620 #endif
6621 
6622 static struct cdp_wds_ops dp_ops_wds = {
6623 	.vdev_set_wds = dp_vdev_set_wds,
6624 #ifdef WDS_VENDOR_EXTENSION
6625 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
6626 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
6627 #endif
6628 };
6629 
6630 /*
6631  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
6632  * @soc - datapath soc handle
6633  * @peer - datapath peer handle
6634  *
6635  * Delete the AST entries belonging to a peer
6636  */
6637 #ifdef FEATURE_AST
6638 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
6639 		struct dp_peer *peer)
6640 {
6641 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
6642 
6643 	qdf_spin_lock_bh(&soc->ast_lock);
6644 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
6645 		dp_peer_del_ast(soc, ast_entry);
6646 
6647 	qdf_spin_unlock_bh(&soc->ast_lock);
6648 }
6649 #else
6650 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
6651 		struct dp_peer *peer)
6652 {
6653 }
6654 #endif
6655 
6656 /*
6657  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
6658  * @vdev_handle - datapath vdev handle
6659  * @callback - callback function
6660  * @ctxt: callback context
6661  *
6662  */
6663 static void
6664 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
6665 		       ol_txrx_data_tx_cb callback, void *ctxt)
6666 {
6667 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6668 
6669 	vdev->tx_non_std_data_callback.func = callback;
6670 	vdev->tx_non_std_data_callback.ctxt = ctxt;
6671 }
6672 
6673 /**
6674  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
6675  * @pdev_hdl: datapath pdev handle
6676  *
6677  * Return: opaque pointer to dp txrx handle
6678  */
6679 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
6680 {
6681 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
6682 
6683 	return pdev->dp_txrx_handle;
6684 }
6685 
6686 /**
6687  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
6688  * @pdev_hdl: datapath pdev handle
6689  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
6690  *
6691  * Return: void
6692  */
6693 static void
6694 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
6695 {
6696 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
6697 
6698 	pdev->dp_txrx_handle = dp_txrx_hdl;
6699 }
6700 
6701 /**
6702  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
6703  * @soc_handle: datapath soc handle
6704  *
6705  * Return: opaque pointer to external dp (non-core DP)
6706  */
6707 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
6708 {
6709 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
6710 
6711 	return soc->external_txrx_handle;
6712 }
6713 
6714 /**
6715  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
6716  * @soc_handle: datapath soc handle
6717  * @txrx_handle: opaque pointer to external dp (non-core DP)
6718  *
6719  * Return: void
6720  */
6721 static void
6722 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
6723 {
6724 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
6725 
6726 	soc->external_txrx_handle = txrx_handle;
6727 }
6728 
6729 #ifdef FEATURE_AST
6730 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
6731 {
6732 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
6733 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
6734 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
6735 
6736 	/*
6737 	 * For a BSS peer, a new peer is not created on alloc_node if a
6738 	 * peer with the same address already exists; instead the refcnt of
6739 	 * the existing peer is increased. Correspondingly, in the delete
6740 	 * path only the refcnt is decreased, and the peer is deleted only
6741 	 * when all references are gone. So delete_in_progress should not be
6742 	 * set for a bss_peer unless only two references remain (the peer map
6743 	 * reference and the peer hash table reference).
6744 	 */
6745 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
6746 		return;
6747 	}
6748 
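	/*
	 * Flag the teardown and flush all AST entries learned from this
	 * peer before the peer itself is deleted.
	 */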
6749 	peer->delete_in_progress = true;
6750 	dp_peer_delete_ast_entries(soc, peer);
6751 }
6752 #endif
6753 
6754 #ifdef ATH_SUPPORT_NAC_RSSI
6755 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
6756 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
6757 		uint8_t chan_num)
6758 {
6759 
6760 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6761 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6762 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
6763 
6764 	pdev->nac_rssi_filtering = 1;
6765 	/* Store address of NAC (neighbour peer) which will be checked
6766 	 * against TA of received packets.
6767 	 */
6768 
6769 	if (cmd == CDP_NAC_PARAM_ADD) {
6770 		qdf_mem_copy(vdev->cdp_nac_rssi.client_mac,
6771 				client_macaddr, DP_MAC_ADDR_LEN);
6772 		vdev->cdp_nac_rssi_enabled = 1;
6773 	} else if (cmd == CDP_NAC_PARAM_DEL) {
6774 		if (!qdf_mem_cmp(vdev->cdp_nac_rssi.client_mac,
6775 			client_macaddr, DP_MAC_ADDR_LEN)) {
6776 				/* delete this peer from the list */
6777 			qdf_mem_zero(vdev->cdp_nac_rssi.client_mac,
6778 				DP_MAC_ADDR_LEN);
6779 		}
6780 		vdev->cdp_nac_rssi_enabled = 0;
6781 	}
6782 
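	/*
	 * Forward the command and BSSID to the target through the
	 * control-path hook, if one is registered.
	 */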
6783 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
6784 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
6785 			(vdev->pdev->osif_pdev, vdev->vdev_id, cmd, bssid);
6786 
6787 	return QDF_STATUS_SUCCESS;
6788 }
6789 #endif
6790 
6791 static struct cdp_cmn_ops dp_ops_cmn = {
6792 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
6793 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
6794 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
6795 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
6796 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
6797 	.txrx_peer_create = dp_peer_create_wifi3,
6798 	.txrx_peer_setup = dp_peer_setup_wifi3,
6799 #ifdef FEATURE_AST
6800 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
6801 #else
6802 	.txrx_peer_teardown = NULL,
6803 #endif
6804 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
6805 	.txrx_peer_del_ast = dp_peer_del_ast_wifi3,
6806 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
6807 	.txrx_peer_ast_hash_find = dp_peer_ast_hash_find_wifi3,
6808 	.txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
6809 	.txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
6810 	.txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
6811 	.txrx_peer_delete = dp_peer_delete_wifi3,
6812 	.txrx_vdev_register = dp_vdev_register_wifi3,
6813 	.txrx_soc_detach = dp_soc_detach_wifi3,
6814 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
6815 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
6816 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
6817 	.txrx_ath_getstats = dp_pdev_getstats,
6818 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
6819 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
6820 	.delba_process = dp_delba_process_wifi3,
6821 	.set_addba_response = dp_set_addba_response,
6822 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
6823 	.flush_cache_rx_queue = NULL,
6824 	/* TODO: get API's for dscp-tid need to be added*/
6825 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
6826 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
6827 	.txrx_stats_request = dp_txrx_stats_request,
6828 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
6829 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
6830 	.txrx_set_nac = dp_set_nac,
6831 	.txrx_get_tx_pending = dp_get_tx_pending,
6832 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
6833 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
6834 	.display_stats = dp_txrx_dump_stats,
6835 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
6836 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
6837 #ifdef DP_INTR_POLL_BASED
6838 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
6839 #else
6840 	.txrx_intr_attach = dp_soc_interrupt_attach,
6841 #endif
6842 	.txrx_intr_detach = dp_soc_interrupt_detach,
6843 	.set_pn_check = dp_set_pn_check_wifi3,
6844 	.update_config_parameters = dp_update_config_parameters,
6845 	/* TODO: Add other functions */
6846 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
6847 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
6848 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
6849 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
6850 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
6851 	.tx_send = dp_tx_send,
6852 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
6853 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
6854 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
6855 };
6856 
6857 static struct cdp_ctrl_ops dp_ops_ctrl = {
6858 	.txrx_peer_authorize = dp_peer_authorize,
6859 #ifdef QCA_SUPPORT_SON
6860 	.txrx_set_inact_params = dp_set_inact_params,
6861 	.txrx_start_inact_timer = dp_start_inact_timer,
6862 	.txrx_set_overload = dp_set_overload,
6863 	.txrx_peer_is_inact = dp_peer_is_inact,
6864 	.txrx_mark_peer_inact = dp_mark_peer_inact,
6865 #endif
6866 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
6867 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
6868 #ifdef MESH_MODE_SUPPORT
6869 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
6870 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
6871 #endif
6872 	.txrx_set_vdev_param = dp_set_vdev_param,
6873 	.txrx_peer_set_nawds = dp_peer_set_nawds,
6874 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
6875 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
6876 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
6877 	.txrx_update_filter_neighbour_peers =
6878 		dp_update_filter_neighbour_peers,
6879 	.txrx_get_sec_type = dp_get_sec_type,
6880 	/* TODO: Add other functions */
6881 	.txrx_wdi_event_sub = dp_wdi_event_sub,
6882 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
6883 #ifdef WDI_EVENT_ENABLE
6884 	.txrx_get_pldev = dp_get_pldev,
6885 #endif
6886 	.txrx_set_pdev_param = dp_set_pdev_param,
6887 #ifdef ATH_SUPPORT_NAC_RSSI
6888 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
6889 #endif
6890 };
6891 
6892 static struct cdp_me_ops dp_ops_me = {
6893 #ifdef ATH_SUPPORT_IQUE
6894 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
6895 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
6896 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
6897 #endif
6898 };
6899 
6900 static struct cdp_mon_ops dp_ops_mon = {
6901 	.txrx_monitor_set_filter_ucast_data = NULL,
6902 	.txrx_monitor_set_filter_mcast_data = NULL,
6903 	.txrx_monitor_set_filter_non_data = NULL,
6904 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
6905 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
6906 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
6907 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
6908 	/* Added support for HK advance filter */
6909 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
6910 };
6911 
6912 static struct cdp_host_stats_ops dp_ops_host_stats = {
6913 	.txrx_per_peer_stats = dp_get_host_peer_stats,
6914 	.get_fw_peer_stats = dp_get_fw_peer_stats,
6915 	.get_htt_stats = dp_get_htt_stats,
6916 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
6917 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
6918 	.txrx_stats_publish = dp_txrx_stats_publish,
6919 	/* TODO */
6920 };
6921 
6922 static struct cdp_raw_ops dp_ops_raw = {
6923 	/* TODO */
6924 };
6925 
6926 #ifdef CONFIG_WIN
6927 static struct cdp_pflow_ops dp_ops_pflow = {
6928 	/* TODO */
6929 };
6930 #endif /* CONFIG_WIN */
6931 
6932 #ifdef FEATURE_RUNTIME_PM
6933 /**
6934  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
6935  * @opaque_pdev: DP pdev context
6936  *
6937  * DP is ready to runtime suspend if there are no pending TX packets.
6938  *
6939  * Return: QDF_STATUS
6940  */
6941 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
6942 {
6943 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
6944 	struct dp_soc *soc = pdev->soc;
6945 
6946 	/* Call DP TX flow control API to check if there are any
6947 	   pending packets */
6948 
6949 	if (soc->intr_mode == DP_INTR_POLL)
6950 		qdf_timer_stop(&soc->int_timer);
6951 
6952 	return QDF_STATUS_SUCCESS;
6953 }
6954 
6955 /**
6956  * dp_runtime_resume() - ensure DP is ready to runtime resume
6957  * @opaque_pdev: DP pdev context
6958  *
6959  * Resume DP for runtime PM.
6960  *
6961  * Return: QDF_STATUS
6962  */
6963 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
6964 {
6965 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
6966 	struct dp_soc *soc = pdev->soc;
6967 	void *hal_srng;
6968 	int i;
6969 
6970 	if (soc->intr_mode == DP_INTR_POLL)
6971 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
6972 
6973 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
6974 		hal_srng = soc->tcl_data_ring[i].hal_srng;
6975 		if (hal_srng) {
6976 			/* We actually only need to acquire the lock */
6977 			hal_srng_access_start(soc->hal_soc, hal_srng);
6978 			/* Update SRC ring head pointer for HW to send
6979 			   all pending packets */
6980 			hal_srng_access_end(soc->hal_soc, hal_srng);
6981 		}
6982 	}
6983 
6984 	return QDF_STATUS_SUCCESS;
6985 }
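
/*
 * Illustrative sketch (not part of the driver): how a runtime-PM aware
 * caller might use the two hooks above once they are exposed through
 * cdp_misc_ops. The opaque_pdev handle and the surrounding control flow
 * are hypothetical; only dp_runtime_suspend(), dp_runtime_resume() and
 * QDF_STATUS come from this file.
 *
 *	QDF_STATUS status;
 *
 *	status = dp_runtime_suspend(opaque_pdev);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 *
 *	(bus enters low power here and wakes up again later)
 *
 *	status = dp_runtime_resume(opaque_pdev);
 */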
6986 #endif /* FEATURE_RUNTIME_PM */
6987 
6988 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
6989 {
6990 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
6991 	struct dp_soc *soc = pdev->soc;
6992 
6993 	if (soc->intr_mode == DP_INTR_POLL)
6994 		qdf_timer_stop(&soc->int_timer);
6995 
6996 	return QDF_STATUS_SUCCESS;
6997 }
6998 
6999 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
7000 {
7001 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7002 	struct dp_soc *soc = pdev->soc;
7003 
7004 	if (soc->intr_mode == DP_INTR_POLL)
7005 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7006 
7007 	return QDF_STATUS_SUCCESS;
7008 }
7009 
7010 #ifndef CONFIG_WIN
7011 static struct cdp_misc_ops dp_ops_misc = {
7012 	.tx_non_std = dp_tx_non_std,
7013 	.get_opmode = dp_get_opmode,
7014 #ifdef FEATURE_RUNTIME_PM
7015 	.runtime_suspend = dp_runtime_suspend,
7016 	.runtime_resume = dp_runtime_resume,
7017 #endif /* FEATURE_RUNTIME_PM */
7018 	.pkt_log_init = dp_pkt_log_init,
7019 	.pkt_log_con_service = dp_pkt_log_con_service,
7020 };
7021 
7022 static struct cdp_flowctl_ops dp_ops_flowctl = {
7023 	/* WIFI 3.0 DP: implement as required */
7024 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7025 	.flow_pool_map_handler = dp_tx_flow_pool_map,
7026 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
7027 	.register_pause_cb = dp_txrx_register_pause_cb,
7028 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
7029 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
7030 };
7031 
7032 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
7033 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7034 };
7035 
7036 #ifdef IPA_OFFLOAD
7037 static struct cdp_ipa_ops dp_ops_ipa = {
7038 	.ipa_get_resource = dp_ipa_get_resource,
7039 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
7040 	.ipa_op_response = dp_ipa_op_response,
7041 	.ipa_register_op_cb = dp_ipa_register_op_cb,
7042 	.ipa_get_stat = dp_ipa_get_stat,
7043 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
7044 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
7045 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
7046 	.ipa_setup = dp_ipa_setup,
7047 	.ipa_cleanup = dp_ipa_cleanup,
7048 	.ipa_setup_iface = dp_ipa_setup_iface,
7049 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
7050 	.ipa_enable_pipes = dp_ipa_enable_pipes,
7051 	.ipa_disable_pipes = dp_ipa_disable_pipes,
7052 	.ipa_set_perf_level = dp_ipa_set_perf_level
7053 };
7054 #endif
7055 
7056 static struct cdp_bus_ops dp_ops_bus = {
7057 	.bus_suspend = dp_bus_suspend,
7058 	.bus_resume = dp_bus_resume
7059 };
7060 
7061 static struct cdp_ocb_ops dp_ops_ocb = {
7062 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7063 };
7064 
7065 
7066 static struct cdp_throttle_ops dp_ops_throttle = {
7067 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7068 };
7069 
7070 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
7071 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7072 };
7073 
7074 static struct cdp_cfg_ops dp_ops_cfg = {
7075 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7076 };
7077 
7078 /*
7079  * dp_wrapper_peer_get_ref_by_addr - wrapper function to get a peer by address
7080  * @dev: physical device instance
7081  * @peer_mac_addr: peer MAC address
7082  * @local_id: local id for the peer
7083  * @debug_id: to track enum peer access
7084  *
7085  * Return: peer instance pointer
7086  */
7087 static inline void *
7088 dp_wrapper_peer_get_ref_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
7089 				u8 *local_id,
7090 				enum peer_debug_id_type debug_id)
7091 {
7092 	/*
7093 	 * Currently this function does not implement the "get ref"
7094 	 * functionality and is mapped to dp_find_peer_by_addr which does not
7095 	 * increment the peer ref count. So the peer state is uncertain after
7096 	 * calling this API. The functionality needs to be implemented.
7097 	 * Accordingly the corresponding release_ref function is NULL.
7098 	 */
7099 	return dp_find_peer_by_addr(dev, peer_mac_addr, local_id);
7100 }
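
/*
 * Illustrative sketch (not part of the driver): how a control-path
 * caller might use the wrapper above via the peer_ops table below. The
 * variable names are hypothetical. Note that, as explained in the
 * function body, no reference is actually taken, so the returned
 * pointer must not be cached and there is no matching release call
 * (peer_release_ref is NULL).
 *
 *	uint8_t local_id;
 *	void *peer;
 *
 *	peer = dp_wrapper_peer_get_ref_by_addr(dev, peer_mac_addr,
 *					       &local_id, debug_id);
 *	if (!peer)
 *		return;
 */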
7101 
7102 static struct cdp_peer_ops dp_ops_peer = {
7103 	.register_peer = dp_register_peer,
7104 	.clear_peer = dp_clear_peer,
7105 	.find_peer_by_addr = dp_find_peer_by_addr,
7106 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
7107 	.peer_get_ref_by_addr = dp_wrapper_peer_get_ref_by_addr,
7108 	.peer_release_ref = NULL,
7109 	.local_peer_id = dp_local_peer_id,
7110 	.peer_find_by_local_id = dp_peer_find_by_local_id,
7111 	.peer_state_update = dp_peer_state_update,
7112 	.get_vdevid = dp_get_vdevid,
7113 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
7114 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
7115 	.get_vdev_for_peer = dp_get_vdev_for_peer,
7116 	.get_peer_state = dp_get_peer_state,
7117 	.last_assoc_received = dp_get_last_assoc_received,
7118 	.last_disassoc_received = dp_get_last_disassoc_received,
7119 	.last_deauth_received = dp_get_last_deauth_received,
7120 };
7121 #endif
7122 
7123 static struct cdp_ops dp_txrx_ops = {
7124 	.cmn_drv_ops = &dp_ops_cmn,
7125 	.ctrl_ops = &dp_ops_ctrl,
7126 	.me_ops = &dp_ops_me,
7127 	.mon_ops = &dp_ops_mon,
7128 	.host_stats_ops = &dp_ops_host_stats,
7129 	.wds_ops = &dp_ops_wds,
7130 	.raw_ops = &dp_ops_raw,
7131 #ifdef CONFIG_WIN
7132 	.pflow_ops = &dp_ops_pflow,
7133 #endif /* CONFIG_WIN */
7134 #ifndef CONFIG_WIN
7135 	.misc_ops = &dp_ops_misc,
7136 	.cfg_ops = &dp_ops_cfg,
7137 	.flowctl_ops = &dp_ops_flowctl,
7138 	.l_flowctl_ops = &dp_ops_l_flowctl,
7139 #ifdef IPA_OFFLOAD
7140 	.ipa_ops = &dp_ops_ipa,
7141 #endif
7142 	.bus_ops = &dp_ops_bus,
7143 	.ocb_ops = &dp_ops_ocb,
7144 	.peer_ops = &dp_ops_peer,
7145 	.throttle_ops = &dp_ops_throttle,
7146 	.mob_stats_ops = &dp_ops_mob_stats,
7147 #endif
7148 };
7149 
7150 /*
7151  * dp_soc_set_txrx_ring_map() - set the default TX CPU-to-ring map
7152  * @soc: DP SoC handle
7153  *
7154  * Return: void
7155  */
7156 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
7157 {
7158 	uint32_t i;
7159 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
7160 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
7161 	}
7162 }
7163 
7164 /*
7165  * dp_soc_attach_wifi3() - Attach txrx SOC
7166  * @ctrl_psoc:	Opaque SOC handle from control plane
7167  * @hif_handle:	Opaque HIF handle
7168  * @htc_handle:	Opaque HTC handle
7169  * @qdf_osdev:	QDF device
7170  * @ol_ops:	Offload operations registered by the control plane
7171  * Return: DP SOC handle on success, NULL on failure
7172  */
7173 /*
7174  * Local prototype added to temporarily address warning caused by
7175  * -Wmissing-prototypes. A more correct solution, namely to expose
7176  * a prototype in an appropriate header file, will come later.
7177  */
7178 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
7179 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
7180 	struct ol_if_ops *ol_ops);
7181 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
7182 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
7183 	struct ol_if_ops *ol_ops)
7184 {
7185 	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
7186 
7187 	if (!soc) {
7188 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7189 			FL("DP SOC memory allocation failed"));
7190 		goto fail0;
7191 	}
7192 
7193 	soc->cdp_soc.ops = &dp_txrx_ops;
7194 	soc->cdp_soc.ol_ops = ol_ops;
7195 	soc->ctrl_psoc = ctrl_psoc;
7196 	soc->osdev = qdf_osdev;
7197 	soc->hif_handle = hif_handle;
7198 
7199 	soc->hal_soc = hif_get_hal_handle(hif_handle);
7200 	soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
7201 		soc->hal_soc, qdf_osdev);
7202 	if (!soc->htt_handle) {
7203 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7204 			FL("HTT attach failed"));
7205 		goto fail1;
7206 	}
7207 
7208 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach();
7209 	if (!soc->wlan_cfg_ctx) {
7210 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7211 				FL("wlan_cfg_soc_attach failed"));
7212 		goto fail2;
7213 	}
7214 
7215 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx, rx_hash);
7216 	soc->cce_disable = false;
7217 
7218 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
7219 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
7220 				CDP_CFG_MAX_PEER_ID);
7221 
7222 		if (ret != -EINVAL) {
7223 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
7224 		}
7225 
7226 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
7227 				CDP_CFG_CCE_DISABLE);
7228 		if (ret == 1)
7229 			soc->cce_disable = true;
7230 	}
7231 
7232 	qdf_spinlock_create(&soc->peer_ref_mutex);
7233 
7234 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
7235 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
7236 
7237 	/* Fill the TX/RX CPU ring map */
7238 	dp_soc_set_txrx_ring_map(soc);
7239 
7240 	qdf_spinlock_create(&soc->htt_stats.lock);
7241 	/* initialize work queue for stats processing */
7242 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
7243 
7244 	/* Initialize inactivity timer for WiFi SON */
7245 	dp_init_inact_timer(soc);
7246 
7247 	return (void *)soc;
7248 
7249 fail2:
7250 	htt_soc_detach(soc->htt_handle);
7251 fail1:
7252 	qdf_mem_free(soc);
7253 fail0:
7254 	return NULL;
7255 }
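
/*
 * Illustrative sketch (not part of the driver): an attach call as a
 * platform glue layer might issue it. All handle names are hypothetical;
 * only dp_soc_attach_wifi3() and its parameter order come from this
 * file.
 *
 *	void *dp_soc;
 *
 *	dp_soc = dp_soc_attach_wifi3(ctrl_psoc, hif_handle, htc_handle,
 *				     qdf_osdev, ol_ops);
 *	if (!dp_soc)
 *		return QDF_STATUS_E_NOMEM;
 */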
7256 
7257 /*
7258  * dp_get_pdev_for_mac_id() - return the pdev corresponding to mac_id
7259  *
7260  * @soc: handle to DP soc
7261  * @mac_id: MAC id
7262  *
7263  * Return: pdev corresponding to the MAC id
7264  */
7265 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
7266 {
7267 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
7268 		return soc->pdev_list[mac_id];
7269 
7270 	/* Typically for MCL, as there is only one PDEV */
7271 	return soc->pdev_list[0];
7272 }
7273 
7274 /*
7275  * dp_is_hw_dbs_enable() - check DBS support and adjust the MAC ring count
7276  * @soc:		DP SoC context
7277  * @max_mac_rings:	pointer to MAC ring count; set to 1 if DBS is unsupported
7278  *
7279  * Return: None
7280  */
7281 static
7282 void dp_is_hw_dbs_enable(struct dp_soc *soc,
7283 				int *max_mac_rings)
7284 {
7285 	bool dbs_enable = false;
7286 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
7287 		dbs_enable = soc->cdp_soc.ol_ops->
7288 		is_hw_dbs_2x2_capable(soc->ctrl_psoc);
7289 
7290 	*max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
7291 }
7292 
7293 /*
7294  * dp_set_pktlog_wifi3() - configure packet log for the given WDI event
7295  * @pdev: Datapath PDEV handle
7296  * @event: WDI event whose notifications are being subscribed to
7297  * @enable: true to subscribe to the WDI event, false to unsubscribe
7298  *
7299  * Return: 0 on success
7300  */
7301 #ifdef WDI_EVENT_ENABLE
7302 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
7303 	bool enable)
7304 {
7305 	struct dp_soc *soc = pdev->soc;
7306 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
7307 	int max_mac_rings = wlan_cfg_get_num_mac_rings
7308 					(pdev->wlan_cfg_ctx);
7309 	uint8_t mac_id = 0;
7310 
7311 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
7312 
7313 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
7314 			FL("Max_mac_rings %d"),
7315 			max_mac_rings);
7316 
7317 	if (enable) {
7318 		switch (event) {
7319 		case WDI_EVENT_RX_DESC:
7320 			if (pdev->monitor_vdev) {
7321 				/* Nothing needs to be done if monitor mode is
7322 				 * enabled
7323 				 */
7324 				return 0;
7325 			}
7326 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
7327 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
7328 				htt_tlv_filter.mpdu_start = 1;
7329 				htt_tlv_filter.msdu_start = 1;
7330 				htt_tlv_filter.msdu_end = 1;
7331 				htt_tlv_filter.mpdu_end = 1;
7332 				htt_tlv_filter.packet_header = 1;
7333 				htt_tlv_filter.attention = 1;
7334 				htt_tlv_filter.ppdu_start = 1;
7335 				htt_tlv_filter.ppdu_end = 1;
7336 				htt_tlv_filter.ppdu_end_user_stats = 1;
7337 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7338 				htt_tlv_filter.ppdu_end_status_done = 1;
7339 				htt_tlv_filter.enable_fp = 1;
7340 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7341 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7342 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7343 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7344 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7345 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7346 
7347 				for (mac_id = 0; mac_id < max_mac_rings;
7348 								mac_id++) {
7349 					int mac_for_pdev =
7350 						dp_get_mac_id_for_pdev(mac_id,
7351 								pdev->pdev_id);
7352 
7353 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7354 					 mac_for_pdev,
7355 					 pdev->rxdma_mon_status_ring[mac_id]
7356 					 .hal_srng,
7357 					 RXDMA_MONITOR_STATUS,
7358 					 RX_BUFFER_SIZE,
7359 					 &htt_tlv_filter);
7360 
7361 				}
7362 
7363 				if (soc->reap_timer_init)
7364 					qdf_timer_mod(&soc->mon_reap_timer,
7365 					DP_INTR_POLL_TIMER_MS);
7366 			}
7367 			break;
7368 
7369 		case WDI_EVENT_LITE_RX:
7370 			if (pdev->monitor_vdev) {
7371 				/* Nothing needs to be done if monitor mode is
7372 				 * enabled
7373 				 */
7374 				return 0;
7375 			}
7376 
7377 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
7378 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
7379 
7380 				htt_tlv_filter.ppdu_start = 1;
7381 				htt_tlv_filter.ppdu_end = 1;
7382 				htt_tlv_filter.ppdu_end_user_stats = 1;
7383 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7384 				htt_tlv_filter.ppdu_end_status_done = 1;
7385 				htt_tlv_filter.mpdu_start = 1;
7386 				htt_tlv_filter.enable_fp = 1;
7387 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7388 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7389 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7390 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7391 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7392 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7393 
7394 				for (mac_id = 0; mac_id < max_mac_rings;
7395 								mac_id++) {
7396 					int mac_for_pdev =
7397 						dp_get_mac_id_for_pdev(mac_id,
7398 								pdev->pdev_id);
7399 
7400 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7401 					mac_for_pdev,
7402 					pdev->rxdma_mon_status_ring[mac_id]
7403 					.hal_srng,
7404 					RXDMA_MONITOR_STATUS,
7405 					RX_BUFFER_SIZE_PKTLOG_LITE,
7406 					&htt_tlv_filter);
7407 				}
7408 
7409 				if (soc->reap_timer_init)
7410 					qdf_timer_mod(&soc->mon_reap_timer,
7411 					DP_INTR_POLL_TIMER_MS);
7412 			}
7413 			break;
7414 
7415 		case WDI_EVENT_LITE_T2H:
7416 			if (pdev->monitor_vdev) {
7417 				/* Nothing needs to be done if monitor mode is
7418 				 * enabled
7419 				 */
7420 				return 0;
7421 			}
7422 
7423 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
7424 				int mac_for_pdev = dp_get_mac_id_for_pdev(
7425 							mac_id,	pdev->pdev_id);
7426 
7427 				pdev->pktlog_ppdu_stats = true;
7428 				dp_h2t_cfg_stats_msg_send(pdev,
7429 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
7430 					mac_for_pdev);
7431 			}
7432 			break;
7433 
7434 		default:
7435 			/* Nothing needs to be done for other pktlog types */
7436 			break;
7437 		}
7438 	} else {
7439 		switch (event) {
7440 		case WDI_EVENT_RX_DESC:
7441 		case WDI_EVENT_LITE_RX:
7442 			if (pdev->monitor_vdev) {
7443 				/* Nothing needs to be done if monitor mode is
7444 				 * enabled
7445 				 */
7446 				return 0;
7447 			}
7448 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
7449 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
7450 
7451 				for (mac_id = 0; mac_id < max_mac_rings;
7452 								mac_id++) {
7453 					int mac_for_pdev =
7454 						dp_get_mac_id_for_pdev(mac_id,
7455 								pdev->pdev_id);
7456 
7457 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7458 					  mac_for_pdev,
7459 					  pdev->rxdma_mon_status_ring[mac_id]
7460 					  .hal_srng,
7461 					  RXDMA_MONITOR_STATUS,
7462 					  RX_BUFFER_SIZE,
7463 					  &htt_tlv_filter);
7464 				}
7465 
7466 				if (soc->reap_timer_init)
7467 					qdf_timer_stop(&soc->mon_reap_timer);
7468 			}
7469 			break;
7470 		case WDI_EVENT_LITE_T2H:
7471 			if (pdev->monitor_vdev) {
7472 				/* Nothing needs to be done if monitor mode is
7473 				 * enabled
7474 				 */
7475 				return 0;
7476 			}
7477 			/* Pass value 0 to disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG
7478 			 * in FW. Once the proper macros are defined in the htt
7479 			 * header file, they will be used here instead.
7480 			 */
7481 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
7482 				int mac_for_pdev =
7483 						dp_get_mac_id_for_pdev(mac_id,
7484 								pdev->pdev_id);
7485 
7486 				pdev->pktlog_ppdu_stats = false;
7487 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7488 					dp_h2t_cfg_stats_msg_send(pdev, 0,
7489 								mac_for_pdev);
7490 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
7491 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
7492 								mac_for_pdev);
7493 				} else if (pdev->enhanced_stats_en) {
7494 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
7495 								mac_for_pdev);
7496 				}
7497 			}
7498 
7499 			break;
7500 		default:
7501 			/* Nothing needs to be done for other pktlog types */
7502 			break;
7503 		}
7504 	}
7505 	return 0;
7506 }
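
/*
 * Illustrative sketch (not part of the driver): enabling lite RX packet
 * logging on a pdev and disabling it again. The pdev pointer is
 * hypothetical; the event id, the enable flag and the 0 return value
 * are taken from the function above.
 *
 *	if (dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, true))
 *		return;
 *	(capture packets here)
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, false);
 */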
7507 #endif
7508 
7509 #ifdef CONFIG_MCL
7510 /*
7511  * dp_service_mon_rings() - timer handler to reap the monitor rings;
7512  * required because PPDU-end interrupts are not delivered in this mode
7513  * @arg: SoC handle
7514  *
7515  * Return: none
7516  *
7517  */
7518 static void dp_service_mon_rings(void *arg)
7519 {
7520 	struct dp_soc *soc = (struct dp_soc *) arg;
7521 	int ring = 0, work_done, mac_id;
7522 	struct dp_pdev *pdev = NULL;
7523 
7524 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
7525 		pdev = soc->pdev_list[ring];
7526 		if (pdev == NULL)
7527 			continue;
7528 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7529 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7530 								pdev->pdev_id);
7531 			work_done = dp_mon_process(soc, mac_for_pdev,
7532 							QCA_NAPI_BUDGET);
7533 
7534 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
7535 				FL("Reaped %d descs from Monitor rings"),
7536 				work_done);
7537 		}
7538 	}
7539 
7540 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
7541 }
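
/*
 * Illustrative sketch (not part of the driver): how the monitor reap
 * timer might be tied to the handler above. The actual timer
 * initialization lives elsewhere in this driver; the snippet only shows
 * the intended pairing of qdf_timer_init()/qdf_timer_mod() with
 * dp_service_mon_rings() and DP_INTR_POLL_TIMER_MS.
 *
 *	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
 *		       dp_service_mon_rings, (void *)soc,
 *		       QDF_TIMER_TYPE_WAKE_APPS);
 *	soc->reap_timer_init = 1;
 *	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
 */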
7542 
7543 #ifndef REMOVE_PKT_LOG
7544 /**
7545  * dp_pkt_log_init() - API to initialize packet log
7546  * @ppdev: physical device handle
7547  * @scn: HIF context
7548  *
7549  * Return: none
7550  */
7551 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
7552 {
7553 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
7554 
7555 	if (handle->pkt_log_init) {
7556 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7557 			 "%s: Packet log not initialized", __func__);
7558 		return;
7559 	}
7560 
7561 	pktlog_sethandle(&handle->pl_dev, scn);
7562 	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
7563 
7564 	if (pktlogmod_init(scn)) {
7565 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7566 			 "%s: pktlogmod_init failed", __func__);
7567 		handle->pkt_log_init = false;
7568 	} else {
7569 		handle->pkt_log_init = true;
7570 	}
7571 }
7572 
7573 /**
7574  * dp_pkt_log_con_service() - connect packet log service
7575  * @ppdev: physical device handle
7576  * @scn: device context
7577  *
7578  * Return: none
7579  */
7580 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
7581 {
7582 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
7583 
7584 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
7585 	pktlog_htc_attach();
7586 }
7587 
7588 /**
7589  * dp_pktlogmod_exit() - API to cleanup pktlog info
7590  * @handle: Pdev handle
7591  *
7592  * Return: none
7593  */
7594 static void dp_pktlogmod_exit(struct dp_pdev *handle)
7595 {
7596 	void *scn = (void *)handle->soc->hif_handle;
7597 
7598 	if (!scn) {
7599 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7600 			 "%s: Invalid hif(scn) handle", __func__);
7601 		return;
7602 	}
7603 
7604 	pktlogmod_exit(scn);
7605 	handle->pkt_log_init = false;
7606 }
7607 #endif
7608 #else
7609 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
7610 #endif
7611 
7612