xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision a175314c51a4ce5cec2835cc8a8c7dc0c1810915)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_api.h>
25 #include <hif.h>
26 #include <htt.h>
27 #include <wdi_event.h>
28 #include <queue.h>
29 #include "dp_htt.h"
30 #include "dp_types.h"
31 #include "dp_internal.h"
32 #include "dp_tx.h"
33 #include "dp_tx_desc.h"
34 #include "dp_rx.h"
35 #include <cdp_txrx_handle.h>
36 #include <wlan_cfg.h>
37 #include "cdp_txrx_cmn_struct.h"
38 #include "cdp_txrx_stats_struct.h"
39 #include <qdf_util.h>
40 #include "dp_peer.h"
41 #include "dp_rx_mon.h"
42 #include "htt_stats.h"
43 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
44 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
45 #include "cdp_txrx_flow_ctrl_v2.h"
46 #else
47 static inline void
48 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
49 {
50 	return;
51 }
52 #endif
53 #include "dp_ipa.h"
54 
55 #ifdef CONFIG_MCL
56 static void dp_service_mon_rings(void *arg);
57 #ifndef REMOVE_PKT_LOG
58 #include <pktlog_ac_api.h>
59 #include <pktlog_ac.h>
60 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn);
61 #endif
62 #endif
63 static void dp_pktlogmod_exit(struct dp_pdev *handle);
64 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
65 				uint8_t *peer_mac_addr,
66 				struct cdp_ctrl_objmgr_peer *ctrl_peer);
67 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
68 
69 #define DP_INTR_POLL_TIMER_MS	10
70 #define DP_WDS_AGING_TIMER_DEFAULT_MS	120000
71 #define DP_MCS_LENGTH (6*MAX_MCS)
72 #define DP_NSS_LENGTH (6*SS_COUNT)
73 #define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
74 #define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
75 #define DP_MAX_MCS_STRING_LEN 30
76 #define DP_CURR_FW_STATS_AVAIL 19
77 #define DP_HTT_DBG_EXT_STATS_MAX 256
78 #define DP_MAX_SLEEP_TIME 100
79 
80 #ifdef IPA_OFFLOAD
81 /* Exclude IPA rings from the interrupt context */
82 #define TX_RING_MASK_VAL	0xb
83 #define RX_RING_MASK_VAL	0x7
84 #else
85 #define TX_RING_MASK_VAL	0xF
86 #define RX_RING_MASK_VAL	0xF
87 #endif
88 
89 bool rx_hash = 1;
90 qdf_declare_param(rx_hash, bool);
91 
92 #define STR_MAXLEN	64
93 
94 #define DP_PPDU_STATS_CFG_ALL 0xFFFF
95 
96 /* PPDU stats mask sent to FW to enable enhanced stats */
97 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
98 /* PPDU stats mask sent to FW to support debug sniffer feature */
99 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
100 /* PPDU stats mask sent to FW to support BPR feature*/
101 #define DP_PPDU_STATS_CFG_BPR 0x2000
102 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
103 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
104 				   DP_PPDU_STATS_CFG_ENH_STATS)
105 /* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */
106 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
107 				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
108 
109 /**
110  * default_dscp_tid_map - Default DSCP-TID mapping
111  *
112  * DSCP        TID
113  * 000000      0
114  * 001000      1
115  * 010000      2
116  * 011000      3
117  * 100000      4
118  * 101000      5
119  * 110000      6
120  * 111000      7
121  */
122 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
123 	0, 0, 0, 0, 0, 0, 0, 0,
124 	1, 1, 1, 1, 1, 1, 1, 1,
125 	2, 2, 2, 2, 2, 2, 2, 2,
126 	3, 3, 3, 3, 3, 3, 3, 3,
127 	4, 4, 4, 4, 4, 4, 4, 4,
128 	5, 5, 5, 5, 5, 5, 5, 5,
129 	6, 6, 6, 6, 6, 6, 6, 6,
130 	7, 7, 7, 7, 7, 7, 7, 7,
131 };
132 
133 /*
134  * struct dp_rate_debug
135  *
136  * @mcs_type: print string for a given mcs
137  * @valid: valid mcs rate?
138  */
139 struct dp_rate_debug {
140 	char mcs_type[DP_MAX_MCS_STRING_LEN];
141 	uint8_t valid;
142 };
143 
144 #define MCS_VALID 1
145 #define MCS_INVALID 0
146 
147 static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {
148 
149 	{
150 		{"OFDM 48 Mbps", MCS_VALID},
151 		{"OFDM 24 Mbps", MCS_VALID},
152 		{"OFDM 12 Mbps", MCS_VALID},
153 		{"OFDM 6 Mbps ", MCS_VALID},
154 		{"OFDM 54 Mbps", MCS_VALID},
155 		{"OFDM 36 Mbps", MCS_VALID},
156 		{"OFDM 18 Mbps", MCS_VALID},
157 		{"OFDM 9 Mbps ", MCS_VALID},
158 		{"INVALID ", MCS_INVALID},
159 		{"INVALID ", MCS_INVALID},
160 		{"INVALID ", MCS_INVALID},
161 		{"INVALID ", MCS_INVALID},
162 		{"INVALID ", MCS_VALID},
163 	},
164 	{
165 		{"CCK 11 Mbps Long  ", MCS_VALID},
166 		{"CCK 5.5 Mbps Long ", MCS_VALID},
167 		{"CCK 2 Mbps Long   ", MCS_VALID},
168 		{"CCK 1 Mbps Long   ", MCS_VALID},
169 		{"CCK 11 Mbps Short ", MCS_VALID},
170 		{"CCK 5.5 Mbps Short", MCS_VALID},
171 		{"CCK 2 Mbps Short  ", MCS_VALID},
172 		{"INVALID ", MCS_INVALID},
173 		{"INVALID ", MCS_INVALID},
174 		{"INVALID ", MCS_INVALID},
175 		{"INVALID ", MCS_INVALID},
176 		{"INVALID ", MCS_INVALID},
177 		{"INVALID ", MCS_VALID},
178 	},
179 	{
180 		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
181 		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
182 		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
183 		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
184 		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
185 		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
186 		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
187 		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
188 		{"INVALID ", MCS_INVALID},
189 		{"INVALID ", MCS_INVALID},
190 		{"INVALID ", MCS_INVALID},
191 		{"INVALID ", MCS_INVALID},
192 		{"INVALID ", MCS_VALID},
193 	},
194 	{
195 		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
196 		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
197 		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
198 		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
199 		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
200 		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
201 		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
202 		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
203 		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
204 		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
205 		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
206 		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
207 		{"INVALID ", MCS_VALID},
208 	},
209 	{
210 		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
211 		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
212 		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
213 		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
214 		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
215 		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
216 		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
217 		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
218 		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
219 		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
220 		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
221 		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
222 		{"INVALID ", MCS_VALID},
223 	}
224 };
225 
226 /**
227  * @brief Cpu ring map types
228  */
229 enum dp_cpu_ring_map_types {
230 	DP_DEFAULT_MAP,
231 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
232 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
233 	DP_NSS_ALL_RADIO_OFFLOADED_MAP,
234 	DP_CPU_RING_MAP_MAX
235 };
236 
237 /**
238  * @brief Cpu to tx ring map
239  */
240 static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
241 	{0x0, 0x1, 0x2, 0x0},
242 	{0x1, 0x2, 0x1, 0x2},
243 	{0x0, 0x2, 0x0, 0x2},
244 	{0x2, 0x2, 0x2, 0x2}
245 };
246 
247 /**
248  * @brief Select the type of statistics
249  */
250 enum dp_stats_type {
251 	STATS_FW = 0,
252 	STATS_HOST = 1,
253 	STATS_TYPE_MAX = 2,
254 };
255 
256 /**
257  * @brief General Firmware statistics options
258  *
259  */
260 enum dp_fw_stats {
261 	TXRX_FW_STATS_INVALID	= -1,
262 };
263 
264 /**
265  * dp_stats_mapping_table - Firmware and Host statistics
266  * currently supported
267  */
268 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
269 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
270 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
271 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
272 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
273 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
274 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
275 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
276 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
277 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
278 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
279 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
280 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
281 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
282 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
283 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
284 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
285 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
286 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
287 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
288 	/* Last ENUM for HTT FW STATS */
289 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
290 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
291 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
292 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
293 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
294 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
295 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
296 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
297 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
298 };
299 
300 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
301 					struct cdp_peer *peer_hdl,
302 					uint8_t *mac_addr,
303 					enum cdp_txrx_ast_entry_type type,
304 					uint32_t flags)
305 {
306 
307 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
308 				(struct dp_peer *)peer_hdl,
309 				mac_addr,
310 				type,
311 				flags);
312 }
313 
314 static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
315 					 void *ast_entry_hdl)
316 {
317 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
318 	qdf_spin_lock_bh(&soc->ast_lock);
319 	dp_peer_del_ast((struct dp_soc *)soc_hdl,
320 			(struct dp_ast_entry *)ast_entry_hdl);
321 	qdf_spin_unlock_bh(&soc->ast_lock);
322 }
323 
324 
325 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
326 						struct cdp_peer *peer_hdl,
327 						uint8_t *wds_macaddr,
328 						uint32_t flags)
329 {
330 	int status = -1;
331 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
332 	struct dp_ast_entry  *ast_entry = NULL;
333 
334 	qdf_spin_lock_bh(&soc->ast_lock);
335 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
336 
337 	if (ast_entry) {
338 		status = dp_peer_update_ast(soc,
339 					    (struct dp_peer *)peer_hdl,
340 					   ast_entry, flags);
341 	}
342 
343 	qdf_spin_unlock_bh(&soc->ast_lock);
344 
345 	return status;
346 }
347 
348 /*
349  * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
350  * @soc_handle: Datapath SOC handle
351  * @wds_macaddr: MAC address of the WDS entry to be added
352  * @vdev_hdl: vdev handle
353  * Return: None
354  */
355 static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
356 				    uint8_t *wds_macaddr, void *vdev_hdl)
357 {
358 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
359 	struct dp_ast_entry *ast_entry = NULL;
360 
361 	qdf_spin_lock_bh(&soc->ast_lock);
362 	ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr);
363 
364 	if (ast_entry) {
365 		if (ast_entry->type != CDP_TXRX_AST_TYPE_STATIC)
366 			ast_entry->is_active = TRUE;
367 	}
368 
369 	qdf_spin_unlock_bh(&soc->ast_lock);
370 }
371 
372 /*
373  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
374  * @soc: Datapath SOC handle
375  * @vdev_hdl: vdev handle
376  *
377  * Return: None
378  */
379 static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl,
380 					 void *vdev_hdl)
381 {
382 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
383 	struct dp_pdev *pdev;
384 	struct dp_vdev *vdev;
385 	struct dp_peer *peer;
386 	struct dp_ast_entry *ase, *temp_ase;
387 	int i;
388 
389 	qdf_spin_lock_bh(&soc->ast_lock);
390 
391 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
392 		pdev = soc->pdev_list[i];
393 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
394 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
395 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
396 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
397 					if (ase->type ==
398 						CDP_TXRX_AST_TYPE_STATIC)
399 						continue;
400 					ase->is_active = TRUE;
401 				}
402 			}
403 		}
404 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
405 	}
406 
407 	qdf_spin_unlock_bh(&soc->ast_lock);
408 }
409 
410 /*
411  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
412  * @soc:		Datapath SOC handle
413  *
414  * Return: None
415  */
416 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
417 {
418 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
419 	struct dp_pdev *pdev;
420 	struct dp_vdev *vdev;
421 	struct dp_peer *peer;
422 	struct dp_ast_entry *ase, *temp_ase;
423 	int i;
424 
425 	qdf_spin_lock_bh(&soc->ast_lock);
426 
427 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
428 		pdev = soc->pdev_list[i];
429 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
430 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
431 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
432 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
433 					if (ase->type ==
434 						CDP_TXRX_AST_TYPE_STATIC)
435 						continue;
436 					dp_peer_del_ast(soc, ase);
437 				}
438 			}
439 		}
440 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
441 	}
442 
443 	qdf_spin_unlock_bh(&soc->ast_lock);
444 }
445 
446 static void *dp_peer_ast_hash_find_wifi3(struct cdp_soc_t *soc_hdl,
447 						uint8_t *ast_mac_addr)
448 {
449 	struct dp_ast_entry *ast_entry;
450 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
451 	qdf_spin_lock_bh(&soc->ast_lock);
452 	ast_entry = dp_peer_ast_hash_find(soc, ast_mac_addr);
453 	qdf_spin_unlock_bh(&soc->ast_lock);
454 	return (void *)ast_entry;
455 }
456 
457 static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
458 							void *ast_entry_hdl)
459 {
460 	return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
461 					(struct dp_ast_entry *)ast_entry_hdl);
462 }
463 
464 static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
465 							void *ast_entry_hdl)
466 {
467 	return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
468 					(struct dp_ast_entry *)ast_entry_hdl);
469 }
470 
471 static void dp_peer_ast_set_type_wifi3(
472 					struct cdp_soc_t *soc_hdl,
473 					void *ast_entry_hdl,
474 					enum cdp_txrx_ast_entry_type type)
475 {
476 	dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
477 				(struct dp_ast_entry *)ast_entry_hdl,
478 				type);
479 }
480 
481 
482 
483 /**
484  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
485  * @ring_num: ring num of the ring being queried
486  * @grp_mask: the grp_mask array for the ring type in question.
487  *
488  * The grp_mask array is indexed by group number and the bit fields correspond
489  * to ring numbers.  We are finding which interrupt group a ring belongs to.
490  *
491  * Return: the index in the grp_mask array with the ring number.
492  * -QDF_STATUS_E_NOENT if no entry is found
493  */
494 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
495 {
496 	int ext_group_num;
497 	int mask = 1 << ring_num;
498 
499 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
500 	     ext_group_num++) {
501 		if (mask & grp_mask[ext_group_num])
502 			return ext_group_num;
503 	}
504 
505 	return -QDF_STATUS_E_NOENT;
506 }
507 
508 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
509 				       enum hal_ring_type ring_type,
510 				       int ring_num)
511 {
512 	int *grp_mask;
513 
514 	switch (ring_type) {
515 	case WBM2SW_RELEASE:
516 		/* dp_tx_comp_handler - soc->tx_comp_ring */
517 		if (ring_num < 3)
518 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
519 
520 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
521 		else if (ring_num == 3) {
522 			/* sw treats this as a separate ring type */
523 			grp_mask = &soc->wlan_cfg_ctx->
524 				int_rx_wbm_rel_ring_mask[0];
525 			ring_num = 0;
526 		} else {
527 			qdf_assert(0);
528 			return -QDF_STATUS_E_NOENT;
529 		}
530 	break;
531 
532 	case REO_EXCEPTION:
533 		/* dp_rx_err_process - &soc->reo_exception_ring */
534 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
535 	break;
536 
537 	case REO_DST:
538 		/* dp_rx_process - soc->reo_dest_ring */
539 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
540 	break;
541 
542 	case REO_STATUS:
543 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
544 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
545 	break;
546 
547 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
548 	case RXDMA_MONITOR_STATUS:
549 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
550 	case RXDMA_MONITOR_DST:
551 		/* dp_mon_process */
552 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
553 	break;
554 	case RXDMA_DST:
555 		/* dp_rxdma_err_process */
556 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
557 	break;
558 
559 	case RXDMA_BUF:
560 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
561 	break;
562 
563 	case RXDMA_MONITOR_BUF:
564 		/* TODO: support low_thresh interrupt */
565 		return -QDF_STATUS_E_NOENT;
566 	break;
567 
568 	case TCL_DATA:
569 	case TCL_CMD:
570 	case REO_CMD:
571 	case SW2WBM_RELEASE:
572 	case WBM_IDLE_LINK:
573 		/* normally empty SW_TO_HW rings */
574 		return -QDF_STATUS_E_NOENT;
575 	break;
576 
577 	case TCL_STATUS:
578 	case REO_REINJECT:
579 		/* misc unused rings */
580 		return -QDF_STATUS_E_NOENT;
581 	break;
582 
583 	case CE_SRC:
584 	case CE_DST:
585 	case CE_DST_STATUS:
586 		/* CE_rings - currently handled by hif */
587 	default:
588 		return -QDF_STATUS_E_NOENT;
589 	break;
590 	}
591 
592 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
593 }
594 
595 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
596 			      *ring_params, int ring_type, int ring_num)
597 {
598 	int msi_group_number;
599 	int msi_data_count;
600 	int ret;
601 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
602 
603 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
604 					    &msi_data_count, &msi_data_start,
605 					    &msi_irq_start);
606 
607 	if (ret)
608 		return;
609 
610 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
611 						       ring_num);
612 	if (msi_group_number < 0) {
613 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
614 			FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
615 			ring_type, ring_num);
616 		ring_params->msi_addr = 0;
617 		ring_params->msi_data = 0;
618 		return;
619 	}
620 
621 	if (msi_group_number > msi_data_count) {
622 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
623 			FL("2 msi_groups will share an msi; msi_group_num %d"),
624 			msi_group_number);
625 
626 		QDF_ASSERT(0);
627 	}
628 
629 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
630 
631 	ring_params->msi_addr = addr_low;
632 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
633 	ring_params->msi_data = (msi_group_number % msi_data_count)
634 		+ msi_data_start;
635 	ring_params->flags |= HAL_SRNG_MSI_INTR;
636 }
637 
638 /**
639  * dp_print_ast_stats() - Dump AST table contents
640  * @soc: Datapath soc handle
641  *
642  * return void
643  */
644 #ifdef FEATURE_AST
645 static void dp_print_ast_stats(struct dp_soc *soc)
646 {
647 	uint8_t i;
648 	uint8_t num_entries = 0;
649 	struct dp_vdev *vdev;
650 	struct dp_pdev *pdev;
651 	struct dp_peer *peer;
652 	struct dp_ast_entry *ase, *tmp_ase;
653 	char type[5][10] = {"NONE", "STATIC", "WDS", "MEC", "HMWDS"};
654 
655 	DP_PRINT_STATS("AST Stats:");
656 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
657 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
658 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
659 	DP_PRINT_STATS("AST Table:");
660 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
661 		pdev = soc->pdev_list[i];
662 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
663 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
664 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
665 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
666 					DP_PRINT_STATS("%6d mac_addr = %pM"
667 							" peer_mac_addr = %pM"
668 							" type = %s"
669 							" next_hop = %d"
670 							" is_active = %d"
671 							" is_bss = %d"
672 							" ast_idx = %d"
673 							" pdev_id = %d"
674 							" vdev_id = %d",
675 							++num_entries,
676 							ase->mac_addr.raw,
677 							ase->peer->mac_addr.raw,
678 							type[ase->type],
679 							ase->next_hop,
680 							ase->is_active,
681 							ase->is_bss,
682 							ase->ast_idx,
683 							ase->pdev_id,
684 							ase->vdev_id);
685 				}
686 			}
687 		}
688 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
689 	}
690 }
691 #else
692 static void dp_print_ast_stats(struct dp_soc *soc)
693 {
694 	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
695 	return;
696 }
697 #endif
698 
699 static void dp_print_peer_table(struct dp_vdev *vdev)
700 {
701 	struct dp_peer *peer = NULL;
702 
703 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
704 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
705 		if (!peer) {
706 			DP_PRINT_STATS("Invalid Peer");
707 			return;
708 		}
709 		DP_PRINT_STATS("    peer_mac_addr = %pM"
710 			" nawds_enabled = %d"
711 			" bss_peer = %d"
712 			" wapi = %d"
713 			" wds_enabled = %d"
714 			" delete in progress = %d",
715 			peer->mac_addr.raw,
716 			peer->nawds_enabled,
717 			peer->bss_peer,
718 			peer->wapi,
719 			peer->wds_enabled,
720 			peer->delete_in_progress);
721 	}
722 }
723 
724 /*
725  * dp_setup_srng - Internal function to setup SRNG rings used by data path
726  */
727 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
728 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
729 {
730 	void *hal_soc = soc->hal_soc;
731 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
732 	/* TODO: See if we should get align size from hal */
733 	uint32_t ring_base_align = 8;
734 	struct hal_srng_params ring_params;
735 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
736 
737 	/* TODO: Currently hal layer takes care of endianness related settings.
738 	 * See if these settings need to passed from DP layer
739 	 */
740 	ring_params.flags = 0;
741 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
742 		  FL("Ring type: %d, num:%d"), ring_type, ring_num);
743 
744 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
745 	srng->hal_srng = NULL;
746 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
747 	srng->num_entries = num_entries;
748 	srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
749 		soc->osdev, soc->osdev->dev, srng->alloc_size,
750 		&(srng->base_paddr_unaligned));
751 
752 	if (!srng->base_vaddr_unaligned) {
753 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
754 			FL("alloc failed - ring_type: %d, ring_num %d"),
755 			ring_type, ring_num);
756 		return QDF_STATUS_E_NOMEM;
757 	}
758 
759 	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
760 		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
761 	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
762 		((unsigned long)(ring_params.ring_base_vaddr) -
763 		(unsigned long)srng->base_vaddr_unaligned);
764 	ring_params.num_entries = num_entries;
765 
766 	if (soc->intr_mode == DP_INTR_MSI) {
767 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
768 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
769 			FL("Using MSI for ring_type: %d, ring_num %d"),
770 			ring_type, ring_num);
771 
772 	} else {
773 		ring_params.msi_data = 0;
774 		ring_params.msi_addr = 0;
775 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
776 			FL("Skipping MSI for ring_type: %d, ring_num %d"),
777 			ring_type, ring_num);
778 	}
779 
780 	/*
781 	 * Setup interrupt timer and batch counter thresholds for
782 	 * interrupt mitigation based on ring type
783 	 */
784 	if (ring_type == REO_DST) {
785 		ring_params.intr_timer_thres_us =
786 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
787 		ring_params.intr_batch_cntr_thres_entries =
788 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
789 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
790 		ring_params.intr_timer_thres_us =
791 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
792 		ring_params.intr_batch_cntr_thres_entries =
793 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
794 	} else {
795 		ring_params.intr_timer_thres_us =
796 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
797 		ring_params.intr_batch_cntr_thres_entries =
798 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
799 	}
800 
801 	/* Enable low threshold interrupts for rx buffer rings (regular and
802 	 * monitor buffer rings.
803 	 * TODO: See if this is required for any other ring
804 	 */
805 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
806 		(ring_type == RXDMA_MONITOR_STATUS)) {
807 		/* TODO: Setting low threshold to 1/8th of ring size
808 		 * see if this needs to be configurable
809 		 */
810 		ring_params.low_threshold = num_entries >> 3;
811 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
812 		ring_params.intr_timer_thres_us =
813 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
814 		ring_params.intr_batch_cntr_thres_entries = 0;
815 	}
816 
817 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
818 		mac_id, &ring_params);
819 
820 	if (!srng->hal_srng) {
821 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
822 				srng->alloc_size,
823 				srng->base_vaddr_unaligned,
824 				srng->base_paddr_unaligned, 0);
825 	}
826 
827 	return 0;
828 }
829 
830 /**
831  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
832  * Any buffers allocated and attached to ring entries are expected to be freed
833  * before calling this function.
834  */
835 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
836 	int ring_type, int ring_num)
837 {
838 	if (!srng->hal_srng) {
839 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
840 			FL("Ring type: %d, num:%d not setup"),
841 			ring_type, ring_num);
842 		return;
843 	}
844 
845 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
846 
847 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
848 				srng->alloc_size,
849 				srng->base_vaddr_unaligned,
850 				srng->base_paddr_unaligned, 0);
851 	srng->hal_srng = NULL;
852 }
853 
854 /* TODO: Need this interface from HIF */
855 void *hif_get_hal_handle(void *hif_handle);
856 
857 /*
858  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
859  * @dp_ctx: DP SOC handle
860  * @budget: Number of frames/descriptors that can be processed in one shot
861  *
862  * Return: remaining budget/quota for the soc device
863  */
864 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
865 {
866 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
867 	struct dp_soc *soc = int_ctx->soc;
868 	int ring = 0;
869 	uint32_t work_done  = 0;
870 	int budget = dp_budget;
871 	uint8_t tx_mask = int_ctx->tx_ring_mask;
872 	uint8_t rx_mask = int_ctx->rx_ring_mask;
873 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
874 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
875 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
876 	uint32_t remaining_quota = dp_budget;
877 	struct dp_pdev *pdev = NULL;
878 	int mac_id;
879 
880 	/* Process Tx completion interrupts first to return back buffers */
881 	while (tx_mask) {
882 		if (tx_mask & 0x1) {
883 			work_done = dp_tx_comp_handler(soc,
884 					soc->tx_comp_ring[ring].hal_srng,
885 					remaining_quota);
886 
887 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
888 				"tx mask 0x%x ring %d, budget %d, work_done %d",
889 				tx_mask, ring, budget, work_done);
890 
891 			budget -= work_done;
892 			if (budget <= 0)
893 				goto budget_done;
894 
895 			remaining_quota = budget;
896 		}
897 		tx_mask = tx_mask >> 1;
898 		ring++;
899 	}
900 
901 
902 	/* Process REO Exception ring interrupt */
903 	if (rx_err_mask) {
904 		work_done = dp_rx_err_process(soc,
905 				soc->reo_exception_ring.hal_srng,
906 				remaining_quota);
907 
908 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
909 			"REO Exception Ring: work_done %d budget %d",
910 			work_done, budget);
911 
912 		budget -=  work_done;
913 		if (budget <= 0) {
914 			goto budget_done;
915 		}
916 		remaining_quota = budget;
917 	}
918 
919 	/* Process Rx WBM release ring interrupt */
920 	if (rx_wbm_rel_mask) {
921 		work_done = dp_rx_wbm_err_process(soc,
922 				soc->rx_rel_ring.hal_srng, remaining_quota);
923 
924 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
925 			"WBM Release Ring: work_done %d budget %d",
926 			work_done, budget);
927 
928 		budget -=  work_done;
929 		if (budget <= 0) {
930 			goto budget_done;
931 		}
932 		remaining_quota = budget;
933 	}
934 
935 	/* Process Rx interrupts */
936 	if (rx_mask) {
937 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
938 			if (rx_mask & (1 << ring)) {
939 				work_done = dp_rx_process(int_ctx,
940 					    soc->reo_dest_ring[ring].hal_srng,
941 					    remaining_quota);
942 
943 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
944 					"rx mask 0x%x ring %d, work_done %d budget %d",
945 					rx_mask, ring, work_done, budget);
946 
947 				budget -=  work_done;
948 				if (budget <= 0)
949 					goto budget_done;
950 				remaining_quota = budget;
951 			}
952 		}
953 		for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
954 			work_done = dp_rxdma_err_process(soc, ring,
955 						remaining_quota);
956 			budget -= work_done;
957 		}
958 	}
959 
960 	if (reo_status_mask)
961 		dp_reo_status_ring_handler(soc);
962 
963 	/* Process LMAC interrupts */
964 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
965 		pdev = soc->pdev_list[ring];
966 		if (pdev == NULL)
967 			continue;
968 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
969 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
970 								pdev->pdev_id);
971 
972 			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
973 				work_done = dp_mon_process(soc, mac_for_pdev,
974 						remaining_quota);
975 				budget -= work_done;
976 				if (budget <= 0)
977 					goto budget_done;
978 				remaining_quota = budget;
979 			}
980 
981 			if (int_ctx->rxdma2host_ring_mask &
982 					(1 << mac_for_pdev)) {
983 				work_done = dp_rxdma_err_process(soc,
984 							mac_for_pdev,
985 							remaining_quota);
986 				budget -=  work_done;
987 				if (budget <= 0)
988 					goto budget_done;
989 				remaining_quota = budget;
990 			}
991 
992 			if (int_ctx->host2rxdma_ring_mask &
993 						(1 << mac_for_pdev)) {
994 				union dp_rx_desc_list_elem_t *desc_list = NULL;
995 				union dp_rx_desc_list_elem_t *tail = NULL;
996 				struct dp_srng *rx_refill_buf_ring =
997 					&pdev->rx_refill_buf_ring;
998 
999 				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1000 						1);
1001 				dp_rx_buffers_replenish(soc, mac_for_pdev,
1002 					rx_refill_buf_ring,
1003 					&soc->rx_desc_buf[mac_for_pdev], 0,
1004 					&desc_list, &tail);
1005 			}
1006 		}
1007 	}
1008 
1009 	qdf_lro_flush(int_ctx->lro_ctx);
1010 
1011 budget_done:
1012 	return dp_budget - budget;
1013 }
1014 
1015 #ifdef DP_INTR_POLL_BASED
1016 /* dp_interrupt_timer()- timer poll for interrupts
1017  *
1018  * @arg: SoC Handle
1019  *
1020  * Return:
1021  *
1022  */
1023 static void dp_interrupt_timer(void *arg)
1024 {
1025 	struct dp_soc *soc = (struct dp_soc *) arg;
1026 	int i;
1027 
1028 	if (qdf_atomic_read(&soc->cmn_init_done)) {
1029 		for (i = 0;
1030 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1031 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
1032 
1033 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1034 	}
1035 }
1036 
1037 /*
1038  * dp_soc_interrupt_attach_poll() - Register handlers for DP interrupts
1039  * @txrx_soc: DP SOC handle
1040  *
1041  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1042  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1043  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1044  *
1045  * Return: 0 for success. nonzero for failure.
1046  */
1047 static QDF_STATUS dp_soc_interrupt_attach_poll(void *txrx_soc)
1048 {
1049 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1050 	int i;
1051 
1052 	soc->intr_mode = DP_INTR_POLL;
1053 
1054 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1055 		soc->intr_ctx[i].dp_intr_id = i;
1056 		soc->intr_ctx[i].tx_ring_mask =
1057 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1058 		soc->intr_ctx[i].rx_ring_mask =
1059 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1060 		soc->intr_ctx[i].rx_mon_ring_mask =
1061 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1062 		soc->intr_ctx[i].rx_err_ring_mask =
1063 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1064 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1065 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1066 		soc->intr_ctx[i].reo_status_ring_mask =
1067 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1068 		soc->intr_ctx[i].rxdma2host_ring_mask =
1069 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1070 		soc->intr_ctx[i].soc = soc;
1071 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1072 	}
1073 
1074 	qdf_timer_init(soc->osdev, &soc->int_timer,
1075 			dp_interrupt_timer, (void *)soc,
1076 			QDF_TIMER_TYPE_WAKE_APPS);
1077 
1078 	return QDF_STATUS_SUCCESS;
1079 }
1080 
1081 #if defined(CONFIG_MCL)
1082 extern int con_mode_monitor;
1083 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
1084 /*
1085  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1086  * @txrx_soc: DP SOC handle
1087  *
1088  * Call the appropriate attach function based on the mode of operation.
1089  * This is a WAR for enabling monitor mode.
1090  *
1091  * Return: 0 for success. nonzero for failure.
1092  */
1093 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1094 {
1095 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1096 
1097 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1098 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
1099 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1100 				  "%s: Poll mode", __func__);
1101 		return dp_soc_interrupt_attach_poll(txrx_soc);
1102 	} else {
1103 
1104 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1105 				  "%s: Interrupt  mode", __func__);
1106 		return dp_soc_interrupt_attach(txrx_soc);
1107 	}
1108 }
1109 #else
1110 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1111 {
1112 	return dp_soc_interrupt_attach_poll(txrx_soc);
1113 }
1114 #endif
1115 #endif
1116 
1117 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1118 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1119 {
1120 	int j;
1121 	int num_irq = 0;
1122 
1123 	int tx_mask =
1124 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1125 	int rx_mask =
1126 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1127 	int rx_mon_mask =
1128 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1129 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1130 					soc->wlan_cfg_ctx, intr_ctx_num);
1131 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1132 					soc->wlan_cfg_ctx, intr_ctx_num);
1133 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1134 					soc->wlan_cfg_ctx, intr_ctx_num);
1135 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1136 					soc->wlan_cfg_ctx, intr_ctx_num);
1137 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1138 					soc->wlan_cfg_ctx, intr_ctx_num);
1139 
1140 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1141 
1142 		if (tx_mask & (1 << j)) {
1143 			irq_id_map[num_irq++] =
1144 				(wbm2host_tx_completions_ring1 - j);
1145 		}
1146 
1147 		if (rx_mask & (1 << j)) {
1148 			irq_id_map[num_irq++] =
1149 				(reo2host_destination_ring1 - j);
1150 		}
1151 
1152 		if (rxdma2host_ring_mask & (1 << j)) {
1153 			irq_id_map[num_irq++] =
1154 				rxdma2host_destination_ring_mac1 -
1155 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1156 		}
1157 
1158 		if (host2rxdma_ring_mask & (1 << j)) {
1159 			irq_id_map[num_irq++] =
1160 				host2rxdma_host_buf_ring_mac1 -
1161 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1162 		}
1163 
1164 		if (rx_mon_mask & (1 << j)) {
1165 			irq_id_map[num_irq++] =
1166 				ppdu_end_interrupts_mac1 -
1167 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1168 			irq_id_map[num_irq++] =
1169 				rxdma2host_monitor_status_ring_mac1 -
1170 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1171 		}
1172 
1173 		if (rx_wbm_rel_ring_mask & (1 << j))
1174 			irq_id_map[num_irq++] = wbm2host_rx_release;
1175 
1176 		if (rx_err_ring_mask & (1 << j))
1177 			irq_id_map[num_irq++] = reo2host_exception;
1178 
1179 		if (reo_status_ring_mask & (1 << j))
1180 			irq_id_map[num_irq++] = reo2host_status;
1181 
1182 	}
1183 	*num_irq_r = num_irq;
1184 }
1185 
1186 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1187 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1188 		int msi_vector_count, int msi_vector_start)
1189 {
1190 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1191 					soc->wlan_cfg_ctx, intr_ctx_num);
1192 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1193 					soc->wlan_cfg_ctx, intr_ctx_num);
1194 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1195 					soc->wlan_cfg_ctx, intr_ctx_num);
1196 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1197 					soc->wlan_cfg_ctx, intr_ctx_num);
1198 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1199 					soc->wlan_cfg_ctx, intr_ctx_num);
1200 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1201 					soc->wlan_cfg_ctx, intr_ctx_num);
1202 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1203 					soc->wlan_cfg_ctx, intr_ctx_num);
1204 
1205 	unsigned int vector =
1206 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1207 	int num_irq = 0;
1208 
1209 	soc->intr_mode = DP_INTR_MSI;
1210 
1211 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1212 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1213 		irq_id_map[num_irq++] =
1214 			pld_get_msi_irq(soc->osdev->dev, vector);
1215 
1216 	*num_irq_r = num_irq;
1217 }
1218 
1219 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1220 				    int *irq_id_map, int *num_irq)
1221 {
1222 	int msi_vector_count, ret;
1223 	uint32_t msi_base_data, msi_vector_start;
1224 
1225 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1226 					    &msi_vector_count,
1227 					    &msi_base_data,
1228 					    &msi_vector_start);
1229 	if (ret)
1230 		return dp_soc_interrupt_map_calculate_integrated(soc,
1231 				intr_ctx_num, irq_id_map, num_irq);
1232 
1233 	else
1234 		dp_soc_interrupt_map_calculate_msi(soc,
1235 				intr_ctx_num, irq_id_map, num_irq,
1236 				msi_vector_count, msi_vector_start);
1237 }
1238 
1239 /*
1240  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1241  * @txrx_soc: DP SOC handle
1242  *
1243  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1244  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1245  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1246  *
1247  * Return: 0 for success. nonzero for failure.
1248  */
1249 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1250 {
1251 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1252 
1253 	int i = 0;
1254 	int num_irq = 0;
1255 
1256 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1257 		int ret = 0;
1258 
1259 		/* Map of IRQ ids registered with one interrupt context */
1260 		int irq_id_map[HIF_MAX_GRP_IRQ];
1261 
1262 		int tx_mask =
1263 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1264 		int rx_mask =
1265 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1266 		int rx_mon_mask =
1267 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1268 		int rx_err_ring_mask =
1269 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1270 		int rx_wbm_rel_ring_mask =
1271 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1272 		int reo_status_ring_mask =
1273 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1274 		int rxdma2host_ring_mask =
1275 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1276 		int host2rxdma_ring_mask =
1277 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1278 
1279 
1280 		soc->intr_ctx[i].dp_intr_id = i;
1281 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1282 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1283 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1284 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1285 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1286 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1287 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1288 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1289 
1290 		soc->intr_ctx[i].soc = soc;
1291 
1292 		num_irq = 0;
1293 
1294 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1295 					       &num_irq);
1296 
1297 		ret = hif_register_ext_group(soc->hif_handle,
1298 				num_irq, irq_id_map, dp_service_srngs,
1299 				&soc->intr_ctx[i], "dp_intr",
1300 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1301 
1302 		if (ret) {
1303 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1304 			FL("failed, ret = %d"), ret);
1305 
1306 			return QDF_STATUS_E_FAILURE;
1307 		}
1308 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1309 	}
1310 
1311 	hif_configure_ext_group_interrupts(soc->hif_handle);
1312 
1313 	return QDF_STATUS_SUCCESS;
1314 }
1315 
1316 /*
1317  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1318  * @txrx_soc: DP SOC handle
1319  *
1320  * Return: void
1321  */
1322 static void dp_soc_interrupt_detach(void *txrx_soc)
1323 {
1324 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1325 	int i;
1326 
1327 	if (soc->intr_mode == DP_INTR_POLL) {
1328 		qdf_timer_stop(&soc->int_timer);
1329 		qdf_timer_free(&soc->int_timer);
1330 	} else {
1331 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1332 	}
1333 
1334 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1335 		soc->intr_ctx[i].tx_ring_mask = 0;
1336 		soc->intr_ctx[i].rx_ring_mask = 0;
1337 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1338 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1339 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1340 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1341 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1342 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1343 
1344 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1345 	}
1346 }
1347 
1348 #define AVG_MAX_MPDUS_PER_TID 128
1349 #define AVG_TIDS_PER_CLIENT 2
1350 #define AVG_FLOWS_PER_TID 2
1351 #define AVG_MSDUS_PER_FLOW 128
1352 #define AVG_MSDUS_PER_MPDU 4
1353 
1354 /*
1355  * Allocate and setup link descriptor pool that will be used by HW for
1356  * various link and queue descriptors and managed by WBM
1357  */
1358 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1359 {
1360 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1361 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1362 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1363 	uint32_t num_mpdus_per_link_desc =
1364 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1365 	uint32_t num_msdus_per_link_desc =
1366 		hal_num_msdus_per_link_desc(soc->hal_soc);
1367 	uint32_t num_mpdu_links_per_queue_desc =
1368 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1369 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1370 	uint32_t total_link_descs, total_mem_size;
1371 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1372 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1373 	uint32_t num_link_desc_banks;
1374 	uint32_t last_bank_size = 0;
1375 	uint32_t entry_size, num_entries;
1376 	int i;
1377 	uint32_t desc_id = 0;
1378 
1379 	/* Only Tx queue descriptors are allocated from common link descriptor
1380 	 * pool Rx queue descriptors are not included in this because (REO queue
1381 	 * extension descriptors) they are expected to be allocated contiguously
1382 	 * with REO queue descriptors
1383 	 */
1384 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1385 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1386 
1387 	num_mpdu_queue_descs = num_mpdu_link_descs /
1388 		num_mpdu_links_per_queue_desc;
1389 
1390 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1391 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1392 		num_msdus_per_link_desc;
1393 
1394 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1395 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1396 
1397 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1398 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1399 
1400 	/* Round up to power of 2 */
1401 	total_link_descs = 1;
1402 	while (total_link_descs < num_entries)
1403 		total_link_descs <<= 1;
1404 
1405 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1406 		FL("total_link_descs: %u, link_desc_size: %d"),
1407 		total_link_descs, link_desc_size);
1408 	total_mem_size =  total_link_descs * link_desc_size;
1409 
1410 	total_mem_size += link_desc_align;
1411 
1412 	if (total_mem_size <= max_alloc_size) {
1413 		num_link_desc_banks = 0;
1414 		last_bank_size = total_mem_size;
1415 	} else {
1416 		num_link_desc_banks = (total_mem_size) /
1417 			(max_alloc_size - link_desc_align);
1418 		last_bank_size = total_mem_size %
1419 			(max_alloc_size - link_desc_align);
1420 	}
1421 
1422 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1423 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1424 		total_mem_size, num_link_desc_banks);
1425 
1426 	for (i = 0; i < num_link_desc_banks; i++) {
1427 		soc->link_desc_banks[i].base_vaddr_unaligned =
1428 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1429 			max_alloc_size,
1430 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1431 		soc->link_desc_banks[i].size = max_alloc_size;
1432 
1433 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1434 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1435 			((unsigned long)(
1436 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1437 			link_desc_align));
1438 
1439 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1440 			soc->link_desc_banks[i].base_paddr_unaligned) +
1441 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1442 			(unsigned long)(
1443 			soc->link_desc_banks[i].base_vaddr_unaligned));
1444 
1445 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1446 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1447 				FL("Link descriptor memory alloc failed"));
1448 			goto fail;
1449 		}
1450 	}
1451 
1452 	if (last_bank_size) {
1453 		/* Allocate last bank in case total memory required is not exact
1454 		 * multiple of max_alloc_size
1455 		 */
1456 		soc->link_desc_banks[i].base_vaddr_unaligned =
1457 			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
1458 			last_bank_size,
1459 			&(soc->link_desc_banks[i].base_paddr_unaligned));
1460 		soc->link_desc_banks[i].size = last_bank_size;
1461 
1462 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1463 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1464 			((unsigned long)(
1465 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1466 			link_desc_align));
1467 
1468 		soc->link_desc_banks[i].base_paddr =
1469 			(unsigned long)(
1470 			soc->link_desc_banks[i].base_paddr_unaligned) +
1471 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1472 			(unsigned long)(
1473 			soc->link_desc_banks[i].base_vaddr_unaligned));
1474 	}
1475 
1476 
1477 	/* Allocate and setup link descriptor idle list for HW internal use */
1478 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1479 	total_mem_size = entry_size * total_link_descs;
1480 
1481 	if (total_mem_size <= max_alloc_size) {
1482 		void *desc;
1483 
1484 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1485 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
1486 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1487 				FL("Link desc idle ring setup failed"));
1488 			goto fail;
1489 		}
1490 
1491 		hal_srng_access_start_unlocked(soc->hal_soc,
1492 			soc->wbm_idle_link_ring.hal_srng);
1493 
1494 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1495 			soc->link_desc_banks[i].base_paddr; i++) {
1496 			uint32_t num_entries = (soc->link_desc_banks[i].size -
1497 				((unsigned long)(
1498 				soc->link_desc_banks[i].base_vaddr) -
1499 				(unsigned long)(
1500 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1501 				/ link_desc_size;
1502 			unsigned long paddr = (unsigned long)(
1503 				soc->link_desc_banks[i].base_paddr);
1504 
1505 			while (num_entries && (desc = hal_srng_src_get_next(
1506 				soc->hal_soc,
1507 				soc->wbm_idle_link_ring.hal_srng))) {
1508 				hal_set_link_desc_addr(desc,
1509 					LINK_DESC_COOKIE(desc_id, i), paddr);
1510 				num_entries--;
1511 				desc_id++;
1512 				paddr += link_desc_size;
1513 			}
1514 		}
1515 		hal_srng_access_end_unlocked(soc->hal_soc,
1516 			soc->wbm_idle_link_ring.hal_srng);
1517 	} else {
1518 		uint32_t num_scatter_bufs;
1519 		uint32_t num_entries_per_buf;
1520 		uint32_t rem_entries;
1521 		uint8_t *scatter_buf_ptr;
1522 		uint16_t scatter_buf_num;
1523 
1524 		soc->wbm_idle_scatter_buf_size =
1525 			hal_idle_list_scatter_buf_size(soc->hal_soc);
1526 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1527 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
1528 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1529 					soc->hal_soc, total_mem_size,
1530 					soc->wbm_idle_scatter_buf_size);
1531 
1532 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
1533 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1534 					FL("scatter bufs size out of bounds"));
1535 			goto fail;
1536 		}
1537 
1538 		for (i = 0; i < num_scatter_bufs; i++) {
1539 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
1540 				qdf_mem_alloc_consistent(soc->osdev,
1541 							soc->osdev->dev,
1542 				soc->wbm_idle_scatter_buf_size,
1543 				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
1544 			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
1545 				QDF_TRACE(QDF_MODULE_ID_DP,
1546 						QDF_TRACE_LEVEL_ERROR,
1547 					FL("Scatter list memory alloc failed"));
1548 				goto fail;
1549 			}
1550 		}
1551 
1552 		/* Populate idle list scatter buffers with link descriptor
1553 		 * pointers
1554 		 */
1555 		scatter_buf_num = 0;
1556 		scatter_buf_ptr = (uint8_t *)(
1557 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
1558 		rem_entries = num_entries_per_buf;
1559 
1560 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
1561 			soc->link_desc_banks[i].base_paddr; i++) {
1562 			uint32_t num_link_descs =
1563 				(soc->link_desc_banks[i].size -
1564 				((unsigned long)(
1565 				soc->link_desc_banks[i].base_vaddr) -
1566 				(unsigned long)(
1567 				soc->link_desc_banks[i].base_vaddr_unaligned)))
1568 				/ link_desc_size;
1569 			unsigned long paddr = (unsigned long)(
1570 				soc->link_desc_banks[i].base_paddr);
1571 
1572 			while (num_link_descs) {
1573 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
1574 					LINK_DESC_COOKIE(desc_id, i), paddr);
1575 				num_link_descs--;
1576 				desc_id++;
1577 				paddr += link_desc_size;
1578 				rem_entries--;
1579 				if (rem_entries) {
1580 					scatter_buf_ptr += entry_size;
1581 				} else {
1582 					rem_entries = num_entries_per_buf;
1583 					scatter_buf_num++;
1584 
1585 					if (scatter_buf_num >= num_scatter_bufs)
1586 						break;
1587 
1588 					scatter_buf_ptr = (uint8_t *)(
1589 						soc->wbm_idle_scatter_buf_base_vaddr[
1590 						scatter_buf_num]);
1591 				}
1592 			}
1593 		}
1594 		/* Setup link descriptor idle list in HW */
1595 		hal_setup_link_idle_list(soc->hal_soc,
1596 			soc->wbm_idle_scatter_buf_base_paddr,
1597 			soc->wbm_idle_scatter_buf_base_vaddr,
1598 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
1599 			(uint32_t)(scatter_buf_ptr -
1600 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
1601 			scatter_buf_num-1])), total_link_descs);
1602 	}
1603 	return 0;
1604 
1605 fail:
1606 	if (soc->wbm_idle_link_ring.hal_srng) {
1607 		dp_srng_cleanup(soc->hal_soc, &soc->wbm_idle_link_ring,
1608 			WBM_IDLE_LINK, 0);
1609 	}
1610 
1611 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1612 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1613 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1614 				soc->wbm_idle_scatter_buf_size,
1615 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1616 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1617 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1618 		}
1619 	}
1620 
1621 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1622 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1623 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1624 				soc->link_desc_banks[i].size,
1625 				soc->link_desc_banks[i].base_vaddr_unaligned,
1626 				soc->link_desc_banks[i].base_paddr_unaligned,
1627 				0);
1628 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1629 		}
1630 	}
1631 	return QDF_STATUS_E_FAILURE;
1632 }
1633 
1634 /*
1635  * Free link descriptor pool that was setup HW
1636  */
1637 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
1638 {
1639 	int i;
1640 
1641 	if (soc->wbm_idle_link_ring.hal_srng) {
1642 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
1643 			WBM_IDLE_LINK, 0);
1644 	}
1645 
1646 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1647 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
1648 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1649 				soc->wbm_idle_scatter_buf_size,
1650 				soc->wbm_idle_scatter_buf_base_vaddr[i],
1651 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
1652 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
1653 		}
1654 	}
1655 
1656 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1657 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
1658 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1659 				soc->link_desc_banks[i].size,
1660 				soc->link_desc_banks[i].base_vaddr_unaligned,
1661 				soc->link_desc_banks[i].base_paddr_unaligned,
1662 				0);
1663 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
1664 		}
1665 	}
1666 }
1667 
1668 /* TODO: Following should be configurable */
1669 #define WBM_RELEASE_RING_SIZE 64
1670 #define TCL_CMD_RING_SIZE 32
1671 #define TCL_STATUS_RING_SIZE 32
1672 #if defined(QCA_WIFI_QCA6290)
1673 #define REO_DST_RING_SIZE 1024
1674 #else
1675 #define REO_DST_RING_SIZE 2048
1676 #endif
1677 #define REO_REINJECT_RING_SIZE 32
1678 #define RX_RELEASE_RING_SIZE 1024
1679 #define REO_EXCEPTION_RING_SIZE 128
1680 #define REO_CMD_RING_SIZE 64
1681 #define REO_STATUS_RING_SIZE 128
1682 #define RXDMA_BUF_RING_SIZE 1024
1683 #define RXDMA_REFILL_RING_SIZE 4096
1684 #define RXDMA_MONITOR_BUF_RING_SIZE 4096
1685 #define RXDMA_MONITOR_DST_RING_SIZE 2048
1686 #define RXDMA_MONITOR_STATUS_RING_SIZE 1024
1687 #define RXDMA_MONITOR_DESC_RING_SIZE 4096
1688 #define RXDMA_ERR_DST_RING_SIZE 1024
1689 
1690 /*
1691  * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
1692  * @soc: Datapath SOC handle
1693  *
1694  * This is a timer function used to age out stale AST nodes from
1695  * AST table
1696  */
1697 #ifdef FEATURE_WDS
1698 static void dp_wds_aging_timer_fn(void *soc_hdl)
1699 {
1700 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
1701 	struct dp_pdev *pdev;
1702 	struct dp_vdev *vdev;
1703 	struct dp_peer *peer;
1704 	struct dp_ast_entry *ase, *temp_ase;
1705 	int i;
1706 
1707 	qdf_spin_lock_bh(&soc->ast_lock);
1708 
1709 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1710 		pdev = soc->pdev_list[i];
1711 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1712 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1713 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1714 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
1715 					/*
1716 					 * Do not expire static ast entries
1717 					 * and HM WDS entries
1718 					 */
1719 					if (ase->type != CDP_TXRX_AST_TYPE_WDS)
1720 						continue;
1721 
1722 					if (ase->is_active) {
1723 						ase->is_active = FALSE;
1724 						continue;
1725 					}
1726 
1727 					DP_STATS_INC(soc, ast.aged_out, 1);
1728 					dp_peer_del_ast(soc, ase);
1729 				}
1730 			}
1731 		}
1732 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1733 	}
1734 
1735 	qdf_spin_unlock_bh(&soc->ast_lock);
1736 
1737 	if (qdf_atomic_read(&soc->cmn_init_done))
1738 		qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1739 }
1740 
1741 
1742 /*
1743  * dp_soc_wds_attach() - Setup WDS timer and AST table
1744  * @soc:		Datapath SOC handle
1745  *
1746  * Return: None
1747  */
1748 static void dp_soc_wds_attach(struct dp_soc *soc)
1749 {
1750 	qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
1751 			dp_wds_aging_timer_fn, (void *)soc,
1752 			QDF_TIMER_TYPE_WAKE_APPS);
1753 
1754 	qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1755 }
1756 
1757 /*
1758  * dp_soc_wds_detach() - Detach WDS data structures and timers
1759  * @txrx_soc: DP SOC handle
1760  *
1761  * Return: None
1762  */
1763 static void dp_soc_wds_detach(struct dp_soc *soc)
1764 {
1765 	qdf_timer_stop(&soc->wds_aging_timer);
1766 	qdf_timer_free(&soc->wds_aging_timer);
1767 }
1768 #else
1769 static void dp_soc_wds_attach(struct dp_soc *soc)
1770 {
1771 }
1772 
1773 static void dp_soc_wds_detach(struct dp_soc *soc)
1774 {
1775 }
1776 #endif
1777 
1778 /*
1779  * dp_soc_reset_ring_map() - Reset cpu ring map
1780  * @soc: Datapath soc handler
1781  *
1782  * This api resets the default cpu ring map
1783  */
1784 
1785 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
1786 {
1787 	uint8_t i;
1788 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1789 
1790 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
1791 		if (nss_config == 1) {
1792 			/*
1793 			 * Setting Tx ring map for one nss offloaded radio
1794 			 */
1795 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
1796 		} else if (nss_config == 2) {
1797 			/*
1798 			 * Setting Tx ring for two nss offloaded radios
1799 			 */
1800 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
1801 		} else {
1802 			/*
1803 			 * Setting Tx ring map for all nss offloaded radios
1804 			 */
1805 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
1806 		}
1807 	}
1808 }
1809 
1810 /*
1811  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
1812  * @dp_soc - DP soc handle
1813  * @ring_type - ring type
1814  * @ring_num - ring_num
1815  *
1816  * return 0 or 1
1817  */
1818 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
1819 {
1820 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1821 	uint8_t status = 0;
1822 
1823 	switch (ring_type) {
1824 	case WBM2SW_RELEASE:
1825 	case REO_DST:
1826 	case RXDMA_BUF:
1827 		status = ((nss_config) & (1 << ring_num));
1828 		break;
1829 	default:
1830 		break;
1831 	}
1832 
1833 	return status;
1834 }
1835 
1836 /*
1837  * dp_soc_reset_intr_mask() - reset interrupt mask
1838  * @dp_soc - DP Soc handle
1839  *
1840  * Return: Return void
1841  */
1842 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
1843 {
1844 	uint8_t j;
1845 	int *grp_mask = NULL;
1846 	int group_number, mask, num_ring;
1847 
1848 	/* number of tx ring */
1849 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
1850 
1851 	/*
1852 	 * group mask for tx completion  ring.
1853 	 */
1854 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1855 
1856 	/* loop and reset the mask for only offloaded ring */
1857 	for (j = 0; j < num_ring; j++) {
1858 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
1859 			continue;
1860 		}
1861 
1862 		/*
1863 		 * Group number corresponding to tx offloaded ring.
1864 		 */
1865 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1866 		if (group_number < 0) {
1867 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1868 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
1869 					WBM2SW_RELEASE, j);
1870 			return;
1871 		}
1872 
1873 		/* reset the tx mask for offloaded ring */
1874 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
1875 		mask &= (~(1 << j));
1876 
1877 		/*
1878 		 * reset the interrupt mask for offloaded ring.
1879 		 */
1880 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1881 	}
1882 
1883 	/* number of rx rings */
1884 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
1885 
1886 	/*
1887 	 * group mask for reo destination ring.
1888 	 */
1889 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1890 
1891 	/* loop and reset the mask for only offloaded ring */
1892 	for (j = 0; j < num_ring; j++) {
1893 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
1894 			continue;
1895 		}
1896 
1897 		/*
1898 		 * Group number corresponding to rx offloaded ring.
1899 		 */
1900 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1901 		if (group_number < 0) {
1902 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1903 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
1904 					REO_DST, j);
1905 			return;
1906 		}
1907 
1908 		/* set the interrupt mask for offloaded ring */
1909 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
1910 		mask &= (~(1 << j));
1911 
1912 		/*
1913 		 * set the interrupt mask to zero for rx offloaded radio.
1914 		 */
1915 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1916 	}
1917 
1918 	/*
1919 	 * group mask for Rx buffer refill ring
1920 	 */
1921 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1922 
1923 	/* loop and reset the mask for only offloaded ring */
1924 	for (j = 0; j < MAX_PDEV_CNT; j++) {
1925 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
1926 			continue;
1927 		}
1928 
1929 		/*
1930 		 * Group number corresponding to rx offloaded ring.
1931 		 */
1932 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1933 		if (group_number < 0) {
1934 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1935 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
1936 					REO_DST, j);
1937 			return;
1938 		}
1939 
1940 		/* set the interrupt mask for offloaded ring */
1941 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1942 				group_number);
1943 		mask &= (~(1 << j));
1944 
1945 		/*
1946 		 * set the interrupt mask to zero for rx offloaded radio.
1947 		 */
1948 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1949 			group_number, mask);
1950 	}
1951 }
1952 
1953 #ifdef IPA_OFFLOAD
1954 /**
1955  * dp_reo_remap_config() - configure reo remap register value based
1956  *                         nss configuration.
1957  *		based on offload_radio value below remap configuration
1958  *		get applied.
1959  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
1960  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
1961  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
1962  *		3 - both Radios handled by NSS (remap not required)
1963  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
1964  *
1965  * @remap1: output parameter indicates reo remap 1 register value
1966  * @remap2: output parameter indicates reo remap 2 register value
1967  * Return: bool type, true if remap is configured else false.
1968  */
1969 static bool dp_reo_remap_config(struct dp_soc *soc,
1970 				uint32_t *remap1,
1971 				uint32_t *remap2)
1972 {
1973 
1974 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
1975 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
1976 
1977 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
1978 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
1979 
1980 	return true;
1981 }
1982 #else
1983 static bool dp_reo_remap_config(struct dp_soc *soc,
1984 				uint32_t *remap1,
1985 				uint32_t *remap2)
1986 {
1987 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1988 
1989 	switch (offload_radio) {
1990 	case 0:
1991 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
1992 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
1993 			(0x3 << 18) | (0x4 << 21)) << 8;
1994 
1995 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
1996 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
1997 			(0x3 << 18) | (0x4 << 21)) << 8;
1998 		break;
1999 
2000 	case 1:
2001 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2002 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2003 			(0x2 << 18) | (0x3 << 21)) << 8;
2004 
2005 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2006 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2007 			(0x4 << 18) | (0x2 << 21)) << 8;
2008 		break;
2009 
2010 	case 2:
2011 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2012 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2013 			(0x1 << 18) | (0x3 << 21)) << 8;
2014 
2015 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2016 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2017 			(0x4 << 18) | (0x1 << 21)) << 8;
2018 		break;
2019 
2020 	case 3:
2021 		/* return false if both radios are offloaded to NSS */
2022 		return false;
2023 	}
2024 	return true;
2025 }
2026 #endif
2027 
2028 /*
2029  * dp_reo_frag_dst_set() - configure reo register to set the
2030  *                        fragment destination ring
2031  * @soc : Datapath soc
2032  * @frag_dst_ring : output parameter to set fragment destination ring
2033  *
2034  * Based on offload_radio below fragment destination rings is selected
2035  * 0 - TCL
2036  * 1 - SW1
2037  * 2 - SW2
2038  * 3 - SW3
2039  * 4 - SW4
2040  * 5 - Release
2041  * 6 - FW
2042  * 7 - alternate select
2043  *
2044  * return: void
2045  */
2046 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2047 {
2048 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2049 
2050 	switch (offload_radio) {
2051 	case 0:
2052 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2053 		break;
2054 	case 3:
2055 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2056 		break;
2057 	default:
2058 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2059 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2060 		break;
2061 	}
2062 }
2063 
2064 /*
2065  * dp_soc_cmn_setup() - Common SoC level initializion
2066  * @soc:		Datapath SOC handle
2067  *
2068  * This is an internal function used to setup common SOC data structures,
2069  * to be called from PDEV attach after receiving HW mode capabilities from FW
2070  */
2071 static int dp_soc_cmn_setup(struct dp_soc *soc)
2072 {
2073 	int i;
2074 	struct hal_reo_params reo_params;
2075 	int tx_ring_size;
2076 	int tx_comp_ring_size;
2077 
2078 	if (qdf_atomic_read(&soc->cmn_init_done))
2079 		return 0;
2080 
2081 	if (dp_hw_link_desc_pool_setup(soc))
2082 		goto fail1;
2083 
2084 	/* Setup SRNG rings */
2085 	/* Common rings */
2086 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2087 		WBM_RELEASE_RING_SIZE)) {
2088 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2089 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2090 		goto fail1;
2091 	}
2092 
2093 
2094 	soc->num_tcl_data_rings = 0;
2095 	/* Tx data rings */
2096 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2097 		soc->num_tcl_data_rings =
2098 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2099 		tx_comp_ring_size =
2100 			wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx);
2101 		tx_ring_size =
2102 			wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
2103 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2104 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2105 				TCL_DATA, i, 0, tx_ring_size)) {
2106 				QDF_TRACE(QDF_MODULE_ID_DP,
2107 					QDF_TRACE_LEVEL_ERROR,
2108 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2109 				goto fail1;
2110 			}
2111 			/*
2112 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2113 			 * count
2114 			 */
2115 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2116 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
2117 				QDF_TRACE(QDF_MODULE_ID_DP,
2118 					QDF_TRACE_LEVEL_ERROR,
2119 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2120 				goto fail1;
2121 			}
2122 		}
2123 	} else {
2124 		/* This will be incremented during per pdev ring setup */
2125 		soc->num_tcl_data_rings = 0;
2126 	}
2127 
2128 	if (dp_tx_soc_attach(soc)) {
2129 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2130 				FL("dp_tx_soc_attach failed"));
2131 		goto fail1;
2132 	}
2133 
2134 	/* TCL command and status rings */
2135 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2136 		TCL_CMD_RING_SIZE)) {
2137 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2138 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2139 		goto fail1;
2140 	}
2141 
2142 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2143 		TCL_STATUS_RING_SIZE)) {
2144 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2145 			FL("dp_srng_setup failed for tcl_status_ring"));
2146 		goto fail1;
2147 	}
2148 
2149 
2150 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2151 	 * descriptors
2152 	 */
2153 
2154 	/* Rx data rings */
2155 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2156 		soc->num_reo_dest_rings =
2157 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2158 		QDF_TRACE(QDF_MODULE_ID_DP,
2159 			QDF_TRACE_LEVEL_ERROR,
2160 			FL("num_reo_dest_rings %d\n"), soc->num_reo_dest_rings);
2161 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2162 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2163 				i, 0, REO_DST_RING_SIZE)) {
2164 				QDF_TRACE(QDF_MODULE_ID_DP,
2165 					QDF_TRACE_LEVEL_ERROR,
2166 					FL("dp_srng_setup failed for reo_dest_ring[%d]"), i);
2167 				goto fail1;
2168 			}
2169 		}
2170 	} else {
2171 		/* This will be incremented during per pdev ring setup */
2172 		soc->num_reo_dest_rings = 0;
2173 	}
2174 
2175 	/* LMAC RxDMA to SW Rings configuration */
2176 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2177 		/* Only valid for MCL */
2178 		struct dp_pdev *pdev = soc->pdev_list[0];
2179 
2180 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2181 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2182 				RXDMA_DST, 0, i, RXDMA_ERR_DST_RING_SIZE)) {
2183 				QDF_TRACE(QDF_MODULE_ID_DP,
2184 					QDF_TRACE_LEVEL_ERROR,
2185 					FL("dp_srng_setup failed for rxdma_err_dst_ring"));
2186 				goto fail1;
2187 			}
2188 		}
2189 	}
2190 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2191 
2192 	/* REO reinjection ring */
2193 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2194 		REO_REINJECT_RING_SIZE)) {
2195 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2196 			FL("dp_srng_setup failed for reo_reinject_ring"));
2197 		goto fail1;
2198 	}
2199 
2200 
2201 	/* Rx release ring */
2202 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2203 		RX_RELEASE_RING_SIZE)) {
2204 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2205 			FL("dp_srng_setup failed for rx_rel_ring"));
2206 		goto fail1;
2207 	}
2208 
2209 
2210 	/* Rx exception ring */
2211 	if (dp_srng_setup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
2212 		MAX_REO_DEST_RINGS, REO_EXCEPTION_RING_SIZE)) {
2213 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2214 			FL("dp_srng_setup failed for reo_exception_ring"));
2215 		goto fail1;
2216 	}
2217 
2218 
2219 	/* REO command and status rings */
2220 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2221 		REO_CMD_RING_SIZE)) {
2222 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2223 			FL("dp_srng_setup failed for reo_cmd_ring"));
2224 		goto fail1;
2225 	}
2226 
2227 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2228 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2229 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2230 
2231 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2232 		REO_STATUS_RING_SIZE)) {
2233 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2234 			FL("dp_srng_setup failed for reo_status_ring"));
2235 		goto fail1;
2236 	}
2237 
2238 	qdf_spinlock_create(&soc->ast_lock);
2239 	dp_soc_wds_attach(soc);
2240 
2241 	/* Reset the cpu ring map if radio is NSS offloaded */
2242 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2243 		dp_soc_reset_cpu_ring_map(soc);
2244 		dp_soc_reset_intr_mask(soc);
2245 	}
2246 
2247 	/* Setup HW REO */
2248 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2249 
2250 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2251 
2252 		/*
2253 		 * Reo ring remap is not required if both radios
2254 		 * are offloaded to NSS
2255 		 */
2256 		if (!dp_reo_remap_config(soc,
2257 					&reo_params.remap1,
2258 					&reo_params.remap2))
2259 			goto out;
2260 
2261 		reo_params.rx_hash_enabled = true;
2262 	}
2263 
2264 	/* setup the global rx defrag waitlist */
2265 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2266 	soc->rx.defrag.timeout_ms =
2267 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
2268 	soc->rx.flags.defrag_timeout_check =
2269 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
2270 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
2271 
2272 out:
2273 	/*
2274 	 * set the fragment destination ring
2275 	 */
2276 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2277 
2278 	hal_reo_setup(soc->hal_soc, &reo_params);
2279 
2280 	qdf_atomic_set(&soc->cmn_init_done, 1);
2281 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2282 	return 0;
2283 fail1:
2284 	/*
2285 	 * Cleanup will be done as part of soc_detach, which will
2286 	 * be called on pdev attach failure
2287 	 */
2288 	return QDF_STATUS_E_FAILURE;
2289 }
2290 
2291 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2292 
2293 static void dp_lro_hash_setup(struct dp_soc *soc)
2294 {
2295 	struct cdp_lro_hash_config lro_hash;
2296 
2297 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2298 		!wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2299 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2300 			 FL("LRO disabled RX hash disabled"));
2301 		return;
2302 	}
2303 
2304 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2305 
2306 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2307 		lro_hash.lro_enable = 1;
2308 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2309 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2310 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2311 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2312 	}
2313 
2314 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
2315 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2316 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2317 		 LRO_IPV4_SEED_ARR_SZ));
2318 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2319 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2320 		 LRO_IPV6_SEED_ARR_SZ));
2321 
2322 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2323 		 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2324 		 lro_hash.lro_enable, lro_hash.tcp_flag,
2325 		 lro_hash.tcp_flag_mask);
2326 
2327 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2328 		 QDF_TRACE_LEVEL_ERROR,
2329 		 (void *)lro_hash.toeplitz_hash_ipv4,
2330 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2331 		 LRO_IPV4_SEED_ARR_SZ));
2332 
2333 	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2334 		 QDF_TRACE_LEVEL_ERROR,
2335 		 (void *)lro_hash.toeplitz_hash_ipv6,
2336 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2337 		 LRO_IPV6_SEED_ARR_SZ));
2338 
2339 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2340 
2341 	if (soc->cdp_soc.ol_ops->lro_hash_config)
2342 		(void)soc->cdp_soc.ol_ops->lro_hash_config
2343 			(soc->ctrl_psoc, &lro_hash);
2344 }
2345 
2346 /*
2347 * dp_rxdma_ring_setup() - configure the RX DMA rings
2348 * @soc: data path SoC handle
2349 * @pdev: Physical device handle
2350 *
2351 * Return: 0 - success, > 0 - failure
2352 */
2353 #ifdef QCA_HOST2FW_RXBUF_RING
2354 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2355 	 struct dp_pdev *pdev)
2356 {
2357 	int max_mac_rings =
2358 		 wlan_cfg_get_num_mac_rings
2359 			(pdev->wlan_cfg_ctx);
2360 	int i;
2361 
2362 	for (i = 0; i < max_mac_rings; i++) {
2363 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2364 			 "%s: pdev_id %d mac_id %d\n",
2365 			 __func__, pdev->pdev_id, i);
2366 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2367 			 RXDMA_BUF, 1, i, RXDMA_BUF_RING_SIZE)) {
2368 			QDF_TRACE(QDF_MODULE_ID_DP,
2369 				 QDF_TRACE_LEVEL_ERROR,
2370 				 FL("failed rx mac ring setup"));
2371 			return QDF_STATUS_E_FAILURE;
2372 		}
2373 	}
2374 	return QDF_STATUS_SUCCESS;
2375 }
2376 #else
2377 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2378 	 struct dp_pdev *pdev)
2379 {
2380 	return QDF_STATUS_SUCCESS;
2381 }
2382 #endif
2383 
2384 /**
2385  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2386  * @pdev - DP_PDEV handle
2387  *
2388  * Return: void
2389  */
2390 static inline void
2391 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2392 {
2393 	uint8_t map_id;
2394 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2395 		qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
2396 				sizeof(default_dscp_tid_map));
2397 	}
2398 	for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
2399 		hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
2400 				pdev->dscp_tid_map[map_id],
2401 				map_id);
2402 	}
2403 }
2404 
2405 #ifdef QCA_SUPPORT_SON
2406 /**
2407  * dp_mark_peer_inact(): Update peer inactivity status
2408  * @peer_handle - datapath peer handle
2409  *
2410  * Return: void
2411  */
2412 void dp_mark_peer_inact(void *peer_handle, bool inactive)
2413 {
2414 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
2415 	struct dp_pdev *pdev;
2416 	struct dp_soc *soc;
2417 	bool inactive_old;
2418 
2419 	if (!peer)
2420 		return;
2421 
2422 	pdev = peer->vdev->pdev;
2423 	soc = pdev->soc;
2424 
2425 	inactive_old = peer->peer_bs_inact_flag == 1;
2426 	if (!inactive)
2427 		peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2428 	peer->peer_bs_inact_flag = inactive ? 1 : 0;
2429 
2430 	if (inactive_old != inactive) {
2431 		/**
2432 		 * Note: a node lookup can happen in RX datapath context
2433 		 * when a node changes from inactive to active (at most once
2434 		 * per inactivity timeout threshold)
2435 		 */
2436 		if (soc->cdp_soc.ol_ops->record_act_change) {
2437 			soc->cdp_soc.ol_ops->record_act_change(
2438 					(void *)pdev->ctrl_pdev,
2439 					peer->mac_addr.raw, !inactive);
2440 		}
2441 	}
2442 }
2443 
2444 /**
2445  * dp_txrx_peer_find_inact_timeout_handler(): Inactivity timeout function
2446  *
2447  * Periodically checks the inactivity status
2448  */
2449 static os_timer_func(dp_txrx_peer_find_inact_timeout_handler)
2450 {
2451 	struct dp_pdev *pdev;
2452 	struct dp_vdev *vdev;
2453 	struct dp_peer *peer;
2454 	struct dp_soc *soc;
2455 	int i;
2456 
2457 	OS_GET_TIMER_ARG(soc, struct dp_soc *);
2458 
2459 	qdf_spin_lock(&soc->peer_ref_mutex);
2460 
2461 	for (i = 0; i < soc->pdev_count; i++) {
2462 	pdev = soc->pdev_list[i];
2463 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
2464 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2465 		if (vdev->opmode != wlan_op_mode_ap)
2466 			continue;
2467 
2468 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2469 			if (!peer->authorize) {
2470 				/**
2471 				 * Inactivity check only interested in
2472 				 * connected node
2473 				 */
2474 				continue;
2475 			}
2476 			if (peer->peer_bs_inact > soc->pdev_bs_inact_reload) {
2477 				/**
2478 				 * This check ensures we do not wait extra long
2479 				 * due to the potential race condition
2480 				 */
2481 				peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2482 			}
2483 			if (peer->peer_bs_inact > 0) {
2484 				/* Do not let it wrap around */
2485 				peer->peer_bs_inact--;
2486 			}
2487 			if (peer->peer_bs_inact == 0)
2488 				dp_mark_peer_inact(peer, true);
2489 		}
2490 	}
2491 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2492 	}
2493 
2494 	qdf_spin_unlock(&soc->peer_ref_mutex);
2495 	qdf_timer_mod(&soc->pdev_bs_inact_timer,
2496 		      soc->pdev_bs_inact_interval * 1000);
2497 }
2498 
2499 
2500 /**
2501  * dp_free_inact_timer(): free inact timer
2502  * @timer - inact timer handle
2503  *
2504  * Return: bool
2505  */
2506 void dp_free_inact_timer(struct dp_soc *soc)
2507 {
2508 	qdf_timer_free(&soc->pdev_bs_inact_timer);
2509 }
2510 #else
2511 
2512 void dp_mark_peer_inact(void *peer, bool inactive)
2513 {
2514 	return;
2515 }
2516 
2517 void dp_free_inact_timer(struct dp_soc *soc)
2518 {
2519 	return;
2520 }
2521 
2522 #endif
2523 
2524 #ifdef IPA_OFFLOAD
2525 /**
2526  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
2527  * @soc: data path instance
2528  * @pdev: core txrx pdev context
2529  *
2530  * Return: QDF_STATUS_SUCCESS: success
2531  *         QDF_STATUS_E_RESOURCES: Error return
2532  */
2533 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2534 					   struct dp_pdev *pdev)
2535 {
2536 	/* Setup second Rx refill buffer ring */
2537 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2538 			  IPA_RX_REFILL_BUF_RING_IDX,
2539 			  pdev->pdev_id, RXDMA_REFILL_RING_SIZE)) {
2540 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2541 			FL("dp_srng_setup failed second rx refill ring"));
2542 		return QDF_STATUS_E_FAILURE;
2543 	}
2544 	return QDF_STATUS_SUCCESS;
2545 }
2546 
2547 /**
2548  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
2549  * @soc: data path instance
2550  * @pdev: core txrx pdev context
2551  *
2552  * Return: void
2553  */
2554 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2555 					      struct dp_pdev *pdev)
2556 {
2557 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
2558 			IPA_RX_REFILL_BUF_RING_IDX);
2559 }
2560 
2561 #else
2562 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2563 					   struct dp_pdev *pdev)
2564 {
2565 	return QDF_STATUS_SUCCESS;
2566 }
2567 
2568 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
2569 					      struct dp_pdev *pdev)
2570 {
2571 }
2572 #endif
2573 
2574 #ifndef QCA_WIFI_QCA6390
2575 static
2576 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2577 {
2578 	int mac_id = 0;
2579 	int pdev_id = pdev->pdev_id;
2580 
2581 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2582 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
2583 
2584 		if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2585 				  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
2586 				  RXDMA_MONITOR_BUF_RING_SIZE)) {
2587 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2588 				  FL("Srng setup failed for rxdma_mon_buf_ring"));
2589 			return QDF_STATUS_E_NOMEM;
2590 		}
2591 
2592 		if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2593 				  RXDMA_MONITOR_DST, 0, mac_for_pdev,
2594 				  RXDMA_MONITOR_DST_RING_SIZE)) {
2595 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2596 				  FL("Srng setup failed for rxdma_mon_dst_ring"));
2597 			return QDF_STATUS_E_NOMEM;
2598 		}
2599 
2600 		if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2601 				  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
2602 				  RXDMA_MONITOR_STATUS_RING_SIZE)) {
2603 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2604 				  FL("Srng setup failed for rxdma_mon_status_ring"));
2605 			return QDF_STATUS_E_NOMEM;
2606 		}
2607 
2608 		if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2609 				  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
2610 				  RXDMA_MONITOR_DESC_RING_SIZE)) {
2611 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2612 				  "Srng setup failed for rxdma_mon_desc_ring\n");
2613 			return QDF_STATUS_E_NOMEM;
2614 		}
2615 	}
2616 	return QDF_STATUS_SUCCESS;
2617 }
2618 #else
2619 static QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2620 {
2621 	return QDF_STATUS_SUCCESS;
2622 }
2623 #endif
2624 
2625 /*
2626 * dp_pdev_attach_wifi3() - attach txrx pdev
2627 * @ctrl_pdev: Opaque PDEV object
2628 * @txrx_soc: Datapath SOC handle
2629 * @htc_handle: HTC handle for host-target interface
2630 * @qdf_osdev: QDF OS device
2631 * @pdev_id: PDEV ID
2632 *
2633 * Return: DP PDEV handle on success, NULL on failure
2634 */
2635 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
2636 	struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
2637 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
2638 {
2639 	int tx_ring_size;
2640 	int tx_comp_ring_size;
2641 
2642 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2643 	struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2644 
2645 	if (!pdev) {
2646 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2647 			FL("DP PDEV memory allocation failed"));
2648 		goto fail0;
2649 	}
2650 
2651 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach();
2652 
2653 	if (!pdev->wlan_cfg_ctx) {
2654 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2655 			FL("pdev cfg_attach failed"));
2656 
2657 		qdf_mem_free(pdev);
2658 		goto fail0;
2659 	}
2660 
2661 	/*
2662 	 * set nss pdev config based on soc config
2663 	 */
2664 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
2665 			(wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx) & (1 << pdev_id)));
2666 
2667 	pdev->soc = soc;
2668 	pdev->ctrl_pdev = ctrl_pdev;
2669 	pdev->pdev_id = pdev_id;
2670 	soc->pdev_list[pdev_id] = pdev;
2671 	soc->pdev_count++;
2672 
2673 	TAILQ_INIT(&pdev->vdev_list);
2674 	qdf_spinlock_create(&pdev->vdev_list_lock);
2675 	pdev->vdev_count = 0;
2676 
2677 	qdf_spinlock_create(&pdev->tx_mutex);
2678 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2679 	TAILQ_INIT(&pdev->neighbour_peers_list);
2680 
2681 	if (dp_soc_cmn_setup(soc)) {
2682 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2683 			FL("dp_soc_cmn_setup failed"));
2684 		goto fail1;
2685 	}
2686 
2687 	/* Setup per PDEV TCL rings if configured */
2688 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2689 		tx_ring_size =
2690 			wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
2691 		tx_comp_ring_size =
2692 			wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx);
2693 
2694 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
2695 			pdev_id, pdev_id, tx_ring_size)) {
2696 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2697 				FL("dp_srng_setup failed for tcl_data_ring"));
2698 			goto fail1;
2699 		}
2700 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
2701 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
2702 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2703 				FL("dp_srng_setup failed for tx_comp_ring"));
2704 			goto fail1;
2705 		}
2706 		soc->num_tcl_data_rings++;
2707 	}
2708 
2709 	/* Tx specific init */
2710 	if (dp_tx_pdev_attach(pdev)) {
2711 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2712 			FL("dp_tx_pdev_attach failed"));
2713 		goto fail1;
2714 	}
2715 
2716 	/* Setup per PDEV REO rings if configured */
2717 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2718 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
2719 			pdev_id, pdev_id, REO_DST_RING_SIZE)) {
2720 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2721 				FL("dp_srng_setup failed for reo_dest_ringn"));
2722 			goto fail1;
2723 		}
2724 		soc->num_reo_dest_rings++;
2725 
2726 	}
2727 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
2728 		RXDMA_REFILL_RING_SIZE)) {
2729 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2730 			 FL("dp_srng_setup failed rx refill ring"));
2731 		goto fail1;
2732 	}
2733 
2734 	if (dp_rxdma_ring_setup(soc, pdev)) {
2735 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2736 			 FL("RXDMA ring config failed"));
2737 		goto fail1;
2738 	}
2739 
2740 	if (dp_mon_rings_setup(soc, pdev)) {
2741 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2742 			  FL("MONITOR rings setup failed"));
2743 		goto fail1;
2744 	}
2745 
2746 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2747 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
2748 				  0, pdev_id, RXDMA_ERR_DST_RING_SIZE)) {
2749 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2750 				FL("dp_srng_setup failed for rxdma_err_dst_ring"));
2751 			goto fail1;
2752 		}
2753 	}
2754 
2755 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
2756 		goto fail1;
2757 
2758 	if (dp_ipa_ring_resource_setup(soc, pdev))
2759 		goto fail1;
2760 
2761 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
2762 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2763 			FL("dp_ipa_uc_attach failed"));
2764 		goto fail1;
2765 	}
2766 
2767 	/* Rx specific init */
2768 	if (dp_rx_pdev_attach(pdev)) {
2769 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2770 			FL("dp_rx_pdev_attach failed"));
2771 		goto fail0;
2772 	}
2773 	DP_STATS_INIT(pdev);
2774 
2775 	/* Monitor filter init */
2776 	pdev->mon_filter_mode = MON_FILTER_ALL;
2777 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
2778 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
2779 	pdev->fp_data_filter = FILTER_DATA_ALL;
2780 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
2781 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
2782 	pdev->mo_data_filter = FILTER_DATA_ALL;
2783 
2784 	dp_local_peer_id_pool_init(pdev);
2785 
2786 	dp_dscp_tid_map_setup(pdev);
2787 
2788 	/* Rx monitor mode specific init */
2789 	if (dp_rx_pdev_mon_attach(pdev)) {
2790 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2791 				"dp_rx_pdev_attach failed\n");
2792 		goto fail1;
2793 	}
2794 
2795 	if (dp_wdi_event_attach(pdev)) {
2796 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2797 				"dp_wdi_evet_attach failed\n");
2798 		goto fail1;
2799 	}
2800 
2801 	/* set the reo destination during initialization */
2802 	pdev->reo_dest = pdev->pdev_id + 1;
2803 
2804 	/*
2805 	 * initialize ppdu tlv list
2806 	 */
2807 	TAILQ_INIT(&pdev->ppdu_info_list);
2808 	pdev->tlv_count = 0;
2809 	pdev->list_depth = 0;
2810 
2811 	return (struct cdp_pdev *)pdev;
2812 
2813 fail1:
2814 	dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
2815 
2816 fail0:
2817 	return NULL;
2818 }
2819 
2820 /*
2821 * dp_rxdma_ring_cleanup() - configure the RX DMA rings
2822 * @soc: data path SoC handle
2823 * @pdev: Physical device handle
2824 *
2825 * Return: void
2826 */
2827 #ifdef QCA_HOST2FW_RXBUF_RING
2828 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
2829 	 struct dp_pdev *pdev)
2830 {
2831 	int max_mac_rings =
2832 		 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
2833 	int i;
2834 
2835 	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
2836 				max_mac_rings : MAX_RX_MAC_RINGS;
2837 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
2838 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
2839 			 RXDMA_BUF, 1);
2840 
2841 	qdf_timer_free(&soc->mon_reap_timer);
2842 }
2843 #else
2844 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
2845 	 struct dp_pdev *pdev)
2846 {
2847 }
2848 #endif
2849 
2850 /*
2851  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
2852  * @pdev: device object
2853  *
2854  * Return: void
2855  */
2856 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
2857 {
2858 	struct dp_neighbour_peer *peer = NULL;
2859 	struct dp_neighbour_peer *temp_peer = NULL;
2860 
2861 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
2862 			neighbour_peer_list_elem, temp_peer) {
2863 		/* delete this peer from the list */
2864 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
2865 				peer, neighbour_peer_list_elem);
2866 		qdf_mem_free(peer);
2867 	}
2868 
2869 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
2870 }
2871 
2872 /**
2873 * dp_htt_ppdu_stats_detach() - detach stats resources
2874 * @pdev: Datapath PDEV handle
2875 *
2876 * Return: void
2877 */
2878 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
2879 {
2880 	struct ppdu_info *ppdu_info, *ppdu_info_next;
2881 
2882 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
2883 			ppdu_info_list_elem, ppdu_info_next) {
2884 		if (!ppdu_info)
2885 			break;
2886 		qdf_assert_always(ppdu_info->nbuf);
2887 		qdf_nbuf_free(ppdu_info->nbuf);
2888 		qdf_mem_free(ppdu_info);
2889 	}
2890 }
2891 
2892 #ifndef QCA_WIFI_QCA6390
2893 static
2894 void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
2895 			int mac_id)
2896 {
2897 		dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring[mac_id],
2898 				RXDMA_MONITOR_BUF, 0);
2899 		dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring[mac_id],
2900 				RXDMA_MONITOR_DST, 0);
2901 
2902 		dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring[mac_id],
2903 				RXDMA_MONITOR_STATUS, 0);
2904 
2905 		dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring[mac_id],
2906 				RXDMA_MONITOR_DESC, 0);
2907 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
2908 				RXDMA_DST, 0);
2909 }
2910 #else
2911 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
2912 			       int mac_id)
2913 {
2914 }
2915 #endif
2916 
2917 /*
2918 * dp_pdev_detach_wifi3() - detach txrx pdev
2919 * @txrx_pdev: Datapath PDEV handle
2920 * @force: Force detach
2921 *
2922 */
2923 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
2924 {
2925 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
2926 	struct dp_soc *soc = pdev->soc;
2927 	qdf_nbuf_t curr_nbuf, next_nbuf;
2928 	int mac_id;
2929 
2930 	dp_wdi_event_detach(pdev);
2931 
2932 	dp_tx_pdev_detach(pdev);
2933 
2934 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2935 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
2936 			TCL_DATA, pdev->pdev_id);
2937 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
2938 			WBM2SW_RELEASE, pdev->pdev_id);
2939 	}
2940 
2941 	dp_pktlogmod_exit(pdev);
2942 
2943 	dp_rx_pdev_detach(pdev);
2944 	dp_rx_pdev_mon_detach(pdev);
2945 	dp_neighbour_peers_detach(pdev);
2946 	qdf_spinlock_destroy(&pdev->tx_mutex);
2947 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
2948 
2949 	dp_ipa_uc_detach(soc, pdev);
2950 
2951 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
2952 
2953 	/* Cleanup per PDEV REO rings if configured */
2954 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2955 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
2956 			REO_DST, pdev->pdev_id);
2957 	}
2958 
2959 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
2960 
2961 	dp_rxdma_ring_cleanup(soc, pdev);
2962 
2963 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
2964 		dp_mon_ring_deinit(soc, pdev, mac_id);
2965 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
2966 			RXDMA_DST, 0);
2967 	}
2968 
2969 	curr_nbuf = pdev->invalid_peer_head_msdu;
2970 	while (curr_nbuf) {
2971 		next_nbuf = qdf_nbuf_next(curr_nbuf);
2972 		qdf_nbuf_free(curr_nbuf);
2973 		curr_nbuf = next_nbuf;
2974 	}
2975 
2976 	dp_htt_ppdu_stats_detach(pdev);
2977 
2978 	soc->pdev_list[pdev->pdev_id] = NULL;
2979 	soc->pdev_count--;
2980 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
2981 	qdf_mem_free(pdev->dp_txrx_handle);
2982 	qdf_mem_free(pdev);
2983 }
2984 
2985 /*
2986  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
2987  * @soc: DP SOC handle
2988  */
2989 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
2990 {
2991 	struct reo_desc_list_node *desc;
2992 	struct dp_rx_tid *rx_tid;
2993 
2994 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2995 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
2996 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
2997 		rx_tid = &desc->rx_tid;
2998 		qdf_mem_unmap_nbytes_single(soc->osdev,
2999 			rx_tid->hw_qdesc_paddr,
3000 			QDF_DMA_BIDIRECTIONAL,
3001 			rx_tid->hw_qdesc_alloc_size);
3002 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3003 		qdf_mem_free(desc);
3004 	}
3005 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3006 	qdf_list_destroy(&soc->reo_desc_freelist);
3007 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
3008 }
3009 
3010 /*
3011  * dp_soc_detach_wifi3() - Detach txrx SOC
3012  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3013  */
3014 static void dp_soc_detach_wifi3(void *txrx_soc)
3015 {
3016 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3017 	int i;
3018 
3019 	qdf_atomic_set(&soc->cmn_init_done, 0);
3020 
3021 	qdf_flush_work(&soc->htt_stats.work);
3022 	qdf_disable_work(&soc->htt_stats.work);
3023 
3024 	/* Free pending htt stats messages */
3025 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
3026 
3027 	dp_free_inact_timer(soc);
3028 
3029 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3030 		if (soc->pdev_list[i])
3031 			dp_pdev_detach_wifi3(
3032 				(struct cdp_pdev *)soc->pdev_list[i], 1);
3033 	}
3034 
3035 	dp_peer_find_detach(soc);
3036 
3037 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
3038 	 * SW descriptors
3039 	 */
3040 
3041 	/* Free the ring memories */
3042 	/* Common rings */
3043 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3044 
3045 	dp_tx_soc_detach(soc);
3046 	/* Tx data rings */
3047 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3048 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3049 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
3050 				TCL_DATA, i);
3051 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
3052 				WBM2SW_RELEASE, i);
3053 		}
3054 	}
3055 
3056 	/* TCL command and status rings */
3057 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3058 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3059 
3060 	/* Rx data rings */
3061 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3062 		soc->num_reo_dest_rings =
3063 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3064 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3065 			/* TODO: Get number of rings and ring sizes
3066 			 * from wlan_cfg
3067 			 */
3068 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
3069 				REO_DST, i);
3070 		}
3071 	}
3072 	/* REO reinjection ring */
3073 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3074 
3075 	/* Rx release ring */
3076 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3077 
3078 	/* Rx exception ring */
3079 	/* TODO: Better to store ring_type and ring_num in
3080 	 * dp_srng during setup
3081 	 */
3082 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3083 
3084 	/* REO command and status rings */
3085 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3086 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
3087 	dp_hw_link_desc_pool_cleanup(soc);
3088 
3089 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
3090 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3091 
3092 	htt_soc_detach(soc->htt_handle);
3093 
3094 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3095 
3096 	dp_reo_cmdlist_destroy(soc);
3097 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3098 	dp_reo_desc_freelist_destroy(soc);
3099 
3100 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
3101 
3102 	dp_soc_wds_detach(soc);
3103 	qdf_spinlock_destroy(&soc->ast_lock);
3104 
3105 	qdf_mem_free(soc);
3106 }
3107 
3108 #ifndef QCA_WIFI_QCA6390
3109 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3110 				  struct dp_pdev *pdev,
3111 				  int mac_id,
3112 				  int mac_for_pdev)
3113 {
3114 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3115 		       pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3116 		       RXDMA_MONITOR_BUF);
3117 
3118 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3119 		       pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3120 		       RXDMA_MONITOR_DST);
3121 
3122 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3123 		       pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3124 		       RXDMA_MONITOR_STATUS);
3125 
3126 	htt_srng_setup(soc->htt_handle, mac_for_pdev,
3127 		       pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3128 		       RXDMA_MONITOR_DESC);
3129 }
3130 #else
3131 static void dp_mon_htt_srng_setup(struct dp_soc *soc,
3132 				  struct dp_pdev *pdev,
3133 				  int mac_id,
3134 				  int mac_for_pdev)
3135 {
3136 }
3137 #endif
3138 /*
3139  * dp_rxdma_ring_config() - configure the RX DMA rings
3140  *
3141  * This function is used to configure the MAC rings.
3142  * On MCL host provides buffers in Host2FW ring
3143  * FW refills (copies) buffers to the ring and updates
3144  * ring_idx in register
3145  *
3146  * @soc: data path SoC handle
3147  *
3148  * Return: void
3149  */
3150 #ifdef QCA_HOST2FW_RXBUF_RING
3151 static void dp_rxdma_ring_config(struct dp_soc *soc)
3152 {
3153 	int i;
3154 
3155 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3156 		struct dp_pdev *pdev = soc->pdev_list[i];
3157 
3158 		if (pdev) {
3159 			int mac_id;
3160 			bool dbs_enable = 0;
3161 			int max_mac_rings =
3162 				 wlan_cfg_get_num_mac_rings
3163 				(pdev->wlan_cfg_ctx);
3164 
3165 			htt_srng_setup(soc->htt_handle, 0,
3166 				 pdev->rx_refill_buf_ring.hal_srng,
3167 				 RXDMA_BUF);
3168 
3169 			if (pdev->rx_refill_buf_ring2.hal_srng)
3170 				htt_srng_setup(soc->htt_handle, 0,
3171 					pdev->rx_refill_buf_ring2.hal_srng,
3172 					RXDMA_BUF);
3173 
3174 			if (soc->cdp_soc.ol_ops->
3175 				is_hw_dbs_2x2_capable) {
3176 				dbs_enable = soc->cdp_soc.ol_ops->
3177 					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
3178 			}
3179 
3180 			if (dbs_enable) {
3181 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3182 				QDF_TRACE_LEVEL_ERROR,
3183 				FL("DBS enabled max_mac_rings %d\n"),
3184 					 max_mac_rings);
3185 			} else {
3186 				max_mac_rings = 1;
3187 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3188 					 QDF_TRACE_LEVEL_ERROR,
3189 					 FL("DBS disabled, max_mac_rings %d\n"),
3190 					 max_mac_rings);
3191 			}
3192 
3193 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3194 					 FL("pdev_id %d max_mac_rings %d\n"),
3195 					 pdev->pdev_id, max_mac_rings);
3196 
3197 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
3198 				int mac_for_pdev = dp_get_mac_id_for_pdev(
3199 							mac_id, pdev->pdev_id);
3200 
3201 				QDF_TRACE(QDF_MODULE_ID_TXRX,
3202 					 QDF_TRACE_LEVEL_ERROR,
3203 					 FL("mac_id %d\n"), mac_for_pdev);
3204 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3205 					 pdev->rx_mac_buf_ring[mac_id]
3206 						.hal_srng,
3207 					 RXDMA_BUF);
3208 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
3209 					pdev->rxdma_err_dst_ring[mac_id]
3210 						.hal_srng,
3211 					RXDMA_DST);
3212 
3213 				/* Configure monitor mode rings */
3214 				dp_mon_htt_srng_setup(soc, pdev, mac_id,
3215 						      mac_for_pdev);
3216 
3217 			}
3218 		}
3219 	}
3220 
3221 	/*
3222 	 * Timer to reap rxdma status rings.
3223 	 * Needed until we enable ppdu end interrupts
3224 	 */
3225 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
3226 			dp_service_mon_rings, (void *)soc,
3227 			QDF_TIMER_TYPE_WAKE_APPS);
3228 	soc->reap_timer_init = 1;
3229 }
3230 #else
3231 /* This is only for WIN */
3232 static void dp_rxdma_ring_config(struct dp_soc *soc)
3233 {
3234 	int i;
3235 	int mac_id;
3236 
3237 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3238 		struct dp_pdev *pdev = soc->pdev_list[i];
3239 
3240 		if (pdev == NULL)
3241 			continue;
3242 
3243 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3244 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
3245 
3246 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3247 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
3248 
3249 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3250 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
3251 				RXDMA_MONITOR_BUF);
3252 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3253 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
3254 				RXDMA_MONITOR_DST);
3255 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3256 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
3257 				RXDMA_MONITOR_STATUS);
3258 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3259 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
3260 				RXDMA_MONITOR_DESC);
3261 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
3262 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
3263 				RXDMA_DST);
3264 		}
3265 	}
3266 }
3267 #endif
3268 
3269 /*
3270  * dp_soc_attach_target_wifi3() - SOC initialization in the target
3271  * @txrx_soc: Datapath SOC handle
3272  */
3273 static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
3274 {
3275 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
3276 
3277 	htt_soc_attach_target(soc->htt_handle);
3278 
3279 	dp_rxdma_ring_config(soc);
3280 
3281 	DP_STATS_INIT(soc);
3282 
3283 	/* initialize work queue for stats processing */
3284 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
3285 
3286 	return 0;
3287 }
3288 
3289 /*
3290  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
3291  * @txrx_soc: Datapath SOC handle
3292  */
3293 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
3294 {
3295 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3296 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3297 }
3298 /*
3299  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3300  * @txrx_soc: Datapath SOC handle
3301  * @nss_cfg: nss config
3302  */
3303 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3304 {
3305 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3306 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3307 
3308 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3309 
3310 	/*
3311 	 * TODO: masked out based on the per offloaded radio
3312 	 */
3313 	if (config == dp_nss_cfg_dbdc) {
3314 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3315 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3316 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3317 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3318 	}
3319 
3320 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3321 				FL("nss-wifi<0> nss config is enabled"));
3322 }
3323 /*
3324 * dp_vdev_attach_wifi3() - attach txrx vdev
3325 * @txrx_pdev: Datapath PDEV handle
3326 * @vdev_mac_addr: MAC address of the virtual interface
3327 * @vdev_id: VDEV Id
3328 * @wlan_op_mode: VDEV operating mode
3329 *
3330 * Return: DP VDEV handle on success, NULL on failure
3331 */
3332 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
3333 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
3334 {
3335 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3336 	struct dp_soc *soc = pdev->soc;
3337 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
3338 
3339 	if (!vdev) {
3340 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3341 			FL("DP VDEV memory allocation failed"));
3342 		goto fail0;
3343 	}
3344 
3345 	vdev->pdev = pdev;
3346 	vdev->vdev_id = vdev_id;
3347 	vdev->opmode = op_mode;
3348 	vdev->osdev = soc->osdev;
3349 
3350 	vdev->osif_rx = NULL;
3351 	vdev->osif_rsim_rx_decap = NULL;
3352 	vdev->osif_get_key = NULL;
3353 	vdev->osif_rx_mon = NULL;
3354 	vdev->osif_tx_free_ext = NULL;
3355 	vdev->osif_vdev = NULL;
3356 
3357 	vdev->delete.pending = 0;
3358 	vdev->safemode = 0;
3359 	vdev->drop_unenc = 1;
3360 	vdev->sec_type = cdp_sec_type_none;
3361 #ifdef notyet
3362 	vdev->filters_num = 0;
3363 #endif
3364 
3365 	qdf_mem_copy(
3366 		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3367 
3368 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3369 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3370 	vdev->dscp_tid_map_id = 0;
3371 	vdev->mcast_enhancement_en = 0;
3372 
3373 	/* TODO: Initialize default HTT meta data that will be used in
3374 	 * TCL descriptors for packets transmitted from this VDEV
3375 	 */
3376 
3377 	TAILQ_INIT(&vdev->peer_list);
3378 
3379 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3380 	/* add this vdev into the pdev's list */
3381 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
3382 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3383 	pdev->vdev_count++;
3384 
3385 	dp_tx_vdev_attach(vdev);
3386 
3387 
3388 	if ((soc->intr_mode == DP_INTR_POLL) &&
3389 			wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
3390 		if (pdev->vdev_count == 1)
3391 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
3392 	}
3393 
3394 	dp_lro_hash_setup(soc);
3395 
3396 	/* LRO */
3397 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3398 		wlan_op_mode_sta == vdev->opmode)
3399 		vdev->lro_enable = true;
3400 
3401 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3402 		 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
3403 
3404 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3405 		"Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
3406 	DP_STATS_INIT(vdev);
3407 
3408 	if (wlan_op_mode_sta == vdev->opmode)
3409 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
3410 							vdev->mac_addr.raw,
3411 							NULL);
3412 
3413 	return (struct cdp_vdev *)vdev;
3414 
3415 fail0:
3416 	return NULL;
3417 }
3418 
3419 /**
3420  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
3421  * @vdev: Datapath VDEV handle
3422  * @osif_vdev: OSIF vdev handle
3423  * @ctrl_vdev: UMAC vdev handle
3424  * @txrx_ops: Tx and Rx operations
3425  *
3426  * Return: DP VDEV handle on success, NULL on failure
3427  */
3428 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
3429 	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
3430 	struct ol_txrx_ops *txrx_ops)
3431 {
3432 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3433 	vdev->osif_vdev = osif_vdev;
3434 	vdev->ctrl_vdev = ctrl_vdev;
3435 	vdev->osif_rx = txrx_ops->rx.rx;
3436 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
3437 	vdev->osif_get_key = txrx_ops->get_key;
3438 	vdev->osif_rx_mon = txrx_ops->rx.mon;
3439 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
3440 #ifdef notyet
3441 #if ATH_SUPPORT_WAPI
3442 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
3443 #endif
3444 #endif
3445 #ifdef UMAC_SUPPORT_PROXY_ARP
3446 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
3447 #endif
3448 	vdev->me_convert = txrx_ops->me_convert;
3449 
3450 	/* TODO: Enable the following once Tx code is integrated */
3451 	if (vdev->mesh_vdev)
3452 		txrx_ops->tx.tx = dp_tx_send_mesh;
3453 	else
3454 		txrx_ops->tx.tx = dp_tx_send;
3455 
3456 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
3457 
3458 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
3459 		"DP Vdev Register success");
3460 }
3461 
3462 /**
3463  * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
3464  * @vdev: Datapath VDEV handle
3465  *
3466  * Return: void
3467  */
3468 static void dp_vdev_flush_peers(struct dp_vdev *vdev)
3469 {
3470 	struct dp_pdev *pdev = vdev->pdev;
3471 	struct dp_soc *soc = pdev->soc;
3472 	struct dp_peer *peer;
3473 	uint16_t *peer_ids;
3474 	uint8_t i = 0, j = 0;
3475 
3476 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
3477 	if (!peer_ids) {
3478 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3479 			"DP alloc failure - unable to flush peers");
3480 		return;
3481 	}
3482 
3483 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3484 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3485 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3486 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
3487 				if (j < soc->max_peers)
3488 					peer_ids[j++] = peer->peer_ids[i];
3489 	}
3490 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3491 
3492 	for (i = 0; i < j ; i++)
3493 		dp_rx_peer_unmap_handler(soc, peer_ids[i]);
3494 
3495 	qdf_mem_free(peer_ids);
3496 
3497 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3498 		FL("Flushed peers for vdev object %pK "), vdev);
3499 }
3500 
3501 /*
3502  * dp_vdev_detach_wifi3() - Detach txrx vdev
3503  * @txrx_vdev:		Datapath VDEV handle
3504  * @callback:		Callback OL_IF on completion of detach
3505  * @cb_context:	Callback context
3506  *
3507  */
3508 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
3509 	ol_txrx_vdev_delete_cb callback, void *cb_context)
3510 {
3511 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3512 	struct dp_pdev *pdev = vdev->pdev;
3513 	struct dp_soc *soc = pdev->soc;
3514 
3515 	/* preconditions */
3516 	qdf_assert(vdev);
3517 
3518 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3519 	/* remove the vdev from its parent pdev's list */
3520 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
3521 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3522 
3523 	if (wlan_op_mode_sta == vdev->opmode)
3524 		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
3525 
3526 	/*
3527 	 * If Target is hung, flush all peers before detaching vdev
3528 	 * this will free all references held due to missing
3529 	 * unmap commands from Target
3530 	 */
3531 	if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
3532 		dp_vdev_flush_peers(vdev);
3533 
3534 	/*
3535 	 * Use peer_ref_mutex while accessing peer_list, in case
3536 	 * a peer is in the process of being removed from the list.
3537 	 */
3538 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3539 	/* check that the vdev has no peers allocated */
3540 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
3541 		/* debug print - will be removed later */
3542 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
3543 			FL("not deleting vdev object %pK (%pM)"
3544 			"until deletion finishes for all its peers"),
3545 			vdev, vdev->mac_addr.raw);
3546 		/* indicate that the vdev needs to be deleted */
3547 		vdev->delete.pending = 1;
3548 		vdev->delete.callback = callback;
3549 		vdev->delete.context = cb_context;
3550 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3551 		return;
3552 	}
3553 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3554 
3555 	dp_tx_vdev_detach(vdev);
3556 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3557 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
3558 
3559 	qdf_mem_free(vdev);
3560 
3561 	if (callback)
3562 		callback(cb_context);
3563 }
3564 
3565 /*
3566  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
3567  * @soc - datapath soc handle
3568  * @peer - datapath peer handle
3569  *
3570  * Delete the AST entries belonging to a peer
3571  */
3572 #ifdef FEATURE_AST
3573 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3574 					      struct dp_peer *peer)
3575 {
3576 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
3577 
3578 	qdf_spin_lock_bh(&soc->ast_lock);
3579 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
3580 		dp_peer_del_ast(soc, ast_entry);
3581 
3582 	TAILQ_INIT(&peer->ast_entry_list);
3583 	qdf_spin_unlock_bh(&soc->ast_lock);
3584 }
3585 #else
3586 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
3587 					      struct dp_peer *peer)
3588 {
3589 }
3590 #endif
3591 
3592 /*
3593  * dp_peer_create_wifi3() - attach txrx peer
3594  * @txrx_vdev: Datapath VDEV handle
3595  * @peer_mac_addr: Peer MAC address
3596  *
3597  * Return: DP peeer handle on success, NULL on failure
3598  */
3599 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
3600 		uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
3601 {
3602 	struct dp_peer *peer;
3603 	int i;
3604 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3605 	struct dp_pdev *pdev;
3606 	struct dp_soc *soc;
3607 	struct dp_ast_entry *ast_entry;
3608 
3609 	/* preconditions */
3610 	qdf_assert(vdev);
3611 	qdf_assert(peer_mac_addr);
3612 
3613 	pdev = vdev->pdev;
3614 	soc = pdev->soc;
3615 
3616 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr,
3617 					0, vdev->vdev_id);
3618 
3619 	if (peer) {
3620 		peer->delete_in_progress = false;
3621 
3622 		dp_peer_delete_ast_entries(soc, peer);
3623 
3624 		/*
3625 		* on peer create, peer ref count decrements, sice new peer is not
3626 		* getting created earlier reference is reused, peer_unref_delete will
3627 		* take care of incrementing count
3628 		* */
3629 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
3630 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
3631 				vdev->vdev_id, peer->mac_addr.raw);
3632 		}
3633 		peer->ctrl_peer = ctrl_peer;
3634 
3635 		dp_local_peer_id_alloc(pdev, peer);
3636 		DP_STATS_INIT(peer);
3637 		return (void *)peer;
3638 	} else {
3639 		/*
3640 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
3641 		 * need to remove the AST entry which was earlier added as a WDS
3642 		 * entry.
3643 		 */
3644 		ast_entry = dp_peer_ast_hash_find(soc, peer_mac_addr);
3645 		if (ast_entry)
3646 			dp_peer_del_ast(soc, ast_entry);
3647 	}
3648 
3649 #ifdef notyet
3650 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
3651 		soc->mempool_ol_ath_peer);
3652 #else
3653 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
3654 #endif
3655 
3656 	if (!peer)
3657 		return NULL; /* failure */
3658 
3659 	qdf_mem_zero(peer, sizeof(struct dp_peer));
3660 
3661 	TAILQ_INIT(&peer->ast_entry_list);
3662 
3663 	/* store provided params */
3664 	peer->vdev = vdev;
3665 	peer->ctrl_peer = ctrl_peer;
3666 
3667 	dp_peer_add_ast(soc, peer, peer_mac_addr, CDP_TXRX_AST_TYPE_STATIC, 0);
3668 
3669 	qdf_spinlock_create(&peer->peer_info_lock);
3670 
3671 	qdf_mem_copy(
3672 		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3673 
3674 	/* TODO: See of rx_opt_proc is really required */
3675 	peer->rx_opt_proc = soc->rx_opt_proc;
3676 
3677 	/* initialize the peer_id */
3678 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3679 		peer->peer_ids[i] = HTT_INVALID_PEER;
3680 
3681 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3682 
3683 	qdf_atomic_init(&peer->ref_cnt);
3684 
3685 	/* keep one reference for attach */
3686 	qdf_atomic_inc(&peer->ref_cnt);
3687 
3688 	/* add this peer into the vdev's list */
3689 	if (wlan_op_mode_sta == vdev->opmode)
3690 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
3691 	else
3692 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
3693 
3694 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3695 
3696 	/* TODO: See if hash based search is required */
3697 	dp_peer_find_hash_add(soc, peer);
3698 
3699 	/* Initialize the peer state */
3700 	peer->state = OL_TXRX_PEER_STATE_DISC;
3701 
3702 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3703 		"vdev %pK created peer %pK (%pM) ref_cnt: %d",
3704 		vdev, peer, peer->mac_addr.raw,
3705 		qdf_atomic_read(&peer->ref_cnt));
3706 	/*
3707 	 * For every peer MAp message search and set if bss_peer
3708 	 */
3709 	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
3710 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3711 			"vdev bss_peer!!!!");
3712 		peer->bss_peer = 1;
3713 		vdev->vap_bss_peer = peer;
3714 	}
3715 
3716 
3717 	dp_local_peer_id_alloc(pdev, peer);
3718 	DP_STATS_INIT(peer);
3719 	return (void *)peer;
3720 }
3721 
3722 /*
3723  * dp_peer_setup_wifi3() - initialize the peer
3724  * @vdev_hdl: virtual device object
3725  * @peer: Peer object
3726  *
3727  * Return: void
3728  */
3729 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
3730 {
3731 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
3732 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
3733 	struct dp_pdev *pdev;
3734 	struct dp_soc *soc;
3735 	bool hash_based = 0;
3736 	enum cdp_host_reo_dest_ring reo_dest;
3737 
3738 	/* preconditions */
3739 	qdf_assert(vdev);
3740 	qdf_assert(peer);
3741 
3742 	pdev = vdev->pdev;
3743 	soc = pdev->soc;
3744 
3745 	peer->last_assoc_rcvd = 0;
3746 	peer->last_disassoc_rcvd = 0;
3747 	peer->last_deauth_rcvd = 0;
3748 
3749 	/*
3750 	 * hash based steering is disabled for Radios which are offloaded
3751 	 * to NSS
3752 	 */
3753 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
3754 		hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
3755 
3756 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3757 		FL("hash based steering for pdev: %d is %d\n"),
3758 		pdev->pdev_id, hash_based);
3759 
3760 	/*
3761 	 * Below line of code will ensure the proper reo_dest ring is chosen
3762 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
3763 	 */
3764 	reo_dest = pdev->reo_dest;
3765 
3766 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
3767 		/* TODO: Check the destination ring number to be passed to FW */
3768 		soc->cdp_soc.ol_ops->peer_set_default_routing(
3769 				pdev->ctrl_pdev, peer->mac_addr.raw,
3770 				peer->vdev->vdev_id, hash_based, reo_dest);
3771 	}
3772 
3773 	dp_peer_rx_init(pdev, peer);
3774 	return;
3775 }
3776 
3777 /*
3778  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
3779  * @vdev_handle: virtual device object
3780  * @htt_pkt_type: type of pkt
3781  *
3782  * Return: void
3783  */
3784 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
3785 	 enum htt_cmn_pkt_type val)
3786 {
3787 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3788 	vdev->tx_encap_type = val;
3789 }
3790 
3791 /*
3792  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
3793  * @vdev_handle: virtual device object
3794  * @htt_pkt_type: type of pkt
3795  *
3796  * Return: void
3797  */
3798 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
3799 	 enum htt_cmn_pkt_type val)
3800 {
3801 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3802 	vdev->rx_decap_type = val;
3803 }
3804 
3805 /*
3806  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
3807  * @pdev_handle: physical device object
3808  * @val: reo destination ring index (1 - 4)
3809  *
3810  * Return: void
3811  */
3812 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
3813 	 enum cdp_host_reo_dest_ring val)
3814 {
3815 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3816 
3817 	if (pdev)
3818 		pdev->reo_dest = val;
3819 }
3820 
3821 /*
3822  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
3823  * @pdev_handle: physical device object
3824  *
3825  * Return: reo destination ring index
3826  */
3827 static enum cdp_host_reo_dest_ring
3828 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
3829 {
3830 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3831 
3832 	if (pdev)
3833 		return pdev->reo_dest;
3834 	else
3835 		return cdp_host_reo_dest_ring_unknown;
3836 }
3837 
3838 #ifdef QCA_SUPPORT_SON
3839 static void dp_son_peer_authorize(struct dp_peer *peer)
3840 {
3841 	struct dp_soc *soc;
3842 	soc = peer->vdev->pdev->soc;
3843 	peer->peer_bs_inact_flag = 0;
3844 	peer->peer_bs_inact = soc->pdev_bs_inact_reload;
3845 	return;
3846 }
3847 #else
3848 static void dp_son_peer_authorize(struct dp_peer *peer)
3849 {
3850 	return;
3851 }
3852 #endif
3853 /*
3854  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
3855  * @pdev_handle: device object
3856  * @val: value to be set
3857  *
3858  * Return: void
3859  */
3860 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
3861 	 uint32_t val)
3862 {
3863 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3864 
3865 	/* Enable/Disable smart mesh filtering. This flag will be checked
3866 	 * during rx processing to check if packets are from NAC clients.
3867 	 */
3868 	pdev->filter_neighbour_peers = val;
3869 	return 0;
3870 }
3871 
3872 /*
3873  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
3874  * address for smart mesh filtering
3875  * @pdev_handle: device object
3876  * @cmd: Add/Del command
3877  * @macaddr: nac client mac address
3878  *
3879  * Return: void
3880  */
3881 static int dp_update_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
3882 	 uint32_t cmd, uint8_t *macaddr)
3883 {
3884 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3885 	struct dp_neighbour_peer *peer = NULL;
3886 
3887 	if (!macaddr)
3888 		goto fail0;
3889 
3890 	/* Store address of NAC (neighbour peer) which will be checked
3891 	 * against TA of received packets.
3892 	 */
3893 	if (cmd == DP_NAC_PARAM_ADD) {
3894 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
3895 				sizeof(*peer));
3896 
3897 		if (!peer) {
3898 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3899 				FL("DP neighbour peer node memory allocation failed"));
3900 			goto fail0;
3901 		}
3902 
3903 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
3904 			macaddr, DP_MAC_ADDR_LEN);
3905 
3906 
3907 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3908 		/* add this neighbour peer into the list */
3909 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
3910 				neighbour_peer_list_elem);
3911 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3912 
3913 		return 1;
3914 
3915 	} else if (cmd == DP_NAC_PARAM_DEL) {
3916 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
3917 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
3918 				neighbour_peer_list_elem) {
3919 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
3920 				macaddr, DP_MAC_ADDR_LEN)) {
3921 				/* delete this peer from the list */
3922 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
3923 					peer, neighbour_peer_list_elem);
3924 				qdf_mem_free(peer);
3925 				break;
3926 			}
3927 		}
3928 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
3929 
3930 		return 1;
3931 
3932 	}
3933 
3934 fail0:
3935 	return 0;
3936 }
3937 
3938 /*
3939  * dp_get_sec_type() - Get the security type
3940  * @peer:		Datapath peer handle
3941  * @sec_idx:    Security id (mcast, ucast)
3942  *
3943  * return sec_type: Security type
3944  */
3945 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
3946 {
3947 	struct dp_peer *dpeer = (struct dp_peer *)peer;
3948 
3949 	return dpeer->security[sec_idx].sec_type;
3950 }
3951 
3952 /*
3953  * dp_peer_authorize() - authorize txrx peer
3954  * @peer_handle:		Datapath peer handle
3955  * @authorize
3956  *
3957  */
3958 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
3959 {
3960 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
3961 	struct dp_soc *soc;
3962 
3963 	if (peer != NULL) {
3964 		soc = peer->vdev->pdev->soc;
3965 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
3966 		dp_son_peer_authorize(peer);
3967 		peer->authorize = authorize ? 1 : 0;
3968 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3969 	}
3970 }
3971 
3972 #ifdef QCA_SUPPORT_SON
3973 /*
3974  * dp_txrx_update_inact_threshold() - Update inact timer threshold
3975  * @pdev_handle: Device handle
3976  * @new_threshold : updated threshold value
3977  *
3978  */
3979 static void
3980 dp_txrx_update_inact_threshold(struct cdp_pdev *pdev_handle,
3981 			       u_int16_t new_threshold)
3982 {
3983 	struct dp_vdev *vdev;
3984 	struct dp_peer *peer;
3985 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3986 	struct dp_soc *soc = pdev->soc;
3987 	u_int16_t old_threshold = soc->pdev_bs_inact_reload;
3988 
3989 	if (old_threshold == new_threshold)
3990 		return;
3991 
3992 	soc->pdev_bs_inact_reload = new_threshold;
3993 
3994 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3995 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3996 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
3997 		if (vdev->opmode != wlan_op_mode_ap)
3998 			continue;
3999 
4000 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4001 			if (!peer->authorize)
4002 				continue;
4003 
4004 			if (old_threshold - peer->peer_bs_inact >=
4005 					new_threshold) {
4006 				dp_mark_peer_inact((void *)peer, true);
4007 				peer->peer_bs_inact = 0;
4008 			} else {
4009 				peer->peer_bs_inact = new_threshold -
4010 					(old_threshold - peer->peer_bs_inact);
4011 			}
4012 		}
4013 	}
4014 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4015 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4016 }
4017 
4018 /**
4019  * dp_txrx_reset_inact_count(): Reset inact count
4020  * @pdev_handle - device handle
4021  *
4022  * Return: void
4023  */
4024 static void
4025 dp_txrx_reset_inact_count(struct cdp_pdev *pdev_handle)
4026 {
4027 	struct dp_vdev *vdev = NULL;
4028 	struct dp_peer *peer = NULL;
4029 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4030 	struct dp_soc *soc = pdev->soc;
4031 
4032 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4033 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4034 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4035 		if (vdev->opmode != wlan_op_mode_ap)
4036 			continue;
4037 
4038 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4039 			if (!peer->authorize)
4040 				continue;
4041 
4042 			peer->peer_bs_inact = soc->pdev_bs_inact_reload;
4043 		}
4044 	}
4045 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4046 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4047 }
4048 
4049 /**
4050  * dp_set_inact_params(): set inactivity params
4051  * @pdev_handle - device handle
4052  * @inact_check_interval - inactivity interval
4053  * @inact_normal - Inactivity normal
4054  * @inact_overload - Inactivity overload
4055  *
4056  * Return: bool
4057  */
4058 bool dp_set_inact_params(struct cdp_pdev *pdev_handle,
4059 			 u_int16_t inact_check_interval,
4060 			 u_int16_t inact_normal, u_int16_t inact_overload)
4061 {
4062 	struct dp_soc *soc;
4063 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4064 
4065 	if (!pdev)
4066 		return false;
4067 
4068 	soc = pdev->soc;
4069 	if (!soc)
4070 		return false;
4071 
4072 	soc->pdev_bs_inact_interval = inact_check_interval;
4073 	soc->pdev_bs_inact_normal = inact_normal;
4074 	soc->pdev_bs_inact_overload = inact_overload;
4075 
4076 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4077 					soc->pdev_bs_inact_normal);
4078 
4079 	return true;
4080 }
4081 
4082 /**
4083  * dp_start_inact_timer(): Inactivity timer start
4084  * @pdev_handle - device handle
4085  * @enable - Inactivity timer start/stop
4086  *
4087  * Return: bool
4088  */
4089 bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable)
4090 {
4091 	struct dp_soc *soc;
4092 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4093 
4094 	if (!pdev)
4095 		return false;
4096 
4097 	soc = pdev->soc;
4098 	if (!soc)
4099 		return false;
4100 
4101 	if (enable) {
4102 		dp_txrx_reset_inact_count((struct cdp_pdev *)pdev);
4103 		qdf_timer_mod(&soc->pdev_bs_inact_timer,
4104 			      soc->pdev_bs_inact_interval * 1000);
4105 	} else {
4106 		qdf_timer_stop(&soc->pdev_bs_inact_timer);
4107 	}
4108 
4109 	return true;
4110 }
4111 
4112 /**
4113  * dp_set_overload(): Set inactivity overload
4114  * @pdev_handle - device handle
4115  * @overload - overload status
4116  *
4117  * Return: void
4118  */
4119 void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload)
4120 {
4121 	struct dp_soc *soc;
4122 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4123 
4124 	if (!pdev)
4125 		return;
4126 
4127 	soc = pdev->soc;
4128 	if (!soc)
4129 		return;
4130 
4131 	dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
4132 			overload ? soc->pdev_bs_inact_overload :
4133 			soc->pdev_bs_inact_normal);
4134 }
4135 
4136 /**
4137  * dp_peer_is_inact(): check whether peer is inactive
4138  * @peer_handle - datapath peer handle
4139  *
4140  * Return: bool
4141  */
4142 bool dp_peer_is_inact(void *peer_handle)
4143 {
4144 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4145 
4146 	if (!peer)
4147 		return false;
4148 
4149 	return peer->peer_bs_inact_flag == 1;
4150 }
4151 
4152 /**
4153  * dp_init_inact_timer: initialize the inact timer
4154  * @soc - SOC handle
4155  *
4156  * Return: void
4157  */
4158 void dp_init_inact_timer(struct dp_soc *soc)
4159 {
4160 	qdf_timer_init(soc->osdev, &soc->pdev_bs_inact_timer,
4161 		dp_txrx_peer_find_inact_timeout_handler,
4162 		(void *)soc, QDF_TIMER_TYPE_WAKE_APPS);
4163 }
4164 
4165 #else
4166 
4167 bool dp_set_inact_params(struct cdp_pdev *pdev, u_int16_t inact_check_interval,
4168 			 u_int16_t inact_normal, u_int16_t inact_overload)
4169 {
4170 	return false;
4171 }
4172 
4173 bool dp_start_inact_timer(struct cdp_pdev *pdev, bool enable)
4174 {
4175 	return false;
4176 }
4177 
4178 void dp_set_overload(struct cdp_pdev *pdev, bool overload)
4179 {
4180 	return;
4181 }
4182 
4183 void dp_init_inact_timer(struct dp_soc *soc)
4184 {
4185 	return;
4186 }
4187 
4188 bool dp_peer_is_inact(void *peer)
4189 {
4190 	return false;
4191 }
4192 #endif
4193 
4194 /*
4195  * dp_peer_unref_delete() - unref and delete peer
4196  * @peer_handle:		Datapath peer handle
4197  *
4198  */
4199 void dp_peer_unref_delete(void *peer_handle)
4200 {
4201 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4202 	struct dp_peer *bss_peer = NULL;
4203 	struct dp_vdev *vdev = peer->vdev;
4204 	struct dp_pdev *pdev = vdev->pdev;
4205 	struct dp_soc *soc = pdev->soc;
4206 	struct dp_peer *tmppeer;
4207 	int found = 0;
4208 	uint16_t peer_id;
4209 	uint16_t vdev_id;
4210 
4211 	/*
4212 	 * Hold the lock all the way from checking if the peer ref count
4213 	 * is zero until the peer references are removed from the hash
4214 	 * table and vdev list (if the peer ref count is zero).
4215 	 * This protects against a new HL tx operation starting to use the
4216 	 * peer object just after this function concludes it's done being used.
4217 	 * Furthermore, the lock needs to be held while checking whether the
4218 	 * vdev's list of peers is empty, to make sure that list is not modified
4219 	 * concurrently with the empty check.
4220 	 */
4221 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4222 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4223 		  "%s: peer %pK ref_cnt(before decrement): %d\n", __func__,
4224 		  peer, qdf_atomic_read(&peer->ref_cnt));
4225 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
4226 		peer_id = peer->peer_ids[0];
4227 		vdev_id = vdev->vdev_id;
4228 
4229 		/*
4230 		 * Make sure that the reference to the peer in
4231 		 * peer object map is removed
4232 		 */
4233 		if (peer_id != HTT_INVALID_PEER)
4234 			soc->peer_id_to_obj_map[peer_id] = NULL;
4235 
4236 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4237 			"Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
4238 
4239 		/* remove the reference to the peer from the hash table */
4240 		dp_peer_find_hash_remove(soc, peer);
4241 
4242 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
4243 			if (tmppeer == peer) {
4244 				found = 1;
4245 				break;
4246 			}
4247 		}
4248 		if (found) {
4249 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
4250 				peer_list_elem);
4251 		} else {
4252 			/*Ignoring the remove operation as peer not found*/
4253 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
4254 				"peer %pK not found in vdev (%pK)->peer_list:%pK",
4255 				peer, vdev, &peer->vdev->peer_list);
4256 		}
4257 
4258 		/* cleanup the peer data */
4259 		dp_peer_cleanup(vdev, peer);
4260 
4261 		/* check whether the parent vdev has no peers left */
4262 		if (TAILQ_EMPTY(&vdev->peer_list)) {
4263 			/*
4264 			 * Now that there are no references to the peer, we can
4265 			 * release the peer reference lock.
4266 			 */
4267 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4268 			/*
4269 			 * Check if the parent vdev was waiting for its peers
4270 			 * to be deleted, in order for it to be deleted too.
4271 			 */
4272 			if (vdev->delete.pending) {
4273 				ol_txrx_vdev_delete_cb vdev_delete_cb =
4274 					vdev->delete.callback;
4275 				void *vdev_delete_context =
4276 					vdev->delete.context;
4277 
4278 				QDF_TRACE(QDF_MODULE_ID_DP,
4279 					QDF_TRACE_LEVEL_INFO_HIGH,
4280 					FL("deleting vdev object %pK (%pM)"
4281 					" - its last peer is done"),
4282 					vdev, vdev->mac_addr.raw);
4283 				/* all peers are gone, go ahead and delete it */
4284 				dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
4285 								FLOW_TYPE_VDEV,
4286 								vdev_id);
4287 				dp_tx_vdev_detach(vdev);
4288 				QDF_TRACE(QDF_MODULE_ID_DP,
4289 					QDF_TRACE_LEVEL_INFO_HIGH,
4290 					FL("deleting vdev object %pK (%pM)"),
4291 					vdev, vdev->mac_addr.raw);
4292 
4293 				qdf_mem_free(vdev);
4294 				vdev = NULL;
4295 				if (vdev_delete_cb)
4296 					vdev_delete_cb(vdev_delete_context);
4297 			}
4298 		} else {
4299 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4300 		}
4301 
4302 		if (vdev) {
4303 			if (vdev->vap_bss_peer == peer) {
4304 				vdev->vap_bss_peer = NULL;
4305 			}
4306 		}
4307 
4308 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
4309 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
4310 					vdev_id, peer->mac_addr.raw);
4311 		}
4312 
4313 		if (!vdev || !vdev->vap_bss_peer) {
4314 			goto free_peer;
4315 		}
4316 
4317 #ifdef notyet
4318 		qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
4319 #else
4320 		bss_peer = vdev->vap_bss_peer;
4321 		DP_UPDATE_STATS(bss_peer, peer);
4322 
4323 free_peer:
4324 		qdf_mem_free(peer);
4325 
4326 #endif
4327 	} else {
4328 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4329 	}
4330 }
4331 
4332 /*
4333  * dp_peer_detach_wifi3() – Detach txrx peer
4334  * @peer_handle: Datapath peer handle
4335  * @bitmap: bitmap indicating special handling of request.
4336  *
4337  */
4338 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
4339 {
4340 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4341 
4342 	/* redirect the peer's rx delivery function to point to a
4343 	 * discard func
4344 	 */
4345 
4346 	peer->rx_opt_proc = dp_rx_discard;
4347 	peer->ctrl_peer = NULL;
4348 
4349 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4350 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
4351 
4352 	dp_local_peer_id_free(peer->vdev->pdev, peer);
4353 	qdf_spinlock_destroy(&peer->peer_info_lock);
4354 
4355 	/*
4356 	 * Remove the reference added during peer_attach.
4357 	 * The peer will still be left allocated until the
4358 	 * PEER_UNMAP message arrives to remove the other
4359 	 * reference, added by the PEER_MAP message.
4360 	 */
4361 	dp_peer_unref_delete(peer_handle);
4362 }
4363 
4364 /*
4365  * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer
4366  * @peer_handle:		Datapath peer handle
4367  *
4368  */
4369 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
4370 {
4371 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4372 	return vdev->mac_addr.raw;
4373 }
4374 
4375 /*
4376  * dp_vdev_set_wds() - Enable per packet stats
4377  * @vdev_handle: DP VDEV handle
4378  * @val: value
4379  *
4380  * Return: none
4381  */
4382 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
4383 {
4384 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4385 
4386 	vdev->wds_enabled = val;
4387 	return 0;
4388 }
4389 
4390 /*
4391  * dp_get_vdev_from_vdev_id_wifi3() – Detach txrx peer
4392  * @peer_handle:		Datapath peer handle
4393  *
4394  */
4395 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
4396 						uint8_t vdev_id)
4397 {
4398 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
4399 	struct dp_vdev *vdev = NULL;
4400 
4401 	if (qdf_unlikely(!pdev))
4402 		return NULL;
4403 
4404 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4405 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4406 		if (vdev->vdev_id == vdev_id)
4407 			break;
4408 	}
4409 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4410 
4411 	return (struct cdp_vdev *)vdev;
4412 }
4413 
4414 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
4415 {
4416 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4417 
4418 	return vdev->opmode;
4419 }
4420 
4421 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
4422 {
4423 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
4424 	struct dp_pdev *pdev = vdev->pdev;
4425 
4426 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
4427 }
4428 
4429 /**
4430  * dp_reset_monitor_mode() - Disable monitor mode
4431  * @pdev_handle: Datapath PDEV handle
4432  *
4433  * Return: 0 on success, not 0 on failure
4434  */
4435 static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
4436 {
4437 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4438 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4439 	struct dp_soc *soc = pdev->soc;
4440 	uint8_t pdev_id;
4441 	int mac_id;
4442 
4443 	pdev_id = pdev->pdev_id;
4444 	soc = pdev->soc;
4445 
4446 	qdf_spin_lock_bh(&pdev->mon_lock);
4447 
4448 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4449 
4450 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4451 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4452 
4453 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4454 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4455 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4456 
4457 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4458 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4459 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4460 	}
4461 
4462 	pdev->monitor_vdev = NULL;
4463 
4464 	qdf_spin_unlock_bh(&pdev->mon_lock);
4465 
4466 	return 0;
4467 }
4468 
4469 /**
4470  * dp_set_nac() - set peer_nac
4471  * @peer_handle: Datapath PEER handle
4472  *
4473  * Return: void
4474  */
4475 static void dp_set_nac(struct cdp_peer *peer_handle)
4476 {
4477 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
4478 
4479 	peer->nac = 1;
4480 }
4481 
4482 /**
4483  * dp_get_tx_pending() - read pending tx
4484  * @pdev_handle: Datapath PDEV handle
4485  *
4486  * Return: outstanding tx
4487  */
4488 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
4489 {
4490 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4491 
4492 	return qdf_atomic_read(&pdev->num_tx_outstanding);
4493 }
4494 
4495 /**
4496  * dp_get_peer_mac_from_peer_id() - get peer mac
4497  * @pdev_handle: Datapath PDEV handle
4498  * @peer_id: Peer ID
4499  * @peer_mac: MAC addr of PEER
4500  *
4501  * Return: void
4502  */
4503 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
4504 	uint32_t peer_id, uint8_t *peer_mac)
4505 {
4506 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4507 	struct dp_peer *peer;
4508 
4509 	if (pdev && peer_mac) {
4510 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
4511 		if (peer && peer->mac_addr.raw) {
4512 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
4513 					DP_MAC_ADDR_LEN);
4514 		}
4515 	}
4516 }
4517 
4518 /**
4519  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
4520  * @vdev_handle: Datapath VDEV handle
4521  * @smart_monitor: Flag to denote if its smart monitor mode
4522  *
4523  * Return: 0 on success, not 0 on failure
4524  */
4525 static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
4526 		uint8_t smart_monitor)
4527 {
4528 	/* Many monitor VAPs can exists in a system but only one can be up at
4529 	 * anytime
4530 	 */
4531 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4532 	struct dp_pdev *pdev;
4533 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4534 	struct dp_soc *soc;
4535 	uint8_t pdev_id;
4536 	int mac_id;
4537 
4538 	qdf_assert(vdev);
4539 
4540 	pdev = vdev->pdev;
4541 	pdev_id = pdev->pdev_id;
4542 	soc = pdev->soc;
4543 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4544 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
4545 		pdev, pdev_id, soc, vdev);
4546 
4547 	/*Check if current pdev's monitor_vdev exists */
4548 	if (pdev->monitor_vdev) {
4549 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4550 			"vdev=%pK\n", vdev);
4551 		qdf_assert(vdev);
4552 	}
4553 
4554 	pdev->monitor_vdev = vdev;
4555 
4556 	/* If smart monitor mode, do not configure monitor ring */
4557 	if (smart_monitor)
4558 		return QDF_STATUS_SUCCESS;
4559 
4560 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4561 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
4562 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4563 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4564 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4565 		pdev->mo_data_filter);
4566 
4567 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4568 
4569 	htt_tlv_filter.mpdu_start = 1;
4570 	htt_tlv_filter.msdu_start = 1;
4571 	htt_tlv_filter.packet = 1;
4572 	htt_tlv_filter.msdu_end = 1;
4573 	htt_tlv_filter.mpdu_end = 1;
4574 	htt_tlv_filter.packet_header = 1;
4575 	htt_tlv_filter.attention = 1;
4576 	htt_tlv_filter.ppdu_start = 0;
4577 	htt_tlv_filter.ppdu_end = 0;
4578 	htt_tlv_filter.ppdu_end_user_stats = 0;
4579 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4580 	htt_tlv_filter.ppdu_end_status_done = 0;
4581 	htt_tlv_filter.header_per_msdu = 1;
4582 	htt_tlv_filter.enable_fp =
4583 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4584 	htt_tlv_filter.enable_md = 0;
4585 	htt_tlv_filter.enable_mo =
4586 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4587 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4588 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4589 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4590 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4591 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4592 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4593 
4594 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4595 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4596 
4597 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4598 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4599 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4600 	}
4601 
4602 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4603 
4604 	htt_tlv_filter.mpdu_start = 1;
4605 	htt_tlv_filter.msdu_start = 0;
4606 	htt_tlv_filter.packet = 0;
4607 	htt_tlv_filter.msdu_end = 0;
4608 	htt_tlv_filter.mpdu_end = 0;
4609 	htt_tlv_filter.attention = 0;
4610 	htt_tlv_filter.ppdu_start = 1;
4611 	htt_tlv_filter.ppdu_end = 1;
4612 	htt_tlv_filter.ppdu_end_user_stats = 1;
4613 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4614 	htt_tlv_filter.ppdu_end_status_done = 1;
4615 	htt_tlv_filter.enable_fp = 1;
4616 	htt_tlv_filter.enable_md = 0;
4617 	htt_tlv_filter.enable_mo = 1;
4618 	if (pdev->mcopy_mode) {
4619 		htt_tlv_filter.packet_header = 1;
4620 	}
4621 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
4622 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
4623 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
4624 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
4625 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
4626 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
4627 
4628 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4629 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4630 						pdev->pdev_id);
4631 
4632 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4633 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4634 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4635 	}
4636 
4637 	return QDF_STATUS_SUCCESS;
4638 }
4639 
4640 /**
4641  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
4642  * @pdev_handle: Datapath PDEV handle
4643  * @filter_val: Flag to select Filter for monitor mode
4644  * Return: 0 on success, not 0 on failure
4645  */
4646 static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
4647 	struct cdp_monitor_filter *filter_val)
4648 {
4649 	/* Many monitor VAPs can exists in a system but only one can be up at
4650 	 * anytime
4651 	 */
4652 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4653 	struct dp_vdev *vdev = pdev->monitor_vdev;
4654 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
4655 	struct dp_soc *soc;
4656 	uint8_t pdev_id;
4657 	int mac_id;
4658 
4659 	pdev_id = pdev->pdev_id;
4660 	soc = pdev->soc;
4661 
4662 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
4663 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
4664 		pdev, pdev_id, soc, vdev);
4665 
4666 	/*Check if current pdev's monitor_vdev exists */
4667 	if (!pdev->monitor_vdev) {
4668 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4669 			"vdev=%pK\n", vdev);
4670 		qdf_assert(vdev);
4671 	}
4672 
4673 	/* update filter mode, type in pdev structure */
4674 	pdev->mon_filter_mode = filter_val->mode;
4675 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
4676 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
4677 	pdev->fp_data_filter = filter_val->fp_data;
4678 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
4679 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
4680 	pdev->mo_data_filter = filter_val->mo_data;
4681 
4682 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4683 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
4684 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4685 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
4686 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4687 		pdev->mo_data_filter);
4688 
4689 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4690 
4691 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4692 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4693 
4694 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4695 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4696 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4697 
4698 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4699 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4700 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4701 	}
4702 
4703 	htt_tlv_filter.mpdu_start = 1;
4704 	htt_tlv_filter.msdu_start = 1;
4705 	htt_tlv_filter.packet = 1;
4706 	htt_tlv_filter.msdu_end = 1;
4707 	htt_tlv_filter.mpdu_end = 1;
4708 	htt_tlv_filter.packet_header = 1;
4709 	htt_tlv_filter.attention = 1;
4710 	htt_tlv_filter.ppdu_start = 0;
4711 	htt_tlv_filter.ppdu_end = 0;
4712 	htt_tlv_filter.ppdu_end_user_stats = 0;
4713 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4714 	htt_tlv_filter.ppdu_end_status_done = 0;
4715 	htt_tlv_filter.header_per_msdu = 1;
4716 	htt_tlv_filter.enable_fp =
4717 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
4718 	htt_tlv_filter.enable_md = 0;
4719 	htt_tlv_filter.enable_mo =
4720 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4721 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4722 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4723 	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4724 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4725 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4726 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4727 
4728 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4729 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
4730 
4731 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4732 			pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4733 			RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
4734 	}
4735 
4736 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
4737 
4738 	htt_tlv_filter.mpdu_start = 1;
4739 	htt_tlv_filter.msdu_start = 0;
4740 	htt_tlv_filter.packet = 0;
4741 	htt_tlv_filter.msdu_end = 0;
4742 	htt_tlv_filter.mpdu_end = 0;
4743 	htt_tlv_filter.attention = 0;
4744 	htt_tlv_filter.ppdu_start = 1;
4745 	htt_tlv_filter.ppdu_end = 1;
4746 	htt_tlv_filter.ppdu_end_user_stats = 1;
4747 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4748 	htt_tlv_filter.ppdu_end_status_done = 1;
4749 	htt_tlv_filter.enable_fp = 1;
4750 	htt_tlv_filter.enable_md = 0;
4751 	htt_tlv_filter.enable_mo = 1;
4752 	if (pdev->mcopy_mode) {
4753 		htt_tlv_filter.packet_header = 1;
4754 	}
4755 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
4756 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
4757 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
4758 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
4759 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
4760 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
4761 
4762 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4763 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4764 						pdev->pdev_id);
4765 
4766 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4767 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4768 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
4769 	}
4770 
4771 	return QDF_STATUS_SUCCESS;
4772 }
4773 
4774 /**
4775  * dp_get_pdev_id_frm_pdev() - get pdev_id
4776  * @pdev_handle: Datapath PDEV handle
4777  *
4778  * Return: pdev_id
4779  */
4780 static
4781 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
4782 {
4783 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
4784 
4785 	return pdev->pdev_id;
4786 }
4787 
4788 /**
4789  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
4790  * @vdev_handle: Datapath VDEV handle
4791  * Return: true on ucast filter flag set
4792  */
4793 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
4794 {
4795 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4796 	struct dp_pdev *pdev;
4797 
4798 	pdev = vdev->pdev;
4799 
4800 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
4801 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
4802 		return true;
4803 
4804 	return false;
4805 }
4806 
4807 /**
4808  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
4809  * @vdev_handle: Datapath VDEV handle
4810  * Return: true on mcast filter flag set
4811  */
4812 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
4813 {
4814 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4815 	struct dp_pdev *pdev;
4816 
4817 	pdev = vdev->pdev;
4818 
4819 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
4820 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
4821 		return true;
4822 
4823 	return false;
4824 }
4825 
4826 /**
4827  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
4828  * @vdev_handle: Datapath VDEV handle
4829  * Return: true on non data filter flag set
4830  */
4831 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
4832 {
4833 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4834 	struct dp_pdev *pdev;
4835 
4836 	pdev = vdev->pdev;
4837 
4838 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
4839 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
4840 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
4841 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
4842 			return true;
4843 		}
4844 	}
4845 
4846 	return false;
4847 }
4848 
4849 #ifdef MESH_MODE_SUPPORT
4850 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
4851 {
4852 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4853 
4854 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4855 		FL("val %d"), val);
4856 	vdev->mesh_vdev = val;
4857 }
4858 
4859 /*
4860  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
4861  * @vdev_hdl: virtual device object
4862  * @val: value to be set
4863  *
4864  * Return: void
4865  */
4866 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
4867 {
4868 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
4869 
4870 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4871 		FL("val %d"), val);
4872 	vdev->mesh_rx_filter = val;
4873 }
4874 #endif
4875 
4876 /*
4877  * dp_aggregate_pdev_ctrl_frames_stats()- function to agreegate peer stats
4878  * Current scope is bar received count
4879  *
4880  * @pdev_handle: DP_PDEV handle
4881  *
4882  * Return: void
4883  */
4884 #define STATS_PROC_TIMEOUT        (HZ/1000)
4885 
4886 static void
4887 dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
4888 {
4889 	struct dp_vdev *vdev;
4890 	struct dp_peer *peer;
4891 	uint32_t waitcnt;
4892 
4893 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4894 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4895 			if (!peer) {
4896 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4897 					FL("DP Invalid Peer refernce"));
4898 				return;
4899 			}
4900 
4901 			if (peer->delete_in_progress) {
4902 				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4903 					FL("DP Peer deletion in progress"));
4904 				continue;
4905 			}
4906 
4907 			qdf_atomic_inc(&peer->ref_cnt);
4908 			waitcnt = 0;
4909 			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
4910 			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
4911 				&& waitcnt < 10) {
4912 				schedule_timeout_interruptible(
4913 						STATS_PROC_TIMEOUT);
4914 				waitcnt++;
4915 			}
4916 			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
4917 			dp_peer_unref_delete(peer);
4918 		}
4919 	}
4920 }
4921 
4922 /**
4923  * dp_rx_bar_stats_cb(): BAR received stats callback
4924  * @soc: SOC handle
4925  * @cb_ctxt: Call back context
4926  * @reo_status: Reo status
4927  *
4928  * return: void
4929  */
4930 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
4931 	union hal_reo_status *reo_status)
4932 {
4933 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
4934 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
4935 
4936 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
4937 		DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
4938 			queue_status->header.status);
4939 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
4940 		return;
4941 	}
4942 
4943 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
4944 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
4945 
4946 }
4947 
4948 /**
4949  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
4950  * @vdev: DP VDEV handle
4951  *
4952  * return: void
4953  */
4954 void dp_aggregate_vdev_stats(struct dp_vdev *vdev)
4955 {
4956 	struct dp_peer *peer = NULL;
4957 	struct dp_soc *soc = vdev->pdev->soc;
4958 
4959 	qdf_mem_set(&(vdev->stats.tx), sizeof(vdev->stats.tx), 0x0);
4960 	qdf_mem_set(&(vdev->stats.rx), sizeof(vdev->stats.rx), 0x0);
4961 
4962 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
4963 		DP_UPDATE_STATS(vdev, peer);
4964 
4965 	if (soc->cdp_soc.ol_ops->update_dp_stats)
4966 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
4967 			&vdev->stats, (uint16_t) vdev->vdev_id,
4968 			UPDATE_VDEV_STATS);
4969 
4970 }
4971 
4972 /**
4973  * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
4974  * @pdev: DP PDEV handle
4975  *
4976  * return: void
4977  */
4978 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
4979 {
4980 	struct dp_vdev *vdev = NULL;
4981 	struct dp_soc *soc = pdev->soc;
4982 
4983 	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
4984 	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
4985 	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
4986 
4987 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4988 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4989 
4990 		dp_aggregate_vdev_stats(vdev);
4991 		DP_UPDATE_STATS(pdev, vdev);
4992 
4993 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
4994 
4995 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
4996 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
4997 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
4998 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
4999 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
5000 		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
5001 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
5002 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host);
5003 		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
5004 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host);
5005 		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
5006 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
5007 		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
5008 		DP_STATS_AGGR(pdev, vdev,
5009 				tx_i.mcast_en.dropped_map_error);
5010 		DP_STATS_AGGR(pdev, vdev,
5011 				tx_i.mcast_en.dropped_self_mac);
5012 		DP_STATS_AGGR(pdev, vdev,
5013 				tx_i.mcast_en.dropped_send_fail);
5014 		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
5015 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
5016 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
5017 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
5018 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na);
5019 		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
5020 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
5021 		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
5022 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
5023 		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);
5024 
5025 		pdev->stats.tx_i.dropped.dropped_pkt.num =
5026 			pdev->stats.tx_i.dropped.dma_error +
5027 			pdev->stats.tx_i.dropped.ring_full +
5028 			pdev->stats.tx_i.dropped.enqueue_fail +
5029 			pdev->stats.tx_i.dropped.desc_na +
5030 			pdev->stats.tx_i.dropped.res_full;
5031 
5032 		pdev->stats.tx.last_ack_rssi =
5033 			vdev->stats.tx.last_ack_rssi;
5034 		pdev->stats.tx_i.tso.num_seg =
5035 			vdev->stats.tx_i.tso.num_seg;
5036 	}
5037 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5038 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5039 		soc->cdp_soc.ol_ops->update_dp_stats(pdev->ctrl_pdev,
5040 				&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);
5041 
5042 }
5043 
5044 /**
5045  * dp_vdev_getstats() - get vdev packet level stats
5046  * @vdev_handle: Datapath VDEV handle
5047  * @stats: cdp network device stats structure
5048  *
5049  * Return: void
5050  */
5051 static void dp_vdev_getstats(void *vdev_handle,
5052 		struct cdp_dev_stats *stats)
5053 {
5054 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5055 
5056 	dp_aggregate_vdev_stats(vdev);
5057 }
5058 
5059 
5060 /**
5061  * dp_pdev_getstats() - get pdev packet level stats
5062  * @pdev_handle: Datapath PDEV handle
5063  * @stats: cdp network device stats structure
5064  *
5065  * Return: void
5066  */
5067 static void dp_pdev_getstats(void *pdev_handle,
5068 		struct cdp_dev_stats *stats)
5069 {
5070 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5071 
5072 	dp_aggregate_pdev_stats(pdev);
5073 
5074 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
5075 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
5076 
5077 	stats->tx_errors = pdev->stats.tx.tx_failed +
5078 		pdev->stats.tx_i.dropped.dropped_pkt.num;
5079 	stats->tx_dropped = stats->tx_errors;
5080 
5081 	stats->rx_packets = pdev->stats.rx.unicast.num +
5082 		pdev->stats.rx.multicast.num +
5083 		pdev->stats.rx.bcast.num;
5084 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
5085 		pdev->stats.rx.multicast.bytes +
5086 		pdev->stats.rx.bcast.bytes;
5087 }
5088 
5089 /**
5090  * dp_get_device_stats() - get interface level packet stats
5091  * @handle: device handle
5092  * @stats: cdp network device stats structure
5093  * @type: device type pdev/vdev
5094  *
5095  * Return: void
5096  */
5097 static void dp_get_device_stats(void *handle,
5098 		struct cdp_dev_stats *stats, uint8_t type)
5099 {
5100 	switch (type) {
5101 	case UPDATE_VDEV_STATS:
5102 		dp_vdev_getstats(handle, stats);
5103 		break;
5104 	case UPDATE_PDEV_STATS:
5105 		dp_pdev_getstats(handle, stats);
5106 		break;
5107 	default:
5108 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5109 			"apstats cannot be updated for this input "
5110 			"type %d\n", type);
5111 		break;
5112 	}
5113 
5114 }
5115 
5116 
5117 /**
5118  * dp_print_pdev_tx_stats(): Print Pdev level TX stats
5119  * @pdev: DP_PDEV Handle
5120  *
5121  * Return:void
5122  */
5123 static inline void
5124 dp_print_pdev_tx_stats(struct dp_pdev *pdev)
5125 {
5126 	uint8_t index = 0;
5127 	DP_PRINT_STATS("PDEV Tx Stats:\n");
5128 	DP_PRINT_STATS("Received From Stack:");
5129 	DP_PRINT_STATS("	Packets = %d",
5130 			pdev->stats.tx_i.rcvd.num);
5131 	DP_PRINT_STATS("	Bytes = %llu",
5132 			pdev->stats.tx_i.rcvd.bytes);
5133 	DP_PRINT_STATS("Processed:");
5134 	DP_PRINT_STATS("	Packets = %d",
5135 			pdev->stats.tx_i.processed.num);
5136 	DP_PRINT_STATS("	Bytes = %llu",
5137 			pdev->stats.tx_i.processed.bytes);
5138 	DP_PRINT_STATS("Total Completions:");
5139 	DP_PRINT_STATS("	Packets = %u",
5140 			pdev->stats.tx.comp_pkt.num);
5141 	DP_PRINT_STATS("	Bytes = %llu",
5142 			pdev->stats.tx.comp_pkt.bytes);
5143 	DP_PRINT_STATS("Successful Completions:");
5144 	DP_PRINT_STATS("	Packets = %u",
5145 			pdev->stats.tx.tx_success.num);
5146 	DP_PRINT_STATS("	Bytes = %llu",
5147 			pdev->stats.tx.tx_success.bytes);
5148 	DP_PRINT_STATS("Dropped:");
5149 	DP_PRINT_STATS("	Total = %d",
5150 			pdev->stats.tx_i.dropped.dropped_pkt.num);
5151 	DP_PRINT_STATS("	Dma_map_error = %d",
5152 			pdev->stats.tx_i.dropped.dma_error);
5153 	DP_PRINT_STATS("	Ring Full = %d",
5154 			pdev->stats.tx_i.dropped.ring_full);
5155 	DP_PRINT_STATS("	Descriptor Not available = %d",
5156 			pdev->stats.tx_i.dropped.desc_na);
5157 	DP_PRINT_STATS("	HW enqueue failed= %d",
5158 			pdev->stats.tx_i.dropped.enqueue_fail);
5159 	DP_PRINT_STATS("	Resources Full = %d",
5160 			pdev->stats.tx_i.dropped.res_full);
5161 	DP_PRINT_STATS("	FW removed = %d",
5162 			pdev->stats.tx.dropped.fw_rem);
5163 	DP_PRINT_STATS("	FW removed transmitted = %d",
5164 			pdev->stats.tx.dropped.fw_rem_tx);
5165 	DP_PRINT_STATS("	FW removed untransmitted = %d",
5166 			pdev->stats.tx.dropped.fw_rem_notx);
5167 	DP_PRINT_STATS("	FW removed untransmitted fw_reason1 = %d",
5168 			pdev->stats.tx.dropped.fw_reason1);
5169 	DP_PRINT_STATS("	FW removed untransmitted fw_reason2 = %d",
5170 			pdev->stats.tx.dropped.fw_reason2);
5171 	DP_PRINT_STATS("	FW removed untransmitted fw_reason3 = %d",
5172 			pdev->stats.tx.dropped.fw_reason3);
5173 	DP_PRINT_STATS("	Aged Out from msdu/mpdu queues = %d",
5174 			pdev->stats.tx.dropped.age_out);
5175 	DP_PRINT_STATS("Scatter Gather:");
5176 	DP_PRINT_STATS("	Packets = %d",
5177 			pdev->stats.tx_i.sg.sg_pkt.num);
5178 	DP_PRINT_STATS("	Bytes = %llu",
5179 			pdev->stats.tx_i.sg.sg_pkt.bytes);
5180 	DP_PRINT_STATS("	Dropped By Host = %d",
5181 			pdev->stats.tx_i.sg.dropped_host);
5182 	DP_PRINT_STATS("	Dropped By Target = %d",
5183 			pdev->stats.tx_i.sg.dropped_target);
5184 	DP_PRINT_STATS("TSO:");
5185 	DP_PRINT_STATS("	Number of Segments = %d",
5186 			pdev->stats.tx_i.tso.num_seg);
5187 	DP_PRINT_STATS("	Packets = %d",
5188 			pdev->stats.tx_i.tso.tso_pkt.num);
5189 	DP_PRINT_STATS("	Bytes = %llu",
5190 			pdev->stats.tx_i.tso.tso_pkt.bytes);
5191 	DP_PRINT_STATS("	Dropped By Host = %d",
5192 			pdev->stats.tx_i.tso.dropped_host);
5193 	DP_PRINT_STATS("Mcast Enhancement:");
5194 	DP_PRINT_STATS("	Packets = %d",
5195 			pdev->stats.tx_i.mcast_en.mcast_pkt.num);
5196 	DP_PRINT_STATS("	Bytes = %llu",
5197 			pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
5198 	DP_PRINT_STATS("	Dropped: Map Errors = %d",
5199 			pdev->stats.tx_i.mcast_en.dropped_map_error);
5200 	DP_PRINT_STATS("	Dropped: Self Mac = %d",
5201 			pdev->stats.tx_i.mcast_en.dropped_self_mac);
5202 	DP_PRINT_STATS("	Dropped: Send Fail = %d",
5203 			pdev->stats.tx_i.mcast_en.dropped_send_fail);
5204 	DP_PRINT_STATS("	Unicast sent = %d",
5205 			pdev->stats.tx_i.mcast_en.ucast);
5206 	DP_PRINT_STATS("Raw:");
5207 	DP_PRINT_STATS("	Packets = %d",
5208 			pdev->stats.tx_i.raw.raw_pkt.num);
5209 	DP_PRINT_STATS("	Bytes = %llu",
5210 			pdev->stats.tx_i.raw.raw_pkt.bytes);
5211 	DP_PRINT_STATS("	DMA map error = %d",
5212 			pdev->stats.tx_i.raw.dma_map_error);
5213 	DP_PRINT_STATS("Reinjected:");
5214 	DP_PRINT_STATS("	Packets = %d",
5215 			pdev->stats.tx_i.reinject_pkts.num);
5216 	DP_PRINT_STATS("	Bytes = %llu\n",
5217 			pdev->stats.tx_i.reinject_pkts.bytes);
5218 	DP_PRINT_STATS("Inspected:");
5219 	DP_PRINT_STATS("	Packets = %d",
5220 			pdev->stats.tx_i.inspect_pkts.num);
5221 	DP_PRINT_STATS("	Bytes = %llu",
5222 			pdev->stats.tx_i.inspect_pkts.bytes);
5223 	DP_PRINT_STATS("Nawds Multicast:");
5224 	DP_PRINT_STATS("	Packets = %d",
5225 			pdev->stats.tx_i.nawds_mcast.num);
5226 	DP_PRINT_STATS("	Bytes = %llu",
5227 			pdev->stats.tx_i.nawds_mcast.bytes);
5228 	DP_PRINT_STATS("CCE Classified:");
5229 	DP_PRINT_STATS("	CCE Classified Packets: %u",
5230 			pdev->stats.tx_i.cce_classified);
5231 	DP_PRINT_STATS("	RAW CCE Classified Packets: %u",
5232 			pdev->stats.tx_i.cce_classified_raw);
5233 	DP_PRINT_STATS("Mesh stats:");
5234 	DP_PRINT_STATS("	frames to firmware: %u",
5235 			pdev->stats.tx_i.mesh.exception_fw);
5236 	DP_PRINT_STATS("	completions from fw: %u",
5237 			pdev->stats.tx_i.mesh.completion_fw);
5238 	DP_PRINT_STATS("PPDU stats counter");
5239 	for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
5240 		DP_PRINT_STATS("	Tag[%d] = %llu", index,
5241 				pdev->stats.ppdu_stats_counter[index]);
5242 	}
5243 }
5244 
5245 /**
5246  * dp_print_pdev_rx_stats(): Print Pdev level RX stats
5247  * @pdev: DP_PDEV Handle
5248  *
5249  * Return: void
5250  */
5251 static inline void
5252 dp_print_pdev_rx_stats(struct dp_pdev *pdev)
5253 {
5254 	DP_PRINT_STATS("PDEV Rx Stats:\n");
5255 	DP_PRINT_STATS("Received From HW (Per Rx Ring):");
5256 	DP_PRINT_STATS("	Packets = %d %d %d %d",
5257 			pdev->stats.rx.rcvd_reo[0].num,
5258 			pdev->stats.rx.rcvd_reo[1].num,
5259 			pdev->stats.rx.rcvd_reo[2].num,
5260 			pdev->stats.rx.rcvd_reo[3].num);
5261 	DP_PRINT_STATS("	Bytes = %llu %llu %llu %llu",
5262 			pdev->stats.rx.rcvd_reo[0].bytes,
5263 			pdev->stats.rx.rcvd_reo[1].bytes,
5264 			pdev->stats.rx.rcvd_reo[2].bytes,
5265 			pdev->stats.rx.rcvd_reo[3].bytes);
5266 	DP_PRINT_STATS("Replenished:");
5267 	DP_PRINT_STATS("	Packets = %d",
5268 			pdev->stats.replenish.pkts.num);
5269 	DP_PRINT_STATS("	Bytes = %llu",
5270 			pdev->stats.replenish.pkts.bytes);
5271 	DP_PRINT_STATS("	Buffers Added To Freelist = %d",
5272 			pdev->stats.buf_freelist);
5273 	DP_PRINT_STATS("	Low threshold intr = %d",
5274 			pdev->stats.replenish.low_thresh_intrs);
5275 	DP_PRINT_STATS("Dropped:");
5276 	DP_PRINT_STATS("	msdu_not_done = %d",
5277 			pdev->stats.dropped.msdu_not_done);
5278 	DP_PRINT_STATS("        mon_rx_drop = %d",
5279 			pdev->stats.dropped.mon_rx_drop);
5280 	DP_PRINT_STATS("Sent To Stack:");
5281 	DP_PRINT_STATS("	Packets = %d",
5282 			pdev->stats.rx.to_stack.num);
5283 	DP_PRINT_STATS("	Bytes = %llu",
5284 			pdev->stats.rx.to_stack.bytes);
5285 	DP_PRINT_STATS("Multicast/Broadcast:");
5286 	DP_PRINT_STATS("	Packets = %d",
5287 			(pdev->stats.rx.multicast.num +
5288 			pdev->stats.rx.bcast.num));
5289 	DP_PRINT_STATS("	Bytes = %llu",
5290 			(pdev->stats.rx.multicast.bytes +
5291 			pdev->stats.rx.bcast.bytes));
5292 	DP_PRINT_STATS("Errors:");
5293 	DP_PRINT_STATS("	Rxdma Ring Un-inititalized = %d",
5294 			pdev->stats.replenish.rxdma_err);
5295 	DP_PRINT_STATS("	Desc Alloc Failed: = %d",
5296 			pdev->stats.err.desc_alloc_fail);
5297 	DP_PRINT_STATS("	IP checksum error = %d",
5298 		       pdev->stats.err.ip_csum_err);
5299 	DP_PRINT_STATS("	TCP/UDP checksum error = %d",
5300 		       pdev->stats.err.tcp_udp_csum_err);
5301 
5302 	/* Get bar_recv_cnt */
5303 	dp_aggregate_pdev_ctrl_frames_stats(pdev);
5304 	DP_PRINT_STATS("BAR Received Count: = %d",
5305 			pdev->stats.rx.bar_recv_cnt);
5306 
5307 }
5308 
5309 /**
5310  * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
5311  * @pdev: DP_PDEV Handle
5312  *
5313  * Return: void
5314  */
5315 static inline void
5316 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
5317 {
5318 	struct cdp_pdev_mon_stats *rx_mon_stats;
5319 
5320 	rx_mon_stats = &pdev->rx_mon_stats;
5321 
5322 	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
5323 
5324 	dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
5325 
5326 	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
5327 		       rx_mon_stats->status_ppdu_done);
5328 	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
5329 		       rx_mon_stats->dest_ppdu_done);
5330 	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
5331 		       rx_mon_stats->dest_mpdu_done);
5332 	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
5333 		       rx_mon_stats->dest_mpdu_drop);
5334 }
5335 
5336 /**
5337  * dp_print_soc_tx_stats(): Print SOC level  stats
5338  * @soc DP_SOC Handle
5339  *
5340  * Return: void
5341  */
5342 static inline void
5343 dp_print_soc_tx_stats(struct dp_soc *soc)
5344 {
5345 	uint8_t desc_pool_id;
5346 	soc->stats.tx.desc_in_use = 0;
5347 
5348 	DP_PRINT_STATS("SOC Tx Stats:\n");
5349 
5350 	for (desc_pool_id = 0;
5351 	     desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
5352 	     desc_pool_id++)
5353 		soc->stats.tx.desc_in_use +=
5354 			soc->tx_desc[desc_pool_id].num_allocated;
5355 
5356 	DP_PRINT_STATS("Tx Descriptors In Use = %d",
5357 			soc->stats.tx.desc_in_use);
5358 	DP_PRINT_STATS("Invalid peer:");
5359 	DP_PRINT_STATS("	Packets = %d",
5360 			soc->stats.tx.tx_invalid_peer.num);
5361 	DP_PRINT_STATS("	Bytes = %llu",
5362 			soc->stats.tx.tx_invalid_peer.bytes);
5363 	DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
5364 			soc->stats.tx.tcl_ring_full[0],
5365 			soc->stats.tx.tcl_ring_full[1],
5366 			soc->stats.tx.tcl_ring_full[2]);
5367 
5368 }
5369 /**
5370  * dp_print_soc_rx_stats: Print SOC level Rx stats
5371  * @soc: DP_SOC Handle
5372  *
5373  * Return:void
5374  */
5375 static inline void
5376 dp_print_soc_rx_stats(struct dp_soc *soc)
5377 {
5378 	uint32_t i;
5379 	char reo_error[DP_REO_ERR_LENGTH];
5380 	char rxdma_error[DP_RXDMA_ERR_LENGTH];
5381 	uint8_t index = 0;
5382 
5383 	DP_PRINT_STATS("SOC Rx Stats:\n");
5384 	DP_PRINT_STATS("Errors:\n");
5385 	DP_PRINT_STATS("Rx Decrypt Errors = %d",
5386 			(soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
5387 			soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
5388 	DP_PRINT_STATS("Invalid RBM = %d",
5389 			soc->stats.rx.err.invalid_rbm);
5390 	DP_PRINT_STATS("Invalid Vdev = %d",
5391 			soc->stats.rx.err.invalid_vdev);
5392 	DP_PRINT_STATS("Invalid Pdev = %d",
5393 			soc->stats.rx.err.invalid_pdev);
5394 	DP_PRINT_STATS("Invalid Peer = %d",
5395 			soc->stats.rx.err.rx_invalid_peer.num);
5396 	DP_PRINT_STATS("HAL Ring Access Fail = %d",
5397 			soc->stats.rx.err.hal_ring_access_fail);
5398 
5399 	for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
5400 		index += qdf_snprint(&rxdma_error[index],
5401 				DP_RXDMA_ERR_LENGTH - index,
5402 				" %d", soc->stats.rx.err.rxdma_error[i]);
5403 	}
5404 	DP_PRINT_STATS("RXDMA Error (0-31):%s",
5405 			rxdma_error);
5406 
5407 	index = 0;
5408 	for (i = 0; i < HAL_REO_ERR_MAX; i++) {
5409 		index += qdf_snprint(&reo_error[index],
5410 				DP_REO_ERR_LENGTH - index,
5411 				" %d", soc->stats.rx.err.reo_error[i]);
5412 	}
5413 	DP_PRINT_STATS("REO Error(0-14):%s",
5414 			reo_error);
5415 }
5416 
5417 
5418 /**
5419  * dp_print_ring_stat_from_hal(): Print hal level ring stats
5420  * @soc: DP_SOC handle
5421  * @srng: DP_SRNG handle
5422  * @ring_name: SRNG name
5423  *
5424  * Return: void
5425  */
5426 static inline void
5427 dp_print_ring_stat_from_hal(struct dp_soc *soc,  struct dp_srng *srng,
5428 	char *ring_name)
5429 {
5430 	uint32_t tailp;
5431 	uint32_t headp;
5432 
5433 	if (srng->hal_srng != NULL) {
5434 		hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
5435 		DP_PRINT_STATS("%s : Head pointer = %d  Tail Pointer = %d\n",
5436 				ring_name, headp, tailp);
5437 	}
5438 }
5439 
5440 /**
5441  * dp_print_ring_stats(): Print tail and head pointer
5442  * @pdev: DP_PDEV handle
5443  *
5444  * Return:void
5445  */
5446 static inline void
5447 dp_print_ring_stats(struct dp_pdev *pdev)
5448 {
5449 	uint32_t i;
5450 	char ring_name[STR_MAXLEN + 1];
5451 	int mac_id;
5452 
5453 	dp_print_ring_stat_from_hal(pdev->soc,
5454 			&pdev->soc->reo_exception_ring,
5455 			"Reo Exception Ring");
5456 	dp_print_ring_stat_from_hal(pdev->soc,
5457 			&pdev->soc->reo_reinject_ring,
5458 			"Reo Inject Ring");
5459 	dp_print_ring_stat_from_hal(pdev->soc,
5460 			&pdev->soc->reo_cmd_ring,
5461 			"Reo Command Ring");
5462 	dp_print_ring_stat_from_hal(pdev->soc,
5463 			&pdev->soc->reo_status_ring,
5464 			"Reo Status Ring");
5465 	dp_print_ring_stat_from_hal(pdev->soc,
5466 			&pdev->soc->rx_rel_ring,
5467 			"Rx Release ring");
5468 	dp_print_ring_stat_from_hal(pdev->soc,
5469 			&pdev->soc->tcl_cmd_ring,
5470 			"Tcl command Ring");
5471 	dp_print_ring_stat_from_hal(pdev->soc,
5472 			&pdev->soc->tcl_status_ring,
5473 			"Tcl Status Ring");
5474 	dp_print_ring_stat_from_hal(pdev->soc,
5475 			&pdev->soc->wbm_desc_rel_ring,
5476 			"Wbm Desc Rel Ring");
5477 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5478 		snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
5479 		dp_print_ring_stat_from_hal(pdev->soc,
5480 				&pdev->soc->reo_dest_ring[i],
5481 				ring_name);
5482 	}
5483 	for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
5484 		snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
5485 		dp_print_ring_stat_from_hal(pdev->soc,
5486 				&pdev->soc->tcl_data_ring[i],
5487 				ring_name);
5488 	}
5489 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
5490 		snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
5491 		dp_print_ring_stat_from_hal(pdev->soc,
5492 				&pdev->soc->tx_comp_ring[i],
5493 				ring_name);
5494 	}
5495 	dp_print_ring_stat_from_hal(pdev->soc,
5496 			&pdev->rx_refill_buf_ring,
5497 			"Rx Refill Buf Ring");
5498 
5499 	dp_print_ring_stat_from_hal(pdev->soc,
5500 			&pdev->rx_refill_buf_ring2,
5501 			"Second Rx Refill Buf Ring");
5502 
5503 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5504 		dp_print_ring_stat_from_hal(pdev->soc,
5505 				&pdev->rxdma_mon_buf_ring[mac_id],
5506 				"Rxdma Mon Buf Ring");
5507 		dp_print_ring_stat_from_hal(pdev->soc,
5508 				&pdev->rxdma_mon_dst_ring[mac_id],
5509 				"Rxdma Mon Dst Ring");
5510 		dp_print_ring_stat_from_hal(pdev->soc,
5511 				&pdev->rxdma_mon_status_ring[mac_id],
5512 				"Rxdma Mon Status Ring");
5513 		dp_print_ring_stat_from_hal(pdev->soc,
5514 				&pdev->rxdma_mon_desc_ring[mac_id],
5515 				"Rxdma mon desc Ring");
5516 	}
5517 
5518 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5519 		snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
5520 		dp_print_ring_stat_from_hal(pdev->soc,
5521 			&pdev->rxdma_err_dst_ring[i],
5522 			ring_name);
5523 	}
5524 
5525 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
5526 		snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
5527 		dp_print_ring_stat_from_hal(pdev->soc,
5528 				&pdev->rx_mac_buf_ring[i],
5529 				ring_name);
5530 	}
5531 }
5532 
5533 /**
5534  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
5535  * @vdev: DP_VDEV handle
5536  *
5537  * Return:void
5538  */
5539 static inline void
5540 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
5541 {
5542 	struct dp_peer *peer = NULL;
5543 	struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
5544 
5545 	DP_STATS_CLR(vdev->pdev);
5546 	DP_STATS_CLR(vdev->pdev->soc);
5547 	DP_STATS_CLR(vdev);
5548 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5549 		if (!peer)
5550 			return;
5551 		DP_STATS_CLR(peer);
5552 
5553 		if (soc->cdp_soc.ol_ops->update_dp_stats) {
5554 			soc->cdp_soc.ol_ops->update_dp_stats(
5555 					vdev->pdev->ctrl_pdev,
5556 					&peer->stats,
5557 					peer->peer_ids[0],
5558 					UPDATE_PEER_STATS);
5559 		}
5560 
5561 	}
5562 
5563 	if (soc->cdp_soc.ol_ops->update_dp_stats)
5564 		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->ctrl_pdev,
5565 				&vdev->stats, (uint16_t)vdev->vdev_id,
5566 				UPDATE_VDEV_STATS);
5567 }
5568 
5569 /**
5570  * dp_print_rx_rates(): Print Rx rate stats
5571  * @vdev: DP_VDEV handle
5572  *
5573  * Return:void
5574  */
5575 static inline void
5576 dp_print_rx_rates(struct dp_vdev *vdev)
5577 {
5578 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5579 	uint8_t i, mcs, pkt_type;
5580 	uint8_t index = 0;
5581 	char nss[DP_NSS_LENGTH];
5582 
5583 	DP_PRINT_STATS("Rx Rate Info:\n");
5584 
5585 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5586 		index = 0;
5587 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5588 			if (!dp_rate_string[pkt_type][mcs].valid)
5589 				continue;
5590 
5591 			DP_PRINT_STATS("	%s = %d",
5592 					dp_rate_string[pkt_type][mcs].mcs_type,
5593 					pdev->stats.rx.pkt_type[pkt_type].
5594 					mcs_count[mcs]);
5595 		}
5596 
5597 		DP_PRINT_STATS("\n");
5598 	}
5599 
5600 	index = 0;
5601 	for (i = 0; i < SS_COUNT; i++) {
5602 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5603 				" %d", pdev->stats.rx.nss[i]);
5604 	}
5605 	DP_PRINT_STATS("NSS(1-8) = %s",
5606 			nss);
5607 
5608 	DP_PRINT_STATS("SGI ="
5609 			" 0.8us %d,"
5610 			" 0.4us %d,"
5611 			" 1.6us %d,"
5612 			" 3.2us %d,",
5613 			pdev->stats.rx.sgi_count[0],
5614 			pdev->stats.rx.sgi_count[1],
5615 			pdev->stats.rx.sgi_count[2],
5616 			pdev->stats.rx.sgi_count[3]);
5617 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
5618 			pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
5619 			pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
5620 	DP_PRINT_STATS("Reception Type ="
5621 			" SU: %d,"
5622 			" MU_MIMO:%d,"
5623 			" MU_OFDMA:%d,"
5624 			" MU_OFDMA_MIMO:%d\n",
5625 			pdev->stats.rx.reception_type[0],
5626 			pdev->stats.rx.reception_type[1],
5627 			pdev->stats.rx.reception_type[2],
5628 			pdev->stats.rx.reception_type[3]);
5629 	DP_PRINT_STATS("Aggregation:\n");
5630 	DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
5631 			pdev->stats.rx.ampdu_cnt);
5632 	DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
5633 			pdev->stats.rx.non_ampdu_cnt);
5634 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
5635 			pdev->stats.rx.amsdu_cnt);
5636 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
5637 			pdev->stats.rx.non_amsdu_cnt);
5638 }
5639 
5640 /**
5641  * dp_print_tx_rates(): Print tx rates
5642  * @vdev: DP_VDEV handle
5643  *
5644  * Return:void
5645  */
5646 static inline void
5647 dp_print_tx_rates(struct dp_vdev *vdev)
5648 {
5649 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5650 	uint8_t mcs, pkt_type;
5651 	uint32_t index;
5652 
5653 	DP_PRINT_STATS("Tx Rate Info:\n");
5654 
5655 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5656 		index = 0;
5657 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5658 			if (!dp_rate_string[pkt_type][mcs].valid)
5659 				continue;
5660 
5661 			DP_PRINT_STATS("	%s = %d",
5662 					dp_rate_string[pkt_type][mcs].mcs_type,
5663 					pdev->stats.tx.pkt_type[pkt_type].
5664 					mcs_count[mcs]);
5665 		}
5666 
5667 		DP_PRINT_STATS("\n");
5668 	}
5669 
5670 	DP_PRINT_STATS("SGI ="
5671 			" 0.8us %d"
5672 			" 0.4us %d"
5673 			" 1.6us %d"
5674 			" 3.2us %d",
5675 			pdev->stats.tx.sgi_count[0],
5676 			pdev->stats.tx.sgi_count[1],
5677 			pdev->stats.tx.sgi_count[2],
5678 			pdev->stats.tx.sgi_count[3]);
5679 
5680 	DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
5681 			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
5682 			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
5683 
5684 	DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
5685 	DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
5686 	DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
5687 	DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
5688 	DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
5689 
5690 	DP_PRINT_STATS("Aggregation:\n");
5691 	DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
5692 			pdev->stats.tx.amsdu_cnt);
5693 	DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
5694 			pdev->stats.tx.non_amsdu_cnt);
5695 }
5696 
5697 /**
5698  * dp_print_peer_stats():print peer stats
5699  * @peer: DP_PEER handle
5700  *
5701  * return void
5702  */
5703 static inline void dp_print_peer_stats(struct dp_peer *peer)
5704 {
5705 	uint8_t i, mcs, pkt_type;
5706 	uint32_t index;
5707 	char nss[DP_NSS_LENGTH];
5708 	DP_PRINT_STATS("Node Tx Stats:\n");
5709 	DP_PRINT_STATS("Total Packet Completions = %d",
5710 			peer->stats.tx.comp_pkt.num);
5711 	DP_PRINT_STATS("Total Bytes Completions = %llu",
5712 			peer->stats.tx.comp_pkt.bytes);
5713 	DP_PRINT_STATS("Success Packets = %d",
5714 			peer->stats.tx.tx_success.num);
5715 	DP_PRINT_STATS("Success Bytes = %llu",
5716 			peer->stats.tx.tx_success.bytes);
5717 	DP_PRINT_STATS("Unicast Success Packets = %d",
5718 			peer->stats.tx.ucast.num);
5719 	DP_PRINT_STATS("Unicast Success Bytes = %llu",
5720 			peer->stats.tx.ucast.bytes);
5721 	DP_PRINT_STATS("Multicast Success Packets = %d",
5722 			peer->stats.tx.mcast.num);
5723 	DP_PRINT_STATS("Multicast Success Bytes = %llu",
5724 			peer->stats.tx.mcast.bytes);
5725 	DP_PRINT_STATS("Broadcast Success Packets = %d",
5726 			peer->stats.tx.bcast.num);
5727 	DP_PRINT_STATS("Broadcast Success Bytes = %llu",
5728 			peer->stats.tx.bcast.bytes);
5729 	DP_PRINT_STATS("Packets Failed = %d",
5730 			peer->stats.tx.tx_failed);
5731 	DP_PRINT_STATS("Packets In OFDMA = %d",
5732 			peer->stats.tx.ofdma);
5733 	DP_PRINT_STATS("Packets In STBC = %d",
5734 			peer->stats.tx.stbc);
5735 	DP_PRINT_STATS("Packets In LDPC = %d",
5736 			peer->stats.tx.ldpc);
5737 	DP_PRINT_STATS("Packet Retries = %d",
5738 			peer->stats.tx.retries);
5739 	DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
5740 			peer->stats.tx.amsdu_cnt);
5741 	DP_PRINT_STATS("Last Packet RSSI = %d",
5742 			peer->stats.tx.last_ack_rssi);
5743 	DP_PRINT_STATS("Dropped At FW: Removed = %d",
5744 			peer->stats.tx.dropped.fw_rem);
5745 	DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
5746 			peer->stats.tx.dropped.fw_rem_tx);
5747 	DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
5748 			peer->stats.tx.dropped.fw_rem_notx);
5749 	DP_PRINT_STATS("Dropped : Age Out = %d",
5750 			peer->stats.tx.dropped.age_out);
5751 	DP_PRINT_STATS("NAWDS : ");
5752 	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
5753 			peer->stats.tx.nawds_mcast_drop);
5754 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Count = %d",
5755 			peer->stats.tx.nawds_mcast.num);
5756 	DP_PRINT_STATS("	Nawds multicast  Tx Packet Bytes = %llu",
5757 			peer->stats.tx.nawds_mcast.bytes);
5758 
5759 	DP_PRINT_STATS("Rate Info:");
5760 
5761 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5762 		index = 0;
5763 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5764 			if (!dp_rate_string[pkt_type][mcs].valid)
5765 				continue;
5766 
5767 			DP_PRINT_STATS("	%s = %d",
5768 					dp_rate_string[pkt_type][mcs].mcs_type,
5769 					peer->stats.tx.pkt_type[pkt_type].
5770 					mcs_count[mcs]);
5771 		}
5772 
5773 		DP_PRINT_STATS("\n");
5774 	}
5775 
5776 	DP_PRINT_STATS("SGI = "
5777 			" 0.8us %d"
5778 			" 0.4us %d"
5779 			" 1.6us %d"
5780 			" 3.2us %d",
5781 			peer->stats.tx.sgi_count[0],
5782 			peer->stats.tx.sgi_count[1],
5783 			peer->stats.tx.sgi_count[2],
5784 			peer->stats.tx.sgi_count[3]);
5785 	DP_PRINT_STATS("Excess Retries per AC ");
5786 	DP_PRINT_STATS("	 Best effort = %d",
5787 			peer->stats.tx.excess_retries_per_ac[0]);
5788 	DP_PRINT_STATS("	 Background= %d",
5789 			peer->stats.tx.excess_retries_per_ac[1]);
5790 	DP_PRINT_STATS("	 Video = %d",
5791 			peer->stats.tx.excess_retries_per_ac[2]);
5792 	DP_PRINT_STATS("	 Voice = %d",
5793 			peer->stats.tx.excess_retries_per_ac[3]);
5794 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
5795 			peer->stats.tx.bw[2], peer->stats.tx.bw[3],
5796 			peer->stats.tx.bw[4], peer->stats.tx.bw[5]);
5797 
5798 	index = 0;
5799 	for (i = 0; i < SS_COUNT; i++) {
5800 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5801 				" %d", peer->stats.tx.nss[i]);
5802 	}
5803 	DP_PRINT_STATS("NSS(1-8) = %s",
5804 			nss);
5805 
5806 	DP_PRINT_STATS("Aggregation:");
5807 	DP_PRINT_STATS("	Number of Msdu's Part of Amsdu = %d",
5808 			peer->stats.tx.amsdu_cnt);
5809 	DP_PRINT_STATS("	Number of Msdu's With No Msdu Level Aggregation = %d\n",
5810 			peer->stats.tx.non_amsdu_cnt);
5811 
5812 	DP_PRINT_STATS("Node Rx Stats:");
5813 	DP_PRINT_STATS("Packets Sent To Stack = %d",
5814 			peer->stats.rx.to_stack.num);
5815 	DP_PRINT_STATS("Bytes Sent To Stack = %llu",
5816 			peer->stats.rx.to_stack.bytes);
5817 	for (i = 0; i <  CDP_MAX_RX_RINGS; i++) {
5818 		DP_PRINT_STATS("Ring Id = %d", i);
5819 		DP_PRINT_STATS("	Packets Received = %d",
5820 				peer->stats.rx.rcvd_reo[i].num);
5821 		DP_PRINT_STATS("	Bytes Received = %llu",
5822 				peer->stats.rx.rcvd_reo[i].bytes);
5823 	}
5824 	DP_PRINT_STATS("Multicast Packets Received = %d",
5825 			peer->stats.rx.multicast.num);
5826 	DP_PRINT_STATS("Multicast Bytes Received = %llu",
5827 			peer->stats.rx.multicast.bytes);
5828 	DP_PRINT_STATS("Broadcast Packets Received = %d",
5829 			peer->stats.rx.bcast.num);
5830 	DP_PRINT_STATS("Broadcast Bytes Received = %llu",
5831 			peer->stats.rx.bcast.bytes);
5832 	DP_PRINT_STATS("Intra BSS Packets Received = %d",
5833 			peer->stats.rx.intra_bss.pkts.num);
5834 	DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
5835 			peer->stats.rx.intra_bss.pkts.bytes);
5836 	DP_PRINT_STATS("Raw Packets Received = %d",
5837 			peer->stats.rx.raw.num);
5838 	DP_PRINT_STATS("Raw Bytes Received = %llu",
5839 			peer->stats.rx.raw.bytes);
5840 	DP_PRINT_STATS("Errors: MIC Errors = %d",
5841 			peer->stats.rx.err.mic_err);
5842 	DP_PRINT_STATS("Erros: Decryption Errors = %d",
5843 			peer->stats.rx.err.decrypt_err);
5844 	DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
5845 			peer->stats.rx.non_ampdu_cnt);
5846 	DP_PRINT_STATS("Msdu's Recived As Ampdu = %d",
5847 			peer->stats.rx.ampdu_cnt);
5848 	DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
5849 			peer->stats.rx.non_amsdu_cnt);
5850 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
5851 			peer->stats.rx.amsdu_cnt);
5852 	DP_PRINT_STATS("NAWDS : ");
5853 	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
5854 			peer->stats.rx.nawds_mcast_drop);
5855 	DP_PRINT_STATS("SGI ="
5856 			" 0.8us %d"
5857 			" 0.4us %d"
5858 			" 1.6us %d"
5859 			" 3.2us %d",
5860 			peer->stats.rx.sgi_count[0],
5861 			peer->stats.rx.sgi_count[1],
5862 			peer->stats.rx.sgi_count[2],
5863 			peer->stats.rx.sgi_count[3]);
5864 	DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
5865 			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
5866 			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
5867 	DP_PRINT_STATS("Reception Type ="
5868 			" SU %d,"
5869 			" MU_MIMO %d,"
5870 			" MU_OFDMA %d,"
5871 			" MU_OFDMA_MIMO %d",
5872 			peer->stats.rx.reception_type[0],
5873 			peer->stats.rx.reception_type[1],
5874 			peer->stats.rx.reception_type[2],
5875 			peer->stats.rx.reception_type[3]);
5876 
5877 
5878 	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5879 		index = 0;
5880 		for (mcs = 0; mcs < MAX_MCS; mcs++) {
5881 			if (!dp_rate_string[pkt_type][mcs].valid)
5882 				continue;
5883 
5884 			DP_PRINT_STATS("	%s = %d",
5885 					dp_rate_string[pkt_type][mcs].mcs_type,
5886 					peer->stats.rx.pkt_type[pkt_type].
5887 					mcs_count[mcs]);
5888 		}
5889 
5890 		DP_PRINT_STATS("\n");
5891 	}
5892 
5893 	index = 0;
5894 	for (i = 0; i < SS_COUNT; i++) {
5895 		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5896 				" %d", peer->stats.rx.nss[i]);
5897 	}
5898 	DP_PRINT_STATS("NSS(1-8) = %s",
5899 			nss);
5900 
5901 	DP_PRINT_STATS("Aggregation:");
5902 	DP_PRINT_STATS("	Msdu's Part of Ampdu = %d",
5903 			peer->stats.rx.ampdu_cnt);
5904 	DP_PRINT_STATS("	Msdu's With No Mpdu Level Aggregation = %d",
5905 			peer->stats.rx.non_ampdu_cnt);
5906 	DP_PRINT_STATS("	Msdu's Part of Amsdu = %d",
5907 			peer->stats.rx.amsdu_cnt);
5908 	DP_PRINT_STATS("	Msdu's With No Msdu Level Aggregation = %d",
5909 			peer->stats.rx.non_amsdu_cnt);
5910 }
5911 
5912 /**
5913  * dp_print_host_stats()- Function to print the stats aggregated at host
5914  * @vdev_handle: DP_VDEV handle
5915  * @type: host stats type
5916  *
5917  * Available Stat types
5918  * TXRX_CLEAR_STATS  : Clear the stats
5919  * TXRX_RX_RATE_STATS: Print Rx Rate Info
5920  * TXRX_TX_RATE_STATS: Print Tx Rate Info
5921  * TXRX_TX_HOST_STATS: Print Tx Stats
5922  * TXRX_RX_HOST_STATS: Print Rx Stats
5923  * TXRX_AST_STATS: Print AST Stats
5924  * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
5925  *
5926  * Return: 0 on success, print error message in case of failure
5927  */
5928 static int
5929 dp_print_host_stats(struct cdp_vdev *vdev_handle, enum cdp_host_txrx_stats type)
5930 {
5931 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5932 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5933 
5934 	dp_aggregate_pdev_stats(pdev);
5935 
5936 	switch (type) {
5937 	case TXRX_CLEAR_STATS:
5938 		dp_txrx_host_stats_clr(vdev);
5939 		break;
5940 	case TXRX_RX_RATE_STATS:
5941 		dp_print_rx_rates(vdev);
5942 		break;
5943 	case TXRX_TX_RATE_STATS:
5944 		dp_print_tx_rates(vdev);
5945 		break;
5946 	case TXRX_TX_HOST_STATS:
5947 		dp_print_pdev_tx_stats(pdev);
5948 		dp_print_soc_tx_stats(pdev->soc);
5949 		break;
5950 	case TXRX_RX_HOST_STATS:
5951 		dp_print_pdev_rx_stats(pdev);
5952 		dp_print_soc_rx_stats(pdev->soc);
5953 		break;
5954 	case TXRX_AST_STATS:
5955 		dp_print_ast_stats(pdev->soc);
5956 		dp_print_peer_table(vdev);
5957 		break;
5958 	case TXRX_SRNG_PTR_STATS:
5959 		dp_print_ring_stats(pdev);
5960 		break;
5961 	case TXRX_RX_MON_STATS:
5962 		dp_print_pdev_rx_mon_stats(pdev);
5963 		break;
5964 	default:
5965 		DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
5966 		break;
5967 	}
5968 	return 0;
5969 }
5970 
5971 /*
5972  * dp_get_host_peer_stats()- function to print peer stats
5973  * @pdev_handle: DP_PDEV handle
5974  * @mac_addr: mac address of the peer
5975  *
5976  * Return: void
5977  */
5978 static void
5979 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
5980 {
5981 	struct dp_peer *peer;
5982 	uint8_t local_id;
5983 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
5984 			&local_id);
5985 
5986 	if (!peer) {
5987 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5988 			"%s: Invalid peer\n", __func__);
5989 		return;
5990 	}
5991 
5992 	dp_print_peer_stats(peer);
5993 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
5994 	return;
5995 }
5996 
5997 /*
5998  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
5999  * @pdev: DP_PDEV handle
6000  *
6001  * Return: void
6002  */
6003 static void
6004 dp_ppdu_ring_reset(struct dp_pdev *pdev)
6005 {
6006 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6007 	int mac_id;
6008 
6009 	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
6010 
6011 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6012 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6013 							pdev->pdev_id);
6014 
6015 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6016 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6017 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6018 	}
6019 }
6020 
6021 /*
6022  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
6023  * @pdev: DP_PDEV handle
6024  *
6025  * Return: void
6026  */
6027 static void
6028 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
6029 {
6030 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
6031 	int mac_id;
6032 
6033 	htt_tlv_filter.mpdu_start = 1;
6034 	htt_tlv_filter.msdu_start = 0;
6035 	htt_tlv_filter.packet = 0;
6036 	htt_tlv_filter.msdu_end = 0;
6037 	htt_tlv_filter.mpdu_end = 0;
6038 	htt_tlv_filter.attention = 0;
6039 	htt_tlv_filter.ppdu_start = 1;
6040 	htt_tlv_filter.ppdu_end = 1;
6041 	htt_tlv_filter.ppdu_end_user_stats = 1;
6042 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6043 	htt_tlv_filter.ppdu_end_status_done = 1;
6044 	htt_tlv_filter.enable_fp = 1;
6045 	htt_tlv_filter.enable_md = 0;
6046 	if (pdev->mcopy_mode) {
6047 		htt_tlv_filter.packet_header = 1;
6048 		htt_tlv_filter.enable_mo = 1;
6049 	}
6050 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6051 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6052 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6053 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6054 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6055 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6056 
6057 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6058 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6059 						pdev->pdev_id);
6060 
6061 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6062 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6063 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6064 	}
6065 }
6066 
6067 /*
6068  *dp_set_bpr_enable() - API to enable/disable bpr feature
6069  *@pdev_handle: DP_PDEV handle.
6070  *@val: Provided value.
6071  *
6072  *Return: void
6073  */
6074 static void
6075 dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
6076 {
6077 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6078 
6079 	switch (val) {
6080 	case CDP_BPR_DISABLE:
6081 		pdev->bpr_enable = CDP_BPR_DISABLE;
6082 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
6083 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
6084 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6085 		} else if (pdev->enhanced_stats_en &&
6086 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6087 			   !pdev->pktlog_ppdu_stats) {
6088 			dp_h2t_cfg_stats_msg_send(pdev,
6089 						  DP_PPDU_STATS_CFG_ENH_STATS,
6090 						  pdev->pdev_id);
6091 		}
6092 		break;
6093 	case CDP_BPR_ENABLE:
6094 		pdev->bpr_enable = CDP_BPR_ENABLE;
6095 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
6096 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
6097 			dp_h2t_cfg_stats_msg_send(pdev,
6098 						  DP_PPDU_STATS_CFG_BPR,
6099 						  pdev->pdev_id);
6100 		} else if (pdev->enhanced_stats_en &&
6101 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
6102 			   !pdev->pktlog_ppdu_stats) {
6103 			dp_h2t_cfg_stats_msg_send(pdev,
6104 						  DP_PPDU_STATS_CFG_BPR_ENH,
6105 						  pdev->pdev_id);
6106 		} else if (pdev->pktlog_ppdu_stats) {
6107 			dp_h2t_cfg_stats_msg_send(pdev,
6108 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
6109 						  pdev->pdev_id);
6110 		}
6111 		break;
6112 	default:
6113 		break;
6114 	}
6115 }
6116 
6117 /*
6118  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
6119  * @pdev_handle: DP_PDEV handle
6120  * @val: user provided value
6121  *
6122  * Return: void
6123  */
6124 static void
6125 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
6126 {
6127 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6128 
6129 	switch (val) {
6130 	case 0:
6131 		pdev->tx_sniffer_enable = 0;
6132 		pdev->mcopy_mode = 0;
6133 
6134 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en) {
6135 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6136 			dp_ppdu_ring_reset(pdev);
6137 		} else if (pdev->enhanced_stats_en) {
6138 			dp_h2t_cfg_stats_msg_send(pdev,
6139 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6140 		}
6141 		break;
6142 
6143 	case 1:
6144 		pdev->tx_sniffer_enable = 1;
6145 		pdev->mcopy_mode = 0;
6146 
6147 		if (!pdev->pktlog_ppdu_stats)
6148 			dp_h2t_cfg_stats_msg_send(pdev,
6149 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6150 		break;
6151 	case 2:
6152 		pdev->mcopy_mode = 1;
6153 		pdev->tx_sniffer_enable = 0;
6154 		if (!pdev->enhanced_stats_en)
6155 			dp_ppdu_ring_cfg(pdev);
6156 
6157 		if (!pdev->pktlog_ppdu_stats)
6158 			dp_h2t_cfg_stats_msg_send(pdev,
6159 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
6160 		break;
6161 	default:
6162 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6163 			"Invalid value\n");
6164 		break;
6165 	}
6166 }
6167 
6168 /*
6169  * dp_enable_enhanced_stats()- API to enable enhanced statistcs
6170  * @pdev_handle: DP_PDEV handle
6171  *
6172  * Return: void
6173  */
6174 static void
6175 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
6176 {
6177 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6178 	pdev->enhanced_stats_en = 1;
6179 
6180 	if (!pdev->mcopy_mode)
6181 		dp_ppdu_ring_cfg(pdev);
6182 
6183 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable && !pdev->mcopy_mode)
6184 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
6185 }
6186 
6187 /*
6188  * dp_disable_enhanced_stats()- API to disable enhanced statistcs
6189  * @pdev_handle: DP_PDEV handle
6190  *
6191  * Return: void
6192  */
6193 static void
6194 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
6195 {
6196 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6197 
6198 	pdev->enhanced_stats_en = 0;
6199 
6200 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable && !pdev->mcopy_mode)
6201 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
6202 
6203 	if (!pdev->mcopy_mode)
6204 		dp_ppdu_ring_reset(pdev);
6205 }
6206 
6207 /*
6208  * dp_get_fw_peer_stats()- function to print peer stats
6209  * @pdev_handle: DP_PDEV handle
6210  * @mac_addr: mac address of the peer
6211  * @cap: Type of htt stats requested
6212  *
6213  * Currently Supporting only MAC ID based requests Only
6214  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
6215  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
6216  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
6217  *
6218  * Return: void
6219  */
6220 static void
6221 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
6222 		uint32_t cap)
6223 {
6224 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6225 	int i;
6226 	uint32_t config_param0 = 0;
6227 	uint32_t config_param1 = 0;
6228 	uint32_t config_param2 = 0;
6229 	uint32_t config_param3 = 0;
6230 
6231 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
6232 	config_param0 |= (1 << (cap + 1));
6233 
6234 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
6235 		config_param1 |= (1 << i);
6236 	}
6237 
6238 	config_param2 |= (mac_addr[0] & 0x000000ff);
6239 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
6240 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
6241 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
6242 
6243 	config_param3 |= (mac_addr[4] & 0x000000ff);
6244 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
6245 
6246 	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
6247 			config_param0, config_param1, config_param2,
6248 			config_param3, 0, 0, 0);
6249 
6250 }
6251 
6252 /* This struct definition will be removed from here
6253  * once it get added in FW headers*/
6254 struct httstats_cmd_req {
6255     uint32_t    config_param0;
6256     uint32_t    config_param1;
6257     uint32_t    config_param2;
6258     uint32_t    config_param3;
6259     int cookie;
6260     u_int8_t    stats_id;
6261 };
6262 
6263 /*
6264  * dp_get_htt_stats: function to process the httstas request
6265  * @pdev_handle: DP pdev handle
6266  * @data: pointer to request data
6267  * @data_len: length for request data
6268  *
6269  * return: void
6270  */
6271 static void
6272 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
6273 {
6274 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6275 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
6276 
6277 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
6278 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
6279 				req->config_param0, req->config_param1,
6280 				req->config_param2, req->config_param3,
6281 				req->cookie, 0, 0);
6282 }
6283 
6284 /*
6285  * dp_set_pdev_param: function to set parameters in pdev
6286  * @pdev_handle: DP pdev handle
6287  * @param: parameter type to be set
6288  * @val: value of parameter to be set
6289  *
6290  * return: void
6291  */
6292 static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
6293 		enum cdp_pdev_param_type param, uint8_t val)
6294 {
6295 	switch (param) {
6296 	case CDP_CONFIG_DEBUG_SNIFFER:
6297 		dp_config_debug_sniffer(pdev_handle, val);
6298 		break;
6299 	case CDP_CONFIG_BPR_ENABLE:
6300 		dp_set_bpr_enable(pdev_handle, val);
6301 		break;
6302 	default:
6303 		break;
6304 	}
6305 }
6306 
6307 /*
6308  * dp_set_vdev_param: function to set parameters in vdev
6309  * @param: parameter type to be set
6310  * @val: value of parameter to be set
6311  *
6312  * return: void
6313  */
6314 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
6315 		enum cdp_vdev_param_type param, uint32_t val)
6316 {
6317 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6318 	switch (param) {
6319 	case CDP_ENABLE_WDS:
6320 		vdev->wds_enabled = val;
6321 		break;
6322 	case CDP_ENABLE_NAWDS:
6323 		vdev->nawds_enabled = val;
6324 		break;
6325 	case CDP_ENABLE_MCAST_EN:
6326 		vdev->mcast_enhancement_en = val;
6327 		break;
6328 	case CDP_ENABLE_PROXYSTA:
6329 		vdev->proxysta_vdev = val;
6330 		break;
6331 	case CDP_UPDATE_TDLS_FLAGS:
6332 		vdev->tdls_link_connected = val;
6333 		break;
6334 	case CDP_CFG_WDS_AGING_TIMER:
6335 		if (val == 0)
6336 			qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
6337 		else if (val != vdev->wds_aging_timer_val)
6338 			qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);
6339 
6340 		vdev->wds_aging_timer_val = val;
6341 		break;
6342 	case CDP_ENABLE_AP_BRIDGE:
6343 		if (wlan_op_mode_sta != vdev->opmode)
6344 			vdev->ap_bridge_enabled = val;
6345 		else
6346 			vdev->ap_bridge_enabled = false;
6347 		break;
6348 	case CDP_ENABLE_CIPHER:
6349 		vdev->sec_type = val;
6350 		break;
6351 	case CDP_ENABLE_QWRAP_ISOLATION:
6352 		vdev->isolation_vdev = val;
6353 		break;
6354 	default:
6355 		break;
6356 	}
6357 
6358 	dp_tx_vdev_update_search_flags(vdev);
6359 }
6360 
6361 /**
6362  * dp_peer_set_nawds: set nawds bit in peer
6363  * @peer_handle: pointer to peer
6364  * @value: enable/disable nawds
6365  *
6366  * return: void
6367  */
6368 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
6369 {
6370 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6371 	peer->nawds_enabled = value;
6372 }
6373 
6374 /*
6375  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
6376  * @vdev_handle: DP_VDEV handle
6377  * @map_id:ID of map that needs to be updated
6378  *
6379  * Return: void
6380  */
6381 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
6382 		uint8_t map_id)
6383 {
6384 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6385 	vdev->dscp_tid_map_id = map_id;
6386 	return;
6387 }
6388 
6389 /*
6390  * dp_txrx_stats_publish(): publish pdev stats into a buffer
6391  * @pdev_handle: DP_PDEV handle
6392  * @buf: to hold pdev_stats
6393  *
6394  * Return: int
6395  */
6396 static int
6397 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
6398 {
6399 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6400 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
6401 	struct cdp_txrx_stats_req req = {0,};
6402 
6403 	dp_aggregate_pdev_stats(pdev);
6404 	req.stats = HTT_DBG_EXT_STATS_PDEV_TX;
6405 	req.cookie_val = 1;
6406 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6407 				req.param1, req.param2, req.param3, 0,
6408 				req.cookie_val, 0);
6409 
6410 	msleep(DP_MAX_SLEEP_TIME);
6411 
6412 	req.stats = HTT_DBG_EXT_STATS_PDEV_RX;
6413 	req.cookie_val = 1;
6414 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
6415 				req.param1, req.param2, req.param3, 0,
6416 				req.cookie_val, 0);
6417 
6418 	msleep(DP_MAX_SLEEP_TIME);
6419 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
6420 
6421 	return TXRX_STATS_LEVEL;
6422 }
6423 
6424 /**
6425  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
6426  * @pdev: DP_PDEV handle
6427  * @map_id: ID of map that needs to be updated
6428  * @tos: index value in map
6429  * @tid: tid value passed by the user
6430  *
6431  * Return: void
6432  */
6433 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
6434 		uint8_t map_id, uint8_t tos, uint8_t tid)
6435 {
6436 	uint8_t dscp;
6437 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
6438 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
6439 	pdev->dscp_tid_map[map_id][dscp] = tid;
6440 	if (map_id < HAL_MAX_HW_DSCP_TID_MAPS)
6441 		hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
6442 			map_id, dscp);
6443 	return;
6444 }
6445 
6446 /**
6447  * dp_fw_stats_process(): Process TxRX FW stats request
6448  * @vdev_handle: DP VDEV handle
6449  * @req: stats request
6450  *
6451  * return: int
6452  */
6453 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
6454 		struct cdp_txrx_stats_req *req)
6455 {
6456 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6457 	struct dp_pdev *pdev = NULL;
6458 	uint32_t stats = req->stats;
6459 	uint8_t mac_id = req->mac_id;
6460 
6461 	if (!vdev) {
6462 		DP_TRACE(NONE, "VDEV not found");
6463 		return 1;
6464 	}
6465 	pdev = vdev->pdev;
6466 
6467 	/*
6468 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
6469 	 * from param0 to param3 according to below rule:
6470 	 *
6471 	 * PARAM:
6472 	 *   - config_param0 : start_offset (stats type)
6473 	 *   - config_param1 : stats bmask from start offset
6474 	 *   - config_param2 : stats bmask from start offset + 32
6475 	 *   - config_param3 : stats bmask from start offset + 64
6476 	 */
6477 	if (req->stats == CDP_TXRX_STATS_0) {
6478 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
6479 		req->param1 = 0xFFFFFFFF;
6480 		req->param2 = 0xFFFFFFFF;
6481 		req->param3 = 0xFFFFFFFF;
6482 	}
6483 
6484 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
6485 				req->param1, req->param2, req->param3,
6486 				0, 0, mac_id);
6487 }
6488 
6489 /**
6490  * dp_txrx_stats_request - function to map to firmware and host stats
6491  * @vdev: virtual handle
6492  * @req: stats request
6493  *
6494  * Return: integer
6495  */
6496 static int dp_txrx_stats_request(struct cdp_vdev *vdev,
6497 		struct cdp_txrx_stats_req *req)
6498 {
6499 	int host_stats;
6500 	int fw_stats;
6501 	enum cdp_stats stats;
6502 
6503 	if (!vdev || !req) {
6504 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6505 				"Invalid vdev/req instance");
6506 		return 0;
6507 	}
6508 
6509 	stats = req->stats;
6510 	if (stats >= CDP_TXRX_MAX_STATS)
6511 		return 0;
6512 
6513 	/*
6514 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
6515 	 *			has to be updated if new FW HTT stats added
6516 	 */
6517 	if (stats > CDP_TXRX_STATS_HTT_MAX)
6518 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
6519 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
6520 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
6521 
6522 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6523 		 "stats: %u fw_stats_type: %d host_stats_type: %d",
6524 		  stats, fw_stats, host_stats);
6525 
6526 	if (fw_stats != TXRX_FW_STATS_INVALID) {
6527 		/* update request with FW stats type */
6528 		req->stats = fw_stats;
6529 		return dp_fw_stats_process(vdev, req);
6530 	}
6531 
6532 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
6533 			(host_stats <= TXRX_HOST_STATS_MAX))
6534 		return dp_print_host_stats(vdev, host_stats);
6535 	else
6536 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6537 				"Wrong Input for TxRx Stats");
6538 
6539 	return 0;
6540 }
6541 
6542 /*
6543  * dp_print_napi_stats(): NAPI stats
6544  * @soc - soc handle
6545  */
6546 static void dp_print_napi_stats(struct dp_soc *soc)
6547 {
6548 	hif_print_napi_stats(soc->hif_handle);
6549 }
6550 
6551 /*
6552  * dp_print_per_ring_stats(): Packet count per ring
6553  * @soc - soc handle
6554  */
6555 static void dp_print_per_ring_stats(struct dp_soc *soc)
6556 {
6557 	uint8_t ring;
6558 	uint16_t core;
6559 	uint64_t total_packets;
6560 
6561 	DP_TRACE(FATAL, "Reo packets per ring:");
6562 	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
6563 		total_packets = 0;
6564 		DP_TRACE(FATAL, "Packets on ring %u:", ring);
6565 		for (core = 0; core < NR_CPUS; core++) {
6566 			DP_TRACE(FATAL, "Packets arriving on core %u: %llu",
6567 				core, soc->stats.rx.ring_packets[core][ring]);
6568 			total_packets += soc->stats.rx.ring_packets[core][ring];
6569 		}
6570 		DP_TRACE(FATAL, "Total packets on ring %u: %llu",
6571 			ring, total_packets);
6572 	}
6573 }
6574 
6575 /*
6576  * dp_txrx_path_stats() - Function to display dump stats
6577  * @soc - soc handle
6578  *
6579  * return: none
6580  */
6581 static void dp_txrx_path_stats(struct dp_soc *soc)
6582 {
6583 	uint8_t error_code;
6584 	uint8_t loop_pdev;
6585 	struct dp_pdev *pdev;
6586 	uint8_t i;
6587 
6588 	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
6589 
6590 		pdev = soc->pdev_list[loop_pdev];
6591 		dp_aggregate_pdev_stats(pdev);
6592 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6593 			"Tx path Statistics:");
6594 
6595 		DP_TRACE(FATAL, "from stack: %u msdus (%llu bytes)",
6596 			pdev->stats.tx_i.rcvd.num,
6597 			pdev->stats.tx_i.rcvd.bytes);
6598 		DP_TRACE(FATAL, "processed from host: %u msdus (%llu bytes)",
6599 			pdev->stats.tx_i.processed.num,
6600 			pdev->stats.tx_i.processed.bytes);
6601 		DP_TRACE(FATAL, "successfully transmitted: %u msdus (%llu bytes)",
6602 			pdev->stats.tx.tx_success.num,
6603 			pdev->stats.tx.tx_success.bytes);
6604 
6605 		DP_TRACE(FATAL, "Dropped in host:");
6606 		DP_TRACE(FATAL, "Total packets dropped: %u,",
6607 			pdev->stats.tx_i.dropped.dropped_pkt.num);
6608 		DP_TRACE(FATAL, "Descriptor not available: %u",
6609 			pdev->stats.tx_i.dropped.desc_na);
6610 		DP_TRACE(FATAL, "Ring full: %u",
6611 			pdev->stats.tx_i.dropped.ring_full);
6612 		DP_TRACE(FATAL, "Enqueue fail: %u",
6613 			pdev->stats.tx_i.dropped.enqueue_fail);
6614 		DP_TRACE(FATAL, "DMA Error: %u",
6615 			pdev->stats.tx_i.dropped.dma_error);
6616 
6617 		DP_TRACE(FATAL, "Dropped in hardware:");
6618 		DP_TRACE(FATAL, "total packets dropped: %u",
6619 			pdev->stats.tx.tx_failed);
6620 		DP_TRACE(FATAL, "mpdu age out: %u",
6621 			pdev->stats.tx.dropped.age_out);
6622 		DP_TRACE(FATAL, "firmware removed: %u",
6623 			pdev->stats.tx.dropped.fw_rem);
6624 		DP_TRACE(FATAL, "firmware removed tx: %u",
6625 			pdev->stats.tx.dropped.fw_rem_tx);
6626 		DP_TRACE(FATAL, "firmware removed notx %u",
6627 			pdev->stats.tx.dropped.fw_rem_notx);
6628 		DP_TRACE(FATAL, "peer_invalid: %u",
6629 			pdev->soc->stats.tx.tx_invalid_peer.num);
6630 
6631 
6632 		DP_TRACE(FATAL, "Tx packets sent per interrupt:");
6633 		DP_TRACE(FATAL, "Single Packet: %u",
6634 			pdev->stats.tx_comp_histogram.pkts_1);
6635 		DP_TRACE(FATAL, "2-20 Packets:  %u",
6636 			pdev->stats.tx_comp_histogram.pkts_2_20);
6637 		DP_TRACE(FATAL, "21-40 Packets: %u",
6638 			pdev->stats.tx_comp_histogram.pkts_21_40);
6639 		DP_TRACE(FATAL, "41-60 Packets: %u",
6640 			pdev->stats.tx_comp_histogram.pkts_41_60);
6641 		DP_TRACE(FATAL, "61-80 Packets: %u",
6642 			pdev->stats.tx_comp_histogram.pkts_61_80);
6643 		DP_TRACE(FATAL, "81-100 Packets: %u",
6644 			pdev->stats.tx_comp_histogram.pkts_81_100);
6645 		DP_TRACE(FATAL, "101-200 Packets: %u",
6646 			pdev->stats.tx_comp_histogram.pkts_101_200);
6647 		DP_TRACE(FATAL, "   201+ Packets: %u",
6648 			pdev->stats.tx_comp_histogram.pkts_201_plus);
6649 
6650 		DP_TRACE(FATAL, "Rx path statistics");
6651 
6652 		DP_TRACE(FATAL, "delivered %u msdus ( %llu bytes),",
6653 			pdev->stats.rx.to_stack.num,
6654 			pdev->stats.rx.to_stack.bytes);
6655 		for (i = 0; i <  CDP_MAX_RX_RINGS; i++)
6656 			DP_TRACE(FATAL, "received on reo[%d] %u msdus ( %llu bytes),",
6657 					i, pdev->stats.rx.rcvd_reo[i].num,
6658 					pdev->stats.rx.rcvd_reo[i].bytes);
6659 		DP_TRACE(FATAL, "intra-bss packets %u msdus ( %llu bytes),",
6660 			pdev->stats.rx.intra_bss.pkts.num,
6661 			pdev->stats.rx.intra_bss.pkts.bytes);
6662 		DP_TRACE(FATAL, "intra-bss fails %u msdus ( %llu bytes),",
6663 			pdev->stats.rx.intra_bss.fail.num,
6664 			pdev->stats.rx.intra_bss.fail.bytes);
6665 		DP_TRACE(FATAL, "raw packets %u msdus ( %llu bytes),",
6666 			pdev->stats.rx.raw.num,
6667 			pdev->stats.rx.raw.bytes);
6668 		DP_TRACE(FATAL, "dropped: error %u msdus",
6669 			pdev->stats.rx.err.mic_err);
6670 		DP_TRACE(FATAL, "peer invalid %u",
6671 			pdev->soc->stats.rx.err.rx_invalid_peer.num);
6672 
6673 		DP_TRACE(FATAL, "Reo Statistics");
6674 		DP_TRACE(FATAL, "rbm error: %u msdus",
6675 			pdev->soc->stats.rx.err.invalid_rbm);
6676 		DP_TRACE(FATAL, "hal ring access fail: %u msdus",
6677 			pdev->soc->stats.rx.err.hal_ring_access_fail);
6678 
6679 		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
6680 				error_code++) {
6681 			if (!pdev->soc->stats.rx.err.reo_error[error_code])
6682 				continue;
6683 			DP_TRACE(FATAL, "Reo error number (%u): %u msdus",
6684 				error_code,
6685 				pdev->soc->stats.rx.err.reo_error[error_code]);
6686 		}
6687 
6688 		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
6689 				error_code++) {
6690 			if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
6691 				continue;
6692 			DP_TRACE(FATAL, "Rxdma error number (%u): %u msdus",
6693 				error_code,
6694 				pdev->soc->stats.rx.err
6695 				.rxdma_error[error_code]);
6696 		}
6697 
6698 		DP_TRACE(FATAL, "Rx packets reaped per interrupt:");
6699 		DP_TRACE(FATAL, "Single Packet: %u",
6700 			 pdev->stats.rx_ind_histogram.pkts_1);
6701 		DP_TRACE(FATAL, "2-20 Packets:  %u",
6702 			 pdev->stats.rx_ind_histogram.pkts_2_20);
6703 		DP_TRACE(FATAL, "21-40 Packets: %u",
6704 			 pdev->stats.rx_ind_histogram.pkts_21_40);
6705 		DP_TRACE(FATAL, "41-60 Packets: %u",
6706 			 pdev->stats.rx_ind_histogram.pkts_41_60);
6707 		DP_TRACE(FATAL, "61-80 Packets: %u",
6708 			 pdev->stats.rx_ind_histogram.pkts_61_80);
6709 		DP_TRACE(FATAL, "81-100 Packets: %u",
6710 			 pdev->stats.rx_ind_histogram.pkts_81_100);
6711 		DP_TRACE(FATAL, "101-200 Packets: %u",
6712 			 pdev->stats.rx_ind_histogram.pkts_101_200);
6713 		DP_TRACE(FATAL, "   201+ Packets: %u",
6714 			 pdev->stats.rx_ind_histogram.pkts_201_plus);
6715 
6716 		DP_TRACE_STATS(ERROR, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
6717 			__func__,
6718 			pdev->soc->wlan_cfg_ctx->tso_enabled,
6719 			pdev->soc->wlan_cfg_ctx->lro_enabled,
6720 			pdev->soc->wlan_cfg_ctx->rx_hash,
6721 			pdev->soc->wlan_cfg_ctx->napi_enabled);
6722 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6723 		DP_TRACE_STATS(ERROR, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
6724 			__func__,
6725 			pdev->soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold,
6726 			pdev->soc->wlan_cfg_ctx->tx_flow_start_queue_offset);
6727 #endif
6728 	}
6729 }
6730 
6731 /*
6732  * dp_txrx_dump_stats() -  Dump statistics
6733  * @value - Statistics option
6734  */
6735 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
6736 				     enum qdf_stats_verbosity_level level)
6737 {
6738 	struct dp_soc *soc =
6739 		(struct dp_soc *)psoc;
6740 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6741 
6742 	if (!soc) {
6743 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6744 			"%s: soc is NULL", __func__);
6745 		return QDF_STATUS_E_INVAL;
6746 	}
6747 
6748 	switch (value) {
6749 	case CDP_TXRX_PATH_STATS:
6750 		dp_txrx_path_stats(soc);
6751 		break;
6752 
6753 	case CDP_RX_RING_STATS:
6754 		dp_print_per_ring_stats(soc);
6755 		break;
6756 
6757 	case CDP_TXRX_TSO_STATS:
6758 		/* TODO: NOT IMPLEMENTED */
6759 		break;
6760 
6761 	case CDP_DUMP_TX_FLOW_POOL_INFO:
6762 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
6763 		break;
6764 
6765 	case CDP_DP_NAPI_STATS:
6766 		dp_print_napi_stats(soc);
6767 		break;
6768 
6769 	case CDP_TXRX_DESC_STATS:
6770 		/* TODO: NOT IMPLEMENTED */
6771 		break;
6772 
6773 	default:
6774 		status = QDF_STATUS_E_INVAL;
6775 		break;
6776 	}
6777 
6778 	return status;
6779 
6780 }
6781 
6782 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6783 /**
6784  * dp_update_flow_control_parameters() - API to store datapath
6785  *                            config parameters
6786  * @soc: soc handle
6787  * @cfg: ini parameter handle
6788  *
6789  * Return: void
6790  */
6791 static inline
6792 void dp_update_flow_control_parameters(struct dp_soc *soc,
6793 				struct cdp_config_params *params)
6794 {
6795 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
6796 					params->tx_flow_stop_queue_threshold;
6797 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
6798 					params->tx_flow_start_queue_offset;
6799 }
6800 #else
6801 static inline
6802 void dp_update_flow_control_parameters(struct dp_soc *soc,
6803 				struct cdp_config_params *params)
6804 {
6805 }
6806 #endif
6807 
6808 /**
6809  * dp_update_config_parameters() - API to store datapath
6810  *                            config parameters
6811  * @soc: soc handle
6812  * @cfg: ini parameter handle
6813  *
6814  * Return: status
6815  */
6816 static
6817 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
6818 				struct cdp_config_params *params)
6819 {
6820 	struct dp_soc *soc = (struct dp_soc *)psoc;
6821 
6822 	if (!(soc)) {
6823 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6824 				"%s: Invalid handle", __func__);
6825 		return QDF_STATUS_E_INVAL;
6826 	}
6827 
6828 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
6829 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
6830 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
6831 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
6832 				params->tcp_udp_checksumoffload;
6833 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
6834 
6835 	dp_update_flow_control_parameters(soc, params);
6836 
6837 	return QDF_STATUS_SUCCESS;
6838 }
6839 
6840 /**
6841  * dp_txrx_set_wds_rx_policy() - API to store datapath
6842  *                            config parameters
6843  * @vdev_handle - datapath vdev handle
6844  * @cfg: ini parameter handle
6845  *
6846  * Return: status
6847  */
6848 #ifdef WDS_VENDOR_EXTENSION
6849 void
6850 dp_txrx_set_wds_rx_policy(
6851 		struct cdp_vdev *vdev_handle,
6852 		u_int32_t val)
6853 {
6854 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6855 	struct dp_peer *peer;
6856 	if (vdev->opmode == wlan_op_mode_ap) {
6857 		/* for ap, set it on bss_peer */
6858 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6859 			if (peer->bss_peer) {
6860 				peer->wds_ecm.wds_rx_filter = 1;
6861 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
6862 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
6863 				break;
6864 			}
6865 		}
6866 	} else if (vdev->opmode == wlan_op_mode_sta) {
6867 		peer = TAILQ_FIRST(&vdev->peer_list);
6868 		peer->wds_ecm.wds_rx_filter = 1;
6869 		peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
6870 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
6871 	}
6872 }
6873 
6874 /**
6875  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
6876  *
6877  * @peer_handle - datapath peer handle
6878  * @wds_tx_ucast: policy for unicast transmission
6879  * @wds_tx_mcast: policy for multicast transmission
6880  *
6881  * Return: void
6882  */
6883 void
6884 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
6885 		int wds_tx_ucast, int wds_tx_mcast)
6886 {
6887 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6888 	if (wds_tx_ucast || wds_tx_mcast) {
6889 		peer->wds_enabled = 1;
6890 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
6891 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
6892 	} else {
6893 		peer->wds_enabled = 0;
6894 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
6895 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
6896 	}
6897 
6898 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6899 			FL("Policy Update set to :\
6900 				peer->wds_enabled %d\
6901 				peer->wds_ecm.wds_tx_ucast_4addr %d\
6902 				peer->wds_ecm.wds_tx_mcast_4addr %d\n"),
6903 				peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
6904 				peer->wds_ecm.wds_tx_mcast_4addr);
6905 	return;
6906 }
6907 #endif
6908 
6909 static struct cdp_wds_ops dp_ops_wds = {
6910 	.vdev_set_wds = dp_vdev_set_wds,
6911 #ifdef WDS_VENDOR_EXTENSION
6912 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
6913 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
6914 #endif
6915 };
6916 
6917 /*
6918  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
6919  * @vdev_handle - datapath vdev handle
6920  * @callback - callback function
6921  * @ctxt: callback context
6922  *
6923  */
6924 static void
6925 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
6926 		       ol_txrx_data_tx_cb callback, void *ctxt)
6927 {
6928 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6929 
6930 	vdev->tx_non_std_data_callback.func = callback;
6931 	vdev->tx_non_std_data_callback.ctxt = ctxt;
6932 }
6933 
6934 /**
6935  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
6936  * @pdev_hdl: datapath pdev handle
6937  *
6938  * Return: opaque pointer to dp txrx handle
6939  */
6940 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
6941 {
6942 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
6943 
6944 	return pdev->dp_txrx_handle;
6945 }
6946 
6947 /**
6948  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
6949  * @pdev_hdl: datapath pdev handle
6950  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
6951  *
6952  * Return: void
6953  */
6954 static void
6955 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
6956 {
6957 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
6958 
6959 	pdev->dp_txrx_handle = dp_txrx_hdl;
6960 }
6961 
6962 /**
6963  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
6964  * @soc_handle: datapath soc handle
6965  *
6966  * Return: opaque pointer to external dp (non-core DP)
6967  */
6968 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
6969 {
6970 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
6971 
6972 	return soc->external_txrx_handle;
6973 }
6974 
6975 /**
6976  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
6977  * @soc_handle: datapath soc handle
6978  * @txrx_handle: opaque pointer to external dp (non-core DP)
6979  *
6980  * Return: void
6981  */
6982 static void
6983 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
6984 {
6985 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
6986 
6987 	soc->external_txrx_handle = txrx_handle;
6988 }
6989 
6990 #ifdef FEATURE_AST
6991 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
6992 {
6993 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
6994 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
6995 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
6996 
6997 	/*
6998 	 * For BSS peer, new peer is not created on alloc_node if the
6999 	 * peer with same address already exists , instead refcnt is
7000 	 * increased for existing peer. Correspondingly in delete path,
7001 	 * only refcnt is decreased; and peer is only deleted , when all
7002 	 * references are deleted. So delete_in_progress should not be set
7003 	 * for bss_peer, unless only 2 reference remains (peer map reference
7004 	 * and peer hash table reference).
7005 	 */
7006 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
7007 		return;
7008 	}
7009 
7010 	peer->delete_in_progress = true;
7011 	dp_peer_delete_ast_entries(soc, peer);
7012 }
7013 #endif
7014 
7015 #ifdef ATH_SUPPORT_NAC_RSSI
7016 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
7017 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
7018 		uint8_t chan_num)
7019 {
7020 
7021 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7022 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7023 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
7024 
7025 	pdev->nac_rssi_filtering = 1;
7026 	/* Store address of NAC (neighbour peer) which will be checked
7027 	 * against TA of received packets.
7028 	 */
7029 
7030 	if (cmd == CDP_NAC_PARAM_ADD) {
7031 		qdf_mem_copy(vdev->cdp_nac_rssi.client_mac,
7032 				client_macaddr, DP_MAC_ADDR_LEN);
7033 		vdev->cdp_nac_rssi_enabled = 1;
7034 	} else if (cmd == CDP_NAC_PARAM_DEL) {
7035 		if (!qdf_mem_cmp(vdev->cdp_nac_rssi.client_mac,
7036 			client_macaddr, DP_MAC_ADDR_LEN)) {
7037 				/* delete this peer from the list */
7038 			qdf_mem_zero(vdev->cdp_nac_rssi.client_mac,
7039 				DP_MAC_ADDR_LEN);
7040 		}
7041 		vdev->cdp_nac_rssi_enabled = 0;
7042 	}
7043 
7044 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
7045 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
7046 			((void *)vdev->pdev->ctrl_pdev,
7047 			 vdev->vdev_id, cmd, bssid);
7048 
7049 	return QDF_STATUS_SUCCESS;
7050 }
7051 #endif
7052 
7053 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
7054 		uint32_t max_peers)
7055 {
7056 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7057 
7058 	soc->max_peers = max_peers;
7059 
7060 	qdf_print ("%s max_peers %u\n", __func__, max_peers);
7061 
7062 	if (dp_peer_find_attach(soc))
7063 		return QDF_STATUS_E_FAILURE;
7064 
7065 	return QDF_STATUS_SUCCESS;
7066 }
7067 
7068 /**
7069  * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
7070  * @dp_pdev: dp pdev handle
7071  * @ctrl_pdev: UMAC ctrl pdev handle
7072  *
7073  * Return: void
7074  */
7075 static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
7076 				  struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
7077 {
7078 	struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
7079 
7080 	pdev->ctrl_pdev = ctrl_pdev;
7081 }
7082 
7083 static struct cdp_cmn_ops dp_ops_cmn = {
7084 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
7085 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
7086 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
7087 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
7088 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
7089 	.txrx_peer_create = dp_peer_create_wifi3,
7090 	.txrx_peer_setup = dp_peer_setup_wifi3,
7091 #ifdef FEATURE_AST
7092 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
7093 #else
7094 	.txrx_peer_teardown = NULL,
7095 #endif
7096 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
7097 	.txrx_peer_del_ast = dp_peer_del_ast_wifi3,
7098 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
7099 	.txrx_peer_ast_hash_find = dp_peer_ast_hash_find_wifi3,
7100 	.txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
7101 	.txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
7102 	.txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
7103 	.txrx_peer_delete = dp_peer_delete_wifi3,
7104 	.txrx_vdev_register = dp_vdev_register_wifi3,
7105 	.txrx_soc_detach = dp_soc_detach_wifi3,
7106 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
7107 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
7108 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
7109 	.txrx_ath_getstats = dp_get_device_stats,
7110 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
7111 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
7112 	.delba_process = dp_delba_process_wifi3,
7113 	.set_addba_response = dp_set_addba_response,
7114 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
7115 	.flush_cache_rx_queue = NULL,
7116 	/* TODO: get API's for dscp-tid need to be added*/
7117 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
7118 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
7119 	.txrx_stats_request = dp_txrx_stats_request,
7120 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
7121 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
7122 	.txrx_set_nac = dp_set_nac,
7123 	.txrx_get_tx_pending = dp_get_tx_pending,
7124 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
7125 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
7126 	.display_stats = dp_txrx_dump_stats,
7127 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
7128 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
7129 #ifdef DP_INTR_POLL_BASED
7130 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
7131 #else
7132 	.txrx_intr_attach = dp_soc_interrupt_attach,
7133 #endif
7134 	.txrx_intr_detach = dp_soc_interrupt_detach,
7135 	.set_pn_check = dp_set_pn_check_wifi3,
7136 	.update_config_parameters = dp_update_config_parameters,
7137 	/* TODO: Add other functions */
7138 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
7139 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
7140 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
7141 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
7142 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
7143 	.tx_send = dp_tx_send,
7144 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
7145 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
7146 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
7147 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
7148 	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
7149 };
7150 
7151 static struct cdp_ctrl_ops dp_ops_ctrl = {
7152 	.txrx_peer_authorize = dp_peer_authorize,
7153 #ifdef QCA_SUPPORT_SON
7154 	.txrx_set_inact_params = dp_set_inact_params,
7155 	.txrx_start_inact_timer = dp_start_inact_timer,
7156 	.txrx_set_overload = dp_set_overload,
7157 	.txrx_peer_is_inact = dp_peer_is_inact,
7158 	.txrx_mark_peer_inact = dp_mark_peer_inact,
7159 #endif
7160 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
7161 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
7162 #ifdef MESH_MODE_SUPPORT
7163 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
7164 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
7165 #endif
7166 	.txrx_set_vdev_param = dp_set_vdev_param,
7167 	.txrx_peer_set_nawds = dp_peer_set_nawds,
7168 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
7169 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
7170 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
7171 	.txrx_update_filter_neighbour_peers =
7172 		dp_update_filter_neighbour_peers,
7173 	.txrx_get_sec_type = dp_get_sec_type,
7174 	/* TODO: Add other functions */
7175 	.txrx_wdi_event_sub = dp_wdi_event_sub,
7176 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
7177 #ifdef WDI_EVENT_ENABLE
7178 	.txrx_get_pldev = dp_get_pldev,
7179 #endif
7180 	.txrx_set_pdev_param = dp_set_pdev_param,
7181 #ifdef ATH_SUPPORT_NAC_RSSI
7182 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
7183 #endif
7184 	.set_key = dp_set_michael_key,
7185 };
7186 
7187 static struct cdp_me_ops dp_ops_me = {
7188 #ifdef ATH_SUPPORT_IQUE
7189 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
7190 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
7191 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
7192 #endif
7193 };
7194 
7195 static struct cdp_mon_ops dp_ops_mon = {
7196 	.txrx_monitor_set_filter_ucast_data = NULL,
7197 	.txrx_monitor_set_filter_mcast_data = NULL,
7198 	.txrx_monitor_set_filter_non_data = NULL,
7199 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
7200 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
7201 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
7202 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
7203 	/* Added support for HK advance filter */
7204 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
7205 };
7206 
7207 static struct cdp_host_stats_ops dp_ops_host_stats = {
7208 	.txrx_per_peer_stats = dp_get_host_peer_stats,
7209 	.get_fw_peer_stats = dp_get_fw_peer_stats,
7210 	.get_htt_stats = dp_get_htt_stats,
7211 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
7212 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
7213 	.txrx_stats_publish = dp_txrx_stats_publish,
7214 	/* TODO */
7215 };
7216 
7217 static struct cdp_raw_ops dp_ops_raw = {
7218 	/* TODO */
7219 };
7220 
7221 #ifdef CONFIG_WIN
7222 static struct cdp_pflow_ops dp_ops_pflow = {
7223 	/* TODO */
7224 };
7225 #endif /* CONFIG_WIN */
7226 
7227 #ifdef FEATURE_RUNTIME_PM
7228 /**
7229  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
7230  * @opaque_pdev: DP pdev context
7231  *
7232  * DP is ready to runtime suspend if there are no pending TX packets.
7233  *
7234  * Return: QDF_STATUS
7235  */
7236 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
7237 {
7238 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7239 	struct dp_soc *soc = pdev->soc;
7240 
7241 	/* Call DP TX flow control API to check if there is any
7242 	   pending packets */
7243 
7244 	if (soc->intr_mode == DP_INTR_POLL)
7245 		qdf_timer_stop(&soc->int_timer);
7246 
7247 	return QDF_STATUS_SUCCESS;
7248 }
7249 
7250 /**
7251  * dp_runtime_resume() - ensure DP is ready to runtime resume
7252  * @opaque_pdev: DP pdev context
7253  *
7254  * Resume DP for runtime PM.
7255  *
7256  * Return: QDF_STATUS
7257  */
7258 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
7259 {
7260 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7261 	struct dp_soc *soc = pdev->soc;
7262 	void *hal_srng;
7263 	int i;
7264 
7265 	if (soc->intr_mode == DP_INTR_POLL)
7266 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7267 
7268 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
7269 		hal_srng = soc->tcl_data_ring[i].hal_srng;
7270 		if (hal_srng) {
7271 			/* We actually only need to acquire the lock */
7272 			hal_srng_access_start(soc->hal_soc, hal_srng);
7273 			/* Update SRC ring head pointer for HW to send
7274 			   all pending packets */
7275 			hal_srng_access_end(soc->hal_soc, hal_srng);
7276 		}
7277 	}
7278 
7279 	return QDF_STATUS_SUCCESS;
7280 }
7281 #endif /* FEATURE_RUNTIME_PM */
7282 
7283 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
7284 {
7285 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7286 	struct dp_soc *soc = pdev->soc;
7287 
7288 	if (soc->intr_mode == DP_INTR_POLL)
7289 		qdf_timer_stop(&soc->int_timer);
7290 
7291 	return QDF_STATUS_SUCCESS;
7292 }
7293 
7294 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
7295 {
7296 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
7297 	struct dp_soc *soc = pdev->soc;
7298 
7299 	if (soc->intr_mode == DP_INTR_POLL)
7300 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7301 
7302 	return QDF_STATUS_SUCCESS;
7303 }
7304 
7305 #ifndef CONFIG_WIN
7306 static struct cdp_misc_ops dp_ops_misc = {
7307 	.tx_non_std = dp_tx_non_std,
7308 	.get_opmode = dp_get_opmode,
7309 #ifdef FEATURE_RUNTIME_PM
7310 	.runtime_suspend = dp_runtime_suspend,
7311 	.runtime_resume = dp_runtime_resume,
7312 #endif /* FEATURE_RUNTIME_PM */
7313 	.pkt_log_init = dp_pkt_log_init,
7314 	.pkt_log_con_service = dp_pkt_log_con_service,
7315 };
7316 
7317 static struct cdp_flowctl_ops dp_ops_flowctl = {
7318 	/* WIFI 3.0 DP implement as required. */
7319 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7320 	.flow_pool_map_handler = dp_tx_flow_pool_map,
7321 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
7322 	.register_pause_cb = dp_txrx_register_pause_cb,
7323 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
7324 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
7325 };
7326 
7327 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
7328 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7329 };
7330 
7331 #ifdef IPA_OFFLOAD
7332 static struct cdp_ipa_ops dp_ops_ipa = {
7333 	.ipa_get_resource = dp_ipa_get_resource,
7334 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
7335 	.ipa_op_response = dp_ipa_op_response,
7336 	.ipa_register_op_cb = dp_ipa_register_op_cb,
7337 	.ipa_get_stat = dp_ipa_get_stat,
7338 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
7339 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
7340 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
7341 	.ipa_setup = dp_ipa_setup,
7342 	.ipa_cleanup = dp_ipa_cleanup,
7343 	.ipa_setup_iface = dp_ipa_setup_iface,
7344 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
7345 	.ipa_enable_pipes = dp_ipa_enable_pipes,
7346 	.ipa_disable_pipes = dp_ipa_disable_pipes,
7347 	.ipa_set_perf_level = dp_ipa_set_perf_level
7348 };
7349 #endif
7350 
7351 static struct cdp_bus_ops dp_ops_bus = {
7352 	.bus_suspend = dp_bus_suspend,
7353 	.bus_resume = dp_bus_resume
7354 };
7355 
7356 static struct cdp_ocb_ops dp_ops_ocb = {
7357 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7358 };
7359 
7360 
7361 static struct cdp_throttle_ops dp_ops_throttle = {
7362 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7363 };
7364 
7365 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
7366 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7367 };
7368 
7369 static struct cdp_cfg_ops dp_ops_cfg = {
7370 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
7371 };
7372 
7373 /*
7374  * dp_wrapper_peer_get_ref_by_addr - wrapper function to get to peer
7375  * @dev: physical device instance
7376  * @peer_mac_addr: peer mac address
7377  * @local_id: local id for the peer
7378  * @debug_id: to track enum peer access
7379 
7380  * Return: peer instance pointer
7381  */
7382 static inline void *
7383 dp_wrapper_peer_get_ref_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
7384 				u8 *local_id,
7385 				enum peer_debug_id_type debug_id)
7386 {
7387 	/*
7388 	 * Currently this function does not implement the "get ref"
7389 	 * functionality and is mapped to dp_find_peer_by_addr which does not
7390 	 * increment the peer ref count. So the peer state is uncertain after
7391 	 * calling this API. The functionality needs to be implemented.
7392 	 * Accordingly the corresponding release_ref function is NULL.
7393 	 */
7394 	return dp_find_peer_by_addr(dev, peer_mac_addr, local_id);
7395 }
7396 
7397 static struct cdp_peer_ops dp_ops_peer = {
7398 	.register_peer = dp_register_peer,
7399 	.clear_peer = dp_clear_peer,
7400 	.find_peer_by_addr = dp_find_peer_by_addr,
7401 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
7402 	.peer_get_ref_by_addr = dp_wrapper_peer_get_ref_by_addr,
7403 	.peer_release_ref = NULL,
7404 	.local_peer_id = dp_local_peer_id,
7405 	.peer_find_by_local_id = dp_peer_find_by_local_id,
7406 	.peer_state_update = dp_peer_state_update,
7407 	.get_vdevid = dp_get_vdevid,
7408 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
7409 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
7410 	.get_vdev_for_peer = dp_get_vdev_for_peer,
7411 	.get_peer_state = dp_get_peer_state,
7412 	.last_assoc_received = dp_get_last_assoc_received,
7413 	.last_disassoc_received = dp_get_last_disassoc_received,
7414 	.last_deauth_received = dp_get_last_deauth_received,
7415 };
7416 #endif
7417 
7418 static struct cdp_ops dp_txrx_ops = {
7419 	.cmn_drv_ops = &dp_ops_cmn,
7420 	.ctrl_ops = &dp_ops_ctrl,
7421 	.me_ops = &dp_ops_me,
7422 	.mon_ops = &dp_ops_mon,
7423 	.host_stats_ops = &dp_ops_host_stats,
7424 	.wds_ops = &dp_ops_wds,
7425 	.raw_ops = &dp_ops_raw,
7426 #ifdef CONFIG_WIN
7427 	.pflow_ops = &dp_ops_pflow,
7428 #endif /* CONFIG_WIN */
7429 #ifndef CONFIG_WIN
7430 	.misc_ops = &dp_ops_misc,
7431 	.cfg_ops = &dp_ops_cfg,
7432 	.flowctl_ops = &dp_ops_flowctl,
7433 	.l_flowctl_ops = &dp_ops_l_flowctl,
7434 #ifdef IPA_OFFLOAD
7435 	.ipa_ops = &dp_ops_ipa,
7436 #endif
7437 	.bus_ops = &dp_ops_bus,
7438 	.ocb_ops = &dp_ops_ocb,
7439 	.peer_ops = &dp_ops_peer,
7440 	.throttle_ops = &dp_ops_throttle,
7441 	.mob_stats_ops = &dp_ops_mob_stats,
7442 #endif
7443 };
7444 
7445 /*
7446  * dp_soc_set_txrx_ring_map()
7447  * @dp_soc: DP handler for soc
7448  *
7449  * Return: Void
7450  */
7451 static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
7452 {
7453 	uint32_t i;
7454 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
7455 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
7456 	}
7457 }
7458 
7459 /*
7460  * dp_soc_attach_wifi3() - Attach txrx SOC
7461  * @ctrl_psoc:	Opaque SOC handle from control plane
7462  * @htc_handle:	Opaque HTC handle
7463  * @hif_handle:	Opaque HIF handle
7464  * @qdf_osdev:	QDF device
7465  *
7466  * Return: DP SOC handle on success, NULL on failure
7467  */
7468 /*
7469  * Local prototype added to temporarily address warning caused by
7470  * -Wmissing-prototypes. A more correct solution, namely to expose
7471  * a prototype in an appropriate header file, will come later.
7472  */
7473 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
7474 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
7475 	struct ol_if_ops *ol_ops);
7476 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
7477 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
7478 	struct ol_if_ops *ol_ops)
7479 {
7480 	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
7481 
7482 	if (!soc) {
7483 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7484 			FL("DP SOC memory allocation failed"));
7485 		goto fail0;
7486 	}
7487 
7488 	soc->cdp_soc.ops = &dp_txrx_ops;
7489 	soc->cdp_soc.ol_ops = ol_ops;
7490 	soc->ctrl_psoc = ctrl_psoc;
7491 	soc->osdev = qdf_osdev;
7492 	soc->hif_handle = hif_handle;
7493 
7494 	soc->hal_soc = hif_get_hal_handle(hif_handle);
7495 	soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
7496 		soc->hal_soc, qdf_osdev);
7497 	if (!soc->htt_handle) {
7498 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7499 			FL("HTT attach failed"));
7500 		goto fail1;
7501 	}
7502 
7503 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach();
7504 	if (!soc->wlan_cfg_ctx) {
7505 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7506 				FL("wlan_cfg_soc_attach failed"));
7507 		goto fail2;
7508 	}
7509 
7510 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx, rx_hash);
7511 	soc->cce_disable = false;
7512 
7513 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
7514 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
7515 				CDP_CFG_MAX_PEER_ID);
7516 
7517 		if (ret != -EINVAL) {
7518 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
7519 		}
7520 
7521 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
7522 				CDP_CFG_CCE_DISABLE);
7523 		if (ret == 1)
7524 			soc->cce_disable = true;
7525 	}
7526 
7527 	qdf_spinlock_create(&soc->peer_ref_mutex);
7528 
7529 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
7530 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
7531 
7532 	/* fill the tx/rx cpu ring map*/
7533 	dp_soc_set_txrx_ring_map(soc);
7534 
7535 	qdf_spinlock_create(&soc->htt_stats.lock);
7536 	/* initialize work queue for stats processing */
7537 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
7538 
7539 	/*Initialize inactivity timer for wifison */
7540 	dp_init_inact_timer(soc);
7541 
7542 	return (void *)soc;
7543 
7544 fail2:
7545 	htt_soc_detach(soc->htt_handle);
7546 fail1:
7547 	qdf_mem_free(soc);
7548 fail0:
7549 	return NULL;
7550 }
7551 
7552 /*
7553  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
7554  *
7555  * @soc: handle to DP soc
7556  * @mac_id: MAC id
7557  *
7558  * Return: Return pdev corresponding to MAC
7559  */
7560 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
7561 {
7562 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
7563 		return soc->pdev_list[mac_id];
7564 
7565 	/* Typically for MCL as there only 1 PDEV*/
7566 	return soc->pdev_list[0];
7567 }
7568 
7569 /*
7570  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
7571  * @soc:		DP SoC context
7572  * @max_mac_rings:	No of MAC rings
7573  *
7574  * Return: None
7575  */
7576 static
7577 void dp_is_hw_dbs_enable(struct dp_soc *soc,
7578 				int *max_mac_rings)
7579 {
7580 	bool dbs_enable = false;
7581 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
7582 		dbs_enable = soc->cdp_soc.ol_ops->
7583 		is_hw_dbs_2x2_capable(soc->ctrl_psoc);
7584 
7585 	*max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
7586 }
7587 
7588 /*
7589 * dp_set_pktlog_wifi3() - attach txrx vdev
7590 * @pdev: Datapath PDEV handle
7591 * @event: which event's notifications are being subscribed to
7592 * @enable: WDI event subscribe or not. (True or False)
7593 *
7594 * Return: Success, NULL on failure
7595 */
7596 #ifdef WDI_EVENT_ENABLE
7597 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
7598 	bool enable)
7599 {
7600 	struct dp_soc *soc = pdev->soc;
7601 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
7602 	int max_mac_rings = wlan_cfg_get_num_mac_rings
7603 					(pdev->wlan_cfg_ctx);
7604 	uint8_t mac_id = 0;
7605 
7606 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
7607 
7608 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
7609 			FL("Max_mac_rings %d \n"),
7610 			max_mac_rings);
7611 
7612 	if (enable) {
7613 		switch (event) {
7614 		case WDI_EVENT_RX_DESC:
7615 			if (pdev->monitor_vdev) {
7616 				/* Nothing needs to be done if monitor mode is
7617 				 * enabled
7618 				 */
7619 				return 0;
7620 			}
7621 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
7622 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
7623 				htt_tlv_filter.mpdu_start = 1;
7624 				htt_tlv_filter.msdu_start = 1;
7625 				htt_tlv_filter.msdu_end = 1;
7626 				htt_tlv_filter.mpdu_end = 1;
7627 				htt_tlv_filter.packet_header = 1;
7628 				htt_tlv_filter.attention = 1;
7629 				htt_tlv_filter.ppdu_start = 1;
7630 				htt_tlv_filter.ppdu_end = 1;
7631 				htt_tlv_filter.ppdu_end_user_stats = 1;
7632 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7633 				htt_tlv_filter.ppdu_end_status_done = 1;
7634 				htt_tlv_filter.enable_fp = 1;
7635 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7636 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7637 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7638 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7639 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7640 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7641 
7642 				for (mac_id = 0; mac_id < max_mac_rings;
7643 								mac_id++) {
7644 					int mac_for_pdev =
7645 						dp_get_mac_id_for_pdev(mac_id,
7646 								pdev->pdev_id);
7647 
7648 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7649 					 mac_for_pdev,
7650 					 pdev->rxdma_mon_status_ring[mac_id]
7651 					 .hal_srng,
7652 					 RXDMA_MONITOR_STATUS,
7653 					 RX_BUFFER_SIZE,
7654 					 &htt_tlv_filter);
7655 
7656 				}
7657 
7658 				if (soc->reap_timer_init)
7659 					qdf_timer_mod(&soc->mon_reap_timer,
7660 					DP_INTR_POLL_TIMER_MS);
7661 			}
7662 			break;
7663 
7664 		case WDI_EVENT_LITE_RX:
7665 			if (pdev->monitor_vdev) {
7666 				/* Nothing needs to be done if monitor mode is
7667 				 * enabled
7668 				 */
7669 				return 0;
7670 			}
7671 
7672 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
7673 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
7674 
7675 				htt_tlv_filter.ppdu_start = 1;
7676 				htt_tlv_filter.ppdu_end = 1;
7677 				htt_tlv_filter.ppdu_end_user_stats = 1;
7678 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7679 				htt_tlv_filter.ppdu_end_status_done = 1;
7680 				htt_tlv_filter.mpdu_start = 1;
7681 				htt_tlv_filter.enable_fp = 1;
7682 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7683 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7684 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7685 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7686 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7687 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7688 
7689 				for (mac_id = 0; mac_id < max_mac_rings;
7690 								mac_id++) {
7691 					int mac_for_pdev =
7692 						dp_get_mac_id_for_pdev(mac_id,
7693 								pdev->pdev_id);
7694 
7695 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7696 					mac_for_pdev,
7697 					pdev->rxdma_mon_status_ring[mac_id]
7698 					.hal_srng,
7699 					RXDMA_MONITOR_STATUS,
7700 					RX_BUFFER_SIZE_PKTLOG_LITE,
7701 					&htt_tlv_filter);
7702 				}
7703 
7704 				if (soc->reap_timer_init)
7705 					qdf_timer_mod(&soc->mon_reap_timer,
7706 					DP_INTR_POLL_TIMER_MS);
7707 			}
7708 			break;
7709 
7710 		case WDI_EVENT_LITE_T2H:
7711 			if (pdev->monitor_vdev) {
7712 				/* Nothing needs to be done if monitor mode is
7713 				 * enabled
7714 				 */
7715 				return 0;
7716 			}
7717 
7718 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
7719 				int mac_for_pdev = dp_get_mac_id_for_pdev(
7720 							mac_id,	pdev->pdev_id);
7721 
7722 				pdev->pktlog_ppdu_stats = true;
7723 				dp_h2t_cfg_stats_msg_send(pdev,
7724 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
7725 					mac_for_pdev);
7726 			}
7727 			break;
7728 
7729 		default:
7730 			/* Nothing needs to be done for other pktlog types */
7731 			break;
7732 		}
7733 	} else {
7734 		switch (event) {
7735 		case WDI_EVENT_RX_DESC:
7736 		case WDI_EVENT_LITE_RX:
7737 			if (pdev->monitor_vdev) {
7738 				/* Nothing needs to be done if monitor mode is
7739 				 * enabled
7740 				 */
7741 				return 0;
7742 			}
7743 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
7744 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
7745 
7746 				for (mac_id = 0; mac_id < max_mac_rings;
7747 								mac_id++) {
7748 					int mac_for_pdev =
7749 						dp_get_mac_id_for_pdev(mac_id,
7750 								pdev->pdev_id);
7751 
7752 					htt_h2t_rx_ring_cfg(soc->htt_handle,
7753 					  mac_for_pdev,
7754 					  pdev->rxdma_mon_status_ring[mac_id]
7755 					  .hal_srng,
7756 					  RXDMA_MONITOR_STATUS,
7757 					  RX_BUFFER_SIZE,
7758 					  &htt_tlv_filter);
7759 				}
7760 
7761 				if (soc->reap_timer_init)
7762 					qdf_timer_stop(&soc->mon_reap_timer);
7763 			}
7764 			break;
7765 		case WDI_EVENT_LITE_T2H:
7766 			if (pdev->monitor_vdev) {
7767 				/* Nothing needs to be done if monitor mode is
7768 				 * enabled
7769 				 */
7770 				return 0;
7771 			}
7772 			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW
7773 			 * passing value 0. Once these macros will define in htt
7774 			 * header file will use proper macros
7775 			*/
7776 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
7777 				int mac_for_pdev =
7778 						dp_get_mac_id_for_pdev(mac_id,
7779 								pdev->pdev_id);
7780 
7781 				pdev->pktlog_ppdu_stats = false;
7782 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7783 					dp_h2t_cfg_stats_msg_send(pdev, 0,
7784 								mac_for_pdev);
7785 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
7786 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
7787 								mac_for_pdev);
7788 				} else if (pdev->enhanced_stats_en) {
7789 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
7790 								mac_for_pdev);
7791 				}
7792 			}
7793 
7794 			break;
7795 		default:
7796 			/* Nothing needs to be done for other pktlog types */
7797 			break;
7798 		}
7799 	}
7800 	return 0;
7801 }
7802 #endif
7803 
7804 #ifdef CONFIG_MCL
7805 /*
7806  * dp_service_mon_rings()- timer to reap monitor rings
7807  * reqd as we are not getting ppdu end interrupts
7808  * @arg: SoC Handle
7809  *
7810  * Return:
7811  *
7812  */
7813 static void dp_service_mon_rings(void *arg)
7814 {
7815 	struct dp_soc *soc = (struct dp_soc *) arg;
7816 	int ring = 0, work_done, mac_id;
7817 	struct dp_pdev *pdev = NULL;
7818 
7819 	for  (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
7820 		pdev = soc->pdev_list[ring];
7821 		if (pdev == NULL)
7822 			continue;
7823 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7824 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7825 								pdev->pdev_id);
7826 			work_done = dp_mon_process(soc, mac_for_pdev,
7827 							QCA_NAPI_BUDGET);
7828 
7829 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
7830 				FL("Reaped %d descs from Monitor rings"),
7831 				work_done);
7832 		}
7833 	}
7834 
7835 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
7836 }
7837 
7838 #ifndef REMOVE_PKT_LOG
7839 /**
7840  * dp_pkt_log_init() - API to initialize packet log
7841  * @ppdev: physical device handle
7842  * @scn: HIF context
7843  *
7844  * Return: none
7845  */
7846 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
7847 {
7848 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
7849 
7850 	if (handle->pkt_log_init) {
7851 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7852 			 "%s: Packet log not initialized", __func__);
7853 		return;
7854 	}
7855 
7856 	pktlog_sethandle(&handle->pl_dev, scn);
7857 	pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
7858 
7859 	if (pktlogmod_init(scn)) {
7860 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7861 			 "%s: pktlogmod_init failed", __func__);
7862 		handle->pkt_log_init = false;
7863 	} else {
7864 		handle->pkt_log_init = true;
7865 	}
7866 }
7867 
7868 /**
7869  * dp_pkt_log_con_service() - connect packet log service
7870  * @ppdev: physical device handle
7871  * @scn: device context
7872  *
7873  * Return: none
7874  */
7875 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
7876 {
7877 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
7878 
7879 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
7880 	pktlog_htc_attach();
7881 }
7882 
7883 /**
7884  * dp_pktlogmod_exit() - API to cleanup pktlog info
7885  * @handle: Pdev handle
7886  *
7887  * Return: none
7888  */
7889 static void dp_pktlogmod_exit(struct dp_pdev *handle)
7890 {
7891 	void *scn = (void *)handle->soc->hif_handle;
7892 
7893 	if (!scn) {
7894 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7895 			 "%s: Invalid hif(scn) handle", __func__);
7896 		return;
7897 	}
7898 
7899 	pktlogmod_exit(scn);
7900 	handle->pkt_log_init = false;
7901 }
7902 #endif
7903 #else
7904 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
7905 #endif
7906 
7907