xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision dd4dc88b837a295134aa9869114a2efee0f4894b)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_types.h"
31 #include "dp_internal.h"
32 #include "dp_tx.h"
33 #include "dp_tx_desc.h"
34 #include "dp_rx.h"
35 #include "dp_rx_mon.h"
36 #ifdef DP_RATETABLE_SUPPORT
37 #include "dp_ratetable.h"
38 #endif
39 #include <cdp_txrx_handle.h>
40 #include <wlan_cfg.h>
41 #include "cdp_txrx_cmn_struct.h"
42 #include "cdp_txrx_stats_struct.h"
43 #include "cdp_txrx_cmn_reg.h"
44 #include <qdf_util.h>
45 #include "dp_peer.h"
46 #include "dp_rx_mon.h"
47 #include "htt_stats.h"
48 #include "htt_ppdu_stats.h"
49 #include "dp_htt.h"
50 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
51 #include "cfg_ucfg_api.h"
52 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
53 #include "cdp_txrx_flow_ctrl_v2.h"
54 #else
55 static inline void
56 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
57 {
58 	return;
59 }
60 #endif
61 #include "dp_ipa.h"
62 #include "dp_cal_client_api.h"
63 #ifdef CONFIG_MCL
64 extern int con_mode_monitor;
65 #ifndef REMOVE_PKT_LOG
66 #include <pktlog_ac_api.h>
67 #include <pktlog_ac.h>
68 #endif
69 #endif
70 
71 #ifdef WLAN_RX_PKT_CAPTURE_ENH
72 #include "dp_rx_mon_feature.h"
73 #else
74 /*
75  * dp_config_enh_rx_capture() - API to enable/disable enhanced rx capture
76  * @pdev_handle: DP_PDEV handle
77  * @val: user provided value
78  *
79  * Return: QDF_STATUS
80  */
81 static QDF_STATUS
82 dp_config_enh_rx_capture(struct cdp_pdev *pdev_handle, int val)
83 {
84 	return QDF_STATUS_E_INVAL;
85 }
86 #endif
87 
88 #ifdef WLAN_TX_PKT_CAPTURE_ENH
89 #include "dp_tx_capture.h"
90 #else
91 /*
92  * dp_config_enh_tx_capture() - API to enable/disable enhanced tx capture
93  * @pdev_handle: DP_PDEV handle
94  * @val: user provided value
95  *
96  * Return: QDF_STATUS
97  */
98 static QDF_STATUS
99 dp_config_enh_tx_capture(struct cdp_pdev *pdev_handle, int val)
100 {
101 	return QDF_STATUS_E_INVAL;
102 }
103 #endif
104 
105 void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle);
106 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
107 static struct dp_soc *
108 dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
109 	      struct ol_if_ops *ol_ops, uint16_t device_id);
110 static void dp_pktlogmod_exit(struct dp_pdev *handle);
111 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
112 				uint8_t *peer_mac_addr,
113 				struct cdp_ctrl_objmgr_peer *ctrl_peer);
114 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
115 static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
116 static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
117 #ifdef ENABLE_VERBOSE_DEBUG
118 bool is_dp_verbose_debug_enabled;
119 #endif
120 
121 #define DP_INTR_POLL_TIMER_MS	10
122 /* Generic AST entry aging timer value */
123 #define DP_AST_AGING_TIMER_DEFAULT_MS	1000
124 
125 /* WDS AST entry aging timer value */
126 #define DP_WDS_AST_AGING_TIMER_DEFAULT_MS	120000
127 #define DP_WDS_AST_AGING_TIMER_CNT \
128 ((DP_WDS_AST_AGING_TIMER_DEFAULT_MS / DP_AST_AGING_TIMER_DEFAULT_MS) - 1)
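
/*
 * With the defaults above, DP_WDS_AST_AGING_TIMER_CNT evaluates to
 * (120000 / 1000) - 1 = 119; the WDS aging check is thus expected to run
 * after every 119 ticks of the generic 1000 ms AST aging timer, i.e.
 * roughly every two minutes.
 */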
129 #define DP_MCS_LENGTH (6*MAX_MCS)
130 
131 #define DP_CURR_FW_STATS_AVAIL 19
132 #define DP_HTT_DBG_EXT_STATS_MAX 256
133 #define DP_MAX_SLEEP_TIME 100
134 #ifndef QCA_WIFI_3_0_EMU
135 #define SUSPEND_DRAIN_WAIT 500
136 #else
137 #define SUSPEND_DRAIN_WAIT 3000
138 #endif
139 
140 #ifdef IPA_OFFLOAD
141 /* Exclude IPA rings from the interrupt context */
142 #define TX_RING_MASK_VAL	0xb
143 #define RX_RING_MASK_VAL	0x7
144 #else
145 #define TX_RING_MASK_VAL	0xF
146 #define RX_RING_MASK_VAL	0xF
147 #endif
148 
149 #define STR_MAXLEN	64
150 
151 #define RNG_ERR		"SRNG setup failed for"
152 
153 /* Threshold for peer's cached buf queue beyond which frames are dropped */
154 #define DP_RX_CACHED_BUFQ_THRESH 64
155 
156 /**
157  * default_dscp_tid_map - Default DSCP-TID mapping
158  *
159  * DSCP        TID
160  * 000000      0
161  * 001000      1
162  * 010000      2
163  * 011000      3
164  * 100000      4
165  * 101000      5
166  * 110000      6
167  * 111000      7
168  */
169 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
170 	0, 0, 0, 0, 0, 0, 0, 0,
171 	1, 1, 1, 1, 1, 1, 1, 1,
172 	2, 2, 2, 2, 2, 2, 2, 2,
173 	3, 3, 3, 3, 3, 3, 3, 3,
174 	4, 4, 4, 4, 4, 4, 4, 4,
175 	5, 5, 5, 5, 5, 5, 5, 5,
176 	6, 6, 6, 6, 6, 6, 6, 6,
177 	7, 7, 7, 7, 7, 7, 7, 7,
178 };
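
/*
 * Illustrative lookup, assuming the table is indexed directly by the 6-bit
 * DSCP value (as the 8-entry rows above suggest):
 *
 *	uint8_t dscp = 46;				(EF, 0b101110)
 *	uint8_t tid = default_dscp_tid_map[dscp];	(row 46 / 8 = 5)
 *
 * so expedited-forwarding traffic would map to TID 5.
 */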
179 
180 /**
181  * default_pcp_tid_map - Default PCP-TID mapping
182  *
183  * PCP     TID
184  * 000      0
185  * 001      1
186  * 010      2
187  * 011      3
188  * 100      4
189  * 101      5
190  * 110      6
191  * 111      7
192  */
193 static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
194 	0, 1, 2, 3, 4, 5, 6, 7,
195 };
196 
197 /**
198  * @brief CPU to tx ring map
199  */
200 #ifdef CONFIG_WIN
201 #ifdef WLAN_TX_PKT_CAPTURE_ENH
202 uint8_t
203 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
204 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
205 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
206 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
207 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
208 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
209 	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
210 };
211 #else
212 static uint8_t
213 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
214 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
215 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
216 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
217 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
218 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
219 };
220 #endif
221 #else
222 static uint8_t
223 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
224 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
225 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
226 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
227 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
228 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
229 };
230 #endif
231 
232 /**
233  * @brief Select the type of statistics
234  */
235 enum dp_stats_type {
236 	STATS_FW = 0,
237 	STATS_HOST = 1,
238 	STATS_TYPE_MAX = 2,
239 };
240 
241 /**
242  * @brief General Firmware statistics options
243  *
244  */
245 enum dp_fw_stats {
246 	TXRX_FW_STATS_INVALID	= -1,
247 };
248 
249 /**
250  * dp_stats_mapping_table - Firmware and Host statistics
251  * currently supported
252  */
253 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
254 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
255 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
256 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
257 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
258 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
259 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
260 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
261 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
262 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
263 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
264 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
265 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
266 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
267 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
268 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
269 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
270 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
271 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
272 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
273 	/* Last ENUM for HTT FW STATS */
274 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
275 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
276 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
277 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
278 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
279 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
280 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
281 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
282 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
283 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
284 	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
285 	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
286 	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
287 };
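
/*
 * Rows of this table are expected to be indexed by the requested stats id,
 * with the column chosen via enum dp_stats_type; e.g. (illustrative only):
 *
 *	int fw_stat = dp_stats_mapping_table[value][STATS_FW];
 *	int host_stat = dp_stats_mapping_table[value][STATS_HOST];
 *
 * TXRX_FW_STATS_INVALID / TXRX_HOST_STATS_INVALID mark the column that is
 * not supported for a given row.
 */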
288 
289 /* MCL specific functions */
290 #ifdef CONFIG_MCL
291 /**
292  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
293  * @soc: pointer to dp_soc handle
294  * @intr_ctx_num: interrupt context number for which mon mask is needed
295  *
296  * For MCL, monitor mode rings are processed in timer contexts (polled).
297  * This function returns 0, since in interrupt mode (softirq-based RX),
298  * we do not want to process monitor mode rings in a softirq.
299  *
300  * So, when packet log is enabled for SAP/STA/P2P modes,
301  * regular interrupt processing will not process monitor mode rings; they
302  * are processed in a separate timer context instead.
303  *
304  * Return: 0
305  */
306 static inline
307 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
308 {
309 	return 0;
310 }
311 
312 /*
313  * dp_service_mon_rings() - timer handler to reap monitor rings,
314  * required because ppdu end interrupts are not delivered in this mode
315  * @arg: SoC handle
316  *
317  * Return: none
318  *
319  */
320 static void dp_service_mon_rings(void *arg)
321 {
322 	struct dp_soc *soc = (struct dp_soc *)arg;
323 	int ring = 0, work_done, mac_id;
324 	struct dp_pdev *pdev = NULL;
325 
326 	for (ring = 0; ring < MAX_PDEV_CNT; ring++) {
327 		pdev = soc->pdev_list[ring];
328 		if (!pdev)
329 			continue;
330 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
331 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
332 								pdev->pdev_id);
333 			work_done = dp_mon_process(soc, mac_for_pdev,
334 						   QCA_NAPI_BUDGET);
335 
336 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
337 				  FL("Reaped %d descs from Monitor rings"),
338 				  work_done);
339 		}
340 	}
341 
342 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
343 }
344 
345 #ifndef REMOVE_PKT_LOG
346 /**
347  * dp_pkt_log_init() - API to initialize packet log
348  * @ppdev: physical device handle
349  * @scn: HIF context
350  *
351  * Return: none
352  */
353 void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
354 {
355 	struct dp_pdev *handle = (struct dp_pdev *)ppdev;
356 
357 	if (handle->pkt_log_init) {
358 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
359 			  "%s: Packet log not initialized", __func__);
360 		return;
361 	}
362 
363 	pktlog_sethandle(&handle->pl_dev, scn);
364 	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
365 
366 	if (pktlogmod_init(scn)) {
367 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
368 			  "%s: pktlogmod_init failed", __func__);
369 		handle->pkt_log_init = false;
370 	} else {
371 		handle->pkt_log_init = true;
372 	}
373 }
374 
375 /**
376  * dp_pkt_log_con_service() - connect packet log service
377  * @ppdev: physical device handle
378  * @scn: device context
379  *
380  * Return: none
381  */
382 static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
383 {
384 	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
385 
386 	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
387 	pktlog_htc_attach();
388 }
389 
390 /**
391  * dp_get_num_rx_contexts() - get number of RX contexts
392  * @soc_hdl: cdp opaque soc handle
393  *
394  * Return: number of RX contexts
395  */
396 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
397 {
398 	int i;
399 	int num_rx_contexts = 0;
400 
401 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
402 
403 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
404 		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
405 			num_rx_contexts++;
406 
407 	return num_rx_contexts;
408 }
409 
410 /**
411  * dp_pktlogmod_exit() - API to cleanup pktlog info
412  * @handle: Pdev handle
413  *
414  * Return: none
415  */
416 static void dp_pktlogmod_exit(struct dp_pdev *handle)
417 {
418 	void *scn = (void *)handle->soc->hif_handle;
419 
420 	if (!scn) {
421 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
422 			  "%s: Invalid hif(scn) handle", __func__);
423 		return;
424 	}
425 
426 	pktlogmod_exit(scn);
427 	handle->pkt_log_init = false;
428 }
429 #endif
430 #else
431 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
432 
433 /**
434  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
435  * @soc: pointer to dp_soc handle
436  * @intr_ctx_num: interrupt context number for which mon mask is needed
437  *
438  * Return: mon mask value
439  */
440 static inline
441 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
442 {
443 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
444 }
445 #endif
446 
447 /**
448  * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
449  * @cdp_opaque_vdev: pointer to cdp_vdev
450  *
451  * Return: pointer to dp_vdev
452  */
453 static
454 struct dp_vdev *dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
455 {
456 	return (struct dp_vdev *)cdp_opaque_vdev;
457 }
458 
459 
460 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
461 					struct cdp_peer *peer_hdl,
462 					uint8_t *mac_addr,
463 					enum cdp_txrx_ast_entry_type type,
464 					uint32_t flags)
465 {
466 
467 	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
468 				(struct dp_peer *)peer_hdl,
469 				mac_addr,
470 				type,
471 				flags);
472 }
473 
474 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
475 						struct cdp_peer *peer_hdl,
476 						uint8_t *wds_macaddr,
477 						uint32_t flags)
478 {
479 	int status = -1;
480 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
481 	struct dp_ast_entry  *ast_entry = NULL;
482 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
483 
484 	qdf_spin_lock_bh(&soc->ast_lock);
485 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
486 						    peer->vdev->pdev->pdev_id);
487 
488 	if (ast_entry) {
489 		status = dp_peer_update_ast(soc,
490 					    peer,
491 					    ast_entry, flags);
492 	}
493 
494 	qdf_spin_unlock_bh(&soc->ast_lock);
495 
496 	return status;
497 }
498 
499 /*
500  * dp_wds_reset_ast_wifi3() - Delete WDS-HM/HM_SEC ast entries for a peer
501  * @soc_hdl:		Datapath SOC handle
502  * @wds_macaddr:	WDS entry MAC address (or NULL to use peer_mac_addr)
503  * Return: None
504  */
505 static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
506 				   uint8_t *wds_macaddr,
507 				   uint8_t *peer_mac_addr,
508 				   void *vdev_handle)
509 {
510 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
511 	struct dp_ast_entry *ast_entry = NULL;
512 	struct dp_ast_entry *tmp_ast_entry;
513 	struct dp_peer *peer;
514 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
515 	struct dp_pdev *pdev;
516 
517 	if (!vdev)
518 		return;
519 
520 	pdev = vdev->pdev;
521 
522 	if (peer_mac_addr) {
523 		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
524 					      0, vdev->vdev_id);
525 		if (!peer)
526 			return;
527 		qdf_spin_lock_bh(&soc->ast_lock);
528 		DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
529 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
530 			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
531 				dp_peer_del_ast(soc, ast_entry);
532 		}
533 		qdf_spin_unlock_bh(&soc->ast_lock);
534 		dp_peer_unref_delete(peer);
535 
536 	} else if (wds_macaddr) {
537 		qdf_spin_lock_bh(&soc->ast_lock);
538 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
539 							    pdev->pdev_id);
540 
541 		if (ast_entry) {
542 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
543 			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
544 				dp_peer_del_ast(soc, ast_entry);
545 		}
546 		qdf_spin_unlock_bh(&soc->ast_lock);
547 	}
548 }
549 
550 /*
551  * dp_wds_reset_ast_table_wifi3() - Delete all WDS-HM/HM_SEC ast entries
552  * @soc:		Datapath SOC handle
553  *
554  * Return: None
555  */
556 static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
557 					 void *vdev_hdl)
558 {
559 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
560 	struct dp_pdev *pdev;
561 	struct dp_vdev *vdev;
562 	struct dp_peer *peer;
563 	struct dp_ast_entry *ase, *temp_ase;
564 	int i;
565 
566 	qdf_spin_lock_bh(&soc->ast_lock);
567 
568 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
569 		pdev = soc->pdev_list[i];
570 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
571 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
572 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
573 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
574 					if ((ase->type ==
575 						CDP_TXRX_AST_TYPE_WDS_HM) ||
576 					    (ase->type ==
577 						CDP_TXRX_AST_TYPE_WDS_HM_SEC))
578 						dp_peer_del_ast(soc, ase);
579 				}
580 			}
581 		}
582 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
583 	}
584 
585 	qdf_spin_unlock_bh(&soc->ast_lock);
586 }
587 
588 /*
589  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
590  * @soc:		Datapath SOC handle
591  *
592  * Return: None
593  */
594 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
595 {
596 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
597 	struct dp_pdev *pdev;
598 	struct dp_vdev *vdev;
599 	struct dp_peer *peer;
600 	struct dp_ast_entry *ase, *temp_ase;
601 	int i;
602 
603 	qdf_spin_lock_bh(&soc->ast_lock);
604 
605 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
606 		pdev = soc->pdev_list[i];
607 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
608 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
609 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
610 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
611 					if ((ase->type ==
612 						CDP_TXRX_AST_TYPE_STATIC) ||
613 						(ase->type ==
614 						 CDP_TXRX_AST_TYPE_SELF) ||
615 						(ase->type ==
616 						 CDP_TXRX_AST_TYPE_STA_BSS))
617 						continue;
618 					dp_peer_del_ast(soc, ase);
619 				}
620 			}
621 		}
622 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
623 	}
624 
625 	qdf_spin_unlock_bh(&soc->ast_lock);
626 }
627 
628 /**
629  * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
630  *                                       and return ast entry information
631  *                                       of first ast entry found in the
632  *                                       table with given mac address
633  *
634  * @soc : data path soc handle
635  * @ast_mac_addr : AST entry mac address
636  * @ast_entry_info : ast entry information
637  *
638  * return : true if ast entry found with ast_mac_addr
639  *          false if ast entry not found
640  */
641 static bool dp_peer_get_ast_info_by_soc_wifi3
642 	(struct cdp_soc_t *soc_hdl,
643 	 uint8_t *ast_mac_addr,
644 	 struct cdp_ast_entry_info *ast_entry_info)
645 {
646 	struct dp_ast_entry *ast_entry;
647 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
648 
649 	qdf_spin_lock_bh(&soc->ast_lock);
650 
651 	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
652 	if (!ast_entry || !ast_entry->peer) {
653 		qdf_spin_unlock_bh(&soc->ast_lock);
654 		return false;
655 	}
656 	if (ast_entry->delete_in_progress && !ast_entry->callback) {
657 		qdf_spin_unlock_bh(&soc->ast_lock);
658 		return false;
659 	}
660 	ast_entry_info->type = ast_entry->type;
661 	ast_entry_info->pdev_id = ast_entry->pdev_id;
662 	ast_entry_info->vdev_id = ast_entry->vdev_id;
663 	ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
664 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
665 		     &ast_entry->peer->mac_addr.raw[0],
666 		     QDF_MAC_ADDR_SIZE);
667 	qdf_spin_unlock_bh(&soc->ast_lock);
668 	return true;
669 }
670 
671 /**
672  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
673  *                                          and return ast entry information
674  *                                          if mac address and pdev_id matches
675  *
676  * @soc : data path soc handle
677  * @ast_mac_addr : AST entry mac address
678  * @pdev_id : pdev_id
679  * @ast_entry_info : ast entry information
680  *
681  * return : true if ast entry found with ast_mac_addr
682  *          false if ast entry not found
683  */
684 static bool dp_peer_get_ast_info_by_pdevid_wifi3
685 		(struct cdp_soc_t *soc_hdl,
686 		 uint8_t *ast_mac_addr,
687 		 uint8_t pdev_id,
688 		 struct cdp_ast_entry_info *ast_entry_info)
689 {
690 	struct dp_ast_entry *ast_entry;
691 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
692 
693 	qdf_spin_lock_bh(&soc->ast_lock);
694 
695 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);
696 
697 	if (!ast_entry || !ast_entry->peer) {
698 		qdf_spin_unlock_bh(&soc->ast_lock);
699 		return false;
700 	}
701 	if (ast_entry->delete_in_progress && !ast_entry->callback) {
702 		qdf_spin_unlock_bh(&soc->ast_lock);
703 		return false;
704 	}
705 	ast_entry_info->type = ast_entry->type;
706 	ast_entry_info->pdev_id = ast_entry->pdev_id;
707 	ast_entry_info->vdev_id = ast_entry->vdev_id;
708 	ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
709 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
710 		     &ast_entry->peer->mac_addr.raw[0],
711 		     QDF_MAC_ADDR_SIZE);
712 	qdf_spin_unlock_bh(&soc->ast_lock);
713 	return true;
714 }
715 
716 /**
717  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
718  *                            with given mac address
719  *
720  * @soc : data path soc handle
721  * @ast_mac_addr : AST entry mac address
722  * @callback : callback function to be called on ast delete response from FW
723  * @cookie : argument to be passed to callback
724  *
725  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
726  *          is sent
727  *          QDF_STATUS_E_INVAL if ast entry not found
728  */
729 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
730 					       uint8_t *mac_addr,
731 					       txrx_ast_free_cb callback,
732 					       void *cookie)
733 
734 {
735 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
736 	struct dp_ast_entry *ast_entry;
737 	txrx_ast_free_cb cb = NULL;
738 	void *arg = NULL;
739 
740 	qdf_spin_lock_bh(&soc->ast_lock);
741 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
742 	if (!ast_entry) {
743 		qdf_spin_unlock_bh(&soc->ast_lock);
744 		return QDF_STATUS_E_INVAL;
745 	}
746 
747 	if (ast_entry->callback) {
748 		cb = ast_entry->callback;
749 		arg = ast_entry->cookie;
750 	}
751 
752 	ast_entry->callback = callback;
753 	ast_entry->cookie = cookie;
754 
755 	/*
756 	 * If delete_in_progress is set, an AST delete was already sent to
757 	 * the target and the host is waiting for a response; do not send
758 	 * the delete again.
759 	 */
760 	if (!ast_entry->delete_in_progress)
761 		dp_peer_del_ast(soc, ast_entry);
762 
763 	qdf_spin_unlock_bh(&soc->ast_lock);
764 	if (cb) {
765 		cb(soc->ctrl_psoc,
766 		   soc,
767 		   arg,
768 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
769 	}
770 	return QDF_STATUS_SUCCESS;
771 }
772 
773 /**
774  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
775  *                                   table if mac address and pdev_id matches
776  *
777  * @soc : data path soc handle
778  * @ast_mac_addr : AST entry mac address
779  * @pdev_id : pdev id
780  * @callback : callback function to be called on ast delete response from FW
781  * @cookie : argument to be passed to callback
782  *
783  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
784  *          is sent
785  *          QDF_STATUS_E_INVAL if ast entry not found
786  */
787 
788 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
789 						uint8_t *mac_addr,
790 						uint8_t pdev_id,
791 						txrx_ast_free_cb callback,
792 						void *cookie)
793 
794 {
795 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
796 	struct dp_ast_entry *ast_entry;
797 	txrx_ast_free_cb cb = NULL;
798 	void *arg = NULL;
799 
800 	qdf_spin_lock_bh(&soc->ast_lock);
801 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
802 
803 	if (!ast_entry) {
804 		qdf_spin_unlock_bh(&soc->ast_lock);
805 		return QDF_STATUS_E_INVAL;
806 	}
807 
808 	if (ast_entry->callback) {
809 		cb = ast_entry->callback;
810 		arg = ast_entry->cookie;
811 	}
812 
813 	ast_entry->callback = callback;
814 	ast_entry->cookie = cookie;
815 
816 	/*
817 	 * If delete_in_progress is set, an AST delete was already sent to
818 	 * the target and the host is waiting for a response; do not send
819 	 * the delete again.
820 	 */
821 	if (!ast_entry->delete_in_progress)
822 		dp_peer_del_ast(soc, ast_entry);
823 
824 	qdf_spin_unlock_bh(&soc->ast_lock);
825 
826 	if (cb) {
827 		cb(soc->ctrl_psoc,
828 		   soc,
829 		   arg,
830 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
831 	}
832 	return QDF_STATUS_SUCCESS;
833 }
834 
835 /**
836  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
837  * @ring_num: ring num of the ring being queried
838  * @grp_mask: the grp_mask array for the ring type in question.
839  *
840  * The grp_mask array is indexed by group number and the bit fields correspond
841  * to ring numbers.  We are finding which interrupt group a ring belongs to.
842  *
843  * Return: the index (group number) in the grp_mask array whose mask
844  * contains the given ring number, or -QDF_STATUS_E_NOENT if none is found
845  */
846 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
847 {
848 	int ext_group_num;
849 	int mask = 1 << ring_num;
850 
851 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
852 	     ext_group_num++) {
853 		if (mask & grp_mask[ext_group_num])
854 			return ext_group_num;
855 	}
856 
857 	return -QDF_STATUS_E_NOENT;
858 }
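
/*
 * Worked example: for ring_num = 2, mask = 1 << 2 = 0x4. With
 * grp_mask[] = { 0x3, 0xC, ... }, group 0 (0x3) does not contain bit 0x4
 * while group 1 (0xC) does, so the function returns ext_group_num = 1.
 */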
859 
860 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
861 				       enum hal_ring_type ring_type,
862 				       int ring_num)
863 {
864 	int *grp_mask;
865 
866 	switch (ring_type) {
867 	case WBM2SW_RELEASE:
868 		/* dp_tx_comp_handler - soc->tx_comp_ring */
869 		if (ring_num < 3)
870 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
871 
872 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
873 		else if (ring_num == 3) {
874 			/* sw treats this as a separate ring type */
875 			grp_mask = &soc->wlan_cfg_ctx->
876 				int_rx_wbm_rel_ring_mask[0];
877 			ring_num = 0;
878 		} else {
879 			qdf_assert(0);
880 			return -QDF_STATUS_E_NOENT;
881 		}
882 	break;
883 
884 	case REO_EXCEPTION:
885 		/* dp_rx_err_process - &soc->reo_exception_ring */
886 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
887 	break;
888 
889 	case REO_DST:
890 		/* dp_rx_process - soc->reo_dest_ring */
891 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
892 	break;
893 
894 	case REO_STATUS:
895 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
896 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
897 	break;
898 
899 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
900 	case RXDMA_MONITOR_STATUS:
901 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
902 	case RXDMA_MONITOR_DST:
903 		/* dp_mon_process */
904 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
905 	break;
906 	case RXDMA_DST:
907 		/* dp_rxdma_err_process */
908 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
909 	break;
910 
911 	case RXDMA_BUF:
912 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
913 	break;
914 
915 	case RXDMA_MONITOR_BUF:
916 		/* TODO: support low_thresh interrupt */
917 		return -QDF_STATUS_E_NOENT;
918 	break;
919 
920 	case TCL_DATA:
921 	case TCL_CMD:
922 	case REO_CMD:
923 	case SW2WBM_RELEASE:
924 	case WBM_IDLE_LINK:
925 		/* normally empty SW_TO_HW rings */
926 		return -QDF_STATUS_E_NOENT;
927 	break;
928 
929 	case TCL_STATUS:
930 	case REO_REINJECT:
931 		/* misc unused rings */
932 		return -QDF_STATUS_E_NOENT;
933 	break;
934 
935 	case CE_SRC:
936 	case CE_DST:
937 	case CE_DST_STATUS:
938 		/* CE_rings - currently handled by hif */
939 	default:
940 		return -QDF_STATUS_E_NOENT;
941 	break;
942 	}
943 
944 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
945 }
946 
947 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
948 			      *ring_params, int ring_type, int ring_num)
949 {
950 	int msi_group_number;
951 	int msi_data_count;
952 	int ret;
953 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
954 
955 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
956 					    &msi_data_count, &msi_data_start,
957 					    &msi_irq_start);
958 
959 	if (ret)
960 		return;
961 
962 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
963 						       ring_num);
964 	if (msi_group_number < 0) {
965 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
966 			FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
967 			ring_type, ring_num);
968 		ring_params->msi_addr = 0;
969 		ring_params->msi_data = 0;
970 		return;
971 	}
972 
973 	if (msi_group_number > msi_data_count) {
974 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
975 			FL("2 msi_groups will share an msi; msi_group_num %d"),
976 			msi_group_number);
977 
978 		QDF_ASSERT(0);
979 	}
980 
981 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
982 
983 	ring_params->msi_addr = addr_low;
984 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
985 	ring_params->msi_data = (msi_group_number % msi_data_count)
986 		+ msi_data_start;
987 	ring_params->flags |= HAL_SRNG_MSI_INTR;
988 }
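
/*
 * Numeric sketch of the msi_data assignment above: with msi_data_count = 3,
 * msi_data_start = 1 and a ring mapped to msi_group_number = 4, the ring is
 * programmed with msi_data = (4 % 3) + 1 = 2; group numbers beyond the
 * vector count wrap around and share an MSI, which is what the warning and
 * QDF_ASSERT above flag.
 */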
989 
990 /**
991  * dp_print_ast_stats() - Dump AST table contents
992  * @soc: Datapath soc handle
993  *
994  * return void
995  */
996 #ifdef FEATURE_AST
997 void dp_print_ast_stats(struct dp_soc *soc)
998 {
999 	uint8_t i;
1000 	uint8_t num_entries = 0;
1001 	struct dp_vdev *vdev;
1002 	struct dp_pdev *pdev;
1003 	struct dp_peer *peer;
1004 	struct dp_ast_entry *ase, *tmp_ase;
1005 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1006 			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
1007 			"DA", "HMWDS_SEC"};
1008 
1009 	DP_PRINT_STATS("AST Stats:");
1010 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1011 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1012 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1013 	DP_PRINT_STATS("AST Table:");
1014 
1015 	qdf_spin_lock_bh(&soc->ast_lock);
1016 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1017 		pdev = soc->pdev_list[i];
1018 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1019 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1020 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1021 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1022 					DP_PRINT_STATS("%6d mac_addr = %pM"
1023 							" peer_mac_addr = %pM"
1024 							" peer_id = %u"
1025 							" type = %s"
1026 							" next_hop = %d"
1027 							" is_active = %d"
1028 							" is_bss = %d"
1029 							" ast_idx = %d"
1030 							" ast_hash = %d"
1031 							" delete_in_progress = %d"
1032 							" pdev_id = %d"
1033 							" vdev_id = %d",
1034 							++num_entries,
1035 							ase->mac_addr.raw,
1036 							ase->peer->mac_addr.raw,
1037 							ase->peer->peer_ids[0],
1038 							type[ase->type],
1039 							ase->next_hop,
1040 							ase->is_active,
1041 							ase->is_bss,
1042 							ase->ast_idx,
1043 							ase->ast_hash_value,
1044 							ase->delete_in_progress,
1045 							ase->pdev_id,
1046 							ase->vdev_id);
1047 				}
1048 			}
1049 		}
1050 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1051 	}
1052 	qdf_spin_unlock_bh(&soc->ast_lock);
1053 }
1054 #else
1055 void dp_print_ast_stats(struct dp_soc *soc)
1056 {
1057 	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
1058 	return;
1059 }
1060 #endif
1061 
1062 /**
1063  *  dp_print_peer_table() - Dump all Peer stats
1064  * @vdev: Datapath Vdev handle
1065  *
1066  * return void
1067  */
1068 static void dp_print_peer_table(struct dp_vdev *vdev)
1069 {
1070 	struct dp_peer *peer = NULL;
1071 
1072 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1073 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1074 		if (!peer) {
1075 			DP_PRINT_STATS("Invalid Peer");
1076 			return;
1077 		}
1078 		DP_PRINT_STATS("    peer_mac_addr = %pM"
1079 			       " nawds_enabled = %d"
1080 			       " bss_peer = %d"
1081 			       " wapi = %d"
1082 			       " wds_enabled = %d"
1083 			       " delete in progress = %d"
1084 			       " peer id = %d",
1085 			       peer->mac_addr.raw,
1086 			       peer->nawds_enabled,
1087 			       peer->bss_peer,
1088 			       peer->wapi,
1089 			       peer->wds_enabled,
1090 			       peer->delete_in_progress,
1091 			       peer->peer_ids[0]);
1092 	}
1093 }
1094 
1095 /*
1096  * dp_srng_mem_alloc() - Allocate memory for SRNG
1097  * @soc  : Data path soc handle
1098  * @srng : SRNG pointer
1099  * @align : Align size
1100  *
1101  * return: QDF_STATUS_SUCCESS on successful allocation
1102  *         QDF_STATUS_E_NOMEM on failure
1103  */
1104 static QDF_STATUS
1105 dp_srng_mem_alloc(struct dp_soc *soc, struct dp_srng *srng, uint32_t align)
1106 {
1107 	srng->base_vaddr_unaligned =
1108 		qdf_mem_alloc_consistent(soc->osdev,
1109 					 soc->osdev->dev,
1110 					 srng->alloc_size,
1111 					 &srng->base_paddr_unaligned);
1112 	if (!srng->base_vaddr_unaligned) {
1113 		return QDF_STATUS_E_NOMEM;
1114 	}
1115 
1116 	/* Re-allocate additional bytes to align base address only if
1117 	 * above allocation returns unaligned address. Reason for
1118 	 * trying exact size allocation above is, OS tries to allocate
1119 	 * blocks of size power-of-2 pages and then free extra pages.
1120 	 * e.g., for a ring size of 1 MB, the allocation below would
1121 	 * request 1 MB plus 7 bytes for alignment, which would cause a
1122 	 * 2 MB block allocation, and that sometimes fails due to
1123 	 * memory fragmentation.
1124 	 * dp_srng_mem_alloc should be replaced with
1125 	 * qdf_aligned_mem_alloc_consistent after fixing some known
1126 	 * shortcomings with this QDF function
1127 	 */
1128 	if ((unsigned long)(srng->base_paddr_unaligned) &
1129 	    (align - 1)) {
1130 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1131 					srng->alloc_size,
1132 					srng->base_vaddr_unaligned,
1133 					srng->base_paddr_unaligned, 0);
1134 		srng->alloc_size = srng->alloc_size + align - 1;
1135 		srng->base_vaddr_unaligned =
1136 			qdf_mem_alloc_consistent(soc->osdev,
1137 						 soc->osdev->dev,
1138 						 srng->alloc_size,
1139 						 &srng->base_paddr_unaligned);
1140 
1141 		if (!srng->base_vaddr_unaligned) {
1142 			return QDF_STATUS_E_NOMEM;
1143 		}
1144 	}
1145 	return QDF_STATUS_SUCCESS;
1146 }
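
/*
 * Alignment sketch: with align = 8, a first allocation whose physical base
 * ends in, say, 0x...4 fails the (paddr & (align - 1)) check above; the
 * buffer is then freed and re-requested with alloc_size + 7 bytes, and the
 * caller (dp_srng_setup) rounds the new base up via qdf_align() when
 * programming the ring.
 */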
1147 
1148 
1149 /*
1150  * dp_srng_setup - Internal function to set up SRNG rings used by the data path
1151  */
1152 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
1153 	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
1154 {
1155 	void *hal_soc = soc->hal_soc;
1156 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
1157 	/* TODO: See if we should get align size from hal */
1158 	uint32_t ring_base_align = 8;
1159 	struct hal_srng_params ring_params;
1160 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
1161 
1162 	/* TODO: Currently hal layer takes care of endianness related settings.
1163 	 * See if these settings need to passed from DP layer
1164 	 */
1165 	ring_params.flags = 0;
1166 
1167 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
1168 	srng->hal_srng = NULL;
1169 	srng->alloc_size = num_entries * entry_size;
1170 	srng->num_entries = num_entries;
1171 
1172 	if (!dp_is_soc_reinit(soc)) {
1173 		if (dp_srng_mem_alloc(soc, srng, ring_base_align) !=
1174 		    QDF_STATUS_SUCCESS) {
1175 			dp_err("alloc failed - ring_type: %d, ring_num %d",
1176 			       ring_type, ring_num);
1177 			return QDF_STATUS_E_NOMEM;
1178 		}
1179 	}
1180 
1181 	ring_params.ring_base_paddr =
1182 		(qdf_dma_addr_t)qdf_align(
1183 			(unsigned long)(srng->base_paddr_unaligned),
1184 			ring_base_align);
1185 
1186 	ring_params.ring_base_vaddr =
1187 		(void *)((unsigned long)(srng->base_vaddr_unaligned) +
1188 			((unsigned long)(ring_params.ring_base_paddr) -
1189 			(unsigned long)(srng->base_paddr_unaligned)));
1190 
1191 	ring_params.num_entries = num_entries;
1192 
1193 	dp_verbose_debug("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
1194 			 ring_type, ring_num,
1195 			 (void *)ring_params.ring_base_vaddr,
1196 			 (void *)ring_params.ring_base_paddr,
1197 			 ring_params.num_entries);
1198 
1199 	if (soc->intr_mode == DP_INTR_MSI) {
1200 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
1201 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
1202 				 ring_type, ring_num);
1203 
1204 	} else {
1205 		ring_params.msi_data = 0;
1206 		ring_params.msi_addr = 0;
1207 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
1208 				 ring_type, ring_num);
1209 	}
1210 
1211 	/*
1212 	 * Setup interrupt timer and batch counter thresholds for
1213 	 * interrupt mitigation based on ring type
1214 	 */
1215 	if (ring_type == REO_DST) {
1216 		ring_params.intr_timer_thres_us =
1217 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1218 		ring_params.intr_batch_cntr_thres_entries =
1219 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1220 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
1221 		ring_params.intr_timer_thres_us =
1222 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1223 		ring_params.intr_batch_cntr_thres_entries =
1224 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1225 	} else {
1226 		ring_params.intr_timer_thres_us =
1227 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1228 		ring_params.intr_batch_cntr_thres_entries =
1229 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1230 	}
1231 
1232 	/* Enable low threshold interrupts for rx buffer rings (regular and
1233 	 * monitor buffer rings.
1234 	 * TODO: See if this is required for any other ring
1235 	 */
1236 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
1237 		(ring_type == RXDMA_MONITOR_STATUS)) {
1238 		/* TODO: Setting low threshold to 1/8th of ring size
1239 		 * see if this needs to be configurable
1240 		 */
1241 		ring_params.low_threshold = num_entries >> 3;
1242 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1243 		ring_params.intr_timer_thres_us =
1244 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1245 		ring_params.intr_batch_cntr_thres_entries = 0;
1246 	}
1247 
1248 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
1249 		mac_id, &ring_params);
1250 
1251 	if (!srng->hal_srng) {
1252 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1253 				srng->alloc_size, srng->base_vaddr_unaligned,
1254 				srng->base_paddr_unaligned, 0);
1255 		return QDF_STATUS_E_FAILURE;
1256 	}
1257 
1258 	return 0;
1259 }
1260 
1261 /*
1262  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
1263  * @soc: DP SOC handle
1264  * @srng: source ring structure
1265  * @ring_type: type of ring
1266  * @ring_num: ring number
1267  *
1268  * Return: None
1269  */
1270 static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
1271 			   int ring_type, int ring_num)
1272 {
1273 	if (!srng->hal_srng) {
1274 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1275 			  FL("Ring type: %d, num:%d not setup"),
1276 			  ring_type, ring_num);
1277 		return;
1278 	}
1279 
1280 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1281 	srng->hal_srng = NULL;
1282 }
1283 
1284 /**
1285  * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
1286  * Any buffers allocated and attached to ring entries are expected to be freed
1287  * before calling this function.
1288  */
1289 static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
1290 	int ring_type, int ring_num)
1291 {
1292 	if (!dp_is_soc_reinit(soc)) {
1293 		if (!srng->hal_srng && (srng->alloc_size == 0)) {
1294 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1295 				  FL("Ring type: %d, num:%d not setup"),
1296 				  ring_type, ring_num);
1297 			return;
1298 		}
1299 
1300 		if (srng->hal_srng) {
1301 			hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1302 			srng->hal_srng = NULL;
1303 		}
1304 	}
1305 
1306 	if (srng->alloc_size) {
1307 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1308 					srng->alloc_size,
1309 					srng->base_vaddr_unaligned,
1310 					srng->base_paddr_unaligned, 0);
1311 		srng->alloc_size = 0;
1312 	}
1313 }
1314 
1315 /* TODO: Need this interface from HIF */
1316 void *hif_get_hal_handle(void *hif_handle);
1317 
1318 /*
1319  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1320  * @dp_ctx: DP SOC handle
1321  * @budget: Number of frames/descriptors that can be processed in one shot
1322  *
1323  * Return: remaining budget/quota for the soc device
1324  */
1325 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
1326 {
1327 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1328 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
1329 	struct dp_soc *soc = int_ctx->soc;
1330 	int ring = 0;
1331 	uint32_t work_done  = 0;
1332 	int budget = dp_budget;
1333 	uint8_t tx_mask = int_ctx->tx_ring_mask;
1334 	uint8_t rx_mask = int_ctx->rx_ring_mask;
1335 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1336 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
1337 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1338 	uint32_t remaining_quota = dp_budget;
1339 	struct dp_pdev *pdev = NULL;
1340 	int mac_id;
1341 
1342 	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
1343 			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
1344 			 reo_status_mask,
1345 			 int_ctx->rx_mon_ring_mask,
1346 			 int_ctx->host2rxdma_ring_mask,
1347 			 int_ctx->rxdma2host_ring_mask);
1348 
1349 	/* Process Tx completion interrupts first to return back buffers */
1350 	while (tx_mask) {
1351 		if (tx_mask & 0x1) {
1352 			work_done = dp_tx_comp_handler(int_ctx,
1353 						       soc,
1354 						       soc->tx_comp_ring[ring].hal_srng,
1355 						       remaining_quota);
1356 
1357 			if (work_done) {
1358 				intr_stats->num_tx_ring_masks[ring]++;
1359 				dp_verbose_debug("tx mask 0x%x ring %d, budget %d, work_done %d",
1360 						 tx_mask, ring, budget,
1361 						 work_done);
1362 			}
1363 
1364 			budget -= work_done;
1365 			if (budget <= 0)
1366 				goto budget_done;
1367 
1368 			remaining_quota = budget;
1369 		}
1370 		tx_mask = tx_mask >> 1;
1371 		ring++;
1372 	}
1373 
1374 	/* Process REO Exception ring interrupt */
1375 	if (rx_err_mask) {
1376 		work_done = dp_rx_err_process(soc,
1377 				soc->reo_exception_ring.hal_srng,
1378 				remaining_quota);
1379 
1380 		if (work_done) {
1381 			intr_stats->num_rx_err_ring_masks++;
1382 			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
1383 					 work_done, budget);
1384 		}
1385 
1386 		budget -= work_done;
1387 		if (budget <= 0) {
1388 			goto budget_done;
1389 		}
1390 		remaining_quota = budget;
1391 	}
1392 
1393 	/* Process Rx WBM release ring interrupt */
1394 	if (rx_wbm_rel_mask) {
1395 		work_done = dp_rx_wbm_err_process(soc,
1396 				soc->rx_rel_ring.hal_srng, remaining_quota);
1397 
1398 		if (work_done) {
1399 			intr_stats->num_rx_wbm_rel_ring_masks++;
1400 			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
1401 					 work_done, budget);
1402 		}
1403 
1404 		budget -= work_done;
1405 		if (budget <= 0) {
1406 			goto budget_done;
1407 		}
1408 		remaining_quota = budget;
1409 	}
1410 
1411 	/* Process Rx interrupts */
1412 	if (rx_mask) {
1413 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1414 			if (!(rx_mask & (1 << ring)))
1415 				continue;
1416 			work_done = dp_rx_process(int_ctx,
1417 						  soc->reo_dest_ring[ring].hal_srng,
1418 						  ring,
1419 						  remaining_quota);
1420 			if (work_done) {
1421 				intr_stats->num_rx_ring_masks[ring]++;
1422 				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
1423 						 rx_mask, ring,
1424 						 work_done, budget);
1425 				budget -= work_done;
1426 				if (budget <= 0)
1427 					goto budget_done;
1428 				remaining_quota = budget;
1429 			}
1430 		}
1431 	}
1432 
1433 	if (reo_status_mask) {
1434 		if (dp_reo_status_ring_handler(soc))
1435 			int_ctx->intr_stats.num_reo_status_ring_masks++;
1436 	}
1437 
1438 	/* Process LMAC interrupts */
1439 	for (ring = 0; ring < MAX_PDEV_CNT; ring++) {
1440 		pdev = soc->pdev_list[ring];
1441 		if (!pdev)
1442 			continue;
1443 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1444 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
1445 								pdev->pdev_id);
1446 			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1447 				work_done = dp_mon_process(soc, mac_for_pdev,
1448 							   remaining_quota);
1449 				if (work_done)
1450 					intr_stats->num_rx_mon_ring_masks++;
1451 				budget -= work_done;
1452 				if (budget <= 0)
1453 					goto budget_done;
1454 				remaining_quota = budget;
1455 			}
1456 
1457 			if (int_ctx->rxdma2host_ring_mask &
1458 					(1 << mac_for_pdev)) {
1459 				work_done = dp_rxdma_err_process(soc,
1460 								 mac_for_pdev,
1461 								 remaining_quota);
1462 				if (work_done)
1463 					intr_stats->num_rxdma2host_ring_masks++;
1464 				budget -= work_done;
1465 				if (budget <= 0)
1466 					goto budget_done;
1467 				remaining_quota = budget;
1468 			}
1469 
1470 			if (int_ctx->host2rxdma_ring_mask &
1471 						(1 << mac_for_pdev)) {
1472 				union dp_rx_desc_list_elem_t *desc_list = NULL;
1473 				union dp_rx_desc_list_elem_t *tail = NULL;
1474 				struct dp_srng *rx_refill_buf_ring =
1475 					&pdev->rx_refill_buf_ring;
1476 
1477 				intr_stats->num_host2rxdma_ring_masks++;
1478 				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1479 						1);
1480 				dp_rx_buffers_replenish(soc, mac_for_pdev,
1481 							rx_refill_buf_ring,
1482 							&soc->rx_desc_buf[mac_for_pdev],
1483 							0, &desc_list, &tail);
1484 			}
1485 		}
1486 	}
1487 
1488 	qdf_lro_flush(int_ctx->lro_ctx);
1489 	intr_stats->num_masks++;
1490 
1491 budget_done:
1492 	return dp_budget - budget;
1493 }
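
/*
 * Budget walk-through (illustrative): with dp_budget = 64, if Tx completion
 * processing reaps 20 descriptors and REO exception processing reaps 10,
 * remaining_quota drops 64 -> 44 -> 34 for the later stages, and the
 * function returns dp_budget - budget = 30, i.e. the work actually done.
 */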
1494 
1495 /* dp_interrupt_timer() - timer handler to poll for DP interrupts
1496  *
1497  * @arg: SoC Handle
1498  *
1499  * Return: none
1500  *
1501  */
1502 static void dp_interrupt_timer(void *arg)
1503 {
1504 	struct dp_soc *soc = (struct dp_soc *) arg;
1505 	int i;
1506 
1507 	if (qdf_atomic_read(&soc->cmn_init_done)) {
1508 		for (i = 0;
1509 			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1510 			dp_service_srngs(&soc->intr_ctx[i], 0xffff);
1511 
1512 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1513 	}
1514 }
1515 
1516 /*
1517  * dp_soc_attach_poll() - Register handlers for DP interrupts
1518  * @txrx_soc: DP SOC handle
1519  *
1520  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1521  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1522  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1523  *
1524  * Return: 0 for success, nonzero for failure.
1525  */
1526 static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
1527 {
1528 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1529 	int i;
1530 
1531 	soc->intr_mode = DP_INTR_POLL;
1532 
1533 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1534 		soc->intr_ctx[i].dp_intr_id = i;
1535 		soc->intr_ctx[i].tx_ring_mask =
1536 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1537 		soc->intr_ctx[i].rx_ring_mask =
1538 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1539 		soc->intr_ctx[i].rx_mon_ring_mask =
1540 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1541 		soc->intr_ctx[i].rx_err_ring_mask =
1542 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1543 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1544 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1545 		soc->intr_ctx[i].reo_status_ring_mask =
1546 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1547 		soc->intr_ctx[i].rxdma2host_ring_mask =
1548 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1549 		soc->intr_ctx[i].soc = soc;
1550 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1551 	}
1552 
1553 	qdf_timer_init(soc->osdev, &soc->int_timer,
1554 			dp_interrupt_timer, (void *)soc,
1555 			QDF_TIMER_TYPE_WAKE_APPS);
1556 
1557 	return QDF_STATUS_SUCCESS;
1558 }
1559 
1560 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
1561 #if defined(CONFIG_MCL)
1562 /*
1563  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1564  * @txrx_soc: DP SOC handle
1565  *
1566  * Call the appropriate attach function based on the mode of operation.
1567  * This is a workaround (WAR) for enabling monitor mode.
1568  *
1569  * Return: 0 for success. nonzero for failure.
1570  */
1571 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1572 {
1573 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1574 
1575 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1576 	     con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
1577 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1578 				  "%s: Poll mode", __func__);
1579 		return dp_soc_attach_poll(txrx_soc);
1580 	} else {
1581 
1582 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1583 				  "%s: Interrupt  mode", __func__);
1584 		return dp_soc_interrupt_attach(txrx_soc);
1585 	}
1586 }
1587 #else
1588 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
1589 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1590 {
1591 	return dp_soc_attach_poll(txrx_soc);
1592 }
1593 #else
1594 static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
1595 {
1596 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1597 
1598 	if (hif_is_polled_mode_enabled(soc->hif_handle))
1599 		return dp_soc_attach_poll(txrx_soc);
1600 	else
1601 		return dp_soc_interrupt_attach(txrx_soc);
1602 }
1603 #endif
1604 #endif
1605 
1606 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1607 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1608 {
1609 	int j;
1610 	int num_irq = 0;
1611 
1612 	int tx_mask =
1613 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1614 	int rx_mask =
1615 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1616 	int rx_mon_mask =
1617 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1618 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1619 					soc->wlan_cfg_ctx, intr_ctx_num);
1620 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1621 					soc->wlan_cfg_ctx, intr_ctx_num);
1622 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1623 					soc->wlan_cfg_ctx, intr_ctx_num);
1624 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1625 					soc->wlan_cfg_ctx, intr_ctx_num);
1626 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1627 					soc->wlan_cfg_ctx, intr_ctx_num);
1628 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
1629 					soc->wlan_cfg_ctx, intr_ctx_num);
1630 
1631 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1632 
1633 		if (tx_mask & (1 << j)) {
1634 			irq_id_map[num_irq++] =
1635 				(wbm2host_tx_completions_ring1 - j);
1636 		}
1637 
1638 		if (rx_mask & (1 << j)) {
1639 			irq_id_map[num_irq++] =
1640 				(reo2host_destination_ring1 - j);
1641 		}
1642 
1643 		if (rxdma2host_ring_mask & (1 << j)) {
1644 			irq_id_map[num_irq++] =
1645 				rxdma2host_destination_ring_mac1 -
1646 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1647 		}
1648 
1649 		if (host2rxdma_ring_mask & (1 << j)) {
1650 			irq_id_map[num_irq++] =
1651 				host2rxdma_host_buf_ring_mac1 -
1652 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1653 		}
1654 
1655 		if (host2rxdma_mon_ring_mask & (1 << j)) {
1656 			irq_id_map[num_irq++] =
1657 				host2rxdma_monitor_ring1 -
1658 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1659 		}
1660 
1661 		if (rx_mon_mask & (1 << j)) {
1662 			irq_id_map[num_irq++] =
1663 				ppdu_end_interrupts_mac1 -
1664 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1665 			irq_id_map[num_irq++] =
1666 				rxdma2host_monitor_status_ring_mac1 -
1667 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1668 		}
1669 
1670 		if (rx_wbm_rel_ring_mask & (1 << j))
1671 			irq_id_map[num_irq++] = wbm2host_rx_release;
1672 
1673 		if (rx_err_ring_mask & (1 << j))
1674 			irq_id_map[num_irq++] = reo2host_exception;
1675 
1676 		if (reo_status_ring_mask & (1 << j))
1677 			irq_id_map[num_irq++] = reo2host_status;
1678 
1679 	}
1680 	*num_irq_r = num_irq;
1681 }
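
/*
 * Example (integrated targets): a context with tx_mask = 0x2 and
 * rx_mask = 0x1 would collect irq_id_map[] =
 * { wbm2host_tx_completions_ring1 - 1, reo2host_destination_ring1 - 0 },
 * i.e. one legacy IRQ line per ring bit rather than one MSI per context.
 */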
1682 
1683 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1684 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1685 		int msi_vector_count, int msi_vector_start)
1686 {
1687 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1688 					soc->wlan_cfg_ctx, intr_ctx_num);
1689 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1690 					soc->wlan_cfg_ctx, intr_ctx_num);
1691 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1692 					soc->wlan_cfg_ctx, intr_ctx_num);
1693 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1694 					soc->wlan_cfg_ctx, intr_ctx_num);
1695 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1696 					soc->wlan_cfg_ctx, intr_ctx_num);
1697 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1698 					soc->wlan_cfg_ctx, intr_ctx_num);
1699 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1700 					soc->wlan_cfg_ctx, intr_ctx_num);
1701 
1702 	unsigned int vector =
1703 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1704 	int num_irq = 0;
1705 
1706 	soc->intr_mode = DP_INTR_MSI;
1707 
1708 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1709 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1710 		irq_id_map[num_irq++] =
1711 			pld_get_msi_irq(soc->osdev->dev, vector);
1712 
1713 	*num_irq_r = num_irq;
1714 }
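
/*
 * Vector assignment sketch: with msi_vector_count = 3 and
 * msi_vector_start = 1, interrupt contexts 0..7 map to MSI vectors
 * 1, 2, 3, 1, 2, 3, 1, 2 via (intr_ctx_num % 3) + 1, so all ring masks of
 * one context share the single IRQ returned by pld_get_msi_irq().
 */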
1715 
1716 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1717 				    int *irq_id_map, int *num_irq)
1718 {
1719 	int msi_vector_count, ret;
1720 	uint32_t msi_base_data, msi_vector_start;
1721 
1722 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1723 					    &msi_vector_count,
1724 					    &msi_base_data,
1725 					    &msi_vector_start);
1726 	if (ret)
1727 		dp_soc_interrupt_map_calculate_integrated(soc,
1728 				intr_ctx_num, irq_id_map, num_irq);
1729 
1730 	else
1731 		dp_soc_interrupt_map_calculate_msi(soc,
1732 				intr_ctx_num, irq_id_map, num_irq,
1733 				msi_vector_count, msi_vector_start);
1734 }
1735 
1736 /*
1737  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1738  * @txrx_soc: DP SOC handle
1739  *
1740  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1741  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1742  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1743  *
1744  * Return: 0 for success, nonzero for failure.
1745  */
1746 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1747 {
1748 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1749 
1750 	int i = 0;
1751 	int num_irq = 0;
1752 
1753 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1754 		int ret = 0;
1755 
1756 		/* Map of IRQ ids registered with one interrupt context */
1757 		int irq_id_map[HIF_MAX_GRP_IRQ];
1758 
1759 		int tx_mask =
1760 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1761 		int rx_mask =
1762 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1763 		int rx_mon_mask =
1764 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1765 		int rx_err_ring_mask =
1766 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1767 		int rx_wbm_rel_ring_mask =
1768 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1769 		int reo_status_ring_mask =
1770 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1771 		int rxdma2host_ring_mask =
1772 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1773 		int host2rxdma_ring_mask =
1774 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1775 		int host2rxdma_mon_ring_mask =
1776 			wlan_cfg_get_host2rxdma_mon_ring_mask(
1777 				soc->wlan_cfg_ctx, i);
1778 
1779 		soc->intr_ctx[i].dp_intr_id = i;
1780 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1781 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1782 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1783 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1784 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1785 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1786 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1787 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1788 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
1789 			 host2rxdma_mon_ring_mask;
1790 
1791 		soc->intr_ctx[i].soc = soc;
1792 
1793 		num_irq = 0;
1794 
1795 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1796 					       &num_irq);
1797 
1798 		ret = hif_register_ext_group(soc->hif_handle,
1799 				num_irq, irq_id_map, dp_service_srngs,
1800 				&soc->intr_ctx[i], "dp_intr",
1801 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1802 
1803 		if (ret) {
1804 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1805 			FL("failed, ret = %d"), ret);
1806 
1807 			return QDF_STATUS_E_FAILURE;
1808 		}
1809 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1810 	}
1811 
1812 	hif_configure_ext_group_interrupts(soc->hif_handle);
1813 
1814 	return QDF_STATUS_SUCCESS;
1815 }
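
/*
 * Mask convention sketch for the attach loop above (mask values are
 * hypothetical): if wlan_cfg reports for context i
 *
 *	tx_ring_mask = 0x3  ->  TCL data rings 0 and 1
 *	rx_ring_mask = 0x4  ->  REO destination ring 2
 *
 * then dp_service_srngs() registered for intr_ctx[i] polls exactly
 * those rings; in every mask, bit j selects ring j of that ring type.
 */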
1816 
1817 /*
1818  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1819  * @txrx_soc: DP SOC handle
1820  *
1821  * Return: void
1822  */
1823 static void dp_soc_interrupt_detach(void *txrx_soc)
1824 {
1825 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1826 	int i;
1827 
1828 	if (soc->intr_mode == DP_INTR_POLL) {
1829 		qdf_timer_stop(&soc->int_timer);
1830 		qdf_timer_free(&soc->int_timer);
1831 	} else {
1832 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1833 	}
1834 
1835 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1836 		soc->intr_ctx[i].tx_ring_mask = 0;
1837 		soc->intr_ctx[i].rx_ring_mask = 0;
1838 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1839 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1840 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1841 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1842 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1843 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1844 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
1845 
1846 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1847 	}
1848 }
1849 
1850 #define AVG_MAX_MPDUS_PER_TID 128
1851 #define AVG_TIDS_PER_CLIENT 2
1852 #define AVG_FLOWS_PER_TID 2
1853 #define AVG_MSDUS_PER_FLOW 128
1854 #define AVG_MSDUS_PER_MPDU 4
1855 
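/*
 * Sizing sketch for the pool computed below, with illustrative inputs
 * (max_clients = 64 and num_mpdus_per_link_desc = 6 are assumptions
 * for the example; real values come from wlan_cfg and the HAL):
 *
 *	num_mpdu_link_descs = (64 * 2 * 128) / 6 = 2730
 *
 * The individual counts are summed into num_entries and rounded up to
 * the next power of 2, e.g. num_entries = 10000 gives
 * total_link_descs = 16384.
 */
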
1856 /*
1857  * Allocate and setup link descriptor pool that will be used by HW for
1858  * various link and queue descriptors and managed by WBM
1859  */
1860 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1861 {
1862 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1863 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1864 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1865 	uint32_t num_mpdus_per_link_desc =
1866 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1867 	uint32_t num_msdus_per_link_desc =
1868 		hal_num_msdus_per_link_desc(soc->hal_soc);
1869 	uint32_t num_mpdu_links_per_queue_desc =
1870 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1871 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1872 	uint32_t total_link_descs, total_mem_size;
1873 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1874 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1875 	uint32_t num_link_desc_banks;
1876 	uint32_t last_bank_size = 0;
1877 	uint32_t entry_size, num_entries;
1878 	int i;
1879 	uint32_t desc_id = 0;
1880 	qdf_dma_addr_t *baseaddr = NULL;
1881 
1882 	/* Only Tx queue descriptors are allocated from the common link
1883 	 * descriptor pool. Rx queue descriptors (REO queue extension
1884 	 * descriptors) are not included here because they are expected to be
1885 	 * allocated contiguously with the REO queue descriptors themselves.
1886 	 */
1887 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1888 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1889 
1890 	num_mpdu_queue_descs = num_mpdu_link_descs /
1891 		num_mpdu_links_per_queue_desc;
1892 
1893 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1894 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1895 		num_msdus_per_link_desc;
1896 
1897 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1898 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1899 
1900 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1901 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1902 
1903 	/* Round up to power of 2 */
1904 	total_link_descs = 1;
1905 	while (total_link_descs < num_entries)
1906 		total_link_descs <<= 1;
1907 
1908 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1909 		FL("total_link_descs: %u, link_desc_size: %d"),
1910 		total_link_descs, link_desc_size);
1911 	total_mem_size = total_link_descs * link_desc_size;
1912 
1913 	total_mem_size += link_desc_align;
1914 
1915 	if (total_mem_size <= max_alloc_size) {
1916 		num_link_desc_banks = 0;
1917 		last_bank_size = total_mem_size;
1918 	} else {
1919 		num_link_desc_banks = (total_mem_size) /
1920 			(max_alloc_size - link_desc_align);
1921 		last_bank_size = total_mem_size %
1922 			(max_alloc_size - link_desc_align);
1923 	}
1924 
1925 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1926 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
1927 		total_mem_size, num_link_desc_banks);
1928 
1929 	for (i = 0; i < num_link_desc_banks; i++) {
1930 		if (!dp_is_soc_reinit(soc)) {
1931 			baseaddr = &soc->link_desc_banks[i].
1932 					base_paddr_unaligned;
1933 			soc->link_desc_banks[i].base_vaddr_unaligned =
1934 				qdf_mem_alloc_consistent(soc->osdev,
1935 							 soc->osdev->dev,
1936 							 max_alloc_size,
1937 							 baseaddr);
1938 		}
1939 		soc->link_desc_banks[i].size = max_alloc_size;
1940 
1941 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1942 			soc->link_desc_banks[i].base_vaddr_unaligned) +
1943 			((unsigned long)(
1944 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1945 			link_desc_align));
1946 
1947 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
1948 			soc->link_desc_banks[i].base_paddr_unaligned) +
1949 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1950 			(unsigned long)(
1951 			soc->link_desc_banks[i].base_vaddr_unaligned));
1952 
1953 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
1954 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1955 				FL("Link descriptor memory alloc failed"));
1956 			goto fail;
1957 		}
1958 		qdf_minidump_log((void *)(soc->link_desc_banks[i].base_vaddr),
1959 			soc->link_desc_banks[i].size, "link_desc_bank");
1960 	}
1961 
1962 	if (last_bank_size) {
1963 		/* Allocate last bank in case total memory required is not exact
1964 		 * multiple of max_alloc_size
1965 		 */
1966 		if (!dp_is_soc_reinit(soc)) {
1967 			baseaddr = &soc->link_desc_banks[i].
1968 					base_paddr_unaligned;
1969 			soc->link_desc_banks[i].base_vaddr_unaligned =
1970 				qdf_mem_alloc_consistent(soc->osdev,
1971 							 soc->osdev->dev,
1972 							 last_bank_size,
1973 							 baseaddr);
1974 		}
1975 		soc->link_desc_banks[i].size = last_bank_size;
1976 
1977 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1978 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
1979 			((unsigned long)(
1980 			soc->link_desc_banks[i].base_vaddr_unaligned) %
1981 			link_desc_align));
1982 
1983 		soc->link_desc_banks[i].base_paddr =
1984 			(unsigned long)(
1985 			soc->link_desc_banks[i].base_paddr_unaligned) +
1986 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1987 			(unsigned long)(
1988 			soc->link_desc_banks[i].base_vaddr_unaligned));
1989 
1990 		qdf_minidump_log((void *)(soc->link_desc_banks[i].base_vaddr),
1991 			soc->link_desc_banks[i].size, "link_desc_bank");
1992 	}
1993 
1994 
1995 	/* Allocate and setup link descriptor idle list for HW internal use */
1996 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1997 	total_mem_size = entry_size * total_link_descs;
1998 
1999 	if (total_mem_size <= max_alloc_size) {
2000 		void *desc;
2001 
2002 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
2003 			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
2004 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2005 				FL("Link desc idle ring setup failed"));
2006 			goto fail;
2007 		}
2008 
2009 		qdf_minidump_log(
2010 			(void *)(soc->wbm_idle_link_ring.base_vaddr_unaligned),
2011 			soc->wbm_idle_link_ring.alloc_size,
2012 			"wbm_idle_link_ring");
2013 
2014 		hal_srng_access_start_unlocked(soc->hal_soc,
2015 			soc->wbm_idle_link_ring.hal_srng);
2016 
2017 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
2018 			soc->link_desc_banks[i].base_paddr; i++) {
2019 			uint32_t num_entries = (soc->link_desc_banks[i].size -
2020 				((unsigned long)(
2021 				soc->link_desc_banks[i].base_vaddr) -
2022 				(unsigned long)(
2023 				soc->link_desc_banks[i].base_vaddr_unaligned)))
2024 				/ link_desc_size;
2025 			unsigned long paddr = (unsigned long)(
2026 				soc->link_desc_banks[i].base_paddr);
2027 
2028 			while (num_entries && (desc = hal_srng_src_get_next(
2029 				soc->hal_soc,
2030 				soc->wbm_idle_link_ring.hal_srng))) {
2031 				hal_set_link_desc_addr(desc,
2032 					LINK_DESC_COOKIE(desc_id, i), paddr);
2033 				num_entries--;
2034 				desc_id++;
2035 				paddr += link_desc_size;
2036 			}
2037 		}
2038 		hal_srng_access_end_unlocked(soc->hal_soc,
2039 			soc->wbm_idle_link_ring.hal_srng);
2040 	} else {
2041 		uint32_t num_scatter_bufs;
2042 		uint32_t num_entries_per_buf;
2043 		uint32_t rem_entries;
2044 		uint8_t *scatter_buf_ptr;
2045 		uint16_t scatter_buf_num;
2046 		uint32_t buf_size = 0;
2047 
2048 		soc->wbm_idle_scatter_buf_size =
2049 			hal_idle_list_scatter_buf_size(soc->hal_soc);
2050 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
2051 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
2052 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
2053 					soc->hal_soc, total_mem_size,
2054 					soc->wbm_idle_scatter_buf_size);
2055 
2056 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
2057 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2058 					FL("scatter bufs size out of bounds"));
2059 			goto fail;
2060 		}
2061 
2062 		for (i = 0; i < num_scatter_bufs; i++) {
2063 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
2064 			if (!dp_is_soc_reinit(soc)) {
2065 				buf_size = soc->wbm_idle_scatter_buf_size;
2066 				soc->wbm_idle_scatter_buf_base_vaddr[i] =
2067 					qdf_mem_alloc_consistent(soc->osdev,
2068 								 soc->osdev->
2069 								 dev,
2070 								 buf_size,
2071 								 baseaddr);
2072 			}
2073 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2074 				QDF_TRACE(QDF_MODULE_ID_DP,
2075 					  QDF_TRACE_LEVEL_ERROR,
2076 					  FL("Scatter lst memory alloc fail"));
2077 				goto fail;
2078 			}
2079 		}
2080 
2081 		/* Populate idle list scatter buffers with link descriptor
2082 		 * pointers
2083 		 */
2084 		scatter_buf_num = 0;
2085 		scatter_buf_ptr = (uint8_t *)(
2086 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
2087 		rem_entries = num_entries_per_buf;
2088 
2089 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
2090 			soc->link_desc_banks[i].base_paddr; i++) {
2091 			uint32_t num_link_descs =
2092 				(soc->link_desc_banks[i].size -
2093 				((unsigned long)(
2094 				soc->link_desc_banks[i].base_vaddr) -
2095 				(unsigned long)(
2096 				soc->link_desc_banks[i].base_vaddr_unaligned)))
2097 				/ link_desc_size;
2098 			unsigned long paddr = (unsigned long)(
2099 				soc->link_desc_banks[i].base_paddr);
2100 
2101 			while (num_link_descs) {
2102 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
2103 					LINK_DESC_COOKIE(desc_id, i), paddr);
2104 				num_link_descs--;
2105 				desc_id++;
2106 				paddr += link_desc_size;
2107 				rem_entries--;
2108 				if (rem_entries) {
2109 					scatter_buf_ptr += entry_size;
2110 				} else {
2111 					rem_entries = num_entries_per_buf;
2112 					scatter_buf_num++;
2113 
2114 					if (scatter_buf_num >= num_scatter_bufs)
2115 						break;
2116 
2117 					scatter_buf_ptr = (uint8_t *)(
2118 						soc->wbm_idle_scatter_buf_base_vaddr[
2119 						scatter_buf_num]);
2120 				}
2121 			}
2122 		}
2123 		/* Setup link descriptor idle list in HW */
2124 		hal_setup_link_idle_list(soc->hal_soc,
2125 			soc->wbm_idle_scatter_buf_base_paddr,
2126 			soc->wbm_idle_scatter_buf_base_vaddr,
2127 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
2128 			(uint32_t)(scatter_buf_ptr -
2129 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
2130 			scatter_buf_num-1])), total_link_descs);
2131 	}
2132 	return 0;
2133 
2134 fail:
2135 	if (soc->wbm_idle_link_ring.hal_srng) {
2136 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2137 				WBM_IDLE_LINK, 0);
2138 	}
2139 
2140 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2141 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2142 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2143 				soc->wbm_idle_scatter_buf_size,
2144 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2145 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2146 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2147 		}
2148 	}
2149 
2150 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2151 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2152 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2153 				soc->link_desc_banks[i].size,
2154 				soc->link_desc_banks[i].base_vaddr_unaligned,
2155 				soc->link_desc_banks[i].base_paddr_unaligned,
2156 				0);
2157 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2158 		}
2159 	}
2160 	return QDF_STATUS_E_FAILURE;
2161 }
2162 
2163 /*
2164  * Free the link descriptor pool that was set up for HW use
2165  */
2166 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
2167 {
2168 	int i;
2169 
2170 	if (soc->wbm_idle_link_ring.hal_srng) {
2171 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2172 			WBM_IDLE_LINK, 0);
2173 	}
2174 
2175 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2176 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2177 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2178 				soc->wbm_idle_scatter_buf_size,
2179 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2180 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2181 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2182 		}
2183 	}
2184 
2185 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2186 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2187 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2188 				soc->link_desc_banks[i].size,
2189 				soc->link_desc_banks[i].base_vaddr_unaligned,
2190 				soc->link_desc_banks[i].base_paddr_unaligned,
2191 				0);
2192 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2193 		}
2194 	}
2195 }
2196 
2197 #ifdef IPA_OFFLOAD
2198 #define REO_DST_RING_SIZE_QCA6290 1023
2199 #ifndef QCA_WIFI_QCA8074_VP
2200 #define REO_DST_RING_SIZE_QCA8074 1023
2201 #else
2202 #define REO_DST_RING_SIZE_QCA8074 8
2203 #endif /* QCA_WIFI_QCA8074_VP */
2204 
2205 #else
2206 
2207 #define REO_DST_RING_SIZE_QCA6290 1024
2208 #ifndef QCA_WIFI_QCA8074_VP
2209 #define REO_DST_RING_SIZE_QCA8074 2048
2210 #else
2211 #define REO_DST_RING_SIZE_QCA8074 8
2212 #endif /* QCA_WIFI_QCA8074_VP */
2213 #endif /* IPA_OFFLOAD */
2214 
2215 /*
2216  * dp_ast_aging_timer_fn() - Timer callback function for WDS aging
2217  * @soc: Datapath SOC handle
2218  *
2219  * This is a timer function used to age out stale AST nodes from
2220  * AST table
2221  */
2222 #ifdef FEATURE_WDS
2223 static void dp_ast_aging_timer_fn(void *soc_hdl)
2224 {
2225 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
2226 	struct dp_pdev *pdev;
2227 	struct dp_vdev *vdev;
2228 	struct dp_peer *peer;
2229 	struct dp_ast_entry *ase, *temp_ase;
2230 	int i;
2231 	bool check_wds_ase = false;
2232 
2233 	if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
2234 		soc->wds_ast_aging_timer_cnt = 0;
2235 		check_wds_ase = true;
2236 	}
2237 
2238 	/* Peer list access lock */
2239 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
2240 
2241 	/* AST list access lock */
2242 	qdf_spin_lock_bh(&soc->ast_lock);
2243 
2244 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
2245 		pdev = soc->pdev_list[i];
2246 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
2247 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
2248 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
2249 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
2250 					/*
2251 					 * Do not expire static ast entries
2252 					 * and HM WDS entries
2253 					 */
2254 					if (ase->type !=
2255 					    CDP_TXRX_AST_TYPE_WDS &&
2256 					    ase->type !=
2257 					    CDP_TXRX_AST_TYPE_MEC &&
2258 					    ase->type !=
2259 					    CDP_TXRX_AST_TYPE_DA)
2260 						continue;
2261 
2262 					/* Expire MEC entries every n sec.
2263 					 * They must be aged out so that,
2264 					 * if a STA backbone becomes an
2265 					 * AP backbone, the entry can be
2266 					 * re-added as a WDS entry.
2267 					 */
2268 					if (ase->is_active && ase->type ==
2269 					    CDP_TXRX_AST_TYPE_MEC) {
2270 						ase->is_active = FALSE;
2271 						continue;
2272 					} else if (ase->is_active &&
2273 						   check_wds_ase) {
2274 						ase->is_active = FALSE;
2275 						continue;
2276 					}
2277 
2278 					if (ase->type ==
2279 					    CDP_TXRX_AST_TYPE_MEC) {
2280 						DP_STATS_INC(soc,
2281 							     ast.aged_out, 1);
2282 						dp_peer_del_ast(soc, ase);
2283 					} else if (check_wds_ase) {
2284 						DP_STATS_INC(soc,
2285 							     ast.aged_out, 1);
2286 						dp_peer_del_ast(soc, ase);
2287 					}
2288 				}
2289 			}
2290 		}
2291 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2292 	}
2293 
2294 	qdf_spin_unlock_bh(&soc->ast_lock);
2295 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
2296 
2297 	if (qdf_atomic_read(&soc->cmn_init_done))
2298 		qdf_timer_mod(&soc->ast_aging_timer,
2299 			      DP_AST_AGING_TIMER_DEFAULT_MS);
2300 }
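
/*
 * Aging cadence sketch (macro values here are illustrative, not quoted
 * from the headers): if DP_AST_AGING_TIMER_DEFAULT_MS is 1000 and
 * DP_WDS_AST_AGING_TIMER_CNT is 2, the timer fires every second, MEC
 * entries are marked inactive or deleted on every tick, and WDS/DA
 * entries are examined only on every second tick (check_wds_ase). An
 * entry therefore survives roughly one full check interval after its
 * last is_active refresh before dp_peer_del_ast() removes it.
 */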
2301 
2302 
2303 /*
2304  * dp_soc_wds_attach() - Setup WDS timer and AST table
2305  * @soc:		Datapath SOC handle
2306  *
2307  * Return: None
2308  */
2309 static void dp_soc_wds_attach(struct dp_soc *soc)
2310 {
2311 	soc->wds_ast_aging_timer_cnt = 0;
2312 	qdf_timer_init(soc->osdev, &soc->ast_aging_timer,
2313 		       dp_ast_aging_timer_fn, (void *)soc,
2314 		       QDF_TIMER_TYPE_WAKE_APPS);
2315 
2316 	qdf_timer_mod(&soc->ast_aging_timer, DP_AST_AGING_TIMER_DEFAULT_MS);
2317 }
2318 
2319 /*
2320  * dp_soc_wds_detach() - Detach WDS data structures and timers
2321  * @soc: Datapath SOC handle
2322  *
2323  * Return: None
2324  */
2325 static void dp_soc_wds_detach(struct dp_soc *soc)
2326 {
2327 	qdf_timer_stop(&soc->ast_aging_timer);
2328 	qdf_timer_free(&soc->ast_aging_timer);
2329 }
2330 #else
2331 static void dp_soc_wds_attach(struct dp_soc *soc)
2332 {
2333 }
2334 
2335 static void dp_soc_wds_detach(struct dp_soc *soc)
2336 {
2337 }
2338 #endif
2339 
2340 /*
2341  * dp_soc_reset_ring_map() - Reset cpu ring map
2342  * @soc: Datapath soc handler
2343  *
2344  * This api resets the default cpu ring map
2345  */
2346 
2347 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2348 {
2349 	uint8_t i;
2350 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2351 
2352 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2353 		switch (nss_config) {
2354 		case dp_nss_cfg_first_radio:
2355 			/*
2356 			 * Setting Tx ring map for one nss offloaded radio
2357 			 */
2358 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2359 			break;
2360 
2361 		case dp_nss_cfg_second_radio:
2362 			/*
2363 			 * Setting Tx ring map for the second nss offloaded radio
2364 			 */
2365 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2366 			break;
2367 
2368 		case dp_nss_cfg_dbdc:
2369 			/*
2370 			 * Setting Tx ring map for 2 nss offloaded radios
2371 			 */
2372 			soc->tx_ring_map[i] =
2373 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2374 			break;
2375 
2376 		case dp_nss_cfg_dbtc:
2377 			/*
2378 			 * Setting Tx ring map for 3 nss offloaded radios
2379 			 */
2380 			soc->tx_ring_map[i] =
2381 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2382 			break;
2383 
2384 		default:
2385 			dp_err("tx_ring_map failed due to invalid nss cfg");
2386 			break;
2387 		}
2388 	}
2389 }
2390 
2391 /*
2392  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
2393  * @soc - DP soc handle
2394  * @ring_type - ring type
2395  * @ring_num - ring_num
2396  *
2397  * Return: nonzero if the ring is offloaded to NSS, 0 otherwise
2398  */
2399 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
2400 {
2401 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2402 	uint8_t status = 0;
2403 
2404 	switch (ring_type) {
2405 	case WBM2SW_RELEASE:
2406 	case REO_DST:
2407 	case RXDMA_BUF:
2408 		status = ((nss_config) & (1 << ring_num));
2409 		break;
2410 	default:
2411 		break;
2412 	}
2413 
2414 	return status;
2415 }
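
/*
 * Example with a hypothetical nss_config of 0x1 (first radio
 * offloaded): dp_soc_ring_if_nss_offloaded(soc, REO_DST, 0) evaluates
 * 0x1 & (1 << 0) and returns nonzero, while ring_num 1 yields 0; ring
 * types other than WBM2SW_RELEASE, REO_DST and RXDMA_BUF are never
 * reported as offloaded.
 */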
2416 
2417 /*
2418  * dp_soc_reset_intr_mask() - reset interrupt mask
2419  * @dp_soc - DP Soc handle
2420  *
2421  * Return: Return void
2422  */
2423 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2424 {
2425 	uint8_t j;
2426 	int *grp_mask = NULL;
2427 	int group_number, mask, num_ring;
2428 
2429 	/* number of tx rings */
2430 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2431 
2432 	/*
2433 	 * group mask for tx completion ring.
2434 	 */
2435 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2436 
2437 	/* loop and reset the mask for only offloaded ring */
2438 	for (j = 0; j < num_ring; j++) {
2439 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2440 			continue;
2441 		}
2442 
2443 		/*
2444 		 * Group number corresponding to tx offloaded ring.
2445 		 */
2446 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2447 		if (group_number < 0) {
2448 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2449 					FL("ring not part of any group; ring_type: %d, ring_num %d"),
2450 					WBM2SW_RELEASE, j);
2451 			return;
2452 		}
2453 
2454 		/* reset the tx mask for offloaded ring */
2455 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2456 		mask &= (~(1 << j));
2457 
2458 		/*
2459 		 * reset the interrupt mask for offloaded ring.
2460 		 */
2461 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2462 	}
2463 
2464 	/* number of rx rings */
2465 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2466 
2467 	/*
2468 	 * group mask for reo destination ring.
2469 	 */
2470 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2471 
2472 	/* loop and reset the mask for only offloaded ring */
2473 	for (j = 0; j < num_ring; j++) {
2474 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2475 			continue;
2476 		}
2477 
2478 		/*
2479 		 * Group number corresponding to rx offloaded ring.
2480 		 */
2481 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2482 		if (group_number < 0) {
2483 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2484 					FL("ring not part of any group; ring_type: %d, ring_num %d"),
2485 					REO_DST, j);
2486 			return;
2487 		}
2488 
2489 		/* clear the rx mask bit for the offloaded ring */
2490 		mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2491 		mask &= (~(1 << j));
2492 
2493 		/*
2494 		 * write back the updated interrupt mask for the offloaded ring.
2495 		 */
2496 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2497 	}
2498 
2499 	/*
2500 	 * group mask for Rx buffer refill ring
2501 	 */
2502 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2503 
2504 	/* loop and reset the mask for only offloaded ring */
2505 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2506 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2507 			continue;
2508 		}
2509 
2510 		/*
2511 		 * Group number corresponding to rx offloaded ring.
2512 		 */
2513 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2514 		if (group_number < 0) {
2515 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2516 					FL("ring not part of any group; ring_type: %d, ring_num %d"),
2517 					RXDMA_BUF, j);
2518 			return;
2519 		}
2520 
2521 		/* clear the host2rxdma mask bit for the offloaded ring */
2522 		mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2523 				group_number);
2524 		mask &= (~(1 << j));
2525 
2526 		/*
2527 		 * write back the updated interrupt mask for the offloaded ring.
2528 		 */
2529 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2530 			group_number, mask);
2531 	}
2532 }
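
/*
 * Mask-clearing example for the loops above (hypothetical values): if
 * ring 1 of a group is NSS-offloaded and the group's current mask is
 * 0x7, then
 *
 *	mask &= ~(1 << 1);	-> 0x7 & ~0x2 == 0x5
 *
 * so the host stops servicing ring 1 while rings 0 and 2 stay mapped
 * to the interrupt group.
 */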
2533 
2534 #ifdef IPA_OFFLOAD
2535 /**
2536  * dp_reo_remap_config() - configure reo remap register value based on
2537  *                         nss configuration.
2538  *		Based on the offload_radio value, the remap configuration
2539  *		below gets applied.
2540  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
2541  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
2542  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
2543  *		3 - both Radios handled by NSS (remap not required)
2544  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
2545  * @soc: Datapath SOC handle
2546  * @remap1: output parameter indicates reo remap 1 register value
2547  * @remap2: output parameter indicates reo remap 2 register value
2548  * Return: bool type, true if remap is configured else false.
2549  */
2550 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2)
2551 {
2552 	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
2553 		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
2554 
2555 	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
2556 		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
2557 
2558 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
2559 
2560 	return true;
2561 }
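
/*
 * How to read the remap words above: each destination select is a
 * 3-bit field, eight fields per register, with the whole pattern
 * shifted left by 8. remap1 thus encodes the ring sequence
 * 1, 2, 3, 1, 2, 3, 1, 2 and remap2 the sequence 3, 1, 2, 3, 1, 2,
 * 3, 1; hash results rotate over REO2SW rings 1-3, leaving ring 4
 * free for the IPA use case described above.
 */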
2562 #else
2563 static bool dp_reo_remap_config(struct dp_soc *soc,
2564 				uint32_t *remap1,
2565 				uint32_t *remap2)
2566 {
2567 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2568 
2569 	switch (offload_radio) {
2570 	case dp_nss_cfg_default:
2571 		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2572 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2573 			(0x3 << 18) | (0x4 << 21)) << 8;
2574 
2575 		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2576 			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2577 			(0x3 << 18) | (0x4 << 21)) << 8;
2578 		break;
2579 	case dp_nss_cfg_first_radio:
2580 		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2581 			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2582 			(0x2 << 18) | (0x3 << 21)) << 8;
2583 
2584 		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2585 			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2586 			(0x4 << 18) | (0x2 << 21)) << 8;
2587 		break;
2588 
2589 	case dp_nss_cfg_second_radio:
2590 		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2591 			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2592 			(0x1 << 18) | (0x3 << 21)) << 8;
2593 
2594 		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2595 			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2596 			(0x4 << 18) | (0x1 << 21)) << 8;
2597 		break;
2598 
2599 	case dp_nss_cfg_dbdc:
2600 	case dp_nss_cfg_dbtc:
2601 		/* return false if both or all are offloaded to NSS */
2602 		return false;
2603 	}
2604 
2605 	dp_debug("remap1 %x remap2 %x offload_radio %u",
2606 		 *remap1, *remap2, offload_radio);
2607 	return true;
2608 }
2609 #endif
2610 
2611 /*
2612  * dp_reo_frag_dst_set() - configure reo register to set the
2613  *                        fragment destination ring
2614  * @soc : Datapath soc
2615  * @frag_dst_ring : output parameter to set fragment destination ring
2616  *
2617  * Based on offload_radio, one of the fragment destination rings below is selected
2618  * 0 - TCL
2619  * 1 - SW1
2620  * 2 - SW2
2621  * 3 - SW3
2622  * 4 - SW4
2623  * 5 - Release
2624  * 6 - FW
2625  * 7 - alternate select
2626  *
2627  * return: void
2628  */
2629 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2630 {
2631 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2632 
2633 	switch (offload_radio) {
2634 	case dp_nss_cfg_default:
2635 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2636 		break;
2637 	case dp_nss_cfg_dbdc:
2638 	case dp_nss_cfg_dbtc:
2639 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2640 		break;
2641 	default:
2642 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2643 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2644 		break;
2645 	}
2646 }
2647 
2648 #ifdef ENABLE_VERBOSE_DEBUG
2649 static void dp_enable_verbose_debug(struct dp_soc *soc)
2650 {
2651 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2652 
2653 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2654 
2655 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
2656 		is_dp_verbose_debug_enabled = true;
2657 
2658 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
2659 		hal_set_verbose_debug(true);
2660 	else
2661 		hal_set_verbose_debug(false);
2662 }
2663 #else
2664 static void dp_enable_verbose_debug(struct dp_soc *soc)
2665 {
2666 }
2667 #endif
2668 
2669 /*
2670  * dp_soc_cmn_setup() - Common SoC level initialization
2671  * @soc:		Datapath SOC handle
2672  *
2673  * This is an internal function used to setup common SOC data structures,
2674  * to be called from PDEV attach after receiving HW mode capabilities from FW
2675  */
2676 static int dp_soc_cmn_setup(struct dp_soc *soc)
2677 {
2678 	int i;
2679 	struct hal_reo_params reo_params;
2680 	int tx_ring_size;
2681 	int tx_comp_ring_size;
2682 	int reo_dst_ring_size;
2683 	uint32_t entries;
2684 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2685 
2686 	if (qdf_atomic_read(&soc->cmn_init_done))
2687 		return 0;
2688 
2689 	if (dp_hw_link_desc_pool_setup(soc))
2690 		goto fail1;
2691 
2692 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2693 
2694 	dp_enable_verbose_debug(soc);
2695 
2696 	/* Setup SRNG rings */
2697 	/* Common rings */
2698 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2699 		wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
2700 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2701 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2702 		goto fail1;
2703 	}
2704 
2705 	qdf_minidump_log(
2706 		(void *)(soc->wbm_desc_rel_ring.base_vaddr_unaligned),
2707 		soc->wbm_desc_rel_ring.alloc_size, "wbm_desc_rel_ring");
2708 
2709 	soc->num_tcl_data_rings = 0;
2710 	/* Tx data rings */
2711 	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
2712 		soc->num_tcl_data_rings =
2713 			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
2714 		tx_comp_ring_size =
2715 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2716 		tx_ring_size =
2717 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2718 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2719 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2720 				TCL_DATA, i, 0, tx_ring_size)) {
2721 				QDF_TRACE(QDF_MODULE_ID_DP,
2722 					QDF_TRACE_LEVEL_ERROR,
2723 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2724 				goto fail1;
2725 			}
2726 			/*
2727 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2728 			 * count
2729 			 */
2730 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2731 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
2732 				QDF_TRACE(QDF_MODULE_ID_DP,
2733 					QDF_TRACE_LEVEL_ERROR,
2734 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2735 				goto fail1;
2736 			}
2737 		}
2738 	} else {
2739 		/* This will be incremented during per pdev ring setup */
2740 		soc->num_tcl_data_rings = 0;
2741 	}
2742 
2743 	if (dp_tx_soc_attach(soc)) {
2744 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2745 				FL("dp_tx_soc_attach failed"));
2746 		goto fail1;
2747 	}
2748 
2749 	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
2750 	/* TCL command and status rings */
2751 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2752 			  entries)) {
2753 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2754 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2755 		goto fail1;
2756 	}
2757 
2758 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
2759 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2760 			  entries)) {
2761 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2762 			FL("dp_srng_setup failed for tcl_status_ring"));
2763 		goto fail1;
2764 	}
2765 
2766 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2767 
2768 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2769 	 * descriptors
2770 	 */
2771 
2772 	/* Rx data rings */
2773 	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2774 		soc->num_reo_dest_rings =
2775 			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
2776 		QDF_TRACE(QDF_MODULE_ID_DP,
2777 			QDF_TRACE_LEVEL_INFO,
2778 			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
2779 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2780 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2781 				i, 0, reo_dst_ring_size)) {
2782 				QDF_TRACE(QDF_MODULE_ID_DP,
2783 					  QDF_TRACE_LEVEL_ERROR,
2784 					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
2785 				goto fail1;
2786 			}
2787 		}
2788 	} else {
2789 		/* This will be incremented during per pdev ring setup */
2790 		soc->num_reo_dest_rings = 0;
2791 	}
2792 
2793 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2794 	/* LMAC RxDMA to SW Rings configuration */
2795 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
2796 		/* Only valid for MCL */
2797 		struct dp_pdev *pdev = soc->pdev_list[0];
2798 
2799 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2800 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2801 					  RXDMA_DST, 0, i,
2802 					  entries)) {
2803 				QDF_TRACE(QDF_MODULE_ID_DP,
2804 					  QDF_TRACE_LEVEL_ERROR,
2805 					  FL(RNG_ERR "rxdma_err_dst_ring"));
2806 				goto fail1;
2807 			}
2808 		}
2809 	}
2810 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2811 
2812 	/* REO reinjection ring */
2813 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
2814 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2815 			  entries)) {
2816 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2817 			  FL("dp_srng_setup failed for reo_reinject_ring"));
2818 		goto fail1;
2819 	}
2820 
2821 
2822 	/* Rx release ring */
2823 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2824 		wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
2825 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2826 			  FL("dp_srng_setup failed for rx_rel_ring"));
2827 		goto fail1;
2828 	}
2829 
2830 
2831 	/* Rx exception ring */
2832 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2833 	if (dp_srng_setup(soc, &soc->reo_exception_ring,
2834 			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
2835 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2836 			  FL("dp_srng_setup failed for reo_exception_ring"));
2837 		goto fail1;
2838 	}
2839 
2840 
2841 	/* REO command and status rings */
2842 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2843 		wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
2844 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2845 			FL("dp_srng_setup failed for reo_cmd_ring"));
2846 		goto fail1;
2847 	}
2848 
2849 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2850 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2851 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2852 
2853 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2854 		wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
2855 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2856 			FL("dp_srng_setup failed for reo_status_ring"));
2857 		goto fail1;
2858 	}
2859 
2860 
2861 	/* Reset the cpu ring map if radio is NSS offloaded */
2862 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
2863 		dp_soc_reset_cpu_ring_map(soc);
2864 		dp_soc_reset_intr_mask(soc);
2865 	}
2866 
2867 	/* Setup HW REO */
2868 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2869 
2870 	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
2871 
2872 		/*
2873 		 * Reo ring remap is not required if both radios
2874 		 * are offloaded to NSS
2875 		 */
2876 		if (!dp_reo_remap_config(soc,
2877 					&reo_params.remap1,
2878 					&reo_params.remap2))
2879 			goto out;
2880 
2881 		reo_params.rx_hash_enabled = true;
2882 	}
2883 
2884 	/* setup the global rx defrag waitlist */
2885 	TAILQ_INIT(&soc->rx.defrag.waitlist);
2886 	soc->rx.defrag.timeout_ms =
2887 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
2888 	soc->rx.defrag.next_flush_ms = 0;
2889 	soc->rx.flags.defrag_timeout_check =
2890 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
2891 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
2892 
2893 out:
2894 	/*
2895 	 * set the fragment destination ring
2896 	 */
2897 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2898 
2899 	hal_reo_setup(soc->hal_soc, &reo_params);
2900 
2901 	qdf_atomic_set(&soc->cmn_init_done, 1);
2902 	dp_soc_wds_attach(soc);
2903 
2904 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
2905 	return 0;
2906 fail1:
2907 	/*
2908 	 * Cleanup will be done as part of soc_detach, which will
2909 	 * be called on pdev attach failure
2910 	 */
2911 	return QDF_STATUS_E_FAILURE;
2912 }
2913 
2914 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
2915 
2916 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
2917 {
2918 	struct cdp_lro_hash_config lro_hash;
2919 	QDF_STATUS status;
2920 
2921 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2922 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
2923 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2924 		dp_err("LRO, GRO and RX hash disabled");
2925 		return QDF_STATUS_E_FAILURE;
2926 	}
2927 
2928 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2929 
2930 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
2931 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
2932 		lro_hash.lro_enable = 1;
2933 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2934 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
2935 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2936 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
2937 	}
2938 
2939 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
2940 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2941 		 LRO_IPV4_SEED_ARR_SZ));
2942 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2943 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2944 		 LRO_IPV6_SEED_ARR_SZ));
2945 
2946 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2947 
2948 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
2949 		QDF_BUG(0);
2950 		dp_err("lro_hash_config not configured");
2951 		return QDF_STATUS_E_FAILURE;
2952 	}
2953 
2954 	status = soc->cdp_soc.ol_ops->lro_hash_config(pdev->ctrl_pdev,
2955 						      &lro_hash);
2956 	if (!QDF_IS_STATUS_SUCCESS(status)) {
2957 		dp_err("failed to send lro_hash_config to FW %u", status);
2958 		return status;
2959 	}
2960 
2961 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2962 		lro_hash.lro_enable, lro_hash.tcp_flag,
2963 		lro_hash.tcp_flag_mask);
2964 
2965 	dp_info("toeplitz_hash_ipv4:");
2966 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2967 			   (void *)lro_hash.toeplitz_hash_ipv4,
2968 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2969 			   LRO_IPV4_SEED_ARR_SZ));
2970 
2971 	dp_info("toeplitz_hash_ipv6:");
2972 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2973 			   (void *)lro_hash.toeplitz_hash_ipv6,
2974 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2975 			   LRO_IPV6_SEED_ARR_SZ));
2976 
2977 	return status;
2978 }
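
/*
 * Note on the flag/mask pair sent above: tcp_flag = QDF_TCPHDR_ACK
 * with tcp_flag_mask covering FIN/SYN/RST/ACK/URG/ECE/CWR asks the
 * target to aggregate only segments whose masked flag bits equal a
 * plain ACK; anything carrying SYN, FIN, RST, URG, ECE or CWR stays
 * out of LRO. (This is an interpretation of the configured fields,
 * not a quote from the HTT interface spec.)
 */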
2979 
2980 /*
2981  * dp_rxdma_ring_setup() - configure the RX DMA rings
2982  * @soc: data path SoC handle
2983  * @pdev: Physical device handle
2984  *
2985  * Return: 0 - success, > 0 - failure
2986  */
2987 #ifdef QCA_HOST2FW_RXBUF_RING
2988 static int dp_rxdma_ring_setup(struct dp_soc *soc,
2989 	 struct dp_pdev *pdev)
2990 {
2991 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2992 	int max_mac_rings;
2993 	int i;
2994 
2995 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2996 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
2997 
2998 	for (i = 0; i < max_mac_rings; i++) {
2999 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
3000 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
3001 			RXDMA_BUF, 1, i,
3002 			wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
3003 			QDF_TRACE(QDF_MODULE_ID_DP,
3004 				 QDF_TRACE_LEVEL_ERROR,
3005 				 FL("failed rx mac ring setup"));
3006 			return QDF_STATUS_E_FAILURE;
3007 		}
3008 	}
3009 	return QDF_STATUS_SUCCESS;
3010 }
3011 #else
3012 static int dp_rxdma_ring_setup(struct dp_soc *soc,
3013 	 struct dp_pdev *pdev)
3014 {
3015 	return QDF_STATUS_SUCCESS;
3016 }
3017 #endif
3018 
3019 /**
3020  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
3021  * @pdev - DP_PDEV handle
3022  *
3023  * Return: void
3024  */
3025 static inline void
3026 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
3027 {
3028 	uint8_t map_id;
3029 	struct dp_soc *soc = pdev->soc;
3030 
3031 	if (!soc)
3032 		return;
3033 
3034 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
3035 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
3036 			     default_dscp_tid_map,
3037 			     sizeof(default_dscp_tid_map));
3038 	}
3039 
3040 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
3041 		hal_tx_set_dscp_tid_map(soc->hal_soc,
3042 					default_dscp_tid_map,
3043 					map_id);
3044 	}
3045 }
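
/*
 * Lookup example, assuming the stock default_dscp_tid_map in this file
 * groups eight DSCP codepoints per TID (i.e. tid = dscp >> 3): DSCP 46
 * (EF, voice) lands in dscp_tid_map[map_id][46] = 5, so such packets
 * are classified to TID 5 by the map programmed through
 * hal_tx_set_dscp_tid_map().
 */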
3046 
3047 /**
3048  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
3049  * @pdev - DP_PDEV handle
3050  *
3051  * Return: void
3052  */
3053 static inline void
3054 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
3055 {
3056 	struct dp_soc *soc = pdev->soc;
3057 
3058 	if (!soc)
3059 		return;
3060 
3061 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
3062 		     sizeof(default_pcp_tid_map));
3063 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
3064 }
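
/*
 * For PCP the default mapping is commonly the identity (PCP n -> TID
 * n; this is an assumption about default_pcp_tid_map, not a quoted
 * table): a VLAN priority of 6 would then map straight to TID 6 via
 * hal_tx_set_pcp_tid_map_default().
 */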
3065 
3066 #ifdef IPA_OFFLOAD
3067 /**
3068  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
3069  * @soc: data path instance
3070  * @pdev: core txrx pdev context
3071  *
3072  * Return: QDF_STATUS_SUCCESS: success
3073  *         QDF_STATUS_E_RESOURCES: Error return
3074  */
3075 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3076 					   struct dp_pdev *pdev)
3077 {
3078 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3079 	int entries;
3080 
3081 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3082 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
3083 
3084 	/* Setup second Rx refill buffer ring */
3085 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3086 			  IPA_RX_REFILL_BUF_RING_IDX,
3087 			  pdev->pdev_id,
3088 			  entries)) {
3089 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3090 			FL("dp_srng_setup failed second rx refill ring"));
3091 		return QDF_STATUS_E_FAILURE;
3092 	}
3093 	return QDF_STATUS_SUCCESS;
3094 }
3095 
3096 /**
3097  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
3098  * @soc: data path instance
3099  * @pdev: core txrx pdev context
3100  *
3101  * Return: void
3102  */
3103 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3104 					      struct dp_pdev *pdev)
3105 {
3106 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3107 			IPA_RX_REFILL_BUF_RING_IDX);
3108 }
3109 
3110 #else
3111 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3112 					   struct dp_pdev *pdev)
3113 {
3114 	return QDF_STATUS_SUCCESS;
3115 }
3116 
3117 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3118 					      struct dp_pdev *pdev)
3119 {
3120 }
3121 #endif
3122 
3123 #if !defined(DISABLE_MON_CONFIG)
3124 /**
3125  * dp_mon_rings_setup() - Initialize Monitor rings based on target
3126  * @soc: soc handle
3127  * @pdev: physical device handle
3128  *
3129  * Return: nonzero on failure and zero on success
3130  */
3131 static
3132 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3133 {
3134 	int mac_id = 0;
3135 	int pdev_id = pdev->pdev_id;
3136 	int entries;
3137 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3138 
3139 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3140 
3141 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3142 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
3143 
3144 		if (soc->wlan_cfg_ctx->rxdma1_enable) {
3145 			entries =
3146 			   wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
3147 			if (dp_srng_setup(soc,
3148 					  &pdev->rxdma_mon_buf_ring[mac_id],
3149 					  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
3150 					  entries)) {
3151 				QDF_TRACE(QDF_MODULE_ID_DP,
3152 					  QDF_TRACE_LEVEL_ERROR,
3153 					  FL(RNG_ERR "rxdma_mon_buf_ring"));
3154 				return QDF_STATUS_E_NOMEM;
3155 			}
3156 
3157 			entries =
3158 			   wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
3159 			if (dp_srng_setup(soc,
3160 					  &pdev->rxdma_mon_dst_ring[mac_id],
3161 					  RXDMA_MONITOR_DST, 0, mac_for_pdev,
3162 					  entries)) {
3163 				QDF_TRACE(QDF_MODULE_ID_DP,
3164 					  QDF_TRACE_LEVEL_ERROR,
3165 					  FL(RNG_ERR "rxdma_mon_dst_ring"));
3166 				return QDF_STATUS_E_NOMEM;
3167 			}
3168 
3169 			entries =
3170 			    wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3171 			if (dp_srng_setup(soc,
3172 					  &pdev->rxdma_mon_status_ring[mac_id],
3173 					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
3174 					  entries)) {
3175 				QDF_TRACE(QDF_MODULE_ID_DP,
3176 					  QDF_TRACE_LEVEL_ERROR,
3177 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3178 				return QDF_STATUS_E_NOMEM;
3179 			}
3180 
3181 			entries =
3182 			   wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
3183 			if (dp_srng_setup(soc,
3184 					  &pdev->rxdma_mon_desc_ring[mac_id],
3185 					  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
3186 					  entries)) {
3187 				QDF_TRACE(QDF_MODULE_ID_DP,
3188 					  QDF_TRACE_LEVEL_ERROR,
3189 					  FL(RNG_ERR "rxdma_mon_desc_ring"));
3190 				return QDF_STATUS_E_NOMEM;
3191 			}
3192 		} else {
3193 			entries =
3194 			   wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3195 			if (dp_srng_setup(soc,
3196 					  &pdev->rxdma_mon_status_ring[mac_id],
3197 					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
3198 					  entries)) {
3199 				QDF_TRACE(QDF_MODULE_ID_DP,
3200 					  QDF_TRACE_LEVEL_ERROR,
3201 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3202 				return QDF_STATUS_E_NOMEM;
3203 			}
3204 		}
3205 	}
3206 
3207 	return QDF_STATUS_SUCCESS;
3208 }
3209 #else
3210 static
3211 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3212 {
3213 	return QDF_STATUS_SUCCESS;
3214 }
3215 #endif
3216 
3217 /* dp_iterate_update_peer_list() - update peer stats on cal client timer
3218  * @pdev_hdl: pdev handle
3219  */
3220 #ifdef ATH_SUPPORT_EXT_STAT
3221 void  dp_iterate_update_peer_list(void *pdev_hdl)
3222 {
3223 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
3224 	struct dp_soc *soc = pdev->soc;
3225 	struct dp_vdev *vdev = NULL;
3226 	struct dp_peer *peer = NULL;
3227 
3228 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3229 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3230 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
3231 		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
3232 			dp_cal_client_update_peer_stats(&peer->stats);
3233 		}
3234 	}
3235 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3236 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3237 }
3238 #else
3239 void  dp_iterate_update_peer_list(void *pdev_hdl)
3240 {
3241 }
3242 #endif
3243 /*
3244  * dp_pdev_attach_wifi3() - attach txrx pdev
3245  * @txrx_soc: Datapath SOC handle
3246  * @ctrl_pdev: Opaque PDEV object
3247  * @htc_handle: HTC handle for host-target interface
3248  * @qdf_osdev: QDF OS device
3249  * @pdev_id: PDEV ID
3250  *
3251  * Return: DP PDEV handle on success, NULL on failure
3252  */
3253 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
3254 	struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
3255 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
3256 {
3257 	int tx_ring_size;
3258 	int tx_comp_ring_size;
3259 	int reo_dst_ring_size;
3260 	int entries;
3261 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3262 	int nss_cfg;
3263 	void *sojourn_buf;
3264 
3265 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3266 	struct dp_pdev *pdev = NULL;
3267 
3268 	if (dp_is_soc_reinit(soc))
3269 		pdev = soc->pdev_list[pdev_id];
3270 	else
3271 		pdev = qdf_mem_malloc(sizeof(*pdev));
3272 
3273 	if (!pdev) {
3274 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3275 			FL("DP PDEV memory allocation failed"));
3276 		goto fail0;
3277 	}
3278 	qdf_minidump_log((void *)pdev, sizeof(*pdev), "dp_pdev");
3279 
3280 	/*
3281 	 * Variable to prevent double pdev deinitialization during
3282 	 * radio detach execution, i.e. in the absence of any vdev.
3283 	 */
3284 	pdev->pdev_deinit = 0;
3285 	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
3286 
3287 	if (!pdev->invalid_peer) {
3288 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3289 			  FL("Invalid peer memory allocation failed"));
3290 		qdf_mem_free(pdev);
3291 		goto fail0;
3292 	}
3293 
3294 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3295 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
3296 
3297 	if (!pdev->wlan_cfg_ctx) {
3298 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3299 			FL("pdev cfg_attach failed"));
3300 
3301 		qdf_mem_free(pdev->invalid_peer);
3302 		qdf_mem_free(pdev);
3303 		goto fail0;
3304 	}
3305 
3306 	/*
3307 	 * set nss pdev config based on soc config
3308 	 */
3309 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
3310 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
3311 			(nss_cfg & (1 << pdev_id)));
3312 
3313 	pdev->soc = soc;
3314 	pdev->ctrl_pdev = ctrl_pdev;
3315 	pdev->pdev_id = pdev_id;
3316 	soc->pdev_list[pdev_id] = pdev;
3317 
3318 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
3319 	soc->pdev_count++;
3320 
3321 	TAILQ_INIT(&pdev->vdev_list);
3322 	qdf_spinlock_create(&pdev->vdev_list_lock);
3323 	pdev->vdev_count = 0;
3324 
3325 	qdf_spinlock_create(&pdev->tx_mutex);
3326 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
3327 	TAILQ_INIT(&pdev->neighbour_peers_list);
3328 	pdev->neighbour_peers_added = false;
3329 	pdev->monitor_configured = false;
3330 
3331 	if (dp_soc_cmn_setup(soc)) {
3332 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3333 			FL("dp_soc_cmn_setup failed"));
3334 		goto fail1;
3335 	}
3336 
3337 	/* Setup per PDEV TCL rings if configured */
3338 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3339 		tx_ring_size =
3340 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
3341 		tx_comp_ring_size =
3342 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
3343 
3344 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
3345 			pdev_id, pdev_id, tx_ring_size)) {
3346 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3347 				FL("dp_srng_setup failed for tcl_data_ring"));
3348 			goto fail1;
3349 		}
3350 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
3351 			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
3352 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3353 				FL("dp_srng_setup failed for tx_comp_ring"));
3354 			goto fail1;
3355 		}
3356 		soc->num_tcl_data_rings++;
3357 	}
3358 
3359 	/* Tx specific init */
3360 	if (dp_tx_pdev_attach(pdev)) {
3361 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3362 			FL("dp_tx_pdev_attach failed"));
3363 		goto fail1;
3364 	}
3365 
3366 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
3367 	/* Setup per PDEV REO rings if configured */
3368 	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
3369 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
3370 			pdev_id, pdev_id, reo_dst_ring_size)) {
3371 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3372 				FL("dp_srng_setup failed for reo_dest_ring"));
3373 			goto fail1;
3374 		}
3375 		soc->num_reo_dest_rings++;
3376 
3377 	}
3378 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
3379 		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
3380 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3381 			 FL("dp_srng_setup failed rx refill ring"));
3382 		goto fail1;
3383 	}
3384 
3385 	if (dp_rxdma_ring_setup(soc, pdev)) {
3386 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3387 			 FL("RXDMA ring config failed"));
3388 		goto fail1;
3389 	}
3390 
3391 	if (dp_mon_rings_setup(soc, pdev)) {
3392 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3393 			  FL("MONITOR rings setup failed"));
3394 		goto fail1;
3395 	}
3396 
3397 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
3398 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3399 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
3400 				  0, pdev_id,
3401 				  entries)) {
3402 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3403 				  FL(RNG_ERR "rxdma_err_dst_ring"));
3404 			goto fail1;
3405 		}
3406 	}
3407 
3408 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
3409 		goto fail1;
3410 
3411 	if (dp_ipa_ring_resource_setup(soc, pdev))
3412 		goto fail1;
3413 
3414 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
3415 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3416 			FL("dp_ipa_uc_attach failed"));
3417 		goto fail1;
3418 	}
3419 
3420 	/* Rx specific init */
3421 	if (dp_rx_pdev_attach(pdev)) {
3422 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3423 			  FL("dp_rx_pdev_attach failed"));
3424 		goto fail1;
3425 	}
3426 
3427 	DP_STATS_INIT(pdev);
3428 
3429 	/* Monitor filter init */
3430 	pdev->mon_filter_mode = MON_FILTER_ALL;
3431 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
3432 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
3433 	pdev->fp_data_filter = FILTER_DATA_ALL;
3434 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
3435 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
3436 	pdev->mo_data_filter = FILTER_DATA_ALL;
3437 
3438 	dp_local_peer_id_pool_init(pdev);
3439 
3440 	dp_dscp_tid_map_setup(pdev);
3441 	dp_pcp_tid_map_setup(pdev);
3442 
3443 	/* Rx monitor mode specific init */
3444 	if (dp_rx_pdev_mon_attach(pdev)) {
3445 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3446 				"dp_rx_pdev_mon_attach failed");
3447 		goto fail1;
3448 	}
3449 
3450 	if (dp_wdi_event_attach(pdev)) {
3451 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3452 				"dp_wdi_evet_attach failed");
3453 		goto fail1;
3454 	}
3455 
3456 	/* set the reo destination during initialization */
3457 	pdev->reo_dest = pdev->pdev_id + 1;
3458 
3459 	/*
3460 	 * initialize ppdu tlv list
3461 	 */
3462 	TAILQ_INIT(&pdev->ppdu_info_list);
3463 	pdev->tlv_count = 0;
3464 	pdev->list_depth = 0;
3465 
3466 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
3467 
3468 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
3469 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
3470 			      TRUE);
3471 
3472 	if (pdev->sojourn_buf) {
3473 		sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
3474 		qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
3475 	}
3476 	/* initialize cal client timer */
3477 	dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
3478 			     &dp_iterate_update_peer_list);
3479 	qdf_event_create(&pdev->fw_peer_stats_event);
3480 
3481 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3482 	dp_tx_ppdu_stats_attach(pdev);
3483 
3484 	return (struct cdp_pdev *)pdev;
3485 
3486 fail1:
3487 	if (pdev->invalid_peer)
3488 		qdf_mem_free(pdev->invalid_peer);
3489 	dp_pdev_detach((struct cdp_pdev *)pdev, 0);
3490 
3491 fail0:
3492 	return NULL;
3493 }
3494 
3495 /*
3496 * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
3497 * @soc: data path SoC handle
3498 * @pdev: Physical device handle
3499 *
3500 * Return: void
3501 */
3502 #ifdef QCA_HOST2FW_RXBUF_RING
3503 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3504 	 struct dp_pdev *pdev)
3505 {
3506 	int i;
3507 
3508 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
3509 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3510 			 RXDMA_BUF, 1);
3511 
3512 	qdf_timer_free(&soc->mon_reap_timer);
3513 }
3514 #else
3515 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3516 	 struct dp_pdev *pdev)
3517 {
3518 }
3519 #endif
3520 
3521 /*
3522  * dp_neighbour_peers_detach() - Detach neighbour peers (NAC clients)
3523  * @pdev: device object
3524  *
3525  * Return: void
3526  */
3527 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3528 {
3529 	struct dp_neighbour_peer *peer = NULL;
3530 	struct dp_neighbour_peer *temp_peer = NULL;
3531 
3532 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3533 			neighbour_peer_list_elem, temp_peer) {
3534 		/* delete this peer from the list */
3535 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3536 				peer, neighbour_peer_list_elem);
3537 		qdf_mem_free(peer);
3538 	}
3539 
3540 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3541 }
3542 
3543 /**
3544 * dp_htt_ppdu_stats_detach() - detach stats resources
3545 * @pdev: Datapath PDEV handle
3546 *
3547 * Return: void
3548 */
3549 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3550 {
3551 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3552 
3553 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3554 			ppdu_info_list_elem, ppdu_info_next) {
3555 		if (!ppdu_info)
3556 			break;
3557 		qdf_assert_always(ppdu_info->nbuf);
3558 		qdf_nbuf_free(ppdu_info->nbuf);
3559 		qdf_mem_free(ppdu_info);
3560 	}
3561 }
3562 
3563 #if !defined(DISABLE_MON_CONFIG)
3564 
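/**
 * dp_mon_ring_cleanup() - Release SRNG resources of Monitor rings
 * @soc: soc handle
 * @pdev: datapath physical dev handle
 * @mac_id: mac number
 *
 * Return: None
 */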
3565 static
3566 void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3567 			 int mac_id)
3568 {
3569 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
3570 		dp_srng_cleanup(soc,
3571 				&pdev->rxdma_mon_buf_ring[mac_id],
3572 				RXDMA_MONITOR_BUF, 0);
3573 
3574 		dp_srng_cleanup(soc,
3575 				&pdev->rxdma_mon_dst_ring[mac_id],
3576 				RXDMA_MONITOR_DST, 0);
3577 
3578 		dp_srng_cleanup(soc,
3579 				&pdev->rxdma_mon_status_ring[mac_id],
3580 				RXDMA_MONITOR_STATUS, 0);
3581 
3582 		dp_srng_cleanup(soc,
3583 				&pdev->rxdma_mon_desc_ring[mac_id],
3584 				RXDMA_MONITOR_DESC, 0);
3585 
3586 		dp_srng_cleanup(soc,
3587 				&pdev->rxdma_err_dst_ring[mac_id],
3588 				RXDMA_DST, 0);
3589 	} else {
3590 		dp_srng_cleanup(soc,
3591 				&pdev->rxdma_mon_status_ring[mac_id],
3592 				RXDMA_MONITOR_STATUS, 0);
3593 
3594 		dp_srng_cleanup(soc,
3595 				&pdev->rxdma_err_dst_ring[mac_id],
3596 				RXDMA_DST, 0);
3597 	}
3598 
3599 }
3600 #else
3601 static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3602 				int mac_id)
3603 {
3604 }
3605 #endif
3606 
3607 /**
3608  * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
3609  *
3610  * @soc: soc handle
3611  * @pdev: datapath physical dev handle
3612  * @mac_id: mac number
3613  *
3614  * Return: None
3615  */
3616 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3617 			       int mac_id)
3618 {
3619 }
3620 
3621 /**
3622  * dp_pdev_mem_reset() - Reset txrx pdev memory
3623  * @pdev: dp pdev handle
3624  *
3625  * Return: None
3626  */
3627 static void dp_pdev_mem_reset(struct dp_pdev *pdev)
3628 {
3629 	uint16_t len = 0;
3630 	uint8_t *dp_pdev_offset = (uint8_t *)pdev;
3631 
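	/*
	 * Zero out everything in dp_pdev that follows the pdev_deinit
	 * flag, so the flag itself and the fields placed before it
	 * survive across a reinit.
	 */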
3632 	len = sizeof(struct dp_pdev) -
3633 		offsetof(struct dp_pdev, pdev_deinit) -
3634 		sizeof(pdev->pdev_deinit);
3635 	dp_pdev_offset = dp_pdev_offset +
3636 			 offsetof(struct dp_pdev, pdev_deinit) +
3637 			 sizeof(pdev->pdev_deinit);
3638 
3639 	qdf_mem_zero(dp_pdev_offset, len);
3640 }
3641 
3642 /**
3643  * dp_pdev_deinit() - Deinit txrx pdev
3644  * @txrx_pdev: Datapath PDEV handle
3645  * @force: Force deinit
3646  *
3647  * Return: None
3648  */
3649 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
3650 {
3651 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3652 	struct dp_soc *soc = pdev->soc;
3653 	qdf_nbuf_t curr_nbuf, next_nbuf;
3654 	int mac_id;
3655 
3656 	/*
3657 	 * Prevent double pdev deinitialization during radio detach
3658 	 * execution, i.e. in the absence of any vdev
3659 	 */
3660 	if (pdev->pdev_deinit)
3661 		return;
3662 
3663 	pdev->pdev_deinit = 1;
3664 
3665 	dp_wdi_event_detach(pdev);
3666 
3667 	dp_tx_pdev_detach(pdev);
3668 
3669 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3670 		dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
3671 			       TCL_DATA, pdev->pdev_id);
3672 		dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
3673 			       WBM2SW_RELEASE, pdev->pdev_id);
3674 	}
3675 
3676 	dp_pktlogmod_exit(pdev);
3677 
3678 	dp_rx_pdev_detach(pdev);
3679 	dp_rx_pdev_mon_detach(pdev);
3680 	dp_neighbour_peers_detach(pdev);
3681 	qdf_spinlock_destroy(&pdev->tx_mutex);
3682 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
3683 
3684 	dp_ipa_uc_detach(soc, pdev);
3685 
3686 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
3687 
3688 	/* Cleanup per PDEV REO rings if configured */
3689 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3690 		dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
3691 			       REO_DST, pdev->pdev_id);
3692 	}
3693 
3694 	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3695 
3696 	dp_rxdma_ring_cleanup(soc, pdev);
3697 
3698 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3699 		dp_mon_ring_deinit(soc, pdev, mac_id);
3700 		dp_srng_deinit(soc, &pdev->rxdma_err_dst_ring[mac_id],
3701 			       RXDMA_DST, 0);
3702 	}
3703 
3704 	curr_nbuf = pdev->invalid_peer_head_msdu;
3705 	while (curr_nbuf) {
3706 		next_nbuf = qdf_nbuf_next(curr_nbuf);
3707 		qdf_nbuf_free(curr_nbuf);
3708 		curr_nbuf = next_nbuf;
3709 	}
3710 	pdev->invalid_peer_head_msdu = NULL;
3711 	pdev->invalid_peer_tail_msdu = NULL;
3712 
3713 	dp_htt_ppdu_stats_detach(pdev);
3714 
3715 	qdf_nbuf_free(pdev->sojourn_buf);
3716 
3717 	dp_cal_client_detach(&pdev->cal_client_ctx);
3718 
3719 	soc->pdev_count--;
3720 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3721 	if (pdev->invalid_peer)
3722 		qdf_mem_free(pdev->invalid_peer);
3723 	qdf_mem_free(pdev->dp_txrx_handle);
3724 	dp_pdev_mem_reset(pdev);
3725 }
3726 
3727 /**
3728  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
3729  * @txrx_pdev: Datapath PDEV handle
3730  * @force: Force deinit
3731  *
3732  * Return: None
3733  */
3734 static void dp_pdev_deinit_wifi3(struct cdp_pdev *txrx_pdev, int force)
3735 {
3736 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3737 	struct dp_soc *soc = pdev->soc;
3738 
3739 	soc->dp_soc_reinit = TRUE;
3740 
3741 	dp_pdev_deinit(txrx_pdev, force);
3742 }
3743 
3744 /*
3745  * dp_pdev_detach() - Complete rest of pdev detach
3746  * @txrx_pdev: Datapath PDEV handle
3747  * @force: Force detach
3748  *
3749  * Return: None
3750  */
3751 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
3752 {
3753 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3754 	struct dp_soc *soc = pdev->soc;
3755 	struct rx_desc_pool *rx_desc_pool;
3756 	int mac_id, mac_for_pdev;
3757 
3758 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3759 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
3760 				TCL_DATA, pdev->pdev_id);
3761 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
3762 				WBM2SW_RELEASE, pdev->pdev_id);
3763 	}
3764 
3765 	dp_mon_link_free(pdev);
3766 
3767 	dp_tx_ppdu_stats_detach(pdev);
3768 
3769 	/* Cleanup per PDEV REO rings if configured */
3770 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3771 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
3772 				REO_DST, pdev->pdev_id);
3773 	}
3774 	dp_rxdma_ring_cleanup(soc, pdev);
3775 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3776 
3777 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3778 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
3779 
3780 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3781 		dp_mon_ring_cleanup(soc, pdev, mac_id);
3782 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3783 				RXDMA_DST, 0);
3784 		if (dp_is_soc_reinit(soc)) {
3785 			mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
3786 							      pdev->pdev_id);
3787 			rx_desc_pool = &soc->rx_desc_status[mac_for_pdev];
3788 			dp_rx_desc_pool_free(soc, rx_desc_pool);
3789 			rx_desc_pool = &soc->rx_desc_mon[mac_for_pdev];
3790 			dp_rx_desc_pool_free(soc, rx_desc_pool);
3791 		}
3792 	}
3793 
3794 	if (dp_is_soc_reinit(soc)) {
3795 		rx_desc_pool = &soc->rx_desc_buf[pdev->pdev_id];
3796 		dp_rx_desc_pool_free(soc, rx_desc_pool);
3797 	}
3798 
3799 	soc->pdev_list[pdev->pdev_id] = NULL;
3800 	qdf_mem_free(pdev);
3801 }
3802 
3803 /*
3804  * dp_pdev_detach_wifi3() - detach txrx pdev
3805  * @txrx_pdev: Datapath PDEV handle
3806  * @force: Force detach
3807  *
3808  * Return: None
3809  */
3810 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
3811 {
3812 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3813 	struct dp_soc *soc = pdev->soc;
3814 
3815 	if (dp_is_soc_reinit(soc)) {
3816 		dp_pdev_detach(txrx_pdev, force);
3817 	} else {
3818 		dp_pdev_deinit(txrx_pdev, force);
3819 		dp_pdev_detach(txrx_pdev, force);
3820 	}
3821 }
3822 
3823 /*
3824  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
3825  * @soc: DP SOC handle
3826  */
3827 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
3828 {
3829 	struct reo_desc_list_node *desc;
3830 	struct dp_rx_tid *rx_tid;
3831 
3832 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3833 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
3834 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
3835 		rx_tid = &desc->rx_tid;
3836 		qdf_mem_unmap_nbytes_single(soc->osdev,
3837 			rx_tid->hw_qdesc_paddr,
3838 			QDF_DMA_BIDIRECTIONAL,
3839 			rx_tid->hw_qdesc_alloc_size);
3840 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
3841 		qdf_mem_free(desc);
3842 	}
3843 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3844 	qdf_list_destroy(&soc->reo_desc_freelist);
3845 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
3846 }
3847 
3848 /**
3849  * dp_soc_mem_reset() - Reset Dp Soc memory
3850  * @soc: DP handle
3851  *
3852  * Return: None
3853  */
3854 static void dp_soc_mem_reset(struct dp_soc *soc)
3855 {
3856 	uint16_t len = 0;
3857 	uint8_t *dp_soc_offset = (uint8_t *)soc;
3858 
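	/*
	 * Zero out everything in dp_soc that follows the dp_soc_reinit
	 * flag, preserving the flag and the fields placed before it.
	 */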
3859 	len = sizeof(struct dp_soc) -
3860 		offsetof(struct dp_soc, dp_soc_reinit) -
3861 		sizeof(soc->dp_soc_reinit);
3862 	dp_soc_offset = dp_soc_offset +
3863 			offsetof(struct dp_soc, dp_soc_reinit) +
3864 			sizeof(soc->dp_soc_reinit);
3865 
3866 	qdf_mem_zero(dp_soc_offset, len);
3867 }
3868 
3869 /**
3870  * dp_soc_deinit() - Deinitialize txrx SOC
3871  * @txrx_soc: Opaque DP SOC handle
3872  *
3873  * Return: None
3874  */
3875 static void dp_soc_deinit(void *txrx_soc)
3876 {
3877 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3878 	int i;
3879 
3880 	qdf_atomic_set(&soc->cmn_init_done, 0);
3881 
3882 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3883 		if (soc->pdev_list[i])
3884 			dp_pdev_deinit((struct cdp_pdev *)
3885 					soc->pdev_list[i], 1);
3886 	}
3887 
3888 	qdf_flush_work(&soc->htt_stats.work);
3889 	qdf_disable_work(&soc->htt_stats.work);
3890 
3891 	/* Free pending htt stats messages */
3892 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
3893 
3894 	dp_reo_cmdlist_destroy(soc);
3895 
3896 	dp_peer_find_detach(soc);
3897 
3898 	/* Free the ring memories */
3899 	/* Common rings */
3900 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3901 
3902 	/* Tx data rings */
3903 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3904 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
3905 			dp_srng_deinit(soc, &soc->tcl_data_ring[i],
3906 				       TCL_DATA, i);
3907 			dp_srng_deinit(soc, &soc->tx_comp_ring[i],
3908 				       WBM2SW_RELEASE, i);
3909 		}
3910 	}
3911 
3912 	/* TCL command and status rings */
3913 	dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3914 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3915 
3916 	/* Rx data rings */
3917 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3918 		soc->num_reo_dest_rings =
3919 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3920 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3921 			/* TODO: Get number of rings and ring sizes
3922 			 * from wlan_cfg
3923 			 */
3924 			dp_srng_deinit(soc, &soc->reo_dest_ring[i],
3925 				       REO_DST, i);
3926 		}
3927 	}
3928 	/* REO reinjection ring */
3929 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3930 
3931 	/* Rx release ring */
3932 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3933 
3934 	/* Rx exception ring */
3935 	/* TODO: Better to store ring_type and ring_num in
3936 	 * dp_srng during setup
3937 	 */
3938 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3939 
3940 	/* REO command and status rings */
3941 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3942 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
3943 
3944 	dp_soc_wds_detach(soc);
3945 
3946 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
3947 	qdf_spinlock_destroy(&soc->htt_stats.lock);
3948 
3949 	htt_soc_htc_dealloc(soc->htt_handle);
3950 
3951 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3952 
3953 	dp_reo_cmdlist_destroy(soc);
3954 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3955 	dp_reo_desc_freelist_destroy(soc);
3956 
3957 	qdf_spinlock_destroy(&soc->ast_lock);
3958 
3959 	dp_soc_mem_reset(soc);
3960 }
3961 
3962 /**
3963  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
3964  * @txrx_soc: Opaque DP SOC handle
3965  *
3966  * Return: None
3967  */
3968 static void dp_soc_deinit_wifi3(void *txrx_soc)
3969 {
3970 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3971 
3972 	soc->dp_soc_reinit = 1;
3973 	dp_soc_deinit(txrx_soc);
3974 }
3975 
3976 /*
3977  * dp_soc_detach() - Detach rest of txrx SOC
3978  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3979  *
3980  * Return: None
3981  */
3982 static void dp_soc_detach(void *txrx_soc)
3983 {
3984 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3985 	int i;
3986 
3987 	qdf_atomic_set(&soc->cmn_init_done, 0);
3988 
3989 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
3990 	 * SW descriptors
3991 	 */
3992 
3993 	for (i = 0; i < MAX_PDEV_CNT; i++) {
3994 		if (soc->pdev_list[i])
3995 			dp_pdev_detach((struct cdp_pdev *)
3996 					     soc->pdev_list[i], 1);
3997 	}
3998 
3999 	/* Free the ring memories */
4000 	/* Common rings */
4001 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
4002 
4003 	dp_tx_soc_detach(soc);
4004 
4005 	/* Tx data rings */
4006 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4007 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
4008 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
4009 				TCL_DATA, i);
4010 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
4011 				WBM2SW_RELEASE, i);
4012 		}
4013 	}
4014 
4015 	/* TCL command and status rings */
4016 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
4017 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
4018 
4019 	/* Rx data rings */
4020 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
4021 		soc->num_reo_dest_rings =
4022 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
4023 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
4024 			/* TODO: Get number of rings and ring sizes
4025 			 * from wlan_cfg
4026 			 */
4027 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
4028 				REO_DST, i);
4029 		}
4030 	}
4031 	/* REO reinjection ring */
4032 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
4033 
4034 	/* Rx release ring */
4035 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
4036 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3);
4037 
4038 	/* Rx exception ring */
4039 	/* TODO: Better to store ring_type and ring_num in
4040 	 * dp_srng during setup
4041 	 */
4042 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
4043 
4044 	/* REO command and status rings */
4045 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
4046 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
4047 	dp_hw_link_desc_pool_cleanup(soc);
4048 
4049 	htt_soc_detach(soc->htt_handle);
4050 	soc->dp_soc_reinit = 0;
4051 
4052 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
4053 
4054 	qdf_mem_free(soc);
4055 }
4056 
4057 /*
4058  * dp_soc_detach_wifi3() - Detach txrx SOC
4059  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4060  *
4061  * Return: None
4062  */
4063 static void dp_soc_detach_wifi3(void *txrx_soc)
4064 {
4065 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4066 
4067 	if (dp_is_soc_reinit(soc)) {
4068 		dp_soc_detach(txrx_soc);
4069 	} else {
4070 		dp_soc_deinit(txrx_soc);
4071 		dp_soc_detach(txrx_soc);
4072 	}
4073 
4074 }
4075 
4076 #if !defined(DISABLE_MON_CONFIG)
4077 /**
4078  * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
4079  * @soc: soc handle
4080  * @pdev: physical device handle
4081  * @mac_id: ring number
4082  * @mac_for_pdev: mac_id
4083  *
4084  * Return: non-zero for failure, zero for success
4085  */
4086 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4087 					struct dp_pdev *pdev,
4088 					int mac_id,
4089 					int mac_for_pdev)
4090 {
4091 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4092 
4093 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
4094 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4095 					pdev->rxdma_mon_buf_ring[mac_id]
4096 					.hal_srng,
4097 					RXDMA_MONITOR_BUF);
4098 
4099 		if (status != QDF_STATUS_SUCCESS) {
4100 			dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
4101 			return status;
4102 		}
4103 
4104 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4105 					pdev->rxdma_mon_dst_ring[mac_id]
4106 					.hal_srng,
4107 					RXDMA_MONITOR_DST);
4108 
4109 		if (status != QDF_STATUS_SUCCESS) {
4110 			dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
4111 			return status;
4112 		}
4113 
4114 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4115 					pdev->rxdma_mon_status_ring[mac_id]
4116 					.hal_srng,
4117 					RXDMA_MONITOR_STATUS);
4118 
4119 		if (status != QDF_STATUS_SUCCESS) {
4120 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4121 			return status;
4122 		}
4123 
4124 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4125 					pdev->rxdma_mon_desc_ring[mac_id]
4126 					.hal_srng,
4127 					RXDMA_MONITOR_DESC);
4128 
4129 		if (status != QDF_STATUS_SUCCESS) {
4130 			dp_err("Failed to send htt srng message for Rxdma mon desc ring");
4131 			return status;
4132 		}
4133 	} else {
4134 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4135 					pdev->rxdma_mon_status_ring[mac_id]
4136 					.hal_srng,
4137 					RXDMA_MONITOR_STATUS);
4138 
4139 		if (status != QDF_STATUS_SUCCESS) {
4140 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4141 			return status;
4142 		}
4143 	}
4144 
4145 	return status;
4146 
4147 }
4148 #else
4149 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4150 					struct dp_pdev *pdev,
4151 					int mac_id,
4152 					int mac_for_pdev)
4153 {
4154 	return QDF_STATUS_SUCCESS;
4155 }
4156 #endif
4157 
4158 /*
4159  * dp_rxdma_ring_config() - configure the RX DMA rings
4160  *
4161  * This function is used to configure the MAC rings.
4162  * On MCL, the host provides buffers via the Host2FW ring;
4163  * the FW refills (copies) buffers into the ring and updates
4164  * the ring_idx in a register.
4165  *
4166  * @soc: data path SoC handle
4167  *
4168  * Return: zero on success, non-zero on failure
4169  */
4170 #ifdef QCA_HOST2FW_RXBUF_RING
4171 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4172 {
4173 	int i;
4174 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4175 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4176 		struct dp_pdev *pdev = soc->pdev_list[i];
4177 
4178 		if (pdev) {
4179 			int mac_id;
4180 			bool dbs_enable = 0;
4181 			int max_mac_rings =
4182 				 wlan_cfg_get_num_mac_rings
4183 				(pdev->wlan_cfg_ctx);
4184 
4185 			htt_srng_setup(soc->htt_handle, 0,
4186 				 pdev->rx_refill_buf_ring.hal_srng,
4187 				 RXDMA_BUF);
4188 
4189 			if (pdev->rx_refill_buf_ring2.hal_srng)
4190 				htt_srng_setup(soc->htt_handle, 0,
4191 					pdev->rx_refill_buf_ring2.hal_srng,
4192 					RXDMA_BUF);
4193 
4194 			if (soc->cdp_soc.ol_ops->
4195 				is_hw_dbs_2x2_capable) {
4196 				dbs_enable = soc->cdp_soc.ol_ops->
4197 					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
4198 			}
4199 
4200 			if (dbs_enable) {
4201 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4202 				QDF_TRACE_LEVEL_ERROR,
4203 				FL("DBS enabled max_mac_rings %d"),
4204 					 max_mac_rings);
4205 			} else {
4206 				max_mac_rings = 1;
4207 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4208 					 QDF_TRACE_LEVEL_ERROR,
4209 					 FL("DBS disabled, max_mac_rings %d"),
4210 					 max_mac_rings);
4211 			}
4212 
4213 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4214 					 FL("pdev_id %d max_mac_rings %d"),
4215 					 pdev->pdev_id, max_mac_rings);
4216 
4217 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
4218 				int mac_for_pdev = dp_get_mac_id_for_pdev(
4219 							mac_id, pdev->pdev_id);
4220 
4221 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4222 					 QDF_TRACE_LEVEL_ERROR,
4223 					 FL("mac_id %d"), mac_for_pdev);
4224 
4225 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4226 					 pdev->rx_mac_buf_ring[mac_id]
4227 						.hal_srng,
4228 					 RXDMA_BUF);
4229 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4230 					pdev->rxdma_err_dst_ring[mac_id]
4231 						.hal_srng,
4232 					RXDMA_DST);
4233 
4234 				/* Configure monitor mode rings */
4235 				status = dp_mon_htt_srng_setup(soc, pdev,
4236 							       mac_id,
4237 							       mac_for_pdev);
4238 				if (status != QDF_STATUS_SUCCESS) {
4239 					dp_err("Failed to send htt monitor messages to target");
4240 					return status;
4241 				}
4242 
4243 			}
4244 		}
4245 	}
4246 
4247 	/*
4248 	 * Timer to reap rxdma status rings.
4249 	 * Needed until we enable ppdu end interrupts
4250 	 */
4251 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
4252 			dp_service_mon_rings, (void *)soc,
4253 			QDF_TIMER_TYPE_WAKE_APPS);
4254 	soc->reap_timer_init = 1;
4255 	return status;
4256 }
4257 #else
4258 /* This is only for WIN */
4259 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4260 {
4261 	int i;
4262 	int mac_id;
4263 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4264 
4265 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4266 		struct dp_pdev *pdev = soc->pdev_list[i];
4267 
4268 		if (!pdev)
4269 			continue;
4270 
4271 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4272 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
4273 
4274 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4275 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
4276 #ifndef DISABLE_MON_CONFIG
4277 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4278 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4279 				RXDMA_MONITOR_BUF);
4280 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4281 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
4282 				RXDMA_MONITOR_DST);
4283 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4284 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4285 				RXDMA_MONITOR_STATUS);
4286 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4287 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
4288 				RXDMA_MONITOR_DESC);
4289 #endif
4290 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4291 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
4292 				RXDMA_DST);
4293 		}
4294 	}
4295 	return status;
4296 }
4297 #endif
4298 
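/*
 * dp_rxdma_ring_sel_cfg() - select the rx TLVs subscribed on the refill ring
 * @soc: data path SoC handle
 *
 * With NO_RX_PKT_HDR_TLV, the rx_pkt_header TLV is not subscribed and
 * explicit TLV offsets are programmed instead.
 *
 * Return: QDF_STATUS
 */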
4299 #ifdef NO_RX_PKT_HDR_TLV
4300 static QDF_STATUS
4301 dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4302 {
4303 	int i;
4304 	int mac_id;
4305 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
4306 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4307 
4308 	htt_tlv_filter.mpdu_start = 1;
4309 	htt_tlv_filter.msdu_start = 1;
4310 	htt_tlv_filter.mpdu_end = 1;
4311 	htt_tlv_filter.msdu_end = 1;
4312 	htt_tlv_filter.attention = 1;
4313 	htt_tlv_filter.packet = 1;
4314 	htt_tlv_filter.packet_header = 0;
4315 
4316 	htt_tlv_filter.ppdu_start = 0;
4317 	htt_tlv_filter.ppdu_end = 0;
4318 	htt_tlv_filter.ppdu_end_user_stats = 0;
4319 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4320 	htt_tlv_filter.ppdu_end_status_done = 0;
4321 	htt_tlv_filter.enable_fp = 1;
4322 	htt_tlv_filter.enable_md = 0;
4324 	htt_tlv_filter.enable_mo = 0;
4325 
4326 	htt_tlv_filter.fp_mgmt_filter = 0;
4327 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
4328 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
4329 					 FILTER_DATA_MCAST |
4330 					 FILTER_DATA_DATA);
4331 	htt_tlv_filter.mo_mgmt_filter = 0;
4332 	htt_tlv_filter.mo_ctrl_filter = 0;
4333 	htt_tlv_filter.mo_data_filter = 0;
4334 	htt_tlv_filter.md_data_filter = 0;
4335 
4336 	htt_tlv_filter.offset_valid = true;
4337 
4338 	htt_tlv_filter.rx_packet_offset = RX_PKT_TLVS_LEN;
4339 	/* Not subscribing to rx_pkt_header */
4340 	htt_tlv_filter.rx_header_offset = 0;
4341 	htt_tlv_filter.rx_mpdu_start_offset =
4342 				HAL_RX_PKT_TLV_MPDU_START_OFFSET(soc->hal_soc);
4343 	htt_tlv_filter.rx_mpdu_end_offset =
4344 				HAL_RX_PKT_TLV_MPDU_END_OFFSET(soc->hal_soc);
4345 	htt_tlv_filter.rx_msdu_start_offset =
4346 				HAL_RX_PKT_TLV_MSDU_START_OFFSET(soc->hal_soc);
4347 	htt_tlv_filter.rx_msdu_end_offset =
4348 				HAL_RX_PKT_TLV_MSDU_END_OFFSET(soc->hal_soc);
4349 	htt_tlv_filter.rx_attn_offset =
4350 				HAL_RX_PKT_TLV_ATTN_OFFSET(soc->hal_soc);
4351 
4352 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4353 		struct dp_pdev *pdev = soc->pdev_list[i];
4354 
4355 		if (!pdev)
4356 			continue;
4357 
4358 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4359 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4360 					pdev->pdev_id);
4361 
4362 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4363 					    pdev->rx_refill_buf_ring.hal_srng,
4364 					    RXDMA_BUF, RX_BUFFER_SIZE,
4365 					    &htt_tlv_filter);
4366 		}
4367 	}
4368 	return status;
4369 }
4370 #else
4371 static QDF_STATUS
4372 dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4373 {
4374 	return QDF_STATUS_SUCCESS;
4375 }
4376 #endif
4377 
4378 /*
4379  * dp_soc_attach_target_wifi3() - SOC initialization in the target
4380  * @cdp_soc: Opaque Datapath SOC handle
4381  *
4382  * Return: zero on success, non-zero on failure
4383  */
4384 static QDF_STATUS
4385 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
4386 {
4387 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4388 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4389 
4390 	htt_soc_attach_target(soc->htt_handle);
4391 
4392 	status = dp_rxdma_ring_config(soc);
4393 	if (status != QDF_STATUS_SUCCESS) {
4394 		dp_err("Failed to send htt srng setup messages to target");
4395 		return status;
4396 	}
4397 
4398 	status = dp_rxdma_ring_sel_cfg(soc);
4399 	if (status != QDF_STATUS_SUCCESS) {
4400 		dp_err("Failed to send htt ring config message to target");
4401 		return status;
4402 	}
4403 
4404 	DP_STATS_INIT(soc);
4405 
4406 	/* initialize work queue for stats processing */
4407 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
4408 
4409 	qdf_minidump_log((void *)soc, sizeof(*soc), "dp_soc");
4410 
4411 	return QDF_STATUS_SUCCESS;
4412 }
4413 
4414 /*
4415  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
4416  * @cdp_soc: Datapath SOC handle
4417  */
4418 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
4419 {
4420 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
4421 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
4422 }
4423 
4424 /*
4425  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
4426  * @cdp_soc: Datapath SOC handle
4427  * @config: nss config
4428  */
4429 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
4430 {
4431 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
4432 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
4433 
4434 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
4435 
4436 	/*
4437 	 * TODO: masked out based on the per offloaded radio
4438 	 */
4439 	switch (config) {
4440 	case dp_nss_cfg_default:
4441 		break;
4442 	case dp_nss_cfg_dbdc:
4443 	case dp_nss_cfg_dbtc:
4444 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
4445 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
4446 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
4447 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
4448 		break;
4449 	default:
4450 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4451 			  "Invalid offload config %d", config);
4452 	}
4453 
4454 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4455 		  FL("nss-wifi<0> nss config is enabled"));
4456 }
4457 
4458 /*
4459 * dp_vdev_attach_wifi3() - attach txrx vdev
4460 * @txrx_pdev: Datapath PDEV handle
4461 * @vdev_mac_addr: MAC address of the virtual interface
4462 * @vdev_id: VDEV Id
4463 * @op_mode: VDEV operating mode
4464 *
4465 * Return: DP VDEV handle on success, NULL on failure
4466 */
4467 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
4468 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
4469 {
4470 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4471 	struct dp_soc *soc = pdev->soc;
4472 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
4473 
4474 	if (!vdev) {
4475 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4476 			FL("DP VDEV memory allocation failed"));
4477 		goto fail0;
4478 	}
4479 
4480 	vdev->pdev = pdev;
4481 	vdev->vdev_id = vdev_id;
4482 	vdev->opmode = op_mode;
4483 	vdev->osdev = soc->osdev;
4484 
4485 	vdev->osif_rx = NULL;
4486 	vdev->osif_rsim_rx_decap = NULL;
4487 	vdev->osif_get_key = NULL;
4488 	vdev->osif_rx_mon = NULL;
4489 	vdev->osif_tx_free_ext = NULL;
4490 	vdev->osif_vdev = NULL;
4491 
4492 	vdev->delete.pending = 0;
4493 	vdev->safemode = 0;
4494 	vdev->drop_unenc = 1;
4495 	vdev->sec_type = cdp_sec_type_none;
4496 #ifdef notyet
4497 	vdev->filters_num = 0;
4498 #endif
4499 
4500 	qdf_mem_copy(
4501 		&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
4502 
4503 	/* TODO: Initialize default HTT meta data that will be used in
4504 	 * TCL descriptors for packets transmitted from this VDEV
4505 	 */
4506 
4507 	TAILQ_INIT(&vdev->peer_list);
4508 
4509 	if ((soc->intr_mode == DP_INTR_POLL) &&
4510 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
4511 		if ((pdev->vdev_count == 0) ||
4512 		    (wlan_op_mode_monitor == vdev->opmode))
4513 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
4514 	}
4515 
4516 	if (wlan_op_mode_monitor == vdev->opmode) {
4517 		pdev->monitor_vdev = vdev;
4518 		return (struct cdp_vdev *)vdev;
4519 	}
4520 
4521 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4522 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4523 	vdev->dscp_tid_map_id = 0;
4524 	vdev->mcast_enhancement_en = 0;
4525 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
4526 	vdev->prev_tx_enq_tstamp = 0;
4527 	vdev->prev_rx_deliver_tstamp = 0;
4528 
4529 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4530 	/* add this vdev into the pdev's list */
4531 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
4532 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4533 	pdev->vdev_count++;
4534 
4535 	dp_tx_vdev_attach(vdev);
4536 
4537 	if (pdev->vdev_count == 1)
4538 		dp_lro_hash_setup(soc, pdev);
4539 
4540 	dp_info("Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
4541 	DP_STATS_INIT(vdev);
4542 
4543 	if (wlan_op_mode_sta == vdev->opmode)
4544 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
4545 							vdev->mac_addr.raw,
4546 							NULL);
4547 
4548 	return (struct cdp_vdev *)vdev;
4549 
4550 fail0:
4551 	return NULL;
4552 }
4553 
4554 /**
4555  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
4556  * @vdev: Datapath VDEV handle
4557  * @osif_vdev: OSIF vdev handle
4558  * @ctrl_vdev: UMAC vdev handle
4559  * @txrx_ops: Tx and Rx operations
4560  *
4561  * Return: DP VDEV handle on success, NULL on failure
4562  */
4563 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
4564 	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
4565 	struct ol_txrx_ops *txrx_ops)
4566 {
4567 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4568 	vdev->osif_vdev = osif_vdev;
4569 	vdev->ctrl_vdev = ctrl_vdev;
4570 	vdev->osif_rx = txrx_ops->rx.rx;
4571 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
4572 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
4573 	vdev->osif_get_key = txrx_ops->get_key;
4574 	vdev->osif_rx_mon = txrx_ops->rx.mon;
4575 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
4576 	vdev->tx_comp = txrx_ops->tx.tx_comp;
4577 #ifdef notyet
4578 #if ATH_SUPPORT_WAPI
4579 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
4580 #endif
4581 #endif
4582 #ifdef UMAC_SUPPORT_PROXY_ARP
4583 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
4584 #endif
4585 	vdev->me_convert = txrx_ops->me_convert;
4586 
4587 	/* TODO: Enable the following once Tx code is integrated */
4588 	if (vdev->mesh_vdev)
4589 		txrx_ops->tx.tx = dp_tx_send_mesh;
4590 	else
4591 		txrx_ops->tx.tx = dp_tx_send;
4592 
4593 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
4594 
4595 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
4596 		"DP Vdev Register success");
4597 }
4598 
4599 /**
4600  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
4601  * @vdev: Datapath VDEV handle
4602  * @unmap_only: Flag to indicate "only unmap"
4603  *
4604  * Return: void
4605  */
4606 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
4607 {
4608 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4609 	struct dp_pdev *pdev = vdev->pdev;
4610 	struct dp_soc *soc = pdev->soc;
4611 	struct dp_peer *peer;
4612 	uint16_t *peer_ids;
4613 	struct dp_ast_entry *ase, *tmp_ase;
4614 	uint8_t i = 0, j = 0;
4615 
4616 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
4617 	if (!peer_ids) {
4618 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4619 			"DP alloc failure - unable to flush peers");
4620 		return;
4621 	}
4622 
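	/*
	 * Collect the peer ids under peer_ref_mutex first; the delete/unmap
	 * handlers invoked below may themselves acquire peer_ref_mutex, so
	 * they are called only after the list walk drops the lock.
	 */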
4623 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4624 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4625 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
4626 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
4627 				if (j < soc->max_peers)
4628 					peer_ids[j++] = peer->peer_ids[i];
4629 	}
4630 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4631 
4632 	for (i = 0; i < j ; i++) {
4633 		if (unmap_only) {
4634 			peer = __dp_peer_find_by_id(soc, peer_ids[i]);
4635 
4636 			if (peer) {
4637 				if (soc->is_peer_map_unmap_v2) {
4638 					/* free AST entries of peer before
4639 					 * release peer reference
4640 					 */
4641 					DP_PEER_ITERATE_ASE_LIST(peer, ase,
4642 								 tmp_ase) {
4643 						dp_rx_peer_unmap_handler
4644 							(soc, peer_ids[i],
4645 							 vdev->vdev_id,
4646 							 ase->mac_addr.raw,
4647 							 1);
4648 					}
4649 				}
4650 				dp_rx_peer_unmap_handler(soc, peer_ids[i],
4651 							 vdev->vdev_id,
4652 							 peer->mac_addr.raw,
4653 							 0);
4654 			}
4655 		} else {
4656 			peer = dp_peer_find_by_id(soc, peer_ids[i]);
4657 
4658 			if (peer) {
4659 				dp_info("peer: %pM is getting flushed",
4660 					peer->mac_addr.raw);
4661 
4662 				if (soc->is_peer_map_unmap_v2) {
4663 					/* free AST entries of peer before
4664 					 * release peer reference
4665 					 */
4666 					DP_PEER_ITERATE_ASE_LIST(peer, ase,
4667 								 tmp_ase) {
4668 						dp_rx_peer_unmap_handler
4669 							(soc, peer_ids[i],
4670 							 vdev->vdev_id,
4671 							 ase->mac_addr.raw,
4672 							 1);
4673 					}
4674 				}
4675 				dp_peer_delete_wifi3(peer, 0);
4676 				/*
4677 				 * we need to call dp_peer_unref_del_find_by_id
4678 				 * to remove additional ref count incremented
4679 				 * by dp_peer_find_by_id() call.
4680 				 *
4681 				 * Hold the ref count while executing
4682 				 * dp_peer_delete_wifi3() call.
4683 				 *
4684 				 */
4685 				dp_peer_unref_del_find_by_id(peer);
4686 				dp_rx_peer_unmap_handler(soc, peer_ids[i],
4687 							 vdev->vdev_id,
4688 							 peer->mac_addr.raw, 0);
4689 			}
4690 		}
4691 	}
4692 
4693 	qdf_mem_free(peer_ids);
4694 
4695 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4696 		FL("Flushed peers for vdev object %pK "), vdev);
4697 }
4698 
4699 /*
4700  * dp_vdev_detach_wifi3() - Detach txrx vdev
4701  * @txrx_vdev:		Datapath VDEV handle
4702  * @callback:		Callback OL_IF on completion of detach
4703  * @cb_context:	Callback context
4704  *
4705  */
4706 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
4707 	ol_txrx_vdev_delete_cb callback, void *cb_context)
4708 {
4709 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4710 	struct dp_pdev *pdev;
4711 	struct dp_soc *soc;
4712 	struct dp_neighbour_peer *peer = NULL;
4713 	struct dp_neighbour_peer *temp_peer = NULL;
4714 
4715 	/* preconditions */
4716 	qdf_assert_always(vdev);
4717 	pdev = vdev->pdev;
4718 	soc = pdev->soc;
4719 
4720 	if (wlan_op_mode_monitor == vdev->opmode)
4721 		goto free_vdev;
4722 
4723 	if (wlan_op_mode_sta == vdev->opmode)
4724 		dp_peer_delete_wifi3(vdev->vap_self_peer, 0);
4725 
4726 	/*
4727 	 * If Target is hung, flush all peers before detaching vdev
4728 	 * this will free all references held due to missing
4729 	 * unmap commands from Target
4730 	 */
4731 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
4732 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
4733 
4734 	/*
4735 	 * Use peer_ref_mutex while accessing peer_list, in case
4736 	 * a peer is in the process of being removed from the list.
4737 	 */
4738 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4739 	/* check that the vdev has no peers allocated */
4740 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
4741 		/* debug print - will be removed later */
4742 		dp_warn("not deleting vdev object %pK (%pM) until deletion finishes for all its peers",
4743 			vdev, vdev->mac_addr.raw);
4744 		/* indicate that the vdev needs to be deleted */
4745 		vdev->delete.pending = 1;
4746 		vdev->delete.callback = callback;
4747 		vdev->delete.context = cb_context;
4748 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4749 		return;
4750 	}
4751 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4752 
4753 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4754 	if (!soc->hw_nac_monitor_support) {
4755 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
4756 			      neighbour_peer_list_elem) {
4757 			QDF_ASSERT(peer->vdev != vdev);
4758 		}
4759 	} else {
4760 		TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
4761 				   neighbour_peer_list_elem, temp_peer) {
4762 			if (peer->vdev == vdev) {
4763 				TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
4764 					     neighbour_peer_list_elem);
4765 				qdf_mem_free(peer);
4766 			}
4767 		}
4768 	}
4769 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4770 
4771 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4772 	dp_tx_vdev_detach(vdev);
4773 	/* remove the vdev from its parent pdev's list */
4774 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
4775 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4776 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
4777 
4778 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4779 free_vdev:
4780 	qdf_mem_free(vdev);
4781 
4782 	if (callback)
4783 		callback(cb_context);
4784 }
4785 
4786 /*
4787  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
4788  * @soc - datapath soc handle
4789  * @peer - datapath peer handle
4790  *
4791  * Delete the AST entries belonging to a peer
4792  */
4793 #ifdef FEATURE_AST
4794 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
4795 					      struct dp_peer *peer)
4796 {
4797 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
4798 
4799 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
4800 		dp_peer_del_ast(soc, ast_entry);
4801 
4802 	peer->self_ast_entry = NULL;
4803 }
4804 #else
4805 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
4806 					      struct dp_peer *peer)
4807 {
4808 }
4809 #endif
4810 
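/*
 * dp_peer_can_reuse() - check if an existing bss peer entry can be reused
 * @vdev: Datapath VDEV handle
 * @peer_mac_addr: Peer MAC address
 *
 * Return: existing DP peer if it can be reused, NULL otherwise
 */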
4811 #if ATH_SUPPORT_WRAP
4812 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
4813 						uint8_t *peer_mac_addr)
4814 {
4815 	struct dp_peer *peer;
4816 
4817 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
4818 				      0, vdev->vdev_id);
4819 	if (!peer)
4820 		return NULL;
4821 
4822 	if (peer->bss_peer)
4823 		return peer;
4824 
4825 	dp_peer_unref_delete(peer);
4826 	return NULL;
4827 }
4828 #else
4829 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
4830 						uint8_t *peer_mac_addr)
4831 {
4832 	struct dp_peer *peer;
4833 
4834 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
4835 				      0, vdev->vdev_id);
4836 	if (!peer)
4837 		return NULL;
4838 
4839 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
4840 		return peer;
4841 
4842 	dp_peer_unref_delete(peer);
4843 	return NULL;
4844 }
4845 #endif
4846 
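/*
 * dp_peer_ast_handle_roam_del() - delete the stale WDS AST entry left
 * behind when a STA roams between root AP and repeater AP
 * @soc: datapath soc handle
 * @pdev: datapath pdev handle
 * @peer_mac_addr: Peer MAC address
 */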
4847 #ifdef FEATURE_AST
4848 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
4849 					       struct dp_pdev *pdev,
4850 					       uint8_t *peer_mac_addr)
4851 {
4852 	struct dp_ast_entry *ast_entry;
4853 
4854 	qdf_spin_lock_bh(&soc->ast_lock);
4855 	if (soc->ast_override_support)
4856 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
4857 							    pdev->pdev_id);
4858 	else
4859 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
4860 
4861 	if (ast_entry && ast_entry->next_hop &&
4862 	    !ast_entry->delete_in_progress)
4863 		dp_peer_del_ast(soc, ast_entry);
4864 
4865 	qdf_spin_unlock_bh(&soc->ast_lock);
4866 }
4867 #endif
4868 
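/*
 * dp_peer_rx_bufq_resources_init() - init the queue used to cache rx
 * packets for a peer until it is registered with the OS shim
 * @peer: Datapath peer handle
 *
 * Return: None
 */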
4869 #ifdef PEER_CACHE_RX_PKTS
4870 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
4871 {
4872 	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
4873 	peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
4874 	qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
4875 }
4876 #else
4877 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
4878 {
4879 }
4880 #endif
4881 
4882 /*
4883  * dp_peer_create_wifi3() - attach txrx peer
4884  * @txrx_vdev: Datapath VDEV handle
4885  * @peer_mac_addr: Peer MAC address
4886  *
4887  * Return: DP peer handle on success, NULL on failure
4888  */
4889 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
4890 		uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
4891 {
4892 	struct dp_peer *peer;
4893 	int i;
4894 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4895 	struct dp_pdev *pdev;
4896 	struct dp_soc *soc;
4897 	struct cdp_peer_cookie peer_cookie;
4898 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
4899 
4900 	/* preconditions */
4901 	qdf_assert(vdev);
4902 	qdf_assert(peer_mac_addr);
4903 
4904 	pdev = vdev->pdev;
4905 	soc = pdev->soc;
4906 
4907 	/*
4908 	 * If a peer entry with given MAC address already exists,
4909 	 * reuse the peer and reset the state of peer.
4910 	 */
4911 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
4912 
4913 	if (peer) {
4914 		qdf_atomic_init(&peer->is_default_route_set);
4915 		dp_peer_cleanup(vdev, peer);
4916 
4917 		qdf_spin_lock_bh(&soc->ast_lock);
4918 		dp_peer_delete_ast_entries(soc, peer);
4919 		peer->delete_in_progress = false;
4920 		qdf_spin_unlock_bh(&soc->ast_lock);
4921 
4922 		if ((vdev->opmode == wlan_op_mode_sta) &&
4923 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
4924 		     QDF_MAC_ADDR_SIZE)) {
4925 			ast_type = CDP_TXRX_AST_TYPE_SELF;
4926 		}
4927 
4928 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
4929 
4930 		/*
4931 		* Control path maintains a node count which is incremented
4932 		* for every new peer create command. Since a new peer is not
4933 		* being created and the earlier reference is reused here,
4934 		* peer_unref_delete event is sent to control path to
4935 		* increment the count back.
4936 		*/
4937 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
4938 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
4939 				peer->mac_addr.raw, vdev->mac_addr.raw,
4940 				vdev->opmode, peer->ctrl_peer, ctrl_peer);
4941 		}
4942 		peer->ctrl_peer = ctrl_peer;
4943 
4944 		dp_local_peer_id_alloc(pdev, peer);
4945 		DP_STATS_INIT(peer);
4946 		DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
4947 
4948 		return (void *)peer;
4949 	} else {
4950 		/*
4951 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
4952 		 * need to remove the AST entry which was earlier added as a WDS
4953 		 * entry.
4954 		 * If an AST entry exists, but no peer entry exists with a given
4955 		 * MAC addresses, we could deduce it as a WDS entry
4956 		 */
4957 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
4958 	}
4959 
4960 #ifdef notyet
4961 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
4962 		soc->mempool_ol_ath_peer);
4963 #else
4964 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
4965 #endif
4966 
4967 	if (!peer)
4968 		return NULL; /* failure */
4969 
4970 	qdf_mem_zero(peer, sizeof(struct dp_peer));
4971 
4972 	TAILQ_INIT(&peer->ast_entry_list);
4973 
4974 	/* store provided params */
4975 	peer->vdev = vdev;
4976 	peer->ctrl_peer = ctrl_peer;
4977 
4978 	if ((vdev->opmode == wlan_op_mode_sta) &&
4979 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
4980 			 QDF_MAC_ADDR_SIZE)) {
4981 		ast_type = CDP_TXRX_AST_TYPE_SELF;
4982 	}
4983 
4984 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
4985 
4986 	qdf_spinlock_create(&peer->peer_info_lock);
4987 
4988 	dp_peer_rx_bufq_resources_init(peer);
4989 
4990 	qdf_mem_copy(
4991 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
4992 
4993 	/* TODO: See of rx_opt_proc is really required */
4994 	peer->rx_opt_proc = soc->rx_opt_proc;
4995 
4996 	/* initialize the peer_id */
4997 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
4998 		peer->peer_ids[i] = HTT_INVALID_PEER;
4999 
5000 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5001 
5002 	qdf_atomic_init(&peer->ref_cnt);
5003 
5004 	/* keep one reference for attach */
5005 	qdf_atomic_inc(&peer->ref_cnt);
5006 
5007 	/* add this peer into the vdev's list */
5008 	if (wlan_op_mode_sta == vdev->opmode)
5009 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
5010 	else
5011 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
5012 
5013 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5014 
5015 	/* TODO: See if hash based search is required */
5016 	dp_peer_find_hash_add(soc, peer);
5017 
5018 	/* Initialize the peer state */
5019 	peer->state = OL_TXRX_PEER_STATE_DISC;
5020 
5021 	dp_info("vdev %pK created peer %pK (%pM) ref_cnt: %d",
5022 		vdev, peer, peer->mac_addr.raw,
5023 		qdf_atomic_read(&peer->ref_cnt));
5024 	/*
5025 	 * For every peer MAP message, check and set the bss_peer flag
5026 	 */
5027 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5028 			QDF_MAC_ADDR_SIZE) == 0 &&
5029 			(wlan_op_mode_sta != vdev->opmode)) {
5030 		dp_info("vdev bss_peer!!");
5031 		peer->bss_peer = 1;
5032 		vdev->vap_bss_peer = peer;
5033 	}
5034 
5035 	if (wlan_op_mode_sta == vdev->opmode &&
5036 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5037 			QDF_MAC_ADDR_SIZE) == 0) {
5038 		vdev->vap_self_peer = peer;
5039 	}
5040 
5041 	for (i = 0; i < DP_MAX_TIDS; i++)
5042 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
5043 
5044 	peer->valid = 1;
5045 	dp_local_peer_id_alloc(pdev, peer);
5046 	DP_STATS_INIT(peer);
5047 	DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
5048 
5049 	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
5050 		     QDF_MAC_ADDR_SIZE);
5051 	peer_cookie.ctx = NULL;
5052 	peer_cookie.cookie = pdev->next_peer_cookie++;
5053 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
5054 	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc,
5055 			     (void *)&peer_cookie,
5056 			     peer->peer_ids[0], WDI_NO_VAL, pdev->pdev_id);
5057 #endif
5058 	if (soc->wlanstats_enabled) {
5059 		if (!peer_cookie.ctx) {
5060 			pdev->next_peer_cookie--;
5061 			qdf_err("Failed to initialize peer rate stats");
5062 		} else {
5063 			peer->wlanstats_ctx = (void *)peer_cookie.ctx;
5064 		}
5065 	}
5066 	return (void *)peer;
5067 }
5068 
5069 /*
5070  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
5071  * @vdev: Datapath VDEV handle
5072  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5073  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5074  *
5075  * Return: None
5076  */
5077 static
5078 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
5079 				  enum cdp_host_reo_dest_ring *reo_dest,
5080 				  bool *hash_based)
5081 {
5082 	struct dp_soc *soc;
5083 	struct dp_pdev *pdev;
5084 
5085 	pdev = vdev->pdev;
5086 	soc = pdev->soc;
5087 	/*
5088 	 * hash based steering is disabled for Radios which are offloaded
5089 	 * to NSS
5090 	 */
5091 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
5092 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
5093 
5094 	/*
5095 	 * Below line of code will ensure the proper reo_dest ring is chosen
5096 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
5097 	 */
5098 	*reo_dest = pdev->reo_dest;
5099 }
5100 
5101 #ifdef IPA_OFFLOAD
5102 /*
5103  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5104  * @vdev: Datapath VDEV handle
5105  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5106  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5107  *
5108  * If IPA is enabled in the ini, disable hash-based steering for SAP
5109  * mode and use the default reo_dest ring for RX. Use config values for other modes.
5110  * Return: None
5111  */
5112 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5113 				       enum cdp_host_reo_dest_ring *reo_dest,
5114 				       bool *hash_based)
5115 {
5116 	struct dp_soc *soc;
5117 	struct dp_pdev *pdev;
5118 
5119 	pdev = vdev->pdev;
5120 	soc = pdev->soc;
5121 
5122 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5123 
5124 	/*
5125 	 * If IPA is enabled, disable hash-based flow steering and set
5126 	 * reo_dest_ring_4 as the REO ring to receive packets on.
5127 	 * IPA is configured to reap reo_dest_ring_4.
5128 	 *
5129 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
5130 	 * value enum value is from 1 - 4.
5131 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
5132 	 */
5133 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5134 		if (vdev->opmode == wlan_op_mode_ap) {
5135 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
5136 			*hash_based = 0;
5137 		}
5138 	}
5139 }
5140 
5141 #else
5142 
5143 /*
5144  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5145  * @vdev: Datapath VDEV handle
5146  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5147  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5148  *
5149  * Use system config values for hash based steering.
5150  * Return: None
5151  */
5152 
5153 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5154 				       enum cdp_host_reo_dest_ring *reo_dest,
5155 				       bool *hash_based)
5156 {
5157 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5158 }
5159 #endif /* IPA_OFFLOAD */
5160 
5161 /*
5162  * dp_peer_setup_wifi3() - initialize the peer
5163  * @vdev_hdl: virtual device object
5164  * @peer: Peer object
5165  *
5166  * Return: void
5167  */
5168 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
5169 {
5170 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
5171 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5172 	struct dp_pdev *pdev;
5173 	struct dp_soc *soc;
5174 	bool hash_based = 0;
5175 	enum cdp_host_reo_dest_ring reo_dest;
5176 
5177 	/* preconditions */
5178 	qdf_assert(vdev);
5179 	qdf_assert(peer);
5180 
5181 	pdev = vdev->pdev;
5182 	soc = pdev->soc;
5183 
5184 	peer->last_assoc_rcvd = 0;
5185 	peer->last_disassoc_rcvd = 0;
5186 	peer->last_deauth_rcvd = 0;
5187 
5188 	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
5189 
5190 	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
5191 		pdev->pdev_id, vdev->vdev_id,
5192 		vdev->opmode, hash_based, reo_dest);
5193 
5194 
5195 	/*
5196 	 * There are corner cases where AD1 = AD2 = "VAP's address",
5197 	 * i.e. both devices have the same MAC address. In these
5198 	 * cases we want such pkts to be processed by the NULL Q handler,
5199 	 * which is the REO2TCL ring. For this reason we should not
5200 	 * set up reo_queues and the default route for the bss_peer.
5201 	 */
5202 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
5203 		return;
5204 
5205 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
5206 		/* TODO: Check the destination ring number to be passed to FW */
5207 		soc->cdp_soc.ol_ops->peer_set_default_routing(
5208 				pdev->ctrl_pdev, peer->mac_addr.raw,
5209 				peer->vdev->vdev_id, hash_based, reo_dest);
5210 	}
5211 
5212 	qdf_atomic_set(&peer->is_default_route_set, 1);
5213 
5214 	dp_peer_rx_init(pdev, peer);
5215 	dp_peer_tx_init(pdev, peer);
5216 
5217 	return;
5218 }
5219 
5220 /*
5221  * dp_cp_peer_del_resp_handler - Handle the peer delete response
5222  * @soc_hdl: Datapath SOC handle
5223  * @vdev_hdl: virtual device object
5224  * @mac_addr: Mac address of the peer
5225  *
5226  * Return: void
5227  */
5228 static void dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
5229 					struct cdp_vdev *vdev_hdl,
5230 					uint8_t *mac_addr)
5231 {
5232 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5233 	struct dp_ast_entry  *ast_entry = NULL;
5234 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5235 	txrx_ast_free_cb cb = NULL;
5236 	void *cookie;
5237 
5238 	qdf_spin_lock_bh(&soc->ast_lock);
5239 
5240 	if (soc->ast_override_support)
5241 		ast_entry =
5242 			dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
5243 							vdev->pdev->pdev_id);
5244 	else
5245 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
5246 
5247 	/* In the case of qwrap we have multiple BSS peers
5248 	 * with the same mac address.
5249 	 *
5250 	 * An AST entry for this mac address is created for
5251 	 * only one peer, hence it can be NULL here.
5252 	 */
5253 	if (!ast_entry || ast_entry->peer || !ast_entry->delete_in_progress) {
5254 		qdf_spin_unlock_bh(&soc->ast_lock);
5255 		return;
5256 	}
5257 
5258 	if (ast_entry->is_mapped)
5259 		soc->ast_table[ast_entry->ast_idx] = NULL;
5260 
5261 	DP_STATS_INC(soc, ast.deleted, 1);
5262 	dp_peer_ast_hash_remove(soc, ast_entry);
5263 
5264 	cb = ast_entry->callback;
5265 	cookie = ast_entry->cookie;
5266 	ast_entry->callback = NULL;
5267 	ast_entry->cookie = NULL;
5268 
5269 	soc->num_ast_entries--;
5270 	qdf_spin_unlock_bh(&soc->ast_lock);
5271 
5272 	if (cb) {
5273 		cb(soc->ctrl_psoc,
5274 		   soc,
5275 		   cookie,
5276 		   CDP_TXRX_AST_DELETED);
5277 	}
5278 	qdf_mem_free(ast_entry);
5279 }
5280 
5281 /*
5282  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
5283  * @vdev_handle: virtual device object
5284  * @htt_pkt_type: type of pkt
5285  *
5286  * Return: void
5287  */
5288 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
5289 	 enum htt_cmn_pkt_type val)
5290 {
5291 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5292 	vdev->tx_encap_type = val;
5293 }
5294 
5295 /*
5296  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
5297  * @vdev_handle: virtual device object
5298  * @htt_pkt_type: type of pkt
5299  *
5300  * Return: void
5301  */
5302 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
5303 	 enum htt_cmn_pkt_type val)
5304 {
5305 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5306 	vdev->rx_decap_type = val;
5307 }
5308 
5309 /*
5310  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
5311  * @txrx_soc: cdp soc handle
5312  * @ac: Access category
5313  * @value: timeout value in millisec
5314  *
5315  * Return: void
5316  */
5317 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5318 				    uint8_t ac, uint32_t value)
5319 {
5320 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5321 
5322 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
5323 }
5324 
5325 /*
5326  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
5327  * @txrx_soc: cdp soc handle
5328  * @ac: access category
 * @value: pointer through which the timeout value in millisec is returned
5330  *
5331  * Return: void
5332  */
5333 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5334 				    uint8_t ac, uint32_t *value)
5335 {
5336 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5337 
5338 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
5339 }
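
/*
 * Illustrative usage sketch (not part of the driver): a control-path
 * caller holding a cdp soc handle could program and read back the BA
 * aging timeout of one access category roughly as below. The handle
 * name is hypothetical, and such calls normally arrive through the
 * cdp ops table rather than as direct calls.
 *
 *	uint32_t ba_to_ms = 0;
 *
 *	dp_set_ba_aging_timeout(cdp_soc, 0, 1000);	// AC 0, 1000 ms
 *	dp_get_ba_aging_timeout(cdp_soc, 0, &ba_to_ms);	// read it back
 */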
5340 
5341 /*
5342  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
5343  * @pdev_handle: physical device object
5344  * @val: reo destination ring index (1 - 4)
5345  *
5346  * Return: void
5347  */
5348 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
5349 	 enum cdp_host_reo_dest_ring val)
5350 {
5351 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5352 
5353 	if (pdev)
5354 		pdev->reo_dest = val;
5355 }
5356 
5357 /*
5358  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
5359  * @pdev_handle: physical device object
5360  *
5361  * Return: reo destination ring index
5362  */
5363 static enum cdp_host_reo_dest_ring
5364 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
5365 {
5366 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5367 
5368 	if (pdev)
5369 		return pdev->reo_dest;
5370 	else
5371 		return cdp_host_reo_dest_ring_unknown;
5372 }
5373 
5374 /*
5375  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
5376  * @pdev_handle: device object
5377  * @val: value to be set
5378  *
 * Return: 0 on success
5380  */
5381 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
5382 	 uint32_t val)
5383 {
5384 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5385 
5386 	/* Enable/Disable smart mesh filtering. This flag will be checked
5387 	 * during rx processing to check if packets are from NAC clients.
5388 	 */
5389 	pdev->filter_neighbour_peers = val;
5390 	return 0;
5391 }
5392 
/*
 * dp_update_filter_neighbour_peers() - set neighbour peers (NAC clients)
 * address for smart mesh filtering
 * @vdev_handle: virtual device object
 * @cmd: Add/Del command
 * @macaddr: nac client mac address
 *
 * Return: 1 on success, 0 on failure
 */
5402 static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
5403 					    uint32_t cmd, uint8_t *macaddr)
5404 {
5405 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5406 	struct dp_pdev *pdev = vdev->pdev;
5407 	struct dp_neighbour_peer *peer = NULL;
5408 
5409 	if (!macaddr)
5410 		goto fail0;
5411 
5412 	/* Store address of NAC (neighbour peer) which will be checked
5413 	 * against TA of received packets.
5414 	 */
5415 	if (cmd == DP_NAC_PARAM_ADD) {
5416 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
5417 				sizeof(*peer));
5418 
5419 		if (!peer) {
5420 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5421 				FL("DP neighbour peer node memory allocation failed"));
5422 			goto fail0;
5423 		}
5424 
5425 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
5426 			macaddr, QDF_MAC_ADDR_SIZE);
5427 		peer->vdev = vdev;
5428 
5429 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5430 
5431 		/* add this neighbour peer into the list */
5432 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
5433 				neighbour_peer_list_elem);
5434 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5435 
5436 		/* first neighbour */
5437 		if (!pdev->neighbour_peers_added) {
5438 			pdev->neighbour_peers_added = true;
5439 			dp_ppdu_ring_cfg(pdev);
5440 		}
5441 		return 1;
5442 
5443 	} else if (cmd == DP_NAC_PARAM_DEL) {
5444 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5445 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
5446 				neighbour_peer_list_elem) {
5447 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
5448 				macaddr, QDF_MAC_ADDR_SIZE)) {
5449 				/* delete this peer from the list */
5450 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
5451 					peer, neighbour_peer_list_elem);
5452 				qdf_mem_free(peer);
5453 				break;
5454 			}
5455 		}
5456 		/* last neighbour deleted */
5457 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
5458 			pdev->neighbour_peers_added = false;
5459 			dp_ppdu_ring_cfg(pdev);
5460 		}
5461 
5462 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5463 
5464 		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
5465 		    !pdev->enhanced_stats_en)
5466 			dp_ppdu_ring_reset(pdev);
5467 		return 1;
5468 
5469 	}
5470 
5471 fail0:
5472 	return 0;
5473 }
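
/*
 * Illustrative usage sketch (not part of the driver): adding and later
 * removing one NAC client for smart mesh filtering could look roughly
 * as below; the vdev handle name and MAC value are hypothetical, and
 * the calls normally arrive through the cdp ops table.
 *
 *	uint8_t nac_mac[QDF_MAC_ADDR_SIZE] = {
 *		0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *
 *	if (dp_update_filter_neighbour_peers(vdev_hdl, DP_NAC_PARAM_ADD,
 *					     nac_mac)) {
 *		// ... NAC traffic is now filtered in on this pdev ...
 *		dp_update_filter_neighbour_peers(vdev_hdl,
 *						 DP_NAC_PARAM_DEL, nac_mac);
 *	}
 */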
5474 
/*
 * dp_get_sec_type() - Get the security type
 * @peer: Datapath peer handle
 * @sec_idx: Security id (mcast, ucast)
 *
 * Return: sec_type: Security type
 */
5482 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
5483 {
5484 	struct dp_peer *dpeer = (struct dp_peer *)peer;
5485 
5486 	return dpeer->security[sec_idx].sec_type;
5487 }
5488 
/*
 * dp_peer_authorize() - authorize txrx peer
 * @peer_handle: Datapath peer handle
 * @authorize: nonzero to authorize the peer, zero to deauthorize
 *
 * Return: void
 */
5495 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
5496 {
5497 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5498 	struct dp_soc *soc;
5499 
5500 	if (peer) {
5501 		soc = peer->vdev->pdev->soc;
5502 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
5503 		peer->authorize = authorize ? 1 : 0;
5504 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5505 	}
5506 }
5507 
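/*
 * dp_reset_and_release_peer_mem() - detach the peer from its vdev and
 *				     free the peer memory
 * @soc: Datapath SOC handle
 * @pdev: Datapath PDEV handle
 * @peer: Datapath peer handle
 * @vdev: Datapath VDEV handle (may be NULL)
 *
 * Return: void
 */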
5508 static void dp_reset_and_release_peer_mem(struct dp_soc *soc,
5509 					  struct dp_pdev *pdev,
5510 					  struct dp_peer *peer,
5511 					  struct dp_vdev *vdev)
5512 {
	uint8_t *m_addr = NULL;

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "vdev is NULL");
	} else {
		if (vdev->vap_bss_peer == peer)
			vdev->vap_bss_peer = NULL;
		m_addr = peer->mac_addr.raw;
		if (soc->cdp_soc.ol_ops->peer_unref_delete)
			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
				m_addr, vdev->mac_addr.raw, vdev->opmode,
				peer->ctrl_peer, NULL);

		/* fold this peer's stats into the vdev before freeing it */
		if (vdev->vap_bss_peer)
			DP_UPDATE_STATS(vdev, peer);
	}
	/*
	 * Peer AST list has to be empty here
	 */
5536 	DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
5537 
5538 	qdf_mem_free(peer);
5539 }
5540 
5541 /**
5542  * dp_delete_pending_vdev() - check and process vdev delete
5543  * @pdev: DP specific pdev pointer
5544  * @vdev: DP specific vdev pointer
5545  * @vdev_id: vdev id corresponding to vdev
5546  *
 * This API does the following:
 * 1) It releases tx flow pool buffers as the vdev is
 *    going down and no peers are associated.
 * 2) It also detaches the vdev before cleaning up the vdev
 *    (struct dp_vdev) memory
 *
 * Return: void
5551  */
5552 static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
5553 				   uint8_t vdev_id)
5554 {
5555 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
5556 	void *vdev_delete_context = NULL;
5557 
5558 	vdev_delete_cb = vdev->delete.callback;
5559 	vdev_delete_context = vdev->delete.context;
5560 
5561 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5562 		  FL("deleting vdev object %pK (%pM)- its last peer is done"),
5563 		  vdev, vdev->mac_addr.raw);
5564 	/* all peers are gone, go ahead and delete it */
5565 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
5566 			FLOW_TYPE_VDEV, vdev_id);
5567 	dp_tx_vdev_detach(vdev);
5568 
5569 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5570 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
5571 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5572 
5573 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5574 		  FL("deleting vdev object %pK (%pM)"),
5575 		  vdev, vdev->mac_addr.raw);
5576 	qdf_mem_free(vdev);
5577 	vdev = NULL;
5578 
5579 	if (vdev_delete_cb)
5580 		vdev_delete_cb(vdev_delete_context);
5581 }
5582 
/*
 * dp_peer_unref_delete() - unref peer and delete it when the last
 *			    reference is dropped
 * @peer_handle: Datapath peer handle
 *
 * Return: void
 */
5588 void dp_peer_unref_delete(void *peer_handle)
5589 {
5590 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5591 	struct dp_vdev *vdev = peer->vdev;
5592 	struct dp_pdev *pdev = vdev->pdev;
5593 	struct dp_soc *soc = pdev->soc;
5594 	struct dp_peer *tmppeer;
5595 	int found = 0;
5596 	uint16_t peer_id;
5597 	uint16_t vdev_id;
5598 	bool delete_vdev;
5599 	struct cdp_peer_cookie peer_cookie;
5600 
5601 	/*
5602 	 * Hold the lock all the way from checking if the peer ref count
5603 	 * is zero until the peer references are removed from the hash
5604 	 * table and vdev list (if the peer ref count is zero).
5605 	 * This protects against a new HL tx operation starting to use the
5606 	 * peer object just after this function concludes it's done being used.
5607 	 * Furthermore, the lock needs to be held while checking whether the
5608 	 * vdev's list of peers is empty, to make sure that list is not modified
5609 	 * concurrently with the empty check.
5610 	 */
5611 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5612 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
5613 		peer_id = peer->peer_ids[0];
5614 		vdev_id = vdev->vdev_id;
5615 
5616 		/*
5617 		 * Make sure that the reference to the peer in
5618 		 * peer object map is removed
5619 		 */
5620 		if (peer_id != HTT_INVALID_PEER)
5621 			soc->peer_id_to_obj_map[peer_id] = NULL;
5622 
5623 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
5624 			  "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
5625 
5626 		/* remove the reference to the peer from the hash table */
5627 		dp_peer_find_hash_remove(soc, peer);
5628 
5629 		qdf_spin_lock_bh(&soc->ast_lock);
5630 		if (peer->self_ast_entry) {
5631 			dp_peer_del_ast(soc, peer->self_ast_entry);
5632 			peer->self_ast_entry = NULL;
5633 		}
5634 		qdf_spin_unlock_bh(&soc->ast_lock);
5635 
5636 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
5637 			if (tmppeer == peer) {
5638 				found = 1;
5639 				break;
5640 			}
5641 		}
5642 
5643 		if (found) {
5644 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
5645 				peer_list_elem);
5646 		} else {
			/* Ignoring the remove operation as peer not found */
5648 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
5649 				  "peer:%pK not found in vdev:%pK peerlist:%pK",
5650 				  peer, vdev, &peer->vdev->peer_list);
5651 		}
5652 
5653 		/* send peer destroy event to upper layer */
5654 		qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
5655 			     QDF_MAC_ADDR_SIZE);
		peer_cookie.ctx = (void *)peer->wlanstats_ctx;
5658 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
5659 		dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
5660 				     pdev->soc,
5661 				     (void *)&peer_cookie,
5662 				     peer->peer_ids[0],
5663 				     WDI_NO_VAL,
5664 				     pdev->pdev_id);
5665 #endif
5666 		peer->wlanstats_ctx = NULL;
5667 
5668 		/* cleanup the peer data */
5669 		dp_peer_cleanup(vdev, peer);
5670 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5671 		dp_reset_and_release_peer_mem(soc, pdev, peer, vdev);
5672 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
5673 
5674 		/* check whether the parent vdev has no peers left */
5675 		if (TAILQ_EMPTY(&vdev->peer_list)) {
5676 			/*
5677 			 * capture vdev delete pending flag's status
5678 			 * while holding peer_ref_mutex lock
5679 			 */
5680 			delete_vdev = vdev->delete.pending;
5681 			/*
5682 			 * Now that there are no references to the peer, we can
5683 			 * release the peer reference lock.
5684 			 */
5685 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5686 			/*
5687 			 * Check if the parent vdev was waiting for its peers
5688 			 * to be deleted, in order for it to be deleted too.
5689 			 */
5690 			if (delete_vdev)
5691 				dp_delete_pending_vdev(pdev, vdev, vdev_id);
5692 		} else {
5693 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5694 		}
5695 
5696 	} else {
5697 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5698 	}
5699 }
5700 
5701 #ifdef PEER_CACHE_RX_PKTS
5702 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
5703 {
5704 	dp_rx_flush_rx_cached(peer, true);
5705 	qdf_list_destroy(&peer->bufq_info.cached_bufq);
5706 	qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
5707 }
5708 #else
5709 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
5710 {
5711 }
5712 #endif
5713 
/*
 * dp_peer_delete_wifi3() - Delete txrx peer
 * @peer_handle: Datapath peer handle
 * @bitmap: bitmap indicating special handling of request.
 *
 * Return: void
 */
5720 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
5721 {
5722 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5723 
5724 	/* redirect the peer's rx delivery function to point to a
5725 	 * discard func
5726 	 */
5727 
5728 	peer->rx_opt_proc = dp_rx_discard;
5729 
	/* Do not set ctrl_peer to NULL for connected sta peers.
	 * We need ctrl_peer to release the reference during dp
	 * peer free. This reference was held for the
	 * obj_mgr peer during the creation of the dp peer.
	 */
5735 	if (!(peer->vdev && (peer->vdev->opmode != wlan_op_mode_sta) &&
5736 	      !peer->bss_peer))
5737 		peer->ctrl_peer = NULL;
5738 
5739 	peer->valid = 0;
5740 
5741 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5742 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
5743 
5744 	dp_local_peer_id_free(peer->vdev->pdev, peer);
5745 
5746 	dp_peer_rx_bufq_resources_deinit(peer);
5747 
5748 	qdf_spinlock_destroy(&peer->peer_info_lock);
5749 
5750 	/*
5751 	 * Remove the reference added during peer_attach.
5752 	 * The peer will still be left allocated until the
5753 	 * PEER_UNMAP message arrives to remove the other
5754 	 * reference, added by the PEER_MAP message.
5755 	 */
5756 	dp_peer_unref_delete(peer_handle);
5757 }
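
/*
 * For orientation, a simplified sketch of the peer reference lifecycle
 * implied by the comments above (not an exhaustive contract):
 *
 *	peer create            -> reference held for the attach
 *	HTT PEER_MAP message   -> additional reference for the fw mapping
 *	dp_peer_delete_wifi3() -> drops the attach reference
 *	HTT PEER_UNMAP message -> drops the map reference; the last
 *				  dp_peer_unref_delete() frees the peer via
 *				  dp_reset_and_release_peer_mem()
 */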
5758 
/*
 * dp_get_vdev_mac_addr_wifi3() - Get vdev mac address
 * @pvdev: Datapath vdev handle
 *
 * Return: pointer to the vdev mac address
 */
5764 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
5765 {
5766 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
5767 	return vdev->mac_addr.raw;
5768 }
5769 
/*
 * dp_vdev_set_wds() - Enable/disable WDS on the vdev
 * @vdev_handle: DP VDEV handle
 * @val: value
 *
 * Return: 0 on success
 */
5777 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
5778 {
5779 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5780 
5781 	vdev->wds_enabled = val;
5782 	return 0;
5783 }
5784 
/*
 * dp_get_vdev_from_vdev_id_wifi3() - Get vdev from vdev id
 * @dev: Datapath pdev handle
 * @vdev_id: vdev id
 *
 * Return: vdev handle if found, NULL otherwise
 */
5790 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
5791 						uint8_t vdev_id)
5792 {
5793 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
5794 	struct dp_vdev *vdev = NULL;
5795 
5796 	if (qdf_unlikely(!pdev))
5797 		return NULL;
5798 
5799 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5800 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5801 		if (vdev->delete.pending)
5802 			continue;
5803 
5804 		if (vdev->vdev_id == vdev_id)
5805 			break;
5806 	}
5807 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5808 
5809 	return (struct cdp_vdev *)vdev;
5810 }
5811 
5812 /*
5813  * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev handle of monitor mode
5814  * @dev: PDEV handle
5815  *
5816  * Return: VDEV handle of monitor mode
 */
5819 static struct cdp_vdev *dp_get_mon_vdev_from_pdev_wifi3(struct cdp_pdev *dev)
5820 {
5821 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
5822 
5823 	if (qdf_unlikely(!pdev))
5824 		return NULL;
5825 
5826 	return (struct cdp_vdev *)pdev->monitor_vdev;
5827 }
5828 
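/*
 * dp_get_opmode() - Get the operating mode of the vdev
 * @vdev_handle: Datapath vdev handle
 *
 * Return: operating mode (enum wlan_op_mode)
 */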
5829 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
5830 {
5831 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5832 
5833 	return vdev->opmode;
5834 }
5835 
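/*
 * dp_get_os_rx_handles_from_vdev_wifi3() - get OS rx handles of the vdev
 * @pvdev: Datapath vdev handle
 * @stack_fn_p: pointer through which the stack rx function is returned
 * @osif_vdev_p: pointer through which the OSIF vdev handle is returned
 *
 * Return: void
 */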
5836 static
5837 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
5838 					  ol_txrx_rx_fp *stack_fn_p,
5839 					  ol_osif_vdev_handle *osif_vdev_p)
5840 {
5841 	struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);
5842 
5843 	qdf_assert(vdev);
5844 	*stack_fn_p = vdev->osif_rx_stack;
5845 	*osif_vdev_p = vdev->osif_vdev;
5846 }
5847 
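/*
 * dp_get_ctrl_pdev_from_vdev_wifi3() - get the cfg context of the
 *					vdev's pdev
 * @pvdev: Datapath vdev handle
 *
 * Return: pdev cfg handle
 */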
5848 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
5849 {
5850 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
5851 	struct dp_pdev *pdev = vdev->pdev;
5852 
5853 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
5854 }
5855 
5856 /**
5857  * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
5858  *                                 ring based on target
5859  * @soc: soc handle
 * @mac_for_pdev: mac id mapped for this pdev (see dp_get_mac_id_for_pdev)
 * @pdev: physical device handle
 * @ring_num: mac based ring index
5863  * @htt_tlv_filter: tlv filter
5864  *
5865  * Return: zero on success, non-zero on failure
5866  */
5867 static inline
5868 QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
5869 				       struct dp_pdev *pdev, uint8_t ring_num,
5870 				       struct htt_rx_ring_tlv_filter htt_tlv_filter)
5871 {
5872 	QDF_STATUS status;
5873 
5874 	if (soc->wlan_cfg_ctx->rxdma1_enable)
5875 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5876 					     pdev->rxdma_mon_buf_ring[ring_num]
5877 					     .hal_srng,
5878 					     RXDMA_MONITOR_BUF, RX_BUFFER_SIZE,
5879 					     &htt_tlv_filter);
5880 	else
5881 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5882 					     pdev->rx_mac_buf_ring[ring_num]
5883 					     .hal_srng,
5884 					     RXDMA_BUF, RX_BUFFER_SIZE,
5885 					     &htt_tlv_filter);
5886 
5887 	return status;
5888 }
5889 
5890 /**
5891  * dp_reset_monitor_mode() - Disable monitor mode
5892  * @pdev_handle: Datapath PDEV handle
5893  *
5894  * Return: QDF_STATUS
5895  */
5896 QDF_STATUS dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
5897 {
5898 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5899 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5900 	struct dp_soc *soc = pdev->soc;
5901 	uint8_t pdev_id;
5902 	int mac_id;
5903 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5904 
	pdev_id = pdev->pdev_id;
5907 
5908 	qdf_spin_lock_bh(&pdev->mon_lock);
5909 
5910 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
5911 
5912 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5913 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
5914 
5915 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5916 						     pdev, mac_id,
5917 						     htt_tlv_filter);
5918 
		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send tlv filter for monitor mode rings");
			qdf_spin_unlock_bh(&pdev->mon_lock);
			return status;
		}
5923 
5924 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5925 			    pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5926 			    RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
5927 			    &htt_tlv_filter);
5928 	}
5929 
5930 	pdev->monitor_vdev = NULL;
5931 	pdev->mcopy_mode = 0;
5932 	pdev->monitor_configured = false;
5933 
5934 	qdf_spin_unlock_bh(&pdev->mon_lock);
5935 
5936 	return QDF_STATUS_SUCCESS;
5937 }
5938 
5939 /**
5940  * dp_set_nac() - set peer_nac
5941  * @peer_handle: Datapath PEER handle
5942  *
5943  * Return: void
5944  */
5945 static void dp_set_nac(struct cdp_peer *peer_handle)
5946 {
5947 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5948 
5949 	peer->nac = 1;
5950 }
5951 
5952 /**
5953  * dp_get_tx_pending() - read pending tx
5954  * @pdev_handle: Datapath PDEV handle
5955  *
5956  * Return: outstanding tx
5957  */
5958 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
5959 {
5960 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5961 
5962 	return qdf_atomic_read(&pdev->num_tx_outstanding);
5963 }
5964 
5965 /**
5966  * dp_get_peer_mac_from_peer_id() - get peer mac
5967  * @pdev_handle: Datapath PDEV handle
5968  * @peer_id: Peer ID
5969  * @peer_mac: MAC addr of PEER
5970  *
5971  * Return: void
5972  */
5973 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
5974 	uint32_t peer_id, uint8_t *peer_mac)
5975 {
5976 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5977 	struct dp_peer *peer;
5978 
5979 	if (pdev && peer_mac) {
5980 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
5981 		if (peer) {
5982 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
5983 				     QDF_MAC_ADDR_SIZE);
5984 			dp_peer_unref_del_find_by_id(peer);
5985 		}
5986 	}
5987 }
5988 
5989 /**
5990  * dp_pdev_configure_monitor_rings() - configure monitor rings
 * @pdev: Datapath PDEV handle
5992  *
5993  * Return: QDF_STATUS
5994  */
5995 QDF_STATUS dp_pdev_configure_monitor_rings(struct dp_pdev *pdev)
5996 {
5997 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
5998 	struct dp_soc *soc;
5999 	uint8_t pdev_id;
6000 	int mac_id;
6001 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6002 
6003 	pdev_id = pdev->pdev_id;
6004 	soc = pdev->soc;
6005 
6006 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
6007 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
6008 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
6009 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
6010 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
6011 		pdev->mo_data_filter);
6012 
6013 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6014 
6015 	htt_tlv_filter.mpdu_start = 1;
6016 	htt_tlv_filter.msdu_start = 1;
6017 	htt_tlv_filter.packet = 1;
6018 	htt_tlv_filter.msdu_end = 1;
6019 	htt_tlv_filter.mpdu_end = 1;
6020 	htt_tlv_filter.packet_header = 1;
6021 	htt_tlv_filter.attention = 1;
6022 	htt_tlv_filter.ppdu_start = 0;
6023 	htt_tlv_filter.ppdu_end = 0;
6024 	htt_tlv_filter.ppdu_end_user_stats = 0;
6025 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
6026 	htt_tlv_filter.ppdu_end_status_done = 0;
6027 	htt_tlv_filter.header_per_msdu = 1;
6028 	htt_tlv_filter.enable_fp =
6029 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
6030 	htt_tlv_filter.enable_md = 0;
6031 	htt_tlv_filter.enable_mo =
6032 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
6033 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
6034 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
6035 
6036 	if (pdev->mcopy_mode) {
6037 		htt_tlv_filter.fp_data_filter = 0;
6038 		htt_tlv_filter.mo_data_filter = 0;
6039 	} else {
6040 		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
6041 		htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
6042 	}
6043 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
6044 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
6045 	htt_tlv_filter.offset_valid = false;
6046 
6047 	if ((pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) ||
6048 	    (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU_MSDU)) {
6049 		htt_tlv_filter.fp_mgmt_filter = 0;
6050 		htt_tlv_filter.fp_ctrl_filter = 0;
6051 		htt_tlv_filter.fp_data_filter = 0;
6052 		htt_tlv_filter.mo_mgmt_filter = 0;
6053 		htt_tlv_filter.mo_ctrl_filter = 0;
6054 		htt_tlv_filter.mo_data_filter = 0;
6055 	}
6056 
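	/*
	 * First pass: program the monitor buffer (or mac buf) ring of each
	 * mac with full per-MPDU/MSDU TLVs and packet payload.
	 */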
6057 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6058 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
6059 
6060 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
6061 						     pdev, mac_id,
6062 						     htt_tlv_filter);
6063 
6064 		if (status != QDF_STATUS_SUCCESS) {
6065 			dp_err("Failed to send tlv filter for monitor mode rings");
6066 			return status;
6067 		}
6068 	}
6069 
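	/*
	 * Second pass: program the monitor status ring of each mac with
	 * PPDU-level TLVs only; packet headers are added back just for
	 * M-copy and enhanced rx capture.
	 */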
6070 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6071 
6072 	htt_tlv_filter.mpdu_start = 1;
6073 	htt_tlv_filter.msdu_start = 0;
6074 	htt_tlv_filter.packet = 0;
6075 	htt_tlv_filter.msdu_end = 0;
6076 	htt_tlv_filter.mpdu_end = 0;
6077 	if ((pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) ||
6078 	    (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU_MSDU)) {
6079 		htt_tlv_filter.mpdu_end = 1;
6080 	}
6081 	htt_tlv_filter.attention = 0;
6082 	htt_tlv_filter.ppdu_start = 1;
6083 	htt_tlv_filter.ppdu_end = 1;
6084 	htt_tlv_filter.ppdu_end_user_stats = 1;
6085 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6086 	htt_tlv_filter.ppdu_end_status_done = 1;
6087 	htt_tlv_filter.enable_fp = 1;
6088 	htt_tlv_filter.enable_md = 0;
6089 	htt_tlv_filter.enable_mo = 1;
6090 	if (pdev->mcopy_mode ||
6091 	    (pdev->rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
6092 		htt_tlv_filter.packet_header = 1;
6093 		if (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) {
6094 			htt_tlv_filter.header_per_msdu = 0;
6095 			htt_tlv_filter.enable_mo = 0;
6096 		} else if (pdev->rx_enh_capture_mode ==
6097 			   CDP_RX_ENH_CAPTURE_MPDU_MSDU) {
6098 			htt_tlv_filter.header_per_msdu = 1;
6099 			htt_tlv_filter.enable_mo = 0;
6100 		}
6101 	}
6102 
6103 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6104 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6105 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6106 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6107 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6108 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6109 	htt_tlv_filter.offset_valid = false;
6110 
6111 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6112 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6113 						pdev->pdev_id);
6114 
6115 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6116 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6117 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6118 	}
6119 
6120 	return status;
6121 }
6122 
6123 /**
6124  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
6125  * @vdev_handle: Datapath VDEV handle
 * @special_monitor: Flag to denote if it is a special (smart/lite) monitor mode
6127  *
6128  * Return: 0 on success, not 0 on failure
6129  */
6130 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
6131 					   uint8_t special_monitor)
6132 {
6133 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6134 	struct dp_pdev *pdev;
6135 
6136 	qdf_assert(vdev);
6137 
6138 	pdev = vdev->pdev;
6139 	pdev->monitor_vdev = vdev;
6140 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6141 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
6142 		  pdev, pdev->pdev_id, pdev->soc, vdev);
6143 
	/*
	 * Do not configure the monitor buf ring and filter for smart and
	 * lite monitor modes: for smart monitor, filters are added along
	 * with the first NAC; for lite monitor, the required configuration
	 * is done through dp_set_pdev_param.
	 */
6151 	if (special_monitor)
6152 		return QDF_STATUS_SUCCESS;
6153 
	/* Check if current pdev's monitor_vdev exists */
6155 	if (pdev->monitor_configured) {
6156 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6157 			  "monitor vap already created vdev=%pK\n", vdev);
6158 		qdf_assert(vdev);
6159 		return QDF_STATUS_E_RESOURCES;
6160 	}
6161 
6162 	pdev->monitor_configured = true;
6163 
6164 	return dp_pdev_configure_monitor_rings(pdev);
6165 }
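
/*
 * Illustrative flow sketch (not part of the driver): a monitor mode
 * enable/disable cycle through these helpers looks roughly as below.
 * The handle names are hypothetical and the calls normally arrive
 * through the cdp ops table.
 *
 *	// enable: binds the vdev as the pdev's monitor vdev and, except
 *	// for smart/lite monitor, programs the rings and filters
 *	dp_vdev_set_monitor_mode(vdev_hdl, 0);
 *
 *	// disable: clears the ring filters and detaches the monitor vdev
 *	dp_reset_monitor_mode(pdev_hdl);
 */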
6166 
6167 /**
6168  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
6169  * @pdev_handle: Datapath PDEV handle
 * @filter_val: Flag to select Filter for monitor mode
 *
 * Return: 0 on success, not 0 on failure
6172  */
6173 static QDF_STATUS
6174 dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
6175 				   struct cdp_monitor_filter *filter_val)
6176 {
	/* Many monitor VAPs can exist in a system but only one can be up at
	 * any time
	 */
6180 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6181 	struct dp_vdev *vdev = pdev->monitor_vdev;
6182 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6183 	struct dp_soc *soc;
6184 	uint8_t pdev_id;
6185 	int mac_id;
6186 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6187 
6188 	pdev_id = pdev->pdev_id;
6189 	soc = pdev->soc;
6190 
6191 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6192 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
6193 		pdev, pdev_id, soc, vdev);
6194 
	/* Check if current pdev's monitor_vdev exists */
6196 	if (!pdev->monitor_vdev) {
6197 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6198 			"vdev=%pK", vdev);
6199 		qdf_assert(vdev);
6200 	}
6201 
6202 	/* update filter mode, type in pdev structure */
6203 	pdev->mon_filter_mode = filter_val->mode;
6204 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
6205 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
6206 	pdev->fp_data_filter = filter_val->fp_data;
6207 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
6208 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
6209 	pdev->mo_data_filter = filter_val->mo_data;
6210 
6211 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
6212 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
6213 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
6214 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
6215 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
6216 		pdev->mo_data_filter);
6217 
6218 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6219 
6220 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6221 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
6222 
6223 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
6224 						     pdev, mac_id,
6225 						     htt_tlv_filter);
6226 
6227 		if (status != QDF_STATUS_SUCCESS) {
6228 			dp_err("Failed to send tlv filter for monitor mode rings");
6229 			return status;
6230 		}
6231 
6232 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6233 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6234 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6235 	}
6236 
6237 	htt_tlv_filter.mpdu_start = 1;
6238 	htt_tlv_filter.msdu_start = 1;
6239 	htt_tlv_filter.packet = 1;
6240 	htt_tlv_filter.msdu_end = 1;
6241 	htt_tlv_filter.mpdu_end = 1;
6242 	htt_tlv_filter.packet_header = 1;
6243 	htt_tlv_filter.attention = 1;
6244 	htt_tlv_filter.ppdu_start = 0;
6245 	htt_tlv_filter.ppdu_end = 0;
6246 	htt_tlv_filter.ppdu_end_user_stats = 0;
6247 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
6248 	htt_tlv_filter.ppdu_end_status_done = 0;
6249 	htt_tlv_filter.header_per_msdu = 1;
6250 	htt_tlv_filter.enable_fp =
6251 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
6252 	htt_tlv_filter.enable_md = 0;
6253 	htt_tlv_filter.enable_mo =
6254 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
6255 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
6256 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
6257 	if (pdev->mcopy_mode)
6258 		htt_tlv_filter.fp_data_filter = 0;
6259 	else
6260 		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
6261 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
6262 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
6263 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
6264 	htt_tlv_filter.offset_valid = false;
6265 
6266 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6267 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
6268 
6269 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
6270 						     pdev, mac_id,
6271 						     htt_tlv_filter);
6272 
6273 		if (status != QDF_STATUS_SUCCESS) {
6274 			dp_err("Failed to send tlv filter for monitor mode rings");
6275 			return status;
6276 		}
6277 	}
6278 
6279 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6280 
6281 	htt_tlv_filter.mpdu_start = 1;
6282 	htt_tlv_filter.msdu_start = 0;
6283 	htt_tlv_filter.packet = 0;
6284 	htt_tlv_filter.msdu_end = 0;
6285 	htt_tlv_filter.mpdu_end = 0;
6286 	htt_tlv_filter.attention = 0;
6287 	htt_tlv_filter.ppdu_start = 1;
6288 	htt_tlv_filter.ppdu_end = 1;
6289 	htt_tlv_filter.ppdu_end_user_stats = 1;
6290 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6291 	htt_tlv_filter.ppdu_end_status_done = 1;
6292 	htt_tlv_filter.enable_fp = 1;
6293 	htt_tlv_filter.enable_md = 0;
6294 	htt_tlv_filter.enable_mo = 1;
6295 	if (pdev->mcopy_mode) {
6296 		htt_tlv_filter.packet_header = 1;
6297 	}
6298 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6299 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6300 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6301 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6302 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6303 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6304 	htt_tlv_filter.offset_valid = false;
6305 
6306 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6307 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6308 						pdev->pdev_id);
6309 
6310 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6311 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6312 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6313 	}
6314 
6315 	return QDF_STATUS_SUCCESS;
6316 }
6317 
6318 /**
6319  * dp_get_pdev_id_frm_pdev() - get pdev_id
6320  * @pdev_handle: Datapath PDEV handle
6321  *
6322  * Return: pdev_id
6323  */
6324 static
6325 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
6326 {
6327 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6328 
6329 	return pdev->pdev_id;
6330 }
6331 
6332 /**
6333  * dp_get_delay_stats_flag() - get delay stats flag
6334  * @pdev_handle: Datapath PDEV handle
6335  *
6336  * Return: 0 if flag is disabled else 1
6337  */
6338 static
6339 bool dp_get_delay_stats_flag(struct cdp_pdev *pdev_handle)
6340 {
6341 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6342 
6343 	return pdev->delay_stats_flag;
6344 }
6345 
6346 /**
6347  * dp_pdev_set_chan_noise_floor() - set channel noise floor
6348  * @pdev_handle: Datapath PDEV handle
6349  * @chan_noise_floor: Channel Noise Floor
6350  *
6351  * Return: void
6352  */
6353 static
6354 void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
6355 				  int16_t chan_noise_floor)
6356 {
6357 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6358 
6359 	pdev->chan_noise_floor = chan_noise_floor;
6360 }
6361 
6362 /**
6363  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
 * @vdev_handle: Datapath VDEV handle
 *
 * Return: true if the unicast filter flag is set
6366  */
6367 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
6368 {
6369 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6370 	struct dp_pdev *pdev;
6371 
6372 	pdev = vdev->pdev;
6373 
6374 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
6375 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
6376 		return true;
6377 
6378 	return false;
6379 }
6380 
6381 /**
6382  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
 * @vdev_handle: Datapath VDEV handle
 *
 * Return: true if the multicast filter flag is set
6385  */
6386 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
6387 {
6388 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6389 	struct dp_pdev *pdev;
6390 
6391 	pdev = vdev->pdev;
6392 
6393 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
6394 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
6395 		return true;
6396 
6397 	return false;
6398 }
6399 
6400 /**
6401  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
 * @vdev_handle: Datapath VDEV handle
 *
 * Return: true if the non-data (mgmt/ctrl) filter flags are set
6404  */
6405 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
6406 {
6407 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6408 	struct dp_pdev *pdev;
6409 
6410 	pdev = vdev->pdev;
6411 
6412 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
6413 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
6414 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
6415 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
6416 			return true;
6417 		}
6418 	}
6419 
6420 	return false;
6421 }
6422 
6423 #ifdef MESH_MODE_SUPPORT
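/*
 * dp_peer_set_mesh_mode() - flag the vdev as a mesh vdev
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */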
6424 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
6425 {
6426 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
6427 
6428 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6429 		FL("val %d"), val);
6430 	vdev->mesh_vdev = val;
6431 }
6432 
6433 /*
6434  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
6435  * @vdev_hdl: virtual device object
6436  * @val: value to be set
6437  *
6438  * Return: void
6439  */
6440 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
6441 {
6442 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
6443 
6444 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6445 		FL("val %d"), val);
6446 	vdev->mesh_rx_filter = val;
6447 }
6448 #endif
6449 
6450 /**
6451  * dp_rx_bar_stats_cb(): BAR received stats callback
6452  * @soc: SOC handle
6453  * @cb_ctxt: Call back context
6454  * @reo_status: Reo status
6455  *
6456  * return: void
6457  */
6458 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
6459 	union hal_reo_status *reo_status)
6460 {
6461 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
6462 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
6463 
6464 	if (!qdf_atomic_read(&soc->cmn_init_done))
6465 		return;
6466 
6467 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
6468 		DP_PRINT_STATS("REO stats failure %d",
6469 			       queue_status->header.status);
6470 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
6471 		return;
6472 	}
6473 
6474 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
6475 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
6476 
6477 }
6478 
/**
 * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
 * @vdev: DP VDEV handle
 * @vdev_stats: buffer to hold the consolidated vdev stats
 *
 * return: void
 */
6485 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
6486 			     struct cdp_vdev_stats *vdev_stats)
6487 {
6488 	struct dp_peer *peer = NULL;
6489 	struct dp_soc *soc = NULL;
6490 
6491 	if (!vdev || !vdev->pdev)
6492 		return;
6493 
6494 	soc = vdev->pdev->soc;
6495 
6496 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
6497 
6498 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
6499 		dp_update_vdev_stats(vdev_stats, peer);
6500 
6501 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6502 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6503 			     vdev_stats, vdev->vdev_id,
6504 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
6505 #endif
6506 }
6507 
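/**
 * dp_aggregate_pdev_stats() - Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * Return: void
 */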
6508 void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
6509 {
6510 	struct dp_vdev *vdev = NULL;
6511 	struct dp_soc *soc;
6512 	struct cdp_vdev_stats *vdev_stats =
6513 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
6514 
6515 	if (!vdev_stats) {
6516 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6517 			  "DP alloc failure - unable to get alloc vdev stats");
6518 		return;
6519 	}
6520 
6521 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
6522 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
6523 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
6524 
6525 	if (pdev->mcopy_mode)
6526 		DP_UPDATE_STATS(pdev, pdev->invalid_peer);
6527 
6528 	soc = pdev->soc;
6529 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
6530 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6531 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
6532 
6533 		dp_aggregate_vdev_stats(vdev, vdev_stats);
6534 		dp_update_pdev_stats(pdev, vdev_stats);
6535 		dp_update_pdev_ingress_stats(pdev, vdev);
6536 	}
6537 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6538 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6539 	qdf_mem_free(vdev_stats);
6540 
6541 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6542 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
6543 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
6544 #endif
6545 }
6546 
6547 /**
6548  * dp_vdev_getstats() - get vdev packet level stats
6549  * @vdev_handle: Datapath VDEV handle
6550  * @stats: cdp network device stats structure
6551  *
6552  * Return: void
6553  */
6554 static void dp_vdev_getstats(void *vdev_handle,
6555 		struct cdp_dev_stats *stats)
6556 {
6557 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6558 	struct dp_pdev *pdev;
6559 	struct dp_soc *soc;
6560 	struct cdp_vdev_stats *vdev_stats;
6561 
6562 	if (!vdev)
6563 		return;
6564 
6565 	pdev = vdev->pdev;
6566 	if (!pdev)
6567 		return;
6568 
6569 	soc = pdev->soc;
6570 
6571 	vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
6572 
6573 	if (!vdev_stats) {
6574 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6575 			  "DP alloc failure - unable to get alloc vdev stats");
6576 		return;
6577 	}
6578 
6579 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
6580 	dp_aggregate_vdev_stats(vdev, vdev_stats);
6581 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6582 
6583 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
6584 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
6585 
6586 	stats->tx_errors = vdev_stats->tx.tx_failed +
6587 		vdev_stats->tx_i.dropped.dropped_pkt.num;
6588 	stats->tx_dropped = stats->tx_errors;
6589 
6590 	stats->rx_packets = vdev_stats->rx.unicast.num +
6591 		vdev_stats->rx.multicast.num +
6592 		vdev_stats->rx.bcast.num;
6593 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
6594 		vdev_stats->rx.multicast.bytes +
6595 		vdev_stats->rx.bcast.bytes;
6596 
6597 	qdf_mem_free(vdev_stats);
}

6602 /**
6603  * dp_pdev_getstats() - get pdev packet level stats
6604  * @pdev_handle: Datapath PDEV handle
6605  * @stats: cdp network device stats structure
6606  *
6607  * Return: void
6608  */
6609 static void dp_pdev_getstats(void *pdev_handle,
6610 		struct cdp_dev_stats *stats)
6611 {
6612 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6613 
6614 	dp_aggregate_pdev_stats(pdev);
6615 
6616 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
6617 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
6618 
6619 	stats->tx_errors = pdev->stats.tx.tx_failed +
6620 		pdev->stats.tx_i.dropped.dropped_pkt.num;
6621 	stats->tx_dropped = stats->tx_errors;
6622 
6623 	stats->rx_packets = pdev->stats.rx.unicast.num +
6624 		pdev->stats.rx.multicast.num +
6625 		pdev->stats.rx.bcast.num;
6626 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
6627 		pdev->stats.rx.multicast.bytes +
6628 		pdev->stats.rx.bcast.bytes;
6629 }
6630 
6631 /**
6632  * dp_get_device_stats() - get interface level packet stats
6633  * @handle: device handle
6634  * @stats: cdp network device stats structure
6635  * @type: device type pdev/vdev
6636  *
6637  * Return: void
6638  */
6639 static void dp_get_device_stats(void *handle,
6640 		struct cdp_dev_stats *stats, uint8_t type)
6641 {
6642 	switch (type) {
6643 	case UPDATE_VDEV_STATS:
6644 		dp_vdev_getstats(handle, stats);
6645 		break;
6646 	case UPDATE_PDEV_STATS:
6647 		dp_pdev_getstats(handle, stats);
6648 		break;
6649 	default:
6650 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6651 			"apstats cannot be updated for this input "
6652 			"type %d", type);
6653 		break;
6654 	}
}
6657 
6658 const
6659 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
6660 {
6661 	switch (ring_type) {
6662 	case REO_DST:
6663 		return "Reo_dst";
6664 	case REO_EXCEPTION:
6665 		return "Reo_exception";
6666 	case REO_CMD:
6667 		return "Reo_cmd";
6668 	case REO_REINJECT:
6669 		return "Reo_reinject";
6670 	case REO_STATUS:
6671 		return "Reo_status";
6672 	case WBM2SW_RELEASE:
6673 		return "wbm2sw_release";
6674 	case TCL_DATA:
6675 		return "tcl_data";
6676 	case TCL_CMD:
6677 		return "tcl_cmd";
6678 	case TCL_STATUS:
6679 		return "tcl_status";
6680 	case SW2WBM_RELEASE:
6681 		return "sw2wbm_release";
6682 	case RXDMA_BUF:
6683 		return "Rxdma_buf";
6684 	case RXDMA_DST:
6685 		return "Rxdma_dst";
6686 	case RXDMA_MONITOR_BUF:
6687 		return "Rxdma_monitor_buf";
6688 	case RXDMA_MONITOR_DESC:
6689 		return "Rxdma_monitor_desc";
6690 	case RXDMA_MONITOR_STATUS:
6691 		return "Rxdma_monitor_status";
6692 	default:
6693 		dp_err("Invalid ring type");
6694 		break;
6695 	}
6696 	return "Invalid";
6697 }
6698 
/*
 * dp_print_napi_stats(): print NAPI stats
 * @soc: soc handle
 *
 * Return: void
 */
6703 static void dp_print_napi_stats(struct dp_soc *soc)
6704 {
6705 	hif_print_napi_stats(soc->hif_handle);
6706 }
6707 
6708 /**
6709  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
6710  * @vdev: DP_VDEV handle
6711  *
6712  * Return:void
6713  */
6714 static inline void
6715 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
6716 {
6717 	struct dp_peer *peer = NULL;
6718 
6719 	if (!vdev || !vdev->pdev)
6720 		return;
6721 
6722 	DP_STATS_CLR(vdev->pdev);
6723 	DP_STATS_CLR(vdev->pdev->soc);
6724 	DP_STATS_CLR(vdev);
6725 
6726 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
6727 
6728 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6729 		if (!peer)
6730 			return;
6731 		DP_STATS_CLR(peer);
6732 
6733 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6734 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6735 				     &peer->stats,  peer->peer_ids[0],
6736 				     UPDATE_PEER_STATS, vdev->pdev->pdev_id);
6737 #endif
6738 	}
6739 
6740 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6741 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6742 			     &vdev->stats,  vdev->vdev_id,
6743 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
6744 #endif
6745 }
6746 
6747 /*
6748  * dp_get_host_peer_stats()- function to print peer stats
6749  * @pdev_handle: DP_PDEV handle
6750  * @mac_addr: mac address of the peer
6751  *
6752  * Return: void
6753  */
6754 static void
6755 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
6756 {
6757 	struct dp_peer *peer;
6758 	uint8_t local_id;
6759 
6760 	if (!mac_addr) {
6761 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6762 			  "Invalid MAC address\n");
6763 		return;
6764 	}
6765 
6766 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
6767 			&local_id);
6768 
6769 	if (!peer) {
6770 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6771 			  "%s: Invalid peer\n", __func__);
6772 		return;
6773 	}
6774 
6775 	/* Making sure the peer is for the specific pdev */
6776 	if ((struct dp_pdev *)pdev_handle != peer->vdev->pdev) {
6777 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6778 			  "%s: Peer is not for this pdev\n", __func__);
6779 		return;
6780 	}
6781 
6782 	dp_print_peer_stats(peer);
6783 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
6784 }
6785 
6786 /**
6787  * dp_txrx_stats_help() - Helper function for Txrx_Stats
6788  *
6789  * Return: None
6790  */
6791 static void dp_txrx_stats_help(void)
6792 {
6793 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
6794 	dp_info("stats_option:");
6795 	dp_info("  1 -- HTT Tx Statistics");
6796 	dp_info("  2 -- HTT Rx Statistics");
6797 	dp_info("  3 -- HTT Tx HW Queue Statistics");
6798 	dp_info("  4 -- HTT Tx HW Sched Statistics");
6799 	dp_info("  5 -- HTT Error Statistics");
6800 	dp_info("  6 -- HTT TQM Statistics");
6801 	dp_info("  7 -- HTT TQM CMDQ Statistics");
6802 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
6803 	dp_info("  9 -- HTT Tx Rate Statistics");
6804 	dp_info(" 10 -- HTT Rx Rate Statistics");
6805 	dp_info(" 11 -- HTT Peer Statistics");
6806 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
6807 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
6808 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
6809 	dp_info(" 15 -- HTT SRNG Statistics");
6810 	dp_info(" 16 -- HTT SFM Info Statistics");
6811 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
6812 	dp_info(" 18 -- HTT Peer List Details");
6813 	dp_info(" 20 -- Clear Host Statistics");
6814 	dp_info(" 21 -- Host Rx Rate Statistics");
6815 	dp_info(" 22 -- Host Tx Rate Statistics");
6816 	dp_info(" 23 -- Host Tx Statistics");
6817 	dp_info(" 24 -- Host Rx Statistics");
6818 	dp_info(" 25 -- Host AST Statistics");
6819 	dp_info(" 26 -- Host SRNG PTR Statistics");
6820 	dp_info(" 27 -- Host Mon Statistics");
6821 	dp_info(" 28 -- Host REO Queue Statistics");
6822 	dp_info(" 29 -- Host Soc cfg param Statistics");
6823 	dp_info(" 30 -- Host pdev cfg param Statistics");
6824 }
6825 
6826 /**
6827  * dp_print_host_stats()- Function to print the stats aggregated at host
6828  * @vdev_handle: DP_VDEV handle
 * @req: stats request containing the host stats type
6830  *
6831  * Return: 0 on success, print error message in case of failure
6832  */
6833 static int
6834 dp_print_host_stats(struct cdp_vdev *vdev_handle,
6835 		    struct cdp_txrx_stats_req *req)
6836 {
6837 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6838 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
6839 	enum cdp_host_txrx_stats type =
6840 			dp_stats_mapping_table[req->stats][STATS_HOST];
6841 
6842 	dp_aggregate_pdev_stats(pdev);
6843 
6844 	switch (type) {
6845 	case TXRX_CLEAR_STATS:
6846 		dp_txrx_host_stats_clr(vdev);
6847 		break;
6848 	case TXRX_RX_RATE_STATS:
6849 		dp_print_rx_rates(vdev);
6850 		break;
6851 	case TXRX_TX_RATE_STATS:
6852 		dp_print_tx_rates(vdev);
6853 		break;
6854 	case TXRX_TX_HOST_STATS:
6855 		dp_print_pdev_tx_stats(pdev);
6856 		dp_print_soc_tx_stats(pdev->soc);
6857 		break;
6858 	case TXRX_RX_HOST_STATS:
6859 		dp_print_pdev_rx_stats(pdev);
6860 		dp_print_soc_rx_stats(pdev->soc);
6861 		break;
6862 	case TXRX_AST_STATS:
6863 		dp_print_ast_stats(pdev->soc);
6864 		dp_print_peer_table(vdev);
6865 		break;
6866 	case TXRX_SRNG_PTR_STATS:
6867 		dp_print_ring_stats(pdev);
6868 		break;
6869 	case TXRX_RX_MON_STATS:
6870 		dp_print_pdev_rx_mon_stats(pdev);
6871 		break;
6872 	case TXRX_REO_QUEUE_STATS:
6873 		dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
6874 		break;
6875 	case TXRX_SOC_CFG_PARAMS:
6876 		dp_print_soc_cfg_params(pdev->soc);
6877 		break;
6878 	case TXRX_PDEV_CFG_PARAMS:
6879 		dp_print_pdev_cfg_params(pdev);
6880 		break;
6881 	case TXRX_NAPI_STATS:
		dp_print_napi_stats(pdev->soc);
		break;
6883 	case TXRX_SOC_INTERRUPT_STATS:
6884 		dp_print_soc_interrupt_stats(pdev->soc);
6885 		break;
6886 	default:
6887 		dp_info("Wrong Input For TxRx Host Stats");
6888 		dp_txrx_stats_help();
6889 		break;
6890 	}
6891 	return 0;
6892 }
6893 
6894 /*
6895  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
6896  * @pdev: DP_PDEV handle
6897  *
6898  * Return: void
6899  */
6900 static void
6901 dp_ppdu_ring_reset(struct dp_pdev *pdev)
6902 {
6903 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6904 	int mac_id;
6905 
6906 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6907 
6908 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6909 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6910 							pdev->pdev_id);
6911 
6912 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6913 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6914 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6915 	}
6916 }
6917 
6918 /*
6919  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
6920  * @pdev: DP_PDEV handle
6921  *
6922  * Return: void
6923  */
6924 static void
6925 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
6926 {
6927 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
6928 	int mac_id;
6929 
6930 	htt_tlv_filter.mpdu_start = 1;
6931 	htt_tlv_filter.msdu_start = 0;
6932 	htt_tlv_filter.packet = 0;
6933 	htt_tlv_filter.msdu_end = 0;
6934 	htt_tlv_filter.mpdu_end = 0;
6935 	htt_tlv_filter.attention = 0;
6936 	htt_tlv_filter.ppdu_start = 1;
6937 	htt_tlv_filter.ppdu_end = 1;
6938 	htt_tlv_filter.ppdu_end_user_stats = 1;
6939 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6940 	htt_tlv_filter.ppdu_end_status_done = 1;
6941 	htt_tlv_filter.enable_fp = 1;
6942 	htt_tlv_filter.enable_md = 0;
6943 	if (pdev->neighbour_peers_added &&
6944 	    pdev->soc->hw_nac_monitor_support) {
6945 		htt_tlv_filter.enable_md = 1;
6946 		htt_tlv_filter.packet_header = 1;
6947 	}
6948 	if (pdev->mcopy_mode) {
6949 		htt_tlv_filter.packet_header = 1;
6950 		htt_tlv_filter.enable_mo = 1;
6951 	}
6952 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6953 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6954 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6955 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6956 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6957 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6958 	if (pdev->neighbour_peers_added &&
6959 	    pdev->soc->hw_nac_monitor_support)
6960 		htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
6961 
6962 	htt_tlv_filter.offset_valid = false;
6963 
6964 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6965 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6966 						pdev->pdev_id);
6967 
6968 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
6969 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6970 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6971 	}
6972 }
6973 
/*
 * is_ppdu_txrx_capture_enabled() - API to check the pktlog and debug
 *                              sniffer (tx sniffer/M-copy) capture modes
 * @pdev: dp pdev handle.
 *
 * Note: despite its name, this helper returns true only when *none* of
 * the pktlog ppdu stats, tx sniffer and M-copy modes is enabled, i.e.
 * when the PPDU stats configuration may safely be reprogrammed.
 *
 * Return: bool
 */
6981 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
6982 {
6983 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
6984 	    !pdev->mcopy_mode)
6985 		return true;
6986 	else
6987 		return false;
6988 }
6989 
6990 /*
 * dp_set_bpr_enable() - API to enable/disable bpr feature
 * @pdev_handle: DP_PDEV handle.
 * @val: Provided value.
 *
 * Return: 0 for success. nonzero for failure.
6996  */
6997 static QDF_STATUS
6998 dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
6999 {
7000 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7001 
7002 	switch (val) {
7003 	case CDP_BPR_DISABLE:
7004 		pdev->bpr_enable = CDP_BPR_DISABLE;
7005 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7006 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7007 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7008 		} else if (pdev->enhanced_stats_en &&
7009 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7010 			   !pdev->pktlog_ppdu_stats) {
7011 			dp_h2t_cfg_stats_msg_send(pdev,
7012 						  DP_PPDU_STATS_CFG_ENH_STATS,
7013 						  pdev->pdev_id);
7014 		}
7015 		break;
7016 	case CDP_BPR_ENABLE:
7017 		pdev->bpr_enable = CDP_BPR_ENABLE;
7018 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
7019 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
7020 			dp_h2t_cfg_stats_msg_send(pdev,
7021 						  DP_PPDU_STATS_CFG_BPR,
7022 						  pdev->pdev_id);
7023 		} else if (pdev->enhanced_stats_en &&
7024 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7025 			   !pdev->pktlog_ppdu_stats) {
7026 			dp_h2t_cfg_stats_msg_send(pdev,
7027 						  DP_PPDU_STATS_CFG_BPR_ENH,
7028 						  pdev->pdev_id);
7029 		} else if (pdev->pktlog_ppdu_stats) {
7030 			dp_h2t_cfg_stats_msg_send(pdev,
7031 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
7032 						  pdev->pdev_id);
7033 		}
7034 		break;
7035 	default:
7036 		break;
7037 	}
7038 
7039 	return QDF_STATUS_SUCCESS;
7040 }
7041 
/*
 * dp_pdev_tid_stats_ingress_inc() - increment the ingress stack counter
 * @pdev: pdev handle
 * @val: amount by which to increase
 *
 * Return: void
 */
7049 static void
7050 dp_pdev_tid_stats_ingress_inc(struct cdp_pdev *pdev, uint32_t val)
7051 {
7052 	struct dp_pdev *dp_pdev = (struct dp_pdev *)pdev;
7053 
7054 	dp_pdev->stats.tid_stats.ingress_stack += val;
7055 }
7056 
/*
 * dp_pdev_tid_stats_osif_drop() - increment the osif drop counter
 * @pdev: pdev handle
 * @val: amount by which to increase
 *
 * Return: void
 */
7064 static void
7065 dp_pdev_tid_stats_osif_drop(struct cdp_pdev *pdev, uint32_t val)
7066 {
7067 	struct dp_pdev *dp_pdev = (struct dp_pdev *)pdev;
7068 
7069 	dp_pdev->stats.tid_stats.osif_drop += val;
7070 }
7071 
7072 /*
7073  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
7074  * @pdev_handle: DP_PDEV handle
7075  * @val: user provided value
7076  *
7077  * Return: 0 for success. nonzero for failure.
7078  */
7079 static QDF_STATUS
7080 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
7081 {
7082 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7083 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7084 
7085 	if (pdev->mcopy_mode)
7086 		dp_reset_monitor_mode(pdev_handle);
7087 
7088 	switch (val) {
7089 	case 0:
7090 		pdev->tx_sniffer_enable = 0;
7091 		pdev->mcopy_mode = 0;
7092 		pdev->monitor_configured = false;
7093 
7094 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7095 		    !pdev->bpr_enable) {
7096 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7097 			dp_ppdu_ring_reset(pdev);
7098 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
7099 			dp_h2t_cfg_stats_msg_send(pdev,
7100 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7101 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
7102 			dp_h2t_cfg_stats_msg_send(pdev,
7103 						  DP_PPDU_STATS_CFG_BPR_ENH,
7104 						  pdev->pdev_id);
7105 		} else {
7106 			dp_h2t_cfg_stats_msg_send(pdev,
7107 						  DP_PPDU_STATS_CFG_BPR,
7108 						  pdev->pdev_id);
7109 		}
7110 		break;
7111 
7112 	case 1:
7113 		pdev->tx_sniffer_enable = 1;
7114 		pdev->mcopy_mode = 0;
7115 		pdev->monitor_configured = false;
7116 
7117 		if (!pdev->pktlog_ppdu_stats)
7118 			dp_h2t_cfg_stats_msg_send(pdev,
7119 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7120 		break;
7121 	case 2:
7122 		if (pdev->monitor_vdev) {
7123 			status = QDF_STATUS_E_RESOURCES;
7124 			break;
7125 		}
7126 
7127 		pdev->mcopy_mode = 1;
7128 		dp_pdev_configure_monitor_rings(pdev);
7129 		pdev->monitor_configured = true;
7130 		pdev->tx_sniffer_enable = 0;
7131 
7132 		if (!pdev->pktlog_ppdu_stats)
7133 			dp_h2t_cfg_stats_msg_send(pdev,
7134 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7135 		break;
7136 
7137 	default:
7138 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7139 			"Invalid value");
7140 		break;
7141 	}
7142 	return status;
7143 }
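
/*
 * Illustrative meaning of the user-provided value (a sketch; the pdev
 * handle is assumed to come from the usual attach flow):
 *
 *	dp_config_debug_sniffer(pdev_handle, 0);  disable sniffer/m-copy
 *	dp_config_debug_sniffer(pdev_handle, 1);  enable tx sniffer
 *	dp_config_debug_sniffer(pdev_handle, 2);  enable m-copy mode
 *
 * The val == 2 case fails with QDF_STATUS_E_RESOURCES when a monitor
 * vdev already owns the monitor rings.
 */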
7144 
7145 /*
7146  * dp_enable_enhanced_stats()- API to enable enhanced statistics
7147  * @pdev_handle: DP_PDEV handle
7148  *
7149  * Return: void
7150  */
7151 static void
7152 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
7153 {
7154 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7155 
7156 	if (pdev->enhanced_stats_en == 0)
7157 		dp_cal_client_timer_start(pdev->cal_client_ctx);
7158 
7159 	pdev->enhanced_stats_en = 1;
7160 
7161 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
7162 	    !pdev->monitor_vdev)
7163 		dp_ppdu_ring_cfg(pdev);
7164 
7165 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7166 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7167 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7168 		dp_h2t_cfg_stats_msg_send(pdev,
7169 					  DP_PPDU_STATS_CFG_BPR_ENH,
7170 					  pdev->pdev_id);
7171 	}
7172 }
7173 
7174 /*
7175  * dp_disable_enhanced_stats()- API to disable enhanced statistics
7176  * @pdev_handle: DP_PDEV handle
7177  *
7178  * Return: void
7179  */
7180 static void
7181 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
7182 {
7183 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7184 
7185 	if (pdev->enhanced_stats_en == 1)
7186 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
7187 
7188 	pdev->enhanced_stats_en = 0;
7189 
7190 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7191 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7192 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7193 		dp_h2t_cfg_stats_msg_send(pdev,
7194 					  DP_PPDU_STATS_CFG_BPR,
7195 					  pdev->pdev_id);
7196 	}
7197 
7198 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
7199 	    !pdev->monitor_vdev)
7200 		dp_ppdu_ring_reset(pdev);
7201 }
7202 
7203 /*
7204  * dp_get_fw_peer_stats()- function to print peer stats
7205  * @pdev_handle: DP_PDEV handle
7206  * @mac_addr: mac address of the peer
7207  * @cap: Type of htt stats requested
7208  * @is_wait: if set, wait on completion from firmware response
7209  *
7210  * Currently supports only MAC-ID based requests:
7211  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
7212  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
7213  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
7214  *
7215  * Return: void
7216  */
7217 static void
7218 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
7219 		uint32_t cap, uint32_t is_wait)
7220 {
7221 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7222 	int i;
7223 	uint32_t config_param0 = 0;
7224 	uint32_t config_param1 = 0;
7225 	uint32_t config_param2 = 0;
7226 	uint32_t config_param3 = 0;
7227 
7228 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
7229 	config_param0 |= (1 << (cap + 1));
7230 
7231 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
7232 		config_param1 |= (1 << i);
7233 	}
7234 
7235 	config_param2 |= (mac_addr[0] & 0x000000ff);
7236 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
7237 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
7238 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
7239 
7240 	config_param3 |= (mac_addr[4] & 0x000000ff);
7241 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
7242 
7243 	if (is_wait) {
7244 		qdf_event_reset(&pdev->fw_peer_stats_event);
7245 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7246 					  config_param0, config_param1,
7247 					  config_param2, config_param3,
7248 					  0, 1, 0);
7249 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
7250 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
7251 	} else {
7252 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7253 					  config_param0, config_param1,
7254 					  config_param2, config_param3,
7255 					  0, 0, 0);
7256 	}
7257 
7258 }
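
/*
 * Worked example of the MAC address packing above: for
 * mac_addr = 00:11:22:33:44:55 the words come out as
 *
 *	config_param2 = 0x33221100	(bytes 0..3)
 *	config_param3 = 0x00005544	(bytes 4..5)
 *
 * i.e. each successive address byte occupies the next-higher 8 bits
 * of its word.
 */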
7259 
7260 /* This struct definition will be removed from here
7261  * once it gets added in the FW headers */
7262 struct httstats_cmd_req {
7263 	uint32_t	config_param0;
7264 	uint32_t	config_param1;
7265 	uint32_t	config_param2;
7266 	uint32_t	config_param3;
7267 	int		cookie;
7268 	u_int8_t	stats_id;
7269 };
7270 
7271 /*
7272  * dp_get_htt_stats: function to process the HTT stats request
7273  * @pdev_handle: DP pdev handle
7274  * @data: pointer to request data
7275  * @data_len: length for request data
7276  *
7277  * return: void
7278  */
7279 static void
7280 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
7281 {
7282 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7283 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
7284 
7285 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
7286 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
7287 				req->config_param0, req->config_param1,
7288 				req->config_param2, req->config_param3,
7289 				req->cookie, 0, 0);
7290 }
7291 
7292 /*
7293  * dp_set_pdev_param: function to set parameters in pdev
7294  * @pdev_handle: DP pdev handle
7295  * @param: parameter type to be set
7296  * @val: value of parameter to be set
7297  *
7298  * Return: 0 for success. nonzero for failure.
7299  */
7300 static QDF_STATUS dp_set_pdev_param(struct cdp_pdev *pdev_handle,
7301 				    enum cdp_pdev_param_type param,
7302 				    uint8_t val)
7303 {
7304 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7305 	switch (param) {
7306 	case CDP_CONFIG_DEBUG_SNIFFER:
7307 		return dp_config_debug_sniffer(pdev_handle, val);
7308 	case CDP_CONFIG_BPR_ENABLE:
7309 		return dp_set_bpr_enable(pdev_handle, val);
7310 	case CDP_CONFIG_PRIMARY_RADIO:
7311 		pdev->is_primary = val;
7312 		break;
7313 	case CDP_CONFIG_CAPTURE_LATENCY:
7314 		if (val == 1)
7315 			pdev->latency_capture_enable = true;
7316 		else
7317 			pdev->latency_capture_enable = false;
7318 		break;
7319 	case CDP_INGRESS_STATS:
7320 		dp_pdev_tid_stats_ingress_inc(pdev_handle, val);
7321 		break;
7322 	case CDP_OSIF_DROP:
7323 		dp_pdev_tid_stats_osif_drop(pdev_handle, val);
7324 		break;
7325 	case CDP_CONFIG_ENH_RX_CAPTURE:
7326 		return dp_config_enh_rx_capture(pdev_handle, val);
7327 	case CDP_CONFIG_TX_CAPTURE:
7328 		return dp_config_enh_tx_capture(pdev_handle, val);
7329 	default:
7330 		return QDF_STATUS_E_INVAL;
7331 	}
7332 	return QDF_STATUS_SUCCESS;
7333 }
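
/*
 * Illustrative usage (a sketch, assuming a valid pdev handle): enable
 * latency capture through the generic param interface,
 *
 *	QDF_STATUS status;
 *
 *	status = dp_set_pdev_param(pdev_handle,
 *				   CDP_CONFIG_CAPTURE_LATENCY, 1);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		dp_err("failed to enable latency capture");
 */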
7334 
7335 /*
7336  * dp_calculate_delay_stats: function to get rx delay stats
7337  * @vdev_handle: DP vdev handle
7338  * @nbuf: skb
7339  *
7340  * Return: void
7341  */
7342 static void dp_calculate_delay_stats(struct cdp_vdev *vdev_handle,
7343 				     qdf_nbuf_t nbuf)
7344 {
7345 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7346 
7347 	dp_rx_compute_delay(vdev, nbuf);
7348 }
7349 
7350 /*
7351  * dp_get_vdev_param: function to get parameters from vdev
7352  * @param: parameter type to get value
7353  *
7354  * return: void
7355  */
7356 static uint32_t dp_get_vdev_param(struct cdp_vdev *vdev_handle,
7357 				  enum cdp_vdev_param_type param)
7358 {
7359 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7360 	uint32_t val;
7361 
7362 	switch (param) {
7363 	case CDP_ENABLE_WDS:
7364 		val = vdev->wds_enabled;
7365 		break;
7366 	case CDP_ENABLE_MEC:
7367 		val = vdev->mec_enabled;
7368 		break;
7369 	case CDP_ENABLE_DA_WAR:
7370 		val = vdev->pdev->soc->da_war_enabled;
7371 		break;
7372 	default:
7373 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7374 			  "param value %d is wrong\n",
7375 			  param);
7376 		val = -1;
7377 		break;
7378 	}
7379 
7380 	return val;
7381 }
7382 
7383 /*
7384  * dp_set_vdev_param: function to set parameters in vdev
7385  * @param: parameter type to be set
7386  * @val: value of parameter to be set
7387  *
7388  * return: void
7389  */
7390 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
7391 		enum cdp_vdev_param_type param, uint32_t val)
7392 {
7393 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7394 	switch (param) {
7395 	case CDP_ENABLE_WDS:
7396 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7397 			  "wds_enable %d for vdev(%p) id(%d)\n",
7398 			  val, vdev, vdev->vdev_id);
7399 		vdev->wds_enabled = val;
7400 		break;
7401 	case CDP_ENABLE_MEC:
7402 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7403 			  "mec_enable %d for vdev(%p) id(%d)\n",
7404 			  val, vdev, vdev->vdev_id);
7405 		vdev->mec_enabled = val;
7406 		break;
7407 	case CDP_ENABLE_DA_WAR:
7408 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7409 			  "da_war_enable %d for vdev(%p) id(%d)\n",
7410 			  val, vdev, vdev->vdev_id);
7411 		vdev->pdev->soc->da_war_enabled = val;
7412 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
7413 					     vdev->pdev->soc));
7414 		break;
7415 	case CDP_ENABLE_NAWDS:
7416 		vdev->nawds_enabled = val;
7417 		break;
7418 	case CDP_ENABLE_MCAST_EN:
7419 		vdev->mcast_enhancement_en = val;
7420 		break;
7421 	case CDP_ENABLE_PROXYSTA:
7422 		vdev->proxysta_vdev = val;
7423 		break;
7424 	case CDP_UPDATE_TDLS_FLAGS:
7425 		vdev->tdls_link_connected = val;
7426 		break;
7427 	case CDP_CFG_WDS_AGING_TIMER:
7428 		if (val == 0)
7429 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
7430 		else if (val != vdev->wds_aging_timer_val)
7431 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, val);
7432 
7433 		vdev->wds_aging_timer_val = val;
7434 		break;
7435 	case CDP_ENABLE_AP_BRIDGE:
7436 		if (wlan_op_mode_sta != vdev->opmode)
7437 			vdev->ap_bridge_enabled = val;
7438 		else
7439 			vdev->ap_bridge_enabled = false;
7440 		break;
7441 	case CDP_ENABLE_CIPHER:
7442 		vdev->sec_type = val;
7443 		break;
7444 	case CDP_ENABLE_QWRAP_ISOLATION:
7445 		vdev->isolation_vdev = val;
7446 		break;
7447 	default:
7448 		break;
7449 	}
7450 
7451 	dp_tx_vdev_update_search_flags(vdev);
7452 }
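
/*
 * Illustrative round trip through the vdev param accessors (a sketch,
 * assuming a valid vdev handle):
 *
 *	dp_set_vdev_param(vdev_handle, CDP_ENABLE_WDS, 1);
 *	if (dp_get_vdev_param(vdev_handle, CDP_ENABLE_WDS) != 1)
 *		dp_err("WDS enable did not take effect");
 *
 * Note that dp_set_vdev_param() also refreshes the vdev tx search
 * flags via dp_tx_vdev_update_search_flags().
 */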
7453 
7454 /**
7455  * dp_peer_set_nawds: set nawds bit in peer
7456  * @peer_handle: pointer to peer
7457  * @value: enable/disable nawds
7458  *
7459  * return: void
7460  */
7461 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
7462 {
7463 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
7464 	peer->nawds_enabled = value;
7465 }
7466 
7467 /*
7468  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
7469  * @vdev_handle: DP_VDEV handle
7470  * @map_id:ID of map that needs to be updated
7471  *
7472  * Return: void
7473  */
7474 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
7475 		uint8_t map_id)
7476 {
7477 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7478 	vdev->dscp_tid_map_id = map_id;
7479 	return;
7480 }
7481 
7482 #ifdef DP_RATETABLE_SUPPORT
7483 static int dp_txrx_get_ratekbps(int preamb, int mcs,
7484 				int htflag, int gintval)
7485 {
7486 	uint32_t rix;
7487 
7488 	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
7489 			       (uint8_t)preamb, 1, &rix);
7490 }
7491 #else
7492 static int dp_txrx_get_ratekbps(int preamb, int mcs,
7493 				int htflag, int gintval)
7494 {
7495 	return 0;
7496 }
7497 #endif
7498 
7499 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
7500  * @pdev_handle: DP pdev handle
7501  *
7502  * return : cdp_pdev_stats pointer
7503  */
7504 static struct cdp_pdev_stats*
7505 dp_txrx_get_pdev_stats(struct cdp_pdev *pdev_handle)
7506 {
7507 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7508 
7509 	dp_aggregate_pdev_stats(pdev);
7510 
7511 	return &pdev->stats;
7512 }
7513 
7514 /* dp_txrx_get_peer_stats - returns cdp_peer_stats
7515  * @peer_handle: DP_PEER handle
7516  *
7517  * return : cdp_peer_stats pointer
7518  */
7519 static struct cdp_peer_stats*
7520 		dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
7521 {
7522 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
7523 
7524 	qdf_assert(peer);
7525 
7526 	return &peer->stats;
7527 }
7528 
7529 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
7530  * @peer_handle: DP_PEER handle
7531  *
7532  * return : void
7533  */
7534 static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
7535 {
7536 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
7537 
7538 	qdf_assert(peer);
7539 
7540 	qdf_mem_zero(&peer->stats, sizeof(peer->stats));
7541 }
7542 
7543 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
7544  * @vdev_handle: DP_VDEV handle
7545  * @buf: buffer for vdev stats
7546  * @is_aggregate: if true, aggregate stats across all peers of the vdev
7547  *
7548  * return : 0 on success, 1 for an invalid vdev/pdev handle
7548  */
7549 static int  dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
7550 				   bool is_aggregate)
7551 {
7552 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7553 	struct cdp_vdev_stats *vdev_stats;
7554 	struct dp_pdev *pdev;
7555 	struct dp_soc *soc;
7556 
7557 	if (!vdev)
7558 		return 1;
7559 
7560 	pdev = vdev->pdev;
7561 	if (!pdev)
7562 		return 1;
7563 
7564 	soc = pdev->soc;
7565 	vdev_stats = (struct cdp_vdev_stats *)buf;
7566 
7567 	if (is_aggregate) {
7568 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
7569 		dp_aggregate_vdev_stats(vdev, buf);
7570 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
7571 	} else {
7572 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
7573 	}
7574 
7575 	return 0;
7576 }
7577 
7578 /*
7579  * dp_get_total_per(): get the total PER (packet error rate)
7580  * @pdev_handle: DP_PDEV handle
7581  *
7582  * Return: % error rate using retries per packet and success packets
7583  */
7584 static int dp_get_total_per(struct cdp_pdev *pdev_handle)
7585 {
7586 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7587 
7588 	dp_aggregate_pdev_stats(pdev);
7589 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
7590 		return 0;
7591 	return ((pdev->stats.tx.retries * 100) /
7592 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
7593 }
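
/*
 * Worked example of the PER computation above: with
 * tx_success.num = 900 and retries = 100,
 *
 *	PER = (100 * 100) / (900 + 100) = 10
 *
 * i.e. a 10% error rate. A zero denominator is reported as 0 instead
 * of risking a divide-by-zero.
 */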
7594 
7595 /*
7596  * dp_txrx_stats_publish(): publish pdev stats into a buffer
7597  * @pdev_handle: DP_PDEV handle
7598  * @buf: to hold pdev_stats
7599  *
7600  * Return: int
7601  */
7602 static int
7603 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, struct cdp_stats_extd *buf)
7604 {
7605 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7606 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
7607 	struct cdp_txrx_stats_req req = {0,};
7608 
7609 	dp_aggregate_pdev_stats(pdev);
7610 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
7611 	req.cookie_val = 1;
7612 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
7613 				req.param1, req.param2, req.param3, 0,
7614 				req.cookie_val, 0);
7615 
7616 	msleep(DP_MAX_SLEEP_TIME);
7617 
7618 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
7619 	req.cookie_val = 1;
7620 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
7621 				req.param1, req.param2, req.param3, 0,
7622 				req.cookie_val, 0);
7623 
7624 	msleep(DP_MAX_SLEEP_TIME);
7625 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
7626 
7627 	return TXRX_STATS_LEVEL;
7628 }
7629 
7630 /**
7631  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
7632  * @pdev: DP_PDEV handle
7633  * @map_id: ID of map that needs to be updated
7634  * @tos: index value in map
7635  * @tid: tid value passed by the user
7636  *
7637  * Return: void
7638  */
7639 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
7640 		uint8_t map_id, uint8_t tos, uint8_t tid)
7641 {
7642 	uint8_t dscp;
7643 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
7644 	struct dp_soc *soc = pdev->soc;
7645 
7646 	if (!soc)
7647 		return;
7648 
7649 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
7650 	pdev->dscp_tid_map[map_id][dscp] = tid;
7651 
7652 	if (map_id < soc->num_hw_dscp_tid_map)
7653 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
7654 				       map_id, dscp);
7655 	return;
7656 }
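
/*
 * Worked example of the ToS-to-DSCP conversion above (assuming the
 * conventional DP_IP_DSCP_SHIFT of 2 and DP_IP_DSCP_MASK of 0x3f):
 * for tos = 0xb8 (DSCP 46, Expedited Forwarding),
 *
 *	dscp = (0xb8 >> 2) & 0x3f = 0x2e = 46
 *
 * so dscp_tid_map[map_id][46] is set to the requested tid, and the HW
 * copy is refreshed when map_id addresses a HW-backed map.
 */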
7657 
7658 /**
7659  * dp_hmmc_tid_override_en_wifi3(): Function to enable hmmc tid override.
7660  * @pdev_handle: pdev handle
7661  * @val: hmmc-dscp flag value
7662  *
7663  * Return: void
7664  */
7665 static void dp_hmmc_tid_override_en_wifi3(struct cdp_pdev *pdev_handle,
7666 					  bool val)
7667 {
7668 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7669 
7670 	pdev->hmmc_tid_override_en = val;
7671 }
7672 
7673 /**
7674  * dp_set_hmmc_tid_val_wifi3(): Function to set hmmc tid value.
7675  * @pdev_handle: pdev handle
7676  * @tid: tid value
7677  *
7678  * Return: void
7679  */
7680 static void dp_set_hmmc_tid_val_wifi3(struct cdp_pdev *pdev_handle,
7681 				      uint8_t tid)
7682 {
7683 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7684 
7685 	pdev->hmmc_tid = tid;
7686 }
7687 
7688 /**
7689  * dp_fw_stats_process(): Process TxRX FW stats request
7690  * @vdev_handle: DP VDEV handle
7691  * @req: stats request
7692  *
7693  * return: int
7694  */
7695 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
7696 		struct cdp_txrx_stats_req *req)
7697 {
7698 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7699 	struct dp_pdev *pdev = NULL;
7700 	uint32_t stats = req->stats;
7701 	uint8_t mac_id = req->mac_id;
7702 
7703 	if (!vdev) {
7704 		DP_TRACE(NONE, "VDEV not found");
7705 		return 1;
7706 	}
7707 	pdev = vdev->pdev;
7708 
7709 	/*
7710 	 * For the HTT_DBG_EXT_STATS_RESET command, the FW expects
7711 	 * config_param0 to config_param3 to be set per the rule below:
7712 	 *
7713 	 * PARAM:
7714 	 *   - config_param0 : start_offset (stats type)
7715 	 *   - config_param1 : stats bmask from start offset
7716 	 *   - config_param2 : stats bmask from start offset + 32
7717 	 *   - config_param3 : stats bmask from start offset + 64
7718 	 */
7719 	if (req->stats == CDP_TXRX_STATS_0) {
7720 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
7721 		req->param1 = 0xFFFFFFFF;
7722 		req->param2 = 0xFFFFFFFF;
7723 		req->param3 = 0xFFFFFFFF;
7724 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
7725 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
7726 	}
7727 
7728 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
7729 				req->param1, req->param2, req->param3,
7730 				0, 0, mac_id);
7731 }
7732 
7733 /**
7734  * dp_txrx_stats_request - function to map to firmware and host stats
7735  * @vdev: virtual handle
7736  * @req: stats request
7737  *
7738  * Return: QDF_STATUS
7739  */
7740 static
7741 QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
7742 				 struct cdp_txrx_stats_req *req)
7743 {
7744 	int host_stats;
7745 	int fw_stats;
7746 	enum cdp_stats stats;
7747 	int num_stats;
7748 
7749 	if (!vdev || !req) {
7750 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7751 				"Invalid vdev/req instance");
7752 		return QDF_STATUS_E_INVAL;
7753 	}
7754 
7755 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
7756 		dp_err("Invalid mac id request");
7757 		return QDF_STATUS_E_INVAL;
7758 	}
7759 
7760 	stats = req->stats;
7761 	if (stats >= CDP_TXRX_MAX_STATS)
7762 		return QDF_STATUS_E_INVAL;
7763 
7764 	/*
7765 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
7766 	 *			has to be updated if new FW HTT stats added
7767 	 */
7768 	if (stats > CDP_TXRX_STATS_HTT_MAX)
7769 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
7770 
7771 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
7772 
7773 	if (stats >= num_stats) {
7774 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7775 			  "%s: Invalid stats option: %d", __func__, stats);
7776 		return QDF_STATUS_E_INVAL;
7777 	}
7778 
7779 	req->stats = stats;
7780 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
7781 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
7782 
7783 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
7784 		stats, fw_stats, host_stats);
7785 
7786 	if (fw_stats != TXRX_FW_STATS_INVALID) {
7787 		/* update request with FW stats type */
7788 		req->stats = fw_stats;
7789 		return dp_fw_stats_process(vdev, req);
7790 	}
7791 
7792 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
7793 			(host_stats <= TXRX_HOST_STATS_MAX))
7794 		return dp_print_host_stats(vdev, req);
7795 	else
7796 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7797 				"Wrong Input for TxRx Stats");
7798 
7799 	return QDF_STATUS_SUCCESS;
7800 }
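
/*
 * Illustrative caller (a sketch, assuming valid handles): request the
 * first FW stats option and let the mapping table route it,
 *
 *	struct cdp_txrx_stats_req req = {0};
 *
 *	req.stats = CDP_TXRX_STATS_0;
 *	req.mac_id = 0;
 *	dp_txrx_stats_request(vdev_handle, &req);
 *
 * FW-backed options are forwarded via dp_fw_stats_process(); host
 * backed options are printed through dp_print_host_stats().
 */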
7801 
7802 /*
7803  * dp_txrx_dump_stats() - Dump statistics
7804  * @psoc: DP soc handle
7805  * @value: statistics option
7806  * @level: verbosity level of the dump
7805  */
7806 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
7807 				     enum qdf_stats_verbosity_level level)
7808 {
7809 	struct dp_soc *soc =
7810 		(struct dp_soc *)psoc;
7811 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7812 
7813 	if (!soc) {
7814 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7815 			"%s: soc is NULL", __func__);
7816 		return QDF_STATUS_E_INVAL;
7817 	}
7818 
7819 	switch (value) {
7820 	case CDP_TXRX_PATH_STATS:
7821 		dp_txrx_path_stats(soc);
7822 		dp_print_soc_interrupt_stats(soc);
7823 		break;
7824 
7825 	case CDP_RX_RING_STATS:
7826 		dp_print_per_ring_stats(soc);
7827 		break;
7828 
7829 	case CDP_TXRX_TSO_STATS:
7830 		/* TODO: NOT IMPLEMENTED */
7831 		break;
7832 
7833 	case CDP_DUMP_TX_FLOW_POOL_INFO:
7834 		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
7835 		break;
7836 
7837 	case CDP_DP_NAPI_STATS:
7838 		dp_print_napi_stats(soc);
7839 		break;
7840 
7841 	case CDP_TXRX_DESC_STATS:
7842 		/* TODO: NOT IMPLEMENTED */
7843 		break;
7844 
7845 	default:
7846 		status = QDF_STATUS_E_INVAL;
7847 		break;
7848 	}
7849 
7850 	return status;
7851 
7852 }
7853 
7854 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7855 /**
7856  * dp_update_flow_control_parameters() - API to store datapath
7857  *                            config parameters
7858  * @soc: soc handle
7859  * @params: ini parameter handle
7860  *
7861  * Return: void
7862  */
7863 static inline
7864 void dp_update_flow_control_parameters(struct dp_soc *soc,
7865 				struct cdp_config_params *params)
7866 {
7867 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
7868 					params->tx_flow_stop_queue_threshold;
7869 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
7870 					params->tx_flow_start_queue_offset;
7871 }
7872 #else
7873 static inline
7874 void dp_update_flow_control_parameters(struct dp_soc *soc,
7875 				struct cdp_config_params *params)
7876 {
7877 }
7878 #endif
7879 
7880 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
7881 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
7882 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
7883 
7884 /* Max packet limit for RX REAP Loop (dp_rx_process) */
7885 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
7886 
7887 static
7888 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
7889 					struct cdp_config_params *params)
7890 {
7891 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
7892 				params->tx_comp_loop_pkt_limit;
7893 
7894 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
7895 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
7896 	else
7897 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
7898 
7899 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
7900 				params->rx_reap_loop_pkt_limit;
7901 
7902 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
7903 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
7904 	else
7905 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
7906 
7907 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
7908 				params->rx_hp_oos_update_limit;
7909 
7910 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
7911 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
7912 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
7913 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
7914 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
7915 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
7916 }
7917 #else
7918 static inline
7919 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
7920 					struct cdp_config_params *params)
7921 { }
7922 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
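
/*
 * Illustrative effect of the limits above: a tx_comp_loop_pkt_limit of
 * 64, being below DP_TX_COMP_LOOP_PKT_LIMIT_MAX (1024), enables the
 * end-of-list data check so the TX completion loop can yield early; a
 * limit of 1024 or more disables that check. The RX reap limit behaves
 * the same way against DP_RX_REAP_LOOP_PKT_LIMIT_MAX.
 */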
7923 
7924 /**
7925  * dp_update_config_parameters() - API to store datapath
7926  *                            config parameters
7927  * @psoc: soc handle
7928  * @params: ini parameter handle
7929  *
7930  * Return: status
7931  */
7932 static
7933 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
7934 				struct cdp_config_params *params)
7935 {
7936 	struct dp_soc *soc = (struct dp_soc *)psoc;
7937 
7938 	if (!(soc)) {
7939 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7940 				"%s: Invalid handle", __func__);
7941 		return QDF_STATUS_E_INVAL;
7942 	}
7943 
7944 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
7945 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
7946 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
7947 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
7948 				params->tcp_udp_checksumoffload;
7949 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
7950 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
7951 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
7952 
7953 	dp_update_rx_soft_irq_limit_params(soc, params);
7954 	dp_update_flow_control_parameters(soc, params);
7955 
7956 	return QDF_STATUS_SUCCESS;
7957 }
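
/*
 * Illustrative usage (a sketch; the soc pointer is assumed to come
 * from the attach path): push a minimal ini-derived configuration,
 *
 *	struct cdp_config_params params = {0};
 *
 *	params.tso_enable = true;
 *	params.gro_enable = true;
 *	dp_update_config_parameters((struct cdp_soc *)soc, &params);
 */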
7958 
7959 /**
7960  * dp_txrx_set_wds_rx_policy() - API to set the WDS rx policy
7961  * @vdev_handle: datapath vdev handle
7962  * @val: WDS rx policy flags (WDS_POLICY_RX_UCAST_4ADDR,
7963  *	 WDS_POLICY_RX_MCAST_4ADDR)
7964  *
7965  * Return: void
7966  */
7967 #ifdef WDS_VENDOR_EXTENSION
7968 void
7969 dp_txrx_set_wds_rx_policy(
7970 		struct cdp_vdev *vdev_handle,
7971 		u_int32_t val)
7972 {
7973 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7974 	struct dp_peer *peer;
7975 	if (vdev->opmode == wlan_op_mode_ap) {
7976 		/* for ap, set it on bss_peer */
7977 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7978 			if (peer->bss_peer) {
7979 				peer->wds_ecm.wds_rx_filter = 1;
7980 				peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7981 				peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7982 				break;
7983 			}
7984 		}
7985 	} else if (vdev->opmode == wlan_op_mode_sta) {
7986 		peer = TAILQ_FIRST(&vdev->peer_list);
7987 		/* a STA vdev may not have its self-peer created yet */
7988 		if (!peer)
7989 			return;
7990 		peer->wds_ecm.wds_rx_filter = 1;
7988 		peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
7989 		peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
7990 	}
7991 }
7992 
7993 /**
7994  * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
7995  *
7996  * @peer_handle - datapath peer handle
7997  * @wds_tx_ucast: policy for unicast transmission
7998  * @wds_tx_mcast: policy for multicast transmission
7999  *
8000  * Return: void
8001  */
8002 void
8003 dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
8004 		int wds_tx_ucast, int wds_tx_mcast)
8005 {
8006 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8007 	if (wds_tx_ucast || wds_tx_mcast) {
8008 		peer->wds_enabled = 1;
8009 		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
8010 		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
8011 	} else {
8012 		peer->wds_enabled = 0;
8013 		peer->wds_ecm.wds_tx_ucast_4addr = 0;
8014 		peer->wds_ecm.wds_tx_mcast_4addr = 0;
8015 	}
8016 
8017 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8018 			FL("Policy Update set to :\
8019 				peer->wds_enabled %d\
8020 				peer->wds_ecm.wds_tx_ucast_4addr %d\
8021 				peer->wds_ecm.wds_tx_mcast_4addr %d"),
8022 				peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
8023 				peer->wds_ecm.wds_tx_mcast_4addr);
8024 	return;
8025 }
8026 #endif
8027 
8028 static struct cdp_wds_ops dp_ops_wds = {
8029 	.vdev_set_wds = dp_vdev_set_wds,
8030 #ifdef WDS_VENDOR_EXTENSION
8031 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
8032 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
8033 #endif
8034 };
8035 
8036 /*
8037  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
8038  * @vdev_handle - datapath vdev handle
8039  * @callback - callback function
8040  * @ctxt: callback context
8041  *
8042  */
8043 static void
8044 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
8045 		       ol_txrx_data_tx_cb callback, void *ctxt)
8046 {
8047 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8048 
8049 	vdev->tx_non_std_data_callback.func = callback;
8050 	vdev->tx_non_std_data_callback.ctxt = ctxt;
8051 }
8052 
8053 /**
8054  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
8055  * @pdev_hdl: datapath pdev handle
8056  *
8057  * Return: opaque pointer to dp txrx handle
8058  */
8059 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
8060 {
8061 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8062 
8063 	return pdev->dp_txrx_handle;
8064 }
8065 
8066 /**
8067  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
8068  * @pdev_hdl: datapath pdev handle
8069  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
8070  *
8071  * Return: void
8072  */
8073 static void
8074 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
8075 {
8076 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8077 
8078 	pdev->dp_txrx_handle = dp_txrx_hdl;
8079 }
8080 
8081 /**
8082  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
8083  * @soc_handle: datapath soc handle
8084  *
8085  * Return: opaque pointer to external dp (non-core DP)
8086  */
8087 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
8088 {
8089 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8090 
8091 	return soc->external_txrx_handle;
8092 }
8093 
8094 /**
8095  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
8096  * @soc_handle: datapath soc handle
8097  * @txrx_handle: opaque pointer to external dp (non-core DP)
8098  *
8099  * Return: void
8100  */
8101 static void
8102 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
8103 {
8104 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8105 
8106 	soc->external_txrx_handle = txrx_handle;
8107 }
8108 
8109 /**
8110  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
8111  * @pdev_hdl: datapath pdev handle
8112  * @lmac_id: lmac id
8113  *
8114  * Return: void
8115  */
8116 static void
8117 dp_soc_map_pdev_to_lmac(struct cdp_pdev *pdev_hdl, uint32_t lmac_id)
8118 {
8119 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8120 	struct dp_soc *soc = pdev->soc;
8121 
8122 	pdev->lmac_id = lmac_id;
8123 	wlan_cfg_set_hw_macid(soc->wlan_cfg_ctx,
8124 			      pdev->pdev_id,
8125 			      (lmac_id + 1));
8126 }
8127 
8128 /**
8129  * dp_get_cfg_capabilities() - get dp capabilities
8130  * @soc_handle: datapath soc handle
8131  * @dp_caps: enum for dp capabilities
8132  *
8133  * Return: bool to determine if dp caps is enabled
8134  */
8135 static bool
8136 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
8137 			enum cdp_capabilities dp_caps)
8138 {
8139 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8140 
8141 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
8142 }
8143 
8144 #ifdef FEATURE_AST
8145 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
8146 {
8147 	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
8148 	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
8149 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
8150 
8151 	/*
8152 	 * For a BSS peer, a new peer is not created on alloc_node if a
8153 	 * peer with the same address already exists; instead, the refcnt
8154 	 * of the existing peer is increased. Correspondingly, the delete
8155 	 * path only decreases the refcnt, and the peer is deleted only
8156 	 * when all references are gone. So delete_in_progress should not
8157 	 * be set for the bss_peer unless only 2 references remain (the
8158 	 * peer map reference and the peer hash table reference).
8159 	 */
8160 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
8161 		return;
8162 	}
8163 
8164 	qdf_spin_lock_bh(&soc->ast_lock);
8165 	peer->delete_in_progress = true;
8166 	dp_peer_delete_ast_entries(soc, peer);
8167 	qdf_spin_unlock_bh(&soc->ast_lock);
8168 }
8169 #endif
8170 
8171 #ifdef ATH_SUPPORT_NAC_RSSI
8172 /**
8173  * dp_vdev_get_neighbour_rssi(): Retrieve the stored RSSI for a configured NAC
8174  * @vdev_hdl: DP vdev handle
8175  * @mac_addr: neighbour peer mac address
8176  * @rssi: pointer to hold the rssi value
8176  *
8177  * Return: 0 for success. nonzero for failure.
8178  */
8179 static QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
8180 					      char *mac_addr,
8181 					      uint8_t *rssi)
8182 {
8183 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8184 	struct dp_pdev *pdev = vdev->pdev;
8185 	struct dp_neighbour_peer *peer = NULL;
8186 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
8187 
8188 	*rssi = 0;
8189 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
8190 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
8191 		      neighbour_peer_list_elem) {
8192 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
8193 				mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
8194 			*rssi = peer->rssi;
8195 			status = QDF_STATUS_SUCCESS;
8196 			break;
8197 		}
8198 	}
8199 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
8200 	return status;
8201 }
8202 
8203 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
8204 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
8205 		uint8_t chan_num)
8206 {
8207 
8208 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8209 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
8210 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
8211 
8212 	pdev->nac_rssi_filtering = 1;
8213 	/* Store address of NAC (neighbour peer) which will be checked
8214 	 * against TA of received packets.
8215 	 */
8216 
8217 	if (cmd == CDP_NAC_PARAM_ADD) {
8218 		dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
8219 						 client_macaddr);
8220 	} else if (cmd == CDP_NAC_PARAM_DEL) {
8221 		dp_update_filter_neighbour_peers(vdev_handle,
8222 						 DP_NAC_PARAM_DEL,
8223 						 client_macaddr);
8224 	}
8225 
8226 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
8227 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
8228 			((void *)vdev->pdev->ctrl_pdev,
8229 			 vdev->vdev_id, cmd, bssid);
8230 
8231 	return QDF_STATUS_SUCCESS;
8232 }
8233 #endif
8234 
8235 /**
8236  * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
8237  * for pktlog
8238  * @txrx_pdev_handle: cdp_pdev handle
8239  * @mac_addr: mac address of the peer used for filtering
8240  * @enb_dsb: Enable or disable peer based filtering
8240  *
8241  * Return: QDF_STATUS
8242  */
8243 static int
8244 dp_enable_peer_based_pktlog(
8245 	struct cdp_pdev *txrx_pdev_handle,
8246 	char *mac_addr, uint8_t enb_dsb)
8247 {
8248 	struct dp_peer *peer;
8249 	uint8_t local_id;
8250 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev_handle;
8251 
8252 	peer = (struct dp_peer *)dp_find_peer_by_addr(txrx_pdev_handle,
8253 			mac_addr, &local_id);
8254 
8255 	if (!peer) {
8256 		dp_err("Invalid Peer");
8257 		return QDF_STATUS_E_FAILURE;
8258 	}
8259 
8260 	peer->peer_based_pktlog_filter = enb_dsb;
8261 	pdev->dp_peer_based_pktlog = enb_dsb;
8262 
8263 	return QDF_STATUS_SUCCESS;
8264 }
8265 
8266 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
8267 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
8268 /**
8269  * dp_summarize_tag_stats - sums up the given protocol type's counters
8270  * across all the rings and dumps the same
8271  * @pdev_handle: cdp_pdev handle
8272  * @protocol_type: protocol type for which stats should be displayed
8273  *
8274  * Return: total tagged-MSDU count for the given protocol type
8275  */
8276 static uint64_t dp_summarize_tag_stats(struct cdp_pdev *pdev_handle,
8277 				       uint16_t protocol_type)
8278 {
8279 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8280 	uint8_t ring_idx;
8281 	uint64_t total_tag_cnt = 0;
8282 
8283 	for (ring_idx = 0; ring_idx < MAX_REO_DEST_RINGS; ring_idx++) {
8284 		total_tag_cnt +=
8285 		pdev->reo_proto_tag_stats[ring_idx][protocol_type].tag_ctr;
8286 	}
8287 	total_tag_cnt += pdev->rx_err_proto_tag_stats[protocol_type].tag_ctr;
8288 	DP_PRINT_STATS("ProtoID: %d, Tag: %u Tagged MSDU cnt: %llu",
8289 		       protocol_type,
8290 		       pdev->rx_proto_tag_map[protocol_type].tag,
8291 		       total_tag_cnt);
8292 	return total_tag_cnt;
8293 }
8294 
8295 /**
8296  * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
8297  * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
8298  * @pdev_handle: cdp_pdev handle
8299  * @protocol_type: protocol type for which stats should be displayed
8300  *
8301  * Return: none
8302  */
8303 static void
8304 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_pdev *pdev_handle,
8305 				   uint16_t protocol_type)
8306 {
8307 	uint16_t proto_idx;
8308 
8309 	if (protocol_type != RX_PROTOCOL_TAG_ALL &&
8310 	    protocol_type >= RX_PROTOCOL_TAG_MAX) {
8311 		DP_PRINT_STATS("Invalid protocol type : %u", protocol_type);
8312 		return;
8313 	}
8314 
8315 	/* protocol_type in [0 ... RX_PROTOCOL_TAG_MAX) */
8316 	if (protocol_type != RX_PROTOCOL_TAG_ALL) {
8317 		dp_summarize_tag_stats(pdev_handle, protocol_type);
8318 		return;
8319 	}
8320 
8321 	/* protocol_type == RX_PROTOCOL_TAG_ALL */
8322 	for (proto_idx = 0; proto_idx < RX_PROTOCOL_TAG_MAX; proto_idx++)
8323 		dp_summarize_tag_stats(pdev_handle, proto_idx);
8324 }
8325 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
8326 
8327 /**
8328  * dp_reset_pdev_rx_protocol_tag_stats - resets the stats counters for
8329  * given protocol type
8330  * @pdev_handle: cdp_pdev handle
8331  * @protocol_type: protocol type for which stats should be reset
8332  *
8333  * Return: none
8334  */
8335 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
8336 static void
8337 dp_reset_pdev_rx_protocol_tag_stats(struct cdp_pdev *pdev_handle,
8338 				    uint16_t protocol_type)
8339 {
8340 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8341 	uint8_t ring_idx;
8342 
8343 	for (ring_idx = 0; ring_idx < MAX_REO_DEST_RINGS; ring_idx++)
8344 		pdev->reo_proto_tag_stats[ring_idx][protocol_type].tag_ctr = 0;
8345 	pdev->rx_err_proto_tag_stats[protocol_type].tag_ctr = 0;
8346 }
8347 #else
8348 static void
8349 dp_reset_pdev_rx_protocol_tag_stats(struct cdp_pdev *pdev_handle,
8350 				    uint16_t protocol_type)
8351 {
8352 	/* Stub API */
8353 }
8354 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
8355 
8356 /**
8357  * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
8358  * applied to the desired protocol type packets
8359  * @pdev_handle: cdp_pdev handle
8360  * @enable_rx_protocol_tag: bitmask that indicates which protocol types
8361  * are enabled for tagging; zero disables the feature, non-zero enables it
8363  * @protocol_type: new protocol type for which the tag is being added
8364  * @tag: user configured tag for the new protocol
8365  *
8366  * Return: QDF_STATUS
8367  */
8368 static QDF_STATUS
8369 dp_update_pdev_rx_protocol_tag(struct cdp_pdev *pdev_handle,
8370 			       uint32_t enable_rx_protocol_tag,
8371 			       uint16_t protocol_type,
8372 			       uint16_t tag)
8373 {
8374 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8375 	/*
8376 	 * dynamically enable/disable tagging based on enable_rx_protocol_tag
8377 	 * flag.
8378 	 */
8379 	if (enable_rx_protocol_tag) {
8380 		/* Tagging for one or more protocols has been set by user */
8381 		pdev->is_rx_protocol_tagging_enabled = true;
8382 	} else {
8383 		/*
8384 		 * No protocols being tagged, disable feature till next add
8385 		 * operation
8386 		 */
8387 		pdev->is_rx_protocol_tagging_enabled = false;
8388 	}
8389 
8390 	/* Reset the stats counter across all rings for the given protocol */
8391 	dp_reset_pdev_rx_protocol_tag_stats(pdev_handle, protocol_type);
8392 
8393 	pdev->rx_proto_tag_map[protocol_type].tag = tag;
8394 
8395 	return QDF_STATUS_SUCCESS;
8396 }
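
/*
 * Illustrative usage (a sketch; the protocol id and bitmask below are
 * placeholders, not values from the real enum): tag protocol type 2
 * with user tag 0xaa and enable the feature,
 *
 *	dp_update_pdev_rx_protocol_tag(pdev_handle, 0x4, 2, 0xaa);
 *
 * Passing 0 as the bitmask disables tagging until the next add.
 */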
8397 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
8398 
8399 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
8400 					   uint32_t max_peers,
8401 					   uint32_t max_ast_index,
8402 					   bool peer_map_unmap_v2)
8403 {
8404 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8405 
8406 	soc->max_peers = max_peers;
8407 
8408 	qdf_print("%s max_peers %u, max_ast_index: %u\n",
8409 		   __func__, max_peers, max_ast_index);
8410 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
8411 
8412 	if (dp_peer_find_attach(soc))
8413 		return QDF_STATUS_E_FAILURE;
8414 
8415 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
8416 
8417 	return QDF_STATUS_SUCCESS;
8418 }
8419 
8420 /**
8421  * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
8422  * @dp_pdev: dp pdev handle
8423  * @ctrl_pdev: UMAC ctrl pdev handle
8424  *
8425  * Return: void
8426  */
8427 static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
8428 				  struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
8429 {
8430 	struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
8431 
8432 	pdev->ctrl_pdev = ctrl_pdev;
8433 }
8434 
8435 static void dp_set_rate_stats_cap(struct cdp_soc_t *soc_hdl,
8436 				  uint8_t val)
8437 {
8438 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8439 
8440 	soc->wlanstats_enabled = val;
8441 }
8442 
8443 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
8444 				      void *stats_ctx)
8445 {
8446 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8447 
8448 	soc->rate_stats_ctx = stats_ctx;
8449 }
8450 
8451 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8452 static void dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
8453 				    struct cdp_pdev *pdev_hdl)
8454 {
8455 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8456 	struct dp_soc *soc = (struct dp_soc *)pdev->soc;
8457 	struct dp_vdev *vdev = NULL;
8458 	struct dp_peer *peer = NULL;
8459 
8460 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
8461 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
8462 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
8463 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
8464 			if (peer)
8465 				dp_wdi_event_handler(
8466 					WDI_EVENT_FLUSH_RATE_STATS_REQ,
8467 					pdev->soc, peer->wlanstats_ctx,
8468 					peer->peer_ids[0],
8469 					WDI_NO_VAL, pdev->pdev_id);
8470 		}
8471 	}
8472 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
8473 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
8474 }
8475 #else
8476 static inline void
8477 dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
8478 			struct cdp_pdev *pdev_hdl)
8479 {
8480 }
8481 #endif
8482 
8483 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8484 static void dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
8485 				     struct cdp_pdev *pdev_handle,
8486 				     void *buf)
8487 {
8488 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8489 
8490 	dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
8491 			      pdev->soc, buf, HTT_INVALID_PEER,
8492 			      WDI_NO_VAL, pdev->pdev_id);
8493 }
8494 #else
8495 static inline void
8496 dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
8497 			 struct cdp_pdev *pdev_handle,
8498 			 void *buf)
8499 {
8500 }
8501 #endif
8502 
8503 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
8504 {
8505 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8506 
8507 	return soc->rate_stats_ctx;
8508 }
8509 
8510 /*
8511  * dp_get_cfg() - get dp cfg
8512  * @soc: cdp soc handle
8513  * @cfg: cfg enum
8514  *
8515  * Return: cfg value
8516  */
8517 static uint32_t dp_get_cfg(void *soc, enum cdp_dp_cfg cfg)
8518 {
8519 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
8520 	uint32_t value = 0;
8521 
8522 	switch (cfg) {
8523 	case cfg_dp_enable_data_stall:
8524 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
8525 		break;
8526 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
8527 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
8528 		break;
8529 	case cfg_dp_tso_enable:
8530 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
8531 		break;
8532 	case cfg_dp_lro_enable:
8533 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
8534 		break;
8535 	case cfg_dp_gro_enable:
8536 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
8537 		break;
8538 	case cfg_dp_tx_flow_start_queue_offset:
8539 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
8540 		break;
8541 	case cfg_dp_tx_flow_stop_queue_threshold:
8542 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
8543 		break;
8544 	case cfg_dp_disable_intra_bss_fwd:
8545 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
8546 		break;
8547 	default:
8548 		value =  0;
8549 	}
8550 
8551 	return value;
8552 }
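
/*
 * Illustrative usage (a sketch): gate an RX delivery decision on the
 * configured GRO capability,
 *
 *	if (dp_get_cfg(soc, cfg_dp_gro_enable))
 *		... deliver via the GRO path ...
 *
 * Unknown cfg enums fall through to the default case and read as 0.
 */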
8553 
8554 #ifdef CONFIG_WIN
8555 /**
8556  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
8557  * @pdev_hdl: datapath pdev handle
8558  * @param: ol ath params
8559  * @value: value of the flag
8560  * @buff: Buffer to be passed
8561  *
8562  * This function mirrors the legacy implementation, in which a single
8563  * function is used both to display stats and to update pdev params.
8564  *
8565  * Return: 0 for success. nonzero for failure.
8566  */
8567 static uint32_t dp_tx_flow_ctrl_configure_pdev(void *pdev_handle,
8568 					       enum _ol_ath_param_t param,
8569 					       uint32_t value, void *buff)
8570 {
8571 	struct dp_soc *soc = NULL;
8572 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8573 
8574 	if (qdf_unlikely(!pdev))
8575 		return 1;
8576 
8577 	soc = pdev->soc;
8578 	if (!soc)
8579 		return 1;
8580 
8581 	switch (param) {
8582 	case OL_ATH_PARAM_VIDEO_DELAY_STATS_FC:
8583 		if (value)
8584 			pdev->delay_stats_flag = true;
8585 		else
8586 			pdev->delay_stats_flag = false;
8587 		break;
8588 	case OL_ATH_PARAM_VIDEO_STATS_FC:
8589 		qdf_print("------- TID Stats ------\n");
8590 		dp_pdev_print_tid_stats(pdev);
8591 		qdf_print("------ Delay Stats ------\n");
8592 		dp_pdev_print_delay_stats(pdev);
8593 		break;
8594 	case OL_ATH_PARAM_TOTAL_Q_SIZE:
8595 		{
8596 			uint32_t tx_min, tx_max;
8597 
8598 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
8599 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
8600 
8601 			if (!buff) {
8602 				if ((value >= tx_min) && (value <= tx_max)) {
8603 					pdev->num_tx_allowed = value;
8604 				} else {
8605 					QDF_TRACE(QDF_MODULE_ID_DP,
8606 						  QDF_TRACE_LEVEL_INFO,
8607 						  "Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
8608 						  tx_min, tx_max);
8609 					break;
8610 				}
8611 			} else {
8612 				*(int *)buff = pdev->num_tx_allowed;
8613 			}
8614 		}
8615 		break;
8616 	default:
8617 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8618 			  "%s: not handled param %d ", __func__, param);
8619 		break;
8620 	}
8621 
8622 	return 0;
8623 }
8624 #endif
8625 
8626 /**
8627  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
8628  * @pdev_handle: DP_PDEV handle
8629  * @pcp: pcp value
8630  * @tid: tid value passed by the user
8631  *
8632  * Return: QDF_STATUS_SUCCESS on success
8633  */
8634 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
8635 						uint8_t pcp, uint8_t tid)
8636 {
8637 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8638 	struct dp_soc *soc = pdev->soc;
8639 
8640 	soc->pcp_tid_map[pcp] = tid;
8641 
8642 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
8643 	return QDF_STATUS_SUCCESS;
8644 }
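
/*
 * Illustrative usage (a sketch, assuming a valid pdev handle): map
 * 802.1p PCP 6 (typically voice) to TID 6 for the whole pdev,
 *
 *	dp_set_pdev_pcp_tid_map_wifi3(pdev_handle, 6, 6);
 *
 * The mapping is cached in soc->pcp_tid_map and pushed to HW through
 * hal_tx_update_pcp_tid_map().
 */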
8645 
8646 /**
8647  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
8648  * @pdev_handle: DP_PDEV handle
8649  * @prio: tidmap priority value passed by the user
8650  *
8651  * Return: QDF_STATUS_SUCCESS on success
8652  */
8653 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct cdp_pdev *pdev_handle,
8654 						uint8_t prio)
8655 {
8656 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8657 	struct dp_soc *soc = pdev->soc;
8658 
8659 	soc->tidmap_prty = prio;
8660 
8661 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
8662 	return QDF_STATUS_SUCCESS;
8663 }
8664 
8665 /**
8666  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
8667  * @vdev_handle: DP_VDEV handle
8668  * @pcp: pcp value
8669  * @tid: tid value passed by the user
8670  *
8671  * Return: QDF_STATUS_SUCCESS on success
8672  */
8673 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
8674 						uint8_t pcp, uint8_t tid)
8675 {
8676 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8677 
8678 	vdev->pcp_tid_map[pcp] = tid;
8679 
8680 	return QDF_STATUS_SUCCESS;
8681 }
8682 
8683 /**
8684  * dp_set_vdev_tidmap_tbl_id_wifi3(): update tidmap table id in vdev
8685  * @vdev_handle: DP_VDEV handle
8686  * @mapid: map_id value passed by the user
8687  *
8688  * Return: QDF_STATUS_SUCCESS on success
8689  */
8690 static QDF_STATUS dp_set_vdev_tidmap_tbl_id_wifi3(struct cdp_vdev *vdev_handle,
8691 						  uint8_t mapid)
8692 {
8693 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8694 
8695 	vdev->tidmap_tbl_id = mapid;
8696 
8697 	return QDF_STATUS_SUCCESS;
8698 }
8699 
8700 /**
8701  * dp_set_vdev_tidmap_prty_wifi3(): update tidmap priority in vdev
8702  * @vdev_handle: DP_VDEV handle
8703  * @prio: tidmap priority value passed by the user
8704  *
8705  * Return: QDF_STATUS_SUCCESS on success
8706  */
8707 static QDF_STATUS dp_set_vdev_tidmap_prty_wifi3(struct cdp_vdev *vdev_handle,
8708 						uint8_t prio)
8709 {
8710 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8711 
8712 	vdev->tidmap_prty = prio;
8713 
8714 	return QDF_STATUS_SUCCESS;
8715 }
8716 
8717 static struct cdp_cmn_ops dp_ops_cmn = {
8718 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
8719 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
8720 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
8721 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
8722 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
8723 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
8724 	.txrx_peer_create = dp_peer_create_wifi3,
8725 	.txrx_peer_setup = dp_peer_setup_wifi3,
8726 #ifdef FEATURE_AST
8727 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
8728 #else
8729 	.txrx_peer_teardown = NULL,
8730 #endif
8731 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
8732 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
8733 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
8734 	.txrx_peer_get_ast_info_by_pdev =
8735 		dp_peer_get_ast_info_by_pdevid_wifi3,
8736 	.txrx_peer_ast_delete_by_soc =
8737 		dp_peer_ast_entry_del_by_soc,
8738 	.txrx_peer_ast_delete_by_pdev =
8739 		dp_peer_ast_entry_del_by_pdev,
8740 	.txrx_peer_delete = dp_peer_delete_wifi3,
8741 	.txrx_vdev_register = dp_vdev_register_wifi3,
8742 	.txrx_vdev_flush_peers = dp_vdev_flush_peers,
8743 	.txrx_soc_detach = dp_soc_detach_wifi3,
8744 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
8745 	.txrx_soc_init = dp_soc_init_wifi3,
8746 	.txrx_tso_soc_attach = dp_tso_soc_attach,
8747 	.txrx_tso_soc_detach = dp_tso_soc_detach,
8748 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
8749 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
8750 	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
8751 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
8752 	.txrx_ath_getstats = dp_get_device_stats,
8753 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
8754 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
8755 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
8756 	.delba_process = dp_delba_process_wifi3,
8757 	.set_addba_response = dp_set_addba_response,
8758 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
8759 	.flush_cache_rx_queue = NULL,
8760 	/* TODO: get API's for dscp-tid need to be added*/
8761 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
8762 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
8763 	.hmmc_tid_override_en = dp_hmmc_tid_override_en_wifi3,
8764 	.set_hmmc_tid_val = dp_set_hmmc_tid_val_wifi3,
8765 	.txrx_get_total_per = dp_get_total_per,
8766 	.txrx_stats_request = dp_txrx_stats_request,
8767 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
8768 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
8769 	.txrx_get_vow_config_frm_pdev = dp_get_delay_stats_flag,
8770 	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
8771 	.txrx_set_nac = dp_set_nac,
8772 	.txrx_get_tx_pending = dp_get_tx_pending,
8773 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
8774 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
8775 	.display_stats = dp_txrx_dump_stats,
8776 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
8777 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
8778 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
8779 	.txrx_intr_detach = dp_soc_interrupt_detach,
8780 	.set_pn_check = dp_set_pn_check_wifi3,
8781 	.update_config_parameters = dp_update_config_parameters,
8782 	/* TODO: Add other functions */
8783 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
8784 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
8785 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
8786 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
8787 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
8788 	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
8789 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
8790 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
8791 	.tx_send = dp_tx_send,
8792 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
8793 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
8794 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
8795 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
8796 	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
8797 	.txrx_get_os_rx_handles_from_vdev =
8798 					dp_get_os_rx_handles_from_vdev_wifi3,
8799 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
8800 	.get_dp_capabilities = dp_get_cfg_capabilities,
8801 	.txrx_get_cfg = dp_get_cfg,
8802 	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
8803 	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
8804 	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
8805 	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
8806 
8807 	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
8808 	.set_pdev_tidmap_prty = dp_set_pdev_tidmap_prty_wifi3,
8809 	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
8810 	.set_vdev_tidmap_prty = dp_set_vdev_tidmap_prty_wifi3,
8811 	.set_vdev_tidmap_tbl_id = dp_set_vdev_tidmap_tbl_id_wifi3,
8812 
8813 	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
8814 };
8815 
8816 static struct cdp_ctrl_ops dp_ops_ctrl = {
8817 	.txrx_peer_authorize = dp_peer_authorize,
8818 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
8819 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
8820 #ifdef MESH_MODE_SUPPORT
8821 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
8822 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
8823 #endif
8824 	.txrx_set_vdev_param = dp_set_vdev_param,
8825 	.txrx_peer_set_nawds = dp_peer_set_nawds,
8826 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
8827 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
8828 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
8829 	.txrx_update_filter_neighbour_peers =
8830 		dp_update_filter_neighbour_peers,
8831 	.txrx_get_sec_type = dp_get_sec_type,
8832 	/* TODO: Add other functions */
8833 	.txrx_wdi_event_sub = dp_wdi_event_sub,
8834 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
8835 #ifdef WDI_EVENT_ENABLE
8836 	.txrx_get_pldev = dp_get_pldev,
8837 #endif
8838 	.txrx_set_pdev_param = dp_set_pdev_param,
8839 #ifdef ATH_SUPPORT_NAC_RSSI
8840 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
8841 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
8842 #endif
8843 	.set_key = dp_set_michael_key,
8844 	.txrx_get_vdev_param = dp_get_vdev_param,
8845 	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
8846 	.calculate_delay_stats = dp_calculate_delay_stats,
8847 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
8848 	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
8849 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
8850 	.txrx_dump_pdev_rx_protocol_tag_stats =
8851 				dp_dump_pdev_rx_protocol_tag_stats,
8852 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
8853 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
8854 };
8855 
8856 static struct cdp_me_ops dp_ops_me = {
8857 #ifdef ATH_SUPPORT_IQUE
8858 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
8859 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
8860 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
8861 #endif
8862 };
8863 
8864 static struct cdp_mon_ops dp_ops_mon = {
8865 	.txrx_monitor_set_filter_ucast_data = NULL,
8866 	.txrx_monitor_set_filter_mcast_data = NULL,
8867 	.txrx_monitor_set_filter_non_data = NULL,
8868 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
8869 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
8870 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
8871 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
8872 	/* Added support for HK advance filter */
8873 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
8874 };
8875 
8876 static struct cdp_host_stats_ops dp_ops_host_stats = {
8877 	.txrx_per_peer_stats = dp_get_host_peer_stats,
8878 	.get_fw_peer_stats = dp_get_fw_peer_stats,
8879 	.get_htt_stats = dp_get_htt_stats,
8880 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
8881 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
8882 	.txrx_stats_publish = dp_txrx_stats_publish,
8883 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
8884 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
8885 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
8886 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
8887 	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
8888 	.configure_rate_stats = dp_set_rate_stats_cap,
8889 	/* TODO */
8890 };
8891 
8892 static struct cdp_raw_ops dp_ops_raw = {
8893 	/* TODO */
8894 };
8895 
8896 #ifdef CONFIG_WIN
8897 static struct cdp_pflow_ops dp_ops_pflow = {
8898 	dp_tx_flow_ctrl_configure_pdev,
8899 };
8900 #endif /* CONFIG_WIN */
8901 
8902 #ifdef FEATURE_RUNTIME_PM
8903 /**
8904  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
8905  * @opaque_pdev: DP pdev context
8906  *
8907  * DP is ready to runtime suspend if there are no pending TX packets.
8908  *
8909  * Return: QDF_STATUS
8910  */
8911 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
8912 {
8913 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
8914 	struct dp_soc *soc = pdev->soc;
8915 
8916 	/* Abort if there are any pending TX packets */
8917 	if (dp_get_tx_pending(opaque_pdev) > 0) {
8918 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8919 			  FL("Abort suspend due to pending TX packets"));
8920 		return QDF_STATUS_E_AGAIN;
8921 	}
8922 
8923 	if (soc->intr_mode == DP_INTR_POLL)
8924 		qdf_timer_stop(&soc->int_timer);
8925 
8926 	return QDF_STATUS_SUCCESS;
8927 }
8928 
8929 /**
8930  * dp_runtime_resume() - ensure DP is ready to runtime resume
8931  * @opaque_pdev: DP pdev context
8932  *
8933  * Resume DP for runtime PM.
8934  *
8935  * Return: QDF_STATUS
8936  */
8937 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
8938 {
8939 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
8940 	struct dp_soc *soc = pdev->soc;
8941 	void *hal_srng;
8942 	int i;
8943 
8944 	if (soc->intr_mode == DP_INTR_POLL)
8945 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
8946 
8947 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
8948 		hal_srng = soc->tcl_data_ring[i].hal_srng;
8949 		if (hal_srng) {
8950 			/* We actually only need to acquire the lock */
8951 			hal_srng_access_start(soc->hal_soc, hal_srng);
			/*
			 * Update SRC ring head pointer for HW to send
			 * all pending packets
			 */
8954 			hal_srng_access_end(soc->hal_soc, hal_srng);
8955 		}
8956 	}
8957 
8958 	return QDF_STATUS_SUCCESS;
8959 }
8960 #endif /* FEATURE_RUNTIME_PM */
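
/*
 * Illustrative call flow (sketch; cdp wrapper names assumed): a bus
 * layer reaches the two handlers above through dp_ops_misc below:
 *
 *	status = cdp_runtime_suspend(soc, pdev);
 *	if (status == QDF_STATUS_E_AGAIN)
 *		retry runtime PM later, once pending TX has drained;
 *	...
 *	cdp_runtime_resume(soc, pdev);
 */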
8961 
8962 /**
8963  * dp_tx_get_success_ack_stats() - get tx success completion count
 * @pdev: dp pdev context
 * @vdev_id: vdev identifier
8966  *
8967  * Return: tx success ack count
8968  */
8969 static uint32_t dp_tx_get_success_ack_stats(struct cdp_pdev *pdev,
8970 					    uint8_t vdev_id)
8971 {
8972 	struct dp_vdev *vdev =
8973 		(struct dp_vdev *)dp_get_vdev_from_vdev_id_wifi3(pdev,
8974 								 vdev_id);
8975 	struct dp_soc *soc = ((struct dp_pdev *)pdev)->soc;
8976 	struct cdp_vdev_stats *vdev_stats = NULL;
8977 	uint32_t tx_success;
8978 
8979 	if (!vdev) {
8980 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8981 			  FL("Invalid vdev id %d"), vdev_id);
8982 		return 0;
8983 	}
8984 
8985 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
8986 	if (!vdev_stats) {
8987 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "DP alloc failure - unable to allocate vdev stats");
8989 		return 0;
8990 	}
8991 
8992 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
8993 	dp_aggregate_vdev_stats(vdev, vdev_stats);
8994 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
8995 
8996 	tx_success = vdev_stats->tx.tx_success.num;
8997 	qdf_mem_free(vdev_stats);
8998 
8999 	return tx_success;
9000 }
9001 
9002 #ifndef CONFIG_WIN
9003 static struct cdp_misc_ops dp_ops_misc = {
9004 #ifdef FEATURE_WLAN_TDLS
9005 	.tx_non_std = dp_tx_non_std,
9006 #endif /* FEATURE_WLAN_TDLS */
9007 	.get_opmode = dp_get_opmode,
9008 #ifdef FEATURE_RUNTIME_PM
9009 	.runtime_suspend = dp_runtime_suspend,
9010 	.runtime_resume = dp_runtime_resume,
9011 #endif /* FEATURE_RUNTIME_PM */
9012 	.pkt_log_init = dp_pkt_log_init,
9013 	.pkt_log_con_service = dp_pkt_log_con_service,
9014 	.get_num_rx_contexts = dp_get_num_rx_contexts,
9015 	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
9016 };
9017 
9018 static struct cdp_flowctl_ops dp_ops_flowctl = {
9019 	/* WIFI 3.0 DP implement as required. */
9020 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
9021 	.flow_pool_map_handler = dp_tx_flow_pool_map,
9022 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
9023 	.register_pause_cb = dp_txrx_register_pause_cb,
9024 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
9025 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
9026 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
9027 };
9028 
9029 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
9030 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9031 };
9032 
9033 #ifdef IPA_OFFLOAD
9034 static struct cdp_ipa_ops dp_ops_ipa = {
9035 	.ipa_get_resource = dp_ipa_get_resource,
9036 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
9037 	.ipa_op_response = dp_ipa_op_response,
9038 	.ipa_register_op_cb = dp_ipa_register_op_cb,
9039 	.ipa_get_stat = dp_ipa_get_stat,
9040 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
9041 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
9042 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
9043 	.ipa_setup = dp_ipa_setup,
9044 	.ipa_cleanup = dp_ipa_cleanup,
9045 	.ipa_setup_iface = dp_ipa_setup_iface,
9046 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
9047 	.ipa_enable_pipes = dp_ipa_enable_pipes,
9048 	.ipa_disable_pipes = dp_ipa_disable_pipes,
9049 	.ipa_set_perf_level = dp_ipa_set_perf_level,
9050 	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd
9051 };
9052 #endif
9053 
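/**
 * dp_bus_suspend() - prepare DP for bus suspend
 * @opaque_pdev: DP pdev context
 *
 * Polls every 50 ms, up to SUSPEND_DRAIN_WAIT ms in total, for pending
 * TX packets to drain; stops the interrupt poll timer once drained.
 *
 * Return: QDF_STATUS
 */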
9054 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
9055 {
9056 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9057 	struct dp_soc *soc = pdev->soc;
9058 	int timeout = SUSPEND_DRAIN_WAIT;
9059 	int drain_wait_delay = 50; /* 50 ms */
9060 
9061 	/* Abort if there are any pending TX packets */
9062 	while (dp_get_tx_pending(opaque_pdev) > 0) {
9063 		qdf_sleep(drain_wait_delay);
9064 		if (timeout <= 0) {
9065 			dp_err("TX frames are pending, abort suspend");
9066 			return QDF_STATUS_E_TIMEOUT;
9067 		}
		timeout -= drain_wait_delay;
9069 	}
9070 
9071 	if (soc->intr_mode == DP_INTR_POLL)
9072 		qdf_timer_stop(&soc->int_timer);
9073 
9074 	return QDF_STATUS_SUCCESS;
9075 }
9076 
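/**
 * dp_bus_resume() - resume DP after bus resume
 * @opaque_pdev: DP pdev context
 *
 * Restarts the interrupt poll timer if interrupts are in poll mode.
 *
 * Return: QDF_STATUS
 */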
9077 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
9078 {
9079 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9080 	struct dp_soc *soc = pdev->soc;
9081 
9082 	if (soc->intr_mode == DP_INTR_POLL)
9083 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
9084 
9085 	return QDF_STATUS_SUCCESS;
9086 }
9087 
9088 static struct cdp_bus_ops dp_ops_bus = {
9089 	.bus_suspend = dp_bus_suspend,
9090 	.bus_resume = dp_bus_resume
9091 };
9092 
9093 static struct cdp_ocb_ops dp_ops_ocb = {
9094 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9095 };
9096 
9098 static struct cdp_throttle_ops dp_ops_throttle = {
9099 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9100 };
9101 
9102 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
9103 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9104 };
9105 
9106 static struct cdp_cfg_ops dp_ops_cfg = {
9107 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9108 };
9109 
9110 /*
 * dp_peer_get_ref_find_by_addr - find peer by mac address and take a ref
9112  * @dev: physical device instance
9113  * @peer_mac_addr: peer mac address
9114  * @local_id: local id for the peer
 * @debug_id: debug id to track peer access (enum peer_debug_id_type)
9116  *
9117  * Return: peer instance pointer
9118  */
9119 static inline void *
9120 dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
9121 			     uint8_t *local_id,
9122 			     enum peer_debug_id_type debug_id)
9123 {
9124 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
9125 	struct dp_peer *peer;
9126 
9127 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
9128 
9129 	if (!peer)
9130 		return NULL;
9131 
9132 	*local_id = peer->local_id;
9133 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
9134 
9135 	return peer;
9136 }
9137 
9138 /*
9139  * dp_peer_release_ref - release peer ref count
9140  * @peer: peer handle
 * @debug_id: debug id to track peer access (enum peer_debug_id_type)
9142  *
9143  * Return: None
9144  */
9145 static inline
9146 void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
9147 {
9148 	dp_peer_unref_delete(peer);
9149 }
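
/*
 * Usage sketch (illustrative): a reference taken through
 * dp_peer_get_ref_find_by_addr() must be dropped with
 * dp_peer_release_ref() once the caller is done with the peer:
 *
 *	peer = dp_peer_get_ref_find_by_addr(dev, mac, &local_id, dbg_id);
 *	if (peer) {
 *		... use peer ...
 *		dp_peer_release_ref(peer, dbg_id);
 *	}
 */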
9150 
9151 static struct cdp_peer_ops dp_ops_peer = {
9152 	.register_peer = dp_register_peer,
9153 	.clear_peer = dp_clear_peer,
9154 	.find_peer_by_addr = dp_find_peer_by_addr,
9155 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
9156 	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
9157 	.peer_release_ref = dp_peer_release_ref,
9158 	.local_peer_id = dp_local_peer_id,
9159 	.peer_find_by_local_id = dp_peer_find_by_local_id,
9160 	.peer_state_update = dp_peer_state_update,
9161 	.get_vdevid = dp_get_vdevid,
9162 	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
9163 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
9164 	.get_vdev_for_peer = dp_get_vdev_for_peer,
9165 	.get_peer_state = dp_get_peer_state,
9166 };
9167 #endif
9168 
9169 static struct cdp_ops dp_txrx_ops = {
9170 	.cmn_drv_ops = &dp_ops_cmn,
9171 	.ctrl_ops = &dp_ops_ctrl,
9172 	.me_ops = &dp_ops_me,
9173 	.mon_ops = &dp_ops_mon,
9174 	.host_stats_ops = &dp_ops_host_stats,
9175 	.wds_ops = &dp_ops_wds,
9176 	.raw_ops = &dp_ops_raw,
9177 #ifdef CONFIG_WIN
9178 	.pflow_ops = &dp_ops_pflow,
9179 #endif /* CONFIG_WIN */
9180 #ifndef CONFIG_WIN
9181 	.misc_ops = &dp_ops_misc,
9182 	.cfg_ops = &dp_ops_cfg,
9183 	.flowctl_ops = &dp_ops_flowctl,
9184 	.l_flowctl_ops = &dp_ops_l_flowctl,
9185 #ifdef IPA_OFFLOAD
9186 	.ipa_ops = &dp_ops_ipa,
9187 #endif
9188 	.bus_ops = &dp_ops_bus,
9189 	.ocb_ops = &dp_ops_ocb,
9190 	.peer_ops = &dp_ops_peer,
9191 	.throttle_ops = &dp_ops_throttle,
9192 	.mob_stats_ops = &dp_ops_mob_stats,
9193 #endif
9194 };
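
/*
 * Note (illustrative): dp_soc_attach() registers this table through
 * soc->cdp_soc.ops, so converged cdp callers dispatch into DP via these
 * function pointers; e.g. a wrapper resolving
 * soc->ops->cmn_drv_ops->tx_send() lands in dp_tx_send().
 */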
9195 
9196 /*
 * dp_soc_set_txrx_ring_map() - fill the per-context TX ring map
 * @soc: DP SoC handle
9199  *
9200  * Return: Void
9201  */
9202 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
9203 {
	uint32_t i;

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
9208 }
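
/*
 * Example (assumed semantics of dp_cpu_ring_map): with the
 * DP_NSS_DEFAULT_MAP row, interrupt context i is mapped to TCL data
 * ring dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i], so TX issued from a
 * given context consistently targets the same hardware ring.
 */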
9209 
9210 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018)
9211 
9212 #ifndef QCA_MEM_ATTACH_ON_WIFI3
9213 
9214 /**
9215  * dp_soc_attach_wifi3() - Attach txrx SOC
9216  * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
9219  * @qdf_osdev: QDF device
9220  * @ol_ops: Offload Operations
9221  * @device_id: Device ID
9222  *
9223  * Return: DP SOC handle on success, NULL on failure
9224  */
9225 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
9226 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9227 			  struct ol_if_ops *ol_ops, uint16_t device_id)
9228 {
	struct dp_soc *dp_soc = NULL;
9230 
9231 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
9232 			       ol_ops, device_id);
9233 	if (!dp_soc)
9234 		return NULL;
9235 
9236 	if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
9237 		return NULL;
9238 
9239 	return (void *)dp_soc;
9240 }
9241 #else
9242 
9243 /**
9244  * dp_soc_attach_wifi3() - Attach txrx SOC
9245  * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
9248  * @qdf_osdev: QDF device
9249  * @ol_ops: Offload Operations
9250  * @device_id: Device ID
9251  *
9252  * Return: DP SOC handle on success, NULL on failure
9253  */
9254 void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
9255 			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9256 			  struct ol_if_ops *ol_ops, uint16_t device_id)
9257 {
9258 	struct dp_soc *dp_soc = NULL;
9259 
9260 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
9261 			       ol_ops, device_id);
9262 	return (void *)dp_soc;
9263 }
9264 
9265 #endif
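
/*
 * Bring-up sketch: without QCA_MEM_ATTACH_ON_WIFI3, both phases run
 * from dp_soc_attach_wifi3() (dp_soc_attach() followed by
 * dp_soc_init()); with it, only the attach phase runs here and the
 * caller completes initialization later via dp_soc_init_wifi3().
 */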
9266 
9267 /**
9268  * dp_soc_attach() - Attach txrx SOC
9269  * @ctrl_psoc: Opaque SOC handle from control plane
9270  * @htc_handle: Opaque HTC handle
9271  * @qdf_osdev: QDF device
9272  * @ol_ops: Offload Operations
9273  * @device_id: Device ID
9274  *
9275  * Return: DP SOC handle on success, NULL on failure
9276  */
9277 static struct dp_soc *
9278 dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9279 	      struct ol_if_ops *ol_ops, uint16_t device_id)
9280 {
	struct dp_soc *soc = NULL;
9283 	struct htt_soc *htt_soc = NULL;
9284 
9285 	soc = qdf_mem_malloc(sizeof(*soc));
9286 
9287 	if (!soc) {
9288 		dp_err("DP SOC memory allocation failed");
9289 		goto fail0;
9290 	}
9291 
9293 	soc->device_id = device_id;
9294 	soc->cdp_soc.ops = &dp_txrx_ops;
9295 	soc->cdp_soc.ol_ops = ol_ops;
9296 	soc->ctrl_psoc = ctrl_psoc;
9297 	soc->osdev = qdf_osdev;
9298 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
9299 
9300 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
9301 	if (!soc->wlan_cfg_ctx) {
		dp_err("wlan_cfg_soc_attach failed");
9303 		goto fail1;
9304 	}
9305 	htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
9306 	if (!htt_soc) {
		dp_err("HTT soc allocation failed");
9308 		goto fail1;
9309 	}
9310 	soc->htt_handle = htt_soc;
9311 	htt_soc->dp_soc = soc;
9312 	htt_soc->htc_soc = htc_handle;
9313 
9314 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
9315 		goto fail2;
9316 
	return soc;
9318 fail2:
9319 	qdf_mem_free(htt_soc);
9320 fail1:
9321 	qdf_mem_free(soc);
9322 fail0:
9323 	return NULL;
9324 }
9325 
9326 /**
9327  * dp_soc_init() - Initialize txrx SOC
 * @dpsoc: Opaque DP SOC handle
9329  * @htc_handle: Opaque HTC handle
9330  * @hif_handle: Opaque HIF handle
9331  *
9332  * Return: DP SOC handle on success, NULL on failure
9333  */
9334 void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle)
9335 {
9336 	int target_type;
9337 	struct dp_soc *soc = (struct dp_soc *)dpsoc;
9338 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
9339 
9340 	htt_soc->htc_soc = htc_handle;
9341 	soc->hif_handle = hif_handle;
9342 
9343 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
9344 	if (!soc->hal_soc)
9345 		return NULL;
9346 
9347 	htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc, htt_soc->htc_soc,
9348 			   soc->hal_soc, soc->osdev);
9349 	target_type = hal_get_target_type(soc->hal_soc);
9350 	switch (target_type) {
9351 	case TARGET_TYPE_QCA6290:
9352 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9353 					       REO_DST_RING_SIZE_QCA6290);
9354 		soc->ast_override_support = 1;
9355 		soc->da_war_enabled = false;
9356 		break;
9357 #ifdef QCA_WIFI_QCA6390
9358 	case TARGET_TYPE_QCA6390:
9359 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9360 					       REO_DST_RING_SIZE_QCA6290);
9361 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
9362 		soc->ast_override_support = 1;
9363 		if (con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
9364 			int int_ctx;
9365 
9366 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
9367 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
9368 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
9369 			}
9370 		}
9371 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
9372 		break;
9373 #endif
9374 	case TARGET_TYPE_QCA8074:
9375 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9376 					       REO_DST_RING_SIZE_QCA8074);
9377 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
9378 		soc->hw_nac_monitor_support = 1;
9379 		soc->da_war_enabled = true;
9380 		break;
9381 	case TARGET_TYPE_QCA8074V2:
9382 	case TARGET_TYPE_QCA6018:
9383 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9384 					       REO_DST_RING_SIZE_QCA8074);
9385 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
9386 		soc->hw_nac_monitor_support = 1;
9387 		soc->ast_override_support = 1;
9388 		soc->per_tid_basize_max_tid = 8;
9389 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
9390 		soc->da_war_enabled = false;
9391 		break;
9392 	default:
9393 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
9394 		qdf_assert_always(0);
9395 		break;
9396 	}
9397 
9398 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
9399 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
9400 	soc->cce_disable = false;
9401 
9402 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
9403 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
9404 				CDP_CFG_MAX_PEER_ID);
9405 
9406 		if (ret != -EINVAL) {
9407 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
9408 		}
9409 
9410 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
9411 				CDP_CFG_CCE_DISABLE);
9412 		if (ret == 1)
9413 			soc->cce_disable = true;
9414 	}
9415 
9416 	qdf_spinlock_create(&soc->peer_ref_mutex);
9417 	qdf_spinlock_create(&soc->ast_lock);
9418 
9419 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
9420 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
9421 
9422 	/* fill the tx/rx cpu ring map*/
9423 	dp_soc_set_txrx_ring_map(soc);
9424 
9425 	qdf_spinlock_create(&soc->htt_stats.lock);
9426 	/* initialize work queue for stats processing */
9427 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
9428 
9429 	return soc;
9431 }
9432 
9433 /**
9434  * dp_soc_init_wifi3() - Initialize txrx SOC
 * @dpsoc: Opaque DP SOC handle
 * @ctrl_psoc: Opaque SOC handle from control plane (Unused)
9437  * @hif_handle: Opaque HIF handle
9438  * @htc_handle: Opaque HTC handle
9439  * @qdf_osdev: QDF device (Unused)
9440  * @ol_ops: Offload Operations (Unused)
9441  * @device_id: Device ID (Unused)
9442  *
9443  * Return: DP SOC handle on success, NULL on failure
9444  */
9445 void *dp_soc_init_wifi3(void *dpsoc, void *ctrl_psoc, void *hif_handle,
9446 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9447 			struct ol_if_ops *ol_ops, uint16_t device_id)
9448 {
9449 	return dp_soc_init(dpsoc, htc_handle, hif_handle);
9450 }
9451 
9452 #endif
9453 
9454 /*
 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
9456  *
9457  * @soc: handle to DP soc
9458  * @mac_id: MAC id
9459  *
9460  * Return: Return pdev corresponding to MAC
9461  */
9462 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
9463 {
9464 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
9465 		return soc->pdev_list[mac_id];
9466 
	/* Typically for MCL, as there is only 1 PDEV */
9468 	return soc->pdev_list[0];
9469 }
9470 
9471 /*
 * dp_is_hw_dbs_enable() - Check if DBS is supported; if not, clamp
 *			the number of MAC rings to 1
 * @soc:		DP SoC context
 * @max_mac_rings:	Pointer to the number of MAC rings
9475  *
9476  * Return: None
9477  */
9478 static
9479 void dp_is_hw_dbs_enable(struct dp_soc *soc,
9480 				int *max_mac_rings)
9481 {
	bool dbs_enable = false;

	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
		dbs_enable = soc->cdp_soc.ol_ops->
			is_hw_dbs_2x2_capable(soc->ctrl_psoc);

	*max_mac_rings = dbs_enable ? *max_mac_rings : 1;
9488 }
9489 
9490 /*
 * dp_is_soc_reinit() - Check if soc reinit is true
 * @soc: DP SoC context
 *
 * Return: true or false
9495 */
9496 bool dp_is_soc_reinit(struct dp_soc *soc)
9497 {
9498 	return soc->dp_soc_reinit;
9499 }
9500 
9501 /*
 * dp_set_pktlog_wifi3() - enable/disable packet log for a WDI event
 * @pdev: Datapath PDEV handle
 * @event: WDI event whose packet logging is being configured
 * @enable: true to subscribe, false to unsubscribe
 *
 * Return: 0 on success
9508 */
9509 #ifdef WDI_EVENT_ENABLE
9510 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
9511 		bool enable)
9512 {
9513 	struct dp_soc *soc = NULL;
9514 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
9515 	int max_mac_rings = wlan_cfg_get_num_mac_rings
9516 					(pdev->wlan_cfg_ctx);
9517 	uint8_t mac_id = 0;
9518 
9519 	soc = pdev->soc;
9520 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
9521 
9522 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			FL("Max_mac_rings %d"),
9524 			max_mac_rings);
9525 
9526 	if (enable) {
9527 		switch (event) {
9528 		case WDI_EVENT_RX_DESC:
9529 			if (pdev->monitor_vdev) {
9530 				/* Nothing needs to be done if monitor mode is
9531 				 * enabled
9532 				 */
9533 				return 0;
9534 			}
9535 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
9536 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
9537 				htt_tlv_filter.mpdu_start = 1;
9538 				htt_tlv_filter.msdu_start = 1;
9539 				htt_tlv_filter.msdu_end = 1;
9540 				htt_tlv_filter.mpdu_end = 1;
9541 				htt_tlv_filter.packet_header = 1;
9542 				htt_tlv_filter.attention = 1;
9543 				htt_tlv_filter.ppdu_start = 1;
9544 				htt_tlv_filter.ppdu_end = 1;
9545 				htt_tlv_filter.ppdu_end_user_stats = 1;
9546 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
9547 				htt_tlv_filter.ppdu_end_status_done = 1;
9548 				htt_tlv_filter.enable_fp = 1;
9549 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
9550 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
9551 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
9552 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
9553 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
9554 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
9555 				htt_tlv_filter.offset_valid = false;
9556 
9557 				for (mac_id = 0; mac_id < max_mac_rings;
9558 								mac_id++) {
9559 					int mac_for_pdev =
9560 						dp_get_mac_id_for_pdev(mac_id,
9561 								pdev->pdev_id);
9562 
9563 					htt_h2t_rx_ring_cfg(soc->htt_handle,
9564 					 mac_for_pdev,
9565 					 pdev->rxdma_mon_status_ring[mac_id]
9566 					 .hal_srng,
9567 					 RXDMA_MONITOR_STATUS,
9568 					 RX_BUFFER_SIZE,
9569 					 &htt_tlv_filter);
9570 
9571 				}
9572 
9573 				if (soc->reap_timer_init)
9574 					qdf_timer_mod(&soc->mon_reap_timer,
9575 					DP_INTR_POLL_TIMER_MS);
9576 			}
9577 			break;
9578 
9579 		case WDI_EVENT_LITE_RX:
9580 			if (pdev->monitor_vdev) {
9581 				/* Nothing needs to be done if monitor mode is
9582 				 * enabled
9583 				 */
9584 				return 0;
9585 			}
9586 
9587 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
9588 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
9589 
9590 				htt_tlv_filter.ppdu_start = 1;
9591 				htt_tlv_filter.ppdu_end = 1;
9592 				htt_tlv_filter.ppdu_end_user_stats = 1;
9593 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
9594 				htt_tlv_filter.ppdu_end_status_done = 1;
9595 				htt_tlv_filter.mpdu_start = 1;
9596 				htt_tlv_filter.enable_fp = 1;
9597 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
9598 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
9599 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
9600 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
9601 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
9602 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
9603 				htt_tlv_filter.offset_valid = false;
9604 
9605 				for (mac_id = 0; mac_id < max_mac_rings;
9606 								mac_id++) {
9607 					int mac_for_pdev =
9608 						dp_get_mac_id_for_pdev(mac_id,
9609 								pdev->pdev_id);
9610 
9611 					htt_h2t_rx_ring_cfg(soc->htt_handle,
9612 					mac_for_pdev,
9613 					pdev->rxdma_mon_status_ring[mac_id]
9614 					.hal_srng,
9615 					RXDMA_MONITOR_STATUS,
9616 					RX_BUFFER_SIZE_PKTLOG_LITE,
9617 					&htt_tlv_filter);
9618 				}
9619 
9620 				if (soc->reap_timer_init)
9621 					qdf_timer_mod(&soc->mon_reap_timer,
9622 					DP_INTR_POLL_TIMER_MS);
9623 			}
9624 			break;
9625 
9626 		case WDI_EVENT_LITE_T2H:
9627 			if (pdev->monitor_vdev) {
9628 				/* Nothing needs to be done if monitor mode is
9629 				 * enabled
9630 				 */
9631 				return 0;
9632 			}
9633 
9634 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
9635 				int mac_for_pdev = dp_get_mac_id_for_pdev(
9636 							mac_id,	pdev->pdev_id);
9637 
9638 				pdev->pktlog_ppdu_stats = true;
9639 				dp_h2t_cfg_stats_msg_send(pdev,
9640 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
9641 					mac_for_pdev);
9642 			}
9643 			break;
9644 
9645 		default:
9646 			/* Nothing needs to be done for other pktlog types */
9647 			break;
9648 		}
9649 	} else {
9650 		switch (event) {
9651 		case WDI_EVENT_RX_DESC:
9652 		case WDI_EVENT_LITE_RX:
9653 			if (pdev->monitor_vdev) {
9654 				/* Nothing needs to be done if monitor mode is
9655 				 * enabled
9656 				 */
9657 				return 0;
9658 			}
9659 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
9660 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
9661 
9662 				for (mac_id = 0; mac_id < max_mac_rings;
9663 								mac_id++) {
9664 					int mac_for_pdev =
9665 						dp_get_mac_id_for_pdev(mac_id,
9666 								pdev->pdev_id);
9667 
9668 					htt_h2t_rx_ring_cfg(soc->htt_handle,
9669 					  mac_for_pdev,
9670 					  pdev->rxdma_mon_status_ring[mac_id]
9671 					  .hal_srng,
9672 					  RXDMA_MONITOR_STATUS,
9673 					  RX_BUFFER_SIZE,
9674 					  &htt_tlv_filter);
9675 				}
9676 
9677 				if (soc->reap_timer_init)
9678 					qdf_timer_stop(&soc->mon_reap_timer);
9679 			}
9680 			break;
9681 		case WDI_EVENT_LITE_T2H:
9682 			if (pdev->monitor_vdev) {
9683 				/* Nothing needs to be done if monitor mode is
9684 				 * enabled
9685 				 */
9686 				return 0;
9687 			}
			/* Pass value 0 to disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG
			 * in FW. Switch to proper macros once they are defined
			 * in the htt header file.
			 */
9692 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
9693 				int mac_for_pdev =
9694 						dp_get_mac_id_for_pdev(mac_id,
9695 								pdev->pdev_id);
9696 
9697 				pdev->pktlog_ppdu_stats = false;
9698 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
9699 					dp_h2t_cfg_stats_msg_send(pdev, 0,
9700 								mac_for_pdev);
9701 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
9702 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
9703 								mac_for_pdev);
9704 				} else if (pdev->enhanced_stats_en) {
9705 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
9706 								mac_for_pdev);
9707 				}
9708 			}
9709 
9710 			break;
9711 		default:
9712 			/* Nothing needs to be done for other pktlog types */
9713 			break;
9714 		}
9715 	}
9716 	return 0;
9717 }
9718 #endif
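
/*
 * Example (illustrative): enabling lite rx packet logging on a pdev
 * programs the monitor status rings with the reduced TLV filter set
 * shown above:
 *
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, true);
 */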
9719 
9720 /**
 * dp_bucket_index() - Return delay bucket index for a measured delay
 *
 * @delay: delay measured, in milliseconds
 * @array: array of bucket boundaries for the delay type
 *
 * Return: index of the bucket the delay falls into
9727  */
9728 static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
9729 {
9730 	uint8_t i = CDP_DELAY_BUCKET_0;
9731 
	/* Stop before the last boundary so array[i + 1] stays in bounds */
	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
9733 		if (delay >= array[i] && delay <= array[i + 1])
9734 			return i;
9735 	}
9736 
9737 	return (CDP_DELAY_BUCKET_MAX - 1);
9738 }
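
/*
 * Worked example: with the cdp_fw_to_hw_delay boundaries
 * {0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500} defined below,
 * a delay of 35 ms satisfies 30 <= 35 <= 40 and maps to bucket 3, while
 * 600 ms exceeds the last boundary and falls into the overflow bucket
 * (CDP_DELAY_BUCKET_MAX - 1).
 */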
9739 
9740 /**
9741  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
9742  *				type of delay
9743  *
9744  * @pdev: pdev handle
9745  * @delay: delay in ms
 * @tid: tid value
 * @mode: type of delay mode (tx or rx)
 *
9748  * Return: pointer to cdp_delay_stats structure
9749  */
9750 static struct cdp_delay_stats *
9751 dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
9752 		      uint8_t tid, uint8_t mode)
9753 {
9754 	uint8_t delay_index = 0;
9755 	struct cdp_tid_tx_stats *tstats =
9756 		&pdev->stats.tid_stats.tid_tx_stats[tid];
9757 	struct cdp_tid_rx_stats *rstats =
9758 		&pdev->stats.tid_stats.tid_rx_stats[tid];
9759 	/*
9760 	 * cdp_fw_to_hw_delay_range
9761 	 * Fw to hw delay ranges in milliseconds
9762 	 */
9763 	uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
9764 		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
9765 
9766 	/*
9767 	 * cdp_sw_enq_delay_range
9768 	 * Software enqueue delay ranges in milliseconds
9769 	 */
9770 	uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
9771 		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
9772 
9773 	/*
9774 	 * cdp_intfrm_delay_range
9775 	 * Interframe delay ranges in milliseconds
9776 	 */
9777 	uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
9778 		0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
9779 
9780 	/*
9781 	 * Update delay stats in proper bucket
9782 	 */
9783 	switch (mode) {
9784 	/* Software Enqueue delay ranges */
9785 	case CDP_DELAY_STATS_SW_ENQ:
9786 
9787 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
9788 		tstats->swq_delay.delay_bucket[delay_index]++;
9789 		return &tstats->swq_delay;
9790 
9791 	/* Tx Completion delay ranges */
9792 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
9793 
9794 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
9795 		tstats->hwtx_delay.delay_bucket[delay_index]++;
9796 		return &tstats->hwtx_delay;
9797 
9798 	/* Interframe tx delay ranges */
9799 	case CDP_DELAY_STATS_TX_INTERFRAME:
9800 
9801 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
9802 		tstats->intfrm_delay.delay_bucket[delay_index]++;
9803 		return &tstats->intfrm_delay;
9804 
9805 	/* Interframe rx delay ranges */
9806 	case CDP_DELAY_STATS_RX_INTERFRAME:
9807 
9808 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
9809 		rstats->intfrm_delay.delay_bucket[delay_index]++;
9810 		return &rstats->intfrm_delay;
9811 
	/* Ring reap to network stack indication delay ranges */
9813 	case CDP_DELAY_STATS_REAP_STACK:
9814 
9815 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
9816 		rstats->to_stack_delay.delay_bucket[delay_index]++;
9817 		return &rstats->to_stack_delay;
9818 	default:
9819 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
9820 			  "%s Incorrect delay mode: %d", __func__, mode);
9821 	}
9822 
9823 	return NULL;
9824 }
9825 
9826 /**
9827  * dp_update_delay_stats() - Update delay statistics in structure
9828  *				and fill min, max and avg delay
9829  *
9830  * @pdev: pdev handle
9831  * @delay: delay in ms
9832  * @tid: tid value
 * @mode: type of delay mode (tx or rx)
 *
9834  * Return: none
9835  */
9836 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
9837 			   uint8_t tid, uint8_t mode)
9838 {
9839 	struct cdp_delay_stats *dstats = NULL;
9840 
9841 	/*
9842 	 * Delay ranges are different for different delay modes
9843 	 * Get the correct index to update delay bucket
9844 	 */
9845 	dstats = dp_fill_delay_buckets(pdev, delay, tid, mode);
9846 	if (qdf_unlikely(!dstats))
9847 		return;
9848 
9849 	if (delay != 0) {
9850 		/*
		 * Compute minimum, average and maximum
9852 		 * delay
9853 		 */
9854 		if (delay < dstats->min_delay)
9855 			dstats->min_delay = delay;
9856 
9857 		if (delay > dstats->max_delay)
9858 			dstats->max_delay = delay;
9859 
9860 		/*
9861 		 * Average over delay measured till now
9862 		 */
9863 		if (!dstats->avg_delay)
9864 			dstats->avg_delay = delay;
9865 		else
9866 			dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
9867 	}
9868 }
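
/*
 * Note: avg_delay is a two-point running average, avg' = (delay + avg) / 2,
 * which weights recent samples heavily; e.g. an average of 10 ms followed
 * by a 20 ms sample yields 15 ms.
 */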
9869