xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 45a38684b07295822dc8eba39e293408f203eec8)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_types.h"
31 #include "dp_internal.h"
32 #include "dp_tx.h"
33 #include "dp_tx_desc.h"
34 #include "dp_rx.h"
35 #include "dp_rx_mon.h"
36 #ifdef DP_RATETABLE_SUPPORT
37 #include "dp_ratetable.h"
38 #endif
39 #include <cdp_txrx_handle.h>
40 #include <wlan_cfg.h>
41 #include <wlan_utility.h>
42 #include "cdp_txrx_cmn_struct.h"
43 #include "cdp_txrx_stats_struct.h"
44 #include "cdp_txrx_cmn_reg.h"
45 #include <qdf_util.h>
46 #include "dp_peer.h"
47 #include "dp_rx_mon.h"
48 #include "htt_stats.h"
49 #include "dp_htt.h"
50 #ifdef WLAN_SUPPORT_RX_FISA
51 #include <dp_fisa_rx.h>
52 #endif
53 #include "htt_ppdu_stats.h"
54 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
55 #include "cfg_ucfg_api.h"
56 #include "dp_mon_filter.h"
57 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
58 #include "cdp_txrx_flow_ctrl_v2.h"
59 #else
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
	/* Flow-control v2 disabled in this build: nothing to dump. */
}
65 #endif
66 #include "dp_ipa.h"
67 #include "dp_cal_client_api.h"
68 #ifdef FEATURE_WDS
69 #include "dp_txrx_wds.h"
70 #endif
71 #ifdef ATH_SUPPORT_IQUE
72 #include "dp_txrx_me.h"
73 #endif
74 #if defined(DP_CON_MON)
75 #ifndef REMOVE_PKT_LOG
76 #include <pktlog_ac_api.h>
77 #include <pktlog_ac.h>
78 #endif
79 #endif
80 
81 #ifdef WLAN_FEATURE_STATS_EXT
82 #define INIT_RX_HW_STATS_LOCK(_soc) \
83 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
84 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
85 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
86 #else
87 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
88 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
89 #endif
90 
91 #ifdef DP_PEER_EXTENDED_API
92 #define SET_PEER_REF_CNT_ONE(_peer) \
93 	qdf_atomic_set(&(_peer)->ref_cnt, 1)
94 #else
95 #define SET_PEER_REF_CNT_ONE(_peer)
96 #endif
97 
98 /*
99  * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
100  * If the buffer size is exceeding this size limit,
101  * dp_txrx_get_peer_stats is to be used instead.
102  */
103 QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
104 			(sizeof(cdp_peer_stats_param_t) <= 16));
105 
106 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
107 /*
108  * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
109  * also should be updated accordingly
110  */
111 QDF_COMPILE_TIME_ASSERT(num_intr_grps,
112 			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);
113 
114 /*
115  * HIF_EVENT_HIST_MAX should always be power of 2
116  */
117 QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
118 			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
119 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
120 
121 /*
122  * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
123  * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
124  */
125 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
126 			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
127 			WLAN_CFG_INT_NUM_CONTEXTS);
128 
129 #ifdef WLAN_RX_PKT_CAPTURE_ENH
130 #include "dp_rx_mon_feature.h"
131 #else
/*
 * dp_config_enh_rx_capture()- API to enable/disable enhanced rx capture
 * @pdev_handle: DP_PDEV handle
 * @val: user provided value
 *
 * Stub used when WLAN_RX_PKT_CAPTURE_ENH is not compiled in; always
 * rejects the request.
 *
 * Return: QDF_STATUS_E_INVAL (feature unavailable in this build)
 */
static QDF_STATUS
dp_config_enh_rx_capture(struct dp_pdev *pdev_handle, uint8_t val)
{
	return QDF_STATUS_E_INVAL;
}
144 #endif /* WLAN_RX_PKT_CAPTURE_ENH */
145 
146 #ifdef WLAN_TX_PKT_CAPTURE_ENH
147 #include "dp_tx_capture.h"
148 #else
/*
 * dp_config_enh_tx_capture()- API to enable/disable enhanced tx capture
 * @pdev_handle: DP_PDEV handle
 * @val: user provided value
 *
 * Stub used when WLAN_TX_PKT_CAPTURE_ENH is not compiled in; always
 * rejects the request.
 *
 * Return: QDF_STATUS_E_INVAL (feature unavailable in this build)
 */
static QDF_STATUS
dp_config_enh_tx_capture(struct dp_pdev *pdev_handle, uint8_t val)
{
	return QDF_STATUS_E_INVAL;
}
161 #endif
162 
163 static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
164 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
165 static void dp_pdev_srng_free(struct dp_pdev *pdev);
166 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);
167 
168 static void dp_soc_srng_deinit(struct dp_soc *soc);
169 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
170 static void dp_soc_srng_free(struct dp_soc *soc);
171 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);
172 
173 static void dp_soc_cfg_init(struct dp_soc *soc);
174 static void dp_soc_cfg_attach(struct dp_soc *soc);
175 
176 static inline
177 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
178 				HTC_HANDLE htc_handle,
179 				qdf_device_t qdf_osdev,
180 				uint8_t pdev_id);
181 
182 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);
183 
184 static QDF_STATUS
185 dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
186 		   HTC_HANDLE htc_handle,
187 		   qdf_device_t qdf_osdev,
188 		   uint8_t pdev_id);
189 
190 static QDF_STATUS
191 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);
192 
193 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
194 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);
195 
196 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
197 		  struct hif_opaque_softc *hif_handle);
198 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
199 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
200 				       uint8_t pdev_id,
201 				       int force);
202 static struct dp_soc *
203 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
204 	      struct hif_opaque_softc *hif_handle,
205 	      HTC_HANDLE htc_handle,
206 	      qdf_device_t qdf_osdev,
207 	      struct ol_if_ops *ol_ops, uint16_t device_id);
208 static void dp_pktlogmod_exit(struct dp_pdev *handle);
209 static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
210 					      uint8_t vdev_id,
211 					      uint8_t *peer_mac_addr);
212 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
213 				       uint8_t vdev_id,
214 				       uint8_t *peer_mac, uint32_t bitmap);
215 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
216 				bool unmap_only);
217 #ifdef ENABLE_VERBOSE_DEBUG
218 bool is_dp_verbose_debug_enabled;
219 #endif
220 
221 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
222 static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
223 			  uint8_t pdev_id,
224 			  bool enable,
225 			  struct cdp_monitor_filter *filter_val);
226 static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
227 static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
228 			   bool enable);
229 static inline void
230 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
231 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
232 static inline void
233 dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
234 static inline void
235 dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
236 			 bool enable);
237 #endif
238 static inline bool
239 dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev);
240 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
241 					    enum hal_ring_type ring_type,
242 					    int ring_num);
243 #define DP_INTR_POLL_TIMER_MS	5
244 
245 /* Generic AST entry aging timer value */
246 #define DP_AST_AGING_TIMER_DEFAULT_MS	1000
247 #define DP_MCS_LENGTH (6*MAX_MCS)
248 
249 #define DP_CURR_FW_STATS_AVAIL 19
250 #define DP_HTT_DBG_EXT_STATS_MAX 256
251 #define DP_MAX_SLEEP_TIME 100
252 #ifndef QCA_WIFI_3_0_EMU
253 #define SUSPEND_DRAIN_WAIT 500
254 #else
255 #define SUSPEND_DRAIN_WAIT 3000
256 #endif
257 
258 #ifdef IPA_OFFLOAD
259 /* Exclude IPA rings from the interrupt context */
260 #define TX_RING_MASK_VAL	0xb
261 #define RX_RING_MASK_VAL	0x7
262 #else
263 #define TX_RING_MASK_VAL	0xF
264 #define RX_RING_MASK_VAL	0xF
265 #endif
266 
267 #define STR_MAXLEN	64
268 
269 #define RNG_ERR		"SRNG setup failed for"
270 
271 /* Threshold for peer's cached buf queue beyond which frames are dropped */
272 #define DP_RX_CACHED_BUFQ_THRESH 64
273 
274 /* Budget to reap monitor status ring */
275 #define DP_MON_REAP_BUDGET 1024
276 
277 /**
278  * default_dscp_tid_map - Default DSCP-TID mapping
279  *
280  * DSCP        TID
281  * 000000      0
282  * 001000      1
283  * 010000      2
284  * 011000      3
285  * 100000      4
286  * 101000      5
287  * 110000      6
288  * 111000      7
289  */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	/* Each run of 8 DSCP code points maps to one TID; i.e. the TID is
	 * the top three bits of the DSCP value (see table in header comment).
	 */
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
300 
301 /**
302  * default_pcp_tid_map - Default PCP-TID mapping
303  *
304  * PCP     TID
305  * 000      0
306  * 001      1
307  * 010      2
308  * 011      3
309  * 100      4
310  * 101      5
311  * 110      6
312  * 111      7
313  */
/* Identity mapping: PCP value i maps directly to TID i. */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};
317 
/**
 * dp_cpu_ring_map - Cpu to tx ring map
 *
 * Each row is one mapping scheme (indexed up to DP_NSS_CPU_RING_MAP_MAX);
 * each column gives the TX ring to use for the corresponding interrupt
 * context (WLAN_CFG_INT_NUM_CONTEXTS_MAX entries per row).
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	/* Extra scheme compiled in for TX capture builds: pin all
	 * contexts to ring 1.
	 */
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};
332 
/**
 * enum dp_stats_type - Select the type of statistics
 * @STATS_FW: firmware-provided statistics
 * @STATS_HOST: host-maintained statistics
 * @STATS_TYPE_MAX: number of statistics types (array-bound marker)
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};
341 
/**
 * enum dp_fw_stats - General Firmware statistics options
 * @TXRX_FW_STATS_INVALID: sentinel marking a mapping-table slot that has
 *                         no firmware-side statistic
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID	= -1,
};
349 
/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 *
 * Each row pairs a firmware (HTT) stats id with a host stats id; an
 * *_INVALID entry means that side has no counterpart for the row.
 * NOTE(review): row position appears to be the lookup index used by
 * callers — do not reorder rows; confirm against the cdp stats enum.
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID}
};
392 
393 /* MCL specific functions */
394 #if defined(DP_CON_MON)
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are processed in timer context (polled),
 * so in interrupt mode (softirq based RX) we do not want monitor rings
 * serviced from a softirq. When packet log is enabled for SAP/STA/P2P
 * modes, regular interrupt processing therefore skips monitor rings;
 * they are handled by a separate timer instead.
 *
 * Return: 0 (monitor rings are never mapped to an interrupt context)
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return 0;
}
415 
416 /*
417  * dp_service_mon_rings()- service monitor rings
418  * @soc: soc dp handle
419  * @quota: number of ring entry that can be serviced
420  *
421  * Return: None
422  *
423  */
424 static void dp_service_mon_rings(struct  dp_soc *soc, uint32_t quota)
425 {
426 	int ring = 0, work_done;
427 	struct dp_pdev *pdev = NULL;
428 
429 	for (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
430 		pdev = dp_get_pdev_for_lmac_id(soc, ring);
431 		if (!pdev)
432 			continue;
433 		work_done = dp_mon_process(soc, NULL, ring, quota);
434 
435 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
436 			  FL("Reaped %d descs from Monitor rings"),
437 			  work_done);
438 	}
439 }
440 
441 /*
442  * dp_mon_reap_timer_handler()- timer to reap monitor rings
443  * reqd as we are not getting ppdu end interrupts
444  * @arg: SoC Handle
445  *
446  * Return:
447  *
448  */
449 static void dp_mon_reap_timer_handler(void *arg)
450 {
451 	struct dp_soc *soc = (struct dp_soc *)arg;
452 
453 	dp_service_mon_rings(soc, QCA_NAPI_BUDGET);
454 
455 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
456 }
457 
458 #ifndef REMOVE_PKT_LOG
459 /**
460  * dp_pkt_log_init() - API to initialize packet log
461  * @soc_hdl: Datapath soc handle
462  * @pdev_id: id of data path pdev handle
463  * @scn: HIF context
464  *
465  * Return: none
466  */
467 void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
468 {
469 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
470 	struct dp_pdev *handle =
471 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
472 
473 	if (!handle) {
474 		dp_err("pdev handle is NULL");
475 		return;
476 	}
477 
478 	if (handle->pkt_log_init) {
479 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
480 			  "%s: Packet log not initialized", __func__);
481 		return;
482 	}
483 
484 	pktlog_sethandle(&handle->pl_dev, scn);
485 	pktlog_set_pdev_id(handle->pl_dev, pdev_id);
486 	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
487 
488 	if (pktlogmod_init(scn)) {
489 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
490 			  "%s: pktlogmod_init failed", __func__);
491 		handle->pkt_log_init = false;
492 	} else {
493 		handle->pkt_log_init = true;
494 	}
495 }
496 
/**
 * dp_pkt_log_con_service() - connect packet log service
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: device context
 *
 * Initializes pktlog for the pdev and attaches the pktlog HTC endpoint.
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
	/* Bring up pktlog for this pdev first, then connect its service. */
	dp_pkt_log_init(soc_hdl, pdev_id, scn);

	pktlog_htc_attach();
}
511 
/**
 * dp_pktlogmod_exit() - API to cleanup pktlog info
 * @pdev: Pdev handle
 *
 * Stops the monitor reap timer when it was running only on behalf of
 * packet logging, then tears down the pktlog module for this pdev.
 *
 * Return: none
 */
static void dp_pktlogmod_exit(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct hif_opaque_softc *scn = soc->hif_handle;

	if (!scn) {
		dp_err("Invalid hif(scn) handle");
		return;
	}

	/* stop mon_reap_timer if it has been started: only when rx pktlog
	 * is enabled, the timer was created, and no non-pktlog user still
	 * needs it (see dp_is_enable_reap_timer_non_pkt)
	 */
	if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED &&
	    soc->reap_timer_init && (!dp_is_enable_reap_timer_non_pkt(pdev)))
		qdf_timer_sync_cancel(&soc->mon_reap_timer);

	pktlogmod_exit(scn);
	pdev->pkt_log_init = false;
}
536 #else
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
	/* no-op: packet log support compiled out (REMOVE_PKT_LOG) */
}
541 
542 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
543 #endif
544 /**
545  * dp_get_num_rx_contexts() - get number of RX contexts
546  * @soc_hdl: cdp opaque soc handle
547  *
548  * Return: number of RX contexts
549  */
550 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
551 {
552 	int i;
553 	int num_rx_contexts = 0;
554 
555 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
556 
557 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
558 		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
559 			num_rx_contexts++;
560 
561 	return num_rx_contexts;
562 }
563 
564 #else
565 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
566 
567 /**
568  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
569  * @soc: pointer to dp_soc handle
570  * @intr_ctx_num: interrupt context number for which mon mask is needed
571  *
572  * Return: mon mask value
573  */
574 static inline
575 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
576 {
577 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
578 }
579 
/*
 * dp_service_lmac_rings()- timer to reap lmac rings
 * @arg: SoC Handle
 *
 * For each pdev/LMAC: reaps the monitor ring, drains the RXDMA error
 * rings of every interrupt context, and (when RXDMA_BUF is not offloaded
 * to NSS) replenishes the RX refill buffer ring. Re-arms itself.
 *
 * Return: none
 *
 */
static void dp_service_lmac_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, i;
	struct dp_pdev *pdev = NULL;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	/* Process LMAC interrupts */
	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;
		struct dp_srng *rx_refill_buf_ring;

		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;

		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];

		/* reap the monitor ring for this LMAC */
		dp_mon_process(soc, NULL, mac_for_pdev,
			       QCA_NAPI_BUDGET);

		/* drain RXDMA error rings across all interrupt contexts */
		for (i = 0;
		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
					     mac_for_pdev,
					     QCA_NAPI_BUDGET);

		/* refill RX buffers unless the ring is NSS-offloaded */
		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
						  mac_for_pdev))
			dp_rx_buffers_replenish(soc, mac_for_pdev,
						rx_refill_buf_ring,
						&soc->rx_desc_buf[mac_for_pdev],
						0, &desc_list, &tail);
	}

	/* re-arm the poll timer */
	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
}
625 
626 #endif
627 
628 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
629 				 uint8_t vdev_id,
630 				 uint8_t *peer_mac,
631 				 uint8_t *mac_addr,
632 				 enum cdp_txrx_ast_entry_type type,
633 				 uint32_t flags)
634 {
635 	int ret = -1;
636 	QDF_STATUS status = QDF_STATUS_SUCCESS;
637 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
638 						       peer_mac, 0, vdev_id);
639 
640 	if (!peer || peer->delete_in_progress) {
641 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
642 			  "%s: Peer is NULL!\n", __func__);
643 		goto fail;
644 	}
645 
646 	status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
647 				 peer,
648 				 mac_addr,
649 				 type,
650 				 flags);
651 	if ((status == QDF_STATUS_SUCCESS) ||
652 	    (status == QDF_STATUS_E_ALREADY) ||
653 	    (status == QDF_STATUS_E_AGAIN))
654 		ret = 0;
655 
656 	dp_hmwds_ast_add_notify(peer, mac_addr,
657 				type, status, false);
658 fail:
659 	if (peer)
660 		dp_peer_unref_delete(peer);
661 
662 	return ret;
663 }
664 
665 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
666 						uint8_t vdev_id,
667 						uint8_t *peer_mac,
668 						uint8_t *wds_macaddr,
669 						uint32_t flags)
670 {
671 	int status = -1;
672 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
673 	struct dp_ast_entry  *ast_entry = NULL;
674 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
675 						       peer_mac, 0, vdev_id);
676 
677 	if (!peer || peer->delete_in_progress) {
678 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
679 			  "%s: Peer is NULL!\n", __func__);
680 		goto fail;
681 	}
682 
683 	qdf_spin_lock_bh(&soc->ast_lock);
684 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
685 						    peer->vdev->pdev->pdev_id);
686 
687 	if (ast_entry) {
688 		status = dp_peer_update_ast(soc,
689 					    peer,
690 					    ast_entry, flags);
691 	}
692 	qdf_spin_unlock_bh(&soc->ast_lock);
693 
694 fail:
695 	if (peer)
696 		dp_peer_unref_delete(peer);
697 
698 	return status;
699 }
700 
/*
 * dp_wds_reset_ast_wifi3() - Delete WDS-HM AST entries for a peer or MAC
 * @soc_hdl:		Datapath SOC handle
 * @wds_macaddr:	WDS entry MAC Address (used when @peer_mac_addr is NULL)
 * @peer_mac_addr:	peer MAC Address (takes precedence when non-NULL)
 * @vdev_id:		id of vdev handle
 *
 * When @peer_mac_addr is given, deletes every WDS_HM/WDS_HM_SEC AST
 * entry of that peer; otherwise, when @wds_macaddr is given, deletes
 * the matching WDS_HM/WDS_HM_SEC entry on this vdev's pdev.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
					 uint8_t *wds_macaddr,
					 uint8_t *peer_mac_addr,
					 uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_ast_entry *tmp_ast_entry;
	struct dp_peer *peer;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	if (peer_mac_addr) {
		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
					      0, vdev->vdev_id);
		if (!peer) {
			return QDF_STATUS_E_FAILURE;
		}

		if (peer->delete_in_progress) {
			/* drop the reference taken by the hash find */
			dp_peer_unref_delete(peer);
			return QDF_STATUS_E_FAILURE;
		}

		/* delete all host-managed WDS entries of this peer */
		qdf_spin_lock_bh(&soc->ast_lock);
		DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_unref_delete(peer);

		return QDF_STATUS_SUCCESS;
	} else if (wds_macaddr) {
		/* delete the single matching entry on this pdev */
		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
							    pdev->pdev_id);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	return QDF_STATUS_SUCCESS;
}
763 
/*
 * dp_wds_reset_ast_table_wifi3() - Delete all WDS_HM/WDS_HM_SEC ast entries
 * @soc_hdl:	Datapath SOC handle
 * @vdev_id:	unused; the walk covers every pdev/vdev/peer on the soc
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
			     uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	qdf_spin_lock_bh(&soc->ast_lock);

	/* NOTE: the walk stops at the first NULL pdev_list slot */
	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					if ((ase->type ==
						CDP_TXRX_AST_TYPE_WDS_HM) ||
					    (ase->type ==
						CDP_TXRX_AST_TYPE_WDS_HM_SEC))
						dp_peer_del_ast(soc, ase);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	return QDF_STATUS_SUCCESS;
}
804 
/*
 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
 * @soc_hdl:	Datapath SOC handle
 *
 * Deletes every AST entry on the soc except the STATIC, SELF and
 * STA_BSS types, which are preserved.
 *
 * Return: None
 */
static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	qdf_spin_lock_bh(&soc->ast_lock);

	/* NOTE: the walk stops at the first NULL pdev_list slot */
	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					/* keep static/self/bss entries */
					if ((ase->type ==
						CDP_TXRX_AST_TYPE_STATIC) ||
						(ase->type ==
						 CDP_TXRX_AST_TYPE_SELF) ||
						(ase->type ==
						 CDP_TXRX_AST_TYPE_STA_BSS))
						continue;
					dp_peer_del_ast(soc, ase);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
}
844 
845 /**
846  * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
847  *                                       and return ast entry information
848  *                                       of first ast entry found in the
849  *                                       table with given mac address
850  *
851  * @soc : data path soc handle
852  * @ast_mac_addr : AST entry mac address
853  * @ast_entry_info : ast entry information
854  *
855  * return : true if ast entry found with ast_mac_addr
856  *          false if ast entry not found
857  */
858 static bool dp_peer_get_ast_info_by_soc_wifi3
859 	(struct cdp_soc_t *soc_hdl,
860 	 uint8_t *ast_mac_addr,
861 	 struct cdp_ast_entry_info *ast_entry_info)
862 {
863 	struct dp_ast_entry *ast_entry = NULL;
864 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
865 
866 	qdf_spin_lock_bh(&soc->ast_lock);
867 
868 	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
869 	if (!ast_entry || !ast_entry->peer) {
870 		qdf_spin_unlock_bh(&soc->ast_lock);
871 		return false;
872 	}
873 	if (ast_entry->delete_in_progress && !ast_entry->callback) {
874 		qdf_spin_unlock_bh(&soc->ast_lock);
875 		return false;
876 	}
877 	ast_entry_info->type = ast_entry->type;
878 	ast_entry_info->pdev_id = ast_entry->pdev_id;
879 	ast_entry_info->vdev_id = ast_entry->peer->vdev->vdev_id;
880 	ast_entry_info->peer_id = ast_entry->peer->peer_id;
881 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
882 		     &ast_entry->peer->mac_addr.raw[0],
883 		     QDF_MAC_ADDR_SIZE);
884 	qdf_spin_unlock_bh(&soc->ast_lock);
885 	return true;
886 }
887 
888 /**
889  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
890  *                                          and return ast entry information
891  *                                          if mac address and pdev_id matches
892  *
893  * @soc : data path soc handle
894  * @ast_mac_addr : AST entry mac address
895  * @pdev_id : pdev_id
896  * @ast_entry_info : ast entry information
897  *
898  * return : true if ast entry found with ast_mac_addr
899  *          false if ast entry not found
900  */
901 static bool dp_peer_get_ast_info_by_pdevid_wifi3
902 		(struct cdp_soc_t *soc_hdl,
903 		 uint8_t *ast_mac_addr,
904 		 uint8_t pdev_id,
905 		 struct cdp_ast_entry_info *ast_entry_info)
906 {
907 	struct dp_ast_entry *ast_entry;
908 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
909 
910 	qdf_spin_lock_bh(&soc->ast_lock);
911 
912 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);
913 
914 	if (!ast_entry || !ast_entry->peer) {
915 		qdf_spin_unlock_bh(&soc->ast_lock);
916 		return false;
917 	}
918 	if (ast_entry->delete_in_progress && !ast_entry->callback) {
919 		qdf_spin_unlock_bh(&soc->ast_lock);
920 		return false;
921 	}
922 	ast_entry_info->type = ast_entry->type;
923 	ast_entry_info->pdev_id = ast_entry->pdev_id;
924 	ast_entry_info->vdev_id = ast_entry->peer->vdev->vdev_id;
925 	ast_entry_info->peer_id = ast_entry->peer->peer_id;
926 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
927 		     &ast_entry->peer->mac_addr.raw[0],
928 		     QDF_MAC_ADDR_SIZE);
929 	qdf_spin_unlock_bh(&soc->ast_lock);
930 	return true;
931 }
932 
/**
 * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
 *                            with given mac address
 *
 * @soc_handle : data path soc handle
 * @mac_addr : AST entry mac address
 * @callback : callback function to called on ast delete response from FW
 * @cookie : argument to be passed to callback
 *
 * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
 *          is sent
 *          QDF_STATUS_E_INVAL false if ast entry not found
 */
static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
					       uint8_t *mac_addr,
					       txrx_ast_free_cb callback,
					       void *cookie)

{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_ast_entry *ast_entry = NULL;
	txrx_ast_free_cb cb = NULL;
	void *arg = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		/* NOTE(review): negating a QDF_STATUS enum value is unusual;
		 * callers comparing against QDF_STATUS_E_INVAL will not match
		 * this value — confirm this is intended.
		 */
		return -QDF_STATUS_E_INVAL;
	}

	/* Save any previously registered callback so it can be invoked
	 * (outside the lock) before the new one takes its place.
	 */
	if (ast_entry->callback) {
		cb = ast_entry->callback;
		arg = ast_entry->cookie;
	}

	ast_entry->callback = callback;
	ast_entry->cookie = cookie;

	/*
	 * if delete_in_progress is set AST delete is sent to target
	 * and host is waiting for response should not send delete
	 * again
	 */
	if (!ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);
	if (cb) {
		/* notify the displaced callback that deletion is underway */
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   arg,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
	return QDF_STATUS_SUCCESS;
}
989 
/**
 * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
 *                                   table if mac address and pdev_id matches
 *
 * @soc_handle : data path soc handle
 * @mac_addr : AST entry mac address
 * @pdev_id : pdev id
 * @callback : callback function to called on ast delete response from FW
 * @cookie : argument to be passed to callback
 *
 * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
 *          is sent
 *          QDF_STATUS_E_INVAL false if ast entry not found
 */

static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
						uint8_t *mac_addr,
						uint8_t pdev_id,
						txrx_ast_free_cb callback,
						void *cookie)

{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_ast_entry *ast_entry;
	txrx_ast_free_cb cb = NULL;
	void *arg = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		/* NOTE(review): negating a QDF_STATUS enum value is unusual;
		 * callers comparing against QDF_STATUS_E_INVAL will not match
		 * this value — confirm this is intended.
		 */
		return -QDF_STATUS_E_INVAL;
	}

	/* Save any previously registered callback so it can be invoked
	 * (outside the lock) before the new one takes its place.
	 */
	if (ast_entry->callback) {
		cb = ast_entry->callback;
		arg = ast_entry->cookie;
	}

	ast_entry->callback = callback;
	ast_entry->cookie = cookie;

	/*
	 * if delete_in_progress is set AST delete is sent to target
	 * and host is waiting for response should not send delete
	 * again
	 */
	if (!ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);

	if (cb) {
		/* notify the displaced callback that deletion is underway */
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   arg,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
	return QDF_STATUS_SUCCESS;
}
1051 
1052 /**
1053  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
1054  * @ring_num: ring num of the ring being queried
1055  * @grp_mask: the grp_mask array for the ring type in question.
1056  *
1057  * The grp_mask array is indexed by group number and the bit fields correspond
1058  * to ring numbers.  We are finding which interrupt group a ring belongs to.
1059  *
1060  * Return: the index in the grp_mask array with the ring number.
1061  * -QDF_STATUS_E_NOENT if no entry is found
1062  */
1063 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
1064 {
1065 	int ext_group_num;
1066 	int mask = 1 << ring_num;
1067 
1068 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
1069 	     ext_group_num++) {
1070 		if (mask & grp_mask[ext_group_num])
1071 			return ext_group_num;
1072 	}
1073 
1074 	return -QDF_STATUS_E_NOENT;
1075 }
1076 
1077 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
1078 				       enum hal_ring_type ring_type,
1079 				       int ring_num)
1080 {
1081 	int *grp_mask;
1082 
1083 	switch (ring_type) {
1084 	case WBM2SW_RELEASE:
1085 		/* dp_tx_comp_handler - soc->tx_comp_ring */
1086 		if (ring_num < 3)
1087 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1088 
1089 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
1090 		else if (ring_num == 3) {
1091 			/* sw treats this as a separate ring type */
1092 			grp_mask = &soc->wlan_cfg_ctx->
1093 				int_rx_wbm_rel_ring_mask[0];
1094 			ring_num = 0;
1095 		} else {
1096 			qdf_assert(0);
1097 			return -QDF_STATUS_E_NOENT;
1098 		}
1099 	break;
1100 
1101 	case REO_EXCEPTION:
1102 		/* dp_rx_err_process - &soc->reo_exception_ring */
1103 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
1104 	break;
1105 
1106 	case REO_DST:
1107 		/* dp_rx_process - soc->reo_dest_ring */
1108 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1109 	break;
1110 
1111 	case REO_STATUS:
1112 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
1113 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
1114 	break;
1115 
1116 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
1117 	case RXDMA_MONITOR_STATUS:
1118 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
1119 	case RXDMA_MONITOR_DST:
1120 		/* dp_mon_process */
1121 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
1122 	break;
1123 	case RXDMA_DST:
1124 		/* dp_rxdma_err_process */
1125 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
1126 	break;
1127 
1128 	case RXDMA_BUF:
1129 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1130 	break;
1131 
1132 	case RXDMA_MONITOR_BUF:
1133 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
1134 	break;
1135 
1136 	case TCL_DATA:
1137 	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
1138 	case TCL_CMD_CREDIT:
1139 	case REO_CMD:
1140 	case SW2WBM_RELEASE:
1141 	case WBM_IDLE_LINK:
1142 		/* normally empty SW_TO_HW rings */
1143 		return -QDF_STATUS_E_NOENT;
1144 	break;
1145 
1146 	case TCL_STATUS:
1147 	case REO_REINJECT:
1148 		/* misc unused rings */
1149 		return -QDF_STATUS_E_NOENT;
1150 	break;
1151 
1152 	case CE_SRC:
1153 	case CE_DST:
1154 	case CE_DST_STATUS:
1155 		/* CE_rings - currently handled by hif */
1156 	default:
1157 		return -QDF_STATUS_E_NOENT;
1158 	break;
1159 	}
1160 
1161 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
1162 }
1163 
1164 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
1165 			      *ring_params, int ring_type, int ring_num)
1166 {
1167 	int msi_group_number;
1168 	int msi_data_count;
1169 	int ret;
1170 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1171 
1172 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1173 					    &msi_data_count, &msi_data_start,
1174 					    &msi_irq_start);
1175 
1176 	if (ret)
1177 		return;
1178 
1179 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
1180 						       ring_num);
1181 	if (msi_group_number < 0) {
1182 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1183 			FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
1184 			ring_type, ring_num);
1185 		ring_params->msi_addr = 0;
1186 		ring_params->msi_data = 0;
1187 		return;
1188 	}
1189 
1190 	if (msi_group_number > msi_data_count) {
1191 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
1192 			FL("2 msi_groups will share an msi; msi_group_num %d"),
1193 			msi_group_number);
1194 
1195 		QDF_ASSERT(0);
1196 	}
1197 
1198 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1199 
1200 	ring_params->msi_addr = addr_low;
1201 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1202 	ring_params->msi_data = (msi_group_number % msi_data_count)
1203 		+ msi_data_start;
1204 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1205 }
1206 
1207 /**
1208  * dp_print_ast_stats() - Dump AST table contents
1209  * @soc: Datapath soc handle
1210  *
1211  * return void
1212  */
1213 #ifdef FEATURE_AST
1214 void dp_print_ast_stats(struct dp_soc *soc)
1215 {
1216 	uint8_t i;
1217 	uint8_t num_entries = 0;
1218 	struct dp_vdev *vdev;
1219 	struct dp_pdev *pdev;
1220 	struct dp_peer *peer;
1221 	struct dp_ast_entry *ase, *tmp_ase;
1222 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1223 			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
1224 			"DA", "HMWDS_SEC"};
1225 
1226 	DP_PRINT_STATS("AST Stats:");
1227 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1228 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1229 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1230 	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
1231 	DP_PRINT_STATS("	Entries Mismatch ERR  = %d",
1232 		       soc->stats.ast.ast_mismatch);
1233 
1234 	DP_PRINT_STATS("AST Table:");
1235 
1236 	qdf_spin_lock_bh(&soc->ast_lock);
1237 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1238 		pdev = soc->pdev_list[i];
1239 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1240 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1241 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1242 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1243 				    DP_PRINT_STATS("%6d mac_addr = %pM"
1244 					    " peer_mac_addr = %pM"
1245 					    " peer_id = %u"
1246 					    " type = %s"
1247 					    " next_hop = %d"
1248 					    " is_active = %d"
1249 					    " ast_idx = %d"
1250 					    " ast_hash = %d"
1251 					    " delete_in_progress = %d"
1252 					    " pdev_id = %d"
1253 					    " vdev_id = %d",
1254 					    ++num_entries,
1255 					    ase->mac_addr.raw,
1256 					    ase->peer->mac_addr.raw,
1257 					    ase->peer->peer_id,
1258 					    type[ase->type],
1259 					    ase->next_hop,
1260 					    ase->is_active,
1261 					    ase->ast_idx,
1262 					    ase->ast_hash_value,
1263 					    ase->delete_in_progress,
1264 					    ase->pdev_id,
1265 					    vdev->vdev_id);
1266 				}
1267 			}
1268 		}
1269 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1270 	}
1271 	qdf_spin_unlock_bh(&soc->ast_lock);
1272 }
1273 #else
1274 void dp_print_ast_stats(struct dp_soc *soc)
1275 {
1276 	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
1277 	return;
1278 }
1279 #endif
1280 
1281 /**
1282  *  dp_print_peer_table() - Dump all Peer stats
1283  * @vdev: Datapath Vdev handle
1284  *
1285  * return void
1286  */
1287 static void dp_print_peer_table(struct dp_vdev *vdev)
1288 {
1289 	struct dp_peer *peer = NULL;
1290 
1291 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1292 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1293 		if (!peer) {
1294 			DP_PRINT_STATS("Invalid Peer");
1295 			return;
1296 		}
1297 		DP_PRINT_STATS("    peer_mac_addr = %pM"
1298 			       " nawds_enabled = %d"
1299 			       " bss_peer = %d"
1300 			       " wds_enabled = %d"
1301 			       " tx_cap_enabled = %d"
1302 			       " rx_cap_enabled = %d"
1303 			       " delete in progress = %d"
1304 			       " peer id = %d",
1305 			       peer->mac_addr.raw,
1306 			       peer->nawds_enabled,
1307 			       peer->bss_peer,
1308 			       peer->wds_enabled,
1309 			       peer->tx_cap_enabled,
1310 			       peer->rx_cap_enabled,
1311 			       peer->delete_in_progress,
1312 			       peer->peer_id);
1313 	}
1314 }
1315 
#ifdef WLAN_DP_PER_RING_TYPE_CONFIG
/**
 * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
 * threshold values from the wlan_srng_cfg table for each ring type
 * @soc: device handle
 * @ring_params: per ring specific parameters
 * @ring_type: Ring type
 * @ring_num: Ring number for a given ring type
 * @num_entries: number of entries in the ring (unused in this variant)
 *
 * Fill the ring params with the interrupt threshold
 * configuration parameters available in the per ring type wlan_srng_cfg
 * table.
 *
 * Return: None
 */
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	/* WBM2SW ring 3 (Rx release) uses the "other" thresholds */
	if (ring_type == WBM2SW_RELEASE && (ring_num == 3)) {
		ring_params->intr_timer_thres_us =
				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	} else {
		ring_params->intr_timer_thres_us =
				soc->wlan_srng_cfg[ring_type].timer_threshold;
		ring_params->intr_batch_cntr_thres_entries =
				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
	}
	ring_params->low_threshold =
			soc->wlan_srng_cfg[ring_type].low_threshold;
	if (ring_params->low_threshold)
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}
#else
/*
 * Default variant: derive interrupt thresholds from the global wlan cfg
 * (per traffic direction) rather than a per-ring-type table.
 */
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	if (ring_type == REO_DST) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
	} else {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	}

	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings.
	 * TODO: See if this is required for any other ring
	 */
	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
	    (ring_type == RXDMA_MONITOR_STATUS)) {
		/* TODO: Setting low threshold to 1/8th of ring size
		 * see if this needs to be configurable
		 */
		ring_params->low_threshold = num_entries >> 3;
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		ring_params->intr_batch_cntr_thres_entries = 0;
	}

	/* During initialisation monitor rings are only filled with
	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
	 * a value less than that. Low threshold value is reconfigured again
	 * to 1/8th of the ring size when monitor vap is created.
	 */
	if (ring_type == RXDMA_MONITOR_BUF)
		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;

	/* In case of PCI chipsets, we dont have PPDU end interrupts,
	 * so MONITOR STATUS ring is reaped by receiving MSI from srng.
	 * Keep batch threshold as 4 so that interrupt is received for
	 * every 4 packets in MONITOR_STATUS ring
	 */
	if ((ring_type == RXDMA_MONITOR_STATUS) &&
	    (soc->intr_mode == DP_INTR_MSI))
		ring_params->intr_batch_cntr_thres_entries = 4;
}
#endif
1411 
1412 /*
1413  * dp_srng_free() - Free SRNG memory
1414  * @soc  : Data path soc handle
1415  * @srng : SRNG pointer
1416  *
1417  * return: None
1418  */
1419 
1420 static void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
1421 {
1422 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
1423 		if (!srng->cached) {
1424 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1425 						srng->alloc_size,
1426 						srng->base_vaddr_unaligned,
1427 						srng->base_paddr_unaligned, 0);
1428 		} else {
1429 			qdf_mem_free(srng->base_vaddr_unaligned);
1430 		}
1431 		srng->alloc_size = 0;
1432 		srng->base_vaddr_unaligned = NULL;
1433 	}
1434 	srng->hal_srng = NULL;
1435 }
1436 
1437 /*
1438  * dp_srng_init() - Initialize SRNG
1439  * @soc  : Data path soc handle
1440  * @srng : SRNG pointer
1441  * @ring_type : Ring Type
1442  * @ring_num: Ring number
1443  * @mac_id: mac_id
1444  *
1445  * return: QDF_STATUS
1446  */
1447 static QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
1448 			       int ring_type, int ring_num, int mac_id)
1449 {
1450 	hal_soc_handle_t hal_soc = soc->hal_soc;
1451 	struct hal_srng_params ring_params;
1452 
1453 	if (srng->hal_srng) {
1454 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1455 			  FL("Ring type: %d, num:%d is already initialized"),
1456 			  ring_type, ring_num);
1457 		return QDF_STATUS_SUCCESS;
1458 	}
1459 
1460 	/* memset the srng ring to zero */
1461 	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);
1462 
1463 	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
1464 	ring_params.ring_base_paddr = srng->base_paddr_aligned;
1465 	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;
1466 
1467 	ring_params.num_entries = srng->num_entries;
1468 
1469 	dp_verbose_debug("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
1470 			 ring_type, ring_num,
1471 			 (void *)ring_params.ring_base_vaddr,
1472 			 (void *)ring_params.ring_base_paddr,
1473 			 ring_params.num_entries);
1474 
1475 	if (soc->intr_mode == DP_INTR_MSI) {
1476 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
1477 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
1478 				 ring_type, ring_num);
1479 
1480 	} else {
1481 		ring_params.msi_data = 0;
1482 		ring_params.msi_addr = 0;
1483 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
1484 				 ring_type, ring_num);
1485 	}
1486 
1487 	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
1488 					       ring_type, ring_num,
1489 					       srng->num_entries);
1490 
1491 	if (srng->cached)
1492 		ring_params.flags |= HAL_SRNG_CACHED_DESC;
1493 
1494 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
1495 					mac_id, &ring_params);
1496 
1497 	if (!srng->hal_srng) {
1498 		dp_srng_free(soc, srng);
1499 		return QDF_STATUS_E_FAILURE;
1500 	}
1501 
1502 	return QDF_STATUS_SUCCESS;
1503 }
1504 
1505 /*
1506  * dp_srng_alloc() - Allocate memory for SRNG
1507  * @soc  : Data path soc handle
1508  * @srng : SRNG pointer
1509  * @ring_type : Ring Type
1510  * @num_entries: Number of entries
1511  * @cached: cached flag variable
1512  *
1513  * return: QDF_STATUS
1514  */
1515 static QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
1516 				int ring_type, uint32_t num_entries,
1517 				bool cached)
1518 {
1519 	hal_soc_handle_t hal_soc = soc->hal_soc;
1520 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
1521 	uint32_t ring_base_align = 32;
1522 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
1523 
1524 	if (srng->base_vaddr_unaligned) {
1525 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1526 			  FL("Ring type: %d, is already allocated"), ring_type);
1527 		return QDF_STATUS_SUCCESS;
1528 	}
1529 
1530 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
1531 	srng->hal_srng = NULL;
1532 	srng->alloc_size = num_entries * entry_size;
1533 	srng->num_entries = num_entries;
1534 	srng->cached = cached;
1535 
1536 	if (!cached) {
1537 		srng->base_vaddr_aligned =
1538 		    qdf_aligned_mem_alloc_consistent(
1539 					soc->osdev, &srng->alloc_size,
1540 					&srng->base_vaddr_unaligned,
1541 					&srng->base_paddr_unaligned,
1542 					&srng->base_paddr_aligned,
1543 					ring_base_align);
1544 	} else {
1545 		srng->base_vaddr_aligned = qdf_aligned_malloc(
1546 					&srng->alloc_size,
1547 					&srng->base_vaddr_unaligned,
1548 					&srng->base_paddr_unaligned,
1549 					&srng->base_paddr_aligned,
1550 					ring_base_align);
1551 	}
1552 
1553 	if (!srng->base_vaddr_aligned)
1554 		return QDF_STATUS_E_NOMEM;
1555 
1556 	return QDF_STATUS_SUCCESS;
1557 }
1558 
1559 /*
1560  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
1561  * @soc: DP SOC handle
1562  * @srng: source ring structure
1563  * @ring_type: type of ring
1564  * @ring_num: ring number
1565  *
1566  * Return: None
1567  */
1568 static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
1569 			   int ring_type, int ring_num)
1570 {
1571 	if (!srng->hal_srng) {
1572 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1573 			  FL("Ring type: %d, num:%d not setup"),
1574 			  ring_type, ring_num);
1575 		return;
1576 	}
1577 
1578 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1579 	srng->hal_srng = NULL;
1580 }
1581 
1582 /* TODO: Need this interface from HIF */
1583 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
1584 
1585 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1586 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
1587 			 hal_ring_handle_t hal_ring_hdl)
1588 {
1589 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
1590 	uint32_t hp, tp;
1591 	uint8_t ring_id;
1592 
1593 	if (!int_ctx)
1594 		return hal_srng_access_start(hal_soc, hal_ring_hdl);
1595 
1596 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
1597 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
1598 
1599 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
1600 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
1601 
1602 	return hal_srng_access_start(hal_soc, hal_ring_hdl);
1603 }
1604 
1605 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
1606 			hal_ring_handle_t hal_ring_hdl)
1607 {
1608 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
1609 	uint32_t hp, tp;
1610 	uint8_t ring_id;
1611 
1612 	if (!int_ctx)
1613 		return hal_srng_access_end(hal_soc, hal_ring_hdl);
1614 
1615 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
1616 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
1617 
1618 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
1619 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
1620 
1621 	return hal_srng_access_end(hal_soc, hal_ring_hdl);
1622 }
1623 
/* Log a timer-entry marker for @hist_group_id into the hif event history */
static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
}
1630 
/* Log a timer-exit marker for @hist_group_id into the hif event history */
static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
}
1637 #else
1638 
static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
	/* no-op: WLAN_FEATURE_DP_EVENT_HISTORY disabled */
}
1643 
static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
	/* no-op: WLAN_FEATURE_DP_EVENT_HISTORY disabled */
}
1648 
1649 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1650 
1651 /*
1652  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
1653  * @soc: DP soc handle
1654  * @work_done: work done in softirq context
1655  * @start_time: start time for the softirq
1656  *
1657  * Return: enum with yield code
1658  */
1659 static enum timer_yield_status
1660 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
1661 			  uint64_t start_time)
1662 {
1663 	uint64_t cur_time = qdf_get_log_timestamp();
1664 
1665 	if (!work_done)
1666 		return DP_TIMER_WORK_DONE;
1667 
1668 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
1669 		return DP_TIMER_TIME_EXHAUST;
1670 
1671 	return DP_TIMER_NO_YIELD;
1672 }
1673 
1674 /**
1675  * dp_process_lmac_rings() - Process LMAC rings
1676  * @int_ctx: interrupt context
1677  * @total_budget: budget of work which can be done
1678  *
1679  * Return: work done
1680  */
1681 static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
1682 {
1683 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
1684 	struct dp_soc *soc = int_ctx->soc;
1685 	uint32_t remaining_quota = total_budget;
1686 	struct dp_pdev *pdev = NULL;
1687 	uint32_t work_done  = 0;
1688 	int budget = total_budget;
1689 	int ring = 0;
1690 
1691 	/* Process LMAC interrupts */
1692 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
1693 		int mac_for_pdev = ring;
1694 
1695 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
1696 		if (!pdev)
1697 			continue;
1698 		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1699 			work_done = dp_mon_process(soc, int_ctx, mac_for_pdev,
1700 						   remaining_quota);
1701 			if (work_done)
1702 				intr_stats->num_rx_mon_ring_masks++;
1703 			budget -= work_done;
1704 			if (budget <= 0)
1705 				goto budget_done;
1706 			remaining_quota = budget;
1707 		}
1708 
1709 		if (int_ctx->rxdma2host_ring_mask &
1710 				(1 << mac_for_pdev)) {
1711 			work_done = dp_rxdma_err_process(int_ctx, soc,
1712 							 mac_for_pdev,
1713 							 remaining_quota);
1714 			if (work_done)
1715 				intr_stats->num_rxdma2host_ring_masks++;
1716 			budget -=  work_done;
1717 			if (budget <= 0)
1718 				goto budget_done;
1719 			remaining_quota = budget;
1720 		}
1721 
1722 		if (int_ctx->host2rxdma_ring_mask &
1723 					(1 << mac_for_pdev)) {
1724 			union dp_rx_desc_list_elem_t *desc_list = NULL;
1725 			union dp_rx_desc_list_elem_t *tail = NULL;
1726 			struct dp_srng *rx_refill_buf_ring;
1727 
1728 			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
1729 				rx_refill_buf_ring =
1730 					&soc->rx_refill_buf_ring[mac_for_pdev];
1731 			else
1732 				rx_refill_buf_ring =
1733 					&soc->rx_refill_buf_ring[pdev->lmac_id];
1734 
1735 			intr_stats->num_host2rxdma_ring_masks++;
1736 			DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1737 				     1);
1738 			dp_rx_buffers_replenish(soc, mac_for_pdev,
1739 						rx_refill_buf_ring,
1740 						&soc->rx_desc_buf[mac_for_pdev],
1741 						0, &desc_list, &tail);
1742 		}
1743 	}
1744 
1745 budget_done:
1746 	return total_budget - budget;
1747 }
1748 
1749 /*
1750  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1751  * @dp_ctx: DP SOC handle
1752  * @budget: Number of frames/descriptors that can be processed in one shot
1753  *
1754  * Return: remaining budget/quota for the soc device
1755  */
1756 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
1757 {
1758 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1759 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
1760 	struct dp_soc *soc = int_ctx->soc;
1761 	int ring = 0;
1762 	uint32_t work_done  = 0;
1763 	int budget = dp_budget;
1764 	uint8_t tx_mask = int_ctx->tx_ring_mask;
1765 	uint8_t rx_mask = int_ctx->rx_ring_mask;
1766 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1767 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
1768 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1769 	uint32_t remaining_quota = dp_budget;
1770 
1771 	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
1772 			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
1773 			 reo_status_mask,
1774 			 int_ctx->rx_mon_ring_mask,
1775 			 int_ctx->host2rxdma_ring_mask,
1776 			 int_ctx->rxdma2host_ring_mask);
1777 
1778 	/* Process Tx completion interrupts first to return back buffers */
1779 	while (tx_mask) {
1780 		if (tx_mask & 0x1) {
1781 			work_done = dp_tx_comp_handler(int_ctx,
1782 						       soc,
1783 						       soc->tx_comp_ring[ring].hal_srng,
1784 						       ring, remaining_quota);
1785 
1786 			if (work_done) {
1787 				intr_stats->num_tx_ring_masks[ring]++;
1788 				dp_verbose_debug("tx mask 0x%x ring %d, budget %d, work_done %d",
1789 						 tx_mask, ring, budget,
1790 						 work_done);
1791 			}
1792 
1793 			budget -= work_done;
1794 			if (budget <= 0)
1795 				goto budget_done;
1796 
1797 			remaining_quota = budget;
1798 		}
1799 		tx_mask = tx_mask >> 1;
1800 		ring++;
1801 	}
1802 
1803 	/* Process REO Exception ring interrupt */
1804 	if (rx_err_mask) {
1805 		work_done = dp_rx_err_process(int_ctx, soc,
1806 					      soc->reo_exception_ring.hal_srng,
1807 					      remaining_quota);
1808 
1809 		if (work_done) {
1810 			intr_stats->num_rx_err_ring_masks++;
1811 			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
1812 					 work_done, budget);
1813 		}
1814 
1815 		budget -=  work_done;
1816 		if (budget <= 0) {
1817 			goto budget_done;
1818 		}
1819 		remaining_quota = budget;
1820 	}
1821 
1822 	/* Process Rx WBM release ring interrupt */
1823 	if (rx_wbm_rel_mask) {
1824 		work_done = dp_rx_wbm_err_process(int_ctx, soc,
1825 						  soc->rx_rel_ring.hal_srng,
1826 						  remaining_quota);
1827 
1828 		if (work_done) {
1829 			intr_stats->num_rx_wbm_rel_ring_masks++;
1830 			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
1831 					 work_done, budget);
1832 		}
1833 
1834 		budget -=  work_done;
1835 		if (budget <= 0) {
1836 			goto budget_done;
1837 		}
1838 		remaining_quota = budget;
1839 	}
1840 
1841 	/* Process Rx interrupts */
1842 	if (rx_mask) {
1843 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1844 			if (!(rx_mask & (1 << ring)))
1845 				continue;
1846 			work_done = dp_rx_process(int_ctx,
1847 						  soc->reo_dest_ring[ring].hal_srng,
1848 						  ring,
1849 						  remaining_quota);
1850 			if (work_done) {
1851 				intr_stats->num_rx_ring_masks[ring]++;
1852 				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
1853 						 rx_mask, ring,
1854 						 work_done, budget);
1855 				budget -=  work_done;
1856 				if (budget <= 0)
1857 					goto budget_done;
1858 				remaining_quota = budget;
1859 			}
1860 		}
1861 	}
1862 
1863 	if (reo_status_mask) {
1864 		if (dp_reo_status_ring_handler(int_ctx, soc))
1865 			int_ctx->intr_stats.num_reo_status_ring_masks++;
1866 	}
1867 
1868 	work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
1869 	if (work_done) {
1870 		budget -=  work_done;
1871 		if (budget <= 0)
1872 			goto budget_done;
1873 		remaining_quota = budget;
1874 	}
1875 
1876 	qdf_lro_flush(int_ctx->lro_ctx);
1877 	intr_stats->num_masks++;
1878 
1879 budget_done:
1880 	return dp_budget - budget;
1881 }
1882 
/* dp_interrupt_timer()- timer poll for interrupts
 *
 * @arg: SoC Handle
 *
 * Polls the monitor rings for the lmac mapped to the pdev's current
 * monitor channel band, then re-arms itself (1 ms when work or time ran
 * out, otherwise DP_INTR_POLL_TIMER_MS).
 *
 * Return:
 *
 */
static void dp_interrupt_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *) arg;
	struct dp_pdev *pdev = soc->pdev_list[0];
	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
	uint32_t work_done  = 0, total_work_done = 0;
	int budget = 0xffff;
	uint32_t remaining_quota = budget;
	uint64_t start_time;
	uint32_t lmac_id;
	uint8_t dp_intr_id;

	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	/* Monitor channel band not configured yet; just re-arm */
	if (pdev->mon_chan_band == REG_BAND_UNKNOWN) {
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
		return;
	}

	/* No lmac servicing this band; just re-arm */
	lmac_id = pdev->ch_band_lmac_id_mapping[pdev->mon_chan_band];
	if (qdf_unlikely(lmac_id == DP_MON_INVALID_LMAC_ID)) {
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
		return;
	}

	dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
	dp_srng_record_timer_entry(soc, dp_intr_id);
	start_time = qdf_get_log_timestamp();

	/* Reap monitor rings until out of work, budget or time */
	while (yield == DP_TIMER_NO_YIELD) {
		work_done = dp_mon_process(soc, &soc->intr_ctx[dp_intr_id],
					   lmac_id, remaining_quota);
		if (work_done) {
			budget -=  work_done;
			if (budget <= 0) {
				yield = DP_TIMER_WORK_EXHAUST;
				goto budget_done;
			}
			remaining_quota = budget;
			total_work_done += work_done;
		}

		yield = dp_should_timer_irq_yield(soc, total_work_done,
						  start_time);
		total_work_done = 0;
	}

budget_done:
	if (yield == DP_TIMER_WORK_EXHAUST ||
	    yield == DP_TIMER_TIME_EXHAUST)
		qdf_timer_mod(&soc->int_timer, 1);
	else
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	dp_srng_record_timer_exit(soc, dp_intr_id);
}
1947 
#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/*
 * dp_is_mon_mask_valid() - check whether this interrupt context services
 * any monitor ring (and therefore needs an lmac mapping/event history)
 * @soc: DP soc handle
 * @intr_ctx: interrupt context
 *
 * Return: true when a monitor ring mask bit is set for this context
 */
static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
					struct dp_intr *intr_ctx)
{
	return intr_ctx->rx_mon_ring_mask != 0;
}
#else
static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
					struct dp_intr *intr_ctx)
{
	return false;
}
#endif
1964 
1965 /*
1966  * dp_soc_attach_poll() - Register handlers for DP interrupts
1967  * @txrx_soc: DP SOC handle
1968  *
1969  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1970  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1971  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1972  *
1973  * Return: 0 for success, nonzero for failure.
1974  */
1975 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
1976 {
1977 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1978 	int i;
1979 	int lmac_id = 0;
1980 
1981 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
1982 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
1983 	soc->intr_mode = DP_INTR_POLL;
1984 
1985 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1986 		soc->intr_ctx[i].dp_intr_id = i;
1987 		soc->intr_ctx[i].tx_ring_mask =
1988 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1989 		soc->intr_ctx[i].rx_ring_mask =
1990 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1991 		soc->intr_ctx[i].rx_mon_ring_mask =
1992 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1993 		soc->intr_ctx[i].rx_err_ring_mask =
1994 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1995 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1996 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1997 		soc->intr_ctx[i].reo_status_ring_mask =
1998 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1999 		soc->intr_ctx[i].rxdma2host_ring_mask =
2000 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
2001 		soc->intr_ctx[i].soc = soc;
2002 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
2003 
2004 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
2005 			hif_event_history_init(soc->hif_handle, i);
2006 			soc->mon_intr_id_lmac_map[lmac_id] = i;
2007 			lmac_id++;
2008 		}
2009 	}
2010 
2011 	qdf_timer_init(soc->osdev, &soc->int_timer,
2012 			dp_interrupt_timer, (void *)soc,
2013 			QDF_TIMER_TYPE_WAKE_APPS);
2014 
2015 	return QDF_STATUS_SUCCESS;
2016 }
2017 
2018 /**
2019  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
2020  * soc: DP soc handle
2021  *
2022  * Set the appropriate interrupt mode flag in the soc
2023  */
2024 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
2025 {
2026 	uint32_t msi_base_data, msi_vector_start;
2027 	int msi_vector_count, ret;
2028 
2029 	soc->intr_mode = DP_INTR_INTEGRATED;
2030 
2031 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2032 	    (soc->cdp_soc.ol_ops->get_con_mode &&
2033 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
2034 		soc->intr_mode = DP_INTR_POLL;
2035 	} else {
2036 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
2037 						  &msi_vector_count,
2038 						  &msi_base_data,
2039 						  &msi_vector_start);
2040 		if (ret)
2041 			return;
2042 
2043 		soc->intr_mode = DP_INTR_MSI;
2044 	}
2045 }
2046 
2047 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
2048 #if defined(DP_INTR_POLL_BOTH)
2049 /*
2050  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
2051  * @txrx_soc: DP SOC handle
2052  *
2053  * Call the appropriate attach function based on the mode of operation.
2054  * This is a WAR for enabling monitor mode.
2055  *
2056  * Return: 0 for success. nonzero for failure.
2057  */
2058 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2059 {
2060 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2061 
2062 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2063 	    (soc->cdp_soc.ol_ops->get_con_mode &&
2064 	     soc->cdp_soc.ol_ops->get_con_mode() ==
2065 	     QDF_GLOBAL_MONITOR_MODE)) {
2066 		dp_info("Poll mode");
2067 		return dp_soc_attach_poll(txrx_soc);
2068 	} else {
2069 		dp_info("Interrupt  mode");
2070 		return dp_soc_interrupt_attach(txrx_soc);
2071 	}
2072 }
2073 #else
2074 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
2075 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2076 {
2077 	return dp_soc_attach_poll(txrx_soc);
2078 }
2079 #else
2080 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2081 {
2082 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2083 
2084 	if (hif_is_polled_mode_enabled(soc->hif_handle))
2085 		return dp_soc_attach_poll(txrx_soc);
2086 	else
2087 		return dp_soc_interrupt_attach(txrx_soc);
2088 }
2089 #endif
2090 #endif
2091 
/*
 * dp_soc_interrupt_map_calculate_integrated() - build the hw IRQ id map for
 *		one interrupt context when using integrated (legacy) interrupts
 * @soc: DP SOC handle
 * @intr_ctx_num: interrupt context number
 * @irq_id_map: output array of IRQ ids serviced by this context
 * @num_irq_r: output count of valid entries written to @irq_id_map
 */
static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
{
	int j;
	int num_irq = 0;

	/* Per-context ring masks from SoC config; bit j selects ring j */
	int tx_mask =
		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask =
		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask =
		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	soc->intr_mode = DP_INTR_INTEGRATED;

	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {

		/* IRQ ids are derived by offsetting down from the ring-1 id */
		if (tx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(wbm2host_tx_completions_ring1 - j);
		}

		if (rx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(reo2host_destination_ring1 - j);
		}

		if (rxdma2host_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				rxdma2host_destination_ring_mac1 - j;
		}

		if (host2rxdma_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_host_buf_ring_mac1 -	j;
		}

		if (host2rxdma_mon_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_monitor_ring1 - j;
		}

		/* A monitor ring bit expands to three hw interrupt sources */
		if (rx_mon_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				ppdu_end_interrupts_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_status_ring_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_destination_mac1 - j;
		}

		/* These three interrupts are not per-ring-number */
		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = wbm2host_rx_release;

		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_exception;

		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_status;

	}
	*num_irq_r = num_irq;
}
2167 
2168 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
2169 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
2170 		int msi_vector_count, int msi_vector_start)
2171 {
2172 	int tx_mask = wlan_cfg_get_tx_ring_mask(
2173 					soc->wlan_cfg_ctx, intr_ctx_num);
2174 	int rx_mask = wlan_cfg_get_rx_ring_mask(
2175 					soc->wlan_cfg_ctx, intr_ctx_num);
2176 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
2177 					soc->wlan_cfg_ctx, intr_ctx_num);
2178 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
2179 					soc->wlan_cfg_ctx, intr_ctx_num);
2180 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
2181 					soc->wlan_cfg_ctx, intr_ctx_num);
2182 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
2183 					soc->wlan_cfg_ctx, intr_ctx_num);
2184 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
2185 					soc->wlan_cfg_ctx, intr_ctx_num);
2186 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
2187 					soc->wlan_cfg_ctx, intr_ctx_num);
2188 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
2189 					soc->wlan_cfg_ctx, intr_ctx_num);
2190 
2191 	unsigned int vector =
2192 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
2193 	int num_irq = 0;
2194 
2195 	soc->intr_mode = DP_INTR_MSI;
2196 
2197 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
2198 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
2199 	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask)
2200 		irq_id_map[num_irq++] =
2201 			pld_get_msi_irq(soc->osdev->dev, vector);
2202 
2203 	*num_irq_r = num_irq;
2204 }
2205 
2206 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
2207 				    int *irq_id_map, int *num_irq)
2208 {
2209 	int msi_vector_count, ret;
2210 	uint32_t msi_base_data, msi_vector_start;
2211 
2212 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
2213 					    &msi_vector_count,
2214 					    &msi_base_data,
2215 					    &msi_vector_start);
2216 	if (ret)
2217 		return dp_soc_interrupt_map_calculate_integrated(soc,
2218 				intr_ctx_num, irq_id_map, num_irq);
2219 
2220 	else
2221 		dp_soc_interrupt_map_calculate_msi(soc,
2222 				intr_ctx_num, irq_id_map, num_irq,
2223 				msi_vector_count, msi_vector_start);
2224 }
2225 
2226 /*
2227  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
2228  * @txrx_soc: DP SOC handle
2229  *
2230  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
2231  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
2232  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
2233  *
2234  * Return: 0 for success. nonzero for failure.
2235  */
2236 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
2237 {
2238 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2239 
2240 	int i = 0;
2241 	int num_irq = 0;
2242 
2243 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2244 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
2245 
2246 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2247 		int ret = 0;
2248 
2249 		/* Map of IRQ ids registered with one interrupt context */
2250 		int irq_id_map[HIF_MAX_GRP_IRQ];
2251 
2252 		int tx_mask =
2253 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
2254 		int rx_mask =
2255 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
2256 		int rx_mon_mask =
2257 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
2258 		int rx_err_ring_mask =
2259 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
2260 		int rx_wbm_rel_ring_mask =
2261 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
2262 		int reo_status_ring_mask =
2263 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
2264 		int rxdma2host_ring_mask =
2265 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
2266 		int host2rxdma_ring_mask =
2267 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
2268 		int host2rxdma_mon_ring_mask =
2269 			wlan_cfg_get_host2rxdma_mon_ring_mask(
2270 				soc->wlan_cfg_ctx, i);
2271 
2272 		soc->intr_ctx[i].dp_intr_id = i;
2273 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
2274 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
2275 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
2276 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
2277 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
2278 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
2279 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
2280 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
2281 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
2282 			 host2rxdma_mon_ring_mask;
2283 
2284 		soc->intr_ctx[i].soc = soc;
2285 
2286 		num_irq = 0;
2287 
2288 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
2289 					       &num_irq);
2290 
2291 		ret = hif_register_ext_group(soc->hif_handle,
2292 				num_irq, irq_id_map, dp_service_srngs,
2293 				&soc->intr_ctx[i], "dp_intr",
2294 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
2295 
2296 		if (ret) {
2297 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2298 			FL("failed, ret = %d"), ret);
2299 
2300 			return QDF_STATUS_E_FAILURE;
2301 		}
2302 
2303 		hif_event_history_init(soc->hif_handle, i);
2304 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
2305 	}
2306 
2307 	hif_configure_ext_group_interrupts(soc->hif_handle);
2308 	hif_config_irq_set_perf_affinity_hint(soc->hif_handle);
2309 
2310 	return QDF_STATUS_SUCCESS;
2311 }
2312 
2313 /*
2314  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
2315  * @txrx_soc: DP SOC handle
2316  *
2317  * Return: none
2318  */
2319 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
2320 {
2321 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2322 	int i;
2323 
2324 	if (soc->intr_mode == DP_INTR_POLL) {
2325 		qdf_timer_free(&soc->int_timer);
2326 	} else {
2327 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
2328 	}
2329 
2330 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2331 		soc->intr_ctx[i].tx_ring_mask = 0;
2332 		soc->intr_ctx[i].rx_ring_mask = 0;
2333 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
2334 		soc->intr_ctx[i].rx_err_ring_mask = 0;
2335 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
2336 		soc->intr_ctx[i].reo_status_ring_mask = 0;
2337 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
2338 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
2339 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
2340 
2341 		hif_event_history_deinit(soc->hif_handle, i);
2342 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
2343 	}
2344 
2345 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2346 		    REG_BAND_UNKNOWN * sizeof(int), DP_MON_INVALID_LMAC_ID);
2347 }
2348 
/* Heuristic per-client traffic averages used to size the common link
 * descriptor pool in dp_hw_link_desc_pool_banks_alloc().
 */
#define AVG_MAX_MPDUS_PER_TID 128
#define AVG_TIDS_PER_CLIENT 2
#define AVG_FLOWS_PER_TID 2
#define AVG_MSDUS_PER_FLOW 128
#define AVG_MSDUS_PER_MPDU 4
2354 
2355 /*
2356  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
2357  * @soc: DP SOC handle
2358  * @mac_id: mac id
2359  *
2360  * Return: none
2361  */
2362 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
2363 {
2364 	struct qdf_mem_multi_page_t *pages;
2365 
2366 	if (mac_id != WLAN_INVALID_PDEV_ID)
2367 		pages = &soc->mon_link_desc_pages[mac_id];
2368 	else
2369 		pages = &soc->link_desc_pages;
2370 
2371 	if (pages->dma_pages) {
2372 		wlan_minidump_remove((void *)
2373 				     pages->dma_pages->page_v_addr_start);
2374 		qdf_mem_multi_pages_free(soc->osdev, pages, 0, false);
2375 	}
2376 }
2377 
2378 /*
2379  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
2380  * @soc: DP SOC handle
2381  * @mac_id: mac id
2382  *
2383  * Allocates memory pages for link descriptors, the page size is 4K for
2384  * MCL and 2MB for WIN. if the mac_id is invalid link descriptor pages are
2385  * allocated for regular RX/TX and if the there is a proper mac_id link
2386  * descriptors are allocated for RX monitor mode.
2387  *
2388  * Return: QDF_STATUS_SUCCESS: Success
2389  *	   QDF_STATUS_E_FAILURE: Failure
2390  */
2391 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
2392 {
2393 	hal_soc_handle_t hal_soc = soc->hal_soc;
2394 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
2395 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
2396 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
2397 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
2398 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
2399 	uint32_t num_mpdu_links_per_queue_desc =
2400 		hal_num_mpdu_links_per_queue_desc(hal_soc);
2401 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
2402 	uint32_t *total_link_descs, total_mem_size;
2403 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
2404 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
2405 	uint32_t num_entries;
2406 	struct qdf_mem_multi_page_t *pages;
2407 	struct dp_srng *dp_srng;
2408 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
2409 
2410 	/* Only Tx queue descriptors are allocated from common link descriptor
2411 	 * pool Rx queue descriptors are not included in this because (REO queue
2412 	 * extension descriptors) they are expected to be allocated contiguously
2413 	 * with REO queue descriptors
2414 	 */
2415 	if (mac_id != WLAN_INVALID_PDEV_ID) {
2416 		pages = &soc->mon_link_desc_pages[mac_id];
2417 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
2418 		num_entries = dp_srng->alloc_size /
2419 			hal_srng_get_entrysize(soc->hal_soc,
2420 					       RXDMA_MONITOR_DESC);
2421 		total_link_descs = &soc->total_mon_link_descs[mac_id];
2422 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
2423 			      MINIDUMP_STR_SIZE);
2424 	} else {
2425 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2426 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
2427 
2428 		num_mpdu_queue_descs = num_mpdu_link_descs /
2429 			num_mpdu_links_per_queue_desc;
2430 
2431 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2432 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
2433 			num_msdus_per_link_desc;
2434 
2435 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2436 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
2437 
2438 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
2439 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
2440 
2441 		pages = &soc->link_desc_pages;
2442 		total_link_descs = &soc->total_link_descs;
2443 		qdf_str_lcopy(minidump_str, "link_desc_bank",
2444 			      MINIDUMP_STR_SIZE);
2445 	}
2446 
2447 	/* Round up to power of 2 */
2448 	*total_link_descs = 1;
2449 	while (*total_link_descs < num_entries)
2450 		*total_link_descs <<= 1;
2451 
2452 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2453 		  FL("total_link_descs: %u, link_desc_size: %d"),
2454 		  *total_link_descs, link_desc_size);
2455 	total_mem_size =  *total_link_descs * link_desc_size;
2456 	total_mem_size += link_desc_align;
2457 
2458 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2459 		  FL("total_mem_size: %d"), total_mem_size);
2460 
2461 	dp_set_max_page_size(pages, max_alloc_size);
2462 	qdf_mem_multi_pages_alloc(soc->osdev,
2463 				  pages,
2464 				  link_desc_size,
2465 				  *total_link_descs,
2466 				  0, false);
2467 	if (!pages->num_pages) {
2468 		dp_err("Multi page alloc fail for hw link desc pool");
2469 		return QDF_STATUS_E_FAULT;
2470 	}
2471 
2472 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
2473 			  pages->num_pages * pages->page_size,
2474 			  soc->ctrl_psoc,
2475 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
2476 			  "hw_link_desc_bank");
2477 
2478 	return QDF_STATUS_SUCCESS;
2479 }
2480 
2481 /*
2482  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
2483  * @soc: DP SOC handle
2484  *
2485  * Return: none
2486  */
2487 static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
2488 {
2489 	uint32_t i;
2490 	uint32_t size = soc->wbm_idle_scatter_buf_size;
2491 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
2492 	qdf_dma_addr_t paddr;
2493 
2494 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
2495 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2496 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
2497 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
2498 			if (vaddr) {
2499 				qdf_mem_free_consistent(soc->osdev,
2500 							soc->osdev->dev,
2501 							size,
2502 							vaddr,
2503 							paddr,
2504 							0);
2505 				vaddr = NULL;
2506 			}
2507 		}
2508 	} else {
2509 		wlan_minidump_remove(vaddr);
2510 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
2511 	}
2512 }
2513 
2514 /*
2515  * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
2516  * @soc: DP SOC handle
2517  *
2518  * Allocate memory for WBM_IDLE_LINK srng ring if the number of
2519  * link descriptors is less then the max_allocated size. else
2520  * allocate memory for wbm_idle_scatter_buffer.
2521  *
2522  * Return: QDF_STATUS_SUCCESS: success
2523  *         QDF_STATUS_E_NO_MEM: No memory (Failure)
2524  */
2525 static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
2526 {
2527 	uint32_t entry_size, i;
2528 	uint32_t total_mem_size;
2529 	qdf_dma_addr_t *baseaddr = NULL;
2530 	struct dp_srng *dp_srng;
2531 	uint32_t ring_type;
2532 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
2533 	uint32_t tlds;
2534 
2535 	ring_type = WBM_IDLE_LINK;
2536 	dp_srng = &soc->wbm_idle_link_ring;
2537 	tlds = soc->total_link_descs;
2538 
2539 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
2540 	total_mem_size = entry_size * tlds;
2541 
2542 	if (total_mem_size <= max_alloc_size) {
2543 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
2544 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2545 				  FL("Link desc idle ring setup failed"));
2546 			goto fail;
2547 		}
2548 
2549 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
2550 				  soc->wbm_idle_link_ring.alloc_size,
2551 				  soc->ctrl_psoc,
2552 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
2553 				  "wbm_idle_link_ring");
2554 	} else {
2555 		uint32_t num_scatter_bufs;
2556 		uint32_t num_entries_per_buf;
2557 		uint32_t buf_size = 0;
2558 
2559 		soc->wbm_idle_scatter_buf_size =
2560 			hal_idle_list_scatter_buf_size(soc->hal_soc);
2561 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
2562 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
2563 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
2564 					soc->hal_soc, total_mem_size,
2565 					soc->wbm_idle_scatter_buf_size);
2566 
2567 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
2568 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2569 				  FL("scatter bufs size out of bounds"));
2570 			goto fail;
2571 		}
2572 
2573 		for (i = 0; i < num_scatter_bufs; i++) {
2574 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
2575 			buf_size = soc->wbm_idle_scatter_buf_size;
2576 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
2577 				qdf_mem_alloc_consistent(soc->osdev,
2578 							 soc->osdev->dev,
2579 							 buf_size,
2580 							 baseaddr);
2581 
2582 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2583 				QDF_TRACE(QDF_MODULE_ID_DP,
2584 					  QDF_TRACE_LEVEL_ERROR,
2585 					  FL("Scatter lst memory alloc fail"));
2586 				goto fail;
2587 			}
2588 		}
2589 		soc->num_scatter_bufs = num_scatter_bufs;
2590 	}
2591 	return QDF_STATUS_SUCCESS;
2592 
2593 fail:
2594 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2595 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
2596 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
2597 
2598 		if (vaddr) {
2599 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2600 						soc->wbm_idle_scatter_buf_size,
2601 						vaddr,
2602 						paddr, 0);
2603 			vaddr = NULL;
2604 		}
2605 	}
2606 	return QDF_STATUS_E_NOMEM;
2607 }
2608 
2609 /*
2610  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
2611  * @soc: DP SOC handle
2612  *
2613  * Return: QDF_STATUS_SUCCESS: success
2614  *         QDF_STATUS_E_FAILURE: failure
2615  */
2616 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
2617 {
2618 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
2619 
2620 	if (dp_srng->base_vaddr_unaligned) {
2621 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
2622 			return QDF_STATUS_E_FAILURE;
2623 	}
2624 	return QDF_STATUS_SUCCESS;
2625 }
2626 
2627 /*
2628  * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
2629  * @soc: DP SOC handle
2630  *
2631  * Return: None
2632  */
2633 static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
2634 {
2635 	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
2636 }
2637 
2638 /*
2639  * dp_hw_link_desc_ring_replenish() - Replenish hw link desc rings
2640  * @soc: DP SOC handle
2641  * @mac_id: mac id
2642  *
2643  * Return: None
2644  */
2645 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
2646 {
2647 	uint32_t cookie = 0;
2648 	uint32_t page_idx = 0;
2649 	struct qdf_mem_multi_page_t *pages;
2650 	struct qdf_mem_dma_page_t *dma_pages;
2651 	uint32_t offset = 0;
2652 	uint32_t count = 0;
2653 	void *desc_srng;
2654 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
2655 	uint32_t total_link_descs;
2656 	uint32_t scatter_buf_num;
2657 	uint32_t num_entries_per_buf = 0;
2658 	uint32_t rem_entries;
2659 	uint32_t num_descs_per_page;
2660 	uint32_t num_scatter_bufs = 0;
2661 	uint8_t *scatter_buf_ptr;
2662 	void *desc;
2663 
2664 	num_scatter_bufs = soc->num_scatter_bufs;
2665 
2666 	if (mac_id == WLAN_INVALID_PDEV_ID) {
2667 		pages = &soc->link_desc_pages;
2668 		total_link_descs = soc->total_link_descs;
2669 		desc_srng = soc->wbm_idle_link_ring.hal_srng;
2670 	} else {
2671 		pages = &soc->mon_link_desc_pages[mac_id];
2672 		total_link_descs = soc->total_mon_link_descs[mac_id];
2673 		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
2674 	}
2675 
2676 	dma_pages = pages->dma_pages;
2677 	do {
2678 		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
2679 			     pages->page_size);
2680 		page_idx++;
2681 	} while (page_idx < pages->num_pages);
2682 
2683 	if (desc_srng) {
2684 		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
2685 		page_idx = 0;
2686 		count = 0;
2687 		offset = 0;
2688 		pages = &soc->link_desc_pages;
2689 		while ((desc = hal_srng_src_get_next(soc->hal_soc,
2690 						     desc_srng)) &&
2691 			(count < total_link_descs)) {
2692 			page_idx = count / pages->num_element_per_page;
2693 			offset = count % pages->num_element_per_page;
2694 			cookie = LINK_DESC_COOKIE(count, page_idx);
2695 
2696 			hal_set_link_desc_addr(desc, cookie,
2697 					       dma_pages[page_idx].page_p_addr
2698 					       + (offset * link_desc_size));
2699 			count++;
2700 		}
2701 		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
2702 	} else {
2703 		/* Populate idle list scatter buffers with link descriptor
2704 		 * pointers
2705 		 */
2706 		scatter_buf_num = 0;
2707 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
2708 					soc->hal_soc,
2709 					soc->wbm_idle_scatter_buf_size);
2710 
2711 		scatter_buf_ptr = (uint8_t *)(
2712 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
2713 		rem_entries = num_entries_per_buf;
2714 		pages = &soc->link_desc_pages;
2715 		page_idx = 0; count = 0;
2716 		offset = 0;
2717 		num_descs_per_page = pages->num_element_per_page;
2718 
2719 		while (count < total_link_descs) {
2720 			page_idx = count / num_descs_per_page;
2721 			offset = count % num_descs_per_page;
2722 			cookie = LINK_DESC_COOKIE(count, page_idx);
2723 			hal_set_link_desc_addr((void *)scatter_buf_ptr,
2724 					       cookie,
2725 					       dma_pages[page_idx].page_p_addr +
2726 					       (offset * link_desc_size));
2727 			rem_entries--;
2728 			if (rem_entries) {
2729 				scatter_buf_ptr += link_desc_size;
2730 			} else {
2731 				rem_entries = num_entries_per_buf;
2732 				scatter_buf_num++;
2733 				if (scatter_buf_num >= num_scatter_bufs)
2734 					break;
2735 				scatter_buf_ptr = (uint8_t *)
2736 					(soc->wbm_idle_scatter_buf_base_vaddr[
2737 					 scatter_buf_num]);
2738 			}
2739 			count++;
2740 		}
2741 		/* Setup link descriptor idle list in HW */
2742 		hal_setup_link_idle_list(soc->hal_soc,
2743 			soc->wbm_idle_scatter_buf_base_paddr,
2744 			soc->wbm_idle_scatter_buf_base_vaddr,
2745 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
2746 			(uint32_t)(scatter_buf_ptr -
2747 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
2748 			scatter_buf_num-1])), total_link_descs);
2749 	}
2750 }
2751 
/* REO destination ring sizes per target.
 * Emulation builds (CONFIG_WIFI_EMULATION_WIFI_3_0) use tiny 8-entry rings.
 * NOTE(review): IPA builds use 1023 rather than 1024 for QCA6290/QCA8074 —
 * presumably an IPA-specific sizing constraint; confirm before changing.
 */
#ifdef IPA_OFFLOAD
#define REO_DST_RING_SIZE_QCA6290 1023
#ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
#define REO_DST_RING_SIZE_QCA8074 1023
#define REO_DST_RING_SIZE_QCN9000 2048
#else
#define REO_DST_RING_SIZE_QCA8074 8
#define REO_DST_RING_SIZE_QCN9000 8
#endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */

#else

#define REO_DST_RING_SIZE_QCA6290 1024
#ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
#define REO_DST_RING_SIZE_QCA8074 2048
#define REO_DST_RING_SIZE_QCN9000 2048
#else
#define REO_DST_RING_SIZE_QCA8074 8
#define REO_DST_RING_SIZE_QCN9000 8
#endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
#endif /* IPA_OFFLOAD */
2773 
#ifndef FEATURE_WDS
/* WDS disabled: provide no-op attach/detach stubs so callers need no
 * feature checks.
 */
static void dp_soc_wds_attach(struct dp_soc *soc)
{
}

static void dp_soc_wds_detach(struct dp_soc *soc)
{
}
#endif
2784  * dp_soc_reset_ring_map() - Reset cpu ring map
2785  * @soc: Datapath soc handler
2786  *
2787  * This api resets the default cpu ring map
2788  */
2789 
2790 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2791 {
2792 	uint8_t i;
2793 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2794 
2795 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2796 		switch (nss_config) {
2797 		case dp_nss_cfg_first_radio:
2798 			/*
2799 			 * Setting Tx ring map for one nss offloaded radio
2800 			 */
2801 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2802 			break;
2803 
2804 		case dp_nss_cfg_second_radio:
2805 			/*
2806 			 * Setting Tx ring for two nss offloaded radios
2807 			 */
2808 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2809 			break;
2810 
2811 		case dp_nss_cfg_dbdc:
2812 			/*
2813 			 * Setting Tx ring map for 2 nss offloaded radios
2814 			 */
2815 			soc->tx_ring_map[i] =
2816 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2817 			break;
2818 
2819 		case dp_nss_cfg_dbtc:
2820 			/*
2821 			 * Setting Tx ring map for 3 nss offloaded radios
2822 			 */
2823 			soc->tx_ring_map[i] =
2824 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2825 			break;
2826 
2827 		default:
2828 			dp_err("tx_ring_map failed due to invalid nss cfg");
2829 			break;
2830 		}
2831 	}
2832 }
2833 
2834 /*
2835  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
2836  * @dp_soc - DP soc handle
2837  * @ring_type - ring type
2838  * @ring_num - ring_num
2839  *
2840  * return 0 or 1
2841  */
2842 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
2843 {
2844 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2845 	uint8_t status = 0;
2846 
2847 	switch (ring_type) {
2848 	case WBM2SW_RELEASE:
2849 	case REO_DST:
2850 	case RXDMA_BUF:
2851 		status = ((nss_config) & (1 << ring_num));
2852 		break;
2853 	default:
2854 		break;
2855 	}
2856 
2857 	return status;
2858 }
2859 
2860 /*
2861  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
2862  *					  unused WMAC hw rings
2863  * @dp_soc - DP Soc handle
2864  * @mac_num - wmac num
2865  *
2866  * Return: Return void
2867  */
2868 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
2869 						int mac_num)
2870 {
2871 	int *grp_mask = NULL;
2872 	int group_number;
2873 
2874 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2875 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2876 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2877 					  group_number, 0x0);
2878 
2879 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
2880 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2881 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
2882 				      group_number, 0x0);
2883 
2884 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
2885 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2886 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
2887 					  group_number, 0x0);
2888 
2889 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
2890 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2891 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
2892 					      group_number, 0x0);
2893 }
2894 
2895 /*
2896  * dp_soc_reset_intr_mask() - reset interrupt mask
2897  * @dp_soc - DP Soc handle
2898  *
2899  * Return: Return void
2900  */
2901 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2902 {
2903 	uint8_t j;
2904 	int *grp_mask = NULL;
2905 	int group_number, mask, num_ring;
2906 
2907 	/* number of tx ring */
2908 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2909 
2910 	/*
2911 	 * group mask for tx completion  ring.
2912 	 */
2913 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2914 
2915 	/* loop and reset the mask for only offloaded ring */
2916 	for (j = 0; j < num_ring; j++) {
2917 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2918 			continue;
2919 		}
2920 
2921 		/*
2922 		 * Group number corresponding to tx offloaded ring.
2923 		 */
2924 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2925 		if (group_number < 0) {
2926 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2927 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2928 					WBM2SW_RELEASE, j);
2929 			return;
2930 		}
2931 
2932 		/* reset the tx mask for offloaded ring */
2933 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2934 		mask &= (~(1 << j));
2935 
2936 		/*
2937 		 * reset the interrupt mask for offloaded ring.
2938 		 */
2939 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2940 	}
2941 
2942 	/* number of rx rings */
2943 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2944 
2945 	/*
2946 	 * group mask for reo destination ring.
2947 	 */
2948 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2949 
2950 	/* loop and reset the mask for only offloaded ring */
2951 	for (j = 0; j < num_ring; j++) {
2952 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2953 			continue;
2954 		}
2955 
2956 		/*
2957 		 * Group number corresponding to rx offloaded ring.
2958 		 */
2959 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2960 		if (group_number < 0) {
2961 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2962 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2963 					REO_DST, j);
2964 			return;
2965 		}
2966 
2967 		/* set the interrupt mask for offloaded ring */
2968 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2969 		mask &= (~(1 << j));
2970 
2971 		/*
2972 		 * set the interrupt mask to zero for rx offloaded radio.
2973 		 */
2974 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2975 	}
2976 
2977 	/*
2978 	 * group mask for Rx buffer refill ring
2979 	 */
2980 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2981 
2982 	/* loop and reset the mask for only offloaded ring */
2983 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2984 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
2985 
2986 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2987 			continue;
2988 		}
2989 
2990 		/*
2991 		 * Group number corresponding to rx offloaded ring.
2992 		 */
2993 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
2994 		if (group_number < 0) {
2995 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2996 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2997 					REO_DST, lmac_id);
2998 			return;
2999 		}
3000 
3001 		/* set the interrupt mask for offloaded ring */
3002 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
3003 				group_number);
3004 		mask &= (~(1 << lmac_id));
3005 
3006 		/*
3007 		 * set the interrupt mask to zero for rx offloaded radio.
3008 		 */
3009 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
3010 			group_number, mask);
3011 	}
3012 }
3013 
3014 #ifdef IPA_OFFLOAD
3015 /**
3016  * dp_reo_remap_config() - configure reo remap register value based
3017  *                         nss configuration.
3018  *		based on offload_radio value below remap configuration
3019  *		get applied.
3020  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
3021  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
3022  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
3023  *		3 - both Radios handled by NSS (remap not required)
3024  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
3025  *
3026  * @remap1: output parameter indicates reo remap 1 register value
3027  * @remap2: output parameter indicates reo remap 2 register value
3028  * Return: bool type, true if remap is configured else false.
3029  */
3030 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2)
3031 {
3032 	uint32_t ring[4] = {REO_REMAP_SW1, REO_REMAP_SW2,
3033 						REO_REMAP_SW3};
3034 	hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3035 				      3, remap1, remap2);
3036 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
3037 
3038 	return true;
3039 }
3040 
3041 /**
3042  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
3043  *
3044  * @tx_ring_num: Tx ring number
3045  * @tx_ipa_ring_sz: Return param only updated for IPA.
3046  *
3047  * Return: None
3048  */
3049 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz)
3050 {
3051 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
3052 		*tx_ipa_ring_sz = WLAN_CFG_IPA_TX_RING_SIZE;
3053 }
3054 
3055 /**
3056  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
3057  *
3058  * @tx_comp_ring_num: Tx comp ring number
3059  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
3060  *
3061  * Return: None
3062  */
3063 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
3064 					 int *tx_comp_ipa_ring_sz)
3065 {
3066 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
3067 		*tx_comp_ipa_ring_sz = WLAN_CFG_IPA_TX_COMP_RING_SIZE;
3068 }
3069 #else
3070 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
3071 {
3072 	uint8_t num = 0;
3073 
3074 	switch (value) {
3075 	case 0xF:
3076 		num = 4;
3077 		ring[0] = REO_REMAP_SW1;
3078 		ring[1] = REO_REMAP_SW2;
3079 		ring[2] = REO_REMAP_SW3;
3080 		ring[3] = REO_REMAP_SW4;
3081 		break;
3082 	case 0xE:
3083 		num = 3;
3084 		ring[0] = REO_REMAP_SW2;
3085 		ring[1] = REO_REMAP_SW3;
3086 		ring[2] = REO_REMAP_SW4;
3087 		break;
3088 	case 0xD:
3089 		num = 3;
3090 		ring[0] = REO_REMAP_SW1;
3091 		ring[1] = REO_REMAP_SW3;
3092 		ring[2] = REO_REMAP_SW4;
3093 		break;
3094 	case 0xC:
3095 		num = 2;
3096 		ring[0] = REO_REMAP_SW3;
3097 		ring[1] = REO_REMAP_SW4;
3098 		break;
3099 	case 0xB:
3100 		num = 3;
3101 		ring[0] = REO_REMAP_SW1;
3102 		ring[1] = REO_REMAP_SW2;
3103 		ring[2] = REO_REMAP_SW4;
3104 		break;
3105 	case 0xA:
3106 		num = 2;
3107 		ring[0] = REO_REMAP_SW2;
3108 		ring[1] = REO_REMAP_SW4;
3109 		break;
3110 	case 0x9:
3111 		num = 2;
3112 		ring[0] = REO_REMAP_SW1;
3113 		ring[1] = REO_REMAP_SW4;
3114 		break;
3115 	case 0x8:
3116 		num = 1;
3117 		ring[0] = REO_REMAP_SW4;
3118 		break;
3119 	case 0x7:
3120 		num = 3;
3121 		ring[0] = REO_REMAP_SW1;
3122 		ring[1] = REO_REMAP_SW2;
3123 		ring[2] = REO_REMAP_SW3;
3124 		break;
3125 	case 0x6:
3126 		num = 2;
3127 		ring[0] = REO_REMAP_SW2;
3128 		ring[1] = REO_REMAP_SW3;
3129 		break;
3130 	case 0x5:
3131 		num = 2;
3132 		ring[0] = REO_REMAP_SW1;
3133 		ring[1] = REO_REMAP_SW3;
3134 		break;
3135 	case 0x4:
3136 		num = 1;
3137 		ring[0] = REO_REMAP_SW3;
3138 		break;
3139 	case 0x3:
3140 		num = 2;
3141 		ring[0] = REO_REMAP_SW1;
3142 		ring[1] = REO_REMAP_SW2;
3143 		break;
3144 	case 0x2:
3145 		num = 1;
3146 		ring[0] = REO_REMAP_SW2;
3147 		break;
3148 	case 0x1:
3149 		num = 1;
3150 		ring[0] = REO_REMAP_SW1;
3151 		break;
3152 	}
3153 	return num;
3154 }
3155 
3156 static bool dp_reo_remap_config(struct dp_soc *soc,
3157 				uint32_t *remap1,
3158 				uint32_t *remap2)
3159 {
3160 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3161 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
3162 	uint8_t target_type, num;
3163 	uint32_t ring[4];
3164 	uint32_t value;
3165 
3166 	target_type = hal_get_target_type(soc->hal_soc);
3167 
3168 	switch (offload_radio) {
3169 	case dp_nss_cfg_default:
3170 		value = reo_config & 0xF;
3171 		num = dp_reo_ring_selection(value, ring);
3172 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3173 					      num, remap1, remap2);
3174 
3175 		break;
3176 	case dp_nss_cfg_first_radio:
3177 		value = reo_config & 0xE;
3178 		num = dp_reo_ring_selection(value, ring);
3179 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3180 					      num, remap1, remap2);
3181 
3182 		break;
3183 	case dp_nss_cfg_second_radio:
3184 		value = reo_config & 0xD;
3185 		num = dp_reo_ring_selection(value, ring);
3186 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3187 					      num, remap1, remap2);
3188 
3189 		break;
3190 	case dp_nss_cfg_dbdc:
3191 	case dp_nss_cfg_dbtc:
3192 		/* return false if both or all are offloaded to NSS */
3193 		return false;
3194 	}
3195 
3196 	dp_debug("remap1 %x remap2 %x offload_radio %u",
3197 		 *remap1, *remap2, offload_radio);
3198 	return true;
3199 }
3200 
/* Non-IPA build: Tx data ring keeps its configured size; no-op */
static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz)
{
}
3204 
/* Non-IPA build: Tx completion ring keeps its configured size; no-op */
static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
					 int *tx_comp_ipa_ring_sz)
{
}
3209 #endif /* IPA_OFFLOAD */
3210 
3211 /*
3212  * dp_reo_frag_dst_set() - configure reo register to set the
3213  *                        fragment destination ring
3214  * @soc : Datapath soc
3215  * @frag_dst_ring : output parameter to set fragment destination ring
3216  *
3217  * Based on offload_radio below fragment destination rings is selected
3218  * 0 - TCL
3219  * 1 - SW1
3220  * 2 - SW2
3221  * 3 - SW3
3222  * 4 - SW4
3223  * 5 - Release
3224  * 6 - FW
3225  * 7 - alternate select
3226  *
3227  * return: void
3228  */
3229 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
3230 {
3231 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3232 
3233 	switch (offload_radio) {
3234 	case dp_nss_cfg_default:
3235 		*frag_dst_ring = REO_REMAP_TCL;
3236 		break;
3237 	case dp_nss_cfg_first_radio:
3238 		/*
3239 		 * This configuration is valid for single band radio which
3240 		 * is also NSS offload.
3241 		 */
3242 	case dp_nss_cfg_dbdc:
3243 	case dp_nss_cfg_dbtc:
3244 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
3245 		break;
3246 	default:
3247 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3248 				FL("dp_reo_frag_dst_set invalid offload radio config"));
3249 		break;
3250 	}
3251 }
3252 
3253 #ifdef ENABLE_VERBOSE_DEBUG
3254 static void dp_enable_verbose_debug(struct dp_soc *soc)
3255 {
3256 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3257 
3258 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3259 
3260 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
3261 		is_dp_verbose_debug_enabled = true;
3262 
3263 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
3264 		hal_set_verbose_debug(true);
3265 	else
3266 		hal_set_verbose_debug(false);
3267 }
3268 #else
/* ENABLE_VERBOSE_DEBUG not compiled in; nothing to configure */
static void dp_enable_verbose_debug(struct dp_soc *soc)
{
}
3272 #endif
3273 
3274 #ifdef WLAN_FEATURE_STATS_EXT
/* Create the qdf event used to signal HW rx stats (WLAN_FEATURE_STATS_EXT) */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
	qdf_event_create(&soc->rx_hw_stats_event);
}
3279 #else
/* WLAN_FEATURE_STATS_EXT not compiled in; no event needed */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
}
3283 #endif
3284 
3285 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
3286 {
3287 	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned);
3288 	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA, index);
3289 
3290 	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned);
3291 	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE, index);
3292 }
3293 
3294 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
3295 						uint8_t index)
3296 {
3297 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA, index, 0)) {
3298 		dp_err("dp_srng_init failed for tcl_data_ring");
3299 		goto fail1;
3300 	}
3301 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
3302 			  soc->tcl_data_ring[index].alloc_size,
3303 			  soc->ctrl_psoc,
3304 			  WLAN_MD_DP_SRNG_TCL_DATA,
3305 			  "tcl_data_ring");
3306 
3307 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
3308 			 index, 0)) {
3309 		dp_err("dp_srng_init failed for tx_comp_ring");
3310 		goto fail1;
3311 	}
3312 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
3313 			  soc->tx_comp_ring[index].alloc_size,
3314 			  soc->ctrl_psoc,
3315 			  WLAN_MD_DP_SRNG_TX_COMP,
3316 			  "tcl_comp_ring");
3317 
3318 	return QDF_STATUS_SUCCESS;
3319 
3320 fail1:
3321 	return QDF_STATUS_E_FAILURE;
3322 }
3323 
3324 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
3325 {
3326 	dp_srng_free(soc, &soc->tcl_data_ring[index]);
3327 	dp_srng_free(soc, &soc->tx_comp_ring[index]);
3328 }
3329 
3330 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
3331 						 uint8_t index)
3332 {
3333 	int tx_ring_size;
3334 	int tx_comp_ring_size;
3335 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
3336 	int cached = 0;
3337 
3338 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
3339 	dp_ipa_get_tx_ring_size(index, &tx_ring_size);
3340 
3341 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
3342 			  tx_ring_size, cached)) {
3343 		dp_err("dp_srng_alloc failed for tcl_data_ring");
3344 		goto fail1;
3345 	}
3346 
3347 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
3348 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size);
3349 	/* Enable cached TCL desc if NSS offload is disabled */
3350 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
3351 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
3352 
3353 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
3354 			  tx_comp_ring_size, cached)) {
3355 		dp_err("dp_srng_alloc failed for tx_comp_ring");
3356 		goto fail1;
3357 	}
3358 
3359 	return QDF_STATUS_SUCCESS;
3360 
3361 fail1:
3362 	return QDF_STATUS_E_FAILURE;
3363 }
3364 
3365 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3366 {
3367 	struct cdp_lro_hash_config lro_hash;
3368 	QDF_STATUS status;
3369 
3370 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3371 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
3372 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
3373 		dp_err("LRO, GRO and RX hash disabled");
3374 		return QDF_STATUS_E_FAILURE;
3375 	}
3376 
3377 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
3378 
3379 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
3380 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
3381 		lro_hash.lro_enable = 1;
3382 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
3383 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
3384 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
3385 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
3386 	}
3387 
3388 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
3389 			     (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
3390 			      LRO_IPV4_SEED_ARR_SZ));
3391 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
3392 			     (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
3393 			      LRO_IPV6_SEED_ARR_SZ));
3394 
3395 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
3396 
3397 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
3398 		QDF_BUG(0);
3399 		dp_err("lro_hash_config not configured");
3400 		return QDF_STATUS_E_FAILURE;
3401 	}
3402 
3403 	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
3404 						      pdev->pdev_id,
3405 						      &lro_hash);
3406 	if (!QDF_IS_STATUS_SUCCESS(status)) {
3407 		dp_err("failed to send lro_hash_config to FW %u", status);
3408 		return status;
3409 	}
3410 
3411 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
3412 		lro_hash.lro_enable, lro_hash.tcp_flag,
3413 		lro_hash.tcp_flag_mask);
3414 
3415 	dp_info("toeplitz_hash_ipv4:");
3416 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3417 			   lro_hash.toeplitz_hash_ipv4,
3418 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
3419 			   LRO_IPV4_SEED_ARR_SZ));
3420 
3421 	dp_info("toeplitz_hash_ipv6:");
3422 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3423 			   lro_hash.toeplitz_hash_ipv6,
3424 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
3425 			   LRO_IPV6_SEED_ARR_SZ));
3426 
3427 	return status;
3428 }
3429 
3430 /*
3431  * dp_rxdma_ring_setup() - configure the RX DMA rings
3432  * @soc: data path SoC handle
3433  * @pdev: Physical device handle
3434  *
3435  * Return: 0 - success, > 0 - failure
3436  */
3437 #ifdef QCA_HOST2FW_RXBUF_RING
3438 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3439 {
3440 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3441 	int max_mac_rings;
3442 	int i;
3443 	int ring_size;
3444 
3445 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3446 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
3447 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
3448 
3449 	for (i = 0; i < max_mac_rings; i++) {
3450 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
3451 		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
3452 				  RXDMA_BUF, ring_size, 0)) {
3453 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3454 				  FL("failed rx mac ring setup"));
3455 			return QDF_STATUS_E_FAILURE;
3456 		}
3457 
3458 		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
3459 				 RXDMA_BUF, 1, i)) {
3460 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3461 				  FL("failed rx mac ring setup"));
3462 
3463 			dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]);
3464 			return QDF_STATUS_E_FAILURE;
3465 		}
3466 	}
3467 	return QDF_STATUS_SUCCESS;
3468 }
3469 #else
/* QCA_HOST2FW_RXBUF_RING not compiled in; nothing to set up */
static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
3474 #endif
3475 
3476 /**
3477  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
3478  * @pdev - DP_PDEV handle
3479  *
3480  * Return: void
3481  */
3482 static inline void
3483 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
3484 {
3485 	uint8_t map_id;
3486 	struct dp_soc *soc = pdev->soc;
3487 
3488 	if (!soc)
3489 		return;
3490 
3491 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
3492 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
3493 			     default_dscp_tid_map,
3494 			     sizeof(default_dscp_tid_map));
3495 	}
3496 
3497 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
3498 		hal_tx_set_dscp_tid_map(soc->hal_soc,
3499 					default_dscp_tid_map,
3500 					map_id);
3501 	}
3502 }
3503 
3504 /**
3505  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
3506  * @pdev - DP_PDEV handle
3507  *
3508  * Return: void
3509  */
3510 static inline void
3511 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
3512 {
3513 	struct dp_soc *soc = pdev->soc;
3514 
3515 	if (!soc)
3516 		return;
3517 
3518 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
3519 		     sizeof(default_pcp_tid_map));
3520 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
3521 }
3522 
3523 #ifdef IPA_OFFLOAD
3524 /**
3525  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
3526  * @soc: data path instance
3527  * @pdev: core txrx pdev context
3528  *
3529  * Return: QDF_STATUS_SUCCESS: success
3530  *         QDF_STATUS_E_RESOURCES: Error return
3531  */
3532 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3533 					   struct dp_pdev *pdev)
3534 {
3535 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3536 	int entries;
3537 
3538 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3539 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
3540 
3541 	/* Setup second Rx refill buffer ring */
3542 	if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3543 			  entries, 0)) {
3544 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3545 			FL("dp_srng_alloc failed second rx refill ring"));
3546 		return QDF_STATUS_E_FAILURE;
3547 	}
3548 
3549 	if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3550 			 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
3551 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3552 			  FL("dp_srng_init failed second rx refill ring"));
3553 		return QDF_STATUS_E_FAILURE;
3554 	}
3555 
3556 	return QDF_STATUS_SUCCESS;
3557 }
3558 
3559 /**
3560  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
3561  * @soc: data path instance
3562  * @pdev: core txrx pdev context
3563  *
3564  * Return: void
3565  */
3566 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3567 					      struct dp_pdev *pdev)
3568 {
3569 	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
3570 	dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
3571 }
3572 
3573 #else
/* IPA_OFFLOAD not compiled in; no second refill ring is needed */
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
3579 
/* IPA_OFFLOAD not compiled in; nothing to clean up */
static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					      struct dp_pdev *pdev)
{
}
3584 #endif
3585 
3586 #if !defined(DISABLE_MON_CONFIG)
3587 /**
3588  * dp_mon_ring_deinit() - Deinitialize monitor rings
3589  * @pdev: DP pdev handle
3590  *
3591  */
3592 static void dp_mon_rings_deinit(struct dp_pdev *pdev)
3593 {
3594 	int mac_id = 0;
3595 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3596 	struct dp_soc *soc = pdev->soc;
3597 
3598 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3599 
3600 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3601 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
3602 							 pdev->pdev_id);
3603 
3604 		dp_srng_deinit(soc, &soc->rxdma_mon_status_ring[lmac_id],
3605 			       RXDMA_MONITOR_STATUS, 0);
3606 
3607 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
3608 			continue;
3609 
3610 		dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[lmac_id],
3611 			       RXDMA_MONITOR_BUF, 0);
3612 		dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
3613 			       RXDMA_MONITOR_DST, 0);
3614 		dp_srng_deinit(soc, &soc->rxdma_mon_desc_ring[lmac_id],
3615 			       RXDMA_MONITOR_DESC, 0);
3616 	}
3617 }
3618 
3619 /**
3620  * dp_mon_rings_free() - free monitor rings
3621  * @pdev: Datapath pdev handle
3622  *
3623  */
3624 static void dp_mon_rings_free(struct dp_pdev *pdev)
3625 {
3626 	int mac_id = 0;
3627 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3628 	struct dp_soc *soc = pdev->soc;
3629 
3630 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3631 
3632 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3633 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
3634 							 pdev->pdev_id);
3635 
3636 		dp_srng_free(soc, &soc->rxdma_mon_status_ring[lmac_id]);
3637 
3638 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
3639 			continue;
3640 
3641 		dp_srng_free(soc, &soc->rxdma_mon_buf_ring[lmac_id]);
3642 		dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
3643 		dp_srng_free(soc, &soc->rxdma_mon_desc_ring[lmac_id]);
3644 	}
3645 }
3646 
3647 /**
3648  * dp_mon_rings_init() - Initialize monitor srng rings
3649  * @pdev: Datapath pdev handle
3650  *
3651  * return: QDF_STATUS_SUCCESS on success
3652  *	   QDF_STATUS_E_NOMEM on failure
3653  */
3654 static
3655 QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
3656 {
3657 	int mac_id = 0;
3658 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3659 
3660 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3661 
3662 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3663 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
3664 							 pdev->pdev_id);
3665 
3666 		if (dp_srng_init(soc, &soc->rxdma_mon_status_ring[lmac_id],
3667 				 RXDMA_MONITOR_STATUS, 0, lmac_id)) {
3668 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3669 				  FL(RNG_ERR "rxdma_mon_status_ring"));
3670 			goto fail1;
3671 		}
3672 
3673 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
3674 			continue;
3675 
3676 		if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[lmac_id],
3677 				 RXDMA_MONITOR_BUF, 0, lmac_id)) {
3678 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3679 				  FL(RNG_ERR "rxdma_mon_buf_ring "));
3680 			goto fail1;
3681 		}
3682 
3683 		if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
3684 				 RXDMA_MONITOR_DST, 0, lmac_id)) {
3685 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3686 				  FL(RNG_ERR "rxdma_mon_dst_ring"));
3687 			goto fail1;
3688 		}
3689 
3690 		if (dp_srng_init(soc, &soc->rxdma_mon_desc_ring[lmac_id],
3691 				 RXDMA_MONITOR_DESC, 0, lmac_id)) {
3692 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3693 				  FL(RNG_ERR "rxdma_mon_desc_ring"));
3694 			goto fail1;
3695 		}
3696 	}
3697 	return QDF_STATUS_SUCCESS;
3698 
3699 fail1:
3700 	dp_mon_rings_deinit(pdev);
3701 	return QDF_STATUS_E_NOMEM;
3702 }
3703 
3704 /**
3705  * dp_mon_rings_alloc() - Allocate memory for monitor srng rings
3706  * @soc: Datapath soc handle
3707  * @pdev: Datapath pdev handle
3708  *
3709  * return: QDF_STATUS_SUCCESS on success
3710  *	   QDF_STATUS_E_NOMEM on failure
3711  */
3712 static
3713 QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
3714 {
3715 	int mac_id = 0;
3716 	int entries;
3717 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3718 
3719 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3720 
3721 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3722 		int lmac_id =
3723 		dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
3724 		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3725 		if (dp_srng_alloc(soc, &soc->rxdma_mon_status_ring[lmac_id],
3726 				  RXDMA_MONITOR_STATUS, entries, 0)) {
3727 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3728 				  FL(RNG_ERR "rxdma_mon_status_ring"));
3729 			goto fail1;
3730 		}
3731 
3732 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
3733 			continue;
3734 
3735 		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
3736 		if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[lmac_id],
3737 				  RXDMA_MONITOR_BUF, entries, 0)) {
3738 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3739 				  FL(RNG_ERR "rxdma_mon_buf_ring "));
3740 			goto fail1;
3741 		}
3742 
3743 		entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
3744 		if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
3745 				  RXDMA_MONITOR_DST, entries, 0)) {
3746 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3747 				  FL(RNG_ERR "rxdma_mon_dst_ring"));
3748 			goto fail1;
3749 		}
3750 
3751 		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
3752 		if (dp_srng_alloc(soc, &soc->rxdma_mon_desc_ring[lmac_id],
3753 				  RXDMA_MONITOR_DESC, entries, 0)) {
3754 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3755 				  FL(RNG_ERR "rxdma_mon_desc_ring"));
3756 			goto fail1;
3757 		}
3758 	}
3759 	return QDF_STATUS_SUCCESS;
3760 
3761 fail1:
3762 	dp_mon_rings_free(pdev);
3763 	return QDF_STATUS_E_NOMEM;
3764 }
3765 #else
/* Monitor rings compiled out (DISABLE_MON_CONFIG); nothing to free */
static void dp_mon_rings_free(struct dp_pdev *pdev)
{
}
3769 
/* Monitor rings compiled out (DISABLE_MON_CONFIG); nothing to deinit */
static void dp_mon_rings_deinit(struct dp_pdev *pdev)
{
}
3773 
/* Monitor rings compiled out (DISABLE_MON_CONFIG); report success */
static
QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
3779 
/* Monitor rings compiled out (DISABLE_MON_CONFIG); report success */
static
QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
3785 #endif
3786 
/*
 * dp_iterate_update_peer_list - update peer stats on cal client timer
 * @pdev_hdl: pdev handle
 *
 * Walks every vdev of the pdev and every peer of each vdev, feeding each
 * peer's stats to the calibration client.
 */
#ifdef ATH_SUPPORT_EXT_STAT
void  dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
	struct dp_soc *soc = pdev->soc;
	struct dp_vdev *vdev = NULL;
	struct dp_peer *peer = NULL;

	/* Lock order: soc->peer_ref_mutex first, then pdev->vdev_list_lock */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
			dp_cal_client_update_peer_stats(&peer->stats);
		}
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
}
3808 #else
/* ATH_SUPPORT_EXT_STAT not compiled in; no stats iteration needed */
void  dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
{
}
3812 #endif
3813 
3814 /*
3815  * dp_htt_ppdu_stats_attach() - attach resources for HTT PPDU stats processing
3816  * @pdev: Datapath PDEV handle
3817  *
3818  * Return: QDF_STATUS_SUCCESS: Success
3819  *         QDF_STATUS_E_NOMEM: Error
3820  */
3821 static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
3822 {
3823 	pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);
3824 
3825 	if (!pdev->ppdu_tlv_buf) {
3826 		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
3827 		return QDF_STATUS_E_NOMEM;
3828 	}
3829 
3830 	return QDF_STATUS_SUCCESS;
3831 }
3832 
3833 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
3834 /**
3835  * dp_soc_rx_history_attach() - Attach the ring history record buffers
3836  * @soc: DP soc structure
3837  *
3838  * This function allocates the memory for recording the rx ring, rx error
3839  * ring and the reinject ring entries. There is no error returned in case
3840  * of allocation failure since the record function checks if the history is
3841  * initialized or not. We do not want to fail the driver load in case of
3842  * failure to allocate memory for debug history.
3843  *
3844  * Returns: None
3845  */
3846 static void dp_soc_rx_history_attach(struct dp_soc *soc)
3847 {
3848 	int i;
3849 	uint32_t rx_ring_hist_size;
3850 	uint32_t rx_err_ring_hist_size;
3851 	uint32_t rx_reinject_hist_size;
3852 
3853 	rx_ring_hist_size = sizeof(*soc->rx_ring_history[i]);
3854 	rx_err_ring_hist_size = sizeof(*soc->rx_err_ring_history);
3855 	rx_reinject_hist_size = sizeof(*soc->rx_reinject_ring_history);
3856 
3857 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
3858 		soc->rx_ring_history[i] = qdf_mem_malloc(rx_ring_hist_size);
3859 		if (soc->rx_ring_history[i])
3860 			qdf_atomic_init(&soc->rx_ring_history[i]->index);
3861 	}
3862 
3863 	soc->rx_err_ring_history = qdf_mem_malloc(rx_err_ring_hist_size);
3864 	if (soc->rx_err_ring_history)
3865 		qdf_atomic_init(&soc->rx_err_ring_history->index);
3866 
3867 	soc->rx_reinject_ring_history = qdf_mem_malloc(rx_reinject_hist_size);
3868 	if (soc->rx_reinject_ring_history)
3869 		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
3870 }
3871 
3872 static void dp_soc_rx_history_detach(struct dp_soc *soc)
3873 {
3874 	int i;
3875 
3876 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
3877 		qdf_mem_free(soc->rx_ring_history[i]);
3878 
3879 	qdf_mem_free(soc->rx_err_ring_history);
3880 	qdf_mem_free(soc->rx_reinject_ring_history);
3881 }
3882 
3883 #else
/* WLAN_FEATURE_DP_RX_RING_HISTORY not compiled in; no buffers to attach */
static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
{
}
3887 
/* WLAN_FEATURE_DP_RX_RING_HISTORY not compiled in; nothing to detach */
static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
{
}
3891 #endif
3892 
3893 /*
3894 * dp_pdev_attach_wifi3() - attach txrx pdev
3895 * @txrx_soc: Datapath SOC handle
3896 * @htc_handle: HTC handle for host-target interface
3897 * @qdf_osdev: QDF OS device
3898 * @pdev_id: PDEV ID
3899 *
3900 * Return: QDF_STATUS
3901 */
3902 static inline QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
3903 					      HTC_HANDLE htc_handle,
3904 					      qdf_device_t qdf_osdev,
3905 					      uint8_t pdev_id)
3906 {
3907 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3908 	struct dp_pdev *pdev = NULL;
3909 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3910 	int nss_cfg;
3911 
3912 	pdev = qdf_mem_malloc(sizeof(*pdev));
3913 	if (!pdev) {
3914 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3915 			  FL("DP PDEV memory allocation failed"));
3916 		goto fail0;
3917 	}
3918 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
3919 			  WLAN_MD_DP_PDEV, "dp_pdev");
3920 
3921 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3922 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
3923 
3924 	if (!pdev->wlan_cfg_ctx) {
3925 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3926 			  FL("pdev cfg_attach failed"));
3927 		goto fail1;
3928 	}
3929 
3930 	/*
3931 	 * set nss pdev config based on soc config
3932 	 */
3933 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
3934 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
3935 					 (nss_cfg & (1 << pdev_id)));
3936 
3937 	pdev->soc = soc;
3938 	pdev->pdev_id = pdev_id;
3939 	soc->pdev_list[pdev_id] = pdev;
3940 
3941 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
3942 	soc->pdev_count++;
3943 
3944 	/* Allocate memory for pdev srng rings */
3945 	if (dp_pdev_srng_alloc(pdev)) {
3946 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3947 			  FL("dp_pdev_srng_alloc failed"));
3948 		goto fail2;
3949 	}
3950 
3951 	/* Rx specific init */
3952 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
3953 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3954 			  FL("dp_rx_pdev_attach failed"));
3955 		goto fail3;
3956 	}
3957 
3958 	/* Rx monitor mode specific init */
3959 	if (dp_rx_pdev_mon_desc_pool_alloc(pdev)) {
3960 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3961 			  "dp_rx_pdev_mon_attach failed");
3962 		goto fail4;
3963 	}
3964 
3965 	return QDF_STATUS_SUCCESS;
3966 fail4:
3967 	dp_rx_pdev_desc_pool_free(pdev);
3968 fail3:
3969 	dp_pdev_srng_free(pdev);
3970 fail2:
3971 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3972 fail1:
3973 	qdf_mem_free(pdev);
3974 fail0:
3975 	return QDF_STATUS_E_FAILURE;
3976 }
3977 
3978 /*
3979  * dp_rxdma_ring_cleanup() - configure the RX DMA rings
3980  * @soc: data path SoC handle
3981  * @pdev: Physical device handle
3982  *
3983  * Return: void
3984  */
3985 #ifdef QCA_HOST2FW_RXBUF_RING
3986 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
3987 {
3988 	int i;
3989 
3990 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
3991 		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
3992 		dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]);
3993 	}
3994 
3995 	if (soc->reap_timer_init) {
3996 		qdf_timer_free(&soc->mon_reap_timer);
3997 		soc->reap_timer_init = 0;
3998 	}
3999 }
4000 #else
4001 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
4002 {
4003 	if (soc->lmac_timer_init) {
4004 		qdf_timer_stop(&soc->lmac_reap_timer);
4005 		qdf_timer_free(&soc->lmac_reap_timer);
4006 		soc->lmac_timer_init = 0;
4007 	}
4008 }
4009 #endif
4010 
4011 /*
4012  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
4013  * @pdev: device object
4014  *
4015  * Return: void
4016  */
4017 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
4018 {
4019 	struct dp_neighbour_peer *peer = NULL;
4020 	struct dp_neighbour_peer *temp_peer = NULL;
4021 
4022 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
4023 			   neighbour_peer_list_elem, temp_peer) {
4024 		/* delete this peer from the list */
4025 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
4026 			     peer, neighbour_peer_list_elem);
4027 		qdf_mem_free(peer);
4028 	}
4029 
4030 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
4031 }
4032 
4033 /**
4034 * dp_htt_ppdu_stats_detach() - detach stats resources
4035 * @pdev: Datapath PDEV handle
4036 *
4037 * Return: void
4038 */
4039 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
4040 {
4041 	struct ppdu_info *ppdu_info, *ppdu_info_next;
4042 
4043 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
4044 			   ppdu_info_list_elem, ppdu_info_next) {
4045 		if (!ppdu_info)
4046 			break;
4047 		qdf_assert_always(ppdu_info->nbuf);
4048 		qdf_nbuf_free(ppdu_info->nbuf);
4049 		qdf_mem_free(ppdu_info);
4050 		pdev->list_depth--;
4051 	}
4052 
4053 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->sched_comp_ppdu_list,
4054 			   ppdu_info_list_elem, ppdu_info_next) {
4055 		if (!ppdu_info)
4056 			break;
4057 		qdf_assert_always(ppdu_info->nbuf);
4058 		qdf_nbuf_free(ppdu_info->nbuf);
4059 		qdf_mem_free(ppdu_info);
4060 		pdev->sched_comp_list_depth--;
4061 	}
4062 
4063 	if (pdev->ppdu_tlv_buf)
4064 		qdf_mem_free(pdev->ppdu_tlv_buf);
4065 
4066 }
4067 
4068 #ifdef WLAN_DP_PENDING_MEM_FLUSH
4069 /**
4070  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
4071  * @pdev: Datapath PDEV handle
4072  *
4073  * This is the last chance to flush all pending dp vdevs/peers,
4074  * some peer/vdev leak case like Non-SSR + peer unmap missing
4075  * will be covered here.
4076  *
4077  * Return: None
4078  */
static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;

	while (true) {
		/* Scan for the first vdev with a pending delete; if the
		 * TAILQ_FOREACH runs to completion, vdev is NULL here.
		 */
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (vdev->delete.pending)
				break;
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);

		/*
		 * vdev will be freed when all peers get cleanup,
		 * dp_delete_pending_vdev will remove vdev from vdev_list
		 * in pdev.
		 */
		/* NOTE(review): flush happens after dropping the list lock;
		 * presumably safe because this runs in the deinit path with
		 * no concurrent vdev deletion - confirm.
		 */
		if (vdev)
			dp_vdev_flush_peers((struct cdp_vdev *)vdev, 0);
		else
			break;
	}
}
4102 #else
/* WLAN_DP_PENDING_MEM_FLUSH not enabled: nothing to flush */
static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
{
}
4106 #endif
4107 
4108 /**
4109  * dp_pdev_deinit() - Deinit txrx pdev
4110  * @txrx_pdev: Datapath PDEV handle
4111  * @force: Force deinit
4112  *
4113  * Return: None
4114  */
static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	qdf_nbuf_t curr_nbuf, next_nbuf;

	/* Guard against a repeated deinit of the same pdev */
	if (pdev->pdev_deinit)
		return;

	/* Teardown is order-dependent: quiesce users of the rx descriptor
	 * pools and buffers before deiniting the pools themselves.
	 */
	dp_tx_me_exit(pdev);
	dp_rx_fst_detach(pdev->soc, pdev);
	dp_rx_pdev_mon_buffers_free(pdev);
	dp_rx_pdev_buffers_free(pdev);
	dp_rx_pdev_mon_desc_pool_deinit(pdev);
	dp_rx_pdev_desc_pool_deinit(pdev);
	dp_htt_ppdu_stats_detach(pdev);
	dp_tx_ppdu_stats_detach(pdev);
	qdf_event_destroy(&pdev->fw_peer_stats_event);
	dp_cal_client_detach(&pdev->cal_client_ctx);
	if (pdev->sojourn_buf)
		qdf_nbuf_free(pdev->sojourn_buf);

	/* Flush delete-pending vdevs/peers before tx pdev teardown */
	dp_pdev_flush_pending_vdevs(pdev);
	dp_tx_pdev_detach(pdev);
	dp_pktlogmod_exit(pdev);
	dp_neighbour_peers_detach(pdev);

	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);

	if (pdev->invalid_peer)
		qdf_mem_free(pdev->invalid_peer);

	if (pdev->filter)
		dp_mon_filter_dealloc(pdev);

	dp_pdev_srng_deinit(pdev);

	dp_ipa_uc_detach(pdev->soc, pdev);
	dp_cleanup_ipa_rx_refill_buf_ring(pdev->soc, pdev);
	dp_rxdma_ring_cleanup(pdev->soc, pdev);

	/* Free any MSDUs still queued on the invalid-peer list */
	curr_nbuf = pdev->invalid_peer_head_msdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}
	pdev->invalid_peer_head_msdu = NULL;
	pdev->invalid_peer_tail_msdu = NULL;

	dp_wdi_event_detach(pdev);
	/* Mark done so a second call becomes a no-op */
	pdev->pdev_deinit = 1;
}
4168 
4169 /**
4170  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
4171  * @psoc: Datapath psoc handle
4172  * @pdev_id: Id of datapath PDEV handle
4173  * @force: Force deinit
4174  *
4175  * Return: QDF_STATUS
4176  */
4177 static QDF_STATUS
4178 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
4179 		     int force)
4180 {
4181 	struct dp_pdev *txrx_pdev;
4182 
4183 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
4184 						       pdev_id);
4185 
4186 	if (!txrx_pdev)
4187 		return QDF_STATUS_E_FAILURE;
4188 
4189 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
4190 
4191 	return QDF_STATUS_SUCCESS;
4192 }
4193 
4194 /*
4195  * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
4196  * @txrx_pdev: Datapath PDEV handle
4197  *
4198  * Return: None
4199  */
static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
{
	/* Only debugfs for tx capture needs the device name, hence "post" */
	dp_tx_capture_debugfs_init((struct dp_pdev *)txrx_pdev);
}
4206 
4207 /*
4208  * dp_pdev_post_attach_wifi3() - attach txrx pdev post
4209  * @psoc: Datapath soc handle
4210  * @pdev_id: pdev id of pdev
4211  *
4212  * Return: QDF_STATUS
4213  */
4214 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
4215 				     uint8_t pdev_id)
4216 {
4217 	struct dp_pdev *pdev;
4218 
4219 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
4220 						  pdev_id);
4221 
4222 	if (!pdev) {
4223 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4224 			  FL("DP PDEV is Null for pdev id %d"), pdev_id);
4225 		return QDF_STATUS_E_FAILURE;
4226 	}
4227 
4228 	dp_pdev_post_attach((struct cdp_pdev *)pdev);
4229 	return QDF_STATUS_SUCCESS;
4230 }
4231 
4232 /*
4233  * dp_pdev_detach() - Complete rest of pdev detach
4234  * @txrx_pdev: Datapath PDEV handle
4235  * @force: Force deinit
4236  *
4237  * Return: None
4238  */
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;

	/* Free memory that dp_pdev_deinit() only deinitialized */
	dp_rx_pdev_mon_desc_pool_free(pdev);
	dp_rx_pdev_desc_pool_free(pdev);
	dp_pdev_srng_free(pdev);

	/* Unhook this pdev from the soc before freeing it */
	soc->pdev_count--;
	soc->pdev_list[pdev->pdev_id] = NULL;

	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
	/* Remove from the minidump table before the memory goes away */
	wlan_minidump_remove(pdev);
	qdf_mem_free(pdev);
}
4255 
4256 /*
4257  * dp_pdev_detach_wifi3() - detach txrx pdev
4258  * @psoc: Datapath soc handle
4259  * @pdev_id: pdev id of pdev
4260  * @force: Force detach
4261  *
4262  * Return: QDF_STATUS
4263  */
4264 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
4265 				       int force)
4266 {
4267 	struct dp_pdev *pdev;
4268 
4269 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
4270 						  pdev_id);
4271 
4272 	if (!pdev) {
4273 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4274 			  FL("DP PDEV is Null for pdev id %d"), pdev_id);
4275 		return QDF_STATUS_E_FAILURE;
4276 	}
4277 
4278 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
4279 	return QDF_STATUS_SUCCESS;
4280 }
4281 
4282 /*
4283  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
4284  * @soc: DP SOC handle
4285  */
static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_list_node *desc;
	struct dp_rx_tid *rx_tid;

	/* Drain the deferred-free list under its lock: DMA-unmap and free
	 * each REO hardware queue descriptor, then the list node itself.
	 */
	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	while (qdf_list_remove_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		rx_tid = &desc->rx_tid;
		qdf_mem_unmap_nbytes_single(soc->osdev,
			rx_tid->hw_qdesc_paddr,
			QDF_DMA_BIDIRECTIONAL,
			rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
	/* List is empty now; destroy the list and its lock */
	qdf_list_destroy(&soc->reo_desc_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
}
4306 
4307 /*
4308  * dp_soc_reset_txrx_ring_map() - reset tx ring map
4309  * @soc: DP SOC handle
4310  *
4311  */
4312 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
4313 {
4314 	uint32_t i;
4315 
4316 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
4317 		soc->tx_ring_map[i] = 0;
4318 }
4319 
4320 /**
4321  * dp_soc_deinit() - Deinitialize txrx SOC
4322  * @txrx_soc: Opaque DP SOC handle
4323  *
4324  * Return: None
4325  */
static void dp_soc_deinit(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	struct htt_soc *htt_soc = soc->htt_handle;

	/* Stop the common data path before tearing anything down */
	qdf_atomic_set(&soc->cmn_init_done, 0);

	/* free peer tables & AST tables allocated during peer_map_attach */
	if (soc->peer_map_attach_success) {
		dp_peer_find_detach(soc);
		soc->peer_map_attach_success = FALSE;
	}

	/* Let in-flight HTT stats work complete, then block further runs */
	qdf_flush_work(&soc->htt_stats.work);
	qdf_disable_work(&soc->htt_stats.work);

	qdf_spinlock_destroy(&soc->htt_stats.lock);

	dp_soc_reset_txrx_ring_map(soc);

	dp_reo_desc_freelist_destroy(soc);

	DEINIT_RX_HW_STATS_LOCK(soc);

	qdf_spinlock_destroy(&soc->ast_lock);
	qdf_spinlock_destroy(&soc->peer_ref_mutex);

	qdf_nbuf_queue_free(&soc->htt_stats.msg);

	dp_soc_wds_detach(soc);

	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);

	dp_reo_cmdlist_destroy(soc);
	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);

	dp_soc_tx_desc_sw_pools_deinit(soc);

	dp_soc_srng_deinit(soc);

	dp_hw_link_desc_ring_deinit(soc);

	/* htt_soc was captured above so it can be detached after the
	 * HTC handle inside it is deallocated.
	 */
	htt_soc_htc_dealloc(soc->htt_handle);

	htt_soc_detach(htt_soc);

	/* Free wbm sg list and reset flags in down path */
	dp_rx_wbm_sg_list_deinit(soc);

	wlan_minidump_remove(soc);
}
4377 
4378 /**
4379  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
4380  * @txrx_soc: Opaque DP SOC handle
4381  *
4382  * Return: None
4383  */
static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
{
	/* CDP-facing wrapper around the internal soc deinit */
	dp_soc_deinit(txrx_soc);
}
4388 
4389 /*
4390  * dp_soc_detach() - Detach rest of txrx SOC
4391  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4392  *
4393  * Return: None
4394  */
static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	/* Free memory allocated at attach time; runtime state was already
	 * torn down in dp_soc_deinit().
	 */
	dp_soc_tx_desc_sw_pools_free(soc);
	dp_soc_srng_free(soc);
	dp_hw_link_desc_ring_free(soc);
	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
	dp_soc_rx_history_detach(soc);

	/* soc itself goes last; nothing may touch it after this */
	qdf_mem_free(soc);
}
4408 
4409 /*
4410  * dp_soc_detach_wifi3() - Detach txrx SOC
4411  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4412  *
4413  * Return: None
4414  */
static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
{
	/* CDP-facing wrapper around the internal soc detach */
	dp_soc_detach(txrx_soc);
}
4419 
4420 #if !defined(DISABLE_MON_CONFIG)
4421 /**
4422  * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
4423  * @soc: soc handle
4424  * @pdev: physical device handle
4425  * @mac_id: ring number
4426  * @mac_for_pdev: mac_id
4427  *
4428  * Return: non-zero for failure, zero for success
4429  */
static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
					struct dp_pdev *pdev,
					int mac_id,
					int mac_for_pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	/* With rxdma1 enabled all four monitor rings (buf/dst/status/desc)
	 * are set up; otherwise only the status ring is configured.
	 * First failure aborts the sequence and is returned to the caller.
	 */
	if (soc->wlan_cfg_ctx->rxdma1_enable) {
		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					soc->rxdma_mon_buf_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_BUF);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
			return status;
		}

		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					soc->rxdma_mon_dst_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_DST);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
			return status;
		}

		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					soc->rxdma_mon_status_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_STATUS);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
			return status;
		}

		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
				soc->rxdma_mon_desc_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_DESC);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng message for Rxdma mon desc ring");
			return status;
		}
	} else {
		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					soc->rxdma_mon_status_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_STATUS);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
			return status;
		}
	}

	return status;

}
4492 #else
/* Monitor ring configuration is compiled out (DISABLE_MON_CONFIG) */
static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
					struct dp_pdev *pdev,
					int mac_id,
					int mac_for_pdev)
{
	return QDF_STATUS_SUCCESS;
}
4500 #endif
4501 
4502 /*
4503  * dp_rxdma_ring_config() - configure the RX DMA rings
4504  *
4505  * This function is used to configure the MAC rings.
4506  * On MCL host provides buffers in Host2FW ring
4507  * FW refills (copies) buffers to the ring and updates
4508  * ring_idx in register
4509  *
4510  * @soc: data path SoC handle
4511  *
4512  * Return: zero on success, non-zero on failure
4513  */
4514 #ifdef QCA_HOST2FW_RXBUF_RING
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			int mac_id;
			bool dbs_enable = 0;
			int max_mac_rings =
				 wlan_cfg_get_num_mac_rings
				(pdev->wlan_cfg_ctx);
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

			/* Host-owned refill ring for this pdev's lmac */
			htt_srng_setup(soc->htt_handle, 0,
				       soc->rx_refill_buf_ring[lmac_id]
				       .hal_srng,
				       RXDMA_BUF);

			/* Second refill ring is optional (IPA use case) -
			 * NOTE(review): purpose inferred, confirm.
			 */
			if (pdev->rx_refill_buf_ring2.hal_srng)
				htt_srng_setup(soc->htt_handle, 0,
					pdev->rx_refill_buf_ring2.hal_srng,
					RXDMA_BUF);

			/* One MAC ring unless the target reports 2x2 DBS */
			if (soc->cdp_soc.ol_ops->
				is_hw_dbs_2x2_capable) {
				dbs_enable = soc->cdp_soc.ol_ops->
					is_hw_dbs_2x2_capable(
							(void *)soc->ctrl_psoc);
			}

			if (dbs_enable) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_ERROR,
				FL("DBS enabled max_mac_rings %d"),
					 max_mac_rings);
			} else {
				max_mac_rings = 1;
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					 QDF_TRACE_LEVEL_ERROR,
					 FL("DBS disabled, max_mac_rings %d"),
					 max_mac_rings);
			}

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
					 FL("pdev_id %d max_mac_rings %d"),
					 pdev->pdev_id, max_mac_rings);

			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
					dp_get_mac_id_for_pdev(mac_id,
							       pdev->pdev_id);
				/*
				 * Obtain lmac id from pdev to access the LMAC
				 * ring in soc context
				 */
				lmac_id =
				dp_get_lmac_id_for_pdev_id(soc,
							   mac_id,
							   pdev->pdev_id);
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					 QDF_TRACE_LEVEL_ERROR,
					 FL("mac_id %d"), mac_for_pdev);

				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					 pdev->rx_mac_buf_ring[mac_id]
						.hal_srng,
					 RXDMA_BUF);
				htt_srng_setup(soc->htt_handle, mac_for_pdev,
				soc->rxdma_err_dst_ring[lmac_id]
					.hal_srng,
					RXDMA_DST);

				/* Configure monitor mode rings */
				status = dp_mon_htt_srng_setup(soc, pdev,
							       lmac_id,
							       mac_for_pdev);
				if (status != QDF_STATUS_SUCCESS) {
					dp_err("Failed to send htt monitor messages to target");
					return status;
				}

			}
		}
	}

	/*
	 * Timer to reap rxdma status rings.
	 * Needed until we enable ppdu end interrupts
	 */
	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
		       dp_mon_reap_timer_handler, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);
	soc->reap_timer_init = 1;
	return status;
}
4612 #else
4613 /* This is only for WIN */
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int mac_for_pdev;
	int lmac_id;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev =  soc->pdev_list[i];

		if (!pdev)
			continue;

		/* On WIN the pdev index doubles as the mac id */
		mac_for_pdev = i;
		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

		htt_srng_setup(soc->htt_handle, mac_for_pdev,
			       soc->rx_refill_buf_ring[lmac_id].
			       hal_srng, RXDMA_BUF);
#ifndef DISABLE_MON_CONFIG

		/* Monitor mode rings: buf, dst, status and desc */
		htt_srng_setup(soc->htt_handle, mac_for_pdev,
			       soc->rxdma_mon_buf_ring[lmac_id].hal_srng,
			       RXDMA_MONITOR_BUF);
		htt_srng_setup(soc->htt_handle, mac_for_pdev,
			       soc->rxdma_mon_dst_ring[lmac_id].hal_srng,
			       RXDMA_MONITOR_DST);
		htt_srng_setup(soc->htt_handle, mac_for_pdev,
			       soc->rxdma_mon_status_ring[lmac_id].hal_srng,
			       RXDMA_MONITOR_STATUS);
		htt_srng_setup(soc->htt_handle, mac_for_pdev,
			       soc->rxdma_mon_desc_ring[lmac_id].hal_srng,
			       RXDMA_MONITOR_DESC);
#endif
		htt_srng_setup(soc->htt_handle, mac_for_pdev,
			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
			       RXDMA_DST);
	}

	/* Configure LMAC rings in Polled mode */
	if (soc->lmac_polled_mode) {
		/*
		 * Timer to reap lmac rings.
		 */
		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
			       dp_service_lmac_rings, (void *)soc,
			       QDF_TIMER_TYPE_WAKE_APPS);
		soc->lmac_timer_init = 1;
		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
	}
	return status;
}
4666 #endif
4667 
4668 #ifdef NO_RX_PKT_HDR_TLV
4669 static QDF_STATUS
4670 dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4671 {
4672 	int i;
4673 	int mac_id;
4674 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
4675 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4676 
4677 	htt_tlv_filter.mpdu_start = 1;
4678 	htt_tlv_filter.msdu_start = 1;
4679 	htt_tlv_filter.mpdu_end = 1;
4680 	htt_tlv_filter.msdu_end = 1;
4681 	htt_tlv_filter.attention = 1;
4682 	htt_tlv_filter.packet = 1;
4683 	htt_tlv_filter.packet_header = 0;
4684 
4685 	htt_tlv_filter.ppdu_start = 0;
4686 	htt_tlv_filter.ppdu_end = 0;
4687 	htt_tlv_filter.ppdu_end_user_stats = 0;
4688 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4689 	htt_tlv_filter.ppdu_end_status_done = 0;
4690 	htt_tlv_filter.enable_fp = 1;
4691 	htt_tlv_filter.enable_md = 0;
4692 	htt_tlv_filter.enable_md = 0;
4693 	htt_tlv_filter.enable_mo = 0;
4694 
4695 	htt_tlv_filter.fp_mgmt_filter = 0;
4696 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
4697 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
4698 					 FILTER_DATA_MCAST |
4699 					 FILTER_DATA_DATA);
4700 	htt_tlv_filter.mo_mgmt_filter = 0;
4701 	htt_tlv_filter.mo_ctrl_filter = 0;
4702 	htt_tlv_filter.mo_data_filter = 0;
4703 	htt_tlv_filter.md_data_filter = 0;
4704 
4705 	htt_tlv_filter.offset_valid = true;
4706 
4707 	htt_tlv_filter.rx_packet_offset = RX_PKT_TLVS_LEN;
4708 	/*Not subscribing rx_pkt_header*/
4709 	htt_tlv_filter.rx_header_offset = 0;
4710 	htt_tlv_filter.rx_mpdu_start_offset =
4711 				hal_rx_mpdu_start_offset_get(soc->hal_soc);
4712 	htt_tlv_filter.rx_mpdu_end_offset =
4713 				hal_rx_mpdu_end_offset_get(soc->hal_soc);
4714 	htt_tlv_filter.rx_msdu_start_offset =
4715 				hal_rx_msdu_start_offset_get(soc->hal_soc);
4716 	htt_tlv_filter.rx_msdu_end_offset =
4717 				hal_rx_msdu_end_offset_get(soc->hal_soc);
4718 	htt_tlv_filter.rx_attn_offset =
4719 				hal_rx_attn_offset_get(soc->hal_soc);
4720 
4721 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4722 		struct dp_pdev *pdev = soc->pdev_list[i];
4723 
4724 		if (!pdev)
4725 			continue;
4726 
4727 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4728 			int mac_for_pdev =
4729 				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4730 			/*
4731 			 * Obtain lmac id from pdev to access the LMAC ring
4732 			 * in soc context
4733 			 */
4734 			int lmac_id =
4735 				dp_get_lmac_id_for_pdev_id(soc, mac_id,
4736 							   pdev->pdev_id);
4737 
4738 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4739 					    soc->rx_refill_buf_ring[lmac_id].
4740 					    hal_srng,
4741 					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
4742 					    &htt_tlv_filter);
4743 		}
4744 	}
4745 	return status;
4746 }
4747 #else
/* Packet header TLV present by default: no ring filter override needed */
static QDF_STATUS
dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
4753 #endif
4754 
4755 /*
4756  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
4757  *
4758  * This function is used to configure the FSE HW block in RX OLE on a
4759  * per pdev basis. Here, we will be programming parameters related to
4760  * the Flow Search Table.
4761  *
4762  * @soc: data path SoC handle
4763  *
4764  * Return: zero on success, non-zero on failure
4765  */
4766 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
4767 static QDF_STATUS
4768 dp_rx_target_fst_config(struct dp_soc *soc)
4769 {
4770 	int i;
4771 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4772 
4773 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4774 		struct dp_pdev *pdev = soc->pdev_list[i];
4775 
4776 		/* Flow search is not enabled if NSS offload is enabled */
4777 		if (pdev &&
4778 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
4779 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
4780 			if (status != QDF_STATUS_SUCCESS)
4781 				break;
4782 		}
4783 	}
4784 	return status;
4785 }
4786 #elif defined(WLAN_SUPPORT_RX_FISA)
4787 /**
4788  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
4789  * @soc: SoC handle
4790  *
4791  * Return: Success
4792  */
4793 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
4794 {
4795 	/* Check if it is enabled in the INI */
4796 	if (!soc->fisa_enable) {
4797 		dp_err("RX FISA feature is disabled");
4798 		return QDF_STATUS_E_NOSUPPORT;
4799 	}
4800 
4801 	return dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
4802 }
4803 
4804 #define FISA_MAX_TIMEOUT 0xffffffff
4805 #define FISA_DISABLE_TIMEOUT 0
4806 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
4807 {
4808 	struct dp_htt_rx_fisa_cfg fisa_config;
4809 
4810 	fisa_config.pdev_id = 0;
4811 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
4812 
4813 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
4814 }
4815 #else /* !WLAN_SUPPORT_RX_FISA */
/* Neither RX flow tag nor FISA compiled in: no FSE setup required */
static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
4820 #endif /* !WLAN_SUPPORT_RX_FISA */
4821 
4822 #ifndef WLAN_SUPPORT_RX_FISA
/* FISA not compiled in: no HTT FISA configuration to send */
static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
4827 
/* FISA not compiled in: no stats to dump */
static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
4832 
/* FISA not compiled in: no flow table to dump */
static void dp_rx_dump_fisa_table(struct dp_soc *soc)
{
}
4836 #endif /* !WLAN_SUPPORT_RX_FISA */
4837 
4838 /*
4839  * dp_soc_attach_target_wifi3() - SOC initialization in the target
4840  * @cdp_soc: Opaque Datapath SOC handle
4841  *
4842  * Return: zero on success, non-zero on failure
4843  */
static QDF_STATUS
dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	htt_soc_attach_target(soc->htt_handle);

	/* Push the RXDMA ring setup to the target first */
	status = dp_rxdma_ring_config(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt srng setup messages to target");
		return status;
	}

	status = dp_rxdma_ring_sel_cfg(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt ring config message to target");
		return status;
	}

	/* E_NOSUPPORT means FST/FISA is compiled out or disabled by INI;
	 * that is not a fatal attach error.
	 */
	status = dp_rx_target_fst_config(soc);
	if (status != QDF_STATUS_SUCCESS &&
	    status != QDF_STATUS_E_NOSUPPORT) {
		dp_err("Failed to send htt fst setup config message to target");
		return status;
	}

	/* FISA config only follows a successful FST setup */
	if (status == QDF_STATUS_SUCCESS) {
		status = dp_rx_fisa_config(soc);
		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt FISA config message to target");
			return status;
		}
	}

	DP_STATS_INIT(soc);

	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return QDF_STATUS_SUCCESS;
}
4886 
4887 #ifdef QCA_SUPPORT_FULL_MON
4888 static inline QDF_STATUS
4889 dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
4890 {
4891 	struct dp_soc *soc = pdev->soc;
4892 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4893 
4894 	if (!soc->full_mon_mode)
4895 		return QDF_STATUS_SUCCESS;
4896 
4897 	if ((htt_h2t_full_mon_cfg(soc->htt_handle,
4898 				  pdev->pdev_id,
4899 				  val)) != QDF_STATUS_SUCCESS) {
4900 		status = QDF_STATUS_E_FAILURE;
4901 	}
4902 
4903 	return status;
4904 }
4905 #else
4906 static inline QDF_STATUS
4907 dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
4908 {
4909 	return 0;
4910 }
4911 #endif
4912 
4913 /*
4914 * dp_vdev_attach_wifi3() - attach txrx vdev
4915 * @txrx_pdev: Datapath PDEV handle
4916 * @vdev_mac_addr: MAC address of the virtual interface
4917 * @vdev_id: VDEV Id
4918 * @wlan_op_mode: VDEV operating mode
4919 * @subtype: VDEV operating subtype
4920 *
4921 * Return: status
4922 */
static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
				       uint8_t pdev_id,
				       uint8_t *vdev_mac_addr,
				       uint8_t vdev_id,
				       enum wlan_op_mode op_mode,
				       enum wlan_op_subtype subtype)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("DP PDEV is Null for pdev id %d"), pdev_id);
		/* qdf_mem_free is NULL-safe if the vdev alloc also failed */
		qdf_mem_free(vdev);
		goto fail0;
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("DP VDEV memory allocation failed"));
		goto fail0;
	}

	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
			  WLAN_MD_DP_VDEV, "dp_vdev");

	vdev->pdev = pdev;
	vdev->vdev_id = vdev_id;
	vdev->opmode = op_mode;
	vdev->subtype = subtype;
	vdev->osdev = soc->osdev;

	/* OS-interface callbacks are filled in later via
	 * dp_vdev_register_wifi3()
	 */
	vdev->osif_rx = NULL;
	vdev->osif_rsim_rx_decap = NULL;
	vdev->osif_get_key = NULL;
	vdev->osif_rx_mon = NULL;
	vdev->osif_tx_free_ext = NULL;
	vdev->osif_vdev = NULL;

	vdev->delete.pending = 0;
	vdev->safemode = 0;
	vdev->drop_unenc = 1;
	vdev->sec_type = cdp_sec_type_none;
	vdev->multipass_en = false;
#ifdef notyet
	vdev->filters_num = 0;
#endif
	vdev->lmac_id = pdev->lmac_id;

	qdf_mem_copy(
		&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);

	/* TODO: Initialize default HTT meta data that will be used in
	 * TCL descriptors for packets transmitted from this VDEV
	 */

	TAILQ_INIT(&vdev->peer_list);
	dp_peer_multipass_list_init(vdev);

	/* In poll mode, kick the interrupt timer for the first vdev on
	 * the pdev (or for a monitor vdev)
	 */
	if ((soc->intr_mode == DP_INTR_POLL) &&
	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
		if ((pdev->vdev_count == 0) ||
		    (wlan_op_mode_monitor == vdev->opmode))
			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	}

	soc->vdev_id_map[vdev_id] = vdev;

	/* Monitor vdev returns early: it is not added to the pdev vdev
	 * list and skips the tx/rx data path setup below
	 */
	if (wlan_op_mode_monitor == vdev->opmode) {
		pdev->monitor_vdev = vdev;
		return QDF_STATUS_SUCCESS;
	}

	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->dscp_tid_map_id = 0;
	vdev->mcast_enhancement_en = 0;
	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
	vdev->prev_tx_enq_tstamp = 0;
	vdev->prev_rx_deliver_tstamp = 0;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	/* add this vdev into the pdev's list */
	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	pdev->vdev_count++;

	if (wlan_op_mode_sta != vdev->opmode)
		vdev->ap_bridge_enabled = true;
	else
		vdev->ap_bridge_enabled = false;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s: wlan_cfg_ap_bridge_enabled %d",
		  __func__, vdev->ap_bridge_enabled);

	dp_tx_vdev_attach(vdev);

	/* LRO hash is set up once, when the first vdev appears */
	if (pdev->vdev_count == 1)
		dp_lro_hash_setup(soc, pdev);

	dp_info("Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
	DP_STATS_INIT(vdev);

	/* STA mode: create a peer entry for the vdev's own MAC address */
	if (wlan_op_mode_sta == vdev->opmode)
		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
				     vdev->mac_addr.raw);

	return QDF_STATUS_SUCCESS;

fail0:
	return QDF_STATUS_E_FAILURE;
}
5038 
5039 /**
5040  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
5041  * @soc: Datapath soc handle
5042  * @vdev_id: id of Datapath VDEV handle
5043  * @osif_vdev: OSIF vdev handle
5044  * @txrx_ops: Tx and Rx operations
5045  *
5046  * Return: DP VDEV handle on success, NULL on failure
5047  */
5048 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc,
5049 					 uint8_t vdev_id,
5050 					 ol_osif_vdev_handle osif_vdev,
5051 					 struct ol_txrx_ops *txrx_ops)
5052 {
5053 	struct dp_vdev *vdev =
5054 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
5055 						   vdev_id);
5056 
5057 	if (!vdev)
5058 		return QDF_STATUS_E_FAILURE;
5059 
5060 	vdev->osif_vdev = osif_vdev;
5061 	vdev->osif_rx = txrx_ops->rx.rx;
5062 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
5063 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
5064 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
5065 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
5066 	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
5067 	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
5068 	vdev->osif_get_key = txrx_ops->get_key;
5069 	vdev->osif_rx_mon = txrx_ops->rx.mon;
5070 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
5071 	vdev->tx_comp = txrx_ops->tx.tx_comp;
5072 	vdev->stats_cb = txrx_ops->rx.stats_rx;
5073 #ifdef notyet
5074 #if ATH_SUPPORT_WAPI
5075 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
5076 #endif
5077 #endif
5078 #ifdef UMAC_SUPPORT_PROXY_ARP
5079 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
5080 #endif
5081 	vdev->me_convert = txrx_ops->me_convert;
5082 
5083 	/* TODO: Enable the following once Tx code is integrated */
5084 	if (vdev->mesh_vdev)
5085 		txrx_ops->tx.tx = dp_tx_send_mesh;
5086 	else
5087 		txrx_ops->tx.tx = dp_tx_send;
5088 
5089 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
5090 
5091 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
5092 		"DP Vdev Register success");
5093 
5094 	return QDF_STATUS_SUCCESS;
5095 }
5096 
5097 /**
5098  * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
5099  * @vdev: Datapath VDEV handle
5100  * @unmap_only: Flag to indicate "only unmap"
5101  *
5102  * Return: void
5103  */
5104 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
5105 {
5106 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5107 	struct dp_pdev *pdev = vdev->pdev;
5108 	struct dp_soc *soc = pdev->soc;
5109 	struct dp_peer *peer;
5110 	uint16_t *peer_ids;
5111 	struct dp_peer **peer_array = NULL;
5112 	uint8_t i = 0, j = 0;
5113 	uint8_t m = 0, n = 0;
5114 
5115 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(*peer_ids));
5116 	if (!peer_ids) {
5117 		dp_err("DP alloc failure - unable to flush peers");
5118 		return;
5119 	}
5120 
5121 	if (!unmap_only) {
5122 		peer_array = qdf_mem_malloc(
5123 				soc->max_peers * sizeof(struct dp_peer *));
5124 		if (!peer_array) {
5125 			qdf_mem_free(peer_ids);
5126 			dp_err("DP alloc failure - unable to flush peers");
5127 			return;
5128 		}
5129 	}
5130 
5131 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5132 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5133 		if (!unmap_only && n < soc->max_peers)
5134 			peer_array[n++] = peer;
5135 
5136 		if (peer->peer_id != HTT_INVALID_PEER)
5137 			if (j < soc->max_peers)
5138 				peer_ids[j++] = peer->peer_id;
5139 	}
5140 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5141 
5142 	/*
5143 	 * If peer id is invalid, need to flush the peer if
5144 	 * peer valid flag is true, this is needed for NAN + SSR case.
5145 	 */
5146 	if (!unmap_only) {
5147 		for (m = 0; m < n ; m++) {
5148 			peer = peer_array[m];
5149 
5150 			dp_info("peer: %pM is getting deleted",
5151 				peer->mac_addr.raw);
5152 			/* only if peer valid is true */
5153 			if (peer->valid)
5154 				dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
5155 						     vdev->vdev_id,
5156 						     peer->mac_addr.raw, 0);
5157 		}
5158 		qdf_mem_free(peer_array);
5159 	}
5160 
5161 	for (i = 0; i < j ; i++) {
5162 		peer = __dp_peer_find_by_id(soc, peer_ids[i]);
5163 
5164 		if (!peer)
5165 			continue;
5166 
5167 		dp_info("peer ref cnt %d", qdf_atomic_read(&peer->ref_cnt));
5168 		/*
5169 		 * set ref count to one to force delete the peers
5170 		 * with ref count leak
5171 		 */
5172 		SET_PEER_REF_CNT_ONE(peer);
5173 		dp_info("peer: %pM is getting unmap",
5174 			peer->mac_addr.raw);
5175 
5176 		dp_rx_peer_unmap_handler(soc, peer_ids[i],
5177 					 vdev->vdev_id,
5178 					 peer->mac_addr.raw, 0,
5179 					 DP_PEER_WDS_COUNT_INVALID);
5180 	}
5181 
5182 	qdf_mem_free(peer_ids);
5183 	dp_info("Flushed peers for vdev object %pK ", vdev);
5184 }
5185 
5186 /*
5187  * dp_vdev_detach_wifi3() - Detach txrx vdev
5188  * @cdp_soc: Datapath soc handle
5189  * @vdev_id: VDEV Id
5190  * @callback: Callback OL_IF on completion of detach
5191  * @cb_context:	Callback context
5192  *
5193  */
5194 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
5195 				       uint8_t vdev_id,
5196 				       ol_txrx_vdev_delete_cb callback,
5197 				       void *cb_context)
5198 {
5199 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
5200 	struct dp_pdev *pdev;
5201 	struct dp_neighbour_peer *peer = NULL;
5202 	struct dp_neighbour_peer *temp_peer = NULL;
5203 	struct dp_peer *vap_self_peer = NULL;
5204 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5205 
5206 	if (!vdev)
5207 		return QDF_STATUS_E_FAILURE;
5208 
5209 	pdev = vdev->pdev;
5210 
5211 	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev);
5212 	if (vap_self_peer) {
5213 		qdf_spin_lock_bh(&soc->ast_lock);
5214 		if (vap_self_peer->self_ast_entry) {
5215 			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
5216 			vap_self_peer->self_ast_entry = NULL;
5217 		}
5218 		qdf_spin_unlock_bh(&soc->ast_lock);
5219 
5220 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
5221 				     vap_self_peer->mac_addr.raw, 0);
5222 		dp_peer_unref_delete(vap_self_peer);
5223 	}
5224 
5225 	/*
5226 	 * If Target is hung, flush all peers before detaching vdev
5227 	 * this will free all references held due to missing
5228 	 * unmap commands from Target
5229 	 */
5230 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
5231 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
5232 	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
5233 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true);
5234 
5235 	dp_rx_vdev_detach(vdev);
5236 	/*
5237 	 * move it after dp_rx_vdev_detach(),
5238 	 * as the call back done in dp_rx_vdev_detach()
5239 	 * still need to get vdev pointer by vdev_id.
5240 	 */
5241 	soc->vdev_id_map[vdev->vdev_id] = NULL;
5242 	/*
5243 	 * Use peer_ref_mutex while accessing peer_list, in case
5244 	 * a peer is in the process of being removed from the list.
5245 	 */
5246 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5247 	/* check that the vdev has no peers allocated */
5248 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
5249 		/* debug print - will be removed later */
5250 		dp_warn("not deleting vdev object %pK (%pM) until deletion finishes for all its peers",
5251 			vdev, vdev->mac_addr.raw);
5252 
5253 		if (vdev->vdev_dp_ext_handle) {
5254 			qdf_mem_free(vdev->vdev_dp_ext_handle);
5255 			vdev->vdev_dp_ext_handle = NULL;
5256 		}
5257 		/* indicate that the vdev needs to be deleted */
5258 		vdev->delete.pending = 1;
5259 		vdev->delete.callback = callback;
5260 		vdev->delete.context = cb_context;
5261 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5262 		return QDF_STATUS_E_FAILURE;
5263 	}
5264 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5265 
5266 	if (wlan_op_mode_monitor == vdev->opmode)
5267 		goto free_vdev;
5268 
5269 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5270 	if (!soc->hw_nac_monitor_support) {
5271 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
5272 			      neighbour_peer_list_elem) {
5273 			QDF_ASSERT(peer->vdev != vdev);
5274 		}
5275 	} else {
5276 		TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
5277 				   neighbour_peer_list_elem, temp_peer) {
5278 			if (peer->vdev == vdev) {
5279 				TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
5280 					     neighbour_peer_list_elem);
5281 				qdf_mem_free(peer);
5282 			}
5283 		}
5284 	}
5285 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5286 
5287 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5288 	/* remove the vdev from its parent pdev's list */
5289 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
5290 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5291 
5292 	dp_tx_vdev_detach(vdev);
5293 	wlan_minidump_remove(vdev);
5294 
5295 free_vdev:
5296 	if (wlan_op_mode_monitor == vdev->opmode) {
5297 		if (soc->intr_mode == DP_INTR_POLL)
5298 			qdf_timer_sync_cancel(&soc->int_timer);
5299 		pdev->monitor_vdev = NULL;
5300 	}
5301 
5302 	if (vdev->vdev_dp_ext_handle) {
5303 		qdf_mem_free(vdev->vdev_dp_ext_handle);
5304 		vdev->vdev_dp_ext_handle = NULL;
5305 	}
5306 
5307 	dp_info("deleting vdev object %pK (%pM)", vdev, vdev->mac_addr.raw);
5308 
5309 	qdf_mem_free(vdev);
5310 
5311 	if (callback)
5312 		callback(cb_context);
5313 
5314 	return QDF_STATUS_SUCCESS;
5315 }
5316 
#ifdef FEATURE_AST
/*
 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
 * @soc - datapath soc handle
 * @peer - datapath peer handle
 *
 * Delete the AST entries belonging to a peer.
 * NOTE(review): callers appear to hold soc->ast_lock around this
 * (see dp_peer_create_wifi3) — confirm before adding new call sites.
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	/* iterate with a temp cursor so entries can be deleted in-loop */
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);

	/* clear the cached self AST pointer after the wholesale delete */
	peer->self_ast_entry = NULL;
}
#else
/* No-op stub when FEATURE_AST is disabled: no AST table to clean up */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif
5341 #if ATH_SUPPORT_WRAP
5342 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
5343 						uint8_t *peer_mac_addr)
5344 {
5345 	struct dp_peer *peer;
5346 
5347 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
5348 				      0, vdev->vdev_id);
5349 	if (!peer)
5350 		return NULL;
5351 
5352 	if (peer->bss_peer)
5353 		return peer;
5354 
5355 	dp_peer_unref_delete(peer);
5356 	return NULL;
5357 }
5358 #else
5359 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
5360 						uint8_t *peer_mac_addr)
5361 {
5362 	struct dp_peer *peer;
5363 
5364 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
5365 				      0, vdev->vdev_id);
5366 	if (!peer)
5367 		return NULL;
5368 
5369 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
5370 		return peer;
5371 
5372 	dp_peer_unref_delete(peer);
5373 	return NULL;
5374 }
5375 #endif
5376 
#ifdef FEATURE_AST
/*
 * dp_peer_ast_handle_roam_del() - remove a stale WDS AST entry on roam
 * @soc: datapath soc handle
 * @pdev: datapath pdev handle
 * @peer_mac_addr: MAC address whose AST entry should be checked
 *
 * When a STA roams between RPTR and ROOT AP, a leftover WDS (next_hop)
 * AST entry for its MAC may still exist; delete it under ast_lock.
 */
static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
					       struct dp_pdev *pdev,
					       uint8_t *peer_mac_addr)
{
	struct dp_ast_entry *ast_entry;

	qdf_spin_lock_bh(&soc->ast_lock);

	/* per-pdev lookup when AST override is supported, else soc-wide */
	ast_entry = soc->ast_override_support ?
		dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
						pdev->pdev_id) :
		dp_peer_ast_hash_find_soc(soc, peer_mac_addr);

	/* only WDS (next_hop) entries not already being torn down */
	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);
}
#endif
5397 
#ifdef PEER_CACHE_RX_PKTS
/*
 * dp_peer_rx_bufq_resources_init() - set up the peer's cached rx bufq
 * @peer: datapath peer being initialized
 *
 * Creates the list, threshold and lock backing the per-peer rx packet
 * cache. Called during peer create, before the peer is in use.
 */
static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
{
	peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
}
#else
/* No-op stub when rx packet caching is compiled out */
static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
{
}
#endif
5410 
5411 /*
5412  * dp_peer_create_wifi3() - attach txrx peer
5413  * @soc_hdl: Datapath soc handle
5414  * @vdev_id: id of vdev
5415  * @peer_mac_addr: Peer MAC address
5416  *
5417  * Return: 0 on success, -1 on failure
5418  */
5419 static QDF_STATUS
5420 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5421 		     uint8_t *peer_mac_addr)
5422 {
5423 	struct dp_peer *peer;
5424 	int i;
5425 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5426 	struct dp_pdev *pdev;
5427 	struct cdp_peer_cookie peer_cookie;
5428 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
5429 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5430 
5431 	if (!vdev || !peer_mac_addr)
5432 		return QDF_STATUS_E_FAILURE;
5433 
5434 	pdev = vdev->pdev;
5435 	soc = pdev->soc;
5436 
5437 	/*
5438 	 * If a peer entry with given MAC address already exists,
5439 	 * reuse the peer and reset the state of peer.
5440 	 */
5441 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
5442 
5443 	if (peer) {
5444 		qdf_atomic_init(&peer->is_default_route_set);
5445 		dp_peer_cleanup(vdev, peer, true);
5446 
5447 		qdf_spin_lock_bh(&soc->ast_lock);
5448 		dp_peer_delete_ast_entries(soc, peer);
5449 		peer->delete_in_progress = false;
5450 		qdf_spin_unlock_bh(&soc->ast_lock);
5451 
5452 		if ((vdev->opmode == wlan_op_mode_sta) &&
5453 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
5454 		     QDF_MAC_ADDR_SIZE)) {
5455 			ast_type = CDP_TXRX_AST_TYPE_SELF;
5456 		}
5457 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
5458 		/*
5459 		* Control path maintains a node count which is incremented
5460 		* for every new peer create command. Since new peer is not being
5461 		* created and earlier reference is reused here,
5462 		* peer_unref_delete event is sent to control path to
5463 		* increment the count back.
5464 		*/
5465 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
5466 			soc->cdp_soc.ol_ops->peer_unref_delete(
5467 				soc->ctrl_psoc,
5468 				pdev->pdev_id,
5469 				peer->mac_addr.raw, vdev->mac_addr.raw,
5470 				vdev->opmode);
5471 		}
5472 
5473 		peer->valid = 1;
5474 		dp_local_peer_id_alloc(pdev, peer);
5475 
5476 		qdf_spinlock_create(&peer->peer_info_lock);
5477 		dp_peer_rx_bufq_resources_init(peer);
5478 
5479 		DP_STATS_INIT(peer);
5480 		DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
5481 
5482 		/*
5483 		 * In tx_monitor mode, filter may be set for unassociated peer
5484 		 * when unassociated peer get associated peer need to
5485 		 * update tx_cap_enabled flag to support peer filter.
5486 		 */
5487 		dp_peer_tx_capture_filter_check(pdev, peer);
5488 
5489 		dp_set_peer_isolation(peer, false);
5490 
5491 		return QDF_STATUS_SUCCESS;
5492 	} else {
5493 		/*
5494 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
5495 		 * need to remove the AST entry which was earlier added as a WDS
5496 		 * entry.
5497 		 * If an AST entry exists, but no peer entry exists with a given
5498 		 * MAC addresses, we could deduce it as a WDS entry
5499 		 */
5500 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
5501 	}
5502 
5503 #ifdef notyet
5504 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
5505 		soc->mempool_ol_ath_peer);
5506 #else
5507 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
5508 #endif
5509 	wlan_minidump_log(peer,
5510 			  sizeof(*peer),
5511 			  soc->ctrl_psoc,
5512 			  WLAN_MD_DP_PEER, "dp_peer");
5513 	if (!peer)
5514 		return QDF_STATUS_E_FAILURE; /* failure */
5515 
5516 	qdf_mem_zero(peer, sizeof(struct dp_peer));
5517 
5518 	TAILQ_INIT(&peer->ast_entry_list);
5519 
5520 	/* store provided params */
5521 	peer->vdev = vdev;
5522 
5523 	if ((vdev->opmode == wlan_op_mode_sta) &&
5524 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
5525 			 QDF_MAC_ADDR_SIZE)) {
5526 		ast_type = CDP_TXRX_AST_TYPE_SELF;
5527 	}
5528 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
5529 	qdf_spinlock_create(&peer->peer_info_lock);
5530 
5531 	dp_peer_rx_bufq_resources_init(peer);
5532 
5533 	qdf_mem_copy(
5534 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
5535 
5536 	/* initialize the peer_id */
5537 	peer->peer_id = HTT_INVALID_PEER;
5538 
5539 	/* reset the ast index to flowid table */
5540 	dp_peer_reset_flowq_map(peer);
5541 
5542 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5543 
5544 	qdf_atomic_init(&peer->ref_cnt);
5545 
5546 	/* keep one reference for attach */
5547 	qdf_atomic_inc(&peer->ref_cnt);
5548 
5549 	/* add this peer into the vdev's list */
5550 	if (wlan_op_mode_sta == vdev->opmode)
5551 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
5552 	else
5553 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
5554 
5555 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5556 
5557 	/* TODO: See if hash based search is required */
5558 	dp_peer_find_hash_add(soc, peer);
5559 
5560 	/* Initialize the peer state */
5561 	peer->state = OL_TXRX_PEER_STATE_DISC;
5562 
5563 	dp_info("vdev %pK created peer %pK (%pM) ref_cnt: %d",
5564 		vdev, peer, peer->mac_addr.raw,
5565 		qdf_atomic_read(&peer->ref_cnt));
5566 	/*
5567 	 * For every peer MAp message search and set if bss_peer
5568 	 */
5569 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5570 			QDF_MAC_ADDR_SIZE) == 0 &&
5571 			(wlan_op_mode_sta != vdev->opmode)) {
5572 		dp_info("vdev bss_peer!!");
5573 		peer->bss_peer = 1;
5574 	}
5575 
5576 	if (wlan_op_mode_sta == vdev->opmode &&
5577 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5578 			QDF_MAC_ADDR_SIZE) == 0) {
5579 		peer->sta_self_peer = 1;
5580 	}
5581 
5582 	for (i = 0; i < DP_MAX_TIDS; i++)
5583 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
5584 
5585 	peer->valid = 1;
5586 	dp_local_peer_id_alloc(pdev, peer);
5587 	DP_STATS_INIT(peer);
5588 	DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
5589 
5590 	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
5591 		     QDF_MAC_ADDR_SIZE);
5592 	peer_cookie.ctx = NULL;
5593 	peer_cookie.pdev_id = pdev->pdev_id;
5594 	peer_cookie.cookie = pdev->next_peer_cookie++;
5595 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
5596 	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc,
5597 			     (void *)&peer_cookie,
5598 			     peer->peer_id, WDI_NO_VAL, pdev->pdev_id);
5599 #endif
5600 	if (soc->wlanstats_enabled) {
5601 		if (!peer_cookie.ctx) {
5602 			pdev->next_peer_cookie--;
5603 			qdf_err("Failed to initialize peer rate stats");
5604 		} else {
5605 			peer->wlanstats_ctx = (struct cdp_peer_rate_stats_ctx *)
5606 						peer_cookie.ctx;
5607 		}
5608 	}
5609 
5610 	/*
5611 	 * Allocate peer extended stats context. Fall through in
5612 	 * case of failure as its not an implicit requirement to have
5613 	 * this object for regular statistics updates.
5614 	 */
5615 	if (dp_peer_ext_stats_ctx_alloc(soc, peer) !=
5616 			QDF_STATUS_SUCCESS)
5617 		dp_warn("peer ext_stats ctx alloc failed");
5618 
5619 	/*
5620 	 * In tx_monitor mode, filter may be set for unassociated peer
5621 	 * when unassociated peer get associated peer need to
5622 	 * update tx_cap_enabled flag to support peer filter.
5623 	 */
5624 	dp_peer_tx_capture_filter_check(pdev, peer);
5625 
5626 	dp_set_peer_isolation(peer, false);
5627 
5628 	return QDF_STATUS_SUCCESS;
5629 }
5630 
5631 /*
5632  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
5633  * @vdev: Datapath VDEV handle
5634  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5635  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5636  *
5637  * Return: None
5638  */
5639 static
5640 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
5641 				  enum cdp_host_reo_dest_ring *reo_dest,
5642 				  bool *hash_based)
5643 {
5644 	struct dp_soc *soc;
5645 	struct dp_pdev *pdev;
5646 
5647 	pdev = vdev->pdev;
5648 	soc = pdev->soc;
5649 	/*
5650 	 * hash based steering is disabled for Radios which are offloaded
5651 	 * to NSS
5652 	 */
5653 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
5654 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
5655 
5656 	/*
5657 	 * Below line of code will ensure the proper reo_dest ring is chosen
5658 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
5659 	 */
5660 	*reo_dest = pdev->reo_dest;
5661 }
5662 
5663 #ifdef IPA_OFFLOAD
5664 /**
5665  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
5666  * @vdev: Virtual device
5667  *
5668  * Return: true if the vdev is of subtype P2P
5669  *	   false if the vdev is of any other subtype
5670  */
5671 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
5672 {
5673 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
5674 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
5675 	    vdev->subtype == wlan_op_subtype_p2p_go)
5676 		return true;
5677 
5678 	return false;
5679 }
5680 
5681 /*
5682  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5683  * @vdev: Datapath VDEV handle
5684  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5685  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5686  *
5687  * If IPA is enabled in ini, for SAP mode, disable hash based
5688  * steering, use default reo_dst ring for RX. Use config values for other modes.
5689  * Return: None
5690  */
5691 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5692 				       enum cdp_host_reo_dest_ring *reo_dest,
5693 				       bool *hash_based)
5694 {
5695 	struct dp_soc *soc;
5696 	struct dp_pdev *pdev;
5697 
5698 	pdev = vdev->pdev;
5699 	soc = pdev->soc;
5700 
5701 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5702 
5703 	/* For P2P-GO interfaces we do not need to change the REO
5704 	 * configuration even if IPA config is enabled
5705 	 */
5706 	if (dp_is_vdev_subtype_p2p(vdev))
5707 		return;
5708 
5709 	/*
5710 	 * If IPA is enabled, disable hash-based flow steering and set
5711 	 * reo_dest_ring_4 as the REO ring to receive packets on.
5712 	 * IPA is configured to reap reo_dest_ring_4.
5713 	 *
5714 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
5715 	 * value enum value is from 1 - 4.
5716 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
5717 	 */
5718 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5719 		if (vdev->opmode == wlan_op_mode_ap) {
5720 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
5721 			*hash_based = 0;
5722 		} else if (vdev->opmode == wlan_op_mode_sta &&
5723 			   dp_ipa_is_mdm_platform()) {
5724 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
5725 		}
5726 	}
5727 }
5728 
5729 #else
5730 
5731 /*
5732  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5733  * @vdev: Datapath VDEV handle
5734  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5735  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5736  *
5737  * Use system config values for hash based steering.
5738  * Return: None
5739  */
5740 
5741 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5742 				       enum cdp_host_reo_dest_ring *reo_dest,
5743 				       bool *hash_based)
5744 {
5745 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5746 }
5747 #endif /* IPA_OFFLOAD */
5748 
5749 /*
5750  * dp_peer_setup_wifi3() - initialize the peer
5751  * @soc_hdl: soc handle object
5752  * @vdev_id : vdev_id of vdev object
5753  * @peer_mac: Peer's mac address
5754  *
5755  * Return: QDF_STATUS
5756  */
5757 static QDF_STATUS
5758 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5759 		    uint8_t *peer_mac)
5760 {
5761 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5762 	struct dp_pdev *pdev;
5763 	bool hash_based = 0;
5764 	enum cdp_host_reo_dest_ring reo_dest;
5765 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5766 	struct dp_vdev *vdev =
5767 			dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5768 	struct dp_peer *peer =
5769 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id);
5770 
5771 	if (!vdev || !peer || peer->delete_in_progress) {
5772 		status = QDF_STATUS_E_FAILURE;
5773 		goto fail;
5774 	}
5775 
5776 	pdev = vdev->pdev;
5777 	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
5778 
5779 	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
5780 		pdev->pdev_id, vdev->vdev_id,
5781 		vdev->opmode, hash_based, reo_dest);
5782 
5783 
5784 	/*
5785 	 * There are corner cases where the AD1 = AD2 = "VAPs address"
5786 	 * i.e both the devices have same MAC address. In these
5787 	 * cases we want such pkts to be processed in NULL Q handler
5788 	 * which is REO2TCL ring. for this reason we should
5789 	 * not setup reo_queues and default route for bss_peer.
5790 	 */
5791 	dp_peer_tx_init(pdev, peer);
5792 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
5793 		status = QDF_STATUS_E_FAILURE;
5794 		goto fail;
5795 	}
5796 
5797 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
5798 		/* TODO: Check the destination ring number to be passed to FW */
5799 		soc->cdp_soc.ol_ops->peer_set_default_routing(
5800 				soc->ctrl_psoc,
5801 				peer->vdev->pdev->pdev_id,
5802 				peer->mac_addr.raw,
5803 				peer->vdev->vdev_id, hash_based, reo_dest);
5804 	}
5805 
5806 	qdf_atomic_set(&peer->is_default_route_set, 1);
5807 
5808 	dp_peer_rx_init(pdev, peer);
5809 
5810 	dp_peer_ppdu_delayed_ba_init(peer);
5811 
5812 fail:
5813 	if (peer)
5814 		dp_peer_unref_delete(peer);
5815 	return status;
5816 }
5817 
5818 /*
5819  * dp_cp_peer_del_resp_handler - Handle the peer delete response
5820  * @soc_hdl: Datapath SOC handle
5821  * @vdev_id: id of virtual device object
5822  * @mac_addr: Mac address of the peer
5823  *
5824  * Return: QDF_STATUS
5825  */
5826 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
5827 					      uint8_t vdev_id,
5828 					      uint8_t *mac_addr)
5829 {
5830 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5831 	struct dp_ast_entry  *ast_entry = NULL;
5832 	txrx_ast_free_cb cb = NULL;
5833 	void *cookie;
5834 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5835 
5836 	if (!vdev)
5837 		return QDF_STATUS_E_FAILURE;
5838 
5839 	qdf_spin_lock_bh(&soc->ast_lock);
5840 
5841 	if (soc->ast_override_support)
5842 		ast_entry =
5843 			dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
5844 							vdev->pdev->pdev_id);
5845 	else
5846 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
5847 
5848 	/* in case of qwrap we have multiple BSS peers
5849 	 * with same mac address
5850 	 *
5851 	 * AST entry for this mac address will be created
5852 	 * only for one peer hence it will be NULL here
5853 	 */
5854 	if (!ast_entry || ast_entry->peer || !ast_entry->delete_in_progress) {
5855 		qdf_spin_unlock_bh(&soc->ast_lock);
5856 		return QDF_STATUS_E_FAILURE;
5857 	}
5858 
5859 	if (ast_entry->is_mapped)
5860 		soc->ast_table[ast_entry->ast_idx] = NULL;
5861 
5862 	DP_STATS_INC(soc, ast.deleted, 1);
5863 	dp_peer_ast_hash_remove(soc, ast_entry);
5864 
5865 	cb = ast_entry->callback;
5866 	cookie = ast_entry->cookie;
5867 	ast_entry->callback = NULL;
5868 	ast_entry->cookie = NULL;
5869 
5870 	soc->num_ast_entries--;
5871 	qdf_spin_unlock_bh(&soc->ast_lock);
5872 
5873 	if (cb) {
5874 		cb(soc->ctrl_psoc,
5875 		   dp_soc_to_cdp_soc(soc),
5876 		   cookie,
5877 		   CDP_TXRX_AST_DELETED);
5878 	}
5879 	qdf_mem_free(ast_entry);
5880 
5881 	return QDF_STATUS_SUCCESS;
5882 }
5883 
5884 /*
5885  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
5886  * @txrx_soc: cdp soc handle
5887  * @ac: Access category
5888  * @value: timeout value in millisec
5889  *
5890  * Return: void
5891  */
5892 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5893 				    uint8_t ac, uint32_t value)
5894 {
5895 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5896 
5897 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
5898 }
5899 
5900 /*
5901  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
5902  * @txrx_soc: cdp soc handle
5903  * @ac: access category
5904  * @value: timeout value in millisec
5905  *
5906  * Return: void
5907  */
5908 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5909 				    uint8_t ac, uint32_t *value)
5910 {
5911 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5912 
5913 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
5914 }
5915 
5916 /*
5917  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
5918  * @txrx_soc: cdp soc handle
5919  * @pdev_id: id of physical device object
5920  * @val: reo destination ring index (1 - 4)
5921  *
5922  * Return: QDF_STATUS
5923  */
5924 static QDF_STATUS
5925 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
5926 		     enum cdp_host_reo_dest_ring val)
5927 {
5928 	struct dp_pdev *pdev =
5929 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
5930 						   pdev_id);
5931 
5932 	if (pdev) {
5933 		pdev->reo_dest = val;
5934 		return QDF_STATUS_SUCCESS;
5935 	}
5936 
5937 	return QDF_STATUS_E_FAILURE;
5938 }
5939 
5940 /*
5941  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
5942  * @txrx_soc: cdp soc handle
5943  * @pdev_id: id of physical device object
5944  *
5945  * Return: reo destination ring index
5946  */
5947 static enum cdp_host_reo_dest_ring
5948 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
5949 {
5950 	struct dp_pdev *pdev =
5951 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
5952 						   pdev_id);
5953 
5954 	if (pdev)
5955 		return pdev->reo_dest;
5956 	else
5957 		return cdp_host_reo_dest_ring_unknown;
5958 }
5959 
#ifdef ATH_SUPPORT_NAC
/*
 * dp_set_filter_neigh_peers() - set filter neighbour peers for smart mesh
 * @pdev: device object
 * @val: value to be set
 *
 * Return: 0 (always succeeds)
 */
static int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
				     bool val)
{
	/* Enable/Disable smart mesh filtering. This flag will be checked
	 * during rx processing to check if packets are from NAC clients.
	 */
	pdev->filter_neighbour_peers = val;
	return 0;
}
#else
/* No-op stub when NAC support is compiled out; always reports success */
static int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
				     bool val)
{
	return 0;
}
#endif /* ATH_SUPPORT_NAC */
5984 
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
/*
 * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
 * address for smart mesh filtering
 * @soc: cdp soc handle
 * @vdev_id: id of virtual device object
 * @cmd: Add/Del command (DP_NAC_PARAM_ADD / DP_NAC_PARAM_DEL)
 * @macaddr: nac client mac address
 *
 * Return: 1 on success, 0 on failure
 */
static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc,
					    uint8_t vdev_id,
					    uint32_t cmd, uint8_t *macaddr)
{
	struct dp_pdev *pdev;
	struct dp_neighbour_peer *peer = NULL;
	struct dp_vdev *vdev =
		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
						   vdev_id);

	if (!vdev || !macaddr)
		goto fail0;

	pdev = vdev->pdev;

	if (!pdev)
		goto fail0;

	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */
	if (cmd == DP_NAC_PARAM_ADD) {
		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
				sizeof(*peer));

		if (!peer) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("DP neighbour peer node memory allocation failed"));
			goto fail0;
		}

		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
			macaddr, QDF_MAC_ADDR_SIZE);
		peer->vdev = vdev;

		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);

		/* add this neighbour peer into the list */
		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
				neighbour_peer_list_elem);
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		/* first neighbour: turn on the smart monitor rx filter */
		if (!pdev->neighbour_peers_added) {
			QDF_STATUS status = QDF_STATUS_SUCCESS;

			pdev->neighbour_peers_added = true;
			dp_mon_filter_setup_smart_monitor(pdev);
			status = dp_mon_filter_update(pdev);
			/* roll back the filter setup on failure */
			if (status != QDF_STATUS_SUCCESS) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("smart mon filter setup failed"));
				dp_mon_filter_reset_smart_monitor(pdev);
				pdev->neighbour_peers_added = false;
			}
		}
		return 1;

	} else if (cmd == DP_NAC_PARAM_DEL) {
		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
				neighbour_peer_list_elem) {
			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				macaddr, QDF_MAC_ADDR_SIZE)) {
				/* delete this peer from the list */
				TAILQ_REMOVE(&pdev->neighbour_peers_list,
					peer, neighbour_peer_list_elem);
				qdf_mem_free(peer);
				break;
			}
		}
		/* last neighbour deleted: tear down the smart monitor filter */
		if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
			QDF_STATUS status = QDF_STATUS_SUCCESS;

			pdev->neighbour_peers_added = false;
			dp_mon_filter_reset_smart_monitor(pdev);
			status = dp_mon_filter_update(pdev);
			if (status != QDF_STATUS_SUCCESS) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("smart mon filter clear failed"));
			}

		}

		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
		return 1;

	}

fail0:
	return 0;
}
6091 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
6092 
6093 /*
6094  * dp_get_sec_type() - Get the security type
6095  * @soc: soc handle
6096  * @vdev_id: id of dp handle
6097  * @peer_mac: mac of datapath PEER handle
6098  * @sec_idx:    Security id (mcast, ucast)
6099  *
6100  * return sec_type: Security type
6101  */
6102 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
6103 			   uint8_t *peer_mac, uint8_t sec_idx)
6104 {
6105 	int sec_type = 0;
6106 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
6107 						       peer_mac, 0, vdev_id);
6108 
6109 	if (!peer || peer->delete_in_progress) {
6110 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6111 			  "%s: Peer is NULL!\n", __func__);
6112 		goto fail;
6113 	}
6114 
6115 	sec_type = peer->security[sec_idx].sec_type;
6116 fail:
6117 	if (peer)
6118 		dp_peer_unref_delete(peer);
6119 	return sec_type;
6120 }
6121 
6122 /*
6123  * dp_peer_authorize() - authorize txrx peer
6124  * @soc: soc handle
6125  * @vdev_id: id of dp handle
6126  * @peer_mac: mac of datapath PEER handle
6127  * @authorize
6128  *
6129  */
6130 static QDF_STATUS
6131 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6132 		  uint8_t *peer_mac, uint32_t authorize)
6133 {
6134 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6135 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6136 	struct dp_peer *peer = dp_peer_find_hash_find(soc,
6137 						      peer_mac,
6138 						      0, vdev_id);
6139 
6140 	if (!peer || peer->delete_in_progress) {
6141 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6142 			  "%s: Peer is NULL!\n", __func__);
6143 		status = QDF_STATUS_E_FAILURE;
6144 	} else {
6145 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
6146 		peer->authorize = authorize ? 1 : 0;
6147 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6148 	}
6149 
6150 	if (peer)
6151 		dp_peer_unref_delete(peer);
6152 
6153 	return status;
6154 }
6155 
6156 /*
6157  * dp_peer_release_mem() - free dp peer handle memory
6158  * @soc: dataptah soc handle
6159  * @pdev: datapath pdev handle
6160  * @peer: datapath peer handle
6161  * @vdev_opmode: Vdev operation mode
6162  * @vdev_mac_addr: Vdev Mac address
6163  *
6164  * Return: None
6165  */
6166 static void dp_peer_release_mem(struct dp_soc *soc,
6167 				struct dp_pdev *pdev,
6168 				struct dp_peer *peer,
6169 				enum wlan_op_mode vdev_opmode,
6170 				uint8_t *vdev_mac_addr)
6171 {
6172 	if (soc->cdp_soc.ol_ops->peer_unref_delete)
6173 		soc->cdp_soc.ol_ops->peer_unref_delete(
6174 				soc->ctrl_psoc,
6175 				pdev->pdev_id,
6176 				peer->mac_addr.raw, vdev_mac_addr,
6177 				vdev_opmode);
6178 
6179 	/*
6180 	 * Peer AST list hast to be empty here
6181 	 */
6182 	DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
6183 
6184 	qdf_mem_free(peer);
6185 }
6186 
6187 /**
6188  * dp_delete_pending_vdev() - check and process vdev delete
6189  * @pdev: DP specific pdev pointer
6190  * @vdev: DP specific vdev pointer
6191  * @vdev_id: vdev id corresponding to vdev
6192  *
6193  * This API does following:
6194  * 1) It releases tx flow pools buffers as vdev is
6195  *    going down and no peers are associated.
6196  * 2) It also detaches vdev before cleaning vdev (struct dp_vdev) memory
6197  */
6198 static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
6199 				   uint8_t vdev_id)
6200 {
6201 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
6202 	void *vdev_delete_context = NULL;
6203 
6204 	vdev_delete_cb = vdev->delete.callback;
6205 	vdev_delete_context = vdev->delete.context;
6206 
6207 	dp_info("deleting vdev object %pK (%pM)- its last peer is done",
6208 		vdev, vdev->mac_addr.raw);
6209 	/* all peers are gone, go ahead and delete it */
6210 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
6211 			FLOW_TYPE_VDEV, vdev_id);
6212 	dp_tx_vdev_detach(vdev);
6213 
6214 	pdev->soc->vdev_id_map[vdev_id] = NULL;
6215 
6216 	if (wlan_op_mode_monitor == vdev->opmode) {
6217 		pdev->monitor_vdev = NULL;
6218 	} else {
6219 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
6220 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
6221 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6222 	}
6223 
6224 	dp_info("deleting vdev object %pK (%pM)",
6225 		vdev, vdev->mac_addr.raw);
6226 	qdf_mem_free(vdev);
6227 	vdev = NULL;
6228 
6229 	if (vdev_delete_cb)
6230 		vdev_delete_cb(vdev_delete_context);
6231 }
6232 
6233 /*
6234  * dp_peer_unref_delete() - unref and delete peer
6235  * @peer_handle:		Datapath peer handle
6236  *
6237  */
6238 void dp_peer_unref_delete(struct dp_peer *peer)
6239 {
6240 	struct dp_vdev *vdev = peer->vdev;
6241 	struct dp_pdev *pdev = vdev->pdev;
6242 	struct dp_soc *soc = pdev->soc;
6243 	struct dp_peer *tmppeer;
6244 	int found = 0;
6245 	uint16_t peer_id;
6246 	uint16_t vdev_id;
6247 	bool vdev_delete = false;
6248 	struct cdp_peer_cookie peer_cookie;
6249 	enum wlan_op_mode vdev_opmode;
6250 	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
6251 
6252 	/*
6253 	 * Hold the lock all the way from checking if the peer ref count
6254 	 * is zero until the peer references are removed from the hash
6255 	 * table and vdev list (if the peer ref count is zero).
6256 	 * This protects against a new HL tx operation starting to use the
6257 	 * peer object just after this function concludes it's done being used.
6258 	 * Furthermore, the lock needs to be held while checking whether the
6259 	 * vdev's list of peers is empty, to make sure that list is not modified
6260 	 * concurrently with the empty check.
6261 	 */
6262 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
6263 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
6264 		peer_id = peer->peer_id;
6265 		vdev_id = vdev->vdev_id;
6266 
6267 		/*
6268 		 * Make sure that the reference to the peer in
6269 		 * peer object map is removed
6270 		 */
6271 		if (peer_id != HTT_INVALID_PEER)
6272 			soc->peer_id_to_obj_map[peer_id] = NULL;
6273 
6274 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6275 			  "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
6276 
6277 		/* remove the reference to the peer from the hash table */
6278 		dp_peer_find_hash_remove(soc, peer);
6279 
6280 		qdf_spin_lock_bh(&soc->ast_lock);
6281 		if (peer->self_ast_entry) {
6282 			dp_peer_del_ast(soc, peer->self_ast_entry);
6283 		}
6284 		qdf_spin_unlock_bh(&soc->ast_lock);
6285 
6286 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
6287 			if (tmppeer == peer) {
6288 				found = 1;
6289 				break;
6290 			}
6291 		}
6292 
6293 		if (found) {
6294 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
6295 				peer_list_elem);
6296 		} else {
6297 			/*Ignoring the remove operation as peer not found*/
6298 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6299 				  "peer:%pK not found in vdev:%pK peerlist:%pK",
6300 				  peer, vdev, &peer->vdev->peer_list);
6301 		}
6302 
6303 		/*
6304 		 * Deallocate the extended stats contenxt
6305 		 */
6306 		dp_peer_ext_stats_ctx_dealloc(soc, peer);
6307 
6308 		/* send peer destroy event to upper layer */
6309 		qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
6310 			     QDF_MAC_ADDR_SIZE);
6311 		peer_cookie.ctx = NULL;
6312 		peer_cookie.ctx = (struct cdp_stats_cookie *)
6313 					peer->wlanstats_ctx;
6314 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6315 		dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
6316 				     pdev->soc,
6317 				     (void *)&peer_cookie,
6318 				     peer->peer_id,
6319 				     WDI_NO_VAL,
6320 				     pdev->pdev_id);
6321 #endif
6322 		peer->wlanstats_ctx = NULL;
6323 
6324 		/* cleanup the peer data */
6325 		dp_peer_cleanup(vdev, peer, false);
6326 		if (!peer->bss_peer)
6327 			DP_UPDATE_STATS(vdev, peer);
6328 		/* save vdev related member in case vdev freed */
6329 		vdev_opmode = vdev->opmode;
6330 		qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
6331 			     QDF_MAC_ADDR_SIZE);
6332 		/*
6333 		 * check whether the parent vdev is pending for deleting
6334 		 * and no peers left.
6335 		 */
6336 		if (vdev->delete.pending && TAILQ_EMPTY(&vdev->peer_list))
6337 			vdev_delete = true;
6338 		/*
6339 		 * Now that there are no references to the peer, we can
6340 		 * release the peer reference lock.
6341 		 */
6342 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6343 
6344 		wlan_minidump_remove(peer);
6345 		/*
6346 		 * Invoke soc.ol_ops->peer_unref_delete out of
6347 		 * peer_ref_mutex in case deadlock issue.
6348 		 */
6349 		dp_peer_release_mem(soc, pdev, peer,
6350 				    vdev_opmode,
6351 				    vdev_mac_addr);
6352 		/*
6353 		 * Delete the vdev if it's waiting all peer deleted
6354 		 * and it's chance now.
6355 		 */
6356 		if (vdev_delete)
6357 			dp_delete_pending_vdev(pdev, vdev, vdev_id);
6358 
6359 	} else {
6360 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6361 	}
6362 }
6363 
#ifdef PEER_CACHE_RX_PKTS
/*
 * dp_peer_rx_bufq_resources_deinit() - release the per-peer cached rx
 * buffer queue and its lock
 * @peer: Datapath peer handle
 */
static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
{
	qdf_list_destroy(&peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
}
#else
/* no-op when rx-packet caching is not compiled in */
static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
{
}
#endif
6375 
6376 /*
6377  * dp_peer_detach_wifi3() – Detach txrx peer
6378  * @soc_hdl: soc handle
6379  * @vdev_id: id of dp handle
6380  * @peer_mac: mac of datapath PEER handle
6381  * @bitmap: bitmap indicating special handling of request.
6382  *
6383  */
6384 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
6385 				       uint8_t vdev_id,
6386 				       uint8_t *peer_mac, uint32_t bitmap)
6387 {
6388 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6389 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
6390 						      0, vdev_id);
6391 
6392 	/* Peer can be null for monitor vap mac address */
6393 	if (!peer) {
6394 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
6395 			  "%s: Invalid peer\n", __func__);
6396 		return QDF_STATUS_E_FAILURE;
6397 	}
6398 
6399 	if (!peer->valid) {
6400 		dp_peer_unref_delete(peer);
6401 		dp_err("Invalid peer: %pM", peer_mac);
6402 		return QDF_STATUS_E_ALREADY;
6403 	}
6404 
6405 	peer->valid = 0;
6406 
6407 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
6408 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
6409 
6410 	dp_local_peer_id_free(peer->vdev->pdev, peer);
6411 
6412 	/* Drop all rx packets before deleting peer */
6413 	dp_clear_peer_internal(soc, peer);
6414 
6415 	dp_peer_rx_bufq_resources_deinit(peer);
6416 
6417 	qdf_spinlock_destroy(&peer->peer_info_lock);
6418 	dp_peer_multipass_list_remove(peer);
6419 
6420 	/*
6421 	 * Remove the reference added during peer_attach.
6422 	 * The peer will still be left allocated until the
6423 	 * PEER_UNMAP message arrives to remove the other
6424 	 * reference, added by the PEER_MAP message.
6425 	 */
6426 	dp_peer_unref_delete(peer);
6427 	/*
6428 	 * Remove the reference taken above
6429 	 */
6430 	dp_peer_unref_delete(peer);
6431 
6432 	return QDF_STATUS_SUCCESS;
6433 }
6434 
6435 /*
6436  * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer
6437  * @soc_hdl: Datapath soc handle
6438  * @vdev_id: virtual interface id
6439  *
6440  * Return: MAC address on success, NULL on failure.
6441  *
6442  */
6443 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
6444 					 uint8_t vdev_id)
6445 {
6446 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6447 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6448 
6449 	if (!vdev)
6450 		return NULL;
6451 
6452 	return vdev->mac_addr.raw;
6453 }
6454 
6455 /*
6456  * dp_vdev_set_wds() - Enable per packet stats
6457  * @soc: DP soc handle
6458  * @vdev_id: id of DP VDEV handle
6459  * @val: value
6460  *
6461  * Return: none
6462  */
6463 static int dp_vdev_set_wds(struct cdp_soc_t *soc, uint8_t vdev_id, uint32_t val)
6464 {
6465 	struct dp_vdev *vdev =
6466 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6467 						   vdev_id);
6468 
6469 	if (!vdev)
6470 		return QDF_STATUS_E_FAILURE;
6471 
6472 	vdev->wds_enabled = val;
6473 	return QDF_STATUS_SUCCESS;
6474 }
6475 
6476 /*
6477  * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
6478  * @soc_hdl: datapath soc handle
6479  * @pdev_id: physical device instance id
6480  *
6481  * Return: virtual interface id
6482  */
6483 static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
6484 					       uint8_t pdev_id)
6485 {
6486 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6487 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
6488 
6489 	if (qdf_unlikely(!pdev))
6490 		return -EINVAL;
6491 
6492 	return pdev->monitor_vdev->vdev_id;
6493 }
6494 
6495 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
6496 {
6497 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6498 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6499 
6500 	if (!vdev) {
6501 		dp_err("vdev for id %d is NULL", vdev_id);
6502 		return -EINVAL;
6503 	}
6504 
6505 	return vdev->opmode;
6506 }
6507 
6508 /**
6509  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
6510  * @soc_hdl: ol_txrx_soc_handle handle
6511  * @vdev_id: vdev id for which os rx handles are needed
6512  * @stack_fn_p: pointer to stack function pointer
6513  * @osif_handle_p: pointer to ol_osif_vdev_handle
6514  *
6515  * Return: void
6516  */
6517 static
6518 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
6519 					  uint8_t vdev_id,
6520 					  ol_txrx_rx_fp *stack_fn_p,
6521 					  ol_osif_vdev_handle *osif_vdev_p)
6522 {
6523 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6524 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6525 
6526 	if (!vdev)
6527 		return;
6528 
6529 	*stack_fn_p = vdev->osif_rx_stack;
6530 	*osif_vdev_p = vdev->osif_vdev;
6531 }
6532 
6533 /**
6534  * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev
6535  * @soc_hdl: datapath soc handle
6536  * @vdev_id: virtual device/interface id
6537  *
6538  * Return: Handle to control pdev
6539  */
6540 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
6541 						struct cdp_soc_t *soc_hdl,
6542 						uint8_t vdev_id)
6543 {
6544 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6545 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6546 	struct dp_pdev *pdev;
6547 
6548 	if (!vdev || !vdev->pdev)
6549 		return NULL;
6550 
6551 	pdev = vdev->pdev;
6552 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
6553 }
6554 
6555 /**
6556  * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
6557  *                                 ring based on target
6558  * @soc: soc handle
6559  * @mac_for_pdev: WIN- pdev_id, MCL- mac id
6560  * @pdev: physical device handle
6561  * @ring_num: mac id
6562  * @htt_tlv_filter: tlv filter
6563  *
6564  * Return: zero on success, non-zero on failure
6565  */
6566 static inline
6567 QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
6568 				       struct dp_pdev *pdev, uint8_t ring_num,
6569 				       struct htt_rx_ring_tlv_filter htt_tlv_filter)
6570 {
6571 	QDF_STATUS status;
6572 
6573 	if (soc->wlan_cfg_ctx->rxdma1_enable)
6574 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6575 					     soc->rxdma_mon_buf_ring[ring_num]
6576 					     .hal_srng,
6577 					     RXDMA_MONITOR_BUF,
6578 					     RX_MONITOR_BUFFER_SIZE,
6579 					     &htt_tlv_filter);
6580 	else
6581 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6582 					     pdev->rx_mac_buf_ring[ring_num]
6583 					     .hal_srng,
6584 					     RXDMA_BUF, RX_DATA_BUFFER_SIZE,
6585 					     &htt_tlv_filter);
6586 
6587 	return status;
6588 }
6589 
/*
 * dp_pdev_disable_mcopy_code() - clear all m-copy related state on the pdev
 * @pdev: Datapath PDEV handle
 */
static inline void
dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
{
	pdev->mcopy_mode = M_COPY_DISABLED;
	pdev->monitor_configured = false;
	pdev->monitor_vdev = NULL;
}
6597 
6598 /**
6599  * dp_reset_monitor_mode() - Disable monitor mode
6600  * @soc_hdl: Datapath soc handle
6601  * @pdev_id: id of datapath PDEV handle
6602  *
6603  * Return: QDF_STATUS
6604  */
6605 QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
6606 				 uint8_t pdev_id,
6607 				 uint8_t special_monitor)
6608 {
6609 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6610 	struct dp_pdev *pdev =
6611 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6612 						   pdev_id);
6613 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6614 
6615 	if (!pdev)
6616 		return QDF_STATUS_E_FAILURE;
6617 
6618 	qdf_spin_lock_bh(&pdev->mon_lock);
6619 
6620 	dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_DISABLE);
6621 	pdev->monitor_vdev = NULL;
6622 	pdev->monitor_configured = false;
6623 
6624 	/*
6625 	 * Lite monitor mode, smart monitor mode and monitor
6626 	 * mode uses this APIs to filter reset and mode disable
6627 	 */
6628 	if (pdev->mcopy_mode) {
6629 #if defined(FEATURE_PERPKT_INFO)
6630 		dp_pdev_disable_mcopy_code(pdev);
6631 		dp_mon_filter_reset_mcopy_mode(pdev);
6632 #endif /* FEATURE_PERPKT_INFO */
6633 	} else if (special_monitor) {
6634 #if defined(ATH_SUPPORT_NAC)
6635 		dp_mon_filter_reset_smart_monitor(pdev);
6636 #endif /* ATH_SUPPORT_NAC */
6637 	} else {
6638 		dp_mon_filter_reset_mon_mode(pdev);
6639 	}
6640 
6641 	status = dp_mon_filter_update(pdev);
6642 	if (status != QDF_STATUS_SUCCESS) {
6643 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6644 			  FL("Failed to reset monitor filters"));
6645 	}
6646 
6647 	qdf_spin_unlock_bh(&pdev->mon_lock);
6648 	return QDF_STATUS_SUCCESS;
6649 }
6650 
6651 /**
6652  * dp_get_tx_pending() - read pending tx
6653  * @pdev_handle: Datapath PDEV handle
6654  *
6655  * Return: outstanding tx
6656  */
6657 static uint32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
6658 {
6659 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6660 
6661 	return qdf_atomic_read(&pdev->num_tx_outstanding);
6662 }
6663 
6664 /**
6665  * dp_get_peer_mac_from_peer_id() - get peer mac
6666  * @pdev_handle: Datapath PDEV handle
6667  * @peer_id: Peer ID
6668  * @peer_mac: MAC addr of PEER
6669  *
6670  * Return: QDF_STATUS
6671  */
6672 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
6673 					       uint32_t peer_id,
6674 					       uint8_t *peer_mac)
6675 {
6676 	struct dp_peer *peer;
6677 
6678 	if (soc && peer_mac) {
6679 		peer = dp_peer_find_by_id((struct dp_soc *)soc,
6680 					  (uint16_t)peer_id);
6681 		if (peer) {
6682 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
6683 				     QDF_MAC_ADDR_SIZE);
6684 			dp_peer_unref_del_find_by_id(peer);
6685 			return QDF_STATUS_SUCCESS;
6686 		}
6687 	}
6688 
6689 	return QDF_STATUS_E_FAILURE;
6690 }
6691 
6692 /**
6693  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
6694  * @vdev_handle: Datapath VDEV handle
6695  * @smart_monitor: Flag to denote if its smart monitor mode
6696  *
6697  * Return: 0 on success, not 0 on failure
6698  */
6699 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *soc,
6700 					   uint8_t vdev_id,
6701 					   uint8_t special_monitor)
6702 {
6703 	uint32_t mac_id;
6704 	uint32_t mac_for_pdev;
6705 	struct dp_pdev *pdev;
6706 	uint32_t num_entries;
6707 	struct dp_srng *mon_buf_ring;
6708 	struct dp_vdev *vdev =
6709 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6710 						   vdev_id);
6711 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6712 
6713 	if (!vdev)
6714 		return QDF_STATUS_E_FAILURE;
6715 
6716 	pdev = vdev->pdev;
6717 	pdev->monitor_vdev = vdev;
6718 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6719 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
6720 		  pdev, pdev->pdev_id, pdev->soc, vdev);
6721 
6722 	/*
6723 	 * do not configure monitor buf ring and filter for smart and
6724 	 * lite monitor
6725 	 * for smart monitor filters are added along with first NAC
6726 	 * for lite monitor required configuration done through
6727 	 * dp_set_pdev_param
6728 	 */
6729 	if (special_monitor)
6730 		return QDF_STATUS_SUCCESS;
6731 
6732 	/*Check if current pdev's monitor_vdev exists */
6733 	if (pdev->monitor_configured) {
6734 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
6735 			  "monitor vap already created vdev=%pK\n", vdev);
6736 		return QDF_STATUS_E_RESOURCES;
6737 	}
6738 
6739 	pdev->monitor_configured = true;
6740 
6741 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6742 		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
6743 							  pdev->pdev_id);
6744 		dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
6745 						 FALSE);
6746 		/*
6747 		 * Configure low interrupt threshld when monitor mode is
6748 		 * configured.
6749 		 */
6750 		mon_buf_ring = &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
6751 		if (mon_buf_ring->hal_srng) {
6752 			num_entries = mon_buf_ring->num_entries;
6753 			hal_set_low_threshold(mon_buf_ring->hal_srng,
6754 					      num_entries >> 3);
6755 			htt_srng_setup(pdev->soc->htt_handle,
6756 				       pdev->pdev_id,
6757 				       mon_buf_ring->hal_srng,
6758 				       RXDMA_MONITOR_BUF);
6759 		}
6760 	}
6761 
6762 	dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE);
6763 
6764 	dp_mon_filter_setup_mon_mode(pdev);
6765 	status = dp_mon_filter_update(pdev);
6766 	if (status != QDF_STATUS_SUCCESS) {
6767 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6768 			  FL("Failed to reset monitor filters"));
6769 		dp_mon_filter_reset_mon_mode(pdev);
6770 		pdev->monitor_configured = false;
6771 		pdev->monitor_vdev = NULL;
6772 	}
6773 
6774 	return status;
6775 }
6776 
6777 /**
6778  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
6779  * @soc: soc handle
6780  * @pdev_id: id of Datapath PDEV handle
6781  * @filter_val: Flag to select Filter for monitor mode
6782  * Return: 0 on success, not 0 on failure
6783  */
6784 static QDF_STATUS
6785 dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
6786 				   struct cdp_monitor_filter *filter_val)
6787 {
6788 	/* Many monitor VAPs can exists in a system but only one can be up at
6789 	 * anytime
6790 	 */
6791 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6792 	struct dp_vdev *vdev;
6793 	struct dp_pdev *pdev =
6794 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6795 						   pdev_id);
6796 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6797 
6798 	if (!pdev)
6799 		return QDF_STATUS_E_FAILURE;
6800 
6801 	vdev = pdev->monitor_vdev;
6802 
6803 	if (!vdev)
6804 		return QDF_STATUS_E_FAILURE;
6805 
6806 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6807 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
6808 		pdev, pdev_id, soc, vdev);
6809 
6810 	/*Check if current pdev's monitor_vdev exists */
6811 	if (!pdev->monitor_vdev) {
6812 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6813 			"vdev=%pK", vdev);
6814 		qdf_assert(vdev);
6815 	}
6816 
6817 	/* update filter mode, type in pdev structure */
6818 	pdev->mon_filter_mode = filter_val->mode;
6819 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
6820 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
6821 	pdev->fp_data_filter = filter_val->fp_data;
6822 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
6823 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
6824 	pdev->mo_data_filter = filter_val->mo_data;
6825 
6826 	dp_mon_filter_setup_mon_mode(pdev);
6827 	status = dp_mon_filter_update(pdev);
6828 	if (status != QDF_STATUS_SUCCESS) {
6829 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6830 			  FL("Failed to set filter for advance mon mode"));
6831 		dp_mon_filter_reset_mon_mode(pdev);
6832 	}
6833 
6834 	return status;
6835 }
6836 
6837 /**
6838  * dp_deliver_tx_mgmt() - Deliver mgmt frame for tx capture
6839  * @cdp_soc : data path soc handle
6840  * @pdev_id : pdev_id
6841  * @nbuf: Management frame buffer
6842  */
6843 static QDF_STATUS
6844 dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf)
6845 {
6846 	struct dp_pdev *pdev =
6847 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
6848 						   pdev_id);
6849 
6850 	if (!pdev)
6851 		return QDF_STATUS_E_FAILURE;
6852 
6853 	dp_deliver_mgmt_frm(pdev, nbuf);
6854 
6855 	return QDF_STATUS_SUCCESS;
6856 }
6857 
6858 /**
6859  * dp_set_bsscolor() - sets bsscolor for tx capture
6860  * @pdev: Datapath PDEV handle
6861  * @bsscolor: new bsscolor
6862  */
6863 static void
6864 dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
6865 {
6866 	pdev->rx_mon_recv_status.bsscolor = bsscolor;
6867 }
6868 
6869 /**
6870  * dp_pdev_get_filter_ucast_data() - get DP PDEV monitor ucast filter
6871  * @soc : data path soc handle
6872  * @pdev_id : pdev_id
6873  * Return: true on ucast filter flag set
6874  */
6875 static bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
6876 {
6877 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6878 
6879 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
6880 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
6881 		return true;
6882 
6883 	return false;
6884 }
6885 
6886 /**
6887  * dp_pdev_get_filter_mcast_data() - get DP PDEV monitor mcast filter
6888  * @pdev_handle: Datapath PDEV handle
6889  * Return: true on mcast filter flag set
6890  */
6891 static bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
6892 {
6893 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6894 
6895 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
6896 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
6897 		return true;
6898 
6899 	return false;
6900 }
6901 
6902 /**
6903  * dp_pdev_get_filter_non_data() - get DP PDEV monitor non_data filter
6904  * @pdev_handle: Datapath PDEV handle
6905  * Return: true on non data filter flag set
6906  */
6907 static bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
6908 {
6909 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6910 
6911 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
6912 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
6913 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
6914 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
6915 			return true;
6916 		}
6917 	}
6918 
6919 	return false;
6920 }
6921 
6922 #ifdef MESH_MODE_SUPPORT
6923 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
6924 {
6925 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
6926 
6927 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6928 		FL("val %d"), val);
6929 	vdev->mesh_vdev = val;
6930 }
6931 
6932 /*
6933  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
6934  * @vdev_hdl: virtual device object
6935  * @val: value to be set
6936  *
6937  * Return: void
6938  */
6939 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
6940 {
6941 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
6942 
6943 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6944 		FL("val %d"), val);
6945 	vdev->mesh_rx_filter = val;
6946 }
6947 #endif
6948 
6949 #ifdef VDEV_PEER_PROTOCOL_COUNT
6950 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc,
6951 					       int8_t vdev_id,
6952 					       bool enable)
6953 {
6954 	struct dp_vdev *vdev;
6955 
6956 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6957 						  vdev_id);
6958 	dp_info("enable %d vdev_id %d", enable, vdev_id);
6959 	vdev->peer_protocol_count_track = enable;
6960 }
6961 
6962 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc,
6963 						   int8_t vdev_id,
6964 						   int drop_mask)
6965 {
6966 	struct dp_vdev *vdev;
6967 
6968 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6969 						  vdev_id);
6970 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
6971 	vdev->peer_protocol_count_dropmask = drop_mask;
6972 }
6973 
6974 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc,
6975 						  int8_t vdev_id)
6976 {
6977 	struct dp_vdev *vdev;
6978 
6979 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6980 						  vdev_id);
6981 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
6982 		vdev_id);
6983 	return vdev->peer_protocol_count_track;
6984 }
6985 
6986 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc,
6987 					       int8_t vdev_id)
6988 {
6989 	struct dp_vdev *vdev;
6990 
6991 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6992 						  vdev_id);
6993 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
6994 		vdev_id);
6995 	return vdev->peer_protocol_count_dropmask;
6996 }
6997 
6998 #endif
6999 
7000 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
7001 {
7002 	uint8_t pdev_count;
7003 
7004 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
7005 		if (soc->pdev_list[pdev_count] &&
7006 		    soc->pdev_list[pdev_count] == data)
7007 			return true;
7008 	}
7009 	return false;
7010 }
7011 
7012 /**
7013  * dp_rx_bar_stats_cb(): BAR received stats callback
7014  * @soc: SOC handle
7015  * @cb_ctxt: Call back context
7016  * @reo_status: Reo status
7017  *
7018  * return: void
7019  */
7020 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
7021 	union hal_reo_status *reo_status)
7022 {
7023 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
7024 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
7025 
7026 	if (!dp_check_pdev_exists(soc, pdev)) {
7027 		dp_err_rl("pdev doesn't exist");
7028 		return;
7029 	}
7030 
7031 	if (!qdf_atomic_read(&soc->cmn_init_done))
7032 		return;
7033 
7034 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
7035 		DP_PRINT_STATS("REO stats failure %d",
7036 			       queue_status->header.status);
7037 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
7038 		return;
7039 	}
7040 
7041 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
7042 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
7043 
7044 }
7045 
7046 /**
7047  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
7048  * @vdev: DP VDEV handle
7049  *
7050  * return: void
7051  */
7052 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
7053 			     struct cdp_vdev_stats *vdev_stats)
7054 {
7055 	struct dp_peer *peer = NULL;
7056 	struct dp_soc *soc = NULL;
7057 
7058 	if (!vdev || !vdev->pdev)
7059 		return;
7060 
7061 	soc = vdev->pdev->soc;
7062 
7063 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
7064 
7065 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
7066 		dp_update_vdev_stats(vdev_stats, peer);
7067 
7068 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7069 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7070 			     vdev_stats, vdev->vdev_id,
7071 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
7072 #endif
7073 }
7074 
/**
 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * Zeroes the pdev tx/rx/tx_i counters and re-derives them by aggregating
 * the stats of every vdev (and the peers of each vdev) on this pdev.
 *
 * return: void
 */
void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc;
	/* Scratch buffer reused for each vdev's aggregated stats */
	struct cdp_vdev_stats *vdev_stats =
			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "DP alloc failure - unable to get alloc vdev stats");
		return;
	}

	/* Start from a clean slate before re-accumulating */
	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));

	if (pdev->mcopy_mode)
		DP_UPDATE_STATS(pdev, pdev->invalid_peer);

	soc = pdev->soc;
	/* peer_ref_mutex protects the peer lists walked inside
	 * dp_aggregate_vdev_stats(); vdev_list_lock protects the vdev
	 * list itself. Lock order: peer_ref_mutex then vdev_list_lock.
	 */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

		dp_aggregate_vdev_stats(vdev, vdev_stats);
		dp_update_pdev_stats(pdev, vdev_stats);
		dp_update_pdev_ingress_stats(pdev, vdev);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	qdf_mem_free(vdev_stats);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
#endif
}
7113 
7114 /**
7115  * dp_vdev_getstats() - get vdev packet level stats
7116  * @vdev_handle: Datapath VDEV handle
7117  * @stats: cdp network device stats structure
7118  *
7119  * Return: QDF_STATUS
7120  */
7121 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
7122 				   struct cdp_dev_stats *stats)
7123 {
7124 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7125 	struct dp_pdev *pdev;
7126 	struct dp_soc *soc;
7127 	struct cdp_vdev_stats *vdev_stats;
7128 
7129 	if (!vdev)
7130 		return QDF_STATUS_E_FAILURE;
7131 
7132 	pdev = vdev->pdev;
7133 	if (!pdev)
7134 		return QDF_STATUS_E_FAILURE;
7135 
7136 	soc = pdev->soc;
7137 
7138 	vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
7139 
7140 	if (!vdev_stats) {
7141 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7142 			  "DP alloc failure - unable to get alloc vdev stats");
7143 		return QDF_STATUS_E_FAILURE;
7144 	}
7145 
7146 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
7147 	dp_aggregate_vdev_stats(vdev, vdev_stats);
7148 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
7149 
7150 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
7151 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
7152 
7153 	stats->tx_errors = vdev_stats->tx.tx_failed +
7154 		vdev_stats->tx_i.dropped.dropped_pkt.num;
7155 	stats->tx_dropped = stats->tx_errors;
7156 
7157 	stats->rx_packets = vdev_stats->rx.unicast.num +
7158 		vdev_stats->rx.multicast.num +
7159 		vdev_stats->rx.bcast.num;
7160 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
7161 		vdev_stats->rx.multicast.bytes +
7162 		vdev_stats->rx.bcast.bytes;
7163 
7164 	qdf_mem_free(vdev_stats);
7165 
7166 	return QDF_STATUS_SUCCESS;
7167 }
7168 
7169 
7170 /**
7171  * dp_pdev_getstats() - get pdev packet level stats
7172  * @pdev_handle: Datapath PDEV handle
7173  * @stats: cdp network device stats structure
7174  *
7175  * Return: QDF_STATUS
7176  */
7177 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
7178 			     struct cdp_dev_stats *stats)
7179 {
7180 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7181 
7182 	dp_aggregate_pdev_stats(pdev);
7183 
7184 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
7185 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
7186 
7187 	stats->tx_errors = pdev->stats.tx.tx_failed +
7188 		pdev->stats.tx_i.dropped.dropped_pkt.num;
7189 	stats->tx_dropped = stats->tx_errors;
7190 
7191 	stats->rx_packets = pdev->stats.rx.unicast.num +
7192 		pdev->stats.rx.multicast.num +
7193 		pdev->stats.rx.bcast.num;
7194 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
7195 		pdev->stats.rx.multicast.bytes +
7196 		pdev->stats.rx.bcast.bytes;
7197 	stats->rx_errors = pdev->stats.err.desc_alloc_fail +
7198 		pdev->stats.err.ip_csum_err +
7199 		pdev->stats.err.tcp_udp_csum_err +
7200 		pdev->stats.rx.err.mic_err +
7201 		pdev->stats.rx.err.decrypt_err +
7202 		pdev->stats.err.rxdma_error +
7203 		pdev->stats.err.reo_error;
7204 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
7205 		pdev->stats.dropped.mec +
7206 		pdev->stats.dropped.mesh_filter +
7207 		pdev->stats.dropped.wifi_parse +
7208 		pdev->stats.dropped.mon_rx_drop +
7209 		pdev->stats.dropped.mon_radiotap_update_err;
7210 }
7211 
7212 /**
7213  * dp_get_device_stats() - get interface level packet stats
7214  * @soc: soc handle
7215  * @id : vdev_id or pdev_id based on type
7216  * @stats: cdp network device stats structure
7217  * @type: device type pdev/vdev
7218  *
7219  * Return: QDF_STATUS
7220  */
7221 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc, uint8_t id,
7222 				      struct cdp_dev_stats *stats,
7223 				      uint8_t type)
7224 {
7225 	switch (type) {
7226 	case UPDATE_VDEV_STATS:
7227 		return dp_vdev_getstats(
7228 			(struct cdp_vdev *)dp_get_vdev_from_soc_vdev_id_wifi3(
7229 			 (struct dp_soc *)soc, id), stats);
7230 	case UPDATE_PDEV_STATS:
7231 		{
7232 			struct dp_pdev *pdev =
7233 				dp_get_pdev_from_soc_pdev_id_wifi3(
7234 						(struct dp_soc *)soc,
7235 						 id);
7236 			if (pdev) {
7237 				dp_pdev_getstats((struct cdp_pdev *)pdev,
7238 						 stats);
7239 				return QDF_STATUS_SUCCESS;
7240 			}
7241 		}
7242 		break;
7243 	default:
7244 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7245 			"apstats cannot be updated for this input "
7246 			"type %d", type);
7247 		break;
7248 	}
7249 
7250 	return QDF_STATUS_E_FAILURE;
7251 }
7252 
7253 const
7254 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
7255 {
7256 	switch (ring_type) {
7257 	case REO_DST:
7258 		return "Reo_dst";
7259 	case REO_EXCEPTION:
7260 		return "Reo_exception";
7261 	case REO_CMD:
7262 		return "Reo_cmd";
7263 	case REO_REINJECT:
7264 		return "Reo_reinject";
7265 	case REO_STATUS:
7266 		return "Reo_status";
7267 	case WBM2SW_RELEASE:
7268 		return "wbm2sw_release";
7269 	case TCL_DATA:
7270 		return "tcl_data";
7271 	case TCL_CMD_CREDIT:
7272 		return "tcl_cmd_credit";
7273 	case TCL_STATUS:
7274 		return "tcl_status";
7275 	case SW2WBM_RELEASE:
7276 		return "sw2wbm_release";
7277 	case RXDMA_BUF:
7278 		return "Rxdma_buf";
7279 	case RXDMA_DST:
7280 		return "Rxdma_dst";
7281 	case RXDMA_MONITOR_BUF:
7282 		return "Rxdma_monitor_buf";
7283 	case RXDMA_MONITOR_DESC:
7284 		return "Rxdma_monitor_desc";
7285 	case RXDMA_MONITOR_STATUS:
7286 		return "Rxdma_monitor_status";
7287 	default:
7288 		dp_err("Invalid ring type");
7289 		break;
7290 	}
7291 	return "Invalid";
7292 }
7293 
7294 /*
7295  * dp_print_napi_stats(): NAPI stats
7296  * @soc - soc handle
7297  */
7298 void dp_print_napi_stats(struct dp_soc *soc)
7299 {
7300 	hif_print_napi_stats(soc->hif_handle);
7301 }
7302 
7303 /**
7304  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
7305  * @vdev: DP_VDEV handle
7306  * @dp_soc: DP_SOC handle
7307  *
7308  * Return: QDF_STATUS
7309  */
7310 static inline QDF_STATUS
7311 dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
7312 {
7313 	struct dp_peer *peer = NULL;
7314 
7315 	if (!vdev || !vdev->pdev)
7316 		return QDF_STATUS_E_FAILURE;
7317 
7318 	/*
7319 	 * if NSS offload is enabled, then send message
7320 	 * to NSS FW to clear the stats. Once NSS FW clears the statistics
7321 	 * then clear host statistics.
7322 	 */
7323 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
7324 		if (soc->cdp_soc.ol_ops->nss_stats_clr)
7325 			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
7326 							   vdev->vdev_id);
7327 	}
7328 
7329 	DP_STATS_CLR(vdev->pdev);
7330 	DP_STATS_CLR(vdev->pdev->soc);
7331 	DP_STATS_CLR(vdev);
7332 
7333 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
7334 
7335 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7336 		struct dp_rx_tid *rx_tid;
7337 		uint8_t tid;
7338 
7339 		for (tid = 0; tid < DP_MAX_TIDS; tid++) {
7340 			rx_tid = &peer->rx_tid[tid];
7341 			DP_STATS_CLR(rx_tid);
7342 		}
7343 
7344 		DP_STATS_CLR(peer);
7345 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7346 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7347 				     &peer->stats,  peer->peer_id,
7348 				     UPDATE_PEER_STATS, vdev->pdev->pdev_id);
7349 #endif
7350 	}
7351 
7352 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7353 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7354 			     &vdev->stats,  vdev->vdev_id,
7355 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
7356 #endif
7357 	return QDF_STATUS_SUCCESS;
7358 }
7359 
7360 /*
7361  * dp_get_host_peer_stats()- function to print peer stats
7362  * @soc: dp_soc handle
7363  * @mac_addr: mac address of the peer
7364  *
7365  * Return: QDF_STATUS
7366  */
7367 static QDF_STATUS
7368 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
7369 {
7370 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7371 	struct dp_peer *peer = NULL;
7372 
7373 	if (!mac_addr) {
7374 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7375 			  "%s: NULL peer mac addr\n", __func__);
7376 		status = QDF_STATUS_E_FAILURE;
7377 		goto fail;
7378 	}
7379 
7380 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
7381 				      mac_addr, 0,
7382 				      DP_VDEV_ALL);
7383 	if (!peer || peer->delete_in_progress) {
7384 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7385 			  "%s: Invalid peer\n", __func__);
7386 		status = QDF_STATUS_E_FAILURE;
7387 		goto fail;
7388 	}
7389 
7390 	dp_print_peer_stats(peer);
7391 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
7392 fail:
7393 	if (peer)
7394 		dp_peer_unref_delete(peer);
7395 
7396 	return status;
7397 }
7398 
7399 /**
7400  * dp_txrx_stats_help() - Helper function for Txrx_Stats
7401  *
7402  * Return: None
7403  */
7404 static void dp_txrx_stats_help(void)
7405 {
7406 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
7407 	dp_info("stats_option:");
7408 	dp_info("  1 -- HTT Tx Statistics");
7409 	dp_info("  2 -- HTT Rx Statistics");
7410 	dp_info("  3 -- HTT Tx HW Queue Statistics");
7411 	dp_info("  4 -- HTT Tx HW Sched Statistics");
7412 	dp_info("  5 -- HTT Error Statistics");
7413 	dp_info("  6 -- HTT TQM Statistics");
7414 	dp_info("  7 -- HTT TQM CMDQ Statistics");
7415 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
7416 	dp_info("  9 -- HTT Tx Rate Statistics");
7417 	dp_info(" 10 -- HTT Rx Rate Statistics");
7418 	dp_info(" 11 -- HTT Peer Statistics");
7419 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
7420 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
7421 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
7422 	dp_info(" 15 -- HTT SRNG Statistics");
7423 	dp_info(" 16 -- HTT SFM Info Statistics");
7424 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
7425 	dp_info(" 18 -- HTT Peer List Details");
7426 	dp_info(" 20 -- Clear Host Statistics");
7427 	dp_info(" 21 -- Host Rx Rate Statistics");
7428 	dp_info(" 22 -- Host Tx Rate Statistics");
7429 	dp_info(" 23 -- Host Tx Statistics");
7430 	dp_info(" 24 -- Host Rx Statistics");
7431 	dp_info(" 25 -- Host AST Statistics");
7432 	dp_info(" 26 -- Host SRNG PTR Statistics");
7433 	dp_info(" 27 -- Host Mon Statistics");
7434 	dp_info(" 28 -- Host REO Queue Statistics");
7435 	dp_info(" 29 -- Host Soc cfg param Statistics");
7436 	dp_info(" 30 -- Host pdev cfg param Statistics");
7437 	dp_info(" 31 -- Host FISA stats");
7438 	dp_info(" 32 -- Host Register Work stats");
7439 }
7440 
7441 /**
7442  * dp_print_host_stats()- Function to print the stats aggregated at host
7443  * @vdev_handle: DP_VDEV handle
7444  * @req: host stats type
7445  * @soc: dp soc handler
7446  *
7447  * Return: 0 on success, print error message in case of failure
7448  */
7449 static int
7450 dp_print_host_stats(struct dp_vdev *vdev,
7451 		    struct cdp_txrx_stats_req *req,
7452 		    struct dp_soc *soc)
7453 {
7454 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7455 	enum cdp_host_txrx_stats type =
7456 			dp_stats_mapping_table[req->stats][STATS_HOST];
7457 
7458 	dp_aggregate_pdev_stats(pdev);
7459 
7460 	switch (type) {
7461 	case TXRX_CLEAR_STATS:
7462 		dp_txrx_host_stats_clr(vdev, soc);
7463 		break;
7464 	case TXRX_RX_RATE_STATS:
7465 		dp_print_rx_rates(vdev);
7466 		break;
7467 	case TXRX_TX_RATE_STATS:
7468 		dp_print_tx_rates(vdev);
7469 		break;
7470 	case TXRX_TX_HOST_STATS:
7471 		dp_print_pdev_tx_stats(pdev);
7472 		dp_print_soc_tx_stats(pdev->soc);
7473 		break;
7474 	case TXRX_RX_HOST_STATS:
7475 		dp_print_pdev_rx_stats(pdev);
7476 		dp_print_soc_rx_stats(pdev->soc);
7477 		break;
7478 	case TXRX_AST_STATS:
7479 		dp_print_ast_stats(pdev->soc);
7480 		dp_print_peer_table(vdev);
7481 		break;
7482 	case TXRX_SRNG_PTR_STATS:
7483 		dp_print_ring_stats(pdev);
7484 		break;
7485 	case TXRX_RX_MON_STATS:
7486 		dp_print_pdev_rx_mon_stats(pdev);
7487 		break;
7488 	case TXRX_REO_QUEUE_STATS:
7489 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
7490 				       req->peer_addr);
7491 		break;
7492 	case TXRX_SOC_CFG_PARAMS:
7493 		dp_print_soc_cfg_params(pdev->soc);
7494 		break;
7495 	case TXRX_PDEV_CFG_PARAMS:
7496 		dp_print_pdev_cfg_params(pdev);
7497 		break;
7498 	case TXRX_NAPI_STATS:
7499 		dp_print_napi_stats(pdev->soc);
7500 		break;
7501 	case TXRX_SOC_INTERRUPT_STATS:
7502 		dp_print_soc_interrupt_stats(pdev->soc);
7503 		break;
7504 	case TXRX_SOC_FSE_STATS:
7505 		dp_rx_dump_fisa_table(pdev->soc);
7506 		break;
7507 	case TXRX_HAL_REG_WRITE_STATS:
7508 		hal_dump_reg_write_stats(pdev->soc->hal_soc);
7509 		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
7510 		break;
7511 	default:
7512 		dp_info("Wrong Input For TxRx Host Stats");
7513 		dp_txrx_stats_help();
7514 		break;
7515 	}
7516 	return 0;
7517 }
7518 
7519 /*
7520  * is_ppdu_txrx_capture_enabled() - API to check both pktlog and debug_sniffer
7521  *                              modes are enabled or not.
7522  * @dp_pdev: dp pdev handle.
7523  *
7524  * Return: bool
7525  */
7526 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
7527 {
7528 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
7529 	    !pdev->mcopy_mode)
7530 		return true;
7531 	else
7532 		return false;
7533 }
7534 
7535 /*
7536  *dp_set_bpr_enable() - API to enable/disable bpr feature
7537  *@pdev_handle: DP_PDEV handle.
7538  *@val: Provided value.
7539  *
7540  *Return: 0 for success. nonzero for failure.
7541  */
7542 static QDF_STATUS
7543 dp_set_bpr_enable(struct dp_pdev *pdev, int val)
7544 {
7545 	switch (val) {
7546 	case CDP_BPR_DISABLE:
7547 		pdev->bpr_enable = CDP_BPR_DISABLE;
7548 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7549 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7550 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7551 		} else if (pdev->enhanced_stats_en &&
7552 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7553 			   !pdev->pktlog_ppdu_stats) {
7554 			dp_h2t_cfg_stats_msg_send(pdev,
7555 						  DP_PPDU_STATS_CFG_ENH_STATS,
7556 						  pdev->pdev_id);
7557 		}
7558 		break;
7559 	case CDP_BPR_ENABLE:
7560 		pdev->bpr_enable = CDP_BPR_ENABLE;
7561 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
7562 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
7563 			dp_h2t_cfg_stats_msg_send(pdev,
7564 						  DP_PPDU_STATS_CFG_BPR,
7565 						  pdev->pdev_id);
7566 		} else if (pdev->enhanced_stats_en &&
7567 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7568 			   !pdev->pktlog_ppdu_stats) {
7569 			dp_h2t_cfg_stats_msg_send(pdev,
7570 						  DP_PPDU_STATS_CFG_BPR_ENH,
7571 						  pdev->pdev_id);
7572 		} else if (pdev->pktlog_ppdu_stats) {
7573 			dp_h2t_cfg_stats_msg_send(pdev,
7574 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
7575 						  pdev->pdev_id);
7576 		}
7577 		break;
7578 	default:
7579 		break;
7580 	}
7581 
7582 	return QDF_STATUS_SUCCESS;
7583 }
7584 
7585 /*
7586  * dp_pdev_tid_stats_ingress_inc
7587  * @pdev: pdev handle
7588  * @val: increase in value
7589  *
7590  * Return: void
7591  */
7592 static void
7593 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
7594 {
7595 	pdev->stats.tid_stats.ingress_stack += val;
7596 }
7597 
7598 /*
7599  * dp_pdev_tid_stats_osif_drop
7600  * @pdev: pdev handle
7601  * @val: increase in value
7602  *
7603  * Return: void
7604  */
7605 static void
7606 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
7607 {
7608 	pdev->stats.tid_stats.osif_drop += val;
7609 }
7610 
7611 
7612 /*
7613  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
7614  * @pdev: DP_PDEV handle
7615  * @val: user provided value
7616  *
7617  * Return: 0 for success. nonzero for failure.
7618  */
7619 static QDF_STATUS
7620 dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
7621 {
7622 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7623 
7624 	/*
7625 	 * Note: The mirror copy mode cannot co-exist with any other
7626 	 * monitor modes. Hence disabling the filter for this mode will
7627 	 * reset the monitor destination ring filters.
7628 	 */
7629 	if (pdev->mcopy_mode) {
7630 #ifdef FEATURE_PERPKT_INFO
7631 		dp_pdev_disable_mcopy_code(pdev);
7632 		dp_mon_filter_reset_mcopy_mode(pdev);
7633 		status = dp_mon_filter_update(pdev);
7634 		if (status != QDF_STATUS_SUCCESS) {
7635 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7636 				  FL("Failed to reset AM copy mode filters"));
7637 		}
7638 #endif /* FEATURE_PERPKT_INFO */
7639 	}
7640 	switch (val) {
7641 	case 0:
7642 		pdev->tx_sniffer_enable = 0;
7643 		pdev->monitor_configured = false;
7644 
7645 		/*
7646 		 * We don't need to reset the Rx monitor status ring  or call
7647 		 * the API dp_ppdu_ring_reset() if all debug sniffer mode is
7648 		 * disabled. The Rx monitor status ring will be disabled when
7649 		 * the last mode using the monitor status ring get disabled.
7650 		 */
7651 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7652 		    !pdev->bpr_enable) {
7653 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7654 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
7655 			dp_h2t_cfg_stats_msg_send(pdev,
7656 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7657 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
7658 			dp_h2t_cfg_stats_msg_send(pdev,
7659 						  DP_PPDU_STATS_CFG_BPR_ENH,
7660 						  pdev->pdev_id);
7661 		} else {
7662 			dp_h2t_cfg_stats_msg_send(pdev,
7663 						  DP_PPDU_STATS_CFG_BPR,
7664 						  pdev->pdev_id);
7665 		}
7666 		break;
7667 
7668 	case 1:
7669 		pdev->tx_sniffer_enable = 1;
7670 		pdev->monitor_configured = false;
7671 
7672 		if (!pdev->pktlog_ppdu_stats)
7673 			dp_h2t_cfg_stats_msg_send(pdev,
7674 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7675 		break;
7676 	case 2:
7677 	case 4:
7678 		if (pdev->monitor_vdev) {
7679 			status = QDF_STATUS_E_RESOURCES;
7680 			break;
7681 		}
7682 
7683 #ifdef FEATURE_PERPKT_INFO
7684 		pdev->mcopy_mode = val;
7685 		pdev->tx_sniffer_enable = 0;
7686 		pdev->monitor_configured = true;
7687 
7688 		/*
7689 		 * Setup the M copy mode filter.
7690 		 */
7691 		dp_mon_filter_setup_mcopy_mode(pdev);
7692 		status = dp_mon_filter_update(pdev);
7693 		if (status != QDF_STATUS_SUCCESS) {
7694 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7695 				  FL("Failed to set M_copy mode filters"));
7696 			dp_mon_filter_reset_mcopy_mode(pdev);
7697 			dp_pdev_disable_mcopy_code(pdev);
7698 			return status;
7699 		}
7700 
7701 		if (!pdev->pktlog_ppdu_stats)
7702 			dp_h2t_cfg_stats_msg_send(pdev,
7703 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7704 #endif /* FEATURE_PERPKT_INFO */
7705 		break;
7706 
7707 	default:
7708 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7709 			"Invalid value");
7710 		break;
7711 	}
7712 	return status;
7713 }
7714 
7715 #ifdef FEATURE_PERPKT_INFO
7716 /*
7717  * dp_enable_enhanced_stats()- API to enable enhanced statistcs
7718  * @soc_handle: DP_SOC handle
7719  * @pdev_id: id of DP_PDEV handle
7720  *
7721  * Return: QDF_STATUS
7722  */
7723 static QDF_STATUS
7724 dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
7725 {
7726 	struct dp_pdev *pdev = NULL;
7727 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7728 
7729 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7730 						  pdev_id);
7731 
7732 	if (!pdev)
7733 		return QDF_STATUS_E_FAILURE;
7734 
7735 	if (pdev->enhanced_stats_en == 0)
7736 		dp_cal_client_timer_start(pdev->cal_client_ctx);
7737 
7738 	pdev->enhanced_stats_en = 1;
7739 
7740 	dp_mon_filter_setup_enhanced_stats(pdev);
7741 	status = dp_mon_filter_update(pdev);
7742 	if (status != QDF_STATUS_SUCCESS) {
7743 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7744 			  FL("Failed to set enhanced mode filters"));
7745 		dp_mon_filter_reset_enhanced_stats(pdev);
7746 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
7747 		pdev->enhanced_stats_en = 0;
7748 		return QDF_STATUS_E_FAILURE;
7749 	}
7750 
7751 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7752 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7753 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7754 		dp_h2t_cfg_stats_msg_send(pdev,
7755 					  DP_PPDU_STATS_CFG_BPR_ENH,
7756 					  pdev->pdev_id);
7757 	}
7758 
7759 	return QDF_STATUS_SUCCESS;
7760 }
7761 
7762 /*
7763  * dp_disable_enhanced_stats()- API to disable enhanced statistcs
7764  *
7765  * @param soc - the soc handle
7766  * @param pdev_id - pdev_id of pdev
7767  * @return - QDF_STATUS
7768  */
7769 static QDF_STATUS
7770 dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
7771 {
7772 	struct dp_pdev *pdev =
7773 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7774 						   pdev_id);
7775 
7776 	if (!pdev)
7777 		return QDF_STATUS_E_FAILURE;
7778 
7779 	if (pdev->enhanced_stats_en == 1)
7780 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
7781 
7782 	pdev->enhanced_stats_en = 0;
7783 
7784 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7785 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7786 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7787 		dp_h2t_cfg_stats_msg_send(pdev,
7788 					  DP_PPDU_STATS_CFG_BPR,
7789 					  pdev->pdev_id);
7790 	}
7791 
7792 	dp_mon_filter_reset_enhanced_stats(pdev);
7793 	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
7794 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7795 			  FL("Failed to reset enhanced mode filters"));
7796 	}
7797 
7798 	return QDF_STATUS_SUCCESS;
7799 }
7800 #endif /* FEATURE_PERPKT_INFO */
7801 
7802 /*
7803  * dp_get_fw_peer_stats()- function to print peer stats
7804  * @soc: soc handle
7805  * @pdev_id : id of the pdev handle
7806  * @mac_addr: mac address of the peer
7807  * @cap: Type of htt stats requested
7808  * @is_wait: if set, wait on completion from firmware response
7809  *
7810  * Currently Supporting only MAC ID based requests Only
7811  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
7812  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
7813  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
7814  *
7815  * Return: QDF_STATUS
7816  */
7817 static QDF_STATUS
7818 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
7819 		     uint8_t *mac_addr,
7820 		     uint32_t cap, uint32_t is_wait)
7821 {
7822 	int i;
7823 	uint32_t config_param0 = 0;
7824 	uint32_t config_param1 = 0;
7825 	uint32_t config_param2 = 0;
7826 	uint32_t config_param3 = 0;
7827 	struct dp_pdev *pdev =
7828 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7829 						   pdev_id);
7830 
7831 	if (!pdev)
7832 		return QDF_STATUS_E_FAILURE;
7833 
7834 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
7835 	config_param0 |= (1 << (cap + 1));
7836 
7837 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
7838 		config_param1 |= (1 << i);
7839 	}
7840 
7841 	config_param2 |= (mac_addr[0] & 0x000000ff);
7842 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
7843 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
7844 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
7845 
7846 	config_param3 |= (mac_addr[4] & 0x000000ff);
7847 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
7848 
7849 	if (is_wait) {
7850 		qdf_event_reset(&pdev->fw_peer_stats_event);
7851 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7852 					  config_param0, config_param1,
7853 					  config_param2, config_param3,
7854 					  0, 1, 0);
7855 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
7856 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
7857 	} else {
7858 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7859 					  config_param0, config_param1,
7860 					  config_param2, config_param3,
7861 					  0, 0, 0);
7862 	}
7863 
7864 	return QDF_STATUS_SUCCESS;
7865 
7866 }
7867 
/* HTT stats request parameters, forwarded verbatim to the firmware via
 * dp_h2t_ext_stats_msg_send(). This struct definition will be removed
 * from here once it gets added in FW headers.
 */
struct httstats_cmd_req {
    uint32_t    config_param0;  /* opaque FW config word 0 */
    uint32_t    config_param1;  /* opaque FW config word 1 */
    uint32_t    config_param2;  /* opaque FW config word 2 */
    uint32_t    config_param3;  /* opaque FW config word 3 */
    int cookie;                 /* caller cookie passed through to FW */
    u_int8_t    stats_id;       /* HTT stats type identifier */
};
7878 
7879 /*
7880  * dp_get_htt_stats: function to process the httstas request
7881  * @soc: DP soc handle
7882  * @pdev_id: id of pdev handle
7883  * @data: pointer to request data
7884  * @data_len: length for request data
7885  *
7886  * return: QDF_STATUS
7887  */
7888 static QDF_STATUS
7889 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
7890 		 uint32_t data_len)
7891 {
7892 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
7893 	struct dp_pdev *pdev =
7894 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7895 						   pdev_id);
7896 
7897 	if (!pdev)
7898 		return QDF_STATUS_E_FAILURE;
7899 
7900 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
7901 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
7902 				req->config_param0, req->config_param1,
7903 				req->config_param2, req->config_param3,
7904 				req->cookie, 0, 0);
7905 
7906 	return QDF_STATUS_SUCCESS;
7907 }
7908 
7909 /**
7910  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
7911  * @pdev: DP_PDEV handle
7912  * @prio: tidmap priority value passed by the user
7913  *
7914  * Return: QDF_STATUS_SUCCESS on success
7915  */
7916 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
7917 						uint8_t prio)
7918 {
7919 	struct dp_soc *soc = pdev->soc;
7920 
7921 	soc->tidmap_prty = prio;
7922 
7923 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
7924 	return QDF_STATUS_SUCCESS;
7925 }
7926 
7927 /*
7928  * dp_get_peer_param: function to get parameters in peer
7929  * @cdp_soc: DP soc handle
7930  * @vdev_id: id of vdev handle
7931  * @peer_mac: peer mac address
7932  * @param: parameter type to be set
7933  * @val : address of buffer
7934  *
7935  * Return: val
7936  */
7937 static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
7938 				    uint8_t *peer_mac,
7939 				    enum cdp_peer_param_type param,
7940 				    cdp_config_param_type *val)
7941 {
7942 	return QDF_STATUS_SUCCESS;
7943 }
7944 
#ifdef WLAN_ATF_ENABLE
/* dp_set_atf_stats_enable() - enable/disable ATF stats on a pdev */
static void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
{
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid pdev");
		return;
	}

	pdev->dp_atf_stats_enable = value;
}
#else
/* No-op stub when the ATF feature is compiled out */
static void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
{
}
#endif
7961 
7962 /*
7963  * dp_set_peer_param: function to set parameters in peer
7964  * @cdp_soc: DP soc handle
7965  * @vdev_id: id of vdev handle
7966  * @peer_mac: peer mac address
7967  * @param: parameter type to be set
7968  * @val: value of parameter to be set
7969  *
7970  * Return: 0 for success. nonzero for failure.
7971  */
7972 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
7973 				    uint8_t *peer_mac,
7974 				    enum cdp_peer_param_type param,
7975 				    cdp_config_param_type val)
7976 {
7977 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
7978 						      peer_mac, 0, vdev_id);
7979 
7980 	if (!peer || peer->delete_in_progress)
7981 		goto fail;
7982 
7983 	switch (param) {
7984 	case CDP_CONFIG_NAWDS:
7985 		peer->nawds_enabled = val.cdp_peer_param_nawds;
7986 		break;
7987 	case CDP_CONFIG_NAC:
7988 		peer->nac = !!(val.cdp_peer_param_nac);
7989 		break;
7990 	case CDP_CONFIG_ISOLATION:
7991 		dp_set_peer_isolation(peer, val.cdp_peer_param_isolation);
7992 		break;
7993 	case CDP_CONFIG_IN_TWT:
7994 		peer->in_twt = !!(val.cdp_peer_param_in_twt);
7995 		break;
7996 	default:
7997 		break;
7998 	}
7999 
8000 fail:
8001 	if (peer)
8002 		dp_peer_unref_delete(peer);
8003 
8004 	return QDF_STATUS_SUCCESS;
8005 }
8006 
8007 /*
8008  * dp_get_pdev_param: function to get parameters from pdev
8009  * @cdp_soc: DP soc handle
8010  * @pdev_id: id of pdev handle
8011  * @param: parameter type to be get
8012  * @value : buffer for value
8013  *
8014  * Return: status
8015  */
8016 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
8017 				    enum cdp_pdev_param_type param,
8018 				    cdp_config_param_type *val)
8019 {
8020 	struct cdp_pdev *pdev = (struct cdp_pdev *)
8021 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
8022 						   pdev_id);
8023 	if (!pdev)
8024 		return QDF_STATUS_E_FAILURE;
8025 
8026 	switch (param) {
8027 	case CDP_CONFIG_VOW:
8028 		val->cdp_pdev_param_cfg_vow =
8029 				((struct dp_pdev *)pdev)->delay_stats_flag;
8030 		break;
8031 	case CDP_TX_PENDING:
8032 		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
8033 		break;
8034 	case CDP_FILTER_MCAST_DATA:
8035 		val->cdp_pdev_param_fltr_mcast =
8036 					dp_pdev_get_filter_mcast_data(pdev);
8037 		break;
8038 	case CDP_FILTER_NO_DATA:
8039 		val->cdp_pdev_param_fltr_none =
8040 					dp_pdev_get_filter_non_data(pdev);
8041 		break;
8042 	case CDP_FILTER_UCAST_DATA:
8043 		val->cdp_pdev_param_fltr_ucast =
8044 					dp_pdev_get_filter_ucast_data(pdev);
8045 		break;
8046 	default:
8047 		return QDF_STATUS_E_FAILURE;
8048 	}
8049 
8050 	return QDF_STATUS_SUCCESS;
8051 }
8052 
8053 /*
8054  * dp_set_pdev_param: function to set parameters in pdev
8055  * @cdp_soc: DP soc handle
8056  * @pdev_id: id of pdev handle
8057  * @param: parameter type to be set
8058  * @val: value of parameter to be set
8059  *
8060  * Return: 0 for success. nonzero for failure.
8061  */
8062 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
8063 				    enum cdp_pdev_param_type param,
8064 				    cdp_config_param_type val)
8065 {
8066 	int target_type;
8067 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
8068 	struct dp_pdev *pdev =
8069 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
8070 						   pdev_id);
8071 	if (!pdev)
8072 		return QDF_STATUS_E_FAILURE;
8073 
8074 	target_type = hal_get_target_type(soc->hal_soc);
8075 	switch (target_type) {
8076 	case TARGET_TYPE_QCA6750:
8077 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_5G_LMAC_ID;
8078 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_5G_LMAC_ID;
8079 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_6G_LMAC_ID;
8080 		break;
8081 	default:
8082 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_2G_LMAC_ID;
8083 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_5G_LMAC_ID;
8084 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_6G_LMAC_ID;
8085 		break;
8086 	}
8087 
8088 	switch (param) {
8089 	case CDP_CONFIG_TX_CAPTURE:
8090 		return dp_config_debug_sniffer(pdev,
8091 					       val.cdp_pdev_param_tx_capture);
8092 	case CDP_CONFIG_DEBUG_SNIFFER:
8093 		return dp_config_debug_sniffer(pdev,
8094 					       val.cdp_pdev_param_dbg_snf);
8095 	case CDP_CONFIG_BPR_ENABLE:
8096 		return dp_set_bpr_enable(pdev, val.cdp_pdev_param_bpr_enable);
8097 	case CDP_CONFIG_PRIMARY_RADIO:
8098 		pdev->is_primary = val.cdp_pdev_param_primary_radio;
8099 		break;
8100 	case CDP_CONFIG_CAPTURE_LATENCY:
8101 		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
8102 		break;
8103 	case CDP_INGRESS_STATS:
8104 		dp_pdev_tid_stats_ingress_inc(pdev,
8105 					      val.cdp_pdev_param_ingrs_stats);
8106 		break;
8107 	case CDP_OSIF_DROP:
8108 		dp_pdev_tid_stats_osif_drop(pdev,
8109 					    val.cdp_pdev_param_osif_drop);
8110 		break;
8111 	case CDP_CONFIG_ENH_RX_CAPTURE:
8112 		return dp_config_enh_rx_capture(pdev,
8113 						val.cdp_pdev_param_en_rx_cap);
8114 	case CDP_CONFIG_ENH_TX_CAPTURE:
8115 		return dp_config_enh_tx_capture(pdev,
8116 						val.cdp_pdev_param_en_tx_cap);
8117 	case CDP_CONFIG_HMMC_TID_OVERRIDE:
8118 		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
8119 		break;
8120 	case CDP_CONFIG_HMMC_TID_VALUE:
8121 		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
8122 		break;
8123 	case CDP_CHAN_NOISE_FLOOR:
8124 		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
8125 		break;
8126 	case CDP_TIDMAP_PRTY:
8127 		dp_set_pdev_tidmap_prty_wifi3(pdev,
8128 					      val.cdp_pdev_param_tidmap_prty);
8129 		break;
8130 	case CDP_FILTER_NEIGH_PEERS:
8131 		dp_set_filter_neigh_peers(pdev,
8132 					  val.cdp_pdev_param_fltr_neigh_peers);
8133 		break;
8134 	case CDP_MONITOR_CHANNEL:
8135 		pdev->mon_chan_num = val.cdp_pdev_param_monitor_chan;
8136 		break;
8137 	case CDP_MONITOR_FREQUENCY:
8138 		pdev->mon_chan_freq = val.cdp_pdev_param_mon_freq;
8139 		pdev->mon_chan_band =
8140 				wlan_reg_freq_to_band(pdev->mon_chan_freq);
8141 		break;
8142 	case CDP_CONFIG_BSS_COLOR:
8143 		dp_mon_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
8144 		break;
8145 	case CDP_SET_ATF_STATS_ENABLE:
8146 		dp_set_atf_stats_enable(pdev,
8147 					val.cdp_pdev_param_atf_stats_enable);
8148 		break;
8149 	default:
8150 		return QDF_STATUS_E_INVAL;
8151 	}
8152 	return QDF_STATUS_SUCCESS;
8153 }
8154 
8155 #ifdef QCA_PEER_EXT_STATS
8156 static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
8157 					  qdf_nbuf_t nbuf)
8158 {
8159 	struct dp_peer *peer = NULL;
8160 	uint16_t peer_id, ring_id;
8161 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
8162 	struct cdp_peer_ext_stats *pext_stats = NULL;
8163 
8164 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
8165 	if (peer_id > soc->max_peers)
8166 		return;
8167 
8168 	peer = dp_peer_find_by_id(soc, peer_id);
8169 	if (qdf_unlikely(!peer) || qdf_unlikely(!peer->pext_stats))
8170 		return;
8171 
8172 	pext_stats = peer->pext_stats;
8173 	ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
8174 	dp_rx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id], nbuf);
8175 }
8176 #else
8177 static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
8178 						 qdf_nbuf_t nbuf)
8179 {
8180 }
8181 #endif
8182 
8183 /*
8184  * dp_calculate_delay_stats: function to get rx delay stats
8185  * @cdp_soc: DP soc handle
8186  * @vdev_id: id of DP vdev handle
8187  * @nbuf: skb
8188  *
8189  * Return: QDF_STATUS
8190  */
8191 static QDF_STATUS
8192 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
8193 			 qdf_nbuf_t nbuf)
8194 {
8195 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
8196 	struct dp_vdev *vdev =
8197 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)cdp_soc,
8198 						   vdev_id);
8199 
8200 	if (!vdev)
8201 		return QDF_STATUS_SUCCESS;
8202 
8203 	if (vdev->pdev->delay_stats_flag) {
8204 		dp_rx_compute_delay(vdev, nbuf);
8205 		return QDF_STATUS_SUCCESS;
8206 	}
8207 
8208 	/*
8209 	 * Update the per peer delay stats
8210 	 */
8211 	dp_rx_update_peer_delay_stats(soc, nbuf);
8212 	return QDF_STATUS_SUCCESS;
8213 }
8214 
8215 /*
8216  * dp_get_vdev_param: function to get parameters from vdev
8217  * @cdp_soc : DP soc handle
8218  * @vdev_id: id of DP vdev handle
8219  * @param: parameter type to get value
8220  * @val: buffer address
8221  *
8222  * return: status
8223  */
8224 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
8225 				    enum cdp_vdev_param_type param,
8226 				    cdp_config_param_type *val)
8227 {
8228 	struct dp_vdev *vdev =
8229 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)cdp_soc,
8230 						   vdev_id);
8231 	if (!vdev)
8232 		return QDF_STATUS_E_FAILURE;
8233 
8234 	switch (param) {
8235 	case CDP_ENABLE_WDS:
8236 		val->cdp_vdev_param_wds = vdev->wds_enabled;
8237 		break;
8238 	case CDP_ENABLE_MEC:
8239 		val->cdp_vdev_param_mec = vdev->mec_enabled;
8240 		break;
8241 	case CDP_ENABLE_DA_WAR:
8242 		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
8243 		break;
8244 	default:
8245 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8246 			  "param value %d is wrong\n",
8247 			  param);
8248 		return QDF_STATUS_E_FAILURE;
8249 	}
8250 
8251 	return QDF_STATUS_SUCCESS;
8252 }
8253 
8254 /*
8255  * dp_set_vdev_param: function to set parameters in vdev
8256  * @cdp_soc : DP soc handle
8257  * @vdev_id: id of DP vdev handle
8258  * @param: parameter type to get value
8259  * @val: value
8260  *
8261  * return: QDF_STATUS
8262  */
8263 static QDF_STATUS
8264 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
8265 		  enum cdp_vdev_param_type param, cdp_config_param_type val)
8266 {
8267 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
8268 	struct dp_vdev *vdev =
8269 		dp_get_vdev_from_soc_vdev_id_wifi3(dsoc, vdev_id);
8270 	uint32_t var = 0;
8271 
8272 	if (!vdev)
8273 		return QDF_STATUS_E_FAILURE;
8274 
8275 	switch (param) {
8276 	case CDP_ENABLE_WDS:
8277 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8278 			  "wds_enable %d for vdev(%pK) id(%d)\n",
8279 			  val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
8280 		vdev->wds_enabled = val.cdp_vdev_param_wds;
8281 		break;
8282 	case CDP_ENABLE_MEC:
8283 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8284 			  "mec_enable %d for vdev(%pK) id(%d)\n",
8285 			  val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
8286 		vdev->mec_enabled = val.cdp_vdev_param_mec;
8287 		break;
8288 	case CDP_ENABLE_DA_WAR:
8289 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8290 			  "da_war_enable %d for vdev(%pK) id(%d)\n",
8291 			  val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
8292 		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
8293 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
8294 					     vdev->pdev->soc));
8295 		break;
8296 	case CDP_ENABLE_NAWDS:
8297 		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
8298 		break;
8299 	case CDP_ENABLE_MCAST_EN:
8300 		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
8301 		break;
8302 	case CDP_ENABLE_PROXYSTA:
8303 		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
8304 		break;
8305 	case CDP_UPDATE_TDLS_FLAGS:
8306 		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
8307 		break;
8308 	case CDP_CFG_WDS_AGING_TIMER:
8309 		var = val.cdp_vdev_param_aging_tmr;
8310 		if (!var)
8311 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
8312 		else if (var != vdev->wds_aging_timer_val)
8313 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);
8314 
8315 		vdev->wds_aging_timer_val = var;
8316 		break;
8317 	case CDP_ENABLE_AP_BRIDGE:
8318 		if (wlan_op_mode_sta != vdev->opmode)
8319 			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
8320 		else
8321 			vdev->ap_bridge_enabled = false;
8322 		break;
8323 	case CDP_ENABLE_CIPHER:
8324 		vdev->sec_type = val.cdp_vdev_param_cipher_en;
8325 		break;
8326 	case CDP_ENABLE_QWRAP_ISOLATION:
8327 		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
8328 		break;
8329 	case CDP_UPDATE_MULTIPASS:
8330 		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
8331 		break;
8332 	case CDP_TX_ENCAP_TYPE:
8333 		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
8334 		break;
8335 	case CDP_RX_DECAP_TYPE:
8336 		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
8337 		break;
8338 	case CDP_TID_VDEV_PRTY:
8339 		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
8340 		break;
8341 	case CDP_TIDMAP_TBL_ID:
8342 		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
8343 		break;
8344 #ifdef MESH_MODE_SUPPORT
8345 	case CDP_MESH_RX_FILTER:
8346 		dp_peer_set_mesh_rx_filter((struct cdp_vdev *)vdev,
8347 					   val.cdp_vdev_param_mesh_rx_filter);
8348 		break;
8349 	case CDP_MESH_MODE:
8350 		dp_peer_set_mesh_mode((struct cdp_vdev *)vdev,
8351 				      val.cdp_vdev_param_mesh_mode);
8352 		break;
8353 #endif
8354 	default:
8355 		break;
8356 	}
8357 
8358 	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
8359 
8360 	return QDF_STATUS_SUCCESS;
8361 }
8362 
8363 /*
8364  * dp_set_psoc_param: function to set parameters in psoc
8365  * @cdp_soc : DP soc handle
8366  * @param: parameter type to be set
8367  * @val: value of parameter to be set
8368  *
8369  * return: QDF_STATUS
8370  */
8371 static QDF_STATUS
8372 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
8373 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
8374 {
8375 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
8376 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
8377 
8378 	switch (param) {
8379 	case CDP_ENABLE_RATE_STATS:
8380 		soc->wlanstats_enabled = val.cdp_psoc_param_en_rate_stats;
8381 		break;
8382 	case CDP_SET_NSS_CFG:
8383 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
8384 					    val.cdp_psoc_param_en_nss_cfg);
8385 		/*
8386 		 * TODO: masked out based on the per offloaded radio
8387 		 */
8388 		switch (val.cdp_psoc_param_en_nss_cfg) {
8389 		case dp_nss_cfg_default:
8390 			break;
8391 		case dp_nss_cfg_first_radio:
8392 		/*
8393 		 * This configuration is valid for single band radio which
8394 		 * is also NSS offload.
8395 		 */
8396 		case dp_nss_cfg_dbdc:
8397 		case dp_nss_cfg_dbtc:
8398 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
8399 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
8400 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
8401 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
8402 			break;
8403 		default:
8404 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8405 				  "Invalid offload config %d",
8406 				  val.cdp_psoc_param_en_nss_cfg);
8407 		}
8408 
8409 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8410 			  FL("nss-wifi<0> nss config is enabled"));
8411 		break;
8412 	case CDP_SET_PREFERRED_HW_MODE:
8413 		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
8414 		break;
8415 	default:
8416 		break;
8417 	}
8418 
8419 	return QDF_STATUS_SUCCESS;
8420 }
8421 
8422 /*
8423  * dp_get_psoc_param: function to get parameters in soc
8424  * @cdp_soc : DP soc handle
8425  * @param: parameter type to be set
8426  * @val: address of buffer
8427  *
8428  * return: status
8429  */
8430 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
8431 				    enum cdp_psoc_param_type param,
8432 				    cdp_config_param_type *val)
8433 {
8434 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
8435 
8436 	if (!soc)
8437 		return QDF_STATUS_E_FAILURE;
8438 
8439 	switch (param) {
8440 	case CDP_CFG_PEER_EXT_STATS:
8441 		val->cdp_psoc_param_pext_stats =
8442 			wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
8443 		break;
8444 	default:
8445 		dp_warn("Invalid param");
8446 		break;
8447 	}
8448 
8449 	return QDF_STATUS_SUCCESS;
8450 }
8451 
8452 /**
8453  * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
8454  * @soc: DP_SOC handle
8455  * @pdev_id: id of DP_PDEV handle
8456  * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
8457  * @is_tx_pkt_cap_enable: enable/disable/delete/print
8458  * Tx packet capture in monitor mode
8459  * @peer_mac: MAC address for which the above need to be enabled/disabled
8460  *
8461  * Return: Success if Rx & Tx capture is enabled for peer, false otherwise
8462  */
8463 QDF_STATUS
8464 dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
8465 				  uint8_t pdev_id,
8466 				  bool is_rx_pkt_cap_enable,
8467 				  uint8_t is_tx_pkt_cap_enable,
8468 				  uint8_t *peer_mac)
8469 {
8470 	QDF_STATUS status;
8471 	struct dp_peer *peer;
8472 	struct dp_pdev *pdev =
8473 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8474 						   pdev_id);
8475 	if (!pdev)
8476 		return QDF_STATUS_E_FAILURE;
8477 
8478 	peer = (struct dp_peer *)dp_find_peer_by_addr((struct cdp_pdev *)pdev,
8479 						      peer_mac);
8480 
8481 	/* we need to set tx pkt capture for non associated peer */
8482 	status = dp_peer_set_tx_capture_enabled(pdev, peer,
8483 						is_tx_pkt_cap_enable,
8484 						peer_mac);
8485 
8486 	status = dp_peer_set_rx_capture_enabled(pdev, peer,
8487 						is_rx_pkt_cap_enable,
8488 						peer_mac);
8489 
8490 	return status;
8491 }
8492 
8493 /*
8494  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
8495  * @soc: DP_SOC handle
8496  * @vdev_id: id of DP_VDEV handle
8497  * @map_id:ID of map that needs to be updated
8498  *
8499  * Return: QDF_STATUS
8500  */
8501 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle soc,
8502 						 uint8_t vdev_id,
8503 						 uint8_t map_id)
8504 {
8505 	struct dp_vdev *vdev =
8506 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
8507 						   vdev_id);
8508 	if (vdev) {
8509 		vdev->dscp_tid_map_id = map_id;
8510 		return QDF_STATUS_SUCCESS;
8511 	}
8512 
8513 	return QDF_STATUS_E_FAILURE;
8514 }
8515 
#ifdef DP_RATETABLE_SUPPORT
/* Translate (preamble, mcs, htflag, guard interval) into a rate in kbps
 * via the rate table. NOTE(review): @htflag is accepted but never passed
 * to dp_getrateindex() -- nss and ht arguments are hard-coded to 1.
 * Confirm whether this is intentional for this rate-table revision.
 */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	uint32_t rix;
	uint16_t ratecode;

	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
			       (uint8_t)preamb, 1, &rix, &ratecode);
}
#else
/* Stub when the rate table is compiled out: always reports 0 kbps. */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	return 0;
}
#endif
8533 
8534 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
8535  * @soc: DP soc handle
8536  * @pdev_id: id of DP pdev handle
8537  * @pdev_stats: buffer to copy to
8538  *
8539  * return : status success/failure
8540  */
8541 static QDF_STATUS
8542 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
8543 		       struct cdp_pdev_stats *pdev_stats)
8544 {
8545 	struct dp_pdev *pdev =
8546 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8547 						   pdev_id);
8548 	if (!pdev)
8549 		return QDF_STATUS_E_FAILURE;
8550 
8551 	dp_aggregate_pdev_stats(pdev);
8552 
8553 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
8554 	return QDF_STATUS_SUCCESS;
8555 }
8556 
/* dp_txrx_update_vdev_me_stats() - fold host multicast-enhancement stats
 * sent via CDP into the vdev's tx ingress counters
 * @vdev: DP vdev handle
 * @buf: buffer holding a struct cdp_tx_ingress_stats
 *
 * Each DP_STATS_INC* below adds the corresponding host-side counter onto
 * the vdev's running mcast-enhancement totals.
 *
 * Returns: void
 */
static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
					 void *buf)
{
	struct cdp_tx_ingress_stats *host_stats = NULL;

	if (!buf) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid host stats buf");
		return;
	}
	/* caller passes an opaque buf; the contract is that it is a
	 * cdp_tx_ingress_stats for DP_VDEV_STATS_TX_ME
	 */
	host_stats = (struct cdp_tx_ingress_stats *)buf;

	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
			 host_stats->mcast_en.mcast_pkt.num,
			 host_stats->mcast_en.mcast_pkt.bytes);
	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
		     host_stats->mcast_en.dropped_map_error);
	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
		     host_stats->mcast_en.dropped_self_mac);
	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
		     host_stats->mcast_en.dropped_send_fail);
	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
		     host_stats->mcast_en.ucast);
	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
		     host_stats->mcast_en.fail_seg_alloc);
	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
		     host_stats->mcast_en.clone_fail);
}
8591 
8592 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
8593  * @soc: DP soc handle
8594  * @vdev_id: id of DP vdev handle
8595  * @buf: buffer containing specific stats structure
8596  * @stats_id: stats type
8597  *
8598  * Returns: QDF_STATUS
8599  */
8600 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc,
8601 						 uint8_t vdev_id,
8602 						 void *buf,
8603 						 uint16_t stats_id)
8604 {
8605 	struct dp_vdev *vdev =
8606 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
8607 						   vdev_id);
8608 	if (!vdev) {
8609 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8610 			  "Invalid vdev handle");
8611 		return QDF_STATUS_E_FAILURE;
8612 	}
8613 
8614 	switch (stats_id) {
8615 	case DP_VDEV_STATS_PKT_CNT_ONLY:
8616 		break;
8617 	case DP_VDEV_STATS_TX_ME:
8618 		dp_txrx_update_vdev_me_stats(vdev, buf);
8619 		break;
8620 	default:
8621 		qdf_info("Invalid stats_id %d", stats_id);
8622 		break;
8623 	}
8624 
8625 	return QDF_STATUS_SUCCESS;
8626 }
8627 
8628 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
8629  * @soc: soc handle
8630  * @vdev_id: id of vdev handle
8631  * @peer_mac: mac of DP_PEER handle
8632  * @peer_stats: buffer to copy to
8633  * return : status success/failure
8634  */
8635 static QDF_STATUS
8636 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8637 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
8638 {
8639 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8640 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8641 						       peer_mac, 0, vdev_id);
8642 
8643 	if (!peer || peer->delete_in_progress) {
8644 		status = QDF_STATUS_E_FAILURE;
8645 	} else
8646 		qdf_mem_copy(peer_stats, &peer->stats,
8647 			     sizeof(struct cdp_peer_stats));
8648 
8649 	if (peer)
8650 		dp_peer_unref_delete(peer);
8651 
8652 	return status;
8653 }
8654 
/* dp_txrx_get_peer_stats_param() - fetch one field of cdp_peer_stats
 * @param soc - soc handle
 * @param vdev_id - vdev_id of vdev object
 * @param peer_mac - mac address of the peer
 * @param type - enum selecting which stat to copy into @buf
 * @param buf - buffer to hold the value
 *
 * The hash lookup takes a peer reference which is dropped before return.
 *
 * return : QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE when the peer is
 *	    missing/deleting or @type is out of range / unhandled
 */
static QDF_STATUS
dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
			     cdp_peer_stats_param_t *buf)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
						      peer_mac, 0, vdev_id);

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid Peer for Mac %pM", peer_mac);
		ret = QDF_STATUS_E_FAILURE;
	} else if (type < cdp_peer_stats_max) {
		/* Copy exactly the requested field out of peer->stats into
		 * the matching member of the stats-param union.
		 */
		switch (type) {
		case cdp_peer_tx_ucast:
			buf->tx_ucast = peer->stats.tx.ucast;
			break;
		case cdp_peer_tx_mcast:
			buf->tx_mcast = peer->stats.tx.mcast;
			break;
		case cdp_peer_tx_rate:
			buf->tx_rate = peer->stats.tx.tx_rate;
			break;
		case cdp_peer_tx_last_tx_rate:
			buf->last_tx_rate = peer->stats.tx.last_tx_rate;
			break;
		case cdp_peer_tx_inactive_time:
			buf->tx_inactive_time = peer->stats.tx.inactive_time;
			break;
		case cdp_peer_tx_ratecode:
			buf->tx_ratecode = peer->stats.tx.tx_ratecode;
			break;
		case cdp_peer_tx_flags:
			buf->tx_flags = peer->stats.tx.tx_flags;
			break;
		case cdp_peer_tx_power:
			buf->tx_power = peer->stats.tx.tx_power;
			break;
		case cdp_peer_rx_rate:
			buf->rx_rate = peer->stats.rx.rx_rate;
			break;
		case cdp_peer_rx_last_rx_rate:
			buf->last_rx_rate = peer->stats.rx.last_rx_rate;
			break;
		case cdp_peer_rx_ratecode:
			buf->rx_ratecode = peer->stats.rx.rx_ratecode;
			break;
		case cdp_peer_rx_ucast:
			buf->rx_ucast = peer->stats.rx.unicast;
			break;
		case cdp_peer_rx_flags:
			buf->rx_flags = peer->stats.rx.rx_flags;
			break;
		case cdp_peer_rx_avg_rssi:
			buf->rx_avg_rssi = peer->stats.rx.avg_rssi;
			break;
		default:
			/* enum value below cdp_peer_stats_max but not
			 * handled here
			 */
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Invalid value");
			ret = QDF_STATUS_E_FAILURE;
			break;
		}
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid value");
		ret = QDF_STATUS_E_FAILURE;
	}

	if (peer)
		dp_peer_unref_delete(peer);

	return ret;
}
8737 
8738 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
8739  * @soc: soc handle
8740  * @vdev_id: id of vdev handle
8741  * @peer_mac: mac of DP_PEER handle
8742  *
8743  * return : QDF_STATUS
8744  */
8745 static QDF_STATUS
8746 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8747 			 uint8_t *peer_mac)
8748 {
8749 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8750 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8751 						       peer_mac, 0, vdev_id);
8752 
8753 	if (!peer || peer->delete_in_progress) {
8754 		status = QDF_STATUS_E_FAILURE;
8755 		goto fail;
8756 	}
8757 
8758 	qdf_mem_zero(&peer->stats, sizeof(peer->stats));
8759 
8760 fail:
8761 	if (peer)
8762 		dp_peer_unref_delete(peer);
8763 
8764 	return status;
8765 }
8766 
/* dp_txrx_get_vdev_stats() - fill a buffer with cdp_vdev_stats
 * @soc: DP soc handle
 * @vdev_id: id of DP_VDEV handle
 * @buf: caller buffer, must point to a struct cdp_vdev_stats
 * @is_aggregate: when true, aggregate per-peer stats into the result
 *		  (done under the soc peer_ref_mutex); when false, copy
 *		  only the vdev's own stats
 *
 * return : 0 on success, 1 when the vdev or its pdev cannot be resolved
 */
static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
				   void *buf, bool is_aggregate)
{
	struct cdp_vdev_stats *vdev_stats;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev =
		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
						   vdev_id);

	if (!vdev)
		return 1;

	pdev = vdev->pdev;
	if (!pdev)
		return 1;

	vdev_stats = (struct cdp_vdev_stats *)buf;

	if (is_aggregate) {
		/* hold peer_ref_mutex so peers can't be freed while their
		 * stats are being folded into the vdev totals
		 */
		qdf_spin_lock_bh(&((struct dp_soc *)soc)->peer_ref_mutex);
		dp_aggregate_vdev_stats(vdev, buf);
		qdf_spin_unlock_bh(&((struct dp_soc *)soc)->peer_ref_mutex);
	} else {
		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
	}

	return 0;
}
8801 
8802 /*
8803  * dp_get_total_per(): get total per
8804  * @soc: DP soc handle
8805  * @pdev_id: id of DP_PDEV handle
8806  *
8807  * Return: % error rate using retries per packet and success packets
8808  */
8809 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
8810 {
8811 	struct dp_pdev *pdev =
8812 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8813 						   pdev_id);
8814 
8815 	if (!pdev)
8816 		return 0;
8817 
8818 	dp_aggregate_pdev_stats(pdev);
8819 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
8820 		return 0;
8821 	return ((pdev->stats.tx.retries * 100) /
8822 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
8823 }
8824 
8825 /*
8826  * dp_txrx_stats_publish(): publish pdev stats into a buffer
8827  * @soc: DP soc handle
8828  * @pdev_id: id of DP_PDEV handle
8829  * @buf: to hold pdev_stats
8830  *
8831  * Return: int
8832  */
8833 static int
8834 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
8835 		      struct cdp_stats_extd *buf)
8836 {
8837 	struct cdp_txrx_stats_req req = {0,};
8838 	struct dp_pdev *pdev =
8839 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8840 						   pdev_id);
8841 
8842 	if (!pdev)
8843 		return TXRX_STATS_LEVEL_OFF;
8844 
8845 	dp_aggregate_pdev_stats(pdev);
8846 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
8847 	req.cookie_val = 1;
8848 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8849 				req.param1, req.param2, req.param3, 0,
8850 				req.cookie_val, 0);
8851 
8852 	msleep(DP_MAX_SLEEP_TIME);
8853 
8854 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
8855 	req.cookie_val = 1;
8856 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8857 				req.param1, req.param2, req.param3, 0,
8858 				req.cookie_val, 0);
8859 
8860 	msleep(DP_MAX_SLEEP_TIME);
8861 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
8862 
8863 	return TXRX_STATS_LEVEL;
8864 }
8865 
8866 /**
8867  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
8868  * @soc: soc handle
8869  * @pdev_id: id of DP_PDEV handle
8870  * @map_id: ID of map that needs to be updated
8871  * @tos: index value in map
8872  * @tid: tid value passed by the user
8873  *
8874  * Return: QDF_STATUS
8875  */
8876 static QDF_STATUS
8877 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
8878 			       uint8_t pdev_id,
8879 			       uint8_t map_id,
8880 			       uint8_t tos, uint8_t tid)
8881 {
8882 	uint8_t dscp;
8883 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8884 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
8885 
8886 	if (!pdev)
8887 		return QDF_STATUS_E_FAILURE;
8888 
8889 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
8890 	pdev->dscp_tid_map[map_id][dscp] = tid;
8891 
8892 	if (map_id < soc->num_hw_dscp_tid_map)
8893 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
8894 				       map_id, dscp);
8895 	else
8896 		return QDF_STATUS_E_FAILURE;
8897 
8898 	return QDF_STATUS_SUCCESS;
8899 }
8900 
8901 /**
8902  * dp_fw_stats_process(): Process TxRX FW stats request
8903  * @vdev_handle: DP VDEV handle
8904  * @req: stats request
8905  *
8906  * return: int
8907  */
8908 static int dp_fw_stats_process(struct dp_vdev *vdev,
8909 			       struct cdp_txrx_stats_req *req)
8910 {
8911 	struct dp_pdev *pdev = NULL;
8912 	uint32_t stats = req->stats;
8913 	uint8_t mac_id = req->mac_id;
8914 
8915 	if (!vdev) {
8916 		DP_TRACE(NONE, "VDEV not found");
8917 		return 1;
8918 	}
8919 	pdev = vdev->pdev;
8920 
8921 	/*
8922 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
8923 	 * from param0 to param3 according to below rule:
8924 	 *
8925 	 * PARAM:
8926 	 *   - config_param0 : start_offset (stats type)
8927 	 *   - config_param1 : stats bmask from start offset
8928 	 *   - config_param2 : stats bmask from start offset + 32
8929 	 *   - config_param3 : stats bmask from start offset + 64
8930 	 */
8931 	if (req->stats == CDP_TXRX_STATS_0) {
8932 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
8933 		req->param1 = 0xFFFFFFFF;
8934 		req->param2 = 0xFFFFFFFF;
8935 		req->param3 = 0xFFFFFFFF;
8936 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
8937 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
8938 	}
8939 
8940 	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
8941 		return dp_h2t_ext_stats_msg_send(pdev,
8942 				HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
8943 				req->param0, req->param1, req->param2,
8944 				req->param3, 0, 0, mac_id);
8945 	} else {
8946 		return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
8947 				req->param1, req->param2, req->param3,
8948 				0, 0, mac_id);
8949 	}
8950 }
8951 
8952 /**
8953  * dp_txrx_stats_request - function to map to firmware and host stats
8954  * @soc: soc handle
8955  * @vdev_id: virtual device ID
8956  * @req: stats request
8957  *
8958  * Return: QDF_STATUS
8959  */
8960 static
8961 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
8962 				 uint8_t vdev_id,
8963 				 struct cdp_txrx_stats_req *req)
8964 {
8965 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
8966 	int host_stats;
8967 	int fw_stats;
8968 	enum cdp_stats stats;
8969 	int num_stats;
8970 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
8971 								  vdev_id);
8972 
8973 	if (!vdev || !req) {
8974 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8975 				"Invalid vdev/req instance");
8976 		return QDF_STATUS_E_INVAL;
8977 	}
8978 
8979 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
8980 		dp_err("Invalid mac id request");
8981 		return QDF_STATUS_E_INVAL;
8982 	}
8983 
8984 	stats = req->stats;
8985 	if (stats >= CDP_TXRX_MAX_STATS)
8986 		return QDF_STATUS_E_INVAL;
8987 
8988 	/*
8989 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
8990 	 *			has to be updated if new FW HTT stats added
8991 	 */
8992 	if (stats > CDP_TXRX_STATS_HTT_MAX)
8993 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
8994 
8995 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
8996 
8997 	if (stats >= num_stats) {
8998 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8999 			  "%s: Invalid stats option: %d", __func__, stats);
9000 		return QDF_STATUS_E_INVAL;
9001 	}
9002 
9003 	req->stats = stats;
9004 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
9005 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
9006 
9007 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
9008 		stats, fw_stats, host_stats);
9009 
9010 	if (fw_stats != TXRX_FW_STATS_INVALID) {
9011 		/* update request with FW stats type */
9012 		req->stats = fw_stats;
9013 		return dp_fw_stats_process(vdev, req);
9014 	}
9015 
9016 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
9017 			(host_stats <= TXRX_HOST_STATS_MAX))
9018 		return dp_print_host_stats(vdev, req, soc);
9019 	else
9020 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
9021 				"Wrong Input for TxRx Stats");
9022 
9023 	return QDF_STATUS_SUCCESS;
9024 }
9025 
9026 /*
9027  * dp_txrx_dump_stats() -  Dump statistics
9028  * @value - Statistics option
9029  */
9030 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
9031 				     enum qdf_stats_verbosity_level level)
9032 {
9033 	struct dp_soc *soc =
9034 		(struct dp_soc *)psoc;
9035 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9036 
9037 	if (!soc) {
9038 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9039 			"%s: soc is NULL", __func__);
9040 		return QDF_STATUS_E_INVAL;
9041 	}
9042 
9043 	switch (value) {
9044 	case CDP_TXRX_PATH_STATS:
9045 		dp_txrx_path_stats(soc);
9046 		dp_print_soc_interrupt_stats(soc);
9047 		hal_dump_reg_write_stats(soc->hal_soc);
9048 		break;
9049 
9050 	case CDP_RX_RING_STATS:
9051 		dp_print_per_ring_stats(soc);
9052 		break;
9053 
9054 	case CDP_TXRX_TSO_STATS:
9055 		dp_print_tso_stats(soc, level);
9056 		break;
9057 
9058 	case CDP_DUMP_TX_FLOW_POOL_INFO:
9059 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
9060 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
9061 		break;
9062 
9063 	case CDP_DP_NAPI_STATS:
9064 		dp_print_napi_stats(soc);
9065 		break;
9066 
9067 	case CDP_TXRX_DESC_STATS:
9068 		/* TODO: NOT IMPLEMENTED */
9069 		break;
9070 
9071 	case CDP_DP_RX_FISA_STATS:
9072 		dp_rx_dump_fisa_stats(soc);
9073 		break;
9074 
9075 	default:
9076 		status = QDF_STATUS_E_INVAL;
9077 		break;
9078 	}
9079 
9080 	return status;
9081 
9082 }
9083 
9084 /**
9085  * dp_txrx_clear_dump_stats() - clear dumpStats
9086  * @soc- soc handle
9087  * @value - stats option
9088  *
9089  * Return: 0 - Success, non-zero - failure
9090  */
9091 static
9092 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9093 				    uint8_t value)
9094 {
9095 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9096 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9097 
9098 	if (!soc) {
9099 		dp_err("%s: soc is NULL", __func__);
9100 		return QDF_STATUS_E_INVAL;
9101 	}
9102 
9103 	switch (value) {
9104 	case CDP_TXRX_TSO_STATS:
9105 		dp_txrx_clear_tso_stats(soc);
9106 		break;
9107 
9108 	default:
9109 		status = QDF_STATUS_E_INVAL;
9110 		break;
9111 	}
9112 
9113 	return status;
9114 }
9115 
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_update_flow_control_parameters() - copy the tx flow-control
 *			thresholds from the ini params into the soc cfg
 * @soc: soc handle
 * @params: ini parameter handle
 *
 * Return: void
 */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

	cfg->tx_flow_stop_queue_threshold =
				params->tx_flow_stop_queue_threshold;
	cfg->tx_flow_start_queue_offset =
				params->tx_flow_start_queue_offset;
}
#else
/* Flow control v2 compiled out: nothing to store */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
}
#endif
9141 
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
#define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024

/* Max packet limit for RX REAP Loop (dp_rx_process) */
#define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024

/**
 * dp_update_rx_soft_irq_limit_params() - store the per-loop packet
 *			limits and the matching end-of-list check flags
 * @soc: soc handle
 * @params: ini parameter handle
 *
 * Return: void
 */
static
void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
					struct cdp_config_params *params)
{
	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

	cfg->tx_comp_loop_pkt_limit = params->tx_comp_loop_pkt_limit;
	/* eol data check is enabled whenever the configured limit is
	 * below the hard maximum (same truth table as the original
	 * if/else pair)
	 */
	cfg->tx_comp_enable_eol_data_check =
		(params->tx_comp_loop_pkt_limit <
		 DP_TX_COMP_LOOP_PKT_LIMIT_MAX);

	cfg->rx_reap_loop_pkt_limit = params->rx_reap_loop_pkt_limit;
	cfg->rx_enable_eol_data_check =
		(params->rx_reap_loop_pkt_limit <
		 DP_RX_REAP_LOOP_PKT_LIMIT_MAX);

	cfg->rx_hp_oos_update_limit = params->rx_hp_oos_update_limit;

	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
		cfg->tx_comp_loop_pkt_limit,
		cfg->tx_comp_enable_eol_data_check,
		cfg->rx_reap_loop_pkt_limit,
		cfg->rx_enable_eol_data_check,
		cfg->rx_hp_oos_update_limit);
}
#else
/* Soft-irq time limiting compiled out: nothing to configure */
static inline
void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
					struct cdp_config_params *params)
{ }
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
9185 
9186 /**
9187  * dp_update_config_parameters() - API to store datapath
9188  *                            config parameters
9189  * @soc: soc handle
9190  * @cfg: ini parameter handle
9191  *
9192  * Return: status
9193  */
9194 static
9195 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
9196 				struct cdp_config_params *params)
9197 {
9198 	struct dp_soc *soc = (struct dp_soc *)psoc;
9199 
9200 	if (!(soc)) {
9201 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9202 				"%s: Invalid handle", __func__);
9203 		return QDF_STATUS_E_INVAL;
9204 	}
9205 
9206 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
9207 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
9208 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
9209 	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
9210 				params->p2p_tcp_udp_checksumoffload;
9211 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
9212 				params->nan_tcp_udp_checksumoffload;
9213 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
9214 				params->tcp_udp_checksumoffload;
9215 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
9216 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
9217 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
9218 
9219 	dp_update_rx_soft_irq_limit_params(soc, params);
9220 	dp_update_flow_control_parameters(soc, params);
9221 
9222 	return QDF_STATUS_SUCCESS;
9223 }
9224 
/* WDS control ops table registered with the cdp layer; the vendor
 * policy hooks exist only when WDS_VENDOR_EXTENSION is compiled in.
 */
static struct cdp_wds_ops dp_ops_wds = {
	.vdev_set_wds = dp_vdev_set_wds,
#ifdef WDS_VENDOR_EXTENSION
	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
#endif
};
9232 
9233 /*
9234  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
9235  * @soc_hdl - datapath soc handle
9236  * @vdev_id - virtual interface id
9237  * @callback - callback function
9238  * @ctxt: callback context
9239  *
9240  */
9241 static void
9242 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9243 		       ol_txrx_data_tx_cb callback, void *ctxt)
9244 {
9245 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9246 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
9247 
9248 	if (!vdev)
9249 		return;
9250 
9251 	vdev->tx_non_std_data_callback.func = callback;
9252 	vdev->tx_non_std_data_callback.ctxt = ctxt;
9253 }
9254 
9255 /**
9256  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
9257  * @soc: datapath soc handle
9258  * @pdev_id: id of datapath pdev handle
9259  *
9260  * Return: opaque pointer to dp txrx handle
9261  */
9262 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
9263 {
9264 	struct dp_pdev *pdev =
9265 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9266 						   pdev_id);
9267 	if (qdf_unlikely(!pdev))
9268 		return NULL;
9269 
9270 	return pdev->dp_txrx_handle;
9271 }
9272 
9273 /**
9274  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
9275  * @soc: datapath soc handle
9276  * @pdev_id: id of datapath pdev handle
9277  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
9278  *
9279  * Return: void
9280  */
9281 static void
9282 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
9283 			   void *dp_txrx_hdl)
9284 {
9285 	struct dp_pdev *pdev =
9286 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9287 						   pdev_id);
9288 
9289 	if (!pdev)
9290 		return;
9291 
9292 	pdev->dp_txrx_handle = dp_txrx_hdl;
9293 }
9294 
9295 /**
9296  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
9297  * @soc: datapath soc handle
9298  * @vdev_id: vdev id
9299  *
9300  * Return: opaque pointer to dp txrx handle
9301  */
9302 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc, uint8_t vdev_id)
9303 {
9304 	struct dp_vdev *vdev =
9305 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
9306 						   vdev_id);
9307 
9308 	if (!vdev)
9309 		return NULL;
9310 
9311 	return vdev->vdev_dp_ext_handle;
9312 }
9313 
9314 /**
9315  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
9316  * @soc: datapath soc handle
9317  * @vdev_id: vdev id
9318  * @size: size of advance dp handle
9319  *
9320  * Return: QDF_STATUS
9321  */
9322 static QDF_STATUS
9323 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc, uint8_t vdev_id,
9324 			  uint16_t size)
9325 {
9326 	struct dp_vdev *vdev =
9327 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
9328 						   vdev_id);
9329 	void *dp_ext_handle;
9330 
9331 	if (!vdev)
9332 		return QDF_STATUS_E_FAILURE;
9333 
9334 	dp_ext_handle = qdf_mem_malloc(size);
9335 
9336 	if (!dp_ext_handle)
9337 		return QDF_STATUS_E_FAILURE;
9338 
9339 	vdev->vdev_dp_ext_handle = dp_ext_handle;
9340 	return QDF_STATUS_SUCCESS;
9341 }
9342 
9343 /**
9344  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
9345  * @soc_handle: datapath soc handle
9346  *
9347  * Return: opaque pointer to external dp (non-core DP)
9348  */
9349 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
9350 {
9351 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9352 
9353 	return soc->external_txrx_handle;
9354 }
9355 
9356 /**
9357  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
9358  * @soc_handle: datapath soc handle
9359  * @txrx_handle: opaque pointer to external dp (non-core DP)
9360  *
9361  * Return: void
9362  */
9363 static void
9364 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
9365 {
9366 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9367 
9368 	soc->external_txrx_handle = txrx_handle;
9369 }
9370 
9371 /**
9372  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
9373  * @soc_hdl: datapath soc handle
9374  * @pdev_id: id of the datapath pdev handle
9375  * @lmac_id: lmac id
9376  *
9377  * Return: QDF_STATUS
9378  */
9379 static QDF_STATUS
9380 dp_soc_map_pdev_to_lmac
9381 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9382 	 uint32_t lmac_id)
9383 {
9384 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9385 
9386 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
9387 				pdev_id,
9388 				lmac_id);
9389 
9390 	/*Set host PDEV ID for lmac_id*/
9391 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
9392 			      pdev_id,
9393 			      lmac_id);
9394 
9395 	return QDF_STATUS_SUCCESS;
9396 }
9397 
9398 /**
9399  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
9400  * @soc_hdl: datapath soc handle
9401  * @pdev_id: id of the datapath pdev handle
9402  * @lmac_id: lmac id
9403  *
9404  * In the event of a dynamic mode change, update the pdev to lmac mapping
9405  *
9406  * Return: QDF_STATUS
9407  */
9408 static QDF_STATUS
9409 dp_soc_handle_pdev_mode_change
9410 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9411 	 uint32_t lmac_id)
9412 {
9413 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9414 	struct dp_vdev *vdev = NULL;
9415 	uint8_t hw_pdev_id, mac_id;
9416 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
9417 								  pdev_id);
9418 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
9419 
9420 	if (qdf_unlikely(!pdev))
9421 		return QDF_STATUS_E_FAILURE;
9422 
9423 	pdev->lmac_id = lmac_id;
9424 	dp_info(" mode change %d %d\n", pdev->pdev_id, pdev->lmac_id);
9425 
9426 	/*Set host PDEV ID for lmac_id*/
9427 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
9428 			      pdev->pdev_id,
9429 			      lmac_id);
9430 
9431 	hw_pdev_id =
9432 		dp_get_target_pdev_id_for_host_pdev_id(soc,
9433 						       pdev->pdev_id);
9434 
9435 	/*
9436 	 * When NSS offload is enabled, send pdev_id->lmac_id
9437 	 * and pdev_id to hw_pdev_id to NSS FW
9438 	 */
9439 	if (nss_config) {
9440 		mac_id = pdev->lmac_id;
9441 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
9442 			soc->cdp_soc.ol_ops->
9443 				pdev_update_lmac_n_target_pdev_id(
9444 				soc->ctrl_psoc,
9445 				&pdev_id, &mac_id, &hw_pdev_id);
9446 	}
9447 
9448 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
9449 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
9450 		HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
9451 						hw_pdev_id);
9452 		vdev->lmac_id = pdev->lmac_id;
9453 	}
9454 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
9455 
9456 	return QDF_STATUS_SUCCESS;
9457 }
9458 
9459 /**
9460  * dp_soc_set_pdev_status_down() - set pdev down/up status
9461  * @soc: datapath soc handle
9462  * @pdev_id: id of datapath pdev handle
9463  * @is_pdev_down: pdev down/up status
9464  *
9465  * Return: QDF_STATUS
9466  */
9467 static QDF_STATUS
9468 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
9469 			    bool is_pdev_down)
9470 {
9471 	struct dp_pdev *pdev =
9472 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9473 						   pdev_id);
9474 	if (!pdev)
9475 		return QDF_STATUS_E_FAILURE;
9476 
9477 	pdev->is_pdev_down = is_pdev_down;
9478 	return QDF_STATUS_SUCCESS;
9479 }
9480 
9481 /**
9482  * dp_get_cfg_capabilities() - get dp capabilities
9483  * @soc_handle: datapath soc handle
9484  * @dp_caps: enum for dp capabilities
9485  *
9486  * Return: bool to determine if dp caps is enabled
9487  */
9488 static bool
9489 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
9490 			enum cdp_capabilities dp_caps)
9491 {
9492 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9493 
9494 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
9495 }
9496 
#ifdef FEATURE_AST
/**
 * dp_peer_teardown_wifi3() - mark a peer as delete-in-progress and
 *			flush its AST entries
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: peer mac address
 *
 * Takes a local reference via dp_peer_find_hash_find() and always
 * releases it before returning.
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_FAILURE when the
 *	   peer is not found or a bss peer still has extra references
 */
static QDF_STATUS
dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		       uint8_t *peer_mac)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer =
			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id);

	/* Peer can be null for monitor vap mac address */
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid peer\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	/*
	 * For BSS peer, new peer is not created on alloc_node if the
	 * peer with same address already exists , instead refcnt is
	 * increased for existing peer. Correspondingly in delete path,
	 * only refcnt is decreased; and peer is only deleted , when all
	 * references are deleted. So delete_in_progress should not be set
	 * for bss_peer, unless only 3 reference remains (peer map reference,
	 * peer hash table reference and above local reference).
	 */
	if ((peer->vdev->opmode == wlan_op_mode_ap) && peer->bss_peer &&
	    (qdf_atomic_read(&peer->ref_cnt) > 3)) {
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	qdf_spin_lock_bh(&soc->ast_lock);
	peer->delete_in_progress = true;
	dp_peer_delete_ast_entries(soc, peer);
	qdf_spin_unlock_bh(&soc->ast_lock);

fail:
	/* peer is guaranteed non-NULL here (early return above), so drop
	 * the local reference unconditionally
	 */
	dp_peer_unref_delete(peer);
	return status;
}
#endif
9539 
9540 #ifdef ATH_SUPPORT_NAC_RSSI
9541 /**
9542  * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC
9543  * @soc_hdl: DP soc handle
9544  * @vdev_id: id of DP vdev handle
9545  * @mac_addr: neighbour mac
9546  * @rssi: rssi value
9547  *
9548  * Return: 0 for success. nonzero for failure.
9549  */
9550 static QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc,
9551 					      uint8_t vdev_id,
9552 					      char *mac_addr,
9553 					      uint8_t *rssi)
9554 {
9555 	struct dp_vdev *vdev =
9556 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
9557 						   vdev_id);
9558 	struct dp_pdev *pdev;
9559 	struct dp_neighbour_peer *peer = NULL;
9560 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
9561 
9562 	if (!vdev)
9563 		return status;
9564 
9565 	pdev = vdev->pdev;
9566 	*rssi = 0;
9567 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
9568 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
9569 		      neighbour_peer_list_elem) {
9570 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
9571 				mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
9572 			*rssi = peer->rssi;
9573 			status = QDF_STATUS_SUCCESS;
9574 			break;
9575 		}
9576 	}
9577 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
9578 	return status;
9579 }
9580 
9581 static QDF_STATUS
9582 dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc,
9583 		       uint8_t vdev_id,
9584 		       enum cdp_nac_param_cmd cmd, char *bssid,
9585 		       char *client_macaddr,
9586 		       uint8_t chan_num)
9587 {
9588 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
9589 	struct dp_vdev *vdev =
9590 		dp_get_vdev_from_soc_vdev_id_wifi3(soc,
9591 						   vdev_id);
9592 	struct dp_pdev *pdev;
9593 
9594 	if (!vdev)
9595 		return QDF_STATUS_E_FAILURE;
9596 
9597 	pdev = (struct dp_pdev *)vdev->pdev;
9598 	pdev->nac_rssi_filtering = 1;
9599 	/* Store address of NAC (neighbour peer) which will be checked
9600 	 * against TA of received packets.
9601 	 */
9602 
9603 	if (cmd == CDP_NAC_PARAM_ADD) {
9604 		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
9605 						 DP_NAC_PARAM_ADD,
9606 						 (uint8_t *)client_macaddr);
9607 	} else if (cmd == CDP_NAC_PARAM_DEL) {
9608 		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
9609 						 DP_NAC_PARAM_DEL,
9610 						 (uint8_t *)client_macaddr);
9611 	}
9612 
9613 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
9614 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
9615 			(soc->ctrl_psoc, pdev->pdev_id,
9616 			 vdev->vdev_id, cmd, bssid, client_macaddr);
9617 
9618 	return QDF_STATUS_SUCCESS;
9619 }
9620 #endif
9621 
9622 /**
9623  * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
9624  * for pktlog
9625  * @soc: cdp_soc handle
9626  * @pdev_id: id of dp pdev handle
9627  * @mac_addr: Peer mac address
9628  * @enb_dsb: Enable or disable peer based filtering
9629  *
9630  * Return: QDF_STATUS
9631  */
9632 static int
9633 dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
9634 			    uint8_t *mac_addr, uint8_t enb_dsb)
9635 {
9636 	struct dp_peer *peer;
9637 	struct dp_pdev *pdev =
9638 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9639 						   pdev_id);
9640 
9641 	if (!pdev) {
9642 		dp_err("Invalid Pdev for pdev_id %d", pdev_id);
9643 		return QDF_STATUS_E_FAILURE;
9644 	}
9645 
9646 	peer = (struct dp_peer *)dp_find_peer_by_addr((struct cdp_pdev *)pdev,
9647 						      mac_addr);
9648 
9649 	if (!peer) {
9650 		dp_err("Invalid Peer");
9651 		return QDF_STATUS_E_FAILURE;
9652 	}
9653 
9654 	peer->peer_based_pktlog_filter = enb_dsb;
9655 	pdev->dp_peer_based_pktlog = enb_dsb;
9656 
9657 	return QDF_STATUS_SUCCESS;
9658 }
9659 
#ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
/**
 * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
 * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
 * @soc: cdp_soc handle
 * @pdev_id: id of cdp_pdev handle
 * @protocol_type: protocol type for which stats should be displayed
 *
 * No-op stub used when rx-tag statistics support is compiled out.
 *
 * Return: none
 */
static inline void
dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
				   uint16_t protocol_type)
{
}
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
9676 
#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
 * applied to the desired protocol type packets
 * @soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @enable_rx_protocol_tag - bitmask that indicates what protocol types
 * are enabled for tagging. zero indicates disable feature, non-zero indicates
 * enable feature
 * @protocol_type: new protocol type for which the tag is being added
 * @tag: user configured tag for the new protocol
 *
 * No-op stub used when protocol-type tagging support is compiled out;
 * always reports success so callers need no feature-specific handling.
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
			       uint32_t enable_rx_protocol_tag,
			       uint16_t protocol_type,
			       uint16_t tag)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
9700 
#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_set_rx_flow_tag - add/delete a flow
 * @cdp_soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @flow_info: flow tuple that is to be added to/deleted from flow search table
 *
 * No-op stub used when rx flow tagging support is compiled out.
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
		   struct cdp_rx_flow_info *flow_info)
{
	return QDF_STATUS_SUCCESS;
}
/**
 * dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
 * given flow 5-tuple
 * @cdp_soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @flow_info: flow 5-tuple for which stats should be displayed
 *
 * No-op stub used when rx flow tagging support is compiled out.
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
			  struct cdp_rx_flow_info *flow_info)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
9732 
9733 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
9734 					   uint32_t max_peers,
9735 					   uint32_t max_ast_index,
9736 					   bool peer_map_unmap_v2)
9737 {
9738 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9739 
9740 	soc->max_peers = max_peers;
9741 
9742 	qdf_print ("%s max_peers %u, max_ast_index: %u\n",
9743 		   __func__, max_peers, max_ast_index);
9744 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
9745 
9746 	if (dp_peer_find_attach(soc))
9747 		return QDF_STATUS_E_FAILURE;
9748 
9749 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
9750 	soc->peer_map_attach_success = TRUE;
9751 
9752 	return QDF_STATUS_SUCCESS;
9753 }
9754 
9755 static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
9756 				   enum cdp_soc_param_t param,
9757 				   uint32_t value)
9758 {
9759 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9760 
9761 	switch (param) {
9762 	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
9763 		soc->num_msdu_exception_desc = value;
9764 		dp_info("num_msdu exception_desc %u",
9765 			value);
9766 		break;
9767 	default:
9768 		dp_info("not handled param %d ", param);
9769 		break;
9770 	}
9771 
9772 	return QDF_STATUS_SUCCESS;
9773 }
9774 
9775 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
9776 				      void *stats_ctx)
9777 {
9778 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9779 
9780 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
9781 }
9782 
9783 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9784 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
9785 					  uint8_t pdev_id)
9786 {
9787 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9788 	struct dp_vdev *vdev = NULL;
9789 	struct dp_peer *peer = NULL;
9790 	struct dp_pdev *pdev =
9791 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9792 						   pdev_id);
9793 
9794 	if (!pdev)
9795 		return QDF_STATUS_E_FAILURE;
9796 
9797 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
9798 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
9799 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
9800 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
9801 			if (peer && !peer->bss_peer)
9802 				dp_wdi_event_handler(
9803 					WDI_EVENT_FLUSH_RATE_STATS_REQ,
9804 					soc, peer->wlanstats_ctx,
9805 					peer->peer_id,
9806 					WDI_NO_VAL, pdev_id);
9807 		}
9808 	}
9809 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
9810 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
9811 
9812 	return QDF_STATUS_SUCCESS;
9813 }
9814 #else
9815 static inline QDF_STATUS
9816 dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
9817 			uint8_t pdev_id)
9818 {
9819 	return QDF_STATUS_SUCCESS;
9820 }
9821 #endif
9822 
9823 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9824 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
9825 					   uint8_t pdev_id,
9826 					   void *buf)
9827 {
9828 	 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
9829 			      (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
9830 			      WDI_NO_VAL, pdev_id);
9831 	return QDF_STATUS_SUCCESS;
9832 }
9833 #else
9834 static inline QDF_STATUS
9835 dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
9836 			 uint8_t pdev_id,
9837 			 void *buf)
9838 {
9839 	return QDF_STATUS_SUCCESS;
9840 }
9841 #endif
9842 
9843 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
9844 {
9845 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9846 
9847 	return soc->rate_stats_ctx;
9848 }
9849 
9850 /*
9851  * dp_get_cfg() - get dp cfg
9852  * @soc: cdp soc handle
9853  * @cfg: cfg enum
9854  *
9855  * Return: cfg value
9856  */
9857 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
9858 {
9859 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
9860 	uint32_t value = 0;
9861 
9862 	switch (cfg) {
9863 	case cfg_dp_enable_data_stall:
9864 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
9865 		break;
9866 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
9867 		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
9868 		break;
9869 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
9870 		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
9871 		break;
9872 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
9873 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
9874 		break;
9875 	case cfg_dp_tso_enable:
9876 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
9877 		break;
9878 	case cfg_dp_lro_enable:
9879 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
9880 		break;
9881 	case cfg_dp_gro_enable:
9882 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
9883 		break;
9884 	case cfg_dp_tx_flow_start_queue_offset:
9885 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
9886 		break;
9887 	case cfg_dp_tx_flow_stop_queue_threshold:
9888 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
9889 		break;
9890 	case cfg_dp_disable_intra_bss_fwd:
9891 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
9892 		break;
9893 	case cfg_dp_pktlog_buffer_size:
9894 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
9895 		break;
9896 	default:
9897 		value =  0;
9898 	}
9899 
9900 	return value;
9901 }
9902 
#ifdef PEER_FLOW_CONTROL
/**
 * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
 * @soc_handle: datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @param: ol ath params
 * @value: value of the flag
 * @buff: Buffer to be passed; for DP_PARAM_TOTAL_Q_SIZE, NULL means
 *	  "set @value", non-NULL means "read current value into *buff"
 *
 * Implemented this function same as legacy function. In legacy code, single
 * function is used to display stats and update pdev params.
 *
 * Return: 0 for success. nonzero for failure.
 */
static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
					       uint8_t pdev_id,
					       enum _dp_param_t param,
					       uint32_t value, void *buff)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (qdf_unlikely(!pdev))
		return 1;

	/* re-derive soc from the pdev as a defensive cross-check */
	soc = pdev->soc;
	if (!soc)
		return 1;

	switch (param) {
#ifdef QCA_ENH_V3_STATS_SUPPORT
	case DP_PARAM_VIDEO_DELAY_STATS_FC:
		/* toggle collection of per-tid delay stats */
		if (value)
			pdev->delay_stats_flag = true;
		else
			pdev->delay_stats_flag = false;
		break;
	case DP_PARAM_VIDEO_STATS_FC:
		qdf_print("------- TID Stats ------\n");
		dp_pdev_print_tid_stats(pdev);
		qdf_print("------ Delay Stats ------\n");
		dp_pdev_print_delay_stats(pdev);
		break;
#endif
	case DP_PARAM_TOTAL_Q_SIZE:
		{
			uint32_t tx_min, tx_max;

			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

			if (!buff) {
				/* set path: accept only values within the
				 * configured descriptor range
				 */
				if ((value >= tx_min) && (value <= tx_max)) {
					pdev->num_tx_allowed = value;
				} else {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_INFO,
						  "Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
						  tx_min, tx_max);
					break;
				}
			} else {
				/* query path: report the current limit */
				*(int *)buff = pdev->num_tx_allowed;
			}
		}
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: not handled param %d ", __func__, param);
		break;
	}

	return 0;
}
#endif
9980 
9981 /**
9982  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
9983  * @psoc: dp soc handle
9984  * @pdev_id: id of DP_PDEV handle
9985  * @pcp: pcp value
9986  * @tid: tid value passed by the user
9987  *
9988  * Return: QDF_STATUS_SUCCESS on success
9989  */
9990 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
9991 						uint8_t pdev_id,
9992 						uint8_t pcp, uint8_t tid)
9993 {
9994 	struct dp_soc *soc = (struct dp_soc *)psoc;
9995 
9996 	soc->pcp_tid_map[pcp] = tid;
9997 
9998 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
9999 	return QDF_STATUS_SUCCESS;
10000 }
10001 
10002 /**
10003  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
10004  * @soc: DP soc handle
10005  * @vdev_id: id of DP_VDEV handle
10006  * @pcp: pcp value
10007  * @tid: tid value passed by the user
10008  *
10009  * Return: QDF_STATUS_SUCCESS on success
10010  */
10011 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc,
10012 						uint8_t vdev_id,
10013 						uint8_t pcp, uint8_t tid)
10014 {
10015 	struct dp_vdev *vdev =
10016 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
10017 						   vdev_id);
10018 
10019 	if (!vdev)
10020 		return QDF_STATUS_E_FAILURE;
10021 
10022 	vdev->pcp_tid_map[pcp] = tid;
10023 
10024 	return QDF_STATUS_SUCCESS;
10025 }
10026 
10027 #ifdef QCA_SUPPORT_FULL_MON
10028 static inline QDF_STATUS
10029 dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
10030 			uint8_t val)
10031 {
10032 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10033 
10034 	soc->full_mon_mode = val;
10035 	qdf_alert("Configure full monitor mode val: %d ", val);
10036 
10037 	return QDF_STATUS_SUCCESS;
10038 }
10039 #else
10040 static inline QDF_STATUS
10041 dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
10042 			uint8_t val)
10043 {
10044 	return 0;
10045 }
10046 #endif
10047 
10048 static struct cdp_cmn_ops dp_ops_cmn = {
10049 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
10050 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
10051 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
10052 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
10053 	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
10054 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
10055 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
10056 	.txrx_peer_create = dp_peer_create_wifi3,
10057 	.txrx_peer_setup = dp_peer_setup_wifi3,
10058 #ifdef FEATURE_AST
10059 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
10060 #else
10061 	.txrx_peer_teardown = NULL,
10062 #endif
10063 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
10064 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
10065 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
10066 	.txrx_peer_get_ast_info_by_pdev =
10067 		dp_peer_get_ast_info_by_pdevid_wifi3,
10068 	.txrx_peer_ast_delete_by_soc =
10069 		dp_peer_ast_entry_del_by_soc,
10070 	.txrx_peer_ast_delete_by_pdev =
10071 		dp_peer_ast_entry_del_by_pdev,
10072 	.txrx_peer_delete = dp_peer_delete_wifi3,
10073 	.txrx_vdev_register = dp_vdev_register_wifi3,
10074 	.txrx_soc_detach = dp_soc_detach_wifi3,
10075 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
10076 	.txrx_soc_init = dp_soc_init_wifi3,
10077 	.txrx_tso_soc_attach = dp_tso_soc_attach,
10078 	.txrx_tso_soc_detach = dp_tso_soc_detach,
10079 	.txrx_pdev_init = dp_pdev_init_wifi3,
10080 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
10081 	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
10082 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
10083 	.txrx_ath_getstats = dp_get_device_stats,
10084 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
10085 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
10086 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
10087 	.delba_process = dp_delba_process_wifi3,
10088 	.set_addba_response = dp_set_addba_response,
10089 	.flush_cache_rx_queue = NULL,
10090 	/* TODO: get API's for dscp-tid need to be added*/
10091 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
10092 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
10093 	.txrx_get_total_per = dp_get_total_per,
10094 	.txrx_stats_request = dp_txrx_stats_request,
10095 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
10096 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
10097 	.display_stats = dp_txrx_dump_stats,
10098 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
10099 	.txrx_intr_detach = dp_soc_interrupt_detach,
10100 	.set_pn_check = dp_set_pn_check_wifi3,
10101 	.set_key_sec_type = dp_set_key_sec_type_wifi3,
10102 	.update_config_parameters = dp_update_config_parameters,
10103 	/* TODO: Add other functions */
10104 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
10105 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
10106 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
10107 	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
10108 	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
10109 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
10110 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
10111 	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
10112 	.handle_mode_change = dp_soc_handle_pdev_mode_change,
10113 	.set_pdev_status_down = dp_soc_set_pdev_status_down,
10114 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
10115 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
10116 	.tx_send = dp_tx_send,
10117 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
10118 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
10119 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
10120 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
10121 	.set_soc_param = dp_soc_set_param,
10122 	.txrx_get_os_rx_handles_from_vdev =
10123 					dp_get_os_rx_handles_from_vdev_wifi3,
10124 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
10125 	.get_dp_capabilities = dp_get_cfg_capabilities,
10126 	.txrx_get_cfg = dp_get_cfg,
10127 	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
10128 	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
10129 	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
10130 	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
10131 
10132 	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
10133 	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
10134 
10135 	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
10136 #ifdef QCA_MULTIPASS_SUPPORT
10137 	.set_vlan_groupkey = dp_set_vlan_groupkey,
10138 #endif
10139 	.get_peer_mac_list = dp_get_peer_mac_list,
10140 	.tx_send_exc = dp_tx_send_exception,
10141 };
10142 
/* Control-plane ops table: parameter get/set, WDI event subscription,
 * security and monitor/filter configuration hooks exported via cdp.
 */
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	/* per-vdev peer protocol counting and drop-mask controls */
	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
	.txrx_set_peer_protocol_drop_mask =
		dp_enable_vdev_peer_protocol_drop_mask,
	.txrx_is_peer_protocol_count_enabled =
		dp_is_vdev_peer_protocol_count_enabled,
	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
#endif
	/* vdev/psoc/pdev/peer parameter accessors */
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_set_psoc_param = dp_set_psoc_param,
	.txrx_get_psoc_param = dp_get_psoc_param,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	.txrx_update_filter_neighbour_peers =
		dp_update_filter_neighbour_peers,
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
	.txrx_get_sec_type = dp_get_sec_type,
	/* WDI event (un)subscription for stats/pktlog consumers */
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
#ifdef WDI_EVENT_ENABLE
	.txrx_get_pldev = dp_get_pldev,
#endif
	.txrx_set_pdev_param = dp_set_pdev_param,
	.txrx_get_pdev_param = dp_get_pdev_param,
	.txrx_set_peer_param = dp_set_peer_param,
	.txrx_get_peer_param = dp_get_peer_param,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
#endif
#ifdef ATH_SUPPORT_NAC_RSSI
	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
#endif
	.set_key = dp_set_michael_key,
	.txrx_get_vdev_param = dp_get_vdev_param,
	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
	.calculate_delay_stats = dp_calculate_delay_stats,
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	.txrx_dump_pdev_rx_protocol_tag_stats =
				dp_dump_pdev_rx_protocol_tag_stats,
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
#endif /*QCA_MULTIPASS_SUPPORT*/
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	.txrx_update_peer_pkt_capture_params =
		 dp_peer_update_pkt_capture_params,
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
};
10202 
/* Mcast-enhancement (IQUE) ops: unicast conversion descriptor management */
static struct cdp_me_ops dp_ops_me = {
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
};
10210 
/* Monitor-mode ops: filter configuration and monitor ring management */
static struct cdp_mon_ops dp_ops_mon = {
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
	.config_full_mon_mode = dp_config_full_mon_mode,
};
10218 
/* Host statistics ops: per-peer/vdev/pdev stats query, publish and reset */
static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
#ifdef FEATURE_PERPKT_INFO
	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
#endif /* FEATURE_PERPKT_INFO */
	.txrx_stats_publish = dp_txrx_stats_publish,
	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
	/* TODO */
};
10237 
/* Raw-mode ops: no handlers implemented yet */
static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};
10241 
10242 #ifdef PEER_FLOW_CONTROL
/* Peer flow-control ops; positional initializer for the first member */
static struct cdp_pflow_ops dp_ops_pflow = {
	dp_tx_flow_ctrl_configure_pdev,
};
#endif /* PEER_FLOW_CONTROL */
10247 
10248 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/* Channel Frequency Response (CFR) capture ops */
static struct cdp_cfr_ops dp_ops_cfr = {
	.txrx_cfr_filter = dp_cfr_filter,
	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
	.txrx_enable_mon_reap_timer = dp_enable_mon_reap_timer,
};
10257 #endif
10258 
10259 #ifdef FEATURE_RUNTIME_PM
10260 /**
10261  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
10262  * @soc_hdl: Datapath soc handle
10263  * @pdev_id: id of data path pdev handle
10264  *
10265  * DP is ready to runtime suspend if there are no pending TX packets.
10266  *
10267  * Return: QDF_STATUS
10268  */
10269 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10270 {
10271 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10272 	struct dp_pdev *pdev;
10273 
10274 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10275 	if (!pdev) {
10276 		dp_err("pdev is NULL");
10277 		return QDF_STATUS_E_INVAL;
10278 	}
10279 
10280 	/* Abort if there are any pending TX packets */
10281 	if (dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev)) > 0) {
10282 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
10283 			  FL("Abort suspend due to pending TX packets"));
10284 		return QDF_STATUS_E_AGAIN;
10285 	}
10286 
10287 	if (soc->intr_mode == DP_INTR_POLL)
10288 		qdf_timer_stop(&soc->int_timer);
10289 
10290 	return QDF_STATUS_SUCCESS;
10291 }
10292 
10293 /**
10294  * dp_flush_ring_hptp() - Update ring shadow
10295  *			  register HP/TP address when runtime
10296  *                        resume
10297  * @opaque_soc: DP soc context
10298  *
10299  * Return: None
10300  */
10301 static
10302 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
10303 {
10304 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
10305 						 HAL_SRNG_FLUSH_EVENT)) {
10306 		/* Acquire the lock */
10307 		hal_srng_access_start(soc->hal_soc, hal_srng);
10308 
10309 		hal_srng_access_end(soc->hal_soc, hal_srng);
10310 
10311 		hal_srng_set_flush_last_ts(hal_srng);
10312 	}
10313 }
10314 
10315 /**
10316  * dp_runtime_resume() - ensure DP is ready to runtime resume
10317  * @soc_hdl: Datapath soc handle
10318  * @pdev_id: id of data path pdev handle
10319  *
10320  * Resume DP for runtime PM.
10321  *
10322  * Return: QDF_STATUS
10323  */
10324 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10325 {
10326 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10327 	int i;
10328 
10329 	if (soc->intr_mode == DP_INTR_POLL)
10330 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
10331 
10332 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
10333 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
10334 	}
10335 
10336 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
10337 
10338 	return QDF_STATUS_SUCCESS;
10339 }
10340 #endif /* FEATURE_RUNTIME_PM */
10341 
10342 /**
10343  * dp_tx_get_success_ack_stats() - get tx success completion count
10344  * @soc_hdl: Datapath soc handle
10345  * @vdevid: vdev identifier
10346  *
10347  * Return: tx success ack count
10348  */
10349 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
10350 					    uint8_t vdev_id)
10351 {
10352 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10353 	struct cdp_vdev_stats *vdev_stats = NULL;
10354 	uint32_t tx_success;
10355 	struct dp_vdev *vdev =
10356 		(struct dp_vdev *)dp_get_vdev_from_soc_vdev_id_wifi3(soc,
10357 								     vdev_id);
10358 
10359 	if (!vdev) {
10360 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
10361 			  FL("Invalid vdev id %d"), vdev_id);
10362 		return 0;
10363 	}
10364 
10365 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
10366 	if (!vdev_stats) {
10367 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
10368 			  "DP alloc failure - unable to get alloc vdev stats");
10369 		return 0;
10370 	}
10371 
10372 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
10373 	dp_aggregate_vdev_stats(vdev, vdev_stats);
10374 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
10375 
10376 	tx_success = vdev_stats->tx.tx_success.num;
10377 	qdf_mem_free(vdev_stats);
10378 
10379 	return tx_success;
10380 }
10381 
10382 #ifdef WLAN_SUPPORT_DATA_STALL
10383 /**
10384  * dp_register_data_stall_detect_cb() - register data stall callback
10385  * @soc_hdl: Datapath soc handle
10386  * @pdev_id: id of data path pdev handle
10387  * @data_stall_detect_callback: data stall callback function
10388  *
10389  * Return: QDF_STATUS Enumeration
10390  */
10391 static
10392 QDF_STATUS dp_register_data_stall_detect_cb(
10393 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10394 			data_stall_detect_cb data_stall_detect_callback)
10395 {
10396 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10397 	struct dp_pdev *pdev;
10398 
10399 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10400 	if (!pdev) {
10401 		dp_err("pdev NULL!");
10402 		return QDF_STATUS_E_INVAL;
10403 	}
10404 
10405 	pdev->data_stall_detect_callback = data_stall_detect_callback;
10406 	return QDF_STATUS_SUCCESS;
10407 }
10408 
10409 /**
10410  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
10411  * @soc_hdl: Datapath soc handle
10412  * @pdev_id: id of data path pdev handle
10413  * @data_stall_detect_callback: data stall callback function
10414  *
10415  * Return: QDF_STATUS Enumeration
10416  */
10417 static
10418 QDF_STATUS dp_deregister_data_stall_detect_cb(
10419 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10420 			data_stall_detect_cb data_stall_detect_callback)
10421 {
10422 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10423 	struct dp_pdev *pdev;
10424 
10425 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10426 	if (!pdev) {
10427 		dp_err("pdev NULL!");
10428 		return QDF_STATUS_E_INVAL;
10429 	}
10430 
10431 	pdev->data_stall_detect_callback = NULL;
10432 	return QDF_STATUS_SUCCESS;
10433 }
10434 
10435 /**
10436  * dp_txrx_post_data_stall_event() - post data stall event
10437  * @soc_hdl: Datapath soc handle
10438  * @indicator: Module triggering data stall
10439  * @data_stall_type: data stall event type
10440  * @pdev_id: pdev id
10441  * @vdev_id_bitmap: vdev id bitmap
10442  * @recovery_type: data stall recovery type
10443  *
10444  * Return: None
10445  */
10446 static void
10447 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
10448 			      enum data_stall_log_event_indicator indicator,
10449 			      enum data_stall_log_event_type data_stall_type,
10450 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
10451 			      enum data_stall_log_recovery_type recovery_type)
10452 {
10453 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10454 	struct data_stall_event_info data_stall_info;
10455 	struct dp_pdev *pdev;
10456 
10457 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10458 	if (!pdev) {
10459 		dp_err("pdev NULL!");
10460 		return;
10461 	}
10462 
10463 	if (!pdev->data_stall_detect_callback) {
10464 		dp_err("data stall cb not registered!");
10465 		return;
10466 	}
10467 
10468 	dp_info("data_stall_type: %x pdev_id: %d",
10469 		data_stall_type, pdev_id);
10470 
10471 	data_stall_info.indicator = indicator;
10472 	data_stall_info.data_stall_type = data_stall_type;
10473 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
10474 	data_stall_info.pdev_id = pdev_id;
10475 	data_stall_info.recovery_type = recovery_type;
10476 
10477 	pdev->data_stall_detect_callback(&data_stall_info);
10478 }
10479 #endif /* WLAN_SUPPORT_DATA_STALL */
10480 
10481 #ifdef WLAN_FEATURE_STATS_EXT
10482 /* rx hw stats event wait timeout in ms */
10483 #define DP_REO_STATUS_STATS_TIMEOUT 1500
10484 /**
10485  * dp_txrx_ext_stats_request - request dp txrx extended stats request
10486  * @soc_hdl: soc handle
10487  * @pdev_id: pdev id
10488  * @req: stats request
10489  *
10490  * Return: QDF_STATUS
10491  */
10492 static QDF_STATUS
10493 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10494 			  struct cdp_txrx_ext_stats *req)
10495 {
10496 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10497 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10498 
10499 	if (!pdev) {
10500 		dp_err("pdev is null");
10501 		return QDF_STATUS_E_INVAL;
10502 	}
10503 
10504 	dp_aggregate_pdev_stats(pdev);
10505 
10506 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
10507 	req->tx_msdu_overflow = pdev->stats.tx_i.dropped.ring_full;
10508 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
10509 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
10510 	req->rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
10511 	req->rx_mpdu_error = soc->stats.rx.err_ring_pkts -
10512 				soc->stats.rx.rx_frags;
10513 
10514 	return QDF_STATUS_SUCCESS;
10515 }
10516 
10517 /**
10518  * dp_rx_hw_stats_cb - request rx hw stats response callback
10519  * @soc: soc handle
10520  * @cb_ctxt: callback context
10521  * @reo_status: reo command response status
10522  *
10523  * Return: None
10524  */
10525 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
10526 			      union hal_reo_status *reo_status)
10527 {
10528 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
10529 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
10530 	bool is_query_timeout;
10531 
10532 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
10533 	is_query_timeout = rx_hw_stats->is_query_timeout;
10534 	/* free the cb_ctxt if all pending tid stats query is received */
10535 	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
10536 		if (!is_query_timeout) {
10537 			qdf_event_set(&soc->rx_hw_stats_event);
10538 			soc->is_last_stats_ctx_init = false;
10539 		}
10540 
10541 		qdf_mem_free(rx_hw_stats);
10542 	}
10543 
10544 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
10545 		dp_info("REO stats failure %d",
10546 			queue_status->header.status);
10547 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10548 		return;
10549 	}
10550 
10551 	if (!is_query_timeout) {
10552 		soc->ext_stats.rx_mpdu_received +=
10553 					queue_status->mpdu_frms_cnt;
10554 		soc->ext_stats.rx_mpdu_missed +=
10555 					queue_status->late_recv_mpdu_cnt;
10556 	}
10557 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10558 }
10559 
10560 /**
10561  * dp_request_rx_hw_stats - request rx hardware stats
10562  * @soc_hdl: soc handle
10563  * @vdev_id: vdev id
10564  *
10565  * Return: None
10566  */
10567 static QDF_STATUS
10568 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
10569 {
10570 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10571 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
10572 	struct dp_peer *peer;
10573 	QDF_STATUS status;
10574 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
10575 	int rx_stats_sent_cnt = 0;
10576 	uint32_t last_rx_mpdu_received;
10577 	uint32_t last_rx_mpdu_missed;
10578 
10579 	if (!vdev) {
10580 		dp_err("vdev is null for vdev_id: %u", vdev_id);
10581 		return QDF_STATUS_E_INVAL;
10582 	}
10583 
10584 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev);
10585 
10586 	if (!peer) {
10587 		dp_err("Peer is NULL");
10588 		return QDF_STATUS_E_INVAL;
10589 	}
10590 
10591 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
10592 
10593 	if (!rx_hw_stats) {
10594 		dp_err("malloc failed for hw stats structure");
10595 		dp_peer_unref_delete(peer);
10596 		return QDF_STATUS_E_NOMEM;
10597 	}
10598 
10599 	qdf_event_reset(&soc->rx_hw_stats_event);
10600 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
10601 	/* save the last soc cumulative stats and reset it to 0 */
10602 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
10603 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
10604 	soc->ext_stats.rx_mpdu_received = 0;
10605 	soc->ext_stats.rx_mpdu_missed = 0;
10606 
10607 	rx_stats_sent_cnt =
10608 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
10609 	if (!rx_stats_sent_cnt) {
10610 		dp_err("no tid stats sent successfully");
10611 		qdf_mem_free(rx_hw_stats);
10612 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10613 		dp_peer_unref_delete(peer);
10614 		return QDF_STATUS_E_INVAL;
10615 	}
10616 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
10617 		       rx_stats_sent_cnt);
10618 	rx_hw_stats->is_query_timeout = false;
10619 	soc->is_last_stats_ctx_init = true;
10620 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10621 
10622 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
10623 				       DP_REO_STATUS_STATS_TIMEOUT);
10624 
10625 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
10626 	if (status != QDF_STATUS_SUCCESS) {
10627 		dp_info("rx hw stats event timeout");
10628 		if (soc->is_last_stats_ctx_init)
10629 			rx_hw_stats->is_query_timeout = true;
10630 		/**
10631 		 * If query timeout happened, use the last saved stats
10632 		 * for this time query.
10633 		 */
10634 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
10635 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
10636 	}
10637 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10638 	dp_peer_unref_delete(peer);
10639 
10640 	return status;
10641 }
10642 #endif /* WLAN_FEATURE_STATS_EXT */
10643 
10644 #ifdef DP_PEER_EXTENDED_API
/* Miscellaneous ops: TDLS TX, opmode query, runtime PM, pktlog,
 * data stall detection and extended stats hooks.
 */
static struct cdp_misc_ops dp_ops_misc = {
#ifdef FEATURE_WLAN_TDLS
	.tx_non_std = dp_tx_non_std,
#endif /* FEATURE_WLAN_TDLS */
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.pkt_log_init = dp_pkt_log_init,
	.pkt_log_con_service = dp_pkt_log_con_service,
	.get_num_rx_contexts = dp_get_num_rx_contexts,
	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
#ifdef WLAN_SUPPORT_DATA_STALL
	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
#endif

#ifdef WLAN_FEATURE_STATS_EXT
	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
	.request_rx_hw_stats = dp_request_rx_hw_stats,
#endif /* WLAN_FEATURE_STATS_EXT */
};
10669 #endif
10670 
10671 #ifdef DP_FLOW_CTL
/* TX flow-control (v2) ops: per-vdev flow pool management */
static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};
10682 
/* Legacy flow-control ops: intentionally empty for WIFI 3.0 DP */
static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
10686 #endif
10687 
10688 #ifdef IPA_OFFLOAD
/* IPA offload ops: pipe setup/teardown, doorbell and autonomy control */
static struct cdp_ipa_ops dp_ops_ipa = {
	.ipa_get_resource = dp_ipa_get_resource,
	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
	.ipa_op_response = dp_ipa_op_response,
	.ipa_register_op_cb = dp_ipa_register_op_cb,
	.ipa_get_stat = dp_ipa_get_stat,
	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
	.ipa_setup = dp_ipa_setup,
	.ipa_cleanup = dp_ipa_cleanup,
	.ipa_setup_iface = dp_ipa_setup_iface,
	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
	.ipa_enable_pipes = dp_ipa_enable_pipes,
	.ipa_disable_pipes = dp_ipa_disable_pipes,
	.ipa_set_perf_level = dp_ipa_set_perf_level,
	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd
};
10707 #endif
10708 
10709 #ifdef DP_POWER_SAVE
/**
 * dp_bus_suspend() - prepare DP for bus suspend
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Waits up to SUSPEND_DRAIN_WAIT ms (polling every 50 ms) for pending
 * TX frames to drain, then stops the poll-mode interrupt timer and the
 * monitor reap timer, reaping any frames left in the monitor rings.
 *
 * Return: QDF_STATUS_SUCCESS, QDF_STATUS_E_INVAL on bad pdev_id, or
 *	   QDF_STATUS_E_TIMEOUT if TX frames did not drain in time
 */
static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	int timeout = SUSPEND_DRAIN_WAIT;
	int drain_wait_delay = 50; /* 50 ms */

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	/* Abort if there are any pending TX packets */
	while (dp_get_tx_pending((struct cdp_pdev *)pdev) > 0) {
		qdf_sleep(drain_wait_delay);
		if (timeout <= 0) {
			dp_err("TX frames are pending, abort suspend");
			return QDF_STATUS_E_TIMEOUT;
		}
		timeout = timeout - drain_wait_delay;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	/* Stop monitor reap timer and reap any pending frames in ring */
	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
	    soc->reap_timer_init) {
		qdf_timer_sync_cancel(&soc->mon_reap_timer);
		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
	}

	return QDF_STATUS_SUCCESS;
}
10745 
10746 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10747 {
10748 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10749 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10750 
10751 	if (qdf_unlikely(!pdev)) {
10752 		dp_err("pdev is NULL");
10753 		return QDF_STATUS_E_INVAL;
10754 	}
10755 
10756 	if (soc->intr_mode == DP_INTR_POLL)
10757 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
10758 
10759 	/* Start monitor reap timer */
10760 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
10761 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
10762 	    soc->reap_timer_init)
10763 		qdf_timer_mod(&soc->mon_reap_timer,
10764 			      DP_INTR_POLL_TIMER_MS);
10765 
10766 	return QDF_STATUS_SUCCESS;
10767 }
10768 
10769 /**
10770  * dp_process_wow_ack_rsp() - process wow ack response
10771  * @soc_hdl: datapath soc handle
10772  * @pdev_id: data path pdev handle id
10773  *
10774  * Return: none
10775  */
10776 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10777 {
10778 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10779 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10780 
10781 	if (qdf_unlikely(!pdev)) {
10782 		dp_err("pdev is NULL");
10783 		return;
10784 	}
10785 
10786 	/*
10787 	 * As part of wow enable FW disables the mon status ring and in wow ack
10788 	 * response from FW reap mon status ring to make sure no packets pending
10789 	 * in the ring.
10790 	 */
10791 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
10792 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
10793 	    soc->reap_timer_init) {
10794 		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
10795 	}
10796 }
10797 
10798 /**
10799  * dp_process_target_suspend_req() - process target suspend request
10800  * @soc_hdl: datapath soc handle
10801  * @pdev_id: data path pdev handle id
10802  *
10803  * Return: none
10804  */
10805 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
10806 					  uint8_t pdev_id)
10807 {
10808 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10809 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10810 
10811 	if (qdf_unlikely(!pdev)) {
10812 		dp_err("pdev is NULL");
10813 		return;
10814 	}
10815 
10816 	/* Stop monitor reap timer and reap any pending frames in ring */
10817 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
10818 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
10819 	    soc->reap_timer_init) {
10820 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
10821 		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
10822 	}
10823 }
10824 
/* Bus power-management ops: suspend/resume and target/WoW handling */
static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume,
	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
	.process_target_suspend_req = dp_process_target_suspend_req
};
10831 #endif
10832 
10833 #ifdef DP_FLOW_CTL
/* Throttle ops: intentionally empty for WIFI 3.0 DP */
static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
10837 
/* Config ops: intentionally empty for WIFI 3.0 DP */
static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
10841 #endif
10842 
10843 #ifdef DP_PEER_EXTENDED_API
/* OCB ops: intentionally empty for WIFI 3.0 DP */
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
10847 
/* Mobile stats ops: stats dump clearing only */
static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	.clear_stats = dp_txrx_clear_dump_stats,
};
10851 
/* Extended peer ops: registration, lookup and state management */
static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_exist = dp_find_peer_exist,
	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_peer_state = dp_get_peer_state,
};
10864 #endif
10865 
/* Top-level cdp ops table wired into soc->cdp_soc.ops at soc attach;
 * aggregates all feature-specific ops tables defined above.
 */
static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.mon_ops = &dp_ops_mon,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef PEER_FLOW_CONTROL
	.pflow_ops = &dp_ops_pflow,
#endif /* PEER_FLOW_CONTROL */
#ifdef DP_PEER_EXTENDED_API
	.misc_ops = &dp_ops_misc,
	.ocb_ops = &dp_ops_ocb,
	.peer_ops = &dp_ops_peer,
	.mob_stats_ops = &dp_ops_mob_stats,
#endif
#ifdef DP_FLOW_CTL
	.cfg_ops = &dp_ops_cfg,
	.flowctl_ops = &dp_ops_flowctl,
	.l_flowctl_ops = &dp_ops_l_flowctl,
	.throttle_ops = &dp_ops_throttle,
#endif
#ifdef IPA_OFFLOAD
	.ipa_ops = &dp_ops_ipa,
#endif
#ifdef DP_POWER_SAVE
	.bus_ops = &dp_ops_bus,
#endif
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
	.cfr_ops = &dp_ops_cfr,
#endif
};
10899 
10900 /*
10901  * dp_soc_set_txrx_ring_map()
10902  * @dp_soc: DP handler for soc
10903  *
10904  * Return: Void
10905  */
10906 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
10907 {
10908 	uint32_t i;
10909 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
10910 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
10911 	}
10912 }
10913 
10914 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
10915 	defined(QCA_WIFI_QCA5018)
10916 /**
10917  * dp_soc_attach_wifi3() - Attach txrx SOC
10918  * @ctrl_psoc: Opaque SOC handle from control plane
10919  * @htc_handle: Opaque HTC handle
10920  * @hif_handle: Opaque HIF handle
10921  * @qdf_osdev: QDF device
10922  * @ol_ops: Offload Operations
10923  * @device_id: Device ID
10924  *
10925  * Return: DP SOC handle on success, NULL on failure
10926  */
10927 struct cdp_soc_t *
10928 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
10929 		    struct hif_opaque_softc *hif_handle,
10930 		    HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
10931 		    struct ol_if_ops *ol_ops, uint16_t device_id)
10932 {
10933 	struct dp_soc *dp_soc = NULL;
10934 
10935 	dp_soc = dp_soc_attach(ctrl_psoc, hif_handle, htc_handle, qdf_osdev,
10936 			       ol_ops, device_id);
10937 	return dp_soc_to_cdp_soc_t(dp_soc);
10938 }
10939 
10940 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
10941 {
10942 	int lmac_id;
10943 
10944 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
10945 		/*Set default host PDEV ID for lmac_id*/
10946 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
10947 				      INVALID_PDEV_ID, lmac_id);
10948 	}
10949 }
10950 
10951 /**
10952  * dp_soc_attach() - Attach txrx SOC
10953  * @ctrl_psoc: Opaque SOC handle from control plane
10954  * @hif_handle: Opaque HIF handle
10955  * @htc_handle: Opaque HTC handle
10956  * @qdf_osdev: QDF device
10957  * @ol_ops: Offload Operations
10958  * @device_id: Device ID
10959  *
10960  * Return: DP SOC handle on success, NULL on failure
10961  */
10962 static struct dp_soc *
10963 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
10964 	      struct hif_opaque_softc *hif_handle, HTC_HANDLE htc_handle,
10965 	      qdf_device_t qdf_osdev, struct ol_if_ops *ol_ops,
10966 	      uint16_t device_id)
10967 {
10968 	int int_ctx;
10969 	struct dp_soc *soc =  NULL;
10970 
10971 	if (!hif_handle) {
10972 		dp_err("HIF handle is NULL");
10973 		goto fail0;
10974 	}
10975 
10976 	soc = qdf_mem_malloc(sizeof(*soc));
10977 	if (!soc) {
10978 		dp_err("DP SOC memory allocation failed");
10979 		goto fail0;
10980 	}
10981 
10982 	soc->hif_handle = hif_handle;
10983 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
10984 	if (!soc->hal_soc)
10985 		goto fail1;
10986 
10987 	int_ctx = 0;
10988 	soc->device_id = device_id;
10989 	soc->cdp_soc.ops = &dp_txrx_ops;
10990 	soc->cdp_soc.ol_ops = ol_ops;
10991 	soc->ctrl_psoc = ctrl_psoc;
10992 	soc->osdev = qdf_osdev;
10993 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
10994 
10995 	/* Reset wbm sg list and flags */
10996 	dp_rx_wbm_sg_list_reset(soc);
10997 
10998 	dp_soc_rx_history_attach(soc);
10999 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
11000 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
11001 	if (!soc->wlan_cfg_ctx) {
11002 		dp_err("wlan_cfg_ctx failed\n");
11003 		goto fail1;
11004 	}
11005 
11006 	dp_soc_cfg_attach(soc);
11007 
11008 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
11009 		dp_err("failed to allocate link desc pool banks");
11010 		goto fail2;
11011 	}
11012 
11013 	if (dp_hw_link_desc_ring_alloc(soc)) {
11014 		dp_err("failed to allocate link_desc_ring");
11015 		goto fail3;
11016 	}
11017 
11018 	if (dp_soc_srng_alloc(soc)) {
11019 		dp_err("failed to allocate soc srng rings");
11020 		goto fail4;
11021 	}
11022 
11023 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
11024 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
11025 		goto fail5;
11026 	}
11027 
11028 	dp_soc_set_interrupt_mode(soc);
11029 	dp_soc_set_def_pdev(soc);
11030 
11031 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
11032 		qdf_dma_mem_stats_read(),
11033 		qdf_heap_mem_stats_read(),
11034 		qdf_skb_mem_stats_read());
11035 
11036 	return soc;
11037 fail5:
11038 	dp_soc_srng_free(soc);
11039 fail4:
11040 	dp_hw_link_desc_ring_free(soc);
11041 fail3:
11042 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
11043 fail2:
11044 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
11045 fail1:
11046 	qdf_mem_free(soc);
11047 fail0:
11048 	return NULL;
11049 }
11050 
11051 /**
11052  * dp_soc_init() - Initialize txrx SOC
11053  * @dp_soc: Opaque DP SOC handle
11054  * @htc_handle: Opaque HTC handle
11055  * @hif_handle: Opaque HIF handle
11056  *
11057  * Return: DP SOC handle on success, NULL on failure
11058  */
11059 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
11060 		  struct hif_opaque_softc *hif_handle)
11061 {
11062 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
11063 	bool is_monitor_mode = false;
11064 	struct hal_reo_params reo_params;
11065 	uint8_t i;
11066 
11067 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
11068 			  WLAN_MD_DP_SOC, "dp_soc");
11069 
11070 	htt_soc = htt_soc_attach(soc, htc_handle);
11071 	if (!htt_soc)
11072 		goto fail0;
11073 
11074 	soc->htt_handle = htt_soc;
11075 
11076 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
11077 		goto fail1;
11078 
11079 	htt_set_htc_handle(htt_soc, htc_handle);
11080 	soc->hif_handle = hif_handle;
11081 
11082 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
11083 	if (!soc->hal_soc)
11084 		goto fail2;
11085 
11086 	dp_soc_cfg_init(soc);
11087 
11088 	/* Reset/Initialize wbm sg list and flags */
11089 	dp_rx_wbm_sg_list_reset(soc);
11090 
11091 	/* Note: Any SRNG ring initialization should happen only after
11092 	 * Interrupt mode is set and followed by filling up the
11093 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
11094 	 */
11095 	dp_soc_set_interrupt_mode(soc);
11096 	if (soc->cdp_soc.ol_ops->get_con_mode &&
11097 	    soc->cdp_soc.ol_ops->get_con_mode() ==
11098 	    QDF_GLOBAL_MONITOR_MODE)
11099 		is_monitor_mode = true;
11100 
11101 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, soc->intr_mode,
11102 				     is_monitor_mode);
11103 
11104 	/* initialize WBM_IDLE_LINK ring */
11105 	if (dp_hw_link_desc_ring_init(soc)) {
11106 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11107 			  FL("dp_hw_link_desc_ring_init failed"));
11108 		goto fail3;
11109 	}
11110 
11111 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
11112 
11113 	if (dp_soc_srng_init(soc)) {
11114 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11115 			  FL("dp_soc_srng_init failed"));
11116 		goto fail4;
11117 	}
11118 
11119 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
11120 			       htt_get_htc_handle(htt_soc),
11121 			       soc->hal_soc, soc->osdev) == NULL)
11122 		goto fail5;
11123 
11124 	/* Initialize descriptors in TCL Rings */
11125 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
11126 		hal_tx_init_data_ring(soc->hal_soc,
11127 				      soc->tcl_data_ring[i].hal_srng);
11128 	}
11129 
11130 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
11131 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11132 			  FL("dp_tx_soc_attach failed"));
11133 		goto fail6;
11134 	}
11135 
11136 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
11137 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
11138 	soc->cce_disable = false;
11139 
11140 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
11141 	qdf_atomic_init(&soc->num_tx_outstanding);
11142 	qdf_atomic_init(&soc->num_tx_exception);
11143 	soc->num_tx_allowed =
11144 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
11145 
11146 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
11147 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
11148 				CDP_CFG_MAX_PEER_ID);
11149 
11150 		if (ret != -EINVAL)
11151 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
11152 
11153 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
11154 				CDP_CFG_CCE_DISABLE);
11155 		if (ret == 1)
11156 			soc->cce_disable = true;
11157 	}
11158 
11159 	/*
11160 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
11161 	 * and IPQ5018 WMAC2 is not there in these platforms.
11162 	 */
11163 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
11164 	    soc->disable_mac2_intr)
11165 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
11166 
11167 	/*
11168 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
11169 	 * WMAC1 is not there in this platform.
11170 	 */
11171 	if (soc->disable_mac1_intr)
11172 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
11173 
11174 	/* Setup HW REO */
11175 	qdf_mem_zero(&reo_params, sizeof(reo_params));
11176 
11177 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
11178 		/*
11179 		 * Reo ring remap is not required if both radios
11180 		 * are offloaded to NSS
11181 		 */
11182 		if (dp_reo_remap_config(soc,
11183 					&reo_params.remap1,
11184 					&reo_params.remap2))
11185 			reo_params.rx_hash_enabled = true;
11186 		else
11187 			reo_params.rx_hash_enabled = false;
11188 	}
11189 
11190 	/* setup the global rx defrag waitlist */
11191 	TAILQ_INIT(&soc->rx.defrag.waitlist);
11192 	soc->rx.defrag.timeout_ms =
11193 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
11194 	soc->rx.defrag.next_flush_ms = 0;
11195 	soc->rx.flags.defrag_timeout_check =
11196 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
11197 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
11198 
11199 	/*
11200 	 * set the fragment destination ring
11201 	 */
11202 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
11203 
11204 	hal_reo_setup(soc->hal_soc, &reo_params);
11205 
11206 	hal_reo_set_err_dst_remap(soc->hal_soc);
11207 
11208 	qdf_atomic_set(&soc->cmn_init_done, 1);
11209 
11210 	dp_soc_wds_attach(soc);
11211 
11212 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
11213 
11214 	qdf_spinlock_create(&soc->peer_ref_mutex);
11215 	qdf_spinlock_create(&soc->ast_lock);
11216 
11217 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
11218 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
11219 	INIT_RX_HW_STATS_LOCK(soc);
11220 
11221 	/* fill the tx/rx cpu ring map*/
11222 	dp_soc_set_txrx_ring_map(soc);
11223 
11224 	qdf_spinlock_create(&soc->htt_stats.lock);
11225 	/* initialize work queue for stats processing */
11226 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
11227 
11228 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
11229 		qdf_dma_mem_stats_read(),
11230 		qdf_heap_mem_stats_read(),
11231 		qdf_skb_mem_stats_read());
11232 
11233 	return soc;
11234 fail6:
11235 	htt_soc_htc_dealloc(soc->htt_handle);
11236 fail5:
11237 	dp_soc_srng_deinit(soc);
11238 fail4:
11239 	dp_hw_link_desc_ring_deinit(soc);
11240 fail3:
11241 	dp_hw_link_desc_ring_free(soc);
11242 fail2:
11243 	htt_htc_pkt_pool_free(htt_soc);
11244 fail1:
11245 	htt_soc_detach(htt_soc);
11246 fail0:
11247 	return NULL;
11248 }
11249 
11250 /**
11251  * dp_soc_init_wifi3() - Initialize txrx SOC
11252  * @soc: Opaque DP SOC handle
11253  * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
11254  * @hif_handle: Opaque HIF handle
11255  * @htc_handle: Opaque HTC handle
11256  * @qdf_osdev: QDF device (Unused)
11257  * @ol_ops: Offload Operations (Unused)
11258  * @device_id: Device ID (Unused)
11259  *
11260  * Return: DP SOC handle on success, NULL on failure
11261  */
11262 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
11263 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
11264 			struct hif_opaque_softc *hif_handle,
11265 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
11266 			struct ol_if_ops *ol_ops, uint16_t device_id)
11267 {
11268 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
11269 }
11270 
11271 #endif
11272 
11273 /*
11274  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
11275  *
11276  * @soc: handle to DP soc
11277  * @mac_id: MAC id
11278  *
11279  * Return: Return pdev corresponding to MAC
11280  */
11281 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
11282 {
11283 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
11284 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
11285 
11286 	/* Typically for MCL as there only 1 PDEV*/
11287 	return soc->pdev_list[0];
11288 }
11289 
11290 /*
11291  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
11292  * @soc:		DP SoC context
11293  * @max_mac_rings:	No of MAC rings
11294  *
11295  * Return: None
11296  */
11297 void dp_is_hw_dbs_enable(struct dp_soc *soc,
11298 				int *max_mac_rings)
11299 {
11300 	bool dbs_enable = false;
11301 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
11302 		dbs_enable = soc->cdp_soc.ol_ops->
11303 		is_hw_dbs_2x2_capable((void *)soc->ctrl_psoc);
11304 
11305 	*max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
11306 }
11307 
11308 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
11309 /*
11310  * dp_cfr_filter() -  Configure HOST RX monitor status ring for CFR
11311  * @soc_hdl: Datapath soc handle
11312  * @pdev_id: id of data path pdev handle
11313  * @enable: Enable/Disable CFR
11314  * @filter_val: Flag to select Filter for monitor mode
11315  */
11316 static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
11317 			  uint8_t pdev_id,
11318 			  bool enable,
11319 			  struct cdp_monitor_filter *filter_val)
11320 {
11321 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11322 	struct dp_pdev *pdev = NULL;
11323 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
11324 	int max_mac_rings;
11325 	uint8_t mac_id = 0;
11326 
11327 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11328 	if (!pdev) {
11329 		dp_err("pdev is NULL");
11330 		return;
11331 	}
11332 
11333 	if (pdev->monitor_vdev) {
11334 		dp_info("No action is needed since monitor mode is enabled\n");
11335 		return;
11336 	}
11337 	soc = pdev->soc;
11338 	pdev->cfr_rcc_mode = false;
11339 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
11340 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
11341 
11342 	dp_debug("Max_mac_rings %d", max_mac_rings);
11343 	dp_info("enable : %d, mode: 0x%x", enable, filter_val->mode);
11344 
11345 	if (enable) {
11346 		pdev->cfr_rcc_mode = true;
11347 
11348 		htt_tlv_filter.ppdu_start = 1;
11349 		htt_tlv_filter.ppdu_end = 1;
11350 		htt_tlv_filter.ppdu_end_user_stats = 1;
11351 		htt_tlv_filter.ppdu_end_user_stats_ext = 1;
11352 		htt_tlv_filter.ppdu_end_status_done = 1;
11353 		htt_tlv_filter.mpdu_start = 1;
11354 		htt_tlv_filter.offset_valid = false;
11355 
11356 		htt_tlv_filter.enable_fp =
11357 			(filter_val->mode & MON_FILTER_PASS) ? 1 : 0;
11358 		htt_tlv_filter.enable_md = 0;
11359 		htt_tlv_filter.enable_mo =
11360 			(filter_val->mode & MON_FILTER_OTHER) ? 1 : 0;
11361 		htt_tlv_filter.fp_mgmt_filter = filter_val->fp_mgmt;
11362 		htt_tlv_filter.fp_ctrl_filter = filter_val->fp_ctrl;
11363 		htt_tlv_filter.fp_data_filter = filter_val->fp_data;
11364 		htt_tlv_filter.mo_mgmt_filter = filter_val->mo_mgmt;
11365 		htt_tlv_filter.mo_ctrl_filter = filter_val->mo_ctrl;
11366 		htt_tlv_filter.mo_data_filter = filter_val->mo_data;
11367 	}
11368 
11369 	for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
11370 		int mac_for_pdev =
11371 			dp_get_mac_id_for_pdev(mac_id,
11372 					       pdev->pdev_id);
11373 
11374 		htt_h2t_rx_ring_cfg(soc->htt_handle,
11375 				    mac_for_pdev,
11376 				    soc->rxdma_mon_status_ring[mac_id]
11377 				    .hal_srng,
11378 				    RXDMA_MONITOR_STATUS,
11379 				    RX_MON_STATUS_BUF_SIZE,
11380 				    &htt_tlv_filter);
11381 	}
11382 }
11383 
11384 /**
11385  * dp_get_cfr_rcc() - get cfr rcc config
11386  * @soc_hdl: Datapath soc handle
11387  * @pdev_id: id of objmgr pdev
11388  *
11389  * Return: true/false based on cfr mode setting
11390  */
11391 static
11392 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
11393 {
11394 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11395 	struct dp_pdev *pdev = NULL;
11396 
11397 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11398 	if (!pdev) {
11399 		dp_err("pdev is NULL");
11400 		return false;
11401 	}
11402 
11403 	return pdev->cfr_rcc_mode;
11404 }
11405 
11406 /**
11407  * dp_set_cfr_rcc() - enable/disable cfr rcc config
11408  * @soc_hdl: Datapath soc handle
11409  * @pdev_id: id of objmgr pdev
11410  * @enable: Enable/Disable cfr rcc mode
11411  *
11412  * Return: none
11413  */
11414 static
11415 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
11416 {
11417 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11418 	struct dp_pdev *pdev = NULL;
11419 
11420 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11421 	if (!pdev) {
11422 		dp_err("pdev is NULL");
11423 		return;
11424 	}
11425 
11426 	pdev->cfr_rcc_mode = enable;
11427 }
11428 
11429 /*
11430  * dp_get_cfr_dbg_stats - Get the debug statistics for CFR
11431  * @soc_hdl: Datapath soc handle
11432  * @pdev_id: id of data path pdev handle
11433  * @cfr_rcc_stats: CFR RCC debug statistics buffer
11434  *
11435  * Return: none
11436  */
11437 static inline void
11438 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11439 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
11440 {
11441 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11442 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11443 
11444 	if (!pdev) {
11445 		dp_err("Invalid pdev");
11446 		return;
11447 	}
11448 
11449 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
11450 		     sizeof(struct cdp_cfr_rcc_stats));
11451 }
11452 
11453 /*
11454  * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR
11455  * @soc_hdl: Datapath soc handle
11456  * @pdev_id: id of data path pdev handle
11457  *
11458  * Return: none
11459  */
11460 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
11461 				   uint8_t pdev_id)
11462 {
11463 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11464 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11465 
11466 	if (!pdev) {
11467 		dp_err("dp pdev is NULL");
11468 		return;
11469 	}
11470 
11471 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
11472 }
11473 
11474 /*
11475  * dp_enable_mon_reap_timer() - enable/disable reap timer
11476  * @soc_hdl: Datapath soc handle
11477  * @pdev_id: id of objmgr pdev
11478  * @enable: Enable/Disable reap timer of monitor status ring
11479  *
11480  * Return: none
11481  */
11482 static void
11483 dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11484 			 bool enable)
11485 {
11486 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11487 	struct dp_pdev *pdev = NULL;
11488 
11489 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11490 	if (!pdev) {
11491 		dp_err("pdev is NULL");
11492 		return;
11493 	}
11494 
11495 	pdev->enable_reap_timer_non_pkt = enable;
11496 	if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
11497 		dp_debug("pktlog enabled %d", pdev->rx_pktlog_mode);
11498 		return;
11499 	}
11500 
11501 	if (!soc->reap_timer_init) {
11502 		dp_err("reap timer not init");
11503 		return;
11504 	}
11505 
11506 	if (enable)
11507 		qdf_timer_mod(&soc->mon_reap_timer,
11508 			      DP_INTR_POLL_TIMER_MS);
11509 	else
11510 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
11511 }
11512 #endif
11513 
11514 /*
11515  * dp_is_enable_reap_timer_non_pkt() - check if mon reap timer is
11516  * enabled by non-pkt log or not
11517  * @pdev: point to dp pdev
11518  *
11519  * Return: true if mon reap timer is enabled by non-pkt log
11520  */
11521 static bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev)
11522 {
11523 	if (!pdev) {
11524 		dp_err("null pdev");
11525 		return false;
11526 	}
11527 
11528 	return pdev->enable_reap_timer_non_pkt;
11529 }
11530 
11531 /*
11532 * dp_set_pktlog_wifi3() - attach txrx vdev
11533 * @pdev: Datapath PDEV handle
11534 * @event: which event's notifications are being subscribed to
11535 * @enable: WDI event subscribe or not. (True or False)
11536 *
11537 * Return: Success, NULL on failure
11538 */
11539 #ifdef WDI_EVENT_ENABLE
int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
		bool enable)
{
	struct dp_soc *soc = NULL;
	int max_mac_rings = wlan_cfg_get_num_mac_rings
					(pdev->wlan_cfg_ctx);
	uint8_t mac_id = 0;

	soc = pdev->soc;
	/* clamp ring count to 1 when the target is not 2x2 DBS capable */
	dp_is_hw_dbs_enable(soc, &max_mac_rings);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			FL("Max_mac_rings %d "),
			max_mac_rings);

	if (enable) {
		switch (event) {
		case WDI_EVENT_RX_DESC:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
				dp_mon_filter_setup_rx_pkt_log_full(pdev);
				/* roll mode back if filter programming fails */
				if (dp_mon_filter_update(pdev) !=
						QDF_STATUS_SUCCESS) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  FL("Pktlog full filters set failed"));
					dp_mon_filter_reset_rx_pkt_log_full(pdev);
					pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
					return 0;
				}

				/* start the status-ring reap timer unless a
				 * non-pktlog user already keeps it running
				 */
				if (soc->reap_timer_init &&
				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
					qdf_timer_mod(&soc->mon_reap_timer,
					DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;

				/*
				 * Set the packet log lite mode filter.
				 */
				dp_mon_filter_setup_rx_pkt_log_lite(pdev);
				if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  FL("Pktlog lite filters set failed"));
					dp_mon_filter_reset_rx_pkt_log_lite(pdev);
					pdev->rx_pktlog_mode =
						DP_RX_PKTLOG_DISABLED;
					return 0;
				}

				if (soc->reap_timer_init &&
				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
					qdf_timer_mod(&soc->mon_reap_timer,
					DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			/* ask FW for lite PPDU stats on every MAC ring */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
							mac_id,	pdev->pdev_id);

				pdev->pktlog_ppdu_stats = true;
				dp_h2t_cfg_stats_msg_send(pdev,
					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
					mac_for_pdev);
			}
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	} else {
		switch (event) {
		case WDI_EVENT_RX_DESC:
		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
				/* reset both full and lite filters: only one
				 * was active, resetting the other is harmless
				 */
				dp_mon_filter_reset_rx_pkt_log_full(pdev);
				if (dp_mon_filter_update(pdev) !=
						QDF_STATUS_SUCCESS) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  FL("Pktlog filters reset failed"));
					return 0;
				}

				dp_mon_filter_reset_rx_pkt_log_lite(pdev);
				if (dp_mon_filter_update(pdev) !=
						QDF_STATUS_SUCCESS) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  FL("Pktlog filters reset failed"));
					return 0;
				}

				/* stop the reap timer unless a non-pktlog
				 * user still needs it
				 */
				if (soc->reap_timer_init &&
				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
					qdf_timer_stop(&soc->mon_reap_timer);
			}
			break;
		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW,
			 * value 0 is passed. Once these macros are defined in
			 * the htt header file, proper macros will be used.
			 */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

				pdev->pktlog_ppdu_stats = false;
				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, 0,
								mac_for_pdev);
				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
								mac_for_pdev);
				} else if (pdev->enhanced_stats_en) {
					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
								mac_for_pdev);
				}
			}

			break;
		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	}
	return 0;
}
11711 #endif
11712 
11713 /**
11714  * dp_bucket_index() - Return index from array
11715  *
11716  * @delay: delay measured
11717  * @array: array used to index corresponding delay
11718  *
11719  * Return: index
11720  */
11721 static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
11722 {
11723 	uint8_t i = CDP_DELAY_BUCKET_0;
11724 
11725 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
11726 		if (delay >= array[i] && delay <= array[i + 1])
11727 			return i;
11728 	}
11729 
11730 	return (CDP_DELAY_BUCKET_MAX - 1);
11731 }
11732 
11733 /**
11734  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
11735  *				type of delay
11736  *
11737  * @pdev: pdev handle
11738  * @delay: delay in ms
11739  * @tid: tid value
11740  * @mode: type of tx delay mode
11741  * @ring_id: ring number
11742  * Return: pointer to cdp_delay_stats structure
11743  */
11744 static struct cdp_delay_stats *
11745 dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
11746 		      uint8_t tid, uint8_t mode, uint8_t ring_id)
11747 {
11748 	uint8_t delay_index = 0;
11749 	struct cdp_tid_tx_stats *tstats =
11750 		&pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
11751 	struct cdp_tid_rx_stats *rstats =
11752 		&pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
11753 	/*
11754 	 * cdp_fw_to_hw_delay_range
11755 	 * Fw to hw delay ranges in milliseconds
11756 	 */
11757 	uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
11758 		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
11759 
11760 	/*
11761 	 * cdp_sw_enq_delay_range
11762 	 * Software enqueue delay ranges in milliseconds
11763 	 */
11764 	uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
11765 		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
11766 
11767 	/*
11768 	 * cdp_intfrm_delay_range
11769 	 * Interframe delay ranges in milliseconds
11770 	 */
11771 	uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
11772 		0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
11773 
11774 	/*
11775 	 * Update delay stats in proper bucket
11776 	 */
11777 	switch (mode) {
11778 	/* Software Enqueue delay ranges */
11779 	case CDP_DELAY_STATS_SW_ENQ:
11780 
11781 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
11782 		tstats->swq_delay.delay_bucket[delay_index]++;
11783 		return &tstats->swq_delay;
11784 
11785 	/* Tx Completion delay ranges */
11786 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
11787 
11788 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
11789 		tstats->hwtx_delay.delay_bucket[delay_index]++;
11790 		return &tstats->hwtx_delay;
11791 
11792 	/* Interframe tx delay ranges */
11793 	case CDP_DELAY_STATS_TX_INTERFRAME:
11794 
11795 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
11796 		tstats->intfrm_delay.delay_bucket[delay_index]++;
11797 		return &tstats->intfrm_delay;
11798 
11799 	/* Interframe rx delay ranges */
11800 	case CDP_DELAY_STATS_RX_INTERFRAME:
11801 
11802 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
11803 		rstats->intfrm_delay.delay_bucket[delay_index]++;
11804 		return &rstats->intfrm_delay;
11805 
11806 	/* Ring reap to indication to network stack */
11807 	case CDP_DELAY_STATS_REAP_STACK:
11808 
11809 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
11810 		rstats->to_stack_delay.delay_bucket[delay_index]++;
11811 		return &rstats->to_stack_delay;
11812 	default:
11813 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
11814 			  "%s Incorrect delay mode: %d", __func__, mode);
11815 	}
11816 
11817 	return NULL;
11818 }
11819 
11820 /**
11821  * dp_update_delay_stats() - Update delay statistics in structure
11822  *				and fill min, max and avg delay
11823  *
11824  * @pdev: pdev handle
11825  * @delay: delay in ms
11826  * @tid: tid value
11827  * @mode: type of tx delay mode
11828  * @ring id: ring number
11829  * Return: none
11830  */
11831 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
11832 			   uint8_t tid, uint8_t mode, uint8_t ring_id)
11833 {
11834 	struct cdp_delay_stats *dstats = NULL;
11835 
11836 	/*
11837 	 * Delay ranges are different for different delay modes
11838 	 * Get the correct index to update delay bucket
11839 	 */
11840 	dstats = dp_fill_delay_buckets(pdev, delay, tid, mode, ring_id);
11841 	if (qdf_unlikely(!dstats))
11842 		return;
11843 
11844 	if (delay != 0) {
11845 		/*
11846 		 * Compute minimum,average and maximum
11847 		 * delay
11848 		 */
11849 		if (delay < dstats->min_delay)
11850 			dstats->min_delay = delay;
11851 
11852 		if (delay > dstats->max_delay)
11853 			dstats->max_delay = delay;
11854 
11855 		/*
11856 		 * Average over delay measured till now
11857 		 */
11858 		if (!dstats->avg_delay)
11859 			dstats->avg_delay = delay;
11860 		else
11861 			dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
11862 	}
11863 }
11864 
11865 /**
11866  * dp_get_peer_mac_list(): function to get peer mac list of vdev
11867  * @soc: Datapath soc handle
11868  * @vdev_id: vdev id
11869  * @newmac: Table of the clients mac
11870  * @mac_cnt: No. of MACs required
11871  *
11872  * return: no of clients
11873  */
11874 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
11875 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
11876 			      u_int16_t mac_cnt)
11877 {
11878 	struct dp_vdev *vdev =
11879 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
11880 						   vdev_id);
11881 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
11882 	struct dp_peer *peer;
11883 	uint16_t new_mac_cnt = 0;
11884 
11885 	if (!vdev)
11886 		return new_mac_cnt;
11887 
11888 	qdf_spin_lock_bh(&dp_soc->peer_ref_mutex);
11889 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
11890 		if (peer->bss_peer)
11891 			continue;
11892 		if (new_mac_cnt < mac_cnt) {
11893 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
11894 			new_mac_cnt++;
11895 		}
11896 	}
11897 	qdf_spin_unlock_bh(&dp_soc->peer_ref_mutex);
11898 	return new_mac_cnt;
11899 }
11900 
11901 /**
11902  * dp_pdev_srng_deinit() - de-initialize all pdev srng ring including
11903  *			   monitor rings
11904  * @pdev: Datapath pdev handle
11905  *
11906  */
11907 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
11908 {
11909 	struct dp_soc *soc = pdev->soc;
11910 	uint8_t i;
11911 
11912 	dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], RXDMA_BUF,
11913 		       pdev->lmac_id);
11914 
11915 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
11916 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
11917 
11918 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
11919 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
11920 
11921 		wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].base_vaddr_unaligned);
11922 		dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
11923 			       RXDMA_DST, lmac_id);
11924 	}
11925 
11926 	dp_mon_rings_deinit(pdev);
11927 }
11928 
11929 /**
11930  * dp_pdev_srng_init() - initialize all pdev srng rings including
11931  *			   monitor rings
11932  * @pdev: Datapath pdev handle
11933  *
11934  * return: QDF_STATUS_SUCCESS on success
11935  *	   QDF_STATUS_E_NOMEM on failure
11936  */
11937 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
11938 {
11939 	struct dp_soc *soc = pdev->soc;
11940 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
11941 	uint32_t i;
11942 
11943 	soc_cfg_ctx = soc->wlan_cfg_ctx;
11944 
11945 	if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
11946 			 RXDMA_BUF, 0, pdev->lmac_id)) {
11947 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11948 			  FL("dp_srng_init failed rx refill ring"));
11949 		goto fail1;
11950 	}
11951 
11952 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
11953 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
11954 			goto fail1;
11955 	}
11956 
11957 	if (dp_mon_rings_init(soc, pdev)) {
11958 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11959 			  FL("MONITOR rings setup failed"));
11960 		goto fail1;
11961 	}
11962 
11963 	/* LMAC RxDMA to SW Rings configuration */
11964 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
11965 		/* Only valid for MCL */
11966 		pdev = soc->pdev_list[0];
11967 
11968 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
11969 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
11970 		struct dp_srng *srng = &soc->rxdma_err_dst_ring[lmac_id];
11971 
11972 		if (srng->hal_srng)
11973 			continue;
11974 
11975 		if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
11976 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11977 				  FL(RNG_ERR "rxdma_err_dst_ring"));
11978 			goto fail1;
11979 		}
11980 		wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].base_vaddr_unaligned,
11981 				  soc->rxdma_err_dst_ring[lmac_id].alloc_size,
11982 				  soc->ctrl_psoc,
11983 				  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
11984 				  "rxdma_err_dst");
11985 	}
11986 	return QDF_STATUS_SUCCESS;
11987 
11988 fail1:
11989 	dp_pdev_srng_deinit(pdev);
11990 	return QDF_STATUS_E_NOMEM;
11991 }
11992 
11993 /**
11994  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
11995  * pdev: Datapath pdev handle
11996  *
11997  */
11998 static void dp_pdev_srng_free(struct dp_pdev *pdev)
11999 {
12000 	struct dp_soc *soc = pdev->soc;
12001 	uint8_t i;
12002 
12003 	dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
12004 	dp_mon_rings_free(pdev);
12005 
12006 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
12007 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
12008 
12009 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12010 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
12011 
12012 		dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
12013 	}
12014 }
12015 
12016 /**
12017  * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
12018  *			  monitor rings
12019  * pdev: Datapath pdev handle
12020  *
12021  * return: QDF_STATUS_SUCCESS on success
12022  *	   QDF_STATUS_E_NOMEM on failure
12023  */
12024 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
12025 {
12026 	struct dp_soc *soc = pdev->soc;
12027 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12028 	uint32_t ring_size;
12029 	uint32_t i;
12030 
12031 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12032 
12033 	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
12034 	if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
12035 			  RXDMA_BUF, ring_size, 0)) {
12036 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12037 			  FL("dp_srng_alloc failed rx refill ring"));
12038 		goto fail1;
12039 	}
12040 
12041 	if (dp_mon_rings_alloc(soc, pdev)) {
12042 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12043 			  FL("MONITOR rings setup failed"));
12044 		goto fail1;
12045 	}
12046 
12047 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
12048 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
12049 			goto fail1;
12050 	}
12051 
12052 	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
12053 	/* LMAC RxDMA to SW Rings configuration */
12054 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
12055 		/* Only valid for MCL */
12056 		pdev = soc->pdev_list[0];
12057 
12058 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12059 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
12060 		struct dp_srng *srng = &soc->rxdma_err_dst_ring[lmac_id];
12061 
12062 		if (srng->base_vaddr_unaligned)
12063 			continue;
12064 
12065 		if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
12066 			QDF_TRACE(QDF_MODULE_ID_DP,
12067 				  QDF_TRACE_LEVEL_ERROR,
12068 				  FL(RNG_ERR "rxdma_err_dst_ring"));
12069 			goto fail1;
12070 		}
12071 	}
12072 
12073 	return QDF_STATUS_SUCCESS;
12074 fail1:
12075 	dp_pdev_srng_free(pdev);
12076 	return QDF_STATUS_E_NOMEM;
12077 }
12078 
12079 /**
12080  * dp_soc_srng_deinit() - de-initialize soc srng rings
12081  * @soc: Datapath soc handle
12082  *
12083  */
12084 static void dp_soc_srng_deinit(struct dp_soc *soc)
12085 {
12086 	uint32_t i;
12087 	/* Free the ring memories */
12088 	/* Common rings */
12089 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned);
12090 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
12091 
12092 	/* Tx data rings */
12093 	for (i = 0; i < soc->num_tcl_data_rings; i++)
12094 		dp_deinit_tx_pair_by_index(soc, i);
12095 
12096 	/* TCL command and status rings */
12097 	wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned);
12098 	dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring, TCL_CMD_CREDIT, 0);
12099 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned);
12100 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
12101 
12102 	/* Rx data rings */
12103 	soc->num_reo_dest_rings =
12104 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
12105 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
12106 		/* TODO: Get number of rings and ring sizes
12107 		 * from wlan_cfg
12108 		 */
12109 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned);
12110 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
12111 	}
12112 
12113 	/* REO reinjection ring */
12114 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned);
12115 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
12116 
12117 	/* Rx release ring */
12118 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned);
12119 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
12120 
12121 	/* Rx exception ring */
12122 	/* TODO: Better to store ring_type and ring_num in
12123 	 * dp_srng during setup
12124 	 */
12125 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned);
12126 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
12127 
12128 	/* REO command and status rings */
12129 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned);
12130 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
12131 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned);
12132 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
12133 }
12134 
12135 /**
12136  * dp_soc_srng_init() - Initialize soc level srng rings
12137  * @soc: Datapath soc handle
12138  *
12139  * return: QDF_STATUS_SUCCESS on success
12140  *	   QDF_STATUS_E_FAILURE on failure
12141  */
12142 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
12143 {
12144 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12145 	uint32_t num_tcl_data_rings, num_reo_dest_rings;
12146 	uint8_t i;
12147 
12148 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12149 
12150 	dp_enable_verbose_debug(soc);
12151 
12152 	/* WBM descriptor release ring */
12153 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
12154 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12155 			  FL("dp_srng_init failed for wbm_desc_rel_ring"));
12156 		goto fail1;
12157 	}
12158 
12159 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
12160 			  soc->wbm_desc_rel_ring.alloc_size,
12161 			  soc->ctrl_psoc,
12162 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
12163 			  "wbm_desc_rel_ring");
12164 
12165 	/* TCL command and status rings */
12166 	if (dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
12167 			 TCL_CMD_CREDIT, 0, 0)) {
12168 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12169 			  FL("dp_srng_init failed for tcl_cmd_ring"));
12170 		goto fail1;
12171 	}
12172 
12173 	wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
12174 			  soc->tcl_cmd_credit_ring.alloc_size,
12175 			  soc->ctrl_psoc,
12176 			  WLAN_MD_DP_SRNG_TCL_CMD,
12177 			  "wbm_desc_rel_ring");
12178 
12179 	if (dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0)) {
12180 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12181 			  FL("dp_srng_init failed for tcl_status_ring"));
12182 		goto fail1;
12183 	}
12184 
12185 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
12186 			  soc->tcl_status_ring.alloc_size,
12187 			  soc->ctrl_psoc,
12188 			  WLAN_MD_DP_SRNG_TCL_STATUS,
12189 			  "wbm_desc_rel_ring");
12190 
12191 	/* REO reinjection ring */
12192 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
12193 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12194 			  FL("dp_srng_init failed for reo_reinject_ring"));
12195 		goto fail1;
12196 	}
12197 
12198 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
12199 			  soc->reo_reinject_ring.alloc_size,
12200 			  soc->ctrl_psoc,
12201 			  WLAN_MD_DP_SRNG_REO_REINJECT,
12202 			  "reo_reinject_ring");
12203 
12204 	/* Rx release ring */
12205 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0)) {
12206 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12207 			  FL("dp_srng_init failed for rx_rel_ring"));
12208 		goto fail1;
12209 	}
12210 
12211 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
12212 			  soc->rx_rel_ring.alloc_size,
12213 			  soc->ctrl_psoc,
12214 			  WLAN_MD_DP_SRNG_RX_REL,
12215 			  "reo_release_ring");
12216 
12217 	/* Rx exception ring */
12218 	if (dp_srng_init(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
12219 			 MAX_REO_DEST_RINGS)) {
12220 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12221 			  FL("dp_srng_init failed for reo_exception_ring"));
12222 		goto fail1;
12223 	}
12224 
12225 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
12226 			  soc->reo_exception_ring.alloc_size,
12227 			  soc->ctrl_psoc,
12228 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
12229 			  "reo_exception_ring");
12230 
12231 	/* REO command and status rings */
12232 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
12233 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12234 			  FL("dp_srng_init failed for reo_cmd_ring"));
12235 		goto fail1;
12236 	}
12237 
12238 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
12239 			  soc->reo_cmd_ring.alloc_size,
12240 			  soc->ctrl_psoc,
12241 			  WLAN_MD_DP_SRNG_REO_CMD,
12242 			  "reo_cmd_ring");
12243 
12244 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
12245 	TAILQ_INIT(&soc->rx.reo_cmd_list);
12246 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
12247 
12248 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
12249 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12250 			  FL("dp_srng_init failed for reo_status_ring"));
12251 		goto fail1;
12252 	}
12253 
12254 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
12255 			  soc->reo_status_ring.alloc_size,
12256 			  soc->ctrl_psoc,
12257 			  WLAN_MD_DP_SRNG_REO_STATUS,
12258 			  "reo_status_ring");
12259 
12260 	num_tcl_data_rings = wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
12261 	num_reo_dest_rings = wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
12262 
12263 	for (i = 0; i < num_tcl_data_rings; i++) {
12264 		if (dp_init_tx_ring_pair_by_index(soc, i))
12265 			goto fail1;
12266 	}
12267 
12268 	dp_create_ext_stats_event(soc);
12269 
12270 	for (i = 0; i < num_reo_dest_rings; i++) {
12271 		/* Initialize REO destination ring */
12272 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
12273 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12274 				  FL("dp_srng_init failed for reo_dest_ringn"));
12275 			goto fail1;
12276 		}
12277 
12278 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
12279 				  soc->reo_dest_ring[i].alloc_size,
12280 				  soc->ctrl_psoc,
12281 				  WLAN_MD_DP_SRNG_REO_DEST,
12282 				  "reo_dest_ring");
12283 	}
12284 
12285 	return QDF_STATUS_SUCCESS;
12286 fail1:
12287 	/*
12288 	 * Cleanup will be done as part of soc_detach, which will
12289 	 * be called on pdev attach failure
12290 	 */
12291 	dp_soc_srng_deinit(soc);
12292 	return QDF_STATUS_E_FAILURE;
12293 }
12294 
12295 /**
12296  * dp_soc_srng_free() - free soc level srng rings
12297  * @soc: Datapath soc handle
12298  *
12299  */
12300 static void dp_soc_srng_free(struct dp_soc *soc)
12301 {
12302 	uint32_t i;
12303 
12304 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
12305 
12306 	for (i = 0; i < soc->num_tcl_data_rings; i++)
12307 		dp_free_tx_ring_pair_by_index(soc, i);
12308 
12309 	dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
12310 	dp_srng_free(soc, &soc->tcl_status_ring);
12311 
12312 	for (i = 0; i < soc->num_reo_dest_rings; i++)
12313 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
12314 
12315 	dp_srng_free(soc, &soc->reo_reinject_ring);
12316 	dp_srng_free(soc, &soc->rx_rel_ring);
12317 	dp_srng_free(soc, &soc->reo_exception_ring);
12318 	dp_srng_free(soc, &soc->reo_cmd_ring);
12319 	dp_srng_free(soc, &soc->reo_status_ring);
12320 }
12321 
12322 /**
12323  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
12324  * @soc: Datapath soc handle
12325  *
12326  * return: QDF_STATUS_SUCCESS on success
12327  *	   QDF_STATUS_E_NOMEM on failure
12328  */
12329 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
12330 {
12331 	uint32_t entries;
12332 	uint32_t i;
12333 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12334 	uint32_t num_tcl_data_rings, num_reo_dest_rings;
12335 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
12336 	uint32_t tx_comp_ring_size, tx_ring_size, reo_dst_ring_size;
12337 
12338 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12339 
12340 	/* sw2wbm link descriptor release ring */
12341 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
12342 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
12343 			  entries, 0)) {
12344 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12345 			  FL("dp_srng_alloc failed for wbm_desc_rel_ring"));
12346 		goto fail1;
12347 	}
12348 
12349 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
12350 	/* TCL command and status rings */
12351 	if (dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring, TCL_CMD_CREDIT,
12352 			  entries, 0)) {
12353 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12354 			  FL("dp_srng_alloc failed for tcl_cmd_ring"));
12355 		goto fail1;
12356 	}
12357 
12358 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
12359 	if (dp_srng_alloc(soc, &soc->tcl_status_ring, TCL_STATUS, entries,
12360 			  0)) {
12361 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12362 			  FL("dp_srng_alloc failed for tcl_status_ring"));
12363 		goto fail1;
12364 	}
12365 
12366 	/* REO reinjection ring */
12367 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
12368 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
12369 			  entries, 0)) {
12370 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12371 			  FL("dp_srng_alloc failed for reo_reinject_ring"));
12372 		goto fail1;
12373 	}
12374 
12375 	/* Rx release ring */
12376 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
12377 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
12378 			  entries, 0)) {
12379 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12380 			  FL("dp_srng_alloc failed for rx_rel_ring"));
12381 		goto fail1;
12382 	}
12383 
12384 	/* Rx exception ring */
12385 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
12386 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
12387 			  entries, 0)) {
12388 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12389 			  FL("dp_srng_alloc failed for reo_exception_ring"));
12390 		goto fail1;
12391 	}
12392 
12393 	/* REO command and status rings */
12394 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
12395 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
12396 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12397 			  FL("dp_srng_alloc failed for reo_cmd_ring"));
12398 		goto fail1;
12399 	}
12400 
12401 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
12402 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
12403 			  entries, 0)) {
12404 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12405 			  FL("dp_srng_alloc failed for reo_status_ring"));
12406 		goto fail1;
12407 	}
12408 
12409 	num_tcl_data_rings = wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
12410 	num_reo_dest_rings = wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
12411 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
12412 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
12413 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
12414 
12415 	/* Disable cached desc if NSS offload is enabled */
12416 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
12417 		cached = 0;
12418 
12419 	for (i = 0; i < num_tcl_data_rings; i++) {
12420 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
12421 			goto fail1;
12422 	}
12423 
12424 	soc->num_tcl_data_rings = num_tcl_data_rings;
12425 
12426 	for (i = 0; i < num_reo_dest_rings; i++) {
12427 		/* Setup REO destination ring */
12428 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
12429 				  reo_dst_ring_size, cached)) {
12430 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12431 				  FL("dp_srng_alloc failed for reo_dest_ring"));
12432 			goto fail1;
12433 		}
12434 	}
12435 	soc->num_reo_dest_rings = num_reo_dest_rings;
12436 
12437 	return QDF_STATUS_SUCCESS;
12438 
12439 fail1:
12440 	dp_soc_srng_free(soc);
12441 	return QDF_STATUS_E_NOMEM;
12442 }
12443 
12444 /**
12445  * dp_soc_cfg_init() - initialize target specific configuration
12446  *		       during dp_soc_init
12447  * @soc: dp soc handle
12448  */
12449 static void dp_soc_cfg_init(struct dp_soc *soc)
12450 {
12451 	int target_type;
12452 
12453 	target_type = hal_get_target_type(soc->hal_soc);
12454 	switch (target_type) {
12455 	case TARGET_TYPE_QCA6290:
12456 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12457 					       REO_DST_RING_SIZE_QCA6290);
12458 		soc->ast_override_support = 1;
12459 		soc->da_war_enabled = false;
12460 		break;
12461 	case TARGET_TYPE_QCA6390:
12462 	case TARGET_TYPE_QCA6490:
12463 	case TARGET_TYPE_QCA6750:
12464 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12465 					       REO_DST_RING_SIZE_QCA6290);
12466 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
12467 		soc->ast_override_support = 1;
12468 		if (soc->cdp_soc.ol_ops->get_con_mode &&
12469 		    soc->cdp_soc.ol_ops->get_con_mode() ==
12470 		    QDF_GLOBAL_MONITOR_MODE) {
12471 			int int_ctx;
12472 
12473 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
12474 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
12475 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
12476 			}
12477 		}
12478 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
12479 		break;
12480 	case TARGET_TYPE_QCA8074:
12481 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
12482 							   MON_BUF_MIN_ENTRIES);
12483 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12484 					       REO_DST_RING_SIZE_QCA8074);
12485 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
12486 		soc->da_war_enabled = true;
12487 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
12488 		break;
12489 	case TARGET_TYPE_QCA8074V2:
12490 	case TARGET_TYPE_QCA6018:
12491 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
12492 							   MON_BUF_MIN_ENTRIES);
12493 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12494 					       REO_DST_RING_SIZE_QCA8074);
12495 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
12496 		soc->hw_nac_monitor_support = 1;
12497 		soc->ast_override_support = 1;
12498 		soc->per_tid_basize_max_tid = 8;
12499 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
12500 		soc->da_war_enabled = false;
12501 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
12502 		break;
12503 	case TARGET_TYPE_QCN9000:
12504 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
12505 							   MON_BUF_MIN_ENTRIES);
12506 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12507 					       REO_DST_RING_SIZE_QCN9000);
12508 		soc->ast_override_support = 1;
12509 		soc->da_war_enabled = false;
12510 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
12511 		soc->hw_nac_monitor_support = 1;
12512 		soc->per_tid_basize_max_tid = 8;
12513 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
12514 		soc->lmac_polled_mode = 0;
12515 		soc->wbm_release_desc_rx_sg_support = 1;
12516 		if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE))
12517 			soc->full_mon_mode = true;
12518 		break;
12519 	case TARGET_TYPE_QCA5018:
12520 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12521 					       REO_DST_RING_SIZE_QCA8074);
12522 		soc->ast_override_support = 1;
12523 		soc->da_war_enabled = false;
12524 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
12525 		soc->hw_nac_monitor_support = 1;
12526 		soc->per_tid_basize_max_tid = 8;
12527 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
12528 		soc->disable_mac1_intr = 1;
12529 		soc->disable_mac2_intr = 1;
12530 		soc->wbm_release_desc_rx_sg_support = 1;
12531 		break;
12532 	default:
12533 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
12534 		qdf_assert_always(0);
12535 		break;
12536 	}
12537 }
12538 
12539 /**
12540  * dp_soc_cfg_attach() - set target specific configuration in
12541  *			 dp soc cfg.
12542  * @soc: dp soc handle
12543  */
12544 static void dp_soc_cfg_attach(struct dp_soc *soc)
12545 {
12546 	int target_type;
12547 	int nss_cfg = 0;
12548 
12549 	target_type = hal_get_target_type(soc->hal_soc);
12550 	switch (target_type) {
12551 	case TARGET_TYPE_QCA6290:
12552 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12553 					       REO_DST_RING_SIZE_QCA6290);
12554 		break;
12555 	case TARGET_TYPE_QCA6390:
12556 	case TARGET_TYPE_QCA6490:
12557 	case TARGET_TYPE_QCA6750:
12558 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12559 					       REO_DST_RING_SIZE_QCA6290);
12560 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
12561 		break;
12562 	case TARGET_TYPE_QCA8074:
12563 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
12564 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12565 					       REO_DST_RING_SIZE_QCA8074);
12566 		break;
12567 	case TARGET_TYPE_QCA8074V2:
12568 	case TARGET_TYPE_QCA6018:
12569 	case TARGET_TYPE_QCA5018:
12570 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
12571 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12572 					       REO_DST_RING_SIZE_QCA8074);
12573 		break;
12574 	case TARGET_TYPE_QCN9000:
12575 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
12576 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12577 					       REO_DST_RING_SIZE_QCN9000);
12578 		break;
12579 	default:
12580 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
12581 		qdf_assert_always(0);
12582 		break;
12583 	}
12584 
12585 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
12586 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
12587 
12588 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
12589 
12590 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
12591 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
12592 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
12593 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
12594 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
12595 	}
12596 }
12597 
/**
 * dp_pdev_init() - initialize an attached txrx pdev
 * @txrx_soc: Datapath soc handle (cast to struct dp_soc internally)
 * @htc_handle: HTC handle for host-target interface
 * @qdf_osdev: QDF OS device
 * @pdev_id: id of the pdev within soc->pdev_list to initialize
 *
 * Initializes the pdev level objects in dependency order: monitor
 * filters, WDI events, pdev SRNG rings, tx path, locks/lists, monitor
 * filter defaults, stats, IPA resources and rx descriptor pools.
 * Each failure point unwinds everything set up before it via the
 * fail9..fail0 label chain.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */
static inline QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
				      HTC_HANDLE htc_handle,
				      qdf_device_t qdf_osdev,
				      uint8_t pdev_id)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	int nss_cfg;
	void *sojourn_buf;
	QDF_STATUS ret;

	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	struct dp_pdev *pdev = soc->pdev_list[pdev_id];

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	pdev->soc = soc;
	pdev->pdev_id = pdev_id;

	pdev->filter = dp_mon_filter_alloc(pdev);
	if (!pdev->filter) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Memory allocation failed for monitor filters"));
		/* NOTE(review): ret is set here but fail0 returns
		 * QDF_STATUS_E_FAILURE unconditionally, so this
		 * assignment has no effect.
		 */
		ret = QDF_STATUS_E_NOMEM;
		goto fail0;
	}

	/*
	 * Variable to prevent double pdev deinitialization during
	 * radio detach execution .i.e. in the absence of any vdev.
	 */
	pdev->pdev_deinit = 0;

	if (dp_wdi_event_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "dp_wdi_evet_attach failed");
		goto fail1;
	}

	if (dp_pdev_srng_init(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to initialize pdev srng rings"));
		goto fail2;
	}

	/* Initialize descriptors in TCL Rings used by IPA */
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		hal_tx_init_data_ring(soc->hal_soc,
				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);

	/*
	 * Initialize command/credit ring descriptor
	 * Command/CREDIT ring also used for sending DATA cmds
	 */
	hal_tx_init_cmd_credit_ring(soc->hal_soc,
				    soc->tcl_cmd_credit_ring.hal_srng);

	dp_tx_pdev_init(pdev);
	/*
	 * Placeholder dp_peer object, presumably used when frames
	 * arrive from a peer that is not (yet) known to the host —
	 * TODO confirm; the previous comment here was a copy-paste
	 * of the pdev_deinit note above.
	 */
	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));

	if (!pdev->invalid_peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Invalid peer memory allocation failed"));
		goto fail3;
	}

	/*
	 * set nss pdev config based on soc config
	 */
	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
					 (nss_cfg & (1 << pdev_id)));

	pdev->target_pdev_id =
		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);

	/* PHYB 2G mode uses a fixed target pdev id */
	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
	}

	/* Reset the cpu ring map if radio is NSS offloaded */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		dp_soc_reset_cpu_ring_map(soc);
		dp_soc_reset_intr_mask(soc);
	}

	/* vdev list and its lock */
	TAILQ_INIT(&pdev->vdev_list);
	qdf_spinlock_create(&pdev->vdev_list_lock);
	pdev->vdev_count = 0;

	qdf_spinlock_create(&pdev->tx_mutex);
	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
	TAILQ_INIT(&pdev->neighbour_peers_list);
	pdev->neighbour_peers_added = false;
	pdev->monitor_configured = false;
	pdev->mon_chan_band = REG_BAND_UNKNOWN;
	/* No lmac mapped to any band until a monitor channel is set */
	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;

	DP_STATS_INIT(pdev);

	/* Monitor filter init: pass everything by default */
	pdev->mon_filter_mode = MON_FILTER_ALL;
	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
	pdev->fp_data_filter = FILTER_DATA_ALL;
	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
	pdev->mo_data_filter = FILTER_DATA_ALL;

	dp_local_peer_id_pool_init(pdev);

	dp_dscp_tid_map_setup(pdev);
	dp_pcp_tid_map_setup(pdev);

	/* set the reo destination during initialization */
	pdev->reo_dest = pdev->pdev_id + 1;

	/*
	 * initialize ppdu tlv list
	 */
	TAILQ_INIT(&pdev->ppdu_info_list);
	TAILQ_INIT(&pdev->sched_comp_ppdu_list);
	pdev->tlv_count = 0;
	pdev->list_depth = 0;

	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));

	/* nbuf used to carry per-tid sojourn stats to upper layers */
	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
			      TRUE);

	if (!pdev->sojourn_buf) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to allocate sojourn buf"));
		goto fail4;
	}
	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));

	/* initialize cal client timer */
	dp_cal_client_attach(&pdev->cal_client_ctx,
			     dp_pdev_to_cdp_pdev(pdev),
			     pdev->soc->osdev,
			     &dp_iterate_update_peer_list);
	qdf_event_create(&pdev->fw_peer_stats_event);

	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
		goto fail5;

	if (dp_rxdma_ring_setup(soc, pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("RXDMA ring config failed"));
		goto fail6;
	}

	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
		goto fail7;

	if (dp_ipa_ring_resource_setup(soc, pdev))
		goto fail8;

	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_ipa_uc_attach failed"));
		goto fail8;
	}

	/* E_NOSUPPORT is tolerated: FST is an optional feature */
	ret = dp_rx_fst_attach(soc, pdev);
	if ((ret != QDF_STATUS_SUCCESS) &&
	    (ret != QDF_STATUS_E_NOSUPPORT)) {
		QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_ERROR,
			  "RX Flow Search Table attach failed: pdev %d err %d",
			  pdev_id, ret);
		goto fail9;
	}

	/* initialize sw rx descriptors */
	dp_rx_pdev_desc_pool_init(pdev);
	/* initialize sw monitor rx descriptors */
	dp_rx_pdev_mon_desc_pool_init(pdev);
	/* allocate buffers and replenish the RxDMA ring */
	dp_rx_pdev_buffers_alloc(pdev);
	/* allocate buffers and replenish the monitor RxDMA ring */
	dp_rx_pdev_mon_buffers_alloc(pdev);

	dp_init_tso_stats(pdev);
	dp_tx_ppdu_stats_attach(pdev);

	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
		qdf_dma_mem_stats_read(),
		qdf_heap_mem_stats_read(),
		qdf_skb_mem_stats_read());

	return QDF_STATUS_SUCCESS;
	/* Unwind chain: each label undoes the step done just before the
	 * matching goto, in reverse order of setup.
	 */
fail9:
	dp_ipa_uc_detach(soc, pdev);
fail8:
	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
fail7:
	dp_rxdma_ring_cleanup(soc, pdev);
fail6:
	dp_htt_ppdu_stats_detach(pdev);
fail5:
	qdf_nbuf_free(pdev->sojourn_buf);
fail4:
	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);
	qdf_mem_free(pdev->invalid_peer);
fail3:
	dp_pdev_srng_deinit(pdev);
fail2:
	dp_wdi_event_detach(pdev);
fail1:
	dp_mon_filter_dealloc(pdev);
fail0:
	return QDF_STATUS_E_FAILURE;
}
12822 
12823 /*
12824  * dp_pdev_init_wifi3() - Init txrx pdev
12825  * @htc_handle: HTC handle for host-target interface
12826  * @qdf_osdev: QDF OS device
12827  * @force: Force deinit
12828  *
12829  * Return: QDF_STATUS
12830  */
12831 static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
12832 				     HTC_HANDLE htc_handle,
12833 				     qdf_device_t qdf_osdev,
12834 				     uint8_t pdev_id)
12835 {
12836 	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
12837 }
12838 
12839