xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_types.h"
31 #include "dp_internal.h"
32 #include "dp_tx.h"
33 #include "dp_tx_desc.h"
34 #include "dp_rx.h"
35 #ifdef DP_RATETABLE_SUPPORT
36 #include "dp_ratetable.h"
37 #endif
38 #include <cdp_txrx_handle.h>
39 #include <wlan_cfg.h>
40 #include <wlan_utility.h>
41 #include "cdp_txrx_cmn_struct.h"
42 #include "cdp_txrx_stats_struct.h"
43 #include "cdp_txrx_cmn_reg.h"
44 #include <qdf_util.h>
45 #include "dp_peer.h"
46 #include "htt_stats.h"
47 #include "dp_htt.h"
48 #ifdef WLAN_SUPPORT_RX_FISA
49 #include <dp_fisa_rx.h>
50 #endif
51 #include "htt_ppdu_stats.h"
52 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
53 #include "cfg_ucfg_api.h"
54 
55 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
56 #include "cdp_txrx_flow_ctrl_v2.h"
57 #else
58 
59 static inline void
60 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
61 {
62 	return;
63 }
64 #endif
65 #ifdef WIFI_MONITOR_SUPPORT
66 #include <dp_mon.h>
67 #endif
68 #include "dp_ipa.h"
69 #ifdef FEATURE_WDS
70 #include "dp_txrx_wds.h"
71 #endif
72 #ifdef WLAN_SUPPORT_MSCS
73 #include "dp_mscs.h"
74 #endif
75 #ifdef WLAN_SUPPORT_MESH_LATENCY
76 #include "dp_mesh_latency.h"
77 #endif
78 #ifdef ATH_SUPPORT_IQUE
79 #include "dp_txrx_me.h"
80 #endif
81 #if defined(DP_CON_MON)
82 #ifndef REMOVE_PKT_LOG
83 #include <pktlog_ac_api.h>
84 #include <pktlog_ac.h>
85 #endif
86 #endif
87 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
88 #include <dp_swlm.h>
89 #endif
90 
91 #ifdef WLAN_FEATURE_STATS_EXT
92 #define INIT_RX_HW_STATS_LOCK(_soc) \
93 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
94 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
95 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
96 #else
97 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
98 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
99 #endif
100 
101 #if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH)
102 #define SET_PEER_REF_CNT_ONE(_peer) \
103 	qdf_atomic_set(&(_peer)->ref_cnt, 1)
104 #else
105 #define SET_PEER_REF_CNT_ONE(_peer)
106 #endif
107 
108 QDF_COMPILE_TIME_ASSERT(max_rx_rings_check,
109 			MAX_REO_DEST_RINGS == CDP_MAX_RX_RINGS);
110 
111 QDF_COMPILE_TIME_ASSERT(max_tx_rings_check,
112 			MAX_TCL_DATA_RINGS == CDP_MAX_TX_COMP_RINGS);
113 
114 #define dp_init_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_INIT, params)
115 #define dp_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)
116 #define dp_init_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_INIT, params)
117 #define dp_init_info(params...) \
118 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_INIT, ## params)
119 #define dp_init_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_INIT, params)
120 
121 #define dp_vdev_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_VDEV, params)
122 #define dp_vdev_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_VDEV, params)
123 #define dp_vdev_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_VDEV, params)
124 #define dp_vdev_info(params...) \
125 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_VDEV, ## params)
126 #define dp_vdev_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_VDEV, params)
127 
128 void dp_configure_arch_ops(struct dp_soc *soc);
129 qdf_size_t dp_get_soc_context_size(uint16_t device_id);
130 
131 /*
132  * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
133  * If the buffer size exceeds this limit,
134  * dp_txrx_get_peer_stats must be used instead.
135  */
136 QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
137 			(sizeof(cdp_peer_stats_param_t) <= 16));
138 
139 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
140 /*
141  * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
142  * should also be updated accordingly
143  */
144 QDF_COMPILE_TIME_ASSERT(num_intr_grps,
145 			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);
146 
147 /*
148  * HIF_EVENT_HIST_MAX should always be power of 2
149  */
150 QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
151 			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
152 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
153 
154 /*
155  * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
156  * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
157  */
158 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
159 			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
160 			WLAN_CFG_INT_NUM_CONTEXTS);
161 
162 static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
163 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
164 static void dp_pdev_srng_free(struct dp_pdev *pdev);
165 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);
166 
167 static void dp_soc_srng_deinit(struct dp_soc *soc);
168 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
169 static void dp_soc_srng_free(struct dp_soc *soc);
170 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);
171 
172 static void dp_soc_cfg_init(struct dp_soc *soc);
173 static void dp_soc_cfg_attach(struct dp_soc *soc);
174 
175 static inline
176 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
177 				HTC_HANDLE htc_handle,
178 				qdf_device_t qdf_osdev,
179 				uint8_t pdev_id);
180 
181 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);
182 
183 static QDF_STATUS
184 dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
185 		   HTC_HANDLE htc_handle,
186 		   qdf_device_t qdf_osdev,
187 		   uint8_t pdev_id);
188 
189 static QDF_STATUS
190 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);
191 
192 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
193 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);
194 
195 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
196 		  struct hif_opaque_softc *hif_handle);
197 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
198 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
199 				       uint8_t pdev_id,
200 				       int force);
201 static struct dp_soc *
202 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
203 	      struct hif_opaque_softc *hif_handle,
204 	      HTC_HANDLE htc_handle,
205 	      qdf_device_t qdf_osdev,
206 	      struct ol_if_ops *ol_ops, uint16_t device_id);
207 static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
208 					      uint8_t vdev_id,
209 					      uint8_t *peer_mac_addr);
210 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
211 				       uint8_t vdev_id,
212 				       uint8_t *peer_mac, uint32_t bitmap);
213 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
214 				bool unmap_only);
215 #ifdef ENABLE_VERBOSE_DEBUG
216 bool is_dp_verbose_debug_enabled;
217 #endif
218 
219 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
220 static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
221 static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
222 			   bool enable);
223 static inline void
224 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
225 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
226 static inline void
227 dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
228 #endif
229 
230 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
231 						uint8_t index);
232 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
233 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
234 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
235 						 uint8_t index);
236 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
237 					    enum hal_ring_type ring_type,
238 					    int ring_num);
239 
240 #define DP_INTR_POLL_TIMER_MS	5
241 
242 #define MON_VDEV_TIMER_INIT 0x1
243 #define MON_VDEV_TIMER_RUNNING 0x2
244 
245 /* Generic AST entry aging timer value */
246 #define DP_AST_AGING_TIMER_DEFAULT_MS	1000
247 #define DP_MCS_LENGTH (6*MAX_MCS)
248 
249 #define DP_CURR_FW_STATS_AVAIL 19
250 #define DP_HTT_DBG_EXT_STATS_MAX 256
251 #define DP_MAX_SLEEP_TIME 100
252 #ifndef QCA_WIFI_3_0_EMU
253 #define SUSPEND_DRAIN_WAIT 500
254 #else
255 #define SUSPEND_DRAIN_WAIT 3000
256 #endif
257 
258 #ifdef IPA_OFFLOAD
259 /* Exclude IPA rings from the interrupt context */
260 #define TX_RING_MASK_VAL	0xb
261 #define RX_RING_MASK_VAL	0x7
262 #else
263 #define TX_RING_MASK_VAL	0xF
264 #define RX_RING_MASK_VAL	0xF
265 #endif
266 
267 #define STR_MAXLEN	64
268 
269 #define RNG_ERR		"SRNG setup failed for"
270 
271 /* Threshold for peer's cached buf queue beyond which frames are dropped */
272 #define DP_RX_CACHED_BUFQ_THRESH 64
273 
274 /**
275  * default_dscp_tid_map - Default DSCP-TID mapping
276  *
277  * DSCP        TID
278  * 000000      0
279  * 001000      1
280  * 010000      2
281  * 011000      3
282  * 100000      4
283  * 101000      5
284  * 110000      6
285  * 111000      7
286  */
287 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
288 	0, 0, 0, 0, 0, 0, 0, 0,
289 	1, 1, 1, 1, 1, 1, 1, 1,
290 	2, 2, 2, 2, 2, 2, 2, 2,
291 	3, 3, 3, 3, 3, 3, 3, 3,
292 	4, 4, 4, 4, 4, 4, 4, 4,
293 	5, 5, 5, 5, 5, 5, 5, 5,
294 	6, 6, 6, 6, 6, 6, 6, 6,
295 	7, 7, 7, 7, 7, 7, 7, 7,
296 };
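
/*
 * Illustrative lookup (not part of the driver flow; ip_tos is a hypothetical
 * variable holding the IPv4 TOS / IPv6 traffic class byte). The map is
 * indexed by the 6-bit DSCP value:
 *
 *	uint8_t dscp = (ip_tos >> 2) & 0x3f;       (TOS 0xB8 -> DSCP 46, EF)
 *	uint8_t tid = default_dscp_tid_map[dscp];  (46 is in 40..47 -> TID 5)
 */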
297 
298 /**
299  * default_pcp_tid_map - Default PCP-TID mapping
300  *
301  * PCP     TID
302  * 000      0
303  * 001      1
304  * 010      2
305  * 011      3
306  * 100      4
307  * 101      5
308  * 110      6
309  * 111      7
310  */
311 static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
312 	0, 1, 2, 3, 4, 5, 6, 7,
313 };
314 
315 /**
316  * dp_cpu_ring_map - Cpu to tx ring map
317  */
318 uint8_t
319 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
320 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
321 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
322 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
323 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
324 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
325 #ifdef WLAN_TX_PKT_CAPTURE_ENH
326 	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
327 #endif
328 };
329 
330 qdf_export_symbol(dp_cpu_ring_map);
331 
332 /**
333  * enum dp_stats_type - Select the type of statistics
334  */
335 enum dp_stats_type {
336 	STATS_FW = 0,
337 	STATS_HOST = 1,
338 	STATS_TYPE_MAX = 2,
339 };
340 
341 /**
342  * enum dp_fw_stats - General Firmware statistics options
344  */
345 enum dp_fw_stats {
346 	TXRX_FW_STATS_INVALID	= -1,
347 };
348 
349 /**
350  * dp_stats_mapping_table - Firmware and Host statistics
351  * currently supported
352  */
353 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
354 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
355 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
356 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
357 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
358 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
359 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
360 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
361 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
362 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
363 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
364 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
365 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
366 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
367 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
368 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
369 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
370 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
371 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
372 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
373 	/* Last ENUM for HTT FW STATS */
374 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
375 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
376 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
377 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
378 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
379 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
380 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
381 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
382 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
383 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
384 	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
385 	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
386 	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
387 	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
388 	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
389 	{TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP},
390 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID}
391 };
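
/*
 * Illustrative use of the mapping table (hypothetical caller): a stats id
 * selects a row and the column picks the FW or host variant; the
 * TXRX_FW_STATS_INVALID / TXRX_HOST_STATS_INVALID values mark a variant
 * that does not exist for that id:
 *
 *	int fw_stat = dp_stats_mapping_table[stats_id][STATS_FW];
 *	int host_stat = dp_stats_mapping_table[stats_id][STATS_HOST];
 */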
392 
393 /* MCL specific functions */
394 #if defined(DP_CON_MON)
395 /**
396  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
397  * @soc: pointer to dp_soc handle
398  * @intr_ctx_num: interrupt context number for which mon mask is needed
399  *
400  * For MCL, monitor mode rings are being processed in timer contexts (polled).
401  * This function returns 0, since in interrupt mode (softirq based RX),
402  * we do not want to process monitor mode rings in a softirq.
403  *
404  * So, in case packet log is enabled for SAP/STA/P2P modes,
405  * regular interrupt processing will not process monitor mode rings; that
406  * is done in a separate timer context.
407  *
408  * Return: 0
409  */
410 static inline
411 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
412 {
413 	return 0;
414 }
415 
416 /**
417  * dp_get_num_rx_contexts() - get number of RX contexts
418  * @soc_hdl: cdp opaque soc handle
419  *
420  * Return: number of RX contexts
421  */
422 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
423 {
424 	int i;
425 	int num_rx_contexts = 0;
426 
427 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
428 
429 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
430 		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
431 			num_rx_contexts++;
432 
433 	return num_rx_contexts;
434 }
435 
436 #else
437 
438 /**
439  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
440  * @soc: pointer to dp_soc handle
441  * @intr_ctx_num: interrupt context number for which mon mask is needed
442  *
443  * Return: mon mask value
444  */
445 static inline
446 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
447 {
448 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
449 }
450 
451 /**
452  * dp_soc_reset_mon_intr_mask() - reset mon intr mask
453  * @soc: pointer to dp_soc handle
454  *
455  * Return: None
456  */
457 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
458 {
459 	int i;
460 
461 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
462 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
463 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
464 	}
465 }
466 
467 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
468 
469 /*
470  * dp_service_lmac_rings() - timer to reap lmac rings
471  * @arg: SoC Handle
472  *
473  * Return: None
474  */
476 static void dp_service_lmac_rings(void *arg)
477 {
478 	struct dp_soc *soc = (struct dp_soc *)arg;
479 	int ring = 0, i;
480 	struct dp_pdev *pdev = NULL;
481 	union dp_rx_desc_list_elem_t *desc_list = NULL;
482 	union dp_rx_desc_list_elem_t *tail = NULL;
483 
484 	/* Process LMAC interrupts */
485 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
486 		int mac_for_pdev = ring;
487 		struct dp_srng *rx_refill_buf_ring;
488 
489 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
490 		if (!pdev)
491 			continue;
492 
493 		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];
494 
495 		dp_monitor_process(soc, NULL, mac_for_pdev,
496 				   QCA_NAPI_BUDGET);
497 
498 		for (i = 0;
499 		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
500 			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
501 					     mac_for_pdev,
502 					     QCA_NAPI_BUDGET);
503 
504 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
505 						  mac_for_pdev))
506 			dp_rx_buffers_replenish(soc, mac_for_pdev,
507 						rx_refill_buf_ring,
508 						&soc->rx_desc_buf[mac_for_pdev],
509 						0, &desc_list, &tail);
510 	}
511 
512 	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
513 }
514 
515 #endif
516 
517 #ifdef FEATURE_MEC
518 void dp_peer_mec_flush_entries(struct dp_soc *soc)
519 {
520 	unsigned int index;
521 	struct dp_mec_entry *mecentry, *mecentry_next;
522 
523 	TAILQ_HEAD(, dp_mec_entry) free_list;
524 	TAILQ_INIT(&free_list);
525 
526 	if (!soc->mec_hash.mask)
527 		return;
528 
529 	if (!soc->mec_hash.bins)
530 		return;
531 
532 	if (!qdf_atomic_read(&soc->mec_cnt))
533 		return;
534 
535 	qdf_spin_lock_bh(&soc->mec_lock);
536 	for (index = 0; index <= soc->mec_hash.mask; index++) {
537 		if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
538 			TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
539 					   hash_list_elem, mecentry_next) {
540 			    dp_peer_mec_detach_entry(soc, mecentry, &free_list);
541 			}
542 		}
543 	}
544 	qdf_spin_unlock_bh(&soc->mec_lock);
545 
546 	dp_peer_mec_free_list(soc, &free_list);
547 }
548 
549 /**
550  * dp_print_mec_stats() - Dump MEC statistics and table entries
551  * @soc: Datapath soc handle
552  *
553  * Return: none
554  */
555 static void dp_print_mec_stats(struct dp_soc *soc)
556 {
557 	int i;
558 	uint32_t index;
559 	struct dp_mec_entry *mecentry = NULL, *mec_list;
560 	uint32_t num_entries = 0;
561 
562 	DP_PRINT_STATS("MEC Stats:");
563 	DP_PRINT_STATS("   Entries Added   = %d", soc->stats.mec.added);
564 	DP_PRINT_STATS("   Entries Deleted = %d", soc->stats.mec.deleted);
565 
566 	if (!qdf_atomic_read(&soc->mec_cnt))
567 		return;
568 
569 	mec_list = qdf_mem_malloc(sizeof(*mecentry) * DP_PEER_MAX_MEC_ENTRY);
570 	if (!mec_list) {
571 		dp_peer_warn("%pK: failed to allocate mec_list", soc);
572 		return;
573 	}
574 
575 	DP_PRINT_STATS("MEC Table:");
576 	for (index = 0; index <= soc->mec_hash.mask; index++) {
577 		qdf_spin_lock_bh(&soc->mec_lock);
578 		if (TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
579 			qdf_spin_unlock_bh(&soc->mec_lock);
580 			continue;
581 		}
582 
583 		TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index],
584 			      hash_list_elem) {
585 			qdf_mem_copy(&mec_list[num_entries], mecentry,
586 				     sizeof(*mecentry));
587 			num_entries++;
588 		}
589 		qdf_spin_unlock_bh(&soc->mec_lock);
590 	}
591 
592 	if (!num_entries) {
593 		qdf_mem_free(mec_list);
594 		return;
595 	}
596 
597 	for (i = 0; i < num_entries; i++) {
598 		DP_PRINT_STATS("%6d mac_addr = " QDF_MAC_ADDR_FMT
599 			       " is_active = %d pdev_id = %d vdev_id = %d",
600 			       i,
601 			       QDF_MAC_ADDR_REF(mec_list[i].mac_addr.raw),
602 			       mec_list[i].is_active,
603 			       mec_list[i].pdev_id,
604 			       mec_list[i].vdev_id);
605 	}
606 	qdf_mem_free(mec_list);
607 }
608 #else
609 static void dp_print_mec_stats(struct dp_soc *soc)
610 {
611 }
612 #endif
613 
614 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
615 				 uint8_t vdev_id,
616 				 uint8_t *peer_mac,
617 				 uint8_t *mac_addr,
618 				 enum cdp_txrx_ast_entry_type type,
619 				 uint32_t flags)
620 {
621 	int ret = -1;
622 	QDF_STATUS status = QDF_STATUS_SUCCESS;
623 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
624 						       peer_mac, 0, vdev_id,
625 						       DP_MOD_ID_CDP);
626 
627 	if (!peer) {
628 		dp_peer_debug("Peer is NULL!");
629 		return ret;
630 	}
631 
632 	status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
633 				 peer,
634 				 mac_addr,
635 				 type,
636 				 flags);
637 	if ((status == QDF_STATUS_SUCCESS) ||
638 	    (status == QDF_STATUS_E_ALREADY) ||
639 	    (status == QDF_STATUS_E_AGAIN))
640 		ret = 0;
641 
642 	dp_hmwds_ast_add_notify(peer, mac_addr,
643 				type, status, false);
644 
645 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
646 
647 	return ret;
648 }
649 
650 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
651 						uint8_t vdev_id,
652 						uint8_t *peer_mac,
653 						uint8_t *wds_macaddr,
654 						uint32_t flags)
655 {
656 	int status = -1;
657 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
658 	struct dp_ast_entry  *ast_entry = NULL;
659 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
660 						       peer_mac, 0, vdev_id,
661 						       DP_MOD_ID_CDP);
662 
663 	if (!peer) {
664 		dp_peer_debug("Peer is NULL!");
665 		return status;
666 	}
667 
668 	qdf_spin_lock_bh(&soc->ast_lock);
669 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
670 						    peer->vdev->pdev->pdev_id);
671 
672 	if (ast_entry) {
673 		status = dp_peer_update_ast(soc,
674 					    peer,
675 					    ast_entry, flags);
676 	}
677 	qdf_spin_unlock_bh(&soc->ast_lock);
678 
679 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
680 
681 	return status;
682 }
683 
684 /*
685  * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
686  * @soc:		Datapath SOC handle
687  * @peer:		DP peer
688  * @arg:		callback argument
689  *
690  * Return: None
691  */
692 static void
693 dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
694 {
695 	struct dp_ast_entry *ast_entry = NULL;
696 	struct dp_ast_entry *tmp_ast_entry;
697 
698 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
699 		if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
700 		    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
701 			dp_peer_del_ast(soc, ast_entry);
702 	}
703 }
704 
705 /*
706  * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
707  * @soc_hdl:		Datapath SOC handle
708  * @wds_macaddr:	WDS entry MAC Address
709  * @peer_mac_addr:	peer MAC address
710  * @vdev_id:		id of vdev handle
711  * Return: QDF_STATUS
712  */
713 static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
714 					 uint8_t *wds_macaddr,
715 					 uint8_t *peer_mac_addr,
716 					 uint8_t vdev_id)
717 {
718 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
719 	struct dp_ast_entry *ast_entry = NULL;
720 	struct dp_peer *peer;
721 	struct dp_pdev *pdev;
722 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
723 						     DP_MOD_ID_CDP);
724 
725 	if (!vdev)
726 		return QDF_STATUS_E_FAILURE;
727 
728 	pdev = vdev->pdev;
729 
730 	if (peer_mac_addr) {
731 		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
732 					      0, vdev->vdev_id,
733 					      DP_MOD_ID_CDP);
734 		if (!peer) {
735 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
736 			return QDF_STATUS_E_FAILURE;
737 		}
738 
739 		qdf_spin_lock_bh(&soc->ast_lock);
740 		dp_peer_reset_ast_entries(soc, peer, NULL);
741 		qdf_spin_unlock_bh(&soc->ast_lock);
742 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
743 	} else if (wds_macaddr) {
744 		qdf_spin_lock_bh(&soc->ast_lock);
745 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
746 							    pdev->pdev_id);
747 
748 		if (ast_entry) {
749 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
750 			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
751 				dp_peer_del_ast(soc, ast_entry);
752 		}
753 		qdf_spin_unlock_bh(&soc->ast_lock);
754 	}
755 
756 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
757 	return QDF_STATUS_SUCCESS;
758 }
759 
760 /*
761  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entries
762  * @soc_hdl:		Datapath SOC handle
763  * @vdev_id:		id of vdev object
764  *
765  * Return: QDF_STATUS
766  */
767 static QDF_STATUS
768 dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
769 			     uint8_t vdev_id)
770 {
771 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
772 
773 	qdf_spin_lock_bh(&soc->ast_lock);
774 
775 	dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
776 			    DP_MOD_ID_CDP);
777 	qdf_spin_unlock_bh(&soc->ast_lock);
778 
779 	return QDF_STATUS_SUCCESS;
780 }
781 
782 /*
783  * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
784  * @soc:		Datapath SOC
785  * @peer:		Datapath peer
786  * @arg:		arg to callback
787  *
788  * Return: None
789  */
790 static void
791 dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
792 {
793 	struct dp_ast_entry *ase = NULL;
794 	struct dp_ast_entry *temp_ase;
795 
796 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
797 		if ((ase->type ==
798 			CDP_TXRX_AST_TYPE_STATIC) ||
799 			(ase->type ==
800 			 CDP_TXRX_AST_TYPE_SELF) ||
801 			(ase->type ==
802 			 CDP_TXRX_AST_TYPE_STA_BSS))
803 			continue;
804 		dp_peer_del_ast(soc, ase);
805 	}
806 }
807 
808 /*
809  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
810  * @soc_hdl:		Datapath SOC handle
811  *
812  * Return: None
813  */
814 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
815 {
816 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
817 
818 	qdf_spin_lock_bh(&soc->ast_lock);
819 
820 	dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
821 			    DP_MOD_ID_CDP);
822 
823 	qdf_spin_unlock_bh(&soc->ast_lock);
824 	dp_peer_mec_flush_entries(soc);
825 }
826 
827 /**
828  * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
829  *                                       and return ast entry information
830  *                                       of first ast entry found in the
831  *                                       table with given mac address
832  *
833  * @soc_hdl: data path soc handle
834  * @ast_mac_addr: AST entry mac address
835  * @ast_entry_info: ast entry information
836  *
837  * Return: true if ast entry found with ast_mac_addr
838  *         false if ast entry not found
839  */
840 static bool dp_peer_get_ast_info_by_soc_wifi3
841 	(struct cdp_soc_t *soc_hdl,
842 	 uint8_t *ast_mac_addr,
843 	 struct cdp_ast_entry_info *ast_entry_info)
844 {
845 	struct dp_ast_entry *ast_entry = NULL;
846 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
847 	struct dp_peer *peer = NULL;
848 
849 	qdf_spin_lock_bh(&soc->ast_lock);
850 
851 	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
852 	if ((!ast_entry) ||
853 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
854 		qdf_spin_unlock_bh(&soc->ast_lock);
855 		return false;
856 	}
857 
858 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
859 				     DP_MOD_ID_AST);
860 	if (!peer) {
861 		qdf_spin_unlock_bh(&soc->ast_lock);
862 		return false;
863 	}
864 
865 	ast_entry_info->type = ast_entry->type;
866 	ast_entry_info->pdev_id = ast_entry->pdev_id;
867 	ast_entry_info->vdev_id = ast_entry->vdev_id;
868 	ast_entry_info->peer_id = ast_entry->peer_id;
869 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
870 		     &peer->mac_addr.raw[0],
871 		     QDF_MAC_ADDR_SIZE);
872 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
873 	qdf_spin_unlock_bh(&soc->ast_lock);
874 	return true;
875 }
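
/*
 * Illustrative caller sketch (hypothetical; this op is normally reached
 * through the registered cdp ops table):
 *
 *	struct cdp_ast_entry_info info = {0};
 *
 *	if (dp_peer_get_ast_info_by_soc_wifi3(soc_hdl, mac, &info))
 *		use info.peer_id / info.vdev_id / info.peer_mac_addr
 */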
876 
877 /**
878  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
879  *                                          and return ast entry information
880  *                                          if mac address and pdev_id matches
881  *
882  * @soc_hdl: data path soc handle
883  * @ast_mac_addr: AST entry mac address
884  * @pdev_id: pdev_id
885  * @ast_entry_info: ast entry information
886  *
887  * Return: true if ast entry found with ast_mac_addr
888  *         false if ast entry not found
889  */
890 static bool dp_peer_get_ast_info_by_pdevid_wifi3
891 		(struct cdp_soc_t *soc_hdl,
892 		 uint8_t *ast_mac_addr,
893 		 uint8_t pdev_id,
894 		 struct cdp_ast_entry_info *ast_entry_info)
895 {
896 	struct dp_ast_entry *ast_entry;
897 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
898 	struct dp_peer *peer = NULL;
899 
900 	qdf_spin_lock_bh(&soc->ast_lock);
901 
902 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr,
903 						    pdev_id);
904 
905 	if ((!ast_entry) ||
906 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
907 		qdf_spin_unlock_bh(&soc->ast_lock);
908 		return false;
909 	}
910 
911 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
912 				     DP_MOD_ID_AST);
913 	if (!peer) {
914 		qdf_spin_unlock_bh(&soc->ast_lock);
915 		return false;
916 	}
917 
918 	ast_entry_info->type = ast_entry->type;
919 	ast_entry_info->pdev_id = ast_entry->pdev_id;
920 	ast_entry_info->vdev_id = ast_entry->vdev_id;
921 	ast_entry_info->peer_id = ast_entry->peer_id;
922 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
923 		     &peer->mac_addr.raw[0],
924 		     QDF_MAC_ADDR_SIZE);
925 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
926 	qdf_spin_unlock_bh(&soc->ast_lock);
927 	return true;
928 }
929 
930 /**
931  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
932  *                            with given mac address
933  *
934  * @soc_handle: data path soc handle
935  * @mac_addr: AST entry mac address
936  * @callback: callback function to be called on ast delete response from FW
937  * @cookie: argument to be passed to callback
938  *
939  * Return: QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
940  *         is sent
941  *         QDF_STATUS_E_INVAL if ast entry not found
942  */
943 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
944 					       uint8_t *mac_addr,
945 					       txrx_ast_free_cb callback,
946 					       void *cookie)
947 
948 {
949 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
950 	struct dp_ast_entry *ast_entry = NULL;
951 	txrx_ast_free_cb cb = NULL;
952 	void *arg = NULL;
953 
954 	qdf_spin_lock_bh(&soc->ast_lock);
955 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
956 	if (!ast_entry) {
957 		qdf_spin_unlock_bh(&soc->ast_lock);
958 		return QDF_STATUS_E_INVAL;
959 	}
960 
961 	if (ast_entry->callback) {
962 		cb = ast_entry->callback;
963 		arg = ast_entry->cookie;
964 	}
965 
966 	ast_entry->callback = callback;
967 	ast_entry->cookie = cookie;
968 
969 	/*
970 	 * If delete_in_progress is set, an AST delete was already sent to
971 	 * the target and the host is waiting for a response; do not send
972 	 * the delete again.
973 	 */
974 	if (!ast_entry->delete_in_progress)
975 		dp_peer_del_ast(soc, ast_entry);
976 
977 	qdf_spin_unlock_bh(&soc->ast_lock);
978 	if (cb) {
979 		cb(soc->ctrl_psoc,
980 		   dp_soc_to_cdp_soc(soc),
981 		   arg,
982 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
983 	}
984 	return QDF_STATUS_SUCCESS;
985 }
986 
987 /**
988  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
989  *                                   table if mac address and pdev_id matches
990  *
991  * @soc_handle: data path soc handle
992  * @mac_addr: AST entry mac address
993  * @pdev_id: pdev id
994  * @callback: callback function to be called on ast delete response from FW
995  * @cookie: argument to be passed to callback
996  *
997  * Return: QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
998  *         is sent
999  *         QDF_STATUS_E_INVAL if ast entry not found
1000  */
1001 
1002 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
1003 						uint8_t *mac_addr,
1004 						uint8_t pdev_id,
1005 						txrx_ast_free_cb callback,
1006 						void *cookie)
1007 
1008 {
1009 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1010 	struct dp_ast_entry *ast_entry;
1011 	txrx_ast_free_cb cb = NULL;
1012 	void *arg = NULL;
1013 
1014 	qdf_spin_lock_bh(&soc->ast_lock);
1015 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
1016 
1017 	if (!ast_entry) {
1018 		qdf_spin_unlock_bh(&soc->ast_lock);
1019 		return QDF_STATUS_E_INVAL;
1020 	}
1021 
1022 	if (ast_entry->callback) {
1023 		cb = ast_entry->callback;
1024 		arg = ast_entry->cookie;
1025 	}
1026 
1027 	ast_entry->callback = callback;
1028 	ast_entry->cookie = cookie;
1029 
1030 	/*
1031 	 * If delete_in_progress is set, an AST delete was already sent to
1032 	 * the target and the host is waiting for a response; do not send
1033 	 * the delete again.
1034 	 */
1035 	if (!ast_entry->delete_in_progress)
1036 		dp_peer_del_ast(soc, ast_entry);
1037 
1038 	qdf_spin_unlock_bh(&soc->ast_lock);
1039 
1040 	if (cb) {
1041 		cb(soc->ctrl_psoc,
1042 		   dp_soc_to_cdp_soc(soc),
1043 		   arg,
1044 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1045 	}
1046 	return QDF_STATUS_SUCCESS;
1047 }
1048 
1049 /**
1050  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
1051  * @ring_num: ring num of the ring being queried
1052  * @grp_mask: the grp_mask array for the ring type in question.
1053  *
1054  * The grp_mask array is indexed by group number and the bit fields correspond
1055  * to ring numbers.  We are finding which interrupt group a ring belongs to.
1056  *
1057  * Return: the index in the grp_mask array with the ring number.
1058  * -QDF_STATUS_E_NOENT if no entry is found
1059  */
1060 static int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
1061 {
1062 	int ext_group_num;
1063 	uint8_t mask = 1 << ring_num;
1064 
1065 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
1066 	     ext_group_num++) {
1067 		if (mask & grp_mask[ext_group_num])
1068 			return ext_group_num;
1069 	}
1070 
1071 	return -QDF_STATUS_E_NOENT;
1072 }
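
/*
 * Worked example (illustrative values): for ring_num 2 and
 * grp_mask = {0x1, 0x4, 0x2}, mask is 1 << 2 = 0x4; groups 0 (0x1) and
 * 2 (0x2) do not have bit 2 set, group 1 (0x4) does, so the function
 * returns 1.
 */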
1073 
1074 /**
1075  * dp_is_msi_group_number_invalid() - check msi_group_number valid or not
1076  * @msi_group_number: MSI group number.
1077  * @msi_data_count: MSI data count.
1078  *
1079  * Return: true if msi_group_number is invalid.
1080  */
1081 #ifdef WLAN_ONE_MSI_VECTOR
1082 static bool dp_is_msi_group_number_invalid(int msi_group_number,
1083 					   int msi_data_count)
1084 {
1085 	return false;
1086 }
1087 #else
1088 static bool dp_is_msi_group_number_invalid(int msi_group_number,
1089 					   int msi_data_count)
1090 {
1091 	return msi_group_number > msi_data_count;
1092 }
1093 #endif
1094 
1095 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1096 /**
1097  * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
1098  *				rx_near_full_grp1 mask
1099  * @soc: Datapath SoC Handle
1100  * @ring_num: REO ring number
1101  *
1102  * Return: non-zero if the ring_num belongs to reo_nf_grp1,
1103  *	   0 otherwise.
1104  */
1105 static inline int
1106 dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
1107 {
1108 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
1109 }
1110 
1111 /**
1112  * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
1113  *				rx_near_full_grp2 mask
1114  * @soc: Datapath SoC Handle
1115  * @ring_num: REO ring number
1116  *
1117  * Return: non-zero if the ring_num belongs to reo_nf_grp2,
1118  *	   0 otherwise.
1119  */
1120 static inline int
1121 dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
1122 {
1123 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
1124 }
1125 
1126 /**
1127  * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
1128  *				ring type and number
1129  * @soc: Datapath SoC handle
1130  * @ring_type: SRNG type
1131  * @ring_num: ring num
1132  *
1133  * Return: near full irq mask pointer
1134  */
1135 static inline
1136 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1137 					enum hal_ring_type ring_type,
1138 					int ring_num)
1139 {
1140 	uint8_t *nf_irq_mask = NULL;
1141 
1142 	switch (ring_type) {
1143 	case WBM2SW_RELEASE:
1144 		if (ring_num != WBM2SW_REL_ERR_RING_NUM) {
1145 			nf_irq_mask = &soc->wlan_cfg_ctx->
1146 					int_tx_ring_near_full_irq_mask[0];
1147 		}
1148 		break;
1149 	case REO_DST:
1150 		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
1151 			nf_irq_mask =
1152 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
1153 		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
1154 			nf_irq_mask =
1155 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
1156 		else
1157 			qdf_assert(0);
1158 		break;
1159 	default:
1160 		break;
1161 	}
1162 
1163 	return nf_irq_mask;
1164 }
1165 
1166 /**
1167  * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
1168  * @soc: Datapath SoC handle
1169  * @ring_params: srng params handle
1170  * @msi2_addr: MSI2 addr to be set for the SRNG
1171  * @msi2_data: MSI2 data to be set for the SRNG
1172  *
1173  * Return: None
1174  */
1175 static inline
1176 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
1177 				  struct hal_srng_params *ring_params,
1178 				  qdf_dma_addr_t msi2_addr,
1179 				  uint32_t msi2_data)
1180 {
1181 	ring_params->msi2_addr = msi2_addr;
1182 	ring_params->msi2_data = msi2_data;
1183 }
1184 
1185 /**
1186  * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
1187  * @soc: Datapath SoC handle
1188  * @ring_params: ring_params for SRNG
1189  * @ring_type: SRNG type
1190  * @ring_num: ring number for the SRNG
1191  * @nf_msi_grp_num: near full msi group number
1192  *
1193  * Return: None
1194  */
1195 static inline void
1196 dp_srng_msi2_setup(struct dp_soc *soc,
1197 		   struct hal_srng_params *ring_params,
1198 		   int ring_type, int ring_num, int nf_msi_grp_num)
1199 {
1200 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1201 	int msi_data_count, ret;
1202 
1203 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1204 					  &msi_data_count, &msi_data_start,
1205 					  &msi_irq_start);
1206 	if (ret)
1207 		return;
1208 
1209 	if (nf_msi_grp_num < 0) {
1210 		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
1211 			     soc, ring_type, ring_num);
1212 		ring_params->msi2_addr = 0;
1213 		ring_params->msi2_data = 0;
1214 		return;
1215 	}
1216 
1217 	if (dp_is_msi_group_number_invalid(nf_msi_grp_num, msi_data_count)) {
1218 		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
1219 			     soc, nf_msi_grp_num);
1220 		QDF_ASSERT(0);
1221 	}
1222 
1223 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1224 
1225 	ring_params->nf_irq_support = 1;
1226 	ring_params->msi2_addr = addr_low;
1227 	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1228 	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
1229 		+ msi_data_start;
1230 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1231 }
1232 
1233 /* Percentage of ring entries considered as nearly full */
1234 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
1235 /* Percentage of ring entries considered as critically full */
1236 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
1237 /* Percentage of ring entries considered as safe threshold */
1238 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
1239 
1240 /**
1241  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
1242  *			near full irq
1243  * @soc: Datapath SoC handle
1244  * @ring_params: ring params for SRNG
1245  * @ring_type: ring type
1246  */
1247 static inline void
1248 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1249 					  struct hal_srng_params *ring_params,
1250 					  int ring_type)
1251 {
1252 	if (ring_params->nf_irq_support) {
1253 		ring_params->high_thresh = (ring_params->num_entries *
1254 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
1255 		ring_params->crit_thresh = (ring_params->num_entries *
1256 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
1257 		ring_params->safe_thresh = (ring_params->num_entries *
1258 					    DP_NF_SAFE_THRESH_PERCENTAGE) / 100;
1259 	}
1260 }
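
/*
 * Worked example (illustrative values): for a ring with 2048 entries and
 * nf_irq_support set, the thresholds above evaluate to
 * high_thresh = 2048 * 75 / 100 = 1536,
 * crit_thresh = 2048 * 90 / 100 = 1843 and
 * safe_thresh = 2048 * 50 / 100 = 1024 entries.
 */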
1261 
1262 /**
1263  * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
1264  *			structure from the ring params
1265  * @soc: Datapath SoC handle
1266  * @srng: SRNG handle
1267  * @ring_params: ring params for a SRNG
1268  *
1269  * Return: None
1270  */
1271 static inline void
1272 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
1273 			  struct hal_srng_params *ring_params)
1274 {
1275 	srng->crit_thresh = ring_params->crit_thresh;
1276 	srng->safe_thresh = ring_params->safe_thresh;
1277 }
1278 
1279 #else
1280 static inline
1281 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1282 					enum hal_ring_type ring_type,
1283 					int ring_num)
1284 {
1285 	return NULL;
1286 }
1287 
1288 static inline
1289 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
1290 				  struct hal_srng_params *ring_params,
1291 				  qdf_dma_addr_t msi2_addr,
1292 				  uint32_t msi2_data)
1293 {
1294 }
1295 
1296 static inline void
1297 dp_srng_msi2_setup(struct dp_soc *soc,
1298 		   struct hal_srng_params *ring_params,
1299 		   int ring_type, int ring_num, int nf_msi_grp_num)
1300 {
1301 }
1302 
1303 static inline void
1304 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1305 					  struct hal_srng_params *ring_params,
1306 					  int ring_type)
1307 {
1308 }
1309 
1310 static inline void
1311 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
1312 			  struct hal_srng_params *ring_params)
1313 {
1314 }
1315 #endif
1316 
1317 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
1318 				       enum hal_ring_type ring_type,
1319 				       int ring_num,
1320 				       int *reg_msi_grp_num,
1321 				       bool nf_irq_support,
1322 				       int *nf_msi_grp_num)
1323 {
1324 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
1325 	uint8_t *grp_mask, *nf_irq_mask = NULL;
1326 	bool nf_irq_enabled = false;
1327 
1328 	switch (ring_type) {
1329 	case WBM2SW_RELEASE:
1330 		if (ring_num == WBM2SW_REL_ERR_RING_NUM) {
1331 			/* dp_rx_wbm_err_process - soc->rx_rel_ring */
1332 			grp_mask = &cfg_ctx->int_rx_wbm_rel_ring_mask[0];
1333 			ring_num = 0;
1334 		} else { /* dp_tx_comp_handler - soc->tx_comp_ring */
1335 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1336 			nf_irq_mask = dp_srng_get_near_full_irq_mask(soc,
1337 								     ring_type,
1338 								     ring_num);
1339 			if (nf_irq_mask)
1340 				nf_irq_enabled = true;
1341 		}
1342 	break;
1343 
1344 	case REO_EXCEPTION:
1345 		/* dp_rx_err_process - &soc->reo_exception_ring */
1346 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
1347 	break;
1348 
1349 	case REO_DST:
1350 		/* dp_rx_process - soc->reo_dest_ring */
1351 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1352 		nf_irq_mask = dp_srng_get_near_full_irq_mask(soc, ring_type,
1353 							     ring_num);
1354 		if (nf_irq_mask)
1355 			nf_irq_enabled = true;
1356 	break;
1357 
1358 	case REO_STATUS:
1359 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
1360 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
1361 	break;
1362 
1363 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
1364 	case RXDMA_MONITOR_STATUS:
1365 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
1366 	case RXDMA_MONITOR_DST:
1367 		/* dp_mon_process */
1368 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
1369 	break;
1370 	case RXDMA_DST:
1371 		/* dp_rxdma_err_process */
1372 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
1373 	break;
1374 
1375 	case RXDMA_BUF:
1376 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1377 	break;
1378 
1379 	case RXDMA_MONITOR_BUF:
1380 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
1381 	break;
1382 
1383 	case TCL_DATA:
1384 	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
1385 	case TCL_CMD_CREDIT:
1386 	case REO_CMD:
1387 	case SW2WBM_RELEASE:
1388 	case WBM_IDLE_LINK:
1389 		/* normally empty SW_TO_HW rings */
1390 		return -QDF_STATUS_E_NOENT;
1391 	break;
1392 
1393 	case TCL_STATUS:
1394 	case REO_REINJECT:
1395 		/* misc unused rings */
1396 		return -QDF_STATUS_E_NOENT;
1397 	break;
1398 
1399 	case CE_SRC:
1400 	case CE_DST:
1401 	case CE_DST_STATUS:
1402 		/* CE_rings - currently handled by hif */
1403 	default:
1404 		return -QDF_STATUS_E_NOENT;
1405 	break;
1406 	}
1407 
1408 	*reg_msi_grp_num = dp_srng_find_ring_in_mask(ring_num, grp_mask);
1409 
1410 	if (nf_irq_support && nf_irq_enabled) {
1411 		*nf_msi_grp_num = dp_srng_find_ring_in_mask(ring_num,
1412 							    nf_irq_mask);
1413 	}
1414 
1415 	return QDF_STATUS_SUCCESS;
1416 }
1417 
1418 /*
1419  * dp_get_num_msi_available() - API to get number of MSIs available
1420  * @soc: DP soc Handle
1421  * @interrupt_mode: Mode of interrupts
1422  *
1423  * Return: Number of MSIs available or 0 in case of integrated
1424  */
1425 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
1426 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1427 {
1428 	return 0;
1429 }
1430 #else
1431 /*
1432  * dp_get_num_msi_available() - API to get number of MSIs available
1433  * @soc: DP soc Handle
1434  * @interrupt_mode: Mode of interrupts
1435  *
1436  * Return: Number of MSIs available or 0 in case of integrated
1437  */
1438 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1439 {
1440 	int msi_data_count;
1441 	int msi_data_start;
1442 	int msi_irq_start;
1443 	int ret;
1444 
1445 	if (interrupt_mode == DP_INTR_INTEGRATED) {
1446 		return 0;
1447 	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
1448 		   DP_INTR_POLL) {
1449 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1450 						  &msi_data_count,
1451 						  &msi_data_start,
1452 						  &msi_irq_start);
1453 		if (ret) {
1454 			qdf_err("Unable to get DP MSI assignment %d",
1455 				interrupt_mode);
1456 			return -EINVAL;
1457 		}
1458 		return msi_data_count;
1459 	}
1460 	qdf_err("Interrupt mode invalid %d", interrupt_mode);
1461 	return -EINVAL;
1462 }
1463 #endif
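
/*
 * Illustrative caller sketch (hypothetical): the count is typically used
 * when deciding how interrupt sources are distributed across ext groups.
 *
 *	int num_msi = dp_get_num_msi_available(soc, soc->intr_mode);
 *
 *	if (num_msi <= 0)
 *		fall back to integrated/shared interrupt handling
 */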
1464 
1465 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
1466 			      *ring_params, int ring_type, int ring_num)
1467 {
1468 	int reg_msi_grp_num;
1469 	/*
1470 	 * nf_msi_grp_num needs to be initialized with negative value,
1471 	 * to avoid configuring near-full msi for WBM2SW3 ring
1472 	 */
1473 	int nf_msi_grp_num = -1;
1474 	int msi_data_count;
1475 	int ret;
1476 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1477 	bool nf_irq_support;
1478 
1479 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1480 					    &msi_data_count, &msi_data_start,
1481 					    &msi_irq_start);
1482 
1483 	if (ret)
1484 		return;
1485 
1486 	nf_irq_support = hal_srng_is_near_full_irq_supported(soc->hal_soc,
1487 							     ring_type,
1488 							     ring_num);
1489 	ret = dp_srng_calculate_msi_group(soc, ring_type, ring_num,
1490 					  &reg_msi_grp_num,
1491 					  nf_irq_support,
1492 					  &nf_msi_grp_num);
1493 	if (ret < 0) {
1494 		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d, ring_num %d",
1495 			     soc, ring_type, ring_num);
1496 		ring_params->msi_addr = 0;
1497 		ring_params->msi_data = 0;
1498 		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
1499 		return;
1500 	}
1501 
1502 	if (reg_msi_grp_num < 0) {
1503 		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d, ring_num %d",
1504 			     soc, ring_type, ring_num);
1505 		ring_params->msi_addr = 0;
1506 		ring_params->msi_data = 0;
1507 		goto configure_msi2;
1508 	}
1509 
1510 	if (dp_is_msi_group_number_invalid(reg_msi_grp_num, msi_data_count)) {
1511 		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
1512 			     soc, reg_msi_grp_num);
1513 		QDF_ASSERT(0);
1514 	}
1515 
1516 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1517 
1518 	ring_params->msi_addr = addr_low;
1519 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1520 	ring_params->msi_data = (reg_msi_grp_num % msi_data_count)
1521 		+ msi_data_start;
1522 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1523 
1524 	dp_debug("ring type %u ring_num %u msi->data %u msi_addr %llx",
1525 		 ring_type, ring_num, ring_params->msi_data,
1526 		 (uint64_t)ring_params->msi_addr);
1527 
1528 configure_msi2:
1529 	if (!nf_irq_support) {
1530 		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
1531 		return;
1532 	}
1533 
1534 	dp_srng_msi2_setup(soc, ring_params, ring_type, ring_num,
1535 			   nf_msi_grp_num);
1536 }
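
/*
 * Worked example of the MSI data computation above (illustrative values):
 * with msi_data_start = 1, msi_data_count = 4 and reg_msi_grp_num = 5,
 * msi_data = (5 % 4) + 1 = 2, i.e. interrupt groups beyond the available
 * MSI vectors wrap around and share vectors.
 */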
1537 
1538 #ifdef FEATURE_AST
1539 /**
1540  * dp_print_peer_ast_entries() - Dump AST entries of peer
1541  * @soc: Datapath soc handle
1542  * @peer: Datapath peer
1543  * @arg: argument to iterate function
1544  *
1545  * Return: None
1546  */
1547 static void
1548 dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1549 {
1550 	struct dp_ast_entry *ase, *tmp_ase;
1551 	uint32_t num_entries = 0;
1552 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1553 			"NONE", "STATIC", "SELF", "WDS", "HMWDS", "BSS",
1554 			"DA", "HMWDS_SEC"};
1555 
1556 	DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1557 	    DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT
1558 		    " peer_mac_addr = "QDF_MAC_ADDR_FMT
1559 		    " peer_id = %u"
1560 		    " type = %s"
1561 		    " next_hop = %d"
1562 		    " is_active = %d"
1563 		    " ast_idx = %d"
1564 		    " ast_hash = %d"
1565 		    " delete_in_progress = %d"
1566 		    " pdev_id = %d"
1567 		    " vdev_id = %d",
1568 		    ++num_entries,
1569 		    QDF_MAC_ADDR_REF(ase->mac_addr.raw),
1570 		    QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1571 		    ase->peer_id,
1572 		    type[ase->type],
1573 		    ase->next_hop,
1574 		    ase->is_active,
1575 		    ase->ast_idx,
1576 		    ase->ast_hash_value,
1577 		    ase->delete_in_progress,
1578 		    ase->pdev_id,
1579 		    ase->vdev_id);
1580 	}
1581 }
1582 
1583 /**
1584  * dp_print_ast_stats() - Dump AST table contents
1585  * @soc: Datapath soc handle
1586  *
1587  * Return: None
1588  */
1589 void dp_print_ast_stats(struct dp_soc *soc)
1590 {
1591 	DP_PRINT_STATS("AST Stats:");
1592 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1593 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1594 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1595 	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
1596 	DP_PRINT_STATS("	Entries Mismatch ERR  = %d",
1597 		       soc->stats.ast.ast_mismatch);
1598 
1599 	DP_PRINT_STATS("AST Table:");
1600 
1601 	qdf_spin_lock_bh(&soc->ast_lock);
1602 
1603 	dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
1604 			    DP_MOD_ID_GENERIC_STATS);
1605 
1606 	qdf_spin_unlock_bh(&soc->ast_lock);
1607 }
1608 #else
1609 void dp_print_ast_stats(struct dp_soc *soc)
1610 {
1611 	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
1612 	return;
1613 }
1614 #endif
1615 
1616 /**
1617  * dp_print_peer_info() - Dump peer info
1618  * @soc: Datapath soc handle
1619  * @peer: Datapath peer handle
1620  * @arg: argument to iter function
1621  *
1622  * Return: None
1623  */
1624 static void
1625 dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1626 {
1627 	DP_PRINT_STATS("    peer_mac_addr = "QDF_MAC_ADDR_FMT
1628 		       " nawds_enabled = %d"
1629 		       " bss_peer = %d"
1630 		       " wds_enabled = %d"
1631 		       " tx_cap_enabled = %d"
1632 		       " rx_cap_enabled = %d"
1633 		       " peer id = %d",
1634 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1635 		       peer->nawds_enabled,
1636 		       peer->bss_peer,
1637 		       peer->wds_enabled,
1638 		       peer->tx_cap_enabled,
1639 		       peer->rx_cap_enabled,
1640 		       peer->peer_id);
1641 }
1642 
1643 /**
1644  * dp_print_peer_table() - Dump all Peer stats
1645  * @vdev: Datapath Vdev handle
1646  *
1647  * Return: None
1648  */
1649 static void dp_print_peer_table(struct dp_vdev *vdev)
1650 {
1651 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1652 	dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
1653 			     DP_MOD_ID_GENERIC_STATS);
1654 }
1655 
1656 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
1657 /**
1658  * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
1659  * threshold values from the wlan_srng_cfg table for each ring type
1660  * @soc: device handle
1661  * @ring_params: per ring specific parameters
1662  * @ring_type: Ring type
1663  * @ring_num: Ring number for a given ring type
1664  *
1665  * Fill the ring params with the interrupt threshold
1666  * configuration parameters available in the per ring type wlan_srng_cfg
1667  * table.
1668  *
1669  * Return: None
1670  */
1671 static void
1672 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1673 				       struct hal_srng_params *ring_params,
1674 				       int ring_type, int ring_num,
1675 				       int num_entries)
1676 {
1677 	if (ring_type == REO_DST) {
1678 		ring_params->intr_timer_thres_us =
1679 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1680 		ring_params->intr_batch_cntr_thres_entries =
1681 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1682 	} else if (ring_type == WBM2SW_RELEASE && (ring_num == 3)) {
1683 		ring_params->intr_timer_thres_us =
1684 				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1685 		ring_params->intr_batch_cntr_thres_entries =
1686 				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1687 	} else {
1688 		ring_params->intr_timer_thres_us =
1689 				soc->wlan_srng_cfg[ring_type].timer_threshold;
1690 		ring_params->intr_batch_cntr_thres_entries =
1691 				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
1692 	}
1693 	ring_params->low_threshold =
1694 			soc->wlan_srng_cfg[ring_type].low_threshold;
1695 	if (ring_params->low_threshold)
1696 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1697 
1698 	dp_srng_configure_nf_interrupt_thresholds(soc, ring_params, ring_type);
1699 }
1700 #else
1701 static void
1702 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1703 				       struct hal_srng_params *ring_params,
1704 				       int ring_type, int ring_num,
1705 				       int num_entries)
1706 {
1707 	if (ring_type == REO_DST) {
1708 		ring_params->intr_timer_thres_us =
1709 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1710 		ring_params->intr_batch_cntr_thres_entries =
1711 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1712 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
1713 		ring_params->intr_timer_thres_us =
1714 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1715 		ring_params->intr_batch_cntr_thres_entries =
1716 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1717 	} else {
1718 		ring_params->intr_timer_thres_us =
1719 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1720 		ring_params->intr_batch_cntr_thres_entries =
1721 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1722 	}
1723 
1724 	/* Enable low threshold interrupts for rx buffer rings (regular and
1725 	 * monitor buffer rings).
1726 	 * TODO: See if this is required for any other ring
1727 	 */
1728 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
1729 	    (ring_type == RXDMA_MONITOR_STATUS)) {
1730 		/* TODO: Setting low threshold to 1/8th of ring size
1731 		 * see if this needs to be configurable
1732 		 */
1733 		ring_params->low_threshold = num_entries >> 3;
1734 		ring_params->intr_timer_thres_us =
1735 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1736 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1737 		ring_params->intr_batch_cntr_thres_entries = 0;
1738 	}
1739 
1740 	/* During initialisation monitor rings are only filled with
1741 	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
1742 	 * a value less than that. Low threshold value is reconfigured again
1743 	 * to 1/8th of the ring size when monitor vap is created.
1744 	 */
1745 	if (ring_type == RXDMA_MONITOR_BUF)
1746 		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;
1747 
1748 	/* In case of PCI chipsets, we don't have PPDU end interrupts,
1749 	 * so the MONITOR STATUS ring is reaped by receiving MSI from srng.
1750 	 * Keep the batch threshold as 4 so that an interrupt is received
1751 	 * for every 4 packets in the MONITOR_STATUS ring
1752 	 */
1753 	if ((ring_type == RXDMA_MONITOR_STATUS) &&
1754 	    (soc->intr_mode == DP_INTR_MSI))
1755 		ring_params->intr_batch_cntr_thres_entries = 4;
1756 }
1757 #endif
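/*
 * Illustrative note (not driver logic): the two thresholds configured
 * above implement classic interrupt coalescing. The SRNG raises an
 * interrupt once either intr_batch_cntr_thres_entries new entries have
 * accumulated or intr_timer_thres_us microseconds have elapsed since
 * the last interrupt, whichever happens first. A minimal sketch of the
 * equivalent decision, using hypothetical new_entries/elapsed_us
 * counters:
 *
 *	static bool srng_should_interrupt(uint32_t new_entries,
 *					  uint32_t elapsed_us,
 *					  struct hal_srng_params *p)
 *	{
 *		return (p->intr_batch_cntr_thres_entries &&
 *			new_entries >= p->intr_batch_cntr_thres_entries) ||
 *		       (p->intr_timer_thres_us &&
 *			elapsed_us >= p->intr_timer_thres_us);
 *	}
 */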
1758 
1759 #ifdef DP_MEM_PRE_ALLOC
1760 
1761 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
1762 			   size_t ctxt_size)
1763 {
1764 	void *ctxt_mem;
1765 
1766 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) {
1767 		dp_warn("dp_prealloc_get_context null!");
1768 		goto dynamic_alloc;
1769 	}
1770 
1771 	ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type);
1772 
1773 	if (ctxt_mem)
1774 		goto end;
1775 
1776 dynamic_alloc:
1777 	dp_info("Pre-alloc of ctxt failed. Dynamic allocation");
1778 	ctxt_mem = qdf_mem_malloc(ctxt_size);
1779 end:
1780 	return ctxt_mem;
1781 }
1782 
1783 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
1784 			 void *vaddr)
1785 {
1786 	QDF_STATUS status;
1787 
1788 	if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
1789 		status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
1790 								ctxt_type,
1791 								vaddr);
1792 	} else {
1793 		dp_warn("dp_prealloc_get_context null!");
1794 		status = QDF_STATUS_E_NOSUPPORT;
1795 	}
1796 
1797 	if (QDF_IS_STATUS_ERROR(status)) {
1798 		dp_info("Context not pre-allocated");
1799 		qdf_mem_free(vaddr);
1800 	}
1801 }
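/*
 * Usage sketch (illustrative only): callers pair dp_context_alloc_mem()
 * with dp_context_free_mem() using the same dp_ctxt_type, so a context
 * that fell back to qdf_mem_malloc() is released via qdf_mem_free() and
 * a pre-allocated one is returned to the pool. DP_PDEV_TYPE below is
 * just an example context type.
 *
 *	struct dp_pdev *pdev;
 *
 *	pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE, sizeof(*pdev));
 *	if (!pdev)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
 */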
1802 
1803 static inline
1804 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
1805 					   struct dp_srng *srng,
1806 					   uint32_t ring_type)
1807 {
1808 	void *mem;
1809 
1810 	qdf_assert(!srng->is_mem_prealloc);
1811 
1812 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) {
1813 		dp_warn("dp_prealloc_get_consistent is null!");
1814 		goto qdf;
1815 	}
1816 
1817 	mem =
1818 		soc->cdp_soc.ol_ops->dp_prealloc_get_consistent
1819 						(&srng->alloc_size,
1820 						 &srng->base_vaddr_unaligned,
1821 						 &srng->base_paddr_unaligned,
1822 						 &srng->base_paddr_aligned,
1823 						 DP_RING_BASE_ALIGN, ring_type);
1824 
1825 	if (mem) {
1826 		srng->is_mem_prealloc = true;
1827 		goto end;
1828 	}
1829 qdf:
	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
1831 						&srng->base_vaddr_unaligned,
1832 						&srng->base_paddr_unaligned,
1833 						&srng->base_paddr_aligned,
1834 						DP_RING_BASE_ALIGN);
1835 end:
1836 	dp_info("%s memory %pK dp_srng %pK ring_type %d alloc_size %d num_entries %d",
1837 		srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem,
1838 		srng, ring_type, srng->alloc_size, srng->num_entries);
1839 	return mem;
1840 }
1841 
1842 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
1843 					       struct dp_srng *srng)
1844 {
1845 	if (srng->is_mem_prealloc) {
1846 		if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) {
1847 			dp_warn("dp_prealloc_put_consistent is null!");
1848 			QDF_BUG(0);
1849 			return;
1850 		}
1851 		soc->cdp_soc.ol_ops->dp_prealloc_put_consistent
1852 						(srng->alloc_size,
1853 						 srng->base_vaddr_unaligned,
1854 						 srng->base_paddr_unaligned);
1855 
1856 	} else {
1857 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1858 					srng->alloc_size,
1859 					srng->base_vaddr_unaligned,
1860 					srng->base_paddr_unaligned, 0);
1861 	}
1862 }
1863 
1864 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
1865 				   enum dp_desc_type desc_type,
1866 				   struct qdf_mem_multi_page_t *pages,
1867 				   size_t element_size,
1868 				   uint16_t element_num,
1869 				   qdf_dma_context_t memctxt,
1870 				   bool cacheable)
1871 {
1872 	if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
1873 		dp_warn("dp_get_multi_pages is null!");
1874 		goto qdf;
1875 	}
1876 
1877 	pages->num_pages = 0;
1878 	pages->is_mem_prealloc = 0;
1879 	soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
1880 						element_size,
1881 						element_num,
1882 						pages,
1883 						cacheable);
1884 	if (pages->num_pages)
1885 		goto end;
1886 
1887 qdf:
1888 	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
1889 				  element_num, memctxt, cacheable);
1890 end:
1891 	dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
1892 		pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
1893 		desc_type, (int)element_size, element_num, cacheable);
1894 }
1895 
1896 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
1897 				  enum dp_desc_type desc_type,
1898 				  struct qdf_mem_multi_page_t *pages,
1899 				  qdf_dma_context_t memctxt,
1900 				  bool cacheable)
1901 {
1902 	if (pages->is_mem_prealloc) {
1903 		if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
1904 			dp_warn("dp_put_multi_pages is null!");
1905 			QDF_BUG(0);
1906 			return;
1907 		}
1908 
1909 		soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
1910 		qdf_mem_zero(pages, sizeof(*pages));
1911 	} else {
1912 		qdf_mem_multi_pages_free(soc->osdev, pages,
1913 					 memctxt, cacheable);
1914 	}
1915 }
1916 
1917 #else
1918 
1919 static inline
1920 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
1921 					   struct dp_srng *srng,
1922 					   uint32_t ring_type)
1923 
1924 {
1925 	return qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
1926 						&srng->base_vaddr_unaligned,
1927 						&srng->base_paddr_unaligned,
1928 						&srng->base_paddr_aligned,
1929 						DP_RING_BASE_ALIGN);
1930 }
1931 
1932 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
1933 					       struct dp_srng *srng)
1934 {
1935 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1936 				srng->alloc_size,
1937 				srng->base_vaddr_unaligned,
1938 				srng->base_paddr_unaligned, 0);
1939 }
1940 
1941 #endif /* DP_MEM_PRE_ALLOC */
1942 
1943 /*
1944  * dp_srng_free() - Free SRNG memory
1945  * @soc  : Data path soc handle
1946  * @srng : SRNG pointer
1947  *
 * Return: None
1949  */
1950 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
1951 {
1952 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
1953 		if (!srng->cached) {
1954 			dp_srng_mem_free_consistent(soc, srng);
1955 		} else {
1956 			qdf_mem_free(srng->base_vaddr_unaligned);
1957 		}
1958 		srng->alloc_size = 0;
1959 		srng->base_vaddr_unaligned = NULL;
1960 	}
1961 	srng->hal_srng = NULL;
1962 }
1963 
1964 qdf_export_symbol(dp_srng_free);
1965 
1966 #ifdef DISABLE_MON_RING_MSI_CFG
1967 /*
1968  * dp_skip_msi_cfg() - Check if msi cfg has to be skipped for ring_type
 * @ring_type: srng type
1970  *
1971  * Return: True if msi cfg should be skipped for srng type else false
1972  */
1973 static inline bool dp_skip_msi_cfg(int ring_type)
1974 {
1975 	if (ring_type == RXDMA_MONITOR_STATUS)
1976 		return true;
1977 
1978 	return false;
1979 }
1980 #else
1981 static inline bool dp_skip_msi_cfg(int ring_type)
1982 {
1983 	return false;
1984 }
1985 #endif
1986 
1987 /*
1988  * dp_srng_init() - Initialize SRNG
1989  * @soc  : Data path soc handle
1990  * @srng : SRNG pointer
1991  * @ring_type : Ring Type
1992  * @ring_num: Ring number
 * @mac_id: mac id
 *
 * Return: QDF_STATUS
1996  */
1997 QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
1998 			int ring_type, int ring_num, int mac_id)
1999 {
2000 	hal_soc_handle_t hal_soc = soc->hal_soc;
2001 	struct hal_srng_params ring_params;
2002 
2003 	if (srng->hal_srng) {
2004 		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
2005 			    soc, ring_type, ring_num);
2006 		return QDF_STATUS_SUCCESS;
2007 	}
2008 
2009 	/* memset the srng ring to zero */
2010 	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);
2011 
2012 	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
2013 	ring_params.ring_base_paddr = srng->base_paddr_aligned;
2014 	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;
2015 
2016 	ring_params.num_entries = srng->num_entries;
2017 
2018 	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
2019 		ring_type, ring_num,
2020 		(void *)ring_params.ring_base_vaddr,
2021 		(void *)ring_params.ring_base_paddr,
2022 		ring_params.num_entries);
2023 
2024 	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(ring_type)) {
2025 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
2026 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
2027 				 ring_type, ring_num);
2028 	} else {
2029 		ring_params.msi_data = 0;
2030 		ring_params.msi_addr = 0;
2031 		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
2032 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
2033 				 ring_type, ring_num);
2034 	}
2035 
2036 	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
2037 					       ring_type, ring_num,
2038 					       srng->num_entries);
2039 
2040 	dp_srng_set_nf_thresholds(soc, srng, &ring_params);
2041 
2042 	if (srng->cached)
2043 		ring_params.flags |= HAL_SRNG_CACHED_DESC;
2044 
2045 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
2046 					mac_id, &ring_params);
2047 
2048 	if (!srng->hal_srng) {
2049 		dp_srng_free(soc, srng);
2050 		return QDF_STATUS_E_FAILURE;
2051 	}
2052 
2053 	return QDF_STATUS_SUCCESS;
2054 }
2055 
2056 qdf_export_symbol(dp_srng_init);
2057 
2058 /*
2059  * dp_srng_alloc() - Allocate memory for SRNG
2060  * @soc  : Data path soc handle
2061  * @srng : SRNG pointer
2062  * @ring_type : Ring Type
2063  * @num_entries: Number of entries
 * @cached: true to back the ring with cacheable memory rather than
 *          DMA-coherent memory
 *
 * Return: QDF_STATUS
2067  */
2068 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
2069 			 int ring_type, uint32_t num_entries,
2070 			 bool cached)
2071 {
2072 	hal_soc_handle_t hal_soc = soc->hal_soc;
2073 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
2074 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
2075 
2076 	if (srng->base_vaddr_unaligned) {
2077 		dp_init_err("%pK: Ring type: %d, is already allocated",
2078 			    soc, ring_type);
2079 		return QDF_STATUS_SUCCESS;
2080 	}
2081 
2082 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
2083 	srng->hal_srng = NULL;
2084 	srng->alloc_size = num_entries * entry_size;
2085 	srng->num_entries = num_entries;
2086 	srng->cached = cached;
2087 
2088 	if (!cached) {
2089 		srng->base_vaddr_aligned =
2090 		    dp_srng_aligned_mem_alloc_consistent(soc,
2091 							 srng,
2092 							 ring_type);
2093 	} else {
2094 		srng->base_vaddr_aligned = qdf_aligned_malloc(
2095 					&srng->alloc_size,
2096 					&srng->base_vaddr_unaligned,
2097 					&srng->base_paddr_unaligned,
2098 					&srng->base_paddr_aligned,
2099 					DP_RING_BASE_ALIGN);
2100 	}
2101 
2102 	if (!srng->base_vaddr_aligned)
2103 		return QDF_STATUS_E_NOMEM;
2104 
2105 	return QDF_STATUS_SUCCESS;
2106 }
2107 
2108 qdf_export_symbol(dp_srng_alloc);
2109 
2110 /*
2111  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
2112  * @soc: DP SOC handle
2113  * @srng: source ring structure
2114  * @ring_type: type of ring
2115  * @ring_num: ring number
2116  *
2117  * Return: None
2118  */
2119 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
2120 		    int ring_type, int ring_num)
2121 {
2122 	if (!srng->hal_srng) {
2123 		dp_init_err("%pK: Ring type: %d, num:%d not setup",
2124 			    soc, ring_type, ring_num);
2125 		return;
2126 	}
2127 
2128 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
2129 	srng->hal_srng = NULL;
2130 }
2131 
2132 qdf_export_symbol(dp_srng_deinit);
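/*
 * Lifecycle sketch (illustrative only): an SRNG moves through
 * alloc -> init -> deinit -> free. For example, for REO destination
 * ring 0 (the entry count is an arbitrary placeholder):
 *
 *	if (dp_srng_alloc(soc, &soc->reo_dest_ring[0], REO_DST,
 *			  num_entries, false))
 *		goto fail;
 *	if (dp_srng_init(soc, &soc->reo_dest_ring[0], REO_DST, 0, 0))
 *		goto fail;
 *	...
 *	dp_srng_deinit(soc, &soc->reo_dest_ring[0], REO_DST, 0);
 *	dp_srng_free(soc, &soc->reo_dest_ring[0]);
 */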
2133 
2134 /* TODO: Need this interface from HIF */
2135 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
2136 
2137 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2138 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2139 			 hal_ring_handle_t hal_ring_hdl)
2140 {
2141 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2142 	uint32_t hp, tp;
2143 	uint8_t ring_id;
2144 
2145 	if (!int_ctx)
2146 		return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2147 
2148 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2149 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2150 
2151 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2152 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
2153 
2154 	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2155 }
2156 
2157 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2158 			hal_ring_handle_t hal_ring_hdl)
2159 {
2160 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2161 	uint32_t hp, tp;
2162 	uint8_t ring_id;
2163 
2164 	if (!int_ctx)
2165 		return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2166 
2167 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2168 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2169 
2170 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2171 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
2172 
2173 	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2174 }
2175 
2176 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
2177 					      uint8_t hist_group_id)
2178 {
2179 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2180 			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
2181 }
2182 
2183 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
2184 					     uint8_t hist_group_id)
2185 {
2186 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2187 			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
2188 }
2189 #else
2190 
2191 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
2192 					      uint8_t hist_group_id)
2193 {
2194 }
2195 
2196 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
2197 					     uint8_t hist_group_id)
2198 {
2199 }
2200 
2201 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
2202 
2203 /*
2204  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
2205  * @soc: DP soc handle
2206  * @work_done: work done in softirq context
2207  * @start_time: start time for the softirq
2208  *
2209  * Return: enum with yield code
2210  */
2211 enum timer_yield_status
2212 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
2213 			  uint64_t start_time)
2214 {
2215 	uint64_t cur_time = qdf_get_log_timestamp();
2216 
2217 	if (!work_done)
2218 		return DP_TIMER_WORK_DONE;
2219 
2220 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
2221 		return DP_TIMER_TIME_EXHAUST;
2222 
2223 	return DP_TIMER_NO_YIELD;
2224 }
2225 
2226 qdf_export_symbol(dp_should_timer_irq_yield);
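/*
 * Usage sketch (illustrative; dp_interrupt_timer() below follows this
 * pattern): a poll loop keeps reaping until this helper asks it to
 * yield, either because no work remains or because the
 * DP_MAX_TIMER_EXEC_TIME_TICKS budget is spent. reap_rings() is a
 * hypothetical stand-in for the actual ring processing.
 *
 *	uint64_t start = qdf_get_log_timestamp();
 *	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
 *	uint32_t work;
 *
 *	while (yield == DP_TIMER_NO_YIELD) {
 *		work = reap_rings();
 *		yield = dp_should_timer_irq_yield(soc, work, start);
 *	}
 */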
2227 
2228 /**
2229  * dp_process_lmac_rings() - Process LMAC rings
2230  * @int_ctx: interrupt context
2231  * @total_budget: budget of work which can be done
2232  *
2233  * Return: work done
2234  */
2235 static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
2236 {
2237 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2238 	struct dp_soc *soc = int_ctx->soc;
2239 	uint32_t remaining_quota = total_budget;
2240 	struct dp_pdev *pdev = NULL;
	uint32_t work_done = 0;
2242 	int budget = total_budget;
2243 	int ring = 0;
2244 
2245 	/* Process LMAC interrupts */
2246 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
2247 		int mac_for_pdev = ring;
2248 
2249 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
2250 		if (!pdev)
2251 			continue;
2252 		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
2253 			work_done = dp_monitor_process(soc, int_ctx,
2254 						       mac_for_pdev,
2255 						       remaining_quota);
2256 			if (work_done)
2257 				intr_stats->num_rx_mon_ring_masks++;
2258 			budget -= work_done;
2259 			if (budget <= 0)
2260 				goto budget_done;
2261 			remaining_quota = budget;
2262 		}
2263 
2264 		if (int_ctx->rxdma2host_ring_mask &
2265 				(1 << mac_for_pdev)) {
2266 			work_done = dp_rxdma_err_process(int_ctx, soc,
2267 							 mac_for_pdev,
2268 							 remaining_quota);
2269 			if (work_done)
2270 				intr_stats->num_rxdma2host_ring_masks++;
			budget -= work_done;
2272 			if (budget <= 0)
2273 				goto budget_done;
2274 			remaining_quota = budget;
2275 		}
2276 
2277 		if (int_ctx->host2rxdma_ring_mask &
2278 					(1 << mac_for_pdev)) {
2279 			union dp_rx_desc_list_elem_t *desc_list = NULL;
2280 			union dp_rx_desc_list_elem_t *tail = NULL;
2281 			struct dp_srng *rx_refill_buf_ring;
2282 
2283 			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
2284 				rx_refill_buf_ring =
2285 					&soc->rx_refill_buf_ring[mac_for_pdev];
2286 			else
2287 				rx_refill_buf_ring =
2288 					&soc->rx_refill_buf_ring[pdev->lmac_id];
2289 
2290 			intr_stats->num_host2rxdma_ring_masks++;
2291 			DP_STATS_INC(pdev, replenish.low_thresh_intrs,
2292 				     1);
2293 			dp_rx_buffers_replenish(soc, mac_for_pdev,
2294 						rx_refill_buf_ring,
2295 						&soc->rx_desc_buf[mac_for_pdev],
2296 						0, &desc_list, &tail);
2297 		}
2298 	}
2299 
2300 budget_done:
2301 	return total_budget - budget;
2302 }
2303 
2304 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
2305 /**
2306  * dp_service_near_full_srngs() - Bottom half handler to process the near
2307  *				full IRQ on a SRNG
2308  * @dp_ctx: Datapath SoC handle
2309  * @dp_budget: Number of SRNGs which can be processed in a single attempt
2310  *		without rescheduling
2311  *
2312  * Return: remaining budget/quota for the soc device
2313  */
2314 static uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget)
2315 {
2316 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2317 	struct dp_soc *soc = int_ctx->soc;
2318 
2319 	/*
	 * The dp_service_near_full_srngs arch op should always be
	 * initialized when the NEAR FULL IRQ feature is enabled.
2322 	 */
2323 	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
2324 							dp_budget);
2325 }
2326 #endif
2327 
2328 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2329 
2330 /*
2331  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
2332  * @dp_ctx: DP SOC handle
 * @dp_budget: Number of frames/descriptors that can be processed in one shot
2334  *
2335  * Return: remaining budget/quota for the soc device
2336  */
2337 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
2338 {
2339 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2340 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2341 	struct dp_soc *soc = int_ctx->soc;
2342 	int ring = 0;
2343 	int index;
	uint32_t work_done = 0;
2345 	int budget = dp_budget;
2346 	uint8_t tx_mask = int_ctx->tx_ring_mask;
2347 	uint8_t rx_mask = int_ctx->rx_ring_mask;
2348 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
2349 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
2350 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2351 	uint32_t remaining_quota = dp_budget;
2352 
2353 	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
2354 			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
2355 			 reo_status_mask,
2356 			 int_ctx->rx_mon_ring_mask,
2357 			 int_ctx->host2rxdma_ring_mask,
2358 			 int_ctx->rxdma2host_ring_mask);
2359 
2360 	/* Process Tx completion interrupts first to return back buffers */
2361 	for (index = 0; index < soc->num_tcl_data_rings; index++) {
2362 		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
2363 			continue;
2364 		work_done = dp_tx_comp_handler(int_ctx,
2365 					       soc,
2366 					       soc->tx_comp_ring[index].hal_srng,
2367 					       index, remaining_quota);
2368 		if (work_done) {
2369 			intr_stats->num_tx_ring_masks[index]++;
2370 			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
2371 					 tx_mask, index, budget,
2372 					 work_done);
2373 		}
2374 		budget -= work_done;
2375 		if (budget <= 0)
2376 			goto budget_done;
2377 
2378 		remaining_quota = budget;
2379 	}
2380 
2381 	/* Process REO Exception ring interrupt */
2382 	if (rx_err_mask) {
2383 		work_done = dp_rx_err_process(int_ctx, soc,
2384 					      soc->reo_exception_ring.hal_srng,
2385 					      remaining_quota);
2386 
2387 		if (work_done) {
2388 			intr_stats->num_rx_err_ring_masks++;
2389 			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
2390 					 work_done, budget);
2391 		}
2392 
		budget -= work_done;
		if (budget <= 0)
			goto budget_done;
2397 		remaining_quota = budget;
2398 	}
2399 
2400 	/* Process Rx WBM release ring interrupt */
2401 	if (rx_wbm_rel_mask) {
2402 		work_done = dp_rx_wbm_err_process(int_ctx, soc,
2403 						  soc->rx_rel_ring.hal_srng,
2404 						  remaining_quota);
2405 
2406 		if (work_done) {
2407 			intr_stats->num_rx_wbm_rel_ring_masks++;
2408 			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
2409 					 work_done, budget);
2410 		}
2411 
		budget -= work_done;
		if (budget <= 0)
			goto budget_done;
2416 		remaining_quota = budget;
2417 	}
2418 
2419 	/* Process Rx interrupts */
2420 	if (rx_mask) {
2421 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
2422 			if (!(rx_mask & (1 << ring)))
2423 				continue;
2424 			work_done = soc->arch_ops.dp_rx_process(int_ctx,
2425 						  soc->reo_dest_ring[ring].hal_srng,
2426 						  ring,
2427 						  remaining_quota);
2428 			if (work_done) {
2429 				intr_stats->num_rx_ring_masks[ring]++;
2430 				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
2431 						 rx_mask, ring,
2432 						 work_done, budget);
				budget -= work_done;
2434 				if (budget <= 0)
2435 					goto budget_done;
2436 				remaining_quota = budget;
2437 			}
2438 		}
2439 	}
2440 
2441 	if (reo_status_mask) {
2442 		if (dp_reo_status_ring_handler(int_ctx, soc))
2443 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2444 	}
2445 
2446 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2447 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2448 		if (work_done) {
			budget -= work_done;
2450 			if (budget <= 0)
2451 				goto budget_done;
2452 			remaining_quota = budget;
2453 		}
2454 	}
2455 
2456 	qdf_lro_flush(int_ctx->lro_ctx);
2457 	intr_stats->num_masks++;
2458 
2459 budget_done:
2460 	return dp_budget - budget;
2461 }
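/*
 * Illustrative note on the budget accounting above: each reap pass
 * consumes budget and the handler returns the amount of work actually
 * done (dp_budget - budget). For example, with dp_budget = 64, a tx
 * completion pass doing 20 units and an rx pass doing 44 units drives
 * budget to 0, so the function returns 64, signalling the executing
 * NAPI/ext-group layer that the quota was fully used and the context
 * should be rescheduled.
 */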
2462 
2463 #else /* QCA_HOST_MODE_WIFI_DISABLED */
2464 
2465 /*
2466  * dp_service_srngs() - Top level handler for DP Monitor Ring interrupts
2467  * @dp_ctx: DP SOC handle
 * @dp_budget: Number of frames/descriptors that can be processed in one shot
2469  *
2470  * Return: remaining budget/quota for the soc device
2471  */
2472 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
2473 {
2474 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2475 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2476 	struct dp_soc *soc = int_ctx->soc;
2477 	uint32_t remaining_quota = dp_budget;
	uint32_t work_done = 0;
2479 	int budget = dp_budget;
2480 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2481 
2482 	if (reo_status_mask) {
2483 		if (dp_reo_status_ring_handler(int_ctx, soc))
2484 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2485 	}
2486 
2487 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2488 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2489 		if (work_done) {
			budget -= work_done;
2491 			if (budget <= 0)
2492 				goto budget_done;
2493 			remaining_quota = budget;
2494 		}
2495 	}
2496 
2497 	qdf_lro_flush(int_ctx->lro_ctx);
2498 	intr_stats->num_masks++;
2499 
2500 budget_done:
2501 	return dp_budget - budget;
2502 }
2503 
2504 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2505 
/*
 * dp_interrupt_timer() - timer poll for interrupts
 * @arg: SoC Handle
 *
 * Return: None
 */
2513 static void dp_interrupt_timer(void *arg)
2514 {
2515 	struct dp_soc *soc = (struct dp_soc *) arg;
2516 	struct dp_pdev *pdev = soc->pdev_list[0];
2517 	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
	uint32_t work_done = 0, total_work_done = 0;
2519 	int budget = 0xffff, i;
2520 	uint32_t remaining_quota = budget;
2521 	uint64_t start_time;
2522 	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
2523 	uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
2524 	uint32_t lmac_iter;
2525 	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
2526 	enum reg_wifi_band mon_band;
2527 
2528 	/*
	 * This logic puts all data path interfacing rings (UMAC/LMAC)
	 * and monitor rings into polling mode when NSS offload is disabled.
2531 	 */
2532 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx) &&
2533 	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2534 		if (qdf_atomic_read(&soc->cmn_init_done)) {
2535 			for (i = 0; i < wlan_cfg_get_num_contexts(
2536 						soc->wlan_cfg_ctx); i++)
2537 				dp_service_srngs(&soc->intr_ctx[i], 0xffff);
2538 
2539 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
2540 		}
2541 		return;
2542 	}
2543 
2544 	if (!qdf_atomic_read(&soc->cmn_init_done))
2545 		return;
2546 
2547 	if (dp_monitor_is_chan_band_known(pdev)) {
2548 		mon_band = dp_monitor_get_chan_band(pdev);
2549 		lmac_id = pdev->ch_band_lmac_id_mapping[mon_band];
2550 		if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
2551 			dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
2552 			dp_srng_record_timer_entry(soc, dp_intr_id);
2553 		}
2554 	}
2555 
2556 	start_time = qdf_get_log_timestamp();
2557 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
2558 
2559 	while (yield == DP_TIMER_NO_YIELD) {
2560 		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
2561 			if (lmac_iter == lmac_id)
2562 				work_done = dp_monitor_process(soc,
2563 						&soc->intr_ctx[dp_intr_id],
2564 						lmac_iter, remaining_quota);
2565 			else
2566 				work_done =
2567 					dp_monitor_drop_packets_for_mac(pdev,
2568 							     lmac_iter,
2569 							     remaining_quota);
2570 			if (work_done) {
				budget -= work_done;
2572 				if (budget <= 0) {
2573 					yield = DP_TIMER_WORK_EXHAUST;
2574 					goto budget_done;
2575 				}
2576 				remaining_quota = budget;
2577 				total_work_done += work_done;
2578 			}
2579 		}
2580 
2581 		yield = dp_should_timer_irq_yield(soc, total_work_done,
2582 						  start_time);
2583 		total_work_done = 0;
2584 	}
2585 
2586 budget_done:
2587 	if (yield == DP_TIMER_WORK_EXHAUST ||
2588 	    yield == DP_TIMER_TIME_EXHAUST)
2589 		qdf_timer_mod(&soc->int_timer, 1);
2590 	else
2591 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
2592 
2593 	if (lmac_id != DP_MON_INVALID_LMAC_ID)
2594 		dp_srng_record_timer_exit(soc, dp_intr_id);
2595 }
2596 
2597 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2598 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2599 					struct dp_intr *intr_ctx)
2600 {
2601 	if (intr_ctx->rx_mon_ring_mask)
2602 		return true;
2603 
2604 	return false;
2605 }
2606 #else
2607 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2608 					struct dp_intr *intr_ctx)
2609 {
2610 	return false;
2611 }
2612 #endif
2613 
2614 /*
2615  * dp_soc_attach_poll() - Register handlers for DP interrupts
2616  * @txrx_soc: DP SOC handle
2617  *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask and
 * rx_mon_ring_mask to indicate the rings that are processed by the handler.
2621  *
2622  * Return: 0 for success, nonzero for failure.
2623  */
2624 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
2625 {
2626 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2627 	int i;
2628 	int lmac_id = 0;
2629 
2630 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2631 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
2632 	soc->intr_mode = DP_INTR_POLL;
2633 
2634 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2635 		soc->intr_ctx[i].dp_intr_id = i;
2636 		soc->intr_ctx[i].tx_ring_mask =
2637 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
2638 		soc->intr_ctx[i].rx_ring_mask =
2639 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
2640 		soc->intr_ctx[i].rx_mon_ring_mask =
2641 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
2642 		soc->intr_ctx[i].rx_err_ring_mask =
2643 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
2644 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
2645 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
2646 		soc->intr_ctx[i].reo_status_ring_mask =
2647 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
2648 		soc->intr_ctx[i].rxdma2host_ring_mask =
2649 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
2650 		soc->intr_ctx[i].soc = soc;
2651 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
2652 
2653 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
2654 			hif_event_history_init(soc->hif_handle, i);
2655 			soc->mon_intr_id_lmac_map[lmac_id] = i;
2656 			lmac_id++;
2657 		}
2658 	}
2659 
2660 	qdf_timer_init(soc->osdev, &soc->int_timer,
2661 			dp_interrupt_timer, (void *)soc,
2662 			QDF_TIMER_TYPE_WAKE_APPS);
2663 
2664 	return QDF_STATUS_SUCCESS;
2665 }
2666 
2667 /**
2668  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
 * @soc: DP soc handle
 *
 * Set the appropriate interrupt mode flag in the soc.
 *
 * Return: None
 */
2673 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
2674 {
2675 	uint32_t msi_base_data, msi_vector_start;
2676 	int msi_vector_count, ret;
2677 
2678 	soc->intr_mode = DP_INTR_INTEGRATED;
2679 
2680 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2681 	    (soc->cdp_soc.ol_ops->get_con_mode &&
2682 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
2683 		soc->intr_mode = DP_INTR_POLL;
2684 	} else {
2685 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
2686 						  &msi_vector_count,
2687 						  &msi_base_data,
2688 						  &msi_vector_start);
2689 		if (ret)
2690 			return;
2691 
2692 		soc->intr_mode = DP_INTR_MSI;
2693 	}
2694 }
2695 
2696 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
2697 #if defined(DP_INTR_POLL_BOTH)
2698 /*
2699  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
2700  * @txrx_soc: DP SOC handle
2701  *
2702  * Call the appropriate attach function based on the mode of operation.
2703  * This is a WAR for enabling monitor mode.
2704  *
 * Return: 0 for success, nonzero for failure.
2706  */
2707 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2708 {
2709 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2710 
2711 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2712 	    (soc->cdp_soc.ol_ops->get_con_mode &&
2713 	     soc->cdp_soc.ol_ops->get_con_mode() ==
2714 	     QDF_GLOBAL_MONITOR_MODE)) {
2715 		dp_info("Poll mode");
2716 		return dp_soc_attach_poll(txrx_soc);
2717 	} else {
2718 		dp_info("Interrupt  mode");
2719 		return dp_soc_interrupt_attach(txrx_soc);
2720 	}
2721 }
2722 #else
2723 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
2724 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2725 {
2726 	return dp_soc_attach_poll(txrx_soc);
2727 }
2728 #else
2729 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2730 {
2731 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2732 
2733 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx))
2734 		return dp_soc_attach_poll(txrx_soc);
2735 	else
2736 		return dp_soc_interrupt_attach(txrx_soc);
2737 }
2738 #endif
2739 #endif
2740 
2741 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
2742 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
2743 {
2744 	int j;
2745 	int num_irq = 0;
2746 
2747 	int tx_mask =
2748 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
2749 	int rx_mask =
2750 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
2751 	int rx_mon_mask =
2752 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
2753 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
2754 					soc->wlan_cfg_ctx, intr_ctx_num);
2755 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
2756 					soc->wlan_cfg_ctx, intr_ctx_num);
2757 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
2758 					soc->wlan_cfg_ctx, intr_ctx_num);
2759 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
2760 					soc->wlan_cfg_ctx, intr_ctx_num);
2761 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
2762 					soc->wlan_cfg_ctx, intr_ctx_num);
2763 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
2764 					soc->wlan_cfg_ctx, intr_ctx_num);
2765 
2766 	soc->intr_mode = DP_INTR_INTEGRATED;
2767 
2768 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
2769 
2770 		if (tx_mask & (1 << j)) {
2771 			irq_id_map[num_irq++] =
2772 				(wbm2host_tx_completions_ring1 - j);
2773 		}
2774 
2775 		if (rx_mask & (1 << j)) {
2776 			irq_id_map[num_irq++] =
2777 				(reo2host_destination_ring1 - j);
2778 		}
2779 
2780 		if (rxdma2host_ring_mask & (1 << j)) {
2781 			irq_id_map[num_irq++] =
2782 				rxdma2host_destination_ring_mac1 - j;
2783 		}
2784 
2785 		if (host2rxdma_ring_mask & (1 << j)) {
2786 			irq_id_map[num_irq++] =
2787 				host2rxdma_host_buf_ring_mac1 -	j;
2788 		}
2789 
2790 		if (host2rxdma_mon_ring_mask & (1 << j)) {
2791 			irq_id_map[num_irq++] =
2792 				host2rxdma_monitor_ring1 - j;
2793 		}
2794 
2795 		if (rx_mon_mask & (1 << j)) {
2796 			irq_id_map[num_irq++] =
2797 				ppdu_end_interrupts_mac1 - j;
2798 			irq_id_map[num_irq++] =
2799 				rxdma2host_monitor_status_ring_mac1 - j;
2800 			irq_id_map[num_irq++] =
2801 				rxdma2host_monitor_destination_mac1 - j;
2802 		}
2803 
2804 		if (rx_wbm_rel_ring_mask & (1 << j))
2805 			irq_id_map[num_irq++] = wbm2host_rx_release;
2806 
2807 		if (rx_err_ring_mask & (1 << j))
2808 			irq_id_map[num_irq++] = reo2host_exception;
2809 
2810 		if (reo_status_ring_mask & (1 << j))
2811 			irq_id_map[num_irq++] = reo2host_status;
2812 
2813 	}
2814 	*num_irq_r = num_irq;
2815 }
2816 
2817 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
2818 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
2819 		int msi_vector_count, int msi_vector_start)
2820 {
2821 	int tx_mask = wlan_cfg_get_tx_ring_mask(
2822 					soc->wlan_cfg_ctx, intr_ctx_num);
2823 	int rx_mask = wlan_cfg_get_rx_ring_mask(
2824 					soc->wlan_cfg_ctx, intr_ctx_num);
2825 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
2826 					soc->wlan_cfg_ctx, intr_ctx_num);
2827 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
2828 					soc->wlan_cfg_ctx, intr_ctx_num);
2829 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
2830 					soc->wlan_cfg_ctx, intr_ctx_num);
2831 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
2832 					soc->wlan_cfg_ctx, intr_ctx_num);
2833 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
2834 					soc->wlan_cfg_ctx, intr_ctx_num);
2835 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
2836 					soc->wlan_cfg_ctx, intr_ctx_num);
2837 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
2838 					soc->wlan_cfg_ctx, intr_ctx_num);
2839 	int rx_near_full_grp_1_mask =
2840 		wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
2841 						     intr_ctx_num);
2842 	int rx_near_full_grp_2_mask =
2843 		wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
2844 						     intr_ctx_num);
2845 	int tx_ring_near_full_mask =
2846 		wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
2847 						    intr_ctx_num);
2848 
2849 	unsigned int vector =
2850 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
2851 	int num_irq = 0;
2852 
2853 	soc->intr_mode = DP_INTR_MSI;
2854 
2855 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
2856 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
2857 	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask |
2858 	    rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
2859 	    tx_ring_near_full_mask)
2860 		irq_id_map[num_irq++] =
2861 			pld_get_msi_irq(soc->osdev->dev, vector);
2862 
2863 	*num_irq_r = num_irq;
2864 }
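/*
 * Worked example (illustrative): with msi_vector_start = 1 and
 * msi_vector_count = 3 (hypothetical values from the platform MSI
 * assignment), interrupt contexts 0..5 map round-robin to MSI vectors
 * 1, 2, 3, 1, 2, 3. All ring masks owned by a context therefore share
 * that context's single MSI IRQ.
 */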
2865 
2866 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
2867 				    int *irq_id_map, int *num_irq)
2868 {
2869 	int msi_vector_count, ret;
2870 	uint32_t msi_base_data, msi_vector_start;
2871 
2872 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
2873 					    &msi_vector_count,
2874 					    &msi_base_data,
2875 					    &msi_vector_start);
	if (ret) {
		dp_soc_interrupt_map_calculate_integrated(soc,
				intr_ctx_num, irq_id_map, num_irq);
		return;
	}

	dp_soc_interrupt_map_calculate_msi(soc,
			intr_ctx_num, irq_id_map, num_irq,
			msi_vector_count, msi_vector_start);
2884 }
2885 
2886 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
2887 /**
 * dp_soc_near_full_interrupt_attach() - Register handler for DP near full irq
 * @soc: DP soc handle
 * @num_irq: number of IRQs in the map
 * @irq_id_map: IRQ map
 * @intr_id: interrupt context ID
 *
 * Return: 0 for success, nonzero for failure.
2895  */
2896 static inline int
2897 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
2898 				  int irq_id_map[], int intr_id)
2899 {
2900 	return hif_register_ext_group(soc->hif_handle,
2901 				      num_irq, irq_id_map,
2902 				      dp_service_near_full_srngs,
2903 				      &soc->intr_ctx[intr_id], "dp_nf_intr",
2904 				      HIF_EXEC_NAPI_TYPE,
2905 				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
2906 }
2907 #else
2908 static inline int
2909 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
2910 				  int *irq_id_map, int intr_id)
2911 {
2912 	return 0;
2913 }
2914 #endif
2915 
2916 /*
2917  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
2918  * @txrx_soc: DP SOC handle
2919  *
2920  * Return: none
2921  */
2922 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
2923 {
2924 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2925 	int i;
2926 
2927 	if (soc->intr_mode == DP_INTR_POLL) {
2928 		qdf_timer_free(&soc->int_timer);
2929 	} else {
2930 		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
2931 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
2932 		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
2933 	}
2934 
2935 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2936 		soc->intr_ctx[i].tx_ring_mask = 0;
2937 		soc->intr_ctx[i].rx_ring_mask = 0;
2938 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
2939 		soc->intr_ctx[i].rx_err_ring_mask = 0;
2940 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
2941 		soc->intr_ctx[i].reo_status_ring_mask = 0;
2942 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
2943 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
2944 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
2945 		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
2946 		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
2947 		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
2948 
2949 		hif_event_history_deinit(soc->hif_handle, i);
2950 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
2951 	}
2952 
2953 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2954 		    sizeof(soc->mon_intr_id_lmac_map),
2955 		    DP_MON_INVALID_LMAC_ID);
2956 }
2957 
2958 /*
2959  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
2960  * @txrx_soc: DP SOC handle
2961  *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask and
 * rx_mon_ring_mask to indicate the rings that are processed by the handler.
 *
 * Return: 0 for success, nonzero for failure.
2967  */
2968 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
2969 {
2970 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2971 
2972 	int i = 0;
2973 	int num_irq = 0;
2974 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
2975 	int lmac_id = 0;
2976 
2977 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2978 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
2979 
2980 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2981 		int ret = 0;
2982 
2983 		/* Map of IRQ ids registered with one interrupt context */
2984 		int irq_id_map[HIF_MAX_GRP_IRQ];
2985 
2986 		int tx_mask =
2987 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
2988 		int rx_mask =
2989 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
2990 		int rx_mon_mask =
2991 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
2992 		int rx_err_ring_mask =
2993 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
2994 		int rx_wbm_rel_ring_mask =
2995 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
2996 		int reo_status_ring_mask =
2997 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
2998 		int rxdma2host_ring_mask =
2999 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3000 		int host2rxdma_ring_mask =
3001 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
3002 		int host2rxdma_mon_ring_mask =
3003 			wlan_cfg_get_host2rxdma_mon_ring_mask(
3004 				soc->wlan_cfg_ctx, i);
3005 		int rx_near_full_grp_1_mask =
3006 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3007 							     i);
3008 		int rx_near_full_grp_2_mask =
3009 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3010 							     i);
3011 		int tx_ring_near_full_mask =
3012 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3013 							    i);
3014 
3015 		soc->intr_ctx[i].dp_intr_id = i;
3016 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
3017 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
3018 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
3019 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
3020 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
3021 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
3022 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
3023 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
3024 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
3025 			 host2rxdma_mon_ring_mask;
3026 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
3027 						rx_near_full_grp_1_mask;
3028 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
3029 						rx_near_full_grp_2_mask;
3030 		soc->intr_ctx[i].tx_ring_near_full_mask =
3031 						tx_ring_near_full_mask;
3032 
3033 		soc->intr_ctx[i].soc = soc;
3034 
3035 		num_irq = 0;
3036 
3037 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
3038 					       &num_irq);
3039 
3040 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3041 		    tx_ring_near_full_mask) {
3042 			dp_soc_near_full_interrupt_attach(soc, num_irq,
3043 							  irq_id_map, i);
3044 		} else {
3045 			ret = hif_register_ext_group(soc->hif_handle,
3046 				num_irq, irq_id_map, dp_service_srngs,
3047 				&soc->intr_ctx[i], "dp_intr",
3048 				HIF_EXEC_NAPI_TYPE,
3049 				QCA_NAPI_DEF_SCALE_BIN_SHIFT);
3050 		}
3051 
3052 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
3053 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
3054 
3055 		if (ret) {
3056 			dp_init_err("%pK: failed, ret = %d", soc, ret);
3057 			dp_soc_interrupt_detach(txrx_soc);
3058 			return QDF_STATUS_E_FAILURE;
3059 		}
3060 
3061 		hif_event_history_init(soc->hif_handle, i);
3062 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3063 
3064 		if (rx_err_ring_mask)
3065 			rx_err_ring_intr_ctxt_id = i;
3066 
3067 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3068 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3069 			lmac_id++;
3070 		}
3071 	}
3072 
3073 	hif_configure_ext_group_interrupts(soc->hif_handle);
3074 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
3075 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
3076 						  rx_err_ring_intr_ctxt_id, 0);
3077 
3078 	return QDF_STATUS_SUCCESS;
3079 }
3080 
3081 #define AVG_MAX_MPDUS_PER_TID 128
3082 #define AVG_TIDS_PER_CLIENT 2
3083 #define AVG_FLOWS_PER_TID 2
3084 #define AVG_MSDUS_PER_FLOW 128
3085 #define AVG_MSDUS_PER_MPDU 4
3086 
3087 /*
3088  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
3089  * @soc: DP SOC handle
3090  * @mac_id: mac id
3091  *
3092  * Return: none
3093  */
3094 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
3095 {
3096 	struct qdf_mem_multi_page_t *pages;
3097 
3098 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3099 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3100 	} else {
3101 		pages = &soc->link_desc_pages;
3102 	}
3103 
3104 	if (!pages) {
3105 		dp_err("can not get link desc pages");
3106 		QDF_ASSERT(0);
3107 		return;
3108 	}
3109 
3110 	if (pages->dma_pages) {
3111 		wlan_minidump_remove((void *)
3112 				     pages->dma_pages->page_v_addr_start,
3113 				     pages->num_pages * pages->page_size,
3114 				     soc->ctrl_psoc,
3115 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3116 				     "hw_link_desc_bank");
3117 		dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
3118 					     pages, 0, false);
3119 	}
3120 }
3121 
3122 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
3123 
3124 /*
3125  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
3126  * @soc: DP SOC handle
3127  * @mac_id: mac id
3128  *
 * Allocates memory pages for link descriptors; the page size is 4K for
 * MCL and 2MB for WIN. If the mac_id is invalid, link descriptor pages
 * are allocated for regular RX/TX; if a valid mac_id is given, link
 * descriptors are allocated for the RX monitor mode.
3133  *
3134  * Return: QDF_STATUS_SUCCESS: Success
3135  *	   QDF_STATUS_E_FAILURE: Failure
3136  */
3137 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
3138 {
3139 	hal_soc_handle_t hal_soc = soc->hal_soc;
3140 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3141 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
3142 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
3143 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
3144 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
3145 	uint32_t num_mpdu_links_per_queue_desc =
3146 		hal_num_mpdu_links_per_queue_desc(hal_soc);
3147 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3148 	uint32_t *total_link_descs, total_mem_size;
3149 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
3150 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
3151 	uint32_t num_entries;
3152 	struct qdf_mem_multi_page_t *pages;
3153 	struct dp_srng *dp_srng;
3154 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
3155 
	/* Only Tx queue descriptors are allocated from the common link
	 * descriptor pool. Rx queue descriptors (REO queue extension
	 * descriptors) are not included because they are expected to be
	 * allocated contiguously with the REO queue descriptors.
	 */
3161 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3162 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3163 		/* dp_monitor_get_link_desc_pages returns NULL only
		 * if the monitor SOC is NULL
3165 		 */
3166 		if (!pages) {
3167 			dp_err("can not get link desc pages");
3168 			QDF_ASSERT(0);
3169 			return QDF_STATUS_E_FAULT;
3170 		}
3171 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
3172 		num_entries = dp_srng->alloc_size /
3173 			hal_srng_get_entrysize(soc->hal_soc,
3174 					       RXDMA_MONITOR_DESC);
3175 		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
3176 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
3177 			      MINIDUMP_STR_SIZE);
3178 	} else {
3179 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3180 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
3181 
3182 		num_mpdu_queue_descs = num_mpdu_link_descs /
3183 			num_mpdu_links_per_queue_desc;
3184 
3185 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3186 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
3187 			num_msdus_per_link_desc;
3188 
3189 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3190 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
3191 
3192 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
3193 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
3194 
3195 		pages = &soc->link_desc_pages;
3196 		total_link_descs = &soc->total_link_descs;
3197 		qdf_str_lcopy(minidump_str, "link_desc_bank",
3198 			      MINIDUMP_STR_SIZE);
3199 	}
3200 
3201 	/* If link descriptor banks are allocated, return from here */
3202 	if (pages->num_pages)
3203 		return QDF_STATUS_SUCCESS;
3204 
3205 	/* Round up to power of 2 */
3206 	*total_link_descs = 1;
3207 	while (*total_link_descs < num_entries)
3208 		*total_link_descs <<= 1;
3209 
3210 	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
3211 		     soc, *total_link_descs, link_desc_size);
3212 	total_mem_size =  *total_link_descs * link_desc_size;
3213 	total_mem_size += link_desc_align;
3214 
3215 	dp_init_info("%pK: total_mem_size: %d",
3216 		     soc, total_mem_size);
3217 
3218 	dp_set_max_page_size(pages, max_alloc_size);
3219 	dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
3220 				      pages,
3221 				      link_desc_size,
3222 				      *total_link_descs,
3223 				      0, false);
3224 	if (!pages->num_pages) {
3225 		dp_err("Multi page alloc fail for hw link desc pool");
3226 		return QDF_STATUS_E_FAULT;
3227 	}
3228 
3229 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
3230 			  pages->num_pages * pages->page_size,
3231 			  soc->ctrl_psoc,
3232 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3233 			  "hw_link_desc_bank");
3234 
3235 	return QDF_STATUS_SUCCESS;
3236 }
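/*
 * Worked sizing example (illustrative; assumes max_clients = 64 and a
 * HAL reporting num_mpdus_per_link_desc = 6, num_msdus_per_link_desc
 * = 7 and num_mpdu_links_per_queue_desc = 64 -- these HAL values are
 * hypothetical and target-dependent):
 *
 *	num_mpdu_link_descs    = (64 * 2 * 128) / 6       = 2730
 *	num_mpdu_queue_descs   = 2730 / 64                = 42
 *	num_tx_msdu_link_descs = (64 * 2 * 2 * 128) / 7   = 4681
 *	num_rx_msdu_link_descs = (64 * 2 * 128 * 4) / 6   = 10922
 *	num_entries            = 2730 + 42 + 4681 + 10922 = 18375
 *
 * which the power-of-two round-up above turns into a total_link_descs
 * of 32768 before the multi-page allocation.
 */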
3237 
3238 /*
3239  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
3240  * @soc: DP SOC handle
3241  *
3242  * Return: none
3243  */
3244 static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
3245 {
3246 	uint32_t i;
3247 	uint32_t size = soc->wbm_idle_scatter_buf_size;
3248 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
3249 	qdf_dma_addr_t paddr;
3250 
3251 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
3252 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3253 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3254 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3255 			if (vaddr) {
3256 				qdf_mem_free_consistent(soc->osdev,
3257 							soc->osdev->dev,
3258 							size,
3259 							vaddr,
3260 							paddr,
3261 							0);
3262 				vaddr = NULL;
3263 			}
3264 		}
3265 	} else {
3266 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3267 				     soc->wbm_idle_link_ring.alloc_size,
3268 				     soc->ctrl_psoc,
3269 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3270 				     "wbm_idle_link_ring");
3271 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
3272 	}
3273 }
3274 
3275 /*
3276  * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
3277  * @soc: DP SOC handle
3278  *
 * Allocate memory for the WBM_IDLE_LINK srng ring if the total link
 * descriptor memory size is within the max allocation size; otherwise
 * allocate memory for the wbm_idle_scatter_buffers.
3282  *
3283  * Return: QDF_STATUS_SUCCESS: success
3284  *         QDF_STATUS_E_NO_MEM: No memory (Failure)
3285  */
3286 static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
3287 {
3288 	uint32_t entry_size, i;
3289 	uint32_t total_mem_size;
3290 	qdf_dma_addr_t *baseaddr = NULL;
3291 	struct dp_srng *dp_srng;
3292 	uint32_t ring_type;
3293 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3294 	uint32_t tlds;
3295 
3296 	ring_type = WBM_IDLE_LINK;
3297 	dp_srng = &soc->wbm_idle_link_ring;
3298 	tlds = soc->total_link_descs;
3299 
3300 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
3301 	total_mem_size = entry_size * tlds;
3302 
3303 	if (total_mem_size <= max_alloc_size) {
3304 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
3305 			dp_init_err("%pK: Link desc idle ring setup failed",
3306 				    soc);
3307 			goto fail;
3308 		}
3309 
3310 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3311 				  soc->wbm_idle_link_ring.alloc_size,
3312 				  soc->ctrl_psoc,
3313 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3314 				  "wbm_idle_link_ring");
3315 	} else {
3316 		uint32_t num_scatter_bufs;
3317 		uint32_t num_entries_per_buf;
3318 		uint32_t buf_size = 0;
3319 
3320 		soc->wbm_idle_scatter_buf_size =
3321 			hal_idle_list_scatter_buf_size(soc->hal_soc);
3322 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
3323 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
3324 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
3325 					soc->hal_soc, total_mem_size,
3326 					soc->wbm_idle_scatter_buf_size);
3327 
3328 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
3329 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3330 				  FL("scatter bufs size out of bounds"));
3331 			goto fail;
3332 		}
3333 
3334 		for (i = 0; i < num_scatter_bufs; i++) {
3335 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
3336 			buf_size = soc->wbm_idle_scatter_buf_size;
3337 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
3338 				qdf_mem_alloc_consistent(soc->osdev,
3339 							 soc->osdev->dev,
3340 							 buf_size,
3341 							 baseaddr);
3342 
3343 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
3344 				QDF_TRACE(QDF_MODULE_ID_DP,
3345 					  QDF_TRACE_LEVEL_ERROR,
3346 					  FL("Scatter lst memory alloc fail"));
3347 				goto fail;
3348 			}
3349 		}
3350 		soc->num_scatter_bufs = num_scatter_bufs;
3351 	}
3352 	return QDF_STATUS_SUCCESS;
3353 
3354 fail:
3355 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3356 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3357 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3358 
3359 		if (vaddr) {
3360 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
3361 						soc->wbm_idle_scatter_buf_size,
3362 						vaddr,
3363 						paddr, 0);
3364 			vaddr = NULL;
3365 		}
3366 	}
3367 	return QDF_STATUS_E_NOMEM;
3368 }
3369 
3370 qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
3371 
3372 /*
3373  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
3374  * @soc: DP SOC handle
3375  *
3376  * Return: QDF_STATUS_SUCCESS: success
3377  *         QDF_STATUS_E_FAILURE: failure
3378  */
3379 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
3380 {
3381 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
3382 
3383 	if (dp_srng->base_vaddr_unaligned) {
3384 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
3385 			return QDF_STATUS_E_FAILURE;
3386 	}
3387 	return QDF_STATUS_SUCCESS;
3388 }
3389 
3390 /*
3391  * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
3392  * @soc: DP SOC handle
3393  *
3394  * Return: None
3395  */
3396 static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
3397 {
3398 	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
3399 }
3400 
3401 /*
 * dp_link_desc_ring_replenish() - Replenish hw link desc rings
3403  * @soc: DP SOC handle
3404  * @mac_id: mac id
3405  *
3406  * Return: None
3407  */
3408 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
3409 {
3410 	uint32_t cookie = 0;
3411 	uint32_t page_idx = 0;
3412 	struct qdf_mem_multi_page_t *pages;
3413 	struct qdf_mem_dma_page_t *dma_pages;
3414 	uint32_t offset = 0;
3415 	uint32_t count = 0;
3416 	void *desc_srng;
3417 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3418 	uint32_t *total_link_descs_addr;
3419 	uint32_t total_link_descs;
3420 	uint32_t scatter_buf_num;
3421 	uint32_t num_entries_per_buf = 0;
3422 	uint32_t rem_entries;
3423 	uint32_t num_descs_per_page;
3424 	uint32_t num_scatter_bufs = 0;
3425 	uint8_t *scatter_buf_ptr;
3426 	void *desc;
3427 
3428 	num_scatter_bufs = soc->num_scatter_bufs;
3429 
3430 	if (mac_id == WLAN_INVALID_PDEV_ID) {
3431 		pages = &soc->link_desc_pages;
3432 		total_link_descs = soc->total_link_descs;
3433 		desc_srng = soc->wbm_idle_link_ring.hal_srng;
3434 	} else {
3435 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3436 		/* dp_monitor_get_link_desc_pages returns NULL only
		 * if the monitor SOC is NULL
3438 		 */
3439 		if (!pages) {
3440 			dp_err("can not get link desc pages");
3441 			QDF_ASSERT(0);
3442 			return;
3443 		}
3444 		total_link_descs_addr =
3445 				dp_monitor_get_total_link_descs(soc, mac_id);
3446 		total_link_descs = *total_link_descs_addr;
3447 		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
3448 	}
3449 
3450 	dma_pages = pages->dma_pages;
3451 	do {
3452 		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
3453 			     pages->page_size);
3454 		page_idx++;
3455 	} while (page_idx < pages->num_pages);
3456 
3457 	if (desc_srng) {
3458 		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
3459 		page_idx = 0;
3460 		count = 0;
3461 		offset = 0;
3462 		pages = &soc->link_desc_pages;
3463 		while ((desc = hal_srng_src_get_next(soc->hal_soc,
3464 						     desc_srng)) &&
3465 			(count < total_link_descs)) {
3466 			page_idx = count / pages->num_element_per_page;
3467 			offset = count % pages->num_element_per_page;
3468 			cookie = LINK_DESC_COOKIE(count, page_idx,
3469 						  soc->link_desc_id_start);
3470 
3471 			hal_set_link_desc_addr(soc->hal_soc, desc, cookie,
3472 					       dma_pages[page_idx].page_p_addr
3473 					       + (offset * link_desc_size));
3474 			count++;
3475 		}
3476 		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
3477 	} else {
3478 		/* Populate idle list scatter buffers with link descriptor
3479 		 * pointers
3480 		 */
3481 		scatter_buf_num = 0;
3482 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
3483 					soc->hal_soc,
3484 					soc->wbm_idle_scatter_buf_size);
3485 
3486 		scatter_buf_ptr = (uint8_t *)(
3487 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
3488 		rem_entries = num_entries_per_buf;
3489 		pages = &soc->link_desc_pages;
3490 		page_idx = 0; count = 0;
3491 		offset = 0;
3492 		num_descs_per_page = pages->num_element_per_page;
3493 
3494 		while (count < total_link_descs) {
3495 			page_idx = count / num_descs_per_page;
3496 			offset = count % num_descs_per_page;
3497 			cookie = LINK_DESC_COOKIE(count, page_idx,
3498 						  soc->link_desc_id_start);
3499 			hal_set_link_desc_addr(soc->hal_soc,
3500 					       (void *)scatter_buf_ptr,
3501 					       cookie,
3502 					       dma_pages[page_idx].page_p_addr +
3503 					       (offset * link_desc_size));
3504 			rem_entries--;
3505 			if (rem_entries) {
3506 				scatter_buf_ptr += link_desc_size;
3507 			} else {
3508 				rem_entries = num_entries_per_buf;
3509 				scatter_buf_num++;
3510 				if (scatter_buf_num >= num_scatter_bufs)
3511 					break;
3512 				scatter_buf_ptr = (uint8_t *)
3513 					(soc->wbm_idle_scatter_buf_base_vaddr[
3514 					 scatter_buf_num]);
3515 			}
3516 			count++;
3517 		}
3518 		/* Setup link descriptor idle list in HW */
3519 		hal_setup_link_idle_list(soc->hal_soc,
3520 			soc->wbm_idle_scatter_buf_base_paddr,
3521 			soc->wbm_idle_scatter_buf_base_vaddr,
3522 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
3523 			(uint32_t)(scatter_buf_ptr -
3524 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
3525 			scatter_buf_num - 1])), total_link_descs);
3526 	}
3527 }
3528 
3529 qdf_export_symbol(dp_link_desc_ring_replenish);
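
/*
 * Illustrative sketch (not part of the driver logic): the replenish loop
 * above maps a flat descriptor index onto the multi-page allocation. For a
 * given count, the backing page and the DMA address written into a ring
 * entry (or scatter-buffer slot) are derived as:
 *
 *	page_idx = count / pages->num_element_per_page;
 *	offset   = count % pages->num_element_per_page;
 *	paddr    = dma_pages[page_idx].page_p_addr +
 *		   offset * hal_get_link_desc_size(soc->hal_soc);
 *
 * LINK_DESC_COOKIE(count, page_idx, soc->link_desc_id_start) packs the same
 * indices into the cookie so later paths can locate the descriptor again.
 */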
3530 
3531 #ifdef IPA_OFFLOAD
3532 #define USE_1_IPA_RX_REO_RING 1
3533 #define USE_2_IPA_RX_REO_RINGS 2
3534 #define REO_DST_RING_SIZE_QCA6290 1023
3535 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
3536 #define REO_DST_RING_SIZE_QCA8074 1023
3537 #define REO_DST_RING_SIZE_QCN9000 2048
3538 #else
3539 #define REO_DST_RING_SIZE_QCA8074 8
3540 #define REO_DST_RING_SIZE_QCN9000 8
3541 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
3542 
3543 #ifdef IPA_WDI3_TX_TWO_PIPES
3544 #ifdef DP_MEMORY_OPT
3545 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
3546 {
3547 	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
3548 }
3549 
3550 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
3551 {
3552 	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
3553 }
3554 
3555 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
3556 {
3557 	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
3558 }
3559 
3560 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
3561 {
3562 	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
3563 }
3564 
3565 #else /* !DP_MEMORY_OPT */
3566 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
3567 {
3568 	return 0;
3569 }
3570 
3571 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
3572 {
3573 }
3574 
3575 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
3576 {
3577 	return 0;
3578 }
3579 
3580 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
3581 {
3582 }
3583 #endif /* DP_MEMORY_OPT */
3584 
3585 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
3586 {
3587 	hal_tx_init_data_ring(soc->hal_soc,
3588 			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
3589 }
3590 
3591 #else /* !IPA_WDI3_TX_TWO_PIPES */
3592 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
3593 {
3594 	return 0;
3595 }
3596 
3597 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
3598 {
3599 }
3600 
3601 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
3602 {
3603 	return 0;
3604 }
3605 
3606 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
3607 {
3608 }
3609 
3610 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
3611 {
3612 }
3613 
3614 #endif /* IPA_WDI3_TX_TWO_PIPES */
3615 
3616 #else
3617 
3618 #define REO_DST_RING_SIZE_QCA6290 1024
3619 
3620 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
3621 {
3622 	return 0;
3623 }
3624 
3625 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
3626 {
3627 }
3628 
3629 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
3630 {
3631 	return 0;
3632 }
3633 
3634 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
3635 {
3636 }
3637 
3638 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
3639 {
3640 }
3641 
3642 #endif /* IPA_OFFLOAD */
3643 
3644 /*
3645  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
3646  * @soc: Datapath soc handler
3647  *
3648  * This api resets the default cpu ring map
3649  */
3650 
3651 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
3652 {
3653 	uint8_t i;
3654 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3655 
3656 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
3657 		switch (nss_config) {
3658 		case dp_nss_cfg_first_radio:
3659 			/*
3660 			 * Setting Tx ring map for one nss offloaded radio
3661 			 */
3662 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
3663 			break;
3664 
3665 		case dp_nss_cfg_second_radio:
3666 			/*
3667 			 * Setting Tx ring map for the second NSS offloaded radio
3668 			 */
3669 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
3670 			break;
3671 
3672 		case dp_nss_cfg_dbdc:
3673 			/*
3674 			 * Setting Tx ring map for 2 nss offloaded radios
3675 			 */
3676 			soc->tx_ring_map[i] =
3677 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
3678 			break;
3679 
3680 		case dp_nss_cfg_dbtc:
3681 			/*
3682 			 * Setting Tx ring map for 3 nss offloaded radios
3683 			 */
3684 			soc->tx_ring_map[i] =
3685 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
3686 			break;
3687 
3688 		default:
3689 			dp_err("tx_ring_map failed due to invalid nss cfg");
3690 			break;
3691 		}
3692 	}
3693 }
3694 
3695 /*
3696  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
3697  * @soc - DP soc handle
3698  * @ring_type - ring type
3699  * @ring_num - ring number
3700  *
3701  * Return: non-zero if the ring is offloaded to NSS, else 0
3702  */
3703 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
3704 {
3705 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3706 	uint8_t status = 0;
3707 
3708 	switch (ring_type) {
3709 	case WBM2SW_RELEASE:
3710 	case REO_DST:
3711 	case RXDMA_BUF:
3712 	case REO_EXCEPTION:
3713 		status = ((nss_config) & (1 << ring_num));
3714 		break;
3715 	default:
3716 		break;
3717 	}
3718 
3719 	return status;
3720 }
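
/*
 * Example (illustrative): nss_config is a per-radio bitmap, so with
 * dp_nss_cfg_dbdc (0x3) bits 0 and 1 are set and
 *
 *	dp_soc_ring_if_nss_offloaded(soc, REO_DST, 0) -> non-zero
 *	dp_soc_ring_if_nss_offloaded(soc, REO_DST, 2) -> 0 (bit 2 clear)
 *
 * which is why the interrupt-mask reset loops below skip rings whose bit
 * is not set.
 */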
3721 
3722 /*
3723  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
3724  *					  unused WMAC hw rings
3725  * @soc - DP Soc handle
3726  * @mac_num - wmac num
3727  *
3728  * Return: void
3729  */
3730 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
3731 						int mac_num)
3732 {
3733 	uint8_t *grp_mask = NULL;
3734 	int group_number;
3735 
3736 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
3737 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
3738 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
3739 					  group_number, 0x0);
3740 
3741 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
3742 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
3743 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
3744 				      group_number, 0x0);
3745 
3746 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
3747 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
3748 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
3749 					  group_number, 0x0);
3750 
3751 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
3752 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
3753 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
3754 					      group_number, 0x0);
3755 }
3756 
3757 /*
3758  * dp_soc_reset_intr_mask() - reset interrupt mask
3759  * @soc - DP Soc handle
3760  *
3761  * Return: void
3762  */
3763 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
3764 {
3765 	uint8_t j;
3766 	uint8_t *grp_mask = NULL;
3767 	int group_number, mask, num_ring;
3768 
3769 	/* number of tx rings */
3770 	num_ring = soc->num_tcl_data_rings;
3771 
3772 	/*
3773 	 * group mask for the tx completion ring.
3774 	 */
3775 	grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
3776 
3777 	/* loop and reset the mask for only offloaded ring */
3778 	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
3779 		/*
3780 		 * Group number corresponding to tx offloaded ring.
3781 		 */
3782 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
3783 		if (group_number < 0) {
3784 			dp_init_debug("%pK: ring not part of any group; ring_type: %d, ring_num %d",
3785 				      soc, WBM2SW_RELEASE, j);
3786 			continue;
3787 		}
3788 
3789 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
3790 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
3791 		    (!mask)) {
3792 			continue;
3793 		}
3794 
3795 		/* reset the tx mask for offloaded ring */
3796 		mask &= (~(1 << j));
3797 
3798 		/*
3799 		 * reset the interrupt mask for offloaded ring.
3800 		 */
3801 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
3802 	}
3803 
3804 	/* number of rx rings */
3805 	num_ring = soc->num_reo_dest_rings;
3806 
3807 	/*
3808 	 * group mask for reo destination ring.
3809 	 */
3810 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
3811 
3812 	/* loop and reset the mask for only offloaded ring */
3813 	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
3814 		/*
3815 		 * Group number corresponding to rx offloaded ring.
3816 		 */
3817 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
3818 		if (group_number < 0) {
3819 			dp_init_debug("%pK: ring not part of any group; ring_type: %d, ring_num %d",
3820 				      soc, REO_DST, j);
3821 			continue;
3822 		}
3823 
3824 		mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
3825 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
3826 		    (!mask)) {
3827 			continue;
3828 		}
3829 
3830 		/* reset the interrupt mask for offloaded ring */
3831 		mask &= (~(1 << j));
3832 
3833 		/*
3834 		 * set the interrupt mask to zero for rx offloaded radio.
3835 		 */
3836 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
3837 	}
3838 
3839 	/*
3840 	 * group mask for Rx buffer refill ring
3841 	 */
3842 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
3843 
3844 	/* loop and reset the mask for only offloaded ring */
3845 	for (j = 0; j < MAX_PDEV_CNT; j++) {
3846 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
3847 
3848 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
3849 			continue;
3850 		}
3851 
3852 		/*
3853 		 * Group number corresponding to rx offloaded ring.
3854 		 */
3855 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
3856 		if (group_number < 0) {
3857 			dp_init_debug("%pK: ring not part of any group; ring_type: %d, ring_num %d",
3858 				      soc, RXDMA_BUF, lmac_id);
3859 			continue;
3860 		}
3861 
3862 		/* fetch the interrupt mask and clear the offloaded ring bit */
3863 		mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
3864 				group_number);
3865 		mask &= (~(1 << lmac_id));
3866 
3867 		/*
3868 		 * set the interrupt mask to zero for rx offloaded radio.
3869 		 */
3870 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
3871 			group_number, mask);
3872 	}
3873 
3874 	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
3875 
3876 	for (j = 0; j < num_ring; j++) {
3877 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
3878 			continue;
3879 		}
3880 
3881 		/*
3882 		 * Group number corresponding to rx err ring.
3883 		 */
3884 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
3885 		if (group_number < 0) {
3886 			dp_init_debug("%pK: ring not part of any group; ring_type: %d, ring_num %d",
3887 				      soc, REO_EXCEPTION, j);
3888 			continue;
3889 		}
3890 
3891 		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
3892 					      group_number, 0);
3893 	}
3894 }
3895 
3896 #ifdef IPA_OFFLOAD
3897 /**
3898  * dp_reo_remap_config() - configure reo remap register value based
3899  *                         on nss configuration.
3900  *		based on the offload_radio value below, the remap
3901  *		configuration is applied:
3902  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
3903  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
3904  *		2 - 2nd Radio handled by NSS (remap rings 1, 3 & 4)
3905  *		3 - both Radios handled by NSS (remap not required)
3906  *		4 - IPA OFFLOAD enabled (remap rings 1, 2 & 3)
3907  * @soc: DP SOC handle
3908  * @remap1: output parameter indicating reo remap 1 register value
3909  * @remap2: output parameter indicating reo remap 2 register value
3910  * Return: bool type, true if remap is configured else false.
3911  */
3912 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2)
3913 {
3914 	uint32_t ring[8] = {REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3};
3915 	int target_type;
3916 
3917 	target_type = hal_get_target_type(soc->hal_soc);
3918 
3919 	switch (target_type) {
3920 	case TARGET_TYPE_WCN7850:
3921 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3922 					      soc->num_reo_dest_rings -
3923 					      USE_2_IPA_RX_REO_RINGS, remap1,
3924 					      remap2);
3925 		break;
3926 
3927 	default:
3928 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3929 					      soc->num_reo_dest_rings -
3930 					      USE_1_IPA_RX_REO_RING, remap1,
3931 					      remap2);
3932 		break;
3933 	}
3934 
3935 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
3936 
3937 	return true;
3938 }
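
/*
 * Example (illustrative): the ix2/ix3 remap is computed only over the REO
 * destination rings left to the host. With USE_1_IPA_RX_REO_RING one ring
 * is reserved for the IPA rx pipe, so a soc with 4 REO dest rings remaps
 * across 3; WCN7850 runs two IPA rx pipes and keeps
 * (num_reo_dest_rings - USE_2_IPA_RX_REO_RINGS) rings for the host.
 */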
3939 
3940 #ifdef IPA_WDI3_TX_TWO_PIPES
3941 static bool dp_ipa_is_alt_tx_ring(int index)
3942 {
3943 	return index == IPA_TX_ALT_RING_IDX;
3944 }
3945 
3946 static bool dp_ipa_is_alt_tx_comp_ring(int index)
3947 {
3948 	return index == IPA_TX_ALT_COMP_RING_IDX;
3949 }
3950 #else /* !IPA_WDI3_TX_TWO_PIPES */
3951 static bool dp_ipa_is_alt_tx_ring(int index)
3952 {
3953 	return false;
3954 }
3955 
3956 static bool dp_ipa_is_alt_tx_comp_ring(int index)
3957 {
3958 	return false;
3959 }
3960 #endif /* IPA_WDI3_TX_TWO_PIPES */
3961 
3962 /**
3963  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
3964  *
3965  * @tx_ring_num: Tx ring number
3966  * @tx_ipa_ring_sz: Return param only updated for IPA.
3967  * @soc_cfg_ctx: dp soc cfg context
3968  *
3969  * Return: None
3970  */
3971 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
3972 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
3973 {
3974 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
3975 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
3976 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
3977 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
3978 }
3979 
3980 /**
3981  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
3982  *
3983  * @tx_comp_ring_num: Tx comp ring number
3984  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
3985  * @soc_cfg_ctx: dp soc cfg context
3986  *
3987  * Return: None
3988  */
3989 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
3990 					 int *tx_comp_ipa_ring_sz,
3991 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
3992 {
3993 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
3994 		*tx_comp_ipa_ring_sz =
3995 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
3996 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
3997 		*tx_comp_ipa_ring_sz =
3998 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
3999 }
4000 #else
4001 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
4002 {
4003 	uint8_t num = 0;
4004 
4005 	switch (value) {
4006 	case 0xF:
4007 		num = 4;
4008 		ring[0] = REO_REMAP_SW1;
4009 		ring[1] = REO_REMAP_SW2;
4010 		ring[2] = REO_REMAP_SW3;
4011 		ring[3] = REO_REMAP_SW4;
4012 		break;
4013 	case 0xE:
4014 		num = 3;
4015 		ring[0] = REO_REMAP_SW2;
4016 		ring[1] = REO_REMAP_SW3;
4017 		ring[2] = REO_REMAP_SW4;
4018 		break;
4019 	case 0xD:
4020 		num = 3;
4021 		ring[0] = REO_REMAP_SW1;
4022 		ring[1] = REO_REMAP_SW3;
4023 		ring[2] = REO_REMAP_SW4;
4024 		break;
4025 	case 0xC:
4026 		num = 2;
4027 		ring[0] = REO_REMAP_SW3;
4028 		ring[1] = REO_REMAP_SW4;
4029 		break;
4030 	case 0xB:
4031 		num = 3;
4032 		ring[0] = REO_REMAP_SW1;
4033 		ring[1] = REO_REMAP_SW2;
4034 		ring[2] = REO_REMAP_SW4;
4035 		break;
4036 	case 0xA:
4037 		num = 2;
4038 		ring[0] = REO_REMAP_SW2;
4039 		ring[1] = REO_REMAP_SW4;
4040 		break;
4041 	case 0x9:
4042 		num = 2;
4043 		ring[0] = REO_REMAP_SW1;
4044 		ring[1] = REO_REMAP_SW4;
4045 		break;
4046 	case 0x8:
4047 		num = 1;
4048 		ring[0] = REO_REMAP_SW4;
4049 		break;
4050 	case 0x7:
4051 		num = 3;
4052 		ring[0] = REO_REMAP_SW1;
4053 		ring[1] = REO_REMAP_SW2;
4054 		ring[2] = REO_REMAP_SW3;
4055 		break;
4056 	case 0x6:
4057 		num = 2;
4058 		ring[0] = REO_REMAP_SW2;
4059 		ring[1] = REO_REMAP_SW3;
4060 		break;
4061 	case 0x5:
4062 		num = 2;
4063 		ring[0] = REO_REMAP_SW1;
4064 		ring[1] = REO_REMAP_SW3;
4065 		break;
4066 	case 0x4:
4067 		num = 1;
4068 		ring[0] = REO_REMAP_SW3;
4069 		break;
4070 	case 0x3:
4071 		num = 2;
4072 		ring[0] = REO_REMAP_SW1;
4073 		ring[1] = REO_REMAP_SW2;
4074 		break;
4075 	case 0x2:
4076 		num = 1;
4077 		ring[0] = REO_REMAP_SW2;
4078 		break;
4079 	case 0x1:
4080 		num = 1;
4081 		ring[0] = REO_REMAP_SW1;
4082 		break;
4083 	}
4084 	return num;
4085 }
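
/*
 * Example (illustrative): reo_rings_mapping is a 4-bit mask over SW1..SW4,
 * so a cfg value of 0xA (bits 1 and 3 set) yields
 *
 *	uint32_t ring[4];
 *	uint8_t num = dp_reo_ring_selection(0xA, ring);
 *	(num == 2, ring[0] == REO_REMAP_SW2, ring[1] == REO_REMAP_SW4)
 *
 * and rx traffic is then remapped across just those two destination rings.
 */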
4086 
4087 static bool dp_reo_remap_config(struct dp_soc *soc,
4088 				uint32_t *remap1,
4089 				uint32_t *remap2)
4090 {
4091 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4092 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
4093 	uint8_t target_type, num;
4094 	uint32_t ring[4];
4095 	uint32_t value;
4096 
4097 	target_type = hal_get_target_type(soc->hal_soc);
4098 
4099 	switch (offload_radio) {
4100 	case dp_nss_cfg_default:
4101 		value = reo_config & 0xF;
4102 		num = dp_reo_ring_selection(value, ring);
4103 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4104 					      num, remap1, remap2);
4105 
4106 		break;
4107 	case dp_nss_cfg_first_radio:
4108 		value = reo_config & 0xE;
4109 		num = dp_reo_ring_selection(value, ring);
4110 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4111 					      num, remap1, remap2);
4112 
4113 		break;
4114 	case dp_nss_cfg_second_radio:
4115 		value = reo_config & 0xD;
4116 		num = dp_reo_ring_selection(value, ring);
4117 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4118 					      num, remap1, remap2);
4119 
4120 		break;
4121 	case dp_nss_cfg_dbdc:
4122 	case dp_nss_cfg_dbtc:
4123 		/* return false if both or all are offloaded to NSS */
4124 		return false;
4125 
4126 	}
4127 
4128 	dp_debug("remap1 %x remap2 %x offload_radio %u",
4129 		 *remap1, *remap2, offload_radio);
4130 	return true;
4131 }
4132 
4133 static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
4134 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4135 {
4136 }
4137 
4138 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4139 					 int *tx_comp_ipa_ring_sz,
4140 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4141 {
4142 }
4143 #endif /* IPA_OFFLOAD */
4144 
4145 /*
4146  * dp_reo_frag_dst_set() - configure reo register to set the
4147  *                        fragment destination ring
4148  * @soc: Datapath soc
4149  * @frag_dst_ring: output parameter to set fragment destination ring
4150  *
4151  * Based on offload_radio below, the fragment destination ring is selected:
4152  * 0 - TCL
4153  * 1 - SW1
4154  * 2 - SW2
4155  * 3 - SW3
4156  * 4 - SW4
4157  * 5 - Release
4158  * 6 - FW
4159  * 7 - alternate select
4160  *
4161  * Return: void
4162  */
4163 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
4164 {
4165 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4166 
4167 	switch (offload_radio) {
4168 	case dp_nss_cfg_default:
4169 		*frag_dst_ring = REO_REMAP_TCL;
4170 		break;
4171 	case dp_nss_cfg_first_radio:
4172 		/*
4173 		 * This configuration is valid for single band radio which
4174 		 * is also NSS offload.
4175 		 */
4176 	case dp_nss_cfg_dbdc:
4177 	case dp_nss_cfg_dbtc:
4178 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
4179 		break;
4180 	default:
4181 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
4182 		break;
4183 	}
4184 }
4185 
4186 #ifdef ENABLE_VERBOSE_DEBUG
4187 static void dp_enable_verbose_debug(struct dp_soc *soc)
4188 {
4189 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4190 
4191 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4192 
4193 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
4194 		is_dp_verbose_debug_enabled = true;
4195 
4196 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
4197 		hal_set_verbose_debug(true);
4198 	else
4199 		hal_set_verbose_debug(false);
4200 }
4201 #else
4202 static void dp_enable_verbose_debug(struct dp_soc *soc)
4203 {
4204 }
4205 #endif
4206 
4207 #ifdef WLAN_FEATURE_STATS_EXT
4208 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
4209 {
4210 	qdf_event_create(&soc->rx_hw_stats_event);
4211 }
4212 #else
4213 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
4214 {
4215 }
4216 #endif
4217 
4218 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
4219 {
4220 	int tcl_ring_num, wbm_ring_num;
4221 
4222 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4223 						index,
4224 						&tcl_ring_num,
4225 						&wbm_ring_num);
4226 
4227 	if (tcl_ring_num == -1 || wbm_ring_num == -1) {
4228 		dp_err("incorrect tcl/wbm ring num for index %u", index);
4229 		return;
4230 	}
4231 
4232 	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
4233 			     soc->tcl_data_ring[index].alloc_size,
4234 			     soc->ctrl_psoc,
4235 			     WLAN_MD_DP_SRNG_TCL_DATA,
4236 			     "tcl_data_ring");
4237 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4238 	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
4239 		       tcl_ring_num);
4240 
4241 	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
4242 			     soc->tx_comp_ring[index].alloc_size,
4243 			     soc->ctrl_psoc,
4244 			     WLAN_MD_DP_SRNG_TX_COMP,
4245 			     "tcl_comp_ring");
4246 	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4247 		       wbm_ring_num);
4248 }
4249 
4250 /**
4251  * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
4252  * ring pair
4253  * @soc: DP soc pointer
4254  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4255  *
4256  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4257  */
4258 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
4259 						uint8_t index)
4260 {
4261 	int tcl_ring_num, wbm_ring_num;
4262 	uint8_t bm_id;
4263 
4264 	if (index >= MAX_TCL_DATA_RINGS) {
4265 		dp_err("unexpected index!");
4266 		QDF_BUG(0);
4267 		goto fail1;
4268 	}
4269 
4270 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4271 						index,
4272 						&tcl_ring_num,
4273 						&wbm_ring_num);
4274 
4275 	if (tcl_ring_num == -1 || wbm_ring_num == -1) {
4276 		dp_err("incorrect tcl/wbm ring num for index %u", index);
4277 		goto fail1;
4278 	}
4279 
4280 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4281 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
4282 			 tcl_ring_num, 0)) {
4283 		dp_err("dp_srng_init failed for tcl_data_ring");
4284 		goto fail1;
4285 	}
4286 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
4287 			  soc->tcl_data_ring[index].alloc_size,
4288 			  soc->ctrl_psoc,
4289 			  WLAN_MD_DP_SRNG_TCL_DATA,
4290 			  "tcl_data_ring");
4291 
4292 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4293 			 wbm_ring_num, 0)) {
4294 		dp_err("dp_srng_init failed for tx_comp_ring");
4295 		goto fail1;
4296 	}
4297 
4298 	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);
4299 
4300 	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);
4301 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
4302 			  soc->tx_comp_ring[index].alloc_size,
4303 			  soc->ctrl_psoc,
4304 			  WLAN_MD_DP_SRNG_TX_COMP,
4305 			  "tcl_comp_ring");
4306 
4307 	return QDF_STATUS_SUCCESS;
4308 
4309 fail1:
4310 	return QDF_STATUS_E_FAILURE;
4311 }
4312 
4313 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
4314 {
4315 	dp_debug("index %u", index);
4316 	dp_srng_free(soc, &soc->tcl_data_ring[index]);
4317 	dp_srng_free(soc, &soc->tx_comp_ring[index]);
4318 }
4319 
4320 /**
4321  * dp_alloc_tx_ring_pair_by_index() - The function allocs tcl data/wbm2sw
4322  * ring pair for the given "index"
4323  * @soc: DP soc pointer
4324  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4325  *
4326  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4327  */
4328 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
4329 						 uint8_t index)
4330 {
4331 	int tx_ring_size;
4332 	int tx_comp_ring_size;
4333 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4334 	int cached = 0;
4335 
4336 	if (index >= MAX_TCL_DATA_RINGS) {
4337 		dp_err("unexpected index!");
4338 		QDF_BUG(0);
4339 		goto fail1;
4340 	}
4341 
4342 	dp_debug("index %u", index);
4343 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
4344 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
4345 
4346 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
4347 			  tx_ring_size, cached)) {
4348 		dp_err("dp_srng_alloc failed for tcl_data_ring");
4349 		goto fail1;
4350 	}
4351 
4352 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
4353 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
4354 	/* Enable cached Tx completion ring desc if NSS offload is disabled */
4355 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
4356 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
4357 
4358 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4359 			  tx_comp_ring_size, cached)) {
4360 		dp_err("dp_srng_alloc failed for tx_comp_ring");
4361 		goto fail1;
4362 	}
4363 
4364 	return QDF_STATUS_SUCCESS;
4365 
4366 fail1:
4367 	return QDF_STATUS_E_FAILURE;
4368 }
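
/*
 * Sketch (illustrative, for a hypothetical ring): dp srng objects follow a
 * two-phase lifecycle, which the alloc/init and deinit/free helpers above
 * mirror:
 *
 *	dp_srng_alloc(soc, &srng, TCL_DATA, num_entries, 0);  memory only
 *	dp_srng_init(soc, &srng, TCL_DATA, ring_num, 0);      SRNG/HW setup
 *	...
 *	dp_srng_deinit(soc, &srng, TCL_DATA, ring_num);
 *	dp_srng_free(soc, &srng);
 *
 * keeping allocation failures recoverable without touching HW state.
 */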
4369 
4370 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4371 {
4372 	struct cdp_lro_hash_config lro_hash;
4373 	QDF_STATUS status;
4374 
4375 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
4376 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
4377 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
4378 		dp_err("LRO, GRO and RX hash disabled");
4379 		return QDF_STATUS_E_FAILURE;
4380 	}
4381 
4382 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
4383 
4384 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
4385 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
4386 		lro_hash.lro_enable = 1;
4387 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
4388 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
4389 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
4390 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
4391 	}
4392 
4393 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
4394 			     (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
4395 			      LRO_IPV4_SEED_ARR_SZ));
4396 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
4397 			     (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
4398 			      LRO_IPV6_SEED_ARR_SZ));
4399 
4400 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
4401 
4402 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
4403 		QDF_BUG(0);
4404 		dp_err("lro_hash_config not configured");
4405 		return QDF_STATUS_E_FAILURE;
4406 	}
4407 
4408 	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
4409 						      pdev->pdev_id,
4410 						      &lro_hash);
4411 	if (!QDF_IS_STATUS_SUCCESS(status)) {
4412 		dp_err("failed to send lro_hash_config to FW %u", status);
4413 		return status;
4414 	}
4415 
4416 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
4417 		lro_hash.lro_enable, lro_hash.tcp_flag,
4418 		lro_hash.tcp_flag_mask);
4419 
4420 	dp_info("toeplitz_hash_ipv4:");
4421 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4422 			   lro_hash.toeplitz_hash_ipv4,
4423 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
4424 			   LRO_IPV4_SEED_ARR_SZ));
4425 
4426 	dp_info("toeplitz_hash_ipv6:");
4427 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4428 			   lro_hash.toeplitz_hash_ipv6,
4429 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
4430 			   LRO_IPV6_SEED_ARR_SZ));
4431 
4432 	return status;
4433 }
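
/*
 * Illustration (assumed flag/mask semantics): with tcp_flag = QDF_TCPHDR_ACK
 * and the mask built above, a segment is an aggregation candidate only when
 *
 *	(tcp_flags & tcp_flag_mask) == tcp_flag
 *
 * i.e. ACK set with FIN/SYN/RST/URG/ECE/CWR all clear, so connection setup,
 * teardown and congestion-signalling segments bypass LRO/GRO.
 */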
4434 
4435 /*
4436  * dp_rxdma_ring_setup() - configure the RX DMA rings
4437  * @soc: data path SoC handle
4438  * @pdev: Physical device handle
4439  *
4440  * Return: QDF_STATUS_SUCCESS - success, QDF_STATUS_E_FAILURE - failure
4441  */
4442 #ifdef QCA_HOST2FW_RXBUF_RING
4443 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4444 {
4445 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
4446 	int max_mac_rings;
4447 	int i;
4448 	int ring_size;
4449 
4450 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
4451 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
4452 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
4453 
4454 	for (i = 0; i < max_mac_rings; i++) {
4455 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
4456 		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
4457 				  RXDMA_BUF, ring_size, 0)) {
4458 			dp_init_err("%pK: failed rx mac ring setup", soc);
4459 			return QDF_STATUS_E_FAILURE;
4460 		}
4461 
4462 		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
4463 				 RXDMA_BUF, 1, i)) {
4464 			dp_init_err("%pK: failed rx mac ring setup", soc);
4465 
4466 			dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]);
4467 			return QDF_STATUS_E_FAILURE;
4468 		}
4469 	}
4470 	return QDF_STATUS_SUCCESS;
4471 }
4472 #else
4473 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4474 {
4475 	return QDF_STATUS_SUCCESS;
4476 }
4477 #endif
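
/*
 * Example (illustrative): under QCA_HOST2FW_RXBUF_RING each MAC gets its
 * own host-to-FW buffer ring; dp_srng_init() above is invoked with
 * ring_num 1 and mac_id i, binding instance i of the RXDMA_BUF ring type
 * to LMAC i, while ring_num 0 is used by the soc-level refill ring.
 */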
4478 
4479 /**
4480  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
4481  * @pdev - DP_PDEV handle
4482  *
4483  * Return: void
4484  */
4485 static inline void
4486 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
4487 {
4488 	uint8_t map_id;
4489 	struct dp_soc *soc = pdev->soc;
4490 
4491 	if (!soc)
4492 		return;
4493 
4494 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
4495 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
4496 			     default_dscp_tid_map,
4497 			     sizeof(default_dscp_tid_map));
4498 	}
4499 
4500 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
4501 		hal_tx_set_dscp_tid_map(soc->hal_soc,
4502 					default_dscp_tid_map,
4503 					map_id);
4504 	}
4505 }
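
/*
 * Example (illustrative): every pdev map id is seeded from
 * default_dscp_tid_map, so until the control path overrides an entry, a
 * frame marked with DSCP value d resolves to default_dscp_tid_map[d] on
 * every map id, and the same table is mirrored into each HW map id via
 * hal_tx_set_dscp_tid_map().
 */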
4506 
4507 /**
4508  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
4509  * @pdev - DP_PDEV handle
4510  *
4511  * Return: void
4512  */
4513 static inline void
4514 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
4515 {
4516 	struct dp_soc *soc = pdev->soc;
4517 
4518 	if (!soc)
4519 		return;
4520 
4521 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
4522 		     sizeof(default_pcp_tid_map));
4523 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
4524 }
4525 
4526 #ifdef IPA_OFFLOAD
4527 /**
4528  * dp_setup_ipa_rx_refill_buf_ring() - Setup second Rx refill buffer ring
4529  * @soc: data path instance
4530  * @pdev: core txrx pdev context
4531  *
4532  * Return: QDF_STATUS_SUCCESS: success
4533  *         QDF_STATUS_E_FAILURE: failure
4534  */
4535 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4536 					   struct dp_pdev *pdev)
4537 {
4538 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4539 	int entries;
4540 
4541 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4542 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
4543 
4544 	/* Setup second Rx refill buffer ring */
4545 	if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
4546 			  entries, 0)) {
4547 		dp_init_err("%pK: dp_srng_alloc failed second rx refill ring", soc);
4548 		return QDF_STATUS_E_FAILURE;
4549 	}
4550 
4551 	if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
4552 			 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
4553 		dp_init_err("%pK: dp_srng_init failed second rx refill ring", soc);
4554 		return QDF_STATUS_E_FAILURE;
4555 	}
4556 
4557 	return QDF_STATUS_SUCCESS;
4558 }
4559 
4560 /**
4561  * dp_cleanup_ipa_rx_refill_buf_ring() - Cleanup second Rx refill buffer ring
4562  * @soc: data path instance
4563  * @pdev: core txrx pdev context
4564  *
4565  * Return: void
4566  */
4567 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4568 					      struct dp_pdev *pdev)
4569 {
4570 	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
4571 	dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
4572 }
4573 
4574 #else
4575 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4576 					   struct dp_pdev *pdev)
4577 {
4578 	return QDF_STATUS_SUCCESS;
4579 }
4580 
4581 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4582 					      struct dp_pdev *pdev)
4583 {
4584 }
4585 #endif
4586 
4587 #ifdef DP_TX_HW_DESC_HISTORY
4588 /**
4589  * dp_soc_tx_hw_desc_history_attach() - Attach TX HW descriptor history
4590  *
4591  * @soc: DP soc handle
4592  *
4593  * Return: None
4594  */
4595 static void dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
4596 {
4597 	soc->tx_hw_desc_history = dp_context_alloc_mem(
4598 			soc, DP_TX_HW_DESC_HIST_TYPE,
4599 			sizeof(*soc->tx_hw_desc_history));
4600 	if (soc->tx_hw_desc_history)
4601 		soc->tx_hw_desc_history->index = 0;
4602 }
4603 
4604 static void dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
4605 {
4606 	dp_context_free_mem(soc, DP_TX_HW_DESC_HIST_TYPE,
4607 			    soc->tx_hw_desc_history);
4608 }
4609 
4610 #else /* DP_TX_HW_DESC_HISTORY */
4611 static inline void
4612 dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
4613 {
4614 }
4615 
4616 static inline void
4617 dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
4618 {
4619 }
4620 #endif /* DP_TX_HW_DESC_HISTORY */
4621 
4622 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
4623 #ifndef RX_DEFRAG_DO_NOT_REINJECT
4624 /**
4625  * dp_soc_rx_reinject_ring_history_attach() - Attach the reo reinject ring
4626  *					    history.
4627  * @soc: DP soc handle
4628  *
4629  * Return: None
4630  */
4631 static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
4632 {
4633 	soc->rx_reinject_ring_history =
4634 		dp_context_alloc_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
4635 				     sizeof(struct dp_rx_reinject_history));
4636 	if (soc->rx_reinject_ring_history)
4637 		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
4638 }
4639 #else /* RX_DEFRAG_DO_NOT_REINJECT */
4640 static inline void
4641 dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
4642 {
4643 }
4644 #endif /* RX_DEFRAG_DO_NOT_REINJECT */
4645 
4646 /**
4647  * dp_soc_rx_history_attach() - Attach the ring history record buffers
4648  * @soc: DP soc structure
4649  *
4650  * This function allocates the memory for recording the rx ring, rx error
4651  * ring and the reinject ring entries. There is no error returned in case
4652  * of allocation failure since the record function checks if the history is
4653  * initialized or not. We do not want to fail the driver load in case of
4654  * failure to allocate memory for debug history.
4655  *
4656  * Return: None
4657  */
4658 static void dp_soc_rx_history_attach(struct dp_soc *soc)
4659 {
4660 	int i;
4661 	uint32_t rx_ring_hist_size;
4662 	uint32_t rx_refill_ring_hist_size;
4663 
4664 	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
4665 	rx_refill_ring_hist_size = sizeof(*soc->rx_refill_ring_history[0]);
4666 
4667 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
4668 		soc->rx_ring_history[i] = dp_context_alloc_mem(
4669 				soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
4670 		if (soc->rx_ring_history[i])
4671 			qdf_atomic_init(&soc->rx_ring_history[i]->index);
4672 	}
4673 
4674 	soc->rx_err_ring_history = dp_context_alloc_mem(
4675 			soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size);
4676 	if (soc->rx_err_ring_history)
4677 		qdf_atomic_init(&soc->rx_err_ring_history->index);
4678 
4679 	dp_soc_rx_reinject_ring_history_attach(soc);
4680 
4681 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4682 		soc->rx_refill_ring_history[i] = dp_context_alloc_mem(
4683 						soc,
4684 						DP_RX_REFILL_RING_HIST_TYPE,
4685 						rx_refill_ring_hist_size);
4686 
4687 		if (soc->rx_refill_ring_history[i])
4688 			qdf_atomic_init(&soc->rx_refill_ring_history[i]->index);
4689 	}
4690 }
4691 
4692 static void dp_soc_rx_history_detach(struct dp_soc *soc)
4693 {
4694 	int i;
4695 
4696 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
4697 		dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
4698 				    soc->rx_ring_history[i]);
4699 
4700 	dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
4701 			    soc->rx_err_ring_history);
4702 
4703 	/*
4704 	 * No need for a featurized detach since qdf_mem_free takes
4705 	 * care of NULL pointer.
4706 	 */
4707 	dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
4708 			    soc->rx_reinject_ring_history);
4709 
4710 	for (i = 0; i < MAX_PDEV_CNT; i++)
4711 		dp_context_free_mem(soc, DP_RX_REFILL_RING_HIST_TYPE,
4712 				    soc->rx_refill_ring_history[i]);
4713 }
4714 
4715 #else
4716 static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
4717 {
4718 }
4719 
4720 static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
4721 {
4722 }
4723 #endif
4724 
4725 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
4726 /**
4727  * dp_soc_tx_history_attach() - Attach the ring history record buffers
4728  * @soc: DP soc structure
4729  *
4730  * This function allocates the memory for recording the tx tcl ring and
4731  * the tx comp ring entries. There is no error returned in case
4732  * of allocation failure since the record function checks if the history is
4733  * initialized or not. We do not want to fail the driver load in case of
4734  * failure to allocate memory for debug history.
4735  *
4736  * Return: None
4737  */
4738 static void dp_soc_tx_history_attach(struct dp_soc *soc)
4739 {
4740 	uint32_t tx_tcl_hist_size;
4741 	uint32_t tx_comp_hist_size;
4742 
4743 	tx_tcl_hist_size = sizeof(*soc->tx_tcl_history);
4744 	soc->tx_tcl_history = dp_context_alloc_mem(soc, DP_TX_TCL_HIST_TYPE,
4745 						   tx_tcl_hist_size);
4746 	if (soc->tx_tcl_history)
4747 		qdf_atomic_init(&soc->tx_tcl_history->index);
4748 
4749 	tx_comp_hist_size = sizeof(*soc->tx_comp_history);
4750 	soc->tx_comp_history = dp_context_alloc_mem(soc, DP_TX_COMP_HIST_TYPE,
4751 						    tx_comp_hist_size);
4752 	if (soc->tx_comp_history)
4753 		qdf_atomic_init(&soc->tx_comp_history->index);
4754 }
4755 
4756 /**
4757  * dp_soc_tx_history_detach() - Detach the ring history record buffers
4758  * @soc: DP soc structure
4759  *
4760  * This function frees the memory for recording the tx tcl ring and
4761  * the tx comp ring entries.
4762  *
4763  * Return: None
4764  */
4765 static void dp_soc_tx_history_detach(struct dp_soc *soc)
4766 {
4767 	dp_context_free_mem(soc, DP_TX_TCL_HIST_TYPE, soc->tx_tcl_history);
4768 	dp_context_free_mem(soc, DP_TX_COMP_HIST_TYPE, soc->tx_comp_history);
4769 }
4770 
4771 #else
4772 static inline void dp_soc_tx_history_attach(struct dp_soc *soc)
4773 {
4774 }
4775 
4776 static inline void dp_soc_tx_history_detach(struct dp_soc *soc)
4777 {
4778 }
4779 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
4780 
4781 /*
4782  * dp_pdev_attach_wifi3() - attach txrx pdev
4783  * @txrx_soc: Datapath SOC handle
4784  * @htc_handle: HTC handle for host-target interface
4785  * @qdf_osdev: QDF OS device
4786  * @pdev_id: PDEV ID
4787  *
4788  * Return: QDF_STATUS
4789  */
4790 static inline QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
4791 					      HTC_HANDLE htc_handle,
4792 					      qdf_device_t qdf_osdev,
4793 					      uint8_t pdev_id)
4794 {
4795 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4796 	struct dp_pdev *pdev = NULL;
4797 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4798 	int nss_cfg;
4799 
4800 	pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE, sizeof(*pdev));
4801 	if (!pdev) {
4802 		dp_init_err("%pK: DP PDEV memory allocation failed",
4803 			    soc);
4804 		goto fail0;
4805 	}
4806 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
4807 			  WLAN_MD_DP_PDEV, "dp_pdev");
4808 
4809 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4810 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
4811 
4812 	if (!pdev->wlan_cfg_ctx) {
4813 		dp_init_err("%pK: pdev cfg_attach failed", soc);
4814 		goto fail1;
4815 	}
4816 
4817 	/*
4818 	 * set nss pdev config based on soc config
4819 	 */
4820 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
4821 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
4822 					 (nss_cfg & (1 << pdev_id)));
4823 
4824 	pdev->soc = soc;
4825 	pdev->pdev_id = pdev_id;
4826 	soc->pdev_list[pdev_id] = pdev;
4827 
4828 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
4829 	soc->pdev_count++;
4830 
4831 	/* Allocate memory for pdev srng rings */
4832 	if (dp_pdev_srng_alloc(pdev)) {
4833 		dp_init_err("%pK: dp_pdev_srng_alloc failed", soc);
4834 		goto fail2;
4835 	}
4836 
4837 	/* Rx specific init */
4838 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
4839 		dp_init_err("%pK: dp_rx_pdev_attach failed", soc);
4840 		goto fail3;
4841 	}
4842 
4843 	if (dp_monitor_pdev_attach(pdev)) {
4844 		dp_init_err("%pK: dp_monitor_pdev_attach failed", soc);
4845 		goto fail4;
4846 	}
4847 
4848 	return QDF_STATUS_SUCCESS;
4849 fail4:
4850 	dp_rx_pdev_desc_pool_free(pdev);
4851 fail3:
4852 	dp_pdev_srng_free(pdev);
4853 fail2:
4854 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
4855 fail1:
4856 	soc->pdev_list[pdev_id] = NULL;
4857 	qdf_mem_free(pdev);
4858 fail0:
4859 	return QDF_STATUS_E_FAILURE;
4860 }
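
/*
 * Sketch (illustrative): the fail0..fail4 labels above implement the usual
 * reverse-order unwind, each label falling through to the ones below it:
 *
 *	fail4: dp_rx_pdev_desc_pool_free()  undo of dp_rx_pdev_desc_pool_alloc
 *	fail3: dp_pdev_srng_free()          undo of dp_pdev_srng_alloc
 *	fail2: wlan_cfg_pdev_detach()       undo of wlan_cfg_pdev_attach
 *	fail1: free pdev                    undo of dp_context_alloc_mem
 *
 * so a failure at step N releases exactly the resources of steps 1..N-1.
 */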
4861 
4862 /*
4863  * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
4864  * @soc: data path SoC handle
4865  * @pdev: Physical device handle
4866  *
4867  * Return: void
4868  */
4869 #ifdef QCA_HOST2FW_RXBUF_RING
4870 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
4871 {
4872 	int i;
4873 
4874 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
4875 		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
4876 		dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]);
4877 	}
4878 
4879 	dp_monitor_reap_timer_deinit(soc);
4880 }
4881 #else
4882 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
4883 {
4884 	if (soc->lmac_timer_init) {
4885 		qdf_timer_stop(&soc->lmac_reap_timer);
4886 		qdf_timer_free(&soc->lmac_reap_timer);
4887 		soc->lmac_timer_init = 0;
4888 	}
4889 }
4890 #endif
4891 
4892 #ifdef WLAN_DP_PENDING_MEM_FLUSH
4893 /**
4894  * dp_pdev_flush_pending_vdevs() - Flush all delete-pending vdevs in pdev
4895  * @pdev: Datapath PDEV handle
4896  *
4897  * This is the last chance to flush all pending dp vdevs/peers;
4898  * peer/vdev leak cases such as Non-SSR with a missing peer unmap
4899  * are covered here.
4900  *
4901  * Return: None
4902  */
4903 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
4904 {
4905 	struct dp_vdev *vdev = NULL;
4906 	struct dp_soc *soc = pdev->soc;
4907 
4908 	if (TAILQ_EMPTY(&soc->inactive_vdev_list))
4909 		return;
4910 
4911 	while (true) {
4912 		qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
4913 		TAILQ_FOREACH(vdev, &soc->inactive_vdev_list,
4914 			      inactive_list_elem) {
4915 			if (vdev->pdev == pdev)
4916 				break;
4917 		}
4918 		qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
4919 
4920 		/* vdev will be freed when all peers get cleanup */
4921 		if (vdev)
4922 			dp_vdev_flush_peers((struct cdp_vdev *)vdev, 0);
4923 		else
4924 			break;
4925 	}
4926 }
4927 #else
4928 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
4929 {
4930 }
4931 #endif
4932 
4933 /**
4934  * dp_pdev_deinit() - Deinit txrx pdev
4935  * @txrx_pdev: Datapath PDEV handle
4936  * @force: Force deinit
4937  *
4938  * Return: None
4939  */
4940 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
4941 {
4942 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4943 	qdf_nbuf_t curr_nbuf, next_nbuf;
4944 
4945 	if (pdev->pdev_deinit)
4946 		return;
4947 
4948 	dp_tx_me_exit(pdev);
4949 	dp_rx_fst_detach(pdev->soc, pdev);
4950 	dp_rx_pdev_buffers_free(pdev);
4951 	dp_rx_pdev_desc_pool_deinit(pdev);
4952 	dp_pdev_bkp_stats_detach(pdev);
4953 	qdf_event_destroy(&pdev->fw_peer_stats_event);
4954 	if (pdev->sojourn_buf)
4955 		qdf_nbuf_free(pdev->sojourn_buf);
4956 
4957 	dp_pdev_flush_pending_vdevs(pdev);
4958 	dp_tx_desc_flush(pdev, NULL, true);
4959 
4960 	qdf_spinlock_destroy(&pdev->tx_mutex);
4961 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
4962 
4963 	if (pdev->invalid_peer)
4964 		qdf_mem_free(pdev->invalid_peer);
4965 
4966 	dp_monitor_pdev_deinit(pdev);
4967 
4968 	dp_pdev_srng_deinit(pdev);
4969 
4970 	dp_ipa_uc_detach(pdev->soc, pdev);
4971 	dp_cleanup_ipa_rx_refill_buf_ring(pdev->soc, pdev);
4972 	dp_rxdma_ring_cleanup(pdev->soc, pdev);
4973 
4974 	curr_nbuf = pdev->invalid_peer_head_msdu;
4975 	while (curr_nbuf) {
4976 		next_nbuf = qdf_nbuf_next(curr_nbuf);
4977 		qdf_nbuf_free(curr_nbuf);
4978 		curr_nbuf = next_nbuf;
4979 	}
4980 	pdev->invalid_peer_head_msdu = NULL;
4981 	pdev->invalid_peer_tail_msdu = NULL;
4982 
4983 	dp_wdi_event_detach(pdev);
4984 	pdev->pdev_deinit = 1;
4985 }
4986 
4987 /**
4988  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
4989  * @psoc: Datapath psoc handle
4990  * @pdev_id: Id of datapath PDEV handle
4991  * @force: Force deinit
4992  *
4993  * Return: QDF_STATUS
4994  */
4995 static QDF_STATUS
4996 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
4997 		     int force)
4998 {
4999 	struct dp_pdev *txrx_pdev;
5000 
5001 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
5002 						       pdev_id);
5003 
5004 	if (!txrx_pdev)
5005 		return QDF_STATUS_E_FAILURE;
5006 
5007 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
5008 
5009 	return QDF_STATUS_SUCCESS;
5010 }
5011 
5012 /*
5013  * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
5014  * @txrx_pdev: Datapath PDEV handle
5015  *
5016  * Return: None
5017  */
5018 static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
5019 {
5020 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5021 
5022 	dp_monitor_tx_capture_debugfs_init(pdev);
5023 
5024 	if (dp_pdev_htt_stats_dbgfs_init(pdev)) {
5025 		dp_init_err("%pK: Failed to initialize pdev HTT stats debugfs", pdev->soc);
5026 	}
5027 }
5028 
5029 /*
5030  * dp_pdev_post_attach_wifi3() - post attach of txrx pdev
5031  * @soc: Datapath soc handle
5032  * @pdev_id: pdev id of pdev
5033  *
5034  * Return: QDF_STATUS
5035  */
5036 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
5037 				     uint8_t pdev_id)
5038 {
5039 	struct dp_pdev *pdev;
5040 
5041 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
5042 						  pdev_id);
5043 
5044 	if (!pdev) {
5045 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
5046 			    (struct dp_soc *)soc, pdev_id);
5047 		return QDF_STATUS_E_FAILURE;
5048 	}
5049 
5050 	dp_pdev_post_attach((struct cdp_pdev *)pdev);
5051 	return QDF_STATUS_SUCCESS;
5052 }
5053 
5054 /*
5055  * dp_pdev_detach() - Complete rest of pdev detach
5056  * @txrx_pdev: Datapath PDEV handle
5057  * @force: Force deinit
5058  *
5059  * Return: None
5060  */
5061 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
5062 {
5063 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5064 	struct dp_soc *soc = pdev->soc;
5065 
5066 	dp_pdev_htt_stats_dbgfs_deinit(pdev);
5067 	dp_rx_pdev_desc_pool_free(pdev);
5068 	dp_monitor_pdev_detach(pdev);
5069 	dp_pdev_srng_free(pdev);
5070 
5071 	soc->pdev_count--;
5072 	soc->pdev_list[pdev->pdev_id] = NULL;
5073 
5074 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
5075 	wlan_minidump_remove(pdev, sizeof(*pdev), soc->ctrl_psoc,
5076 			     WLAN_MD_DP_PDEV, "dp_pdev");
5077 	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
5078 }
5079 
5080 /*
5081  * dp_pdev_detach_wifi3() - detach txrx pdev
5082  * @psoc: Datapath soc handle
5083  * @pdev_id: pdev id of pdev
5084  * @force: Force detach
5085  *
5086  * Return: QDF_STATUS
5087  */
5088 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
5089 				       int force)
5090 {
5091 	struct dp_pdev *pdev;
5092 
5093 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
5094 						  pdev_id);
5095 
5096 	if (!pdev) {
5097 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
5098 			    (struct dp_soc *)psoc, pdev_id);
5099 		return QDF_STATUS_E_FAILURE;
5100 	}
5101 
5102 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
5103 	return QDF_STATUS_SUCCESS;
5104 }
5105 
5106 /*
5107  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from the freelist
5108  * @soc: DP SOC handle
5109  */
5110 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
5111 {
5112 	struct reo_desc_list_node *desc;
5113 	struct dp_rx_tid *rx_tid;
5114 
5115 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
5116 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
5117 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
5118 		rx_tid = &desc->rx_tid;
5119 		qdf_mem_unmap_nbytes_single(soc->osdev,
5120 			rx_tid->hw_qdesc_paddr,
5121 			QDF_DMA_BIDIRECTIONAL,
5122 			rx_tid->hw_qdesc_alloc_size);
5123 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
5124 		qdf_mem_free(desc);
5125 	}
5126 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
5127 	qdf_list_destroy(&soc->reo_desc_freelist);
5128 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
5129 }
5130 
5131 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
5132 /*
5133  * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
5134  *                                          for deferred reo desc list
5135  * @soc: Datapath soc handle
5136  *
5137  * Return: void
5138  */
5139 static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
5140 {
5141 	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
5142 	qdf_list_create(&soc->reo_desc_deferred_freelist,
5143 			REO_DESC_DEFERRED_FREELIST_SIZE);
5144 	soc->reo_desc_deferred_freelist_init = true;
5145 }
5146 
5147 /*
5148  * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
5149  *                                           free the leftover REO QDESCs
5150  * @soc: Datapath soc handle
5151  *
5152  * Return: void
5153  */
5154 static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
5155 {
5156 	struct reo_desc_deferred_freelist_node *desc;
5157 
5158 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
5159 	soc->reo_desc_deferred_freelist_init = false;
5160 	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
5161 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
5162 		qdf_mem_unmap_nbytes_single(soc->osdev,
5163 					    desc->hw_qdesc_paddr,
5164 					    QDF_DMA_BIDIRECTIONAL,
5165 					    desc->hw_qdesc_alloc_size);
5166 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
5167 		qdf_mem_free(desc);
5168 	}
5169 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
5170 
5171 	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
5172 	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
5173 }
5174 #else
5175 static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
5176 {
5177 }
5178 
5179 static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
5180 {
5181 }
5182 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
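
/*
 * Illustration (assumed DMA lifetime rules): both destroy paths above unmap
 * the HW queue descriptor before freeing its backing memory, since the REO
 * HW held a bus mapping to it:
 *
 *	qdf_mem_unmap_nbytes_single(soc->osdev, paddr,
 *				    QDF_DMA_BIDIRECTIONAL, alloc_size);
 *	qdf_mem_free(vaddr_unaligned);
 *
 * reversing the map-after-alloc order used when the rx TID queue was set up.
 */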
5183 
5184 /*
5185  * dp_soc_reset_txrx_ring_map() - reset tx ring map
5186  * @soc: DP SOC handle
5187  *
5188  */
5189 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
5190 {
5191 	uint32_t i;
5192 
5193 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
5194 		soc->tx_ring_map[i] = 0;
5195 }
5196 
5197 /*
5198  * dp_soc_print_inactive_objects() - prints inactive peer and vdev list
5199  * @soc: DP SOC handle
5200  *
5201  */
5202 static void dp_soc_print_inactive_objects(struct dp_soc *soc)
5203 {
5204 	struct dp_peer *peer = NULL;
5205 	struct dp_peer *tmp_peer = NULL;
5206 	struct dp_vdev *vdev = NULL;
5207 	struct dp_vdev *tmp_vdev = NULL;
5208 	int i = 0;
5209 	uint32_t count;
5210 
5211 	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
5212 	    TAILQ_EMPTY(&soc->inactive_vdev_list))
5213 		return;
5214 
5215 	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
5216 			   inactive_list_elem, tmp_peer) {
5217 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
5218 			count = qdf_atomic_read(&peer->mod_refs[i]);
5219 			if (count)
5220 				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
5221 					       peer, i, count);
5222 		}
5223 	}
5224 
5225 	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
5226 			   inactive_list_elem, tmp_vdev) {
5227 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
5228 			count = qdf_atomic_read(&vdev->mod_refs[i]);
5229 			if (count)
5230 				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
5231 					       vdev, i, count);
5232 		}
5233 	}
5234 	QDF_BUG(0);
5235 }
5236 
5237 /**
5238  * dp_soc_deinit() - Deinitialize txrx SOC
5239  * @txrx_soc: Opaque DP SOC handle
5240  *
5241  * Return: None
5242  */
5243 static void dp_soc_deinit(void *txrx_soc)
5244 {
5245 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5246 	struct htt_soc *htt_soc = soc->htt_handle;
5247 
5248 	qdf_atomic_set(&soc->cmn_init_done, 0);
5249 
5250 	soc->arch_ops.txrx_soc_deinit(soc);
5251 
5252 	/* free peer tables & AST tables allocated during peer_map_attach */
5253 	if (soc->peer_map_attach_success) {
5254 		dp_peer_find_detach(soc);
5255 		soc->peer_map_attach_success = FALSE;
5256 	}
5257 
5258 	qdf_flush_work(&soc->htt_stats.work);
5259 	qdf_disable_work(&soc->htt_stats.work);
5260 
5261 	qdf_spinlock_destroy(&soc->htt_stats.lock);
5262 
5263 	dp_soc_reset_txrx_ring_map(soc);
5264 
5265 	dp_reo_desc_freelist_destroy(soc);
5266 	dp_reo_desc_deferred_freelist_destroy(soc);
5267 
5268 	DEINIT_RX_HW_STATS_LOCK(soc);
5269 
5270 	qdf_spinlock_destroy(&soc->ast_lock);
5271 
5272 	dp_peer_mec_spinlock_destroy(soc);
5273 
5274 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
5275 
5276 	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
5277 
5278 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
5279 
5280 	qdf_spinlock_destroy(&soc->vdev_map_lock);
5281 
5282 	dp_reo_cmdlist_destroy(soc);
5283 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
5284 
5285 	dp_soc_tx_desc_sw_pools_deinit(soc);
5286 
5287 	dp_soc_srng_deinit(soc);
5288 
5289 	dp_hw_link_desc_ring_deinit(soc);
5290 
5291 	dp_soc_print_inactive_objects(soc);
5292 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
5293 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
5294 
5295 	htt_soc_htc_dealloc(soc->htt_handle);
5296 
5297 	htt_soc_detach(htt_soc);
5298 
5299 	/* Free wbm sg list and reset flags in down path */
5300 	dp_rx_wbm_sg_list_deinit(soc);
5301 
5302 	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
5303 			     WLAN_MD_DP_SOC, "dp_soc");
5304 }
5305 
5306 /**
5307  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
5308  * @txrx_soc: Opaque DP SOC handle
5309  *
5310  * Return: None
5311  */
5312 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
5313 {
5314 	dp_soc_deinit(txrx_soc);
5315 }
5316 
5317 /*
5318  * dp_soc_detach() - Detach rest of txrx SOC
5319  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
5320  *
5321  * Return: None
5322  */
5323 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
5324 {
5325 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5326 
5327 	soc->arch_ops.txrx_soc_detach(soc);
5328 
5329 	dp_soc_swlm_detach(soc);
5330 	dp_soc_tx_desc_sw_pools_free(soc);
5331 	dp_soc_srng_free(soc);
5332 	dp_hw_link_desc_ring_free(soc);
5333 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
5334 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
5335 	dp_soc_tx_hw_desc_history_detach(soc);
5336 	dp_soc_tx_history_detach(soc);
5337 	dp_soc_rx_history_detach(soc);
5338 
5339 	if (!dp_monitor_modularized_enable()) {
5340 		dp_mon_soc_detach_wrapper(soc);
5341 	}
5342 
5343 	qdf_mem_free(soc);
5344 }
5345 
5346 /*
5347  * dp_soc_detach_wifi3() - Detach txrx SOC
5348  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
5349  *
5350  * Return: None
5351  */
5352 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
5353 {
5354 	dp_soc_detach(txrx_soc);
5355 }
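/*
 * Teardown ordering sketch for the two entry points above (illustrative;
 * the cdp_soc_deinit()/cdp_soc_detach() wrapper names follow the usual
 * cdp conventions and are assumptions here, not code from this file):
 *
 *   cdp_soc_deinit(soc);   // dp_soc_deinit_wifi3(): undo dp_soc_init()
 *   cdp_soc_detach(soc);   // dp_soc_detach_wifi3(): free attach-time
 *                          // allocations, then qdf_mem_free(soc)
 */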
5356 
5357 /*
5358  * dp_rxdma_ring_config() - configure the RX DMA rings
5359  *
5360  * This function is used to configure the MAC rings.
5361  * On MCL, the host provides buffers in the Host2FW ring;
5362  * the FW refills (copies) buffers into the ring and updates
5363  * ring_idx in a register.
5364  *
5365  * @soc: data path SoC handle
5366  *
5367  * Return: zero on success, non-zero on failure
5368  */
5369 #ifdef QCA_HOST2FW_RXBUF_RING
5370 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
5371 {
5372 	int i;
5373 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5374 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5375 		struct dp_pdev *pdev = soc->pdev_list[i];
5376 
5377 		if (pdev) {
5378 			int mac_id;
5379 			bool dbs_enable = 0;
5380 			int max_mac_rings =
5381 				 wlan_cfg_get_num_mac_rings
5382 				(pdev->wlan_cfg_ctx);
5383 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
5384 
5385 			htt_srng_setup(soc->htt_handle, 0,
5386 				       soc->rx_refill_buf_ring[lmac_id]
5387 				       .hal_srng,
5388 				       RXDMA_BUF);
5389 
5390 			if (pdev->rx_refill_buf_ring2.hal_srng)
5391 				htt_srng_setup(soc->htt_handle, 0,
5392 					pdev->rx_refill_buf_ring2.hal_srng,
5393 					RXDMA_BUF);
5394 
5395 			if (soc->cdp_soc.ol_ops->
5396 				is_hw_dbs_2x2_capable) {
5397 				dbs_enable = soc->cdp_soc.ol_ops->
5398 					is_hw_dbs_2x2_capable(
5399 							(void *)soc->ctrl_psoc);
5400 			}
5401 
5402 			if (dbs_enable) {
5403 				QDF_TRACE(QDF_MODULE_ID_TXRX,
5404 					  QDF_TRACE_LEVEL_ERROR,
5405 					  FL("DBS enabled max_mac_rings %d"),
5406 					  max_mac_rings);
5407 			} else {
5408 				max_mac_rings = 1;
5409 				QDF_TRACE(QDF_MODULE_ID_TXRX,
5410 					 QDF_TRACE_LEVEL_ERROR,
5411 					 FL("DBS disabled, max_mac_rings %d"),
5412 					 max_mac_rings);
5413 			}
5414 
5415 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5416 					 FL("pdev_id %d max_mac_rings %d"),
5417 					 pdev->pdev_id, max_mac_rings);
5418 
5419 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
5420 				int mac_for_pdev =
5421 					dp_get_mac_id_for_pdev(mac_id,
5422 							       pdev->pdev_id);
5423 				/*
5424 				 * Obtain lmac id from pdev to access the LMAC
5425 				 * ring in soc context
5426 				 */
5427 				lmac_id =
5428 				dp_get_lmac_id_for_pdev_id(soc,
5429 							   mac_id,
5430 							   pdev->pdev_id);
5431 				QDF_TRACE(QDF_MODULE_ID_TXRX,
5432 					 QDF_TRACE_LEVEL_ERROR,
5433 					 FL("mac_id %d"), mac_for_pdev);
5434 
5435 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
5436 					 pdev->rx_mac_buf_ring[mac_id]
5437 						.hal_srng,
5438 					 RXDMA_BUF);
5439 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
5440 				soc->rxdma_err_dst_ring[lmac_id]
5441 					.hal_srng,
5442 					RXDMA_DST);
5443 
5444 				/* Configure monitor mode rings */
5445 				status = dp_monitor_htt_srng_setup(soc, pdev,
5446 								   lmac_id,
5447 								   mac_for_pdev);
5448 				if (status != QDF_STATUS_SUCCESS) {
5449 					dp_err("Failed to send htt monitor messages to target");
5450 					return status;
5451 				}
5452 
5453 			}
5454 		}
5455 	}
5456 
5457 	/*
5458 	 * Timer to reap rxdma status rings.
5459 	 * Needed until we enable ppdu end interrupts
5460 	 */
5461 	dp_monitor_reap_timer_init(soc);
5462 	dp_monitor_vdev_timer_init(soc);
5463 	return status;
5464 }
5465 #else
5466 /* This is only for WIN */
5467 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
5468 {
5469 	int i;
5470 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5471 	int mac_for_pdev;
5472 	int lmac_id;
5473 
5474 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5475 		struct dp_pdev *pdev =  soc->pdev_list[i];
5476 
5477 		if (!pdev)
5478 			continue;
5479 
5480 		mac_for_pdev = i;
5481 		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
5482 
5483 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
5484 			       soc->rx_refill_buf_ring[lmac_id].
5485 			       hal_srng, RXDMA_BUF);
5486 		/* Configure monitor mode rings */
5487 		dp_monitor_htt_srng_setup(soc, pdev,
5488 					  lmac_id,
5489 					  mac_for_pdev);
5490 		if (!soc->rxdma2sw_rings_not_supported)
5491 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
5492 				       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
5493 				       RXDMA_DST);
5494 	}
5495 
5496 	/* Configure LMAC rings in Polled mode */
5497 	if (soc->lmac_polled_mode) {
5498 		/*
5499 		 * Timer to reap lmac rings.
5500 		 */
5501 		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
5502 			       dp_service_lmac_rings, (void *)soc,
5503 			       QDF_TIMER_TYPE_WAKE_APPS);
5504 		soc->lmac_timer_init = 1;
5505 		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
5506 	}
5507 	return status;
5508 }
5509 #endif
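/*
 * Worked example for the MCL path above (a sketch; the actual id values
 * are target-specific): on a DBS-capable target with max_mac_rings == 2
 * for pdev 0, the mac_id loop runs twice, and each mac_id is translated
 * through dp_get_mac_id_for_pdev()/dp_get_lmac_id_for_pdev_id() so that
 * htt_srng_setup() carries the MAC-level id while the host indexes the
 * soc-level lmac ring arrays.
 */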
5510 
5511 /*
5512  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
5513  *
5514  * This function is used to configure the FSE HW block in RX OLE on a
5515  * per pdev basis. Here, we will be programming parameters related to
5516  * the Flow Search Table.
5517  *
5518  * @soc: data path SoC handle
5519  *
5520  * Return: zero on success, non-zero on failure
5521  */
5522 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
5523 static QDF_STATUS
5524 dp_rx_target_fst_config(struct dp_soc *soc)
5525 {
5526 	int i;
5527 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5528 
5529 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5530 		struct dp_pdev *pdev = soc->pdev_list[i];
5531 
5532 		/* Flow search is not enabled if NSS offload is enabled */
5533 		if (pdev &&
5534 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
5535 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
5536 			if (status != QDF_STATUS_SUCCESS)
5537 				break;
5538 		}
5539 	}
5540 	return status;
5541 }
5542 #elif defined(WLAN_SUPPORT_RX_FISA)
5543 /**
5544  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
5545  * @soc: SoC handle
5546  *
5547  * Return: Success
5548  */
5549 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
5550 {
5551 	/* Check if it is enabled in the INI */
5552 	if (!soc->fisa_enable) {
5553 		dp_err("RX FISA feature is disabled");
5554 		return QDF_STATUS_E_NOSUPPORT;
5555 	}
5556 
5557 	return dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
5558 }
5559 
5560 #define FISA_MAX_TIMEOUT 0xffffffff
5561 #define FISA_DISABLE_TIMEOUT 0
5562 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
5563 {
5564 	struct dp_htt_rx_fisa_cfg fisa_config;
5565 
5566 	fisa_config.pdev_id = 0;
5567 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
5568 
5569 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
5570 }
5571 #else /* !WLAN_SUPPORT_RX_FISA */
5572 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
5573 {
5574 	return QDF_STATUS_SUCCESS;
5575 }
5576 #endif /* !WLAN_SUPPORT_RX_FISA */
5577 
5578 #ifndef WLAN_SUPPORT_RX_FISA
5579 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
5580 {
5581 	return QDF_STATUS_SUCCESS;
5582 }
5583 
5584 static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
5585 {
5586 	return QDF_STATUS_SUCCESS;
5587 }
5588 
5589 static void dp_rx_dump_fisa_table(struct dp_soc *soc)
5590 {
5591 }
5592 
5593 static void dp_suspend_fse_cache_flush(struct dp_soc *soc)
5594 {
5595 }
5596 
5597 static void dp_resume_fse_cache_flush(struct dp_soc *soc)
5598 {
5599 }
5600 #endif /* !WLAN_SUPPORT_RX_FISA */
5601 
5602 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
5603 static inline QDF_STATUS dp_print_swlm_stats(struct dp_soc *soc)
5604 {
5605 	return QDF_STATUS_SUCCESS;
5606 }
5607 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */
5608 
5609 /*
5610  * dp_soc_attach_target_wifi3() - SOC initialization in the target
5611  * @cdp_soc: Opaque Datapath SOC handle
5612  *
5613  * Return: zero on success, non-zero on failure
5614  */
5615 static QDF_STATUS
5616 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
5617 {
5618 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
5619 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5620 
5621 	htt_soc_attach_target(soc->htt_handle);
5622 
5623 	status = dp_rxdma_ring_config(soc);
5624 	if (status != QDF_STATUS_SUCCESS) {
5625 		dp_err("Failed to send htt srng setup messages to target");
5626 		return status;
5627 	}
5628 
5629 	status = soc->arch_ops.dp_rxdma_ring_sel_cfg(soc);
5630 	if (status != QDF_STATUS_SUCCESS) {
5631 		dp_err("Failed to send htt ring config message to target");
5632 		return status;
5633 	}
5634 
5635 	status = dp_rx_target_fst_config(soc);
5636 	if (status != QDF_STATUS_SUCCESS &&
5637 	    status != QDF_STATUS_E_NOSUPPORT) {
5638 		dp_err("Failed to send htt fst setup config message to target");
5639 		return status;
5640 	}
5641 
5642 	if (status == QDF_STATUS_SUCCESS) {
5643 		status = dp_rx_fisa_config(soc);
5644 		if (status != QDF_STATUS_SUCCESS) {
5645 			dp_err("Failed to send htt FISA config message to target");
5646 			return status;
5647 		}
5648 	}
5649 
5650 	DP_STATS_INIT(soc);
5651 
5652 	dp_runtime_init(soc);
5653 
5654 	/* initialize work queue for stats processing */
5655 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
5656 
5657 	return QDF_STATUS_SUCCESS;
5658 }
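/*
 * The sequence above is strictly ordered: HTT soc attach, RXDMA ring
 * setup, per-arch ring selection config, then FST/FISA setup; a failure
 * in any step aborts target attach and is returned to the caller.
 */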
5659 
5660 /*
5661  * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
5662  * @soc: SoC handle
5663  * @vdev: vdev handle
5664  * @vdev_id: vdev_id
5665  *
5666  * Return: None
5667  */
5668 static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
5669 				   struct dp_vdev *vdev,
5670 				   uint8_t vdev_id)
5671 {
5672 	QDF_ASSERT(vdev_id <= MAX_VDEV_CNT);
5673 
5674 	qdf_spin_lock_bh(&soc->vdev_map_lock);
5675 
5676 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
5677 			QDF_STATUS_SUCCESS) {
5678 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK vdev_id %u",
5679 			     soc, vdev, vdev_id);
5680 		qdf_spin_unlock_bh(&soc->vdev_map_lock);
5681 		return;
5682 	}
5683 
5684 	if (!soc->vdev_id_map[vdev_id])
5685 		soc->vdev_id_map[vdev_id] = vdev;
5686 	else
5687 		QDF_ASSERT(0);
5688 
5689 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
5690 }
5691 
5692 /*
5693  * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
5694  * @soc: SoC handle
5695  * @vdev: vdev handle
5696  *
5697  * Return: None
5698  */
5699 static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
5700 				      struct dp_vdev *vdev)
5701 {
5702 	qdf_spin_lock_bh(&soc->vdev_map_lock);
5703 	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);
5704 
5705 	soc->vdev_id_map[vdev->vdev_id] = NULL;
5706 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
5707 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
5708 }
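/*
 * Pairing note: dp_vdev_id_map_tbl_add() takes a DP_MOD_ID_CONFIG
 * reference that is held by the table itself, and
 * dp_vdev_id_map_tbl_remove() releases it, so a vdev cannot be freed
 * while it is still reachable through soc->vdev_id_map[].
 */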
5709 
5710 /*
5711  * dp_vdev_pdev_list_add() - add vdev into pdev's list
5712  * @soc: soc handle
5713  * @pdev: pdev handle
5714  * @vdev: vdev handle
5715  *
5716  * return: none
5717  */
5718 static void dp_vdev_pdev_list_add(struct dp_soc *soc,
5719 				  struct dp_pdev *pdev,
5720 				  struct dp_vdev *vdev)
5721 {
5722 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5723 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
5724 			QDF_STATUS_SUCCESS) {
5725 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK",
5726 			     soc, vdev);
5727 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5728 		return;
5729 	}
5730 	/* add this vdev into the pdev's list */
5731 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
5732 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5733 }
5734 
5735 /*
5736  * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
5737  * @soc: SoC handle
5738  * @pdev: pdev handle
5739  * @vdev: VDEV handle
5740  *
5741  * Return: none
5742  */
5743 static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
5744 				     struct dp_pdev *pdev,
5745 				     struct dp_vdev *vdev)
5746 {
5747 	uint8_t found = 0;
5748 	struct dp_vdev *tmpvdev = NULL;
5749 
5750 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5751 	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
5752 		if (tmpvdev == vdev) {
5753 			found = 1;
5754 			break;
5755 		}
5756 	}
5757 
5758 	if (found) {
5759 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
5760 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
5761 	} else {
5762 		dp_vdev_debug("%pK: vdev:%pK not found in pdev:%pK vdevlist:%pK",
5763 			      soc, vdev, pdev, &pdev->vdev_list);
5764 		QDF_ASSERT(0);
5765 	}
5766 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5767 }
5768 
5769 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
5770 /*
5771  * dp_vdev_init_rx_eapol() - initializing osif_rx_eapol
5772  * @vdev: Datapath VDEV handle
5773  *
5774  * Return: None
5775  */
5776 static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
5777 {
5778 	vdev->osif_rx_eapol = NULL;
5779 }
5780 
5781 /*
5782  * dp_vdev_register_rx_eapol() - Register VDEV operations for rx_eapol
5783  * @vdev: DP vdev handle
5784  * @txrx_ops: Tx and Rx operations
5785  *
5786  * Return: None
5787  */
5788 static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
5789 					     struct ol_txrx_ops *txrx_ops)
5790 {
5791 	vdev->osif_rx_eapol = txrx_ops->rx.rx_eapol;
5792 }
5793 #else
5794 static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
5795 {
5796 }
5797 
5798 static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
5799 					     struct ol_txrx_ops *txrx_ops)
5800 {
5801 }
5802 #endif
5803 
5804 /*
5805  * dp_vdev_attach_wifi3() - attach txrx vdev
5806  * @cdp_soc: CDP SoC context
5807  * @pdev_id: PDEV ID for vdev creation
5808  * @vdev_mac_addr: MAC address of the virtual interface
5809  * @vdev_id: VDEV Id
5810  * @op_mode: VDEV operating mode
5811  * @subtype: VDEV operating subtype
5812  * Return: status
5813  */
5814 static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
5815 				       uint8_t pdev_id,
5816 				       uint8_t *vdev_mac_addr,
5817 				       uint8_t vdev_id,
5818 				       enum wlan_op_mode op_mode,
5819 				       enum wlan_op_subtype subtype)
5820 {
5821 	int i = 0;
5822 	qdf_size_t vdev_context_size;
5823 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
5824 	struct dp_pdev *pdev =
5825 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
5826 						   pdev_id);
5827 	struct dp_vdev *vdev;
5828 
5829 	vdev_context_size =
5830 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_VDEV);
5831 	vdev = qdf_mem_malloc(vdev_context_size);
5832 
5833 	if (!pdev) {
5834 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
5835 			    cdp_soc, pdev_id);
5836 		qdf_mem_free(vdev);
5837 		goto fail0;
5838 	}
5839 
5840 	if (!vdev) {
5841 		dp_init_err("%pK: DP VDEV memory allocation failed",
5842 			    cdp_soc);
5843 		goto fail0;
5844 	}
5845 
5846 	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
5847 			  WLAN_MD_DP_VDEV, "dp_vdev");
5848 
5849 	vdev->pdev = pdev;
5850 	vdev->vdev_id = vdev_id;
5851 	vdev->opmode = op_mode;
5852 	vdev->subtype = subtype;
5853 	vdev->osdev = soc->osdev;
5854 
5855 	vdev->osif_rx = NULL;
5856 	vdev->osif_rsim_rx_decap = NULL;
5857 	vdev->osif_get_key = NULL;
5858 	vdev->osif_tx_free_ext = NULL;
5859 	vdev->osif_vdev = NULL;
5860 
5861 	vdev->delete.pending = 0;
5862 	vdev->safemode = 0;
5863 	vdev->drop_unenc = 1;
5864 	vdev->sec_type = cdp_sec_type_none;
5865 	vdev->multipass_en = false;
5866 	dp_vdev_init_rx_eapol(vdev);
5867 	qdf_atomic_init(&vdev->ref_cnt);
5868 	for (i = 0; i < DP_MOD_ID_MAX; i++)
5869 		qdf_atomic_init(&vdev->mod_refs[i]);
5870 
5871 	/* Take one reference for create */
5872 	qdf_atomic_inc(&vdev->ref_cnt);
5873 	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
5874 	vdev->num_peers = 0;
5875 #ifdef notyet
5876 	vdev->filters_num = 0;
5877 #endif
5878 	vdev->lmac_id = pdev->lmac_id;
5879 
5880 	qdf_mem_copy(
5881 		&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
5882 
5883 	/* TODO: Initialize default HTT meta data that will be used in
5884 	 * TCL descriptors for packets transmitted from this VDEV
5885 	 */
5886 
5887 	qdf_spinlock_create(&vdev->peer_list_lock);
5888 	TAILQ_INIT(&vdev->peer_list);
5889 	dp_peer_multipass_list_init(vdev);
5890 	if ((soc->intr_mode == DP_INTR_POLL) &&
5891 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
5892 		if ((pdev->vdev_count == 0) ||
5893 		    (wlan_op_mode_monitor == vdev->opmode))
5894 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
5895 	} else if (soc->intr_mode == DP_INTR_MSI &&
5896 		   wlan_op_mode_monitor == vdev->opmode) {
5897 		dp_monitor_vdev_timer_start(soc);
5898 	}
5899 
5900 	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);
5901 
5902 	if (wlan_op_mode_monitor == vdev->opmode) {
5903 		if (dp_monitor_vdev_attach(vdev) == QDF_STATUS_SUCCESS) {
5904 			dp_monitor_pdev_set_mon_vdev(vdev);
5905 			dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
5906 			return QDF_STATUS_SUCCESS;
5907 		}
5908 		return QDF_STATUS_E_FAILURE;
5909 	}
5910 
5911 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
5912 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
5913 	vdev->dscp_tid_map_id = 0;
5914 	vdev->mcast_enhancement_en = 0;
5915 	vdev->igmp_mcast_enhanc_en = 0;
5916 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
5917 	vdev->prev_tx_enq_tstamp = 0;
5918 	vdev->prev_rx_deliver_tstamp = 0;
5919 	vdev->skip_sw_tid_classification = DP_TX_HW_DSCP_TID_MAP_VALID;
5920 
5921 	dp_vdev_pdev_list_add(soc, pdev, vdev);
5922 	pdev->vdev_count++;
5923 
5924 	if (wlan_op_mode_sta != vdev->opmode)
5925 		vdev->ap_bridge_enabled = true;
5926 	else
5927 		vdev->ap_bridge_enabled = false;
5928 	dp_init_info("%pK: wlan_cfg_ap_bridge_enabled %d",
5929 		     cdp_soc, vdev->ap_bridge_enabled);
5930 
5931 	dp_tx_vdev_attach(vdev);
5932 
5933 	dp_monitor_vdev_attach(vdev);
5934 	if (!pdev->is_lro_hash_configured) {
5935 		if (QDF_IS_STATUS_SUCCESS(dp_lro_hash_setup(soc, pdev)))
5936 			pdev->is_lro_hash_configured = true;
5937 		else
5938 			dp_err("LRO hash setup failure!");
5939 	}
5940 
5941 	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT")", vdev,
5942 		QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
5943 	DP_STATS_INIT(vdev);
5944 
5945 	if (QDF_IS_STATUS_ERROR(soc->arch_ops.txrx_vdev_attach(soc, vdev)))
5946 		goto fail0;
5947 
5948 	if (wlan_op_mode_sta == vdev->opmode)
5949 		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
5950 				     vdev->mac_addr.raw);
5951 	return QDF_STATUS_SUCCESS;
5952 
5953 fail0:
5954 	return QDF_STATUS_E_FAILURE;
5955 }
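/*
 * OSIF bring-up sketch (illustrative; assumes the standard cdp wrappers
 * cdp_vdev_attach()/cdp_vdev_register() that front the handlers in this
 * file):
 *
 *   cdp_vdev_attach(soc, pdev_id, mac, vdev_id, wlan_op_mode_ap,
 *                   wlan_op_subtype_none);
 *   cdp_vdev_register(soc, vdev_id, osif_vdev, &txrx_ops);
 *   // txrx_ops.tx.tx now holds the dp_tx_send*() variant picked by
 *   // dp_vdev_register_tx_handler() below
 */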
5956 
5957 #ifndef QCA_HOST_MODE_WIFI_DISABLED
5958 /**
5959  * dp_vdev_register_tx_handler() - Register Tx handler
5960  * @vdev: struct dp_vdev *
5961  * @soc: struct dp_soc *
5962  * @txrx_ops: struct ol_txrx_ops *
5963  */
5964 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
5965 					       struct dp_soc *soc,
5966 					       struct ol_txrx_ops *txrx_ops)
5967 {
5968 	/* Enable vdev_id check only for ap, if flag is enabled */
5969 	if (vdev->mesh_vdev)
5970 		txrx_ops->tx.tx = dp_tx_send_mesh;
5971 	else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
5972 		 (vdev->opmode == wlan_op_mode_ap))
5973 		txrx_ops->tx.tx = dp_tx_send_vdev_id_check;
5974 	else
5975 		txrx_ops->tx.tx = dp_tx_send;
5976 
5977 	/* Avoid check in regular exception Path */
5978 	if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
5979 	    (vdev->opmode == wlan_op_mode_ap))
5980 		txrx_ops->tx.tx_exception = dp_tx_send_exception_vdev_id_check;
5981 	else
5982 		txrx_ops->tx.tx_exception = dp_tx_send_exception;
5983 
5984 	dp_info("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d",
5985 		wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx),
5986 		vdev->opmode, vdev->vdev_id);
5987 }
5988 #else /* QCA_HOST_MODE_WIFI_DISABLED */
5989 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
5990 					       struct dp_soc *soc,
5991 					       struct ol_txrx_ops *txrx_ops)
5992 {
5993 }
5994 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
5995 
5996 /**
5997  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
5998  * @soc_hdl: Datapath soc handle
5999  * @vdev_id: id of Datapath VDEV handle
6000  * @osif_vdev: OSIF vdev handle
6001  * @txrx_ops: Tx and Rx operations
6002  *
6003  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
6004  */
6005 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
6006 					 uint8_t vdev_id,
6007 					 ol_osif_vdev_handle osif_vdev,
6008 					 struct ol_txrx_ops *txrx_ops)
6009 {
6010 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6011 	struct dp_vdev *vdev =	dp_vdev_get_ref_by_id(soc, vdev_id,
6012 						      DP_MOD_ID_CDP);
6013 
6014 	if (!vdev)
6015 		return QDF_STATUS_E_FAILURE;
6016 
6017 	vdev->osif_vdev = osif_vdev;
6018 	vdev->osif_rx = txrx_ops->rx.rx;
6019 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
6020 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
6021 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
6022 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
6023 	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
6024 	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
6025 	vdev->osif_get_key = txrx_ops->get_key;
6026 	dp_monitor_vdev_register_osif(vdev, txrx_ops);
6027 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
6028 	vdev->tx_comp = txrx_ops->tx.tx_comp;
6029 	vdev->stats_cb = txrx_ops->rx.stats_rx;
6030 #ifdef notyet
6031 #if ATH_SUPPORT_WAPI
6032 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
6033 #endif
6034 #endif
6035 #ifdef UMAC_SUPPORT_PROXY_ARP
6036 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
6037 #endif
6038 	vdev->me_convert = txrx_ops->me_convert;
6039 
6040 	dp_vdev_register_rx_eapol(vdev, txrx_ops);
6041 
6042 	dp_vdev_register_tx_handler(vdev, soc, txrx_ops);
6043 
6044 	dp_init_info("%pK: DP Vdev Register success", soc);
6045 
6046 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6047 	return QDF_STATUS_SUCCESS;
6048 }
6049 
6050 /**
6051  * dp_peer_delete() - delete DP peer
6052  *
6053  * @soc: Datapath soc
6054  * @peer: Datapath peer
6055  * @arg: argument to iter function
6056  *
6057  * Return: void
6058  */
6059 static void
6060 dp_peer_delete(struct dp_soc *soc,
6061 	       struct dp_peer *peer,
6062 	       void *arg)
6063 {
6064 	if (!peer->valid)
6065 		return;
6066 
6067 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
6068 			     peer->vdev->vdev_id,
6069 			     peer->mac_addr.raw, 0);
6070 }
6071 
6072 /**
6073  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
6074  * @vdev_handle: Datapath VDEV handle
6075  * @unmap_only: Flag to indicate "only unmap"
6076  *
6077  * Return: void
6078  */
6079 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
6080 {
6081 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6082 	struct dp_pdev *pdev = vdev->pdev;
6083 	struct dp_soc *soc = pdev->soc;
6084 	struct dp_peer *peer;
6085 	uint32_t i = 0;
6086 
6088 	if (!unmap_only)
6089 		dp_vdev_iterate_peer_lock_safe(vdev, dp_peer_delete, NULL,
6090 					       DP_MOD_ID_CDP);
6091 
6092 	for (i = 0; i < soc->max_peers; i++) {
6093 		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);
6094 
6095 		if (!peer)
6096 			continue;
6097 
6098 		if (peer->vdev != vdev) {
6099 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6100 			continue;
6101 		}
6102 
6103 		dp_info("peer: "QDF_MAC_ADDR_FMT" is getting unmapped",
6104 			QDF_MAC_ADDR_REF(peer->mac_addr.raw));
6105 
6106 		dp_rx_peer_unmap_handler(soc, i,
6107 					 vdev->vdev_id,
6108 					 peer->mac_addr.raw, 0,
6109 					 DP_PEER_WDS_COUNT_INVALID);
6110 		SET_PEER_REF_CNT_ONE(peer);
6111 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6112 	}
6114 }
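/*
 * unmap_only semantics: when true, peers are not deleted through the CDP
 * path; the loop above only replays dp_rx_peer_unmap_handler() to drop
 * references that are stuck because a reset target never delivered the
 * peer unmap events (see the target-status checks in
 * dp_vdev_detach_wifi3() below).
 */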
6115 
6116 /*
6117  * dp_vdev_detach_wifi3() - Detach txrx vdev
6118  * @cdp_soc: Datapath soc handle
6119  * @vdev_id: VDEV Id
6120  * @callback: Callback OL_IF on completion of detach
6121  * @cb_context:	Callback context
6122  * Return: QDF_STATUS
6123  */
6124 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
6125 				       uint8_t vdev_id,
6126 				       ol_txrx_vdev_delete_cb callback,
6127 				       void *cb_context)
6128 {
6129 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6130 	struct dp_pdev *pdev;
6131 	struct dp_neighbour_peer *peer = NULL;
6132 	struct dp_peer *vap_self_peer = NULL;
6133 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6134 						     DP_MOD_ID_CDP);
6135 
6136 	if (!vdev)
6137 		return QDF_STATUS_E_FAILURE;
6138 
6139 	soc->arch_ops.txrx_vdev_detach(soc, vdev);
6140 
6141 	pdev = vdev->pdev;
6142 
6143 	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
6144 							DP_MOD_ID_CONFIG);
6145 	if (vap_self_peer) {
6146 		qdf_spin_lock_bh(&soc->ast_lock);
6147 		if (vap_self_peer->self_ast_entry) {
6148 			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
6149 			vap_self_peer->self_ast_entry = NULL;
6150 		}
6151 		qdf_spin_unlock_bh(&soc->ast_lock);
6152 
6153 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
6154 				     vap_self_peer->mac_addr.raw, 0);
6155 		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
6156 	}
6157 
6158 	/*
6159 	 * If the Target is hung, flush all peers before detaching the vdev;
6160 	 * this will free all references held due to missing
6161 	 * unmap commands from the Target.
6162 	 */
6163 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
6164 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
6165 	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
6166 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true);
6167 
6168 	/* indicate that the vdev needs to be deleted */
6169 	vdev->delete.pending = 1;
6170 	dp_rx_vdev_detach(vdev);
6171 	/*
6172 	 * move it after dp_rx_vdev_detach(),
6173 	 * as the call back done in dp_rx_vdev_detach()
6174 	 * still need to get vdev pointer by vdev_id.
6175 	 */
6176 	dp_vdev_id_map_tbl_remove(soc, vdev);
6177 
6178 	dp_monitor_neighbour_peer_list_remove(pdev, vdev, peer);
6179 
6180 	dp_tx_vdev_multipass_deinit(vdev);
6181 
6182 	if (vdev->vdev_dp_ext_handle) {
6183 		qdf_mem_free(vdev->vdev_dp_ext_handle);
6184 		vdev->vdev_dp_ext_handle = NULL;
6185 	}
6186 	vdev->delete.callback = callback;
6187 	vdev->delete.context = cb_context;
6188 
6189 	if (vdev->opmode != wlan_op_mode_monitor)
6190 		dp_vdev_pdev_list_remove(soc, pdev, vdev);
6191 
6192 	pdev->vdev_count--;
6193 	/* release reference taken above for find */
6194 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6195 
6196 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
6197 	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
6198 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
6199 
6200 	/* release reference taken at dp_vdev_create */
6201 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6202 
6203 	return QDF_STATUS_SUCCESS;
6204 }
6205 
6206 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
6207 						uint8_t *peer_mac_addr)
6208 {
6209 	struct dp_peer *peer;
6210 	struct dp_soc *soc = vdev->pdev->soc;
6211 
6212 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
6213 	TAILQ_FOREACH(peer, &soc->inactive_peer_list,
6214 		      inactive_list_elem) {
6215 
6216 		/* reuse bss peer only when vdev matches */
6217 		if (peer->bss_peer && (peer->vdev == vdev) &&
6218 		    qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
6219 				QDF_MAC_ADDR_SIZE) == 0) {
6220 			/* increment ref count for cdp_peer_create */
6221 			if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
6222 						QDF_STATUS_SUCCESS) {
6223 				TAILQ_REMOVE(&soc->inactive_peer_list, peer,
6224 					     inactive_list_elem);
6225 				qdf_spin_unlock_bh
6226 					(&soc->inactive_peer_list_lock);
6227 				return peer;
6228 			}
6229 		}
6230 	}
6231 
6232 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
6233 	return NULL;
6234 }
6235 
6236 #ifdef FEATURE_AST
6237 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
6238 					       struct dp_pdev *pdev,
6239 					       uint8_t *peer_mac_addr)
6240 {
6241 	struct dp_ast_entry *ast_entry;
6242 
6243 	qdf_spin_lock_bh(&soc->ast_lock);
6244 	if (soc->ast_override_support)
6245 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
6246 							    pdev->pdev_id);
6247 	else
6248 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
6249 
6250 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
6251 		dp_peer_del_ast(soc, ast_entry);
6252 
6253 	qdf_spin_unlock_bh(&soc->ast_lock);
6254 }
6255 #endif
6256 
6257 #ifdef PEER_CACHE_RX_PKTS
6258 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
6259 {
6260 	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
6261 	peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
6262 	qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
6263 }
6264 #else
6265 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
6266 {
6267 }
6268 #endif
6269 
6270 /*
6271  * dp_peer_create_wifi3() - attach txrx peer
6272  * @soc_hdl: Datapath soc handle
6273  * @vdev_id: id of vdev
6274  * @peer_mac_addr: Peer MAC address
6275  *
6276  * Return: QDF_STATUS
6277  */
6278 static QDF_STATUS
6279 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6280 		     uint8_t *peer_mac_addr)
6281 {
6282 	struct dp_peer *peer;
6283 	int i;
6284 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6285 	struct dp_pdev *pdev;
6286 	struct cdp_peer_cookie peer_cookie;
6287 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
6288 	struct dp_vdev *vdev = NULL;
6289 
6290 	if (!peer_mac_addr)
6291 		return QDF_STATUS_E_FAILURE;
6292 
6293 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
6294 
6295 	if (!vdev)
6296 		return QDF_STATUS_E_FAILURE;
6297 
6298 	pdev = vdev->pdev;
6299 	soc = pdev->soc;
6300 
6301 	/*
6302 	 * If a peer entry with given MAC address already exists,
6303 	 * reuse the peer and reset the state of peer.
6304 	 */
6305 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
6306 
6307 	if (peer) {
6308 		dp_peer_vdev_list_add(soc, vdev, peer);
6309 
6310 		dp_peer_find_hash_add(soc, peer);
6311 		qdf_atomic_init(&peer->is_default_route_set);
6312 		dp_peer_cleanup(vdev, peer);
6313 
6314 		for (i = 0; i < DP_MAX_TIDS; i++)
6315 			qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
6316 
6317 		qdf_spin_lock_bh(&soc->ast_lock);
6318 		dp_peer_delete_ast_entries(soc, peer);
6319 		qdf_spin_unlock_bh(&soc->ast_lock);
6320 
6321 		if ((vdev->opmode == wlan_op_mode_sta) &&
6322 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
6323 		     QDF_MAC_ADDR_SIZE)) {
6324 			ast_type = CDP_TXRX_AST_TYPE_SELF;
6325 		}
6326 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
6327 
6328 		peer->valid = 1;
6329 		dp_local_peer_id_alloc(pdev, peer);
6330 
6331 		qdf_spinlock_create(&peer->peer_info_lock);
6332 		dp_peer_rx_bufq_resources_init(peer);
6333 
6334 		DP_STATS_INIT(peer);
6335 		DP_STATS_UPD(peer, rx.avg_snr, CDP_INVALID_SNR);
6336 
6337 		/*
6338 		 * In tx_monitor mode, a filter may be set for an unassociated
6339 		 * peer; when that peer becomes associated, the tx_cap_enabled
6340 		 * flag must be updated to support the peer filter.
6341 		 */
6342 		dp_monitor_peer_tx_capture_filter_check(pdev, peer);
6343 
6344 		dp_set_peer_isolation(peer, false);
6345 
6346 		dp_wds_ext_peer_init(peer);
6347 
6348 		dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
6349 
6350 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6351 		return QDF_STATUS_SUCCESS;
6352 	} else {
6353 		/*
6354 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
6355 		 * need to remove the AST entry which was earlier added as a WDS
6356 		 * entry.
6357 		 * If an AST entry exists, but no peer entry exists with a given
6358 		 * MAC address, we can deduce it to be a WDS entry.
6359 		 */
6360 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
6361 	}
6362 
6363 #ifdef notyet
6364 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
6365 		soc->mempool_ol_ath_peer);
6366 #else
6367 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
6368 #endif
6369 	if (!peer) {
6370 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6371 		return QDF_STATUS_E_FAILURE; /* failure */
6372 	}
6373 
6374 	/* register with minidump only after the allocation is known good */
6375 	wlan_minidump_log(peer, sizeof(*peer), soc->ctrl_psoc,
6376 			  WLAN_MD_DP_PEER, "dp_peer");
6377 
6378 	qdf_mem_zero(peer, sizeof(struct dp_peer));
6379 
6380 	TAILQ_INIT(&peer->ast_entry_list);
6381 
6382 	/* store provided params */
6383 	peer->vdev = vdev;
6384 	/* get the vdev reference for new peer */
6385 	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);
6386 
6387 	if ((vdev->opmode == wlan_op_mode_sta) &&
6388 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
6389 			 QDF_MAC_ADDR_SIZE)) {
6390 		ast_type = CDP_TXRX_AST_TYPE_SELF;
6391 	}
6392 	qdf_spinlock_create(&peer->peer_state_lock);
6393 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
6394 	qdf_spinlock_create(&peer->peer_info_lock);
6395 	dp_wds_ext_peer_init(peer);
6396 
6397 	dp_peer_rx_bufq_resources_init(peer);
6398 
6399 	qdf_mem_copy(
6400 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
6401 
6402 	/* initialize the peer_id */
6403 	peer->peer_id = HTT_INVALID_PEER;
6404 
6405 	/* reset the ast index to flowid table */
6406 	dp_peer_reset_flowq_map(peer);
6407 
6408 	qdf_atomic_init(&peer->ref_cnt);
6409 
6410 	for (i = 0; i < DP_MOD_ID_MAX; i++)
6411 		qdf_atomic_init(&peer->mod_refs[i]);
6412 
6413 	/* keep one reference for attach */
6414 	qdf_atomic_inc(&peer->ref_cnt);
6415 	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);
6416 
6417 	dp_peer_vdev_list_add(soc, vdev, peer);
6418 
6419 	/* TODO: See if hash based search is required */
6420 	dp_peer_find_hash_add(soc, peer);
6421 
6422 	/* Initialize the peer state */
6423 	peer->state = OL_TXRX_PEER_STATE_DISC;
6424 
6425 	dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") ref_cnt: %d",
6426 		vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
6427 		qdf_atomic_read(&peer->ref_cnt));
6428 	/*
6429 	 * For every peer MAp message search and set if bss_peer
6430 	 */
6431 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
6432 			QDF_MAC_ADDR_SIZE) == 0 &&
6433 			(wlan_op_mode_sta != vdev->opmode)) {
6434 		dp_info("vdev bss_peer!!");
6435 		peer->bss_peer = 1;
6436 	}
6437 
6438 	if (wlan_op_mode_sta == vdev->opmode &&
6439 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
6440 			QDF_MAC_ADDR_SIZE) == 0) {
6441 		peer->sta_self_peer = 1;
6442 	}
6443 
6444 	for (i = 0; i < DP_MAX_TIDS; i++)
6445 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
6446 
6447 	peer->valid = 1;
6448 	dp_local_peer_id_alloc(pdev, peer);
6449 	DP_STATS_INIT(peer);
6450 	DP_STATS_UPD(peer, rx.avg_snr, CDP_INVALID_SNR);
6451 
6452 	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
6453 		     QDF_MAC_ADDR_SIZE);
6454 	peer_cookie.ctx = NULL;
6455 	peer_cookie.pdev_id = pdev->pdev_id;
6456 	peer_cookie.cookie = pdev->next_peer_cookie++;
6457 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6458 	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc,
6459 			     (void *)&peer_cookie,
6460 			     peer->peer_id, WDI_NO_VAL, pdev->pdev_id);
6461 #endif
6462 	if (soc->rdkstats_enabled) {
6463 		if (!peer_cookie.ctx) {
6464 			pdev->next_peer_cookie--;
6465 			qdf_err("Failed to initialize peer rate stats");
6466 		} else {
6467 			peer->rdkstats_ctx = (struct cdp_peer_rate_stats_ctx *)
6468 						peer_cookie.ctx;
6469 		}
6470 	}
6471 
6472 	/*
6473 	 * Allocate peer extended stats context. Fall through in
6474 	 * case of failure as its not an implicit requirement to have
6475 	 * this object for regular statistics updates.
6476 	 */
6477 	if (dp_peer_ext_stats_ctx_alloc(soc, peer) !=
6478 			QDF_STATUS_SUCCESS)
6479 		dp_warn("peer ext_stats ctx alloc failed");
6480 
6481 	if (dp_monitor_peer_attach(soc, peer) !=
6482 	    QDF_STATUS_SUCCESS)
6483 		dp_warn("peer monitor ctx alloc failed");
6484 
6485 	dp_set_peer_isolation(peer, false);
6486 
6487 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
6488 
6489 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6490 
6491 	return QDF_STATUS_SUCCESS;
6492 }
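/*
 * dp_peer_create_wifi3() thus has two paths: reuse of an inactive bss
 * peer found by dp_peer_can_reuse() (re-initialized in place, keeping
 * the existing allocation), or a freshly allocated peer that takes one
 * DP_MOD_ID_CONFIG reference and starts in OL_TXRX_PEER_STATE_DISC until
 * dp_peer_setup_wifi3() completes the setup.
 */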
6493 
6494 /*
6495  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
6496  * @vdev: Datapath VDEV handle
6497  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
6498  * @hash_based: pointer to hash value (enabled/disabled) to be populated
6499  *
6500  * Return: None
6501  */
6502 static
6503 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
6504 				  enum cdp_host_reo_dest_ring *reo_dest,
6505 				  bool *hash_based)
6506 {
6507 	struct dp_soc *soc;
6508 	struct dp_pdev *pdev;
6509 
6510 	pdev = vdev->pdev;
6511 	soc = pdev->soc;
6512 	/*
6513 	 * hash based steering is disabled for Radios which are offloaded
6514 	 * to NSS
6515 	 */
6516 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
6517 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
6518 
6519 	/*
6520 	 * The line below ensures the proper reo_dest ring is chosen
6521 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
6522 	 */
6523 	*reo_dest = pdev->reo_dest;
6524 }
6525 
6526 #ifdef IPA_OFFLOAD
6527 /**
6528  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
6529  * @vdev: Virtual device
6530  *
6531  * Return: true if the vdev is of subtype P2P
6532  *	   false if the vdev is of any other subtype
6533  */
6534 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
6535 {
6536 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
6537 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
6538 	    vdev->subtype == wlan_op_subtype_p2p_go)
6539 		return true;
6540 
6541 	return false;
6542 }
6543 
6544 /*
6545  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
6546  * @vdev: Datapath VDEV handle
6547  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
6548  * @hash_based: pointer to hash value (enabled/disabled) to be populated
6549  *
6550  * If IPA is enabled in ini, for SAP mode, disable hash based
6551  * steering, use default reo_dst ring for RX. Use config values for other modes.
6552  * Return: None
6553  */
6554 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
6555 				       enum cdp_host_reo_dest_ring *reo_dest,
6556 				       bool *hash_based)
6557 {
6558 	struct dp_soc *soc;
6559 	struct dp_pdev *pdev;
6560 
6561 	pdev = vdev->pdev;
6562 	soc = pdev->soc;
6563 
6564 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
6565 
6566 	/* For P2P-GO interfaces we do not need to change the REO
6567 	 * configuration even if IPA config is enabled
6568 	 */
6569 	if (dp_is_vdev_subtype_p2p(vdev))
6570 		return;
6571 
6572 	/*
6573 	 * If IPA is enabled, disable hash-based flow steering and set
6574 	 * reo_dest_ring_4 as the REO ring to receive packets on.
6575 	 * IPA is configured to reap reo_dest_ring_4.
6576 	 *
6577 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
6578 	 * value enum value is from 1 - 4.
6579 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
6580 	 */
6581 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
6582 		if (vdev->opmode == wlan_op_mode_ap) {
6583 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
6584 			*hash_based = 0;
6585 		} else if (vdev->opmode == wlan_op_mode_sta &&
6586 			   dp_ipa_is_mdm_platform()) {
6587 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
6588 		}
6589 	}
6590 }
6591 
6592 #else
6593 
6594 /*
6595  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
6596  * @vdev: Datapath VDEV handle
6597  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
6598  * @hash_based: pointer to hash value (enabled/disabled) to be populated
6599  *
6600  * Use system config values for hash based steering.
6601  *
6602  * Return: None
6603  */
6604 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
6605 				       enum cdp_host_reo_dest_ring *reo_dest,
6606 				       bool *hash_based)
6607 {
6608 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
6609 }
6610 #endif /* IPA_OFFLOAD */
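/*
 * Worked example for the IPA case above, assuming IPA_REO_DEST_RING_IDX
 * is 3 (its usual definition): an AP vdev gets *reo_dest = 3 + 1 = 4 and
 * hash_based = 0, i.e. all of its RX lands on REO ring 4, which IPA
 * reaps; non-IPA builds keep the pdev default and the cfg hash setting.
 */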
6611 
6612 /*
6613  * dp_peer_setup_wifi3() - initialize the peer
6614  * @soc_hdl: soc handle object
6615  * @vdev_id : vdev_id of vdev object
6616  * @peer_mac: Peer's mac address
6617  *
6618  * Return: QDF_STATUS
6619  */
6620 static QDF_STATUS
6621 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6622 		    uint8_t *peer_mac)
6623 {
6624 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6625 	struct dp_pdev *pdev;
6626 	bool hash_based = 0;
6627 	enum cdp_host_reo_dest_ring reo_dest;
6628 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6629 	struct dp_vdev *vdev = NULL;
6630 	struct dp_peer *peer =
6631 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
6632 					       DP_MOD_ID_CDP);
6633 	enum wlan_op_mode vdev_opmode;
6634 
6635 	if (!peer)
6636 		return QDF_STATUS_E_FAILURE;
6637 
6638 	vdev = peer->vdev;
6639 	if (!vdev) {
6640 		status = QDF_STATUS_E_FAILURE;
6641 		goto fail;
6642 	}
6643 
6644 	/* save vdev related member in case vdev freed */
6645 	vdev_opmode = vdev->opmode;
6646 	pdev = vdev->pdev;
6647 	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
6648 
6649 	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
6650 		pdev->pdev_id, vdev->vdev_id,
6651 		vdev->opmode, hash_based, reo_dest);
6652 
6653 	/*
6654 	 * There are corner cases where AD1 = AD2 = "VAP's address",
6655 	 * i.e. both devices have the same MAC address. In these
6656 	 * cases we want such pkts to be processed by the NULL Q handler,
6657 	 * which is the REO2TCL ring. For this reason we should
6658 	 * not set up reo_queues and the default route for the bss_peer.
6659 	 */
6660 	dp_monitor_peer_tx_init(pdev, peer);
6661 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
6662 		status = QDF_STATUS_E_FAILURE;
6663 		goto fail;
6664 	}
6665 
6666 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
6667 		/* TODO: Check the destination ring number to be passed to FW */
6668 		soc->cdp_soc.ol_ops->peer_set_default_routing(
6669 				soc->ctrl_psoc,
6670 				peer->vdev->pdev->pdev_id,
6671 				peer->mac_addr.raw,
6672 				peer->vdev->vdev_id, hash_based, reo_dest);
6673 	}
6674 
6675 	qdf_atomic_set(&peer->is_default_route_set, 1);
6676 
6677 	if (vdev_opmode != wlan_op_mode_monitor)
6678 		dp_peer_rx_init(pdev, peer);
6679 
6680 	dp_peer_ppdu_delayed_ba_init(peer);
6681 
6682 fail:
6683 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6684 	return status;
6685 }
6686 
6687 /*
6688  * dp_cp_peer_del_resp_handler - Handle the peer delete response
6689  * @soc_hdl: Datapath SOC handle
6690  * @vdev_id: id of virtual device object
6691  * @mac_addr: Mac address of the peer
6692  *
6693  * Return: QDF_STATUS
6694  */
6695 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
6696 					      uint8_t vdev_id,
6697 					      uint8_t *mac_addr)
6698 {
6699 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6700 	struct dp_ast_entry  *ast_entry = NULL;
6701 	txrx_ast_free_cb cb = NULL;
6702 	void *cookie;
6703 
6704 	qdf_spin_lock_bh(&soc->ast_lock);
6705 
6706 	ast_entry =
6707 		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
6708 						vdev_id);
6709 
6710 	/* in case of qwrap we have multiple BSS peers
6711 	 * with the same MAC address
6712 	 *
6713 	 * an AST entry for this MAC address is created
6714 	 * only for one peer; hence it may be NULL here
6715 	 */
6716 	if ((!ast_entry || !ast_entry->delete_in_progress) ||
6717 	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
6718 		qdf_spin_unlock_bh(&soc->ast_lock);
6719 		return QDF_STATUS_E_FAILURE;
6720 	}
6721 
6722 	if (ast_entry->is_mapped)
6723 		soc->ast_table[ast_entry->ast_idx] = NULL;
6724 
6725 	DP_STATS_INC(soc, ast.deleted, 1);
6726 	dp_peer_ast_hash_remove(soc, ast_entry);
6727 
6728 	cb = ast_entry->callback;
6729 	cookie = ast_entry->cookie;
6730 	ast_entry->callback = NULL;
6731 	ast_entry->cookie = NULL;
6732 
6733 	soc->num_ast_entries--;
6734 	qdf_spin_unlock_bh(&soc->ast_lock);
6735 
6736 	if (cb) {
6737 		cb(soc->ctrl_psoc,
6738 		   dp_soc_to_cdp_soc(soc),
6739 		   cookie,
6740 		   CDP_TXRX_AST_DELETED);
6741 	}
6742 	qdf_mem_free(ast_entry);
6743 
6744 	return QDF_STATUS_SUCCESS;
6745 }
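/*
 * Note: the AST free callback above is invoked only after soc->ast_lock
 * is dropped, so a callback that re-enters the AST APIs does not
 * deadlock on the same lock.
 */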
6746 
6747 /*
6748  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
6749  * @txrx_soc: cdp soc handle
6750  * @ac: Access category
6751  * @value: timeout value in millisec
6752  *
6753  * Return: void
6754  */
6755 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
6756 				    uint8_t ac, uint32_t value)
6757 {
6758 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6759 
6760 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
6761 }
6762 
6763 /*
6764  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
6765  * @txrx_soc: cdp soc handle
6766  * @ac: access category
6767  * @value: timeout value in millisec
6768  *
6769  * Return: void
6770  */
6771 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
6772 				    uint8_t ac, uint32_t *value)
6773 {
6774 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6775 
6776 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
6777 }
6778 
6779 /*
6780  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
6781  * @txrx_soc: cdp soc handle
6782  * @pdev_id: id of physical device object
6783  * @val: reo destination ring index (1 - 4)
6784  *
6785  * Return: QDF_STATUS
6786  */
6787 static QDF_STATUS
6788 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
6789 		     enum cdp_host_reo_dest_ring val)
6790 {
6791 	struct dp_pdev *pdev =
6792 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
6793 						   pdev_id);
6794 
6795 	if (pdev) {
6796 		pdev->reo_dest = val;
6797 		return QDF_STATUS_SUCCESS;
6798 	}
6799 
6800 	return QDF_STATUS_E_FAILURE;
6801 }
6802 
6803 /*
6804  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
6805  * @txrx_soc: cdp soc handle
6806  * @pdev_id: id of physical device object
6807  *
6808  * Return: reo destination ring index
6809  */
6810 static enum cdp_host_reo_dest_ring
6811 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
6812 {
6813 	struct dp_pdev *pdev =
6814 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
6815 						   pdev_id);
6816 
6817 	if (pdev)
6818 		return pdev->reo_dest;
6819 	else
6820 		return cdp_host_reo_dest_ring_unknown;
6821 }
6822 
6823 #ifdef WLAN_SUPPORT_SCS
6824 /*
6825  * dp_enable_scs_params() - Enable/Disable SCS procedures
6826  * @soc_hdl: Datapath soc handle
6827  * @peer_mac: STA MAC address
6828  * @vdev_id: ID of the vdev handle
6829  * @is_active: Flag to set SCS active/inactive
6830  * Return: QDF_STATUS - Success/Invalid
6831  */
6832 static QDF_STATUS
6833 dp_enable_scs_params(struct cdp_soc_t *soc_hdl, struct qdf_mac_addr
6834 		     *peer_mac,
6835 		     uint8_t vdev_id,
6836 		     bool is_active)
6837 {
6838 	struct dp_peer *peer;
6839 	QDF_STATUS status = QDF_STATUS_E_INVAL;
6840 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6841 
6842 	peer = dp_peer_find_hash_find(soc, peer_mac->bytes, 0, vdev_id,
6843 				      DP_MOD_ID_CDP);
6844 
6845 	if (!peer) {
6846 		dp_err("Peer is NULL!");
6847 		goto fail;
6848 	}
6849 
6850 	peer->scs_is_active = is_active;
6851 	status = QDF_STATUS_SUCCESS;
6852 
6853 fail:
6854 	if (peer)
6855 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6856 	return status;
6857 }
6858 
6859 /*
6860  * dp_copy_scs_params() - Copy SCS parameters sent by the STA
6861  * from the cdp layer to the dp layer. These parameters are
6862  * then used by the peer for traffic classification.
6863  *
6864  * @peer: peer struct
6865  * @scs_params: cdp layer params
6866  * @idx: SCS_entry index obtained from the
6867  *       node database with a given SCSID
6868  *
6869  * Return: void
6870  */
6871 void
6872 dp_copy_scs_params(struct dp_peer *peer,
6873 		   struct cdp_scs_params *scs_params,
6874 		   uint8_t idx)
6875 {
6876 	uint8_t tidx = 0;
6877 	uint8_t tclas_elem;
6878 
6879 	peer->scs[idx].scsid = scs_params->scsid;
6880 	peer->scs[idx].access_priority =
6881 		scs_params->access_priority;
6882 	peer->scs[idx].tclas_elements =
6883 		scs_params->tclas_elements;
6884 	peer->scs[idx].tclas_process =
6885 		scs_params->tclas_process;
6886 
6887 	tclas_elem = peer->scs[idx].tclas_elements;
6888 
6889 	while (tidx < tclas_elem) {
6890 		qdf_mem_copy(&peer->scs[idx].tclas[tidx],
6891 			     &scs_params->tclas[tidx],
6892 			     sizeof(struct cdp_tclas_tuple));
6893 		tidx++;
6894 	}
6895 }
6896 
6897 /*
6898  * dp_record_scs_params() - Copy the SCS params to a
6899  * peer-based database.
6900  *
6901  * @soc_hdl: Datapath soc handle
6902  * @peer_mac: STA MAC address
6903  * @vdev_id: ID of the vdev handle
6904  * @scs_params: Structure having SCS parameters obtained
6905  *              from handshake
6906  * @idx: SCS_entry index obtained from the
6907  *       node database with a given SCSID
6908  * @scs_sessions: Total # of SCS sessions active
6909  *
6910  * SCS parameters are sent by the STA in
6911  * the SCS Request to the AP. The AP makes a note of
6912  * these parameters while sending the MSDUs to the
6913  * STA, in order to send the downlink traffic with
6914  * the correct User priority.
6915  *
6916  * Return: QDF_STATUS - Success/Invalid
6917  */
6918 static QDF_STATUS
6919 dp_record_scs_params(struct cdp_soc_t *soc_hdl, struct qdf_mac_addr
6920 		     *peer_mac,
6921 		     uint8_t vdev_id,
6922 		     struct cdp_scs_params *scs_params,
6923 		     uint8_t idx,
6924 		     uint8_t scs_sessions)
6925 {
6926 	struct dp_peer *peer;
6927 	QDF_STATUS status = QDF_STATUS_E_INVAL;
6928 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6929 
6930 	peer = dp_peer_find_hash_find(soc, peer_mac->bytes, 0, vdev_id,
6931 				      DP_MOD_ID_CDP);
6932 
6933 	if (!peer) {
6934 		dp_err("Peer is NULL!");
6935 		goto fail;
6936 	}
6937 
6938 	if (idx >= IEEE80211_SCS_MAX_NO_OF_ELEM)
6939 		goto fail;
6940 
6941 	/* SCS procedure for the peer is activated
6942 	 * as soon as we get this information from
6943 	 * the control path, unless explicitly disabled.
6944 	 */
6945 	peer->scs_is_active = 1;
6946 	dp_copy_scs_params(peer, scs_params, idx);
6947 	status = QDF_STATUS_SUCCESS;
6948 	peer->no_of_scs_sessions = scs_sessions;
6949 
6950 fail:
6951 	if (peer)
6952 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6953 	return status;
6954 }
6955 #endif
6956 
6957 #ifdef WLAN_SUPPORT_MSCS
6958 /*
6959  * dp_record_mscs_params() - Record the MSCS parameters sent by the STA in
6960  * the MSCS Request to the AP. The AP makes a note of these
6961  * parameters while comparing the MSDUs sent by the STA, to
6962  * send the downlink traffic with the correct User priority.
6963  * @soc_hdl: Datapath soc handle
6964  * @peer_mac: STA MAC address
6965  * @vdev_id: ID of the vdev handle
6966  * @mscs_params: Structure having MSCS parameters obtained
6967  *               from handshake
6968  * @active: Flag to set MSCS active/inactive
6969  * Return: QDF_STATUS - Success/Invalid
6970  */
6971 static QDF_STATUS
6972 dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
6973 		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
6974 		      bool active)
6975 {
6976 	struct dp_peer *peer;
6977 	QDF_STATUS status = QDF_STATUS_E_INVAL;
6978 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6979 
6980 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
6981 				      DP_MOD_ID_CDP);
6982 
6983 	if (!peer) {
6984 		dp_err("Peer is NULL!");
6985 		goto fail;
6986 	}
6987 	if (!active) {
6988 		dp_info("MSCS Procedure is terminated");
6989 		peer->mscs_active = active;
6990 		goto fail;
6991 	}
6992 
6993 	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
6994 		/* Populate entries inside IPV4 database first */
6995 		peer->mscs_ipv4_parameter.user_priority_bitmap =
6996 			mscs_params->user_pri_bitmap;
6997 		peer->mscs_ipv4_parameter.user_priority_limit =
6998 			mscs_params->user_pri_limit;
6999 		peer->mscs_ipv4_parameter.classifier_mask =
7000 			mscs_params->classifier_mask;
7001 
7002 		/* Populate entries inside IPV6 database */
7003 		peer->mscs_ipv6_parameter.user_priority_bitmap =
7004 			mscs_params->user_pri_bitmap;
7005 		peer->mscs_ipv6_parameter.user_priority_limit =
7006 			mscs_params->user_pri_limit;
7007 		peer->mscs_ipv6_parameter.classifier_mask =
7008 			mscs_params->classifier_mask;
7009 		peer->mscs_active = 1;
7010 		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
7011 			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
7012 			"\tUser priority limit = %x\tClassifier mask = %x",
7013 			QDF_MAC_ADDR_REF(peer_mac),
7014 			mscs_params->classifier_type,
7015 			peer->mscs_ipv4_parameter.user_priority_bitmap,
7016 			peer->mscs_ipv4_parameter.user_priority_limit,
7017 			peer->mscs_ipv4_parameter.classifier_mask);
7018 	}
7019 
7020 	status = QDF_STATUS_SUCCESS;
7021 fail:
7022 	if (peer)
7023 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7024 	return status;
7025 }
7026 #endif
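/*
 * Note: dp_record_mscs_params() mirrors the same user-priority
 * bitmap/limit/mask into both the IPv4 and IPv6 parameter blocks, so
 * classification behaves identically for either IP family.
 */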
7027 
7028 /*
7029  * dp_get_sec_type() - Get the security type
7030  * @soc: soc handle
7031  * @vdev_id: id of dp handle
7032  * @peer_mac: mac of datapath PEER handle
7033  * @sec_idx:    Security id (mcast, ucast)
7034  *
7035  * Return: sec_type (Security type)
7036  */
7037 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
7038 			   uint8_t *peer_mac, uint8_t sec_idx)
7039 {
7040 	int sec_type = 0;
7041 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
7042 						       peer_mac, 0, vdev_id,
7043 						       DP_MOD_ID_CDP);
7044 
7045 	if (!peer) {
7046 		dp_cdp_err("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
7047 		return sec_type;
7048 	}
7049 
7050 	sec_type = peer->security[sec_idx].sec_type;
7051 
7052 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7053 	return sec_type;
7054 }
7055 
7056 /*
7057  * dp_peer_authorize() - authorize txrx peer
7058  * @soc_hdl: soc handle
7059  * @vdev_id: id of dp handle
7060  * @peer_mac: mac of datapath PEER handle
7061  * @authorize: 1 to authorize the peer, 0 to deauthorize
7062  * Return: QDF_STATUS
7063  */
7064 static QDF_STATUS
7065 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7066 		  uint8_t *peer_mac, uint32_t authorize)
7067 {
7068 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7069 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7070 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
7071 						      0, vdev_id,
7072 						      DP_MOD_ID_CDP);
7073 
7074 	if (!peer) {
7075 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
7076 		status = QDF_STATUS_E_FAILURE;
7077 	} else {
7078 		peer->authorize = authorize ? 1 : 0;
7079 
7080 		if (!peer->authorize)
7081 			dp_peer_flush_frags(soc_hdl, vdev_id, peer_mac);
7082 
7083 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7084 	}
7085 
7086 	return status;
7087 }
7088 
7089 /**
7090  * dp_vdev_unref_delete() - check and process vdev delete
7091  * @soc : DP specific soc pointer
7092  * @vdev: DP specific vdev pointer
7093  * @mod_id: module id
7094  *
7095  */
7096 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
7097 			  enum dp_mod_id mod_id)
7098 {
7099 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
7100 	void *vdev_delete_context = NULL;
7101 	uint8_t vdev_id = vdev->vdev_id;
7102 	struct dp_pdev *pdev = vdev->pdev;
7103 	struct dp_vdev *tmp_vdev = NULL;
7104 	uint8_t found = 0;
7105 
7106 	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);
7107 
7108 	/* Return if this is not the last reference */
7109 	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
7110 		return;
7111 
7112 	/*
7113 	 * delete.pending should already be set, since the last
7114 	 * reference must be released only after cdp_vdev_detach() is called.
7115 	 *
7116 	 * If this assert is hit, there is a ref count issue.
7117 	 */
7118 	QDF_ASSERT(vdev->delete.pending);
7119 
7120 	vdev_delete_cb = vdev->delete.callback;
7121 	vdev_delete_context = vdev->delete.context;
7122 
7123 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done",
7124 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
7125 
7126 	if (wlan_op_mode_monitor == vdev->opmode) {
7127 		dp_monitor_vdev_delete(soc, vdev);
7128 		goto free_vdev;
7129 	}
7130 
7131 	/* all peers are gone, go ahead and delete it */
7132 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
7133 			FLOW_TYPE_VDEV, vdev_id);
7134 	dp_tx_vdev_detach(vdev);
7135 	dp_monitor_vdev_detach(vdev);
7136 
7137 free_vdev:
7138 	qdf_spinlock_destroy(&vdev->peer_list_lock);
7139 
7140 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
7141 	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
7142 		      inactive_list_elem) {
7143 		if (tmp_vdev == vdev) {
7144 			found = 1;
7145 			break;
7146 		}
7147 	}
7148 	if (found)
7149 		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
7150 			     inactive_list_elem);
	/* the vdev has now been removed from the inactive vdev list */
7152 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
7153 
7154 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
7155 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
7156 	wlan_minidump_remove(vdev, sizeof(*vdev), soc->ctrl_psoc,
7157 			     WLAN_MD_DP_VDEV, "dp_vdev");
7158 	qdf_mem_free(vdev);
7159 	vdev = NULL;
7160 
7161 	if (vdev_delete_cb)
7162 		vdev_delete_cb(vdev_delete_context);
7163 }
7164 
7165 qdf_export_symbol(dp_vdev_unref_delete);
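
/*
 * Illustrative sketch (editorial example, not driver code): every
 * dp_vdev_get_ref_by_id() must be paired with a dp_vdev_unref_delete()
 * using the same dp_mod_id, since the final unref is what triggers the
 * deferred vdev free above:
 *
 *	struct dp_vdev *vdev =
 *		dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
 *
 *	if (!vdev)
 *		return QDF_STATUS_E_FAILURE;
 *
 *	// ... use vdev while the reference is held ...
 *
 *	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
 */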
7166 
/*
 * dp_peer_unref_delete() - release a peer reference and, on the last
 * reference, delete the peer
 * @peer: Datapath peer handle
 * @mod_id: ID of the module releasing the reference
 *
 * Return: None
 */
7173 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
7174 {
7175 	struct dp_vdev *vdev = peer->vdev;
7176 	struct dp_pdev *pdev = vdev->pdev;
7177 	struct dp_soc *soc = pdev->soc;
7178 	uint16_t peer_id;
7179 	struct cdp_peer_cookie peer_cookie;
7180 	struct dp_peer *tmp_peer;
7181 	bool found = false;
7182 	int tid = 0;
7183 
7184 	if (mod_id > DP_MOD_ID_RX)
7185 		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);
7186 
7187 	/*
7188 	 * Hold the lock all the way from checking if the peer ref count
7189 	 * is zero until the peer references are removed from the hash
7190 	 * table and vdev list (if the peer ref count is zero).
7191 	 * This protects against a new HL tx operation starting to use the
7192 	 * peer object just after this function concludes it's done being used.
7193 	 * Furthermore, the lock needs to be held while checking whether the
7194 	 * vdev's list of peers is empty, to make sure that list is not modified
7195 	 * concurrently with the empty check.
7196 	 */
7197 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
7198 		peer_id = peer->peer_id;
7199 
7200 		/*
7201 		 * Make sure that the reference to the peer in
7202 		 * peer object map is removed
7203 		 */
7204 		QDF_ASSERT(peer_id == HTT_INVALID_PEER);
7205 
7206 		dp_peer_debug("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
7207 			      QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7208 
7209 		/*
		 * Deallocate the extended stats context
7211 		 */
7212 		dp_peer_ext_stats_ctx_dealloc(soc, peer);
7213 
7214 		/* send peer destroy event to upper layer */
7215 		qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
7216 			     QDF_MAC_ADDR_SIZE);
7217 		peer_cookie.ctx = NULL;
7218 		peer_cookie.ctx = (struct cdp_stats_cookie *)
7219 					peer->rdkstats_ctx;
7220 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7221 		dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
7222 				     soc,
7223 				     (void *)&peer_cookie,
7224 				     peer->peer_id,
7225 				     WDI_NO_VAL,
7226 				     pdev->pdev_id);
7227 #endif
7228 		peer->rdkstats_ctx = NULL;
7229 		wlan_minidump_remove(peer, sizeof(*peer), soc->ctrl_psoc,
7230 				     WLAN_MD_DP_PEER, "dp_peer");
7231 
7232 		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
7233 		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
7234 			      inactive_list_elem) {
7235 			if (tmp_peer == peer) {
7236 				found = 1;
7237 				break;
7238 			}
7239 		}
7240 		if (found)
7241 			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
7242 				     inactive_list_elem);
		/* the peer has now been removed from the inactive peer list */
7244 		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
7245 		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
7246 		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
7247 
7248 		/* cleanup the peer data */
7249 		dp_peer_cleanup(vdev, peer);
7250 		dp_monitor_peer_detach(soc, peer);
7251 
7252 		for (tid = 0; tid < DP_MAX_TIDS; tid++)
7253 			qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
7254 
7255 		qdf_spinlock_destroy(&peer->peer_state_lock);
7256 		qdf_mem_free(peer);
7257 
7258 		/*
7259 		 * Decrement ref count taken at peer create
7260 		 */
7261 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
7262 	}
7263 }
7264 
7265 qdf_export_symbol(dp_peer_unref_delete);
7266 
7267 #ifdef PEER_CACHE_RX_PKTS
7268 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
7269 {
7270 	qdf_list_destroy(&peer->bufq_info.cached_bufq);
7271 	qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
7272 }
7273 #else
7274 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
7275 {
7276 }
7277 #endif
7278 
/*
 * dp_peer_delete_wifi3() - Delete txrx peer
 * @soc_hdl: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: mac of datapath PEER handle
 * @bitmap: bitmap indicating special handling of request.
 *
 * Return: QDF_STATUS
 */
7287 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
7288 				       uint8_t vdev_id,
7289 				       uint8_t *peer_mac, uint32_t bitmap)
7290 {
7291 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7292 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
7293 						      0, vdev_id,
7294 						      DP_MOD_ID_CDP);
7295 	struct dp_vdev *vdev = NULL;
7296 
7297 	/* Peer can be null for monitor vap mac address */
7298 	if (!peer) {
7299 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
7300 			  "%s: Invalid peer\n", __func__);
7301 		return QDF_STATUS_E_FAILURE;
7302 	}
7303 
7304 	if (!peer->valid) {
7305 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7306 		dp_err("Invalid peer: "QDF_MAC_ADDR_FMT,
7307 			QDF_MAC_ADDR_REF(peer_mac));
7308 		return QDF_STATUS_E_ALREADY;
7309 	}
7310 
7311 	vdev = peer->vdev;
7312 
	if (!vdev) {
		/* drop the reference taken by dp_peer_find_hash_find() */
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	peer->valid = 0;
7316 
7317 	dp_init_info("%pK: peer %pK (" QDF_MAC_ADDR_FMT ")",
7318 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7319 
7320 	dp_local_peer_id_free(peer->vdev->pdev, peer);
7321 
7322 	/* Drop all rx packets before deleting peer */
7323 	dp_clear_peer_internal(soc, peer);
7324 
7325 	dp_peer_rx_bufq_resources_deinit(peer);
7326 
7327 	qdf_spinlock_destroy(&peer->peer_info_lock);
7328 	dp_peer_multipass_list_remove(peer);
7329 
7330 	/* remove the reference to the peer from the hash table */
7331 	dp_peer_find_hash_remove(soc, peer);
7332 
7333 	dp_peer_vdev_list_remove(soc, vdev, peer);
7334 
7335 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
7336 	TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer,
7337 			  inactive_list_elem);
7338 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
7339 
7340 	/*
7341 	 * Remove the reference added during peer_attach.
7342 	 * The peer will still be left allocated until the
7343 	 * PEER_UNMAP message arrives to remove the other
7344 	 * reference, added by the PEER_MAP message.
7345 	 */
7346 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
7347 	/*
7348 	 * Remove the reference taken above
7349 	 */
7350 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7351 
7352 	return QDF_STATUS_SUCCESS;
7353 }
7354 
/*
 * dp_get_vdev_mac_addr_wifi3() - Get the MAC address of a vdev
 * @soc_hdl: Datapath soc handle
 * @vdev_id: virtual interface id
 *
 * Return: MAC address on success, NULL on failure.
 */
7363 static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
7364 					   uint8_t vdev_id)
7365 {
7366 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7367 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7368 						     DP_MOD_ID_CDP);
7369 	uint8_t *mac = NULL;
7370 
7371 	if (!vdev)
7372 		return NULL;
7373 
7374 	mac = vdev->mac_addr.raw;
7375 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7376 
7377 	return mac;
7378 }
7379 
/*
 * dp_vdev_set_wds() - Enable/disable WDS on a vdev
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP VDEV handle
 * @val: 1 to enable WDS, 0 to disable
 *
 * Return: QDF_STATUS
 */
7388 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7389 			   uint32_t val)
7390 {
7391 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7392 	struct dp_vdev *vdev =
7393 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
7394 				      DP_MOD_ID_CDP);
7395 
7396 	if (!vdev)
7397 		return QDF_STATUS_E_FAILURE;
7398 
7399 	vdev->wds_enabled = val;
7400 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7401 
7402 	return QDF_STATUS_SUCCESS;
7403 }
7404 
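/*
 * dp_get_opmode() - Get the operating mode of a vdev
 * @soc_hdl: Datapath soc handle
 * @vdev_id: virtual interface id
 *
 * Return: vdev operating mode on success, -EINVAL if the vdev
 * cannot be found
 */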
7405 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
7406 {
7407 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7408 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7409 						     DP_MOD_ID_CDP);
7410 	int opmode;
7411 
7412 	if (!vdev) {
7413 		dp_err("vdev for id %d is NULL", vdev_id);
7414 		return -EINVAL;
7415 	}
7416 	opmode = vdev->opmode;
7417 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7418 
7419 	return opmode;
7420 }
7421 
7422 /**
7423  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
7424  * @soc_hdl: ol_txrx_soc_handle handle
7425  * @vdev_id: vdev id for which os rx handles are needed
7426  * @stack_fn_p: pointer to stack function pointer
 * @osif_vdev_p: pointer to ol_osif_vdev_handle
7428  *
7429  * Return: void
7430  */
7431 static
7432 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
7433 					  uint8_t vdev_id,
7434 					  ol_txrx_rx_fp *stack_fn_p,
7435 					  ol_osif_vdev_handle *osif_vdev_p)
7436 {
7437 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7438 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7439 						     DP_MOD_ID_CDP);
7440 
7441 	if (qdf_unlikely(!vdev)) {
7442 		*stack_fn_p = NULL;
7443 		*osif_vdev_p = NULL;
7444 		return;
7445 	}
7446 	*stack_fn_p = vdev->osif_rx_stack;
7447 	*osif_vdev_p = vdev->osif_vdev;
7448 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7449 }
7450 
7451 /**
 * dp_get_ctrl_pdev_from_vdev_wifi3() - Get control pdev of vdev
7453  * @soc_hdl: datapath soc handle
7454  * @vdev_id: virtual device/interface id
7455  *
7456  * Return: Handle to control pdev
7457  */
7458 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
7459 						struct cdp_soc_t *soc_hdl,
7460 						uint8_t vdev_id)
7461 {
7462 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7463 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7464 						     DP_MOD_ID_CDP);
7465 	struct dp_pdev *pdev;
7466 
7467 	if (!vdev)
7468 		return NULL;
7469 
7470 	pdev = vdev->pdev;
7471 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7472 	return pdev ? (struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL;
7473 }
7474 
7475 /**
7476  * dp_get_tx_pending() - read pending tx
7477  * @pdev_handle: Datapath PDEV handle
7478  *
7479  * Return: outstanding tx
7480  */
7481 static int32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
7482 {
7483 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7484 
7485 	return qdf_atomic_read(&pdev->num_tx_outstanding);
7486 }
7487 
7488 /**
7489  * dp_get_peer_mac_from_peer_id() - get peer mac
 * @soc: CDP soc handle
7491  * @peer_id: Peer ID
7492  * @peer_mac: MAC addr of PEER
7493  *
7494  * Return: QDF_STATUS
7495  */
7496 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
7497 					       uint32_t peer_id,
7498 					       uint8_t *peer_mac)
7499 {
7500 	struct dp_peer *peer;
7501 
7502 	if (soc && peer_mac) {
7503 		peer = dp_peer_get_ref_by_id((struct dp_soc *)soc,
7504 					     (uint16_t)peer_id,
7505 					     DP_MOD_ID_CDP);
7506 		if (peer) {
7507 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
7508 				     QDF_MAC_ADDR_SIZE);
7509 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7510 			return QDF_STATUS_SUCCESS;
7511 		}
7512 	}
7513 
7514 	return QDF_STATUS_E_FAILURE;
7515 }
7516 
7517 #ifdef MESH_MODE_SUPPORT
7518 static
7519 void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
7520 {
7521 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7522 
7523 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
7524 	vdev->mesh_vdev = val;
7525 	if (val)
7526 		vdev->skip_sw_tid_classification |=
7527 			DP_TX_MESH_ENABLED;
7528 	else
7529 		vdev->skip_sw_tid_classification &=
7530 			~DP_TX_MESH_ENABLED;
7531 }
7532 
7533 /*
 * dp_vdev_set_mesh_rx_filter() - to set the mesh rx filter
7535  * @vdev_hdl: virtual device object
7536  * @val: value to be set
7537  *
7538  * Return: void
7539  */
7540 static
7541 void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
7542 {
7543 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7544 
7545 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
7546 	vdev->mesh_rx_filter = val;
7547 }
7548 #endif
7549 
7550 /*
7551  * dp_vdev_set_hlos_tid_override() - to set hlos tid override
 * @vdev: virtual device object
7553  * @val: value to be set
7554  *
7555  * Return: void
7556  */
7557 static
7558 void dp_vdev_set_hlos_tid_override(struct dp_vdev *vdev, uint32_t val)
7559 {
7560 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
7561 	if (val)
7562 		vdev->skip_sw_tid_classification |=
7563 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
7564 	else
7565 		vdev->skip_sw_tid_classification &=
7566 			~DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
7567 }
7568 
7569 /*
7570  * dp_vdev_get_hlos_tid_override() - to get hlos tid override flag
7571  * @vdev_hdl: virtual device object
 *
 * Return: 1 if the hlos tid override flag is set, 0 otherwise
7575  */
7576 static
7577 uint8_t dp_vdev_get_hlos_tid_override(struct cdp_vdev *vdev_hdl)
7578 {
7579 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7580 
7581 	return !!(vdev->skip_sw_tid_classification &
7582 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED);
7583 }
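
/*
 * Illustrative note (editorial, not driver code):
 * skip_sw_tid_classification is a bitmask, so the mesh and HLOS
 * override flags combine independently, e.g.:
 *
 *	dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev, 1);
 *	dp_vdev_set_hlos_tid_override(vdev, 1);
 *
 * leaves both DP_TX_MESH_ENABLED and
 * DP_TXRX_HLOS_TID_OVERRIDE_ENABLED set, while
 * dp_vdev_get_hlos_tid_override() reports only the HLOS bit.
 */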
7584 
7585 #ifdef VDEV_PEER_PROTOCOL_COUNT
7586 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
7587 					       int8_t vdev_id,
7588 					       bool enable)
7589 {
7590 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7591 	struct dp_vdev *vdev;
7592 
7593 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7594 	if (!vdev)
7595 		return;
7596 
7597 	dp_info("enable %d vdev_id %d", enable, vdev_id);
7598 	vdev->peer_protocol_count_track = enable;
7599 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7600 }
7601 
7602 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
7603 						   int8_t vdev_id,
7604 						   int drop_mask)
7605 {
7606 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7607 	struct dp_vdev *vdev;
7608 
7609 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7610 	if (!vdev)
7611 		return;
7612 
7613 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
7614 	vdev->peer_protocol_count_dropmask = drop_mask;
7615 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7616 }
7617 
7618 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
7619 						  int8_t vdev_id)
7620 {
7621 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7622 	struct dp_vdev *vdev;
7623 	int peer_protocol_count_track;
7624 
7625 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7626 	if (!vdev)
7627 		return 0;
7628 
7629 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
7630 		vdev_id);
7631 	peer_protocol_count_track =
7632 		vdev->peer_protocol_count_track;
7633 
7634 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7635 	return peer_protocol_count_track;
7636 }
7637 
7638 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
7639 					       int8_t vdev_id)
7640 {
7641 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7642 	struct dp_vdev *vdev;
7643 	int peer_protocol_count_dropmask;
7644 
7645 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7646 	if (!vdev)
7647 		return 0;
7648 
7649 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
7650 		vdev_id);
7651 	peer_protocol_count_dropmask =
7652 		vdev->peer_protocol_count_dropmask;
7653 
7654 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7655 	return peer_protocol_count_dropmask;
7656 }
7657 
7658 #endif
7659 
7660 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
7661 {
7662 	uint8_t pdev_count;
7663 
7664 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
7665 		if (soc->pdev_list[pdev_count] &&
7666 		    soc->pdev_list[pdev_count] == data)
7667 			return true;
7668 	}
7669 	return false;
7670 }
7671 
7672 /**
7673  * dp_rx_bar_stats_cb(): BAR received stats callback
7674  * @soc: SOC handle
7675  * @cb_ctxt: Call back context
7676  * @reo_status: Reo status
7677  *
7678  * return: void
7679  */
7680 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
7681 	union hal_reo_status *reo_status)
7682 {
7683 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
7684 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
7685 
7686 	if (!dp_check_pdev_exists(soc, pdev)) {
7687 		dp_err_rl("pdev doesn't exist");
7688 		return;
7689 	}
7690 
7691 	if (!qdf_atomic_read(&soc->cmn_init_done))
7692 		return;
7693 
7694 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
7695 		DP_PRINT_STATS("REO stats failure %d",
7696 			       queue_status->header.status);
7697 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
7698 		return;
7699 	}
7700 
7701 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
7702 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
}
7705 
7706 /**
7707  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
 * @vdev: DP VDEV handle
 * @vdev_stats: buffer to hold the aggregated vdev stats
7709  *
7710  * return: void
7711  */
7712 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
7713 			     struct cdp_vdev_stats *vdev_stats)
7714 {
7715 	struct dp_soc *soc = NULL;
7716 
7717 	if (!vdev || !vdev->pdev)
7718 		return;
7719 
7720 	soc = vdev->pdev->soc;
7721 
7722 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
7723 
7724 	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
7725 			     DP_MOD_ID_GENERIC_STATS);
7726 
7727 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7728 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7729 			     vdev_stats, vdev->vdev_id,
7730 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
7731 #endif
7732 }
7733 
7734 void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
7735 {
7736 	struct dp_vdev *vdev = NULL;
7737 	struct dp_soc *soc;
7738 	struct cdp_vdev_stats *vdev_stats =
7739 			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
7740 
7741 	if (!vdev_stats) {
		dp_cdp_err("%pK: DP alloc failure - unable to allocate vdev stats",
			   pdev->soc);
7744 		return;
7745 	}
7746 
7747 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
7748 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
7749 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
7750 
7751 	if (dp_monitor_is_enable_mcopy_mode(pdev))
7752 		DP_UPDATE_STATS(pdev, pdev->invalid_peer);
7753 
7754 	soc = pdev->soc;
7755 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
7756 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
7758 		dp_aggregate_vdev_stats(vdev, vdev_stats);
7759 		dp_update_pdev_stats(pdev, vdev_stats);
7760 		dp_update_pdev_ingress_stats(pdev, vdev);
7761 	}
7762 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
7763 	qdf_mem_free(vdev_stats);
7764 
7765 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7766 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
7767 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
7768 #endif
7769 }
7770 
7771 /**
7772  * dp_vdev_getstats() - get vdev packet level stats
7773  * @vdev_handle: Datapath VDEV handle
7774  * @stats: cdp network device stats structure
7775  *
7776  * Return: QDF_STATUS
7777  */
7778 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
7779 				   struct cdp_dev_stats *stats)
7780 {
7781 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7782 	struct dp_pdev *pdev;
7783 	struct dp_soc *soc;
7784 	struct cdp_vdev_stats *vdev_stats;
7785 
7786 	if (!vdev)
7787 		return QDF_STATUS_E_FAILURE;
7788 
7789 	pdev = vdev->pdev;
7790 	if (!pdev)
7791 		return QDF_STATUS_E_FAILURE;
7792 
7793 	soc = pdev->soc;
7794 
7795 	vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
7796 
7797 	if (!vdev_stats) {
		dp_cdp_err("%pK: DP alloc failure - unable to allocate vdev stats",
			   soc);
7800 		return QDF_STATUS_E_FAILURE;
7801 	}
7802 
7803 	dp_aggregate_vdev_stats(vdev, vdev_stats);
7804 
7805 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
7806 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
7807 
7808 	stats->tx_errors = vdev_stats->tx.tx_failed +
7809 		vdev_stats->tx_i.dropped.dropped_pkt.num;
7810 	stats->tx_dropped = stats->tx_errors;
7811 
7812 	stats->rx_packets = vdev_stats->rx.unicast.num +
7813 		vdev_stats->rx.multicast.num +
7814 		vdev_stats->rx.bcast.num;
7815 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
7816 		vdev_stats->rx.multicast.bytes +
7817 		vdev_stats->rx.bcast.bytes;
7818 
7819 	qdf_mem_free(vdev_stats);
7820 
7821 	return QDF_STATUS_SUCCESS;
7822 }
7823 
7824 /**
7825  * dp_pdev_getstats() - get pdev packet level stats
7826  * @pdev_handle: Datapath PDEV handle
7827  * @stats: cdp network device stats structure
7828  *
 * Return: None
7830  */
7831 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
7832 			     struct cdp_dev_stats *stats)
7833 {
7834 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7835 
7836 	dp_aggregate_pdev_stats(pdev);
7837 
7838 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
7839 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
7840 
7841 	stats->tx_errors = pdev->stats.tx.tx_failed +
7842 		pdev->stats.tx_i.dropped.dropped_pkt.num;
7843 	stats->tx_dropped = stats->tx_errors;
7844 
7845 	stats->rx_packets = pdev->stats.rx.unicast.num +
7846 		pdev->stats.rx.multicast.num +
7847 		pdev->stats.rx.bcast.num;
7848 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
7849 		pdev->stats.rx.multicast.bytes +
7850 		pdev->stats.rx.bcast.bytes;
7851 	stats->rx_errors = pdev->stats.err.ip_csum_err +
7852 		pdev->stats.err.tcp_udp_csum_err +
7853 		pdev->stats.rx.err.mic_err +
7854 		pdev->stats.rx.err.decrypt_err +
7855 		pdev->stats.err.rxdma_error +
7856 		pdev->stats.err.reo_error;
7857 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
7858 		pdev->stats.dropped.mec +
7859 		pdev->stats.dropped.mesh_filter +
7860 		pdev->stats.dropped.wifi_parse +
7861 		pdev->stats.dropped.mon_rx_drop +
7862 		pdev->stats.dropped.mon_radiotap_update_err;
7863 }
7864 
7865 /**
7866  * dp_get_device_stats() - get interface level packet stats
 * @soc_hdl: soc handle
 * @id: vdev_id or pdev_id based on type
7869  * @stats: cdp network device stats structure
7870  * @type: device type pdev/vdev
7871  *
7872  * Return: QDF_STATUS
7873  */
7874 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
7875 				      struct cdp_dev_stats *stats,
7876 				      uint8_t type)
7877 {
7878 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7879 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
7880 	struct dp_vdev *vdev;
7881 
7882 	switch (type) {
7883 	case UPDATE_VDEV_STATS:
7884 		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);
7885 
7886 		if (vdev) {
7887 			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
7888 						  stats);
7889 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7890 		}
7891 		return status;
7892 	case UPDATE_PDEV_STATS:
7893 		{
7894 			struct dp_pdev *pdev =
7895 				dp_get_pdev_from_soc_pdev_id_wifi3(
7896 						(struct dp_soc *)soc,
7897 						 id);
7898 			if (pdev) {
7899 				dp_pdev_getstats((struct cdp_pdev *)pdev,
7900 						 stats);
7901 				return QDF_STATUS_SUCCESS;
7902 			}
7903 		}
7904 		break;
7905 	default:
7906 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"apstats cannot be updated for this input type %d",
			type);
7909 		break;
7910 	}
7911 
7912 	return QDF_STATUS_E_FAILURE;
7913 }
7914 
7915 const
7916 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
7917 {
7918 	switch (ring_type) {
7919 	case REO_DST:
7920 		return "Reo_dst";
7921 	case REO_EXCEPTION:
7922 		return "Reo_exception";
7923 	case REO_CMD:
7924 		return "Reo_cmd";
7925 	case REO_REINJECT:
7926 		return "Reo_reinject";
7927 	case REO_STATUS:
7928 		return "Reo_status";
7929 	case WBM2SW_RELEASE:
7930 		return "wbm2sw_release";
7931 	case TCL_DATA:
7932 		return "tcl_data";
7933 	case TCL_CMD_CREDIT:
7934 		return "tcl_cmd_credit";
7935 	case TCL_STATUS:
7936 		return "tcl_status";
7937 	case SW2WBM_RELEASE:
7938 		return "sw2wbm_release";
7939 	case RXDMA_BUF:
7940 		return "Rxdma_buf";
7941 	case RXDMA_DST:
7942 		return "Rxdma_dst";
7943 	case RXDMA_MONITOR_BUF:
7944 		return "Rxdma_monitor_buf";
7945 	case RXDMA_MONITOR_DESC:
7946 		return "Rxdma_monitor_desc";
7947 	case RXDMA_MONITOR_STATUS:
7948 		return "Rxdma_monitor_status";
7949 	case RXDMA_MONITOR_DST:
7950 		return "Rxdma_monitor_destination";
7951 	case WBM_IDLE_LINK:
7952 		return "WBM_hw_idle_link";
7953 	default:
7954 		dp_err("Invalid ring type");
7955 		break;
7956 	}
7957 	return "Invalid";
7958 }
7959 
/*
 * dp_print_napi_stats(): print NAPI stats
 * @soc: soc handle
 */
7964 void dp_print_napi_stats(struct dp_soc *soc)
7965 {
7966 	hif_print_napi_stats(soc->hif_handle);
7967 }
7968 
7969 #ifdef QCA_PEER_EXT_STATS
/**
 * dp_txrx_host_peer_ext_stats_clr() - Reinitialize the txrx peer ext stats
 * @peer: Datapath peer
 *
 * Return: None
 */
7974 static inline void dp_txrx_host_peer_ext_stats_clr(struct dp_peer *peer)
7975 {
7976 	if (peer->pext_stats)
7977 		qdf_mem_zero(peer->pext_stats, sizeof(*peer->pext_stats));
7978 }
7979 #else
7980 static inline void dp_txrx_host_peer_ext_stats_clr(struct dp_peer *peer)
7981 {
7982 }
7983 #endif
7984 
7985 /**
 * dp_txrx_host_peer_stats_clr() - Reinitialize the txrx peer stats
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: None
7992  */
7993 static inline void
7994 dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
7995 			    struct dp_peer *peer,
7996 			    void *arg)
7997 {
7998 	struct dp_rx_tid *rx_tid;
7999 	uint8_t tid;
8000 
8001 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
8002 		rx_tid = &peer->rx_tid[tid];
8003 		DP_STATS_CLR(rx_tid);
8004 	}
8005 
8006 	DP_STATS_CLR(peer);
8007 
8008 	dp_txrx_host_peer_ext_stats_clr(peer);
8009 
8010 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8011 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
8012 			     &peer->stats,  peer->peer_id,
8013 			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
8014 #endif
8015 }
8016 
8017 /**
8018  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
8019  * @vdev: DP_VDEV handle
 * @soc: DP_SOC handle
8021  *
8022  * Return: QDF_STATUS
8023  */
8024 static inline QDF_STATUS
8025 dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
8026 {
8027 	if (!vdev || !vdev->pdev)
8028 		return QDF_STATUS_E_FAILURE;
8029 
	/*
	 * If NSS offload is enabled, send a message to the NSS FW to
	 * clear its statistics. Once the NSS FW clears them, the host
	 * statistics are cleared as well.
	 */
8035 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
8036 		if (soc->cdp_soc.ol_ops->nss_stats_clr)
8037 			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
8038 							   vdev->vdev_id);
8039 	}
8040 
8041 	DP_STATS_CLR(vdev->pdev);
8042 	DP_STATS_CLR(vdev->pdev->soc);
8043 	DP_STATS_CLR(vdev);
8044 
8045 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
8046 
8047 	dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
8048 			     DP_MOD_ID_GENERIC_STATS);
8049 
8050 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8051 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
8052 			     &vdev->stats,  vdev->vdev_id,
8053 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
8054 #endif
8055 	return QDF_STATUS_SUCCESS;
8056 }
8057 
8058 /*
8059  * dp_get_host_peer_stats()- function to print peer stats
8060  * @soc: dp_soc handle
8061  * @mac_addr: mac address of the peer
8062  *
8063  * Return: QDF_STATUS
8064  */
8065 static QDF_STATUS
8066 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
8067 {
8068 	struct dp_peer *peer = NULL;
8069 
8070 	if (!mac_addr) {
8071 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
8072 			  "%s: NULL peer mac addr\n", __func__);
8073 		return QDF_STATUS_E_FAILURE;
8074 	}
8075 
8076 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8077 				      mac_addr, 0,
8078 				      DP_VDEV_ALL,
8079 				      DP_MOD_ID_CDP);
8080 	if (!peer) {
8081 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
8082 			  "%s: Invalid peer\n", __func__);
8083 		return QDF_STATUS_E_FAILURE;
8084 	}
8085 
8086 	dp_print_peer_stats(peer);
8087 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
8088 
8089 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8090 
8091 	return QDF_STATUS_SUCCESS;
8092 }
8093 
8094 /**
8095  * dp_txrx_stats_help() - Helper function for Txrx_Stats
8096  *
8097  * Return: None
8098  */
8099 static void dp_txrx_stats_help(void)
8100 {
8101 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
8102 	dp_info("stats_option:");
8103 	dp_info("  1 -- HTT Tx Statistics");
8104 	dp_info("  2 -- HTT Rx Statistics");
8105 	dp_info("  3 -- HTT Tx HW Queue Statistics");
8106 	dp_info("  4 -- HTT Tx HW Sched Statistics");
8107 	dp_info("  5 -- HTT Error Statistics");
8108 	dp_info("  6 -- HTT TQM Statistics");
8109 	dp_info("  7 -- HTT TQM CMDQ Statistics");
8110 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
8111 	dp_info("  9 -- HTT Tx Rate Statistics");
8112 	dp_info(" 10 -- HTT Rx Rate Statistics");
8113 	dp_info(" 11 -- HTT Peer Statistics");
8114 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
8115 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
8116 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
8117 	dp_info(" 15 -- HTT SRNG Statistics");
8118 	dp_info(" 16 -- HTT SFM Info Statistics");
8119 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
8120 	dp_info(" 18 -- HTT Peer List Details");
8121 	dp_info(" 20 -- Clear Host Statistics");
8122 	dp_info(" 21 -- Host Rx Rate Statistics");
8123 	dp_info(" 22 -- Host Tx Rate Statistics");
8124 	dp_info(" 23 -- Host Tx Statistics");
8125 	dp_info(" 24 -- Host Rx Statistics");
8126 	dp_info(" 25 -- Host AST Statistics");
8127 	dp_info(" 26 -- Host SRNG PTR Statistics");
8128 	dp_info(" 27 -- Host Mon Statistics");
8129 	dp_info(" 28 -- Host REO Queue Statistics");
8130 	dp_info(" 29 -- Host Soc cfg param Statistics");
8131 	dp_info(" 30 -- Host pdev cfg param Statistics");
8132 	dp_info(" 31 -- Host FISA stats");
8133 	dp_info(" 32 -- Host Register Work stats");
8134 }
8135 
8136 /**
 * dp_print_host_stats() - Function to print the stats aggregated at host
 * @vdev: DP_VDEV handle
 * @req: host stats type
 * @soc: dp soc handler
 *
 * Return: 0 on success; an error message is printed on failure
8143  */
8144 static int
8145 dp_print_host_stats(struct dp_vdev *vdev,
8146 		    struct cdp_txrx_stats_req *req,
8147 		    struct dp_soc *soc)
8148 {
8149 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
8150 	enum cdp_host_txrx_stats type =
8151 			dp_stats_mapping_table[req->stats][STATS_HOST];
8152 
8153 	dp_aggregate_pdev_stats(pdev);
8154 
8155 	switch (type) {
8156 	case TXRX_CLEAR_STATS:
8157 		dp_txrx_host_stats_clr(vdev, soc);
8158 		break;
8159 	case TXRX_RX_RATE_STATS:
8160 		dp_print_rx_rates(vdev);
8161 		break;
8162 	case TXRX_TX_RATE_STATS:
8163 		dp_print_tx_rates(vdev);
8164 		break;
8165 	case TXRX_TX_HOST_STATS:
8166 		dp_print_pdev_tx_stats(pdev);
8167 		dp_print_soc_tx_stats(pdev->soc);
8168 		break;
8169 	case TXRX_RX_HOST_STATS:
8170 		dp_print_pdev_rx_stats(pdev);
8171 		dp_print_soc_rx_stats(pdev->soc);
8172 		break;
8173 	case TXRX_AST_STATS:
8174 		dp_print_ast_stats(pdev->soc);
8175 		dp_print_mec_stats(pdev->soc);
8176 		dp_print_peer_table(vdev);
8177 		break;
8178 	case TXRX_SRNG_PTR_STATS:
8179 		dp_print_ring_stats(pdev);
8180 		break;
8181 	case TXRX_RX_MON_STATS:
8182 		dp_monitor_print_pdev_rx_mon_stats(pdev);
8183 		break;
8184 	case TXRX_REO_QUEUE_STATS:
8185 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
8186 				       req->peer_addr);
8187 		break;
8188 	case TXRX_SOC_CFG_PARAMS:
8189 		dp_print_soc_cfg_params(pdev->soc);
8190 		break;
8191 	case TXRX_PDEV_CFG_PARAMS:
8192 		dp_print_pdev_cfg_params(pdev);
8193 		break;
8194 	case TXRX_NAPI_STATS:
8195 		dp_print_napi_stats(pdev->soc);
8196 		break;
8197 	case TXRX_SOC_INTERRUPT_STATS:
8198 		dp_print_soc_interrupt_stats(pdev->soc);
8199 		break;
8200 	case TXRX_SOC_FSE_STATS:
8201 		dp_rx_dump_fisa_table(pdev->soc);
8202 		break;
8203 	case TXRX_HAL_REG_WRITE_STATS:
8204 		hal_dump_reg_write_stats(pdev->soc->hal_soc);
8205 		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
8206 		break;
8207 	case TXRX_SOC_REO_HW_DESC_DUMP:
8208 		dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc,
8209 					 vdev->vdev_id);
8210 		break;
8211 	default:
8212 		dp_info("Wrong Input For TxRx Host Stats");
8213 		dp_txrx_stats_help();
8214 		break;
8215 	}
8216 	return 0;
8217 }
8218 
8219 /*
 * dp_pdev_tid_stats_ingress_inc() - increment pdev ingress stack count
8221  * @pdev: pdev handle
8222  * @val: increase in value
8223  *
8224  * Return: void
8225  */
8226 static void
8227 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
8228 {
8229 	pdev->stats.tid_stats.ingress_stack += val;
8230 }
8231 
8232 /*
 * dp_pdev_tid_stats_osif_drop() - increment pdev OSIF drop count
8234  * @pdev: pdev handle
8235  * @val: increase in value
8236  *
8237  * Return: void
8238  */
8239 static void
8240 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
8241 {
8242 	pdev->stats.tid_stats.osif_drop += val;
8243 }
8244 
8245 /*
 * dp_get_fw_peer_stats() - function to request peer stats from firmware
 * @soc: soc handle
 * @pdev_id: id of the pdev handle
 * @mac_addr: mac address of the peer
 * @cap: Type of htt stats requested
 * @is_wait: if set, wait for completion of the firmware response
 *
 * Currently supporting only MAC-ID-based requests:
8254  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
8255  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
8256  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
8257  *
8258  * Return: QDF_STATUS
8259  */
8260 static QDF_STATUS
8261 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
8262 		     uint8_t *mac_addr,
8263 		     uint32_t cap, uint32_t is_wait)
8264 {
8265 	int i;
8266 	uint32_t config_param0 = 0;
8267 	uint32_t config_param1 = 0;
8268 	uint32_t config_param2 = 0;
8269 	uint32_t config_param3 = 0;
8270 	struct dp_pdev *pdev =
8271 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8272 						   pdev_id);
8273 
8274 	if (!pdev)
8275 		return QDF_STATUS_E_FAILURE;
8276 
8277 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
8278 	config_param0 |= (1 << (cap + 1));
8279 
8280 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
8281 		config_param1 |= (1 << i);
8282 	}
8283 
8284 	config_param2 |= (mac_addr[0] & 0x000000ff);
8285 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
8286 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
8287 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
8288 
8289 	config_param3 |= (mac_addr[4] & 0x000000ff);
8290 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
8291 
8292 	if (is_wait) {
8293 		qdf_event_reset(&pdev->fw_peer_stats_event);
8294 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
8295 					  config_param0, config_param1,
8296 					  config_param2, config_param3,
8297 					  0, DBG_STATS_COOKIE_DP_STATS, 0);
8298 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
8299 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
8300 	} else {
8301 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
8302 					  config_param0, config_param1,
8303 					  config_param2, config_param3,
8304 					  0, DBG_STATS_COOKIE_DEFAULT, 0);
8305 	}
8306 
8307 	return QDF_STATUS_SUCCESS;
}
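
/*
 * Worked example (editorial): for a peer MAC of 00:03:7f:12:34:56 the
 * packing above yields
 *
 *	config_param2 = 0x127f0300	(mac_addr[0]..mac_addr[3])
 *	config_param3 = 0x00005634	(mac_addr[4]..mac_addr[5])
 *
 * i.e. mac_addr[0] occupies the least significant byte of
 * config_param2 and mac_addr[5] the second byte of config_param3.
 */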
8310 
/* This struct definition will be removed from here
 * once it gets added to the FW headers.
 */
struct httstats_cmd_req {
	uint32_t config_param0;
	uint32_t config_param1;
	uint32_t config_param2;
	uint32_t config_param3;
	int cookie;
	u_int8_t stats_id;
};
8321 
8322 /*
 * dp_get_htt_stats: function to process the HTT stats request
 * @soc: DP soc handle
 * @pdev_id: id of pdev handle
 * @data: pointer to request data
 * @data_len: length of request data
8328  *
8329  * return: QDF_STATUS
8330  */
8331 static QDF_STATUS
8332 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
8333 		 uint32_t data_len)
8334 {
8335 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
8336 	struct dp_pdev *pdev =
8337 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8338 						   pdev_id);
8339 
8340 	if (!pdev)
8341 		return QDF_STATUS_E_FAILURE;
8342 
8343 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
8344 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
8345 				req->config_param0, req->config_param1,
8346 				req->config_param2, req->config_param3,
8347 				req->cookie, DBG_STATS_COOKIE_DEFAULT, 0);
8348 
8349 	return QDF_STATUS_SUCCESS;
8350 }
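
/*
 * Illustrative sketch (editorial example, not driver code): the caller
 * reaching dp_get_htt_stats() through the cdp ops is expected to pass
 * a fully populated struct httstats_cmd_req whose size matches
 * data_len exactly (the QDF_ASSERT() above enforces this), e.g.:
 *
 *	struct httstats_cmd_req req = {0};
 *
 *	req.stats_id = HTT_DBG_EXT_STATS_PDEV_TX;  // assumed stats id
 *	dp_get_htt_stats(soc, pdev_id, &req, sizeof(req));
 */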
8351 
8352 /**
8353  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
8354  * @pdev: DP_PDEV handle
8355  * @prio: tidmap priority value passed by the user
8356  *
8357  * Return: QDF_STATUS_SUCCESS on success
8358  */
8359 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
8360 						uint8_t prio)
8361 {
8362 	struct dp_soc *soc = pdev->soc;
8363 
8364 	soc->tidmap_prty = prio;
8365 
8366 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
8367 	return QDF_STATUS_SUCCESS;
8368 }
8369 
8370 /*
 * dp_get_peer_param: function to get parameters from a peer
 * @cdp_soc: DP soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: peer mac address
 * @param: parameter type to be retrieved
 * @val: address of buffer to hold the value
 *
 * Return: QDF_STATUS
8379  */
8380 static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
8381 				    uint8_t *peer_mac,
8382 				    enum cdp_peer_param_type param,
8383 				    cdp_config_param_type *val)
8384 {
8385 	return QDF_STATUS_SUCCESS;
8386 }
8387 
8388 /*
8389  * dp_set_peer_param: function to set parameters in peer
8390  * @cdp_soc: DP soc handle
8391  * @vdev_id: id of vdev handle
8392  * @peer_mac: peer mac address
8393  * @param: parameter type to be set
8394  * @val: value of parameter to be set
8395  *
8396  * Return: 0 for success. nonzero for failure.
8397  */
8398 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
8399 				    uint8_t *peer_mac,
8400 				    enum cdp_peer_param_type param,
8401 				    cdp_config_param_type val)
8402 {
8403 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
8404 						      peer_mac, 0, vdev_id,
8405 						      DP_MOD_ID_CDP);
8406 
8407 	if (!peer)
8408 		return QDF_STATUS_E_FAILURE;
8409 
8410 	switch (param) {
8411 	case CDP_CONFIG_NAWDS:
8412 		peer->nawds_enabled = val.cdp_peer_param_nawds;
8413 		break;
8414 	case CDP_CONFIG_NAC:
8415 		peer->nac = !!(val.cdp_peer_param_nac);
8416 		break;
8417 	case CDP_CONFIG_ISOLATION:
8418 		dp_set_peer_isolation(peer, val.cdp_peer_param_isolation);
8419 		break;
8420 	case CDP_CONFIG_IN_TWT:
8421 		peer->in_twt = !!(val.cdp_peer_param_in_twt);
8422 		break;
8423 	default:
8424 		break;
8425 	}
8426 
8427 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8428 
8429 	return QDF_STATUS_SUCCESS;
8430 }
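
/*
 * Illustrative sketch (editorial example, not driver code): peer
 * parameters travel through the cdp_config_param_type union, and the
 * caller fills the member matching the parameter being set. For
 * instance, to isolate a peer:
 *
 *	cdp_config_param_type val = {0};
 *
 *	val.cdp_peer_param_isolation = true;
 *	dp_set_peer_param(cdp_soc, vdev_id, peer_mac,
 *			  CDP_CONFIG_ISOLATION, val);
 */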
8431 
8432 /*
8433  * dp_get_pdev_param: function to get parameters from pdev
8434  * @cdp_soc: DP soc handle
8435  * @pdev_id: id of pdev handle
 * @param: parameter type to be retrieved
 * @val: buffer for the value
8438  *
8439  * Return: status
8440  */
8441 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
8442 				    enum cdp_pdev_param_type param,
8443 				    cdp_config_param_type *val)
8444 {
8445 	struct cdp_pdev *pdev = (struct cdp_pdev *)
8446 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
8447 						   pdev_id);
8448 	if (!pdev)
8449 		return QDF_STATUS_E_FAILURE;
8450 
8451 	switch (param) {
8452 	case CDP_CONFIG_VOW:
8453 		val->cdp_pdev_param_cfg_vow =
8454 				((struct dp_pdev *)pdev)->delay_stats_flag;
8455 		break;
8456 	case CDP_TX_PENDING:
8457 		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
8458 		break;
8459 	case CDP_FILTER_MCAST_DATA:
8460 		val->cdp_pdev_param_fltr_mcast =
8461 				dp_monitor_pdev_get_filter_mcast_data(pdev);
8462 		break;
8463 	case CDP_FILTER_NO_DATA:
8464 		val->cdp_pdev_param_fltr_none =
8465 				dp_monitor_pdev_get_filter_non_data(pdev);
8466 		break;
8467 	case CDP_FILTER_UCAST_DATA:
8468 		val->cdp_pdev_param_fltr_ucast =
8469 				dp_monitor_pdev_get_filter_ucast_data(pdev);
8470 		break;
8471 	default:
8472 		return QDF_STATUS_E_FAILURE;
8473 	}
8474 
8475 	return QDF_STATUS_SUCCESS;
8476 }
8477 
8478 /*
8479  * dp_set_pdev_param: function to set parameters in pdev
8480  * @cdp_soc: DP soc handle
8481  * @pdev_id: id of pdev handle
8482  * @param: parameter type to be set
8483  * @val: value of parameter to be set
8484  *
8485  * Return: 0 for success. nonzero for failure.
8486  */
8487 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
8488 				    enum cdp_pdev_param_type param,
8489 				    cdp_config_param_type val)
8490 {
8491 	int target_type;
8492 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
8493 	struct dp_pdev *pdev =
8494 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
8495 						   pdev_id);
8496 	enum reg_wifi_band chan_band;
8497 
8498 	if (!pdev)
8499 		return QDF_STATUS_E_FAILURE;
8500 
8501 	target_type = hal_get_target_type(soc->hal_soc);
8502 	switch (target_type) {
8503 	case TARGET_TYPE_QCA6750:
8504 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_5G_LMAC_ID;
8505 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_5G_LMAC_ID;
8506 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_6G_LMAC_ID;
8507 		break;
8508 	default:
8509 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_2G_LMAC_ID;
8510 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_5G_LMAC_ID;
8511 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_6G_LMAC_ID;
8512 		break;
8513 	}
8514 
8515 	switch (param) {
8516 	case CDP_CONFIG_TX_CAPTURE:
8517 		return dp_monitor_config_debug_sniffer(pdev,
8518 						val.cdp_pdev_param_tx_capture);
8519 	case CDP_CONFIG_DEBUG_SNIFFER:
8520 		return dp_monitor_config_debug_sniffer(pdev,
8521 						val.cdp_pdev_param_dbg_snf);
8522 	case CDP_CONFIG_BPR_ENABLE:
8523 		return dp_monitor_set_bpr_enable(pdev,
8524 						 val.cdp_pdev_param_bpr_enable);
8525 	case CDP_CONFIG_PRIMARY_RADIO:
8526 		pdev->is_primary = val.cdp_pdev_param_primary_radio;
8527 		break;
8528 	case CDP_CONFIG_CAPTURE_LATENCY:
8529 		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
8530 		break;
8531 	case CDP_INGRESS_STATS:
8532 		dp_pdev_tid_stats_ingress_inc(pdev,
8533 					      val.cdp_pdev_param_ingrs_stats);
8534 		break;
8535 	case CDP_OSIF_DROP:
8536 		dp_pdev_tid_stats_osif_drop(pdev,
8537 					    val.cdp_pdev_param_osif_drop);
8538 		break;
8539 	case CDP_CONFIG_ENH_RX_CAPTURE:
8540 		return dp_monitor_config_enh_rx_capture(pdev,
8541 						val.cdp_pdev_param_en_rx_cap);
8542 	case CDP_CONFIG_ENH_TX_CAPTURE:
8543 		return dp_monitor_config_enh_tx_capture(pdev,
8544 						val.cdp_pdev_param_en_tx_cap);
8545 	case CDP_CONFIG_HMMC_TID_OVERRIDE:
8546 		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
8547 		break;
8548 	case CDP_CONFIG_HMMC_TID_VALUE:
8549 		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
8550 		break;
8551 	case CDP_CHAN_NOISE_FLOOR:
8552 		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
8553 		break;
8554 	case CDP_TIDMAP_PRTY:
8555 		dp_set_pdev_tidmap_prty_wifi3(pdev,
8556 					      val.cdp_pdev_param_tidmap_prty);
8557 		break;
8558 	case CDP_FILTER_NEIGH_PEERS:
8559 		dp_monitor_set_filter_neigh_peers(pdev,
8560 					val.cdp_pdev_param_fltr_neigh_peers);
8561 		break;
8562 	case CDP_MONITOR_CHANNEL:
8563 		dp_monitor_set_chan_num(pdev, val.cdp_pdev_param_monitor_chan);
8564 		break;
8565 	case CDP_MONITOR_FREQUENCY:
8566 		chan_band = wlan_reg_freq_to_band(val.cdp_pdev_param_mon_freq);
8567 		dp_monitor_set_chan_freq(pdev, val.cdp_pdev_param_mon_freq);
8568 		dp_monitor_set_chan_band(pdev, chan_band);
8569 		break;
8570 	case CDP_CONFIG_BSS_COLOR:
8571 		dp_monitor_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
8572 		break;
8573 	case CDP_SET_ATF_STATS_ENABLE:
8574 		dp_monitor_set_atf_stats_enable(pdev,
8575 					val.cdp_pdev_param_atf_stats_enable);
8576 		break;
8577 	case CDP_CONFIG_SPECIAL_VAP:
8578 		dp_monitor_pdev_config_scan_spcl_vap(pdev,
8579 					val.cdp_pdev_param_config_special_vap);
8580 		dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
8581 		break;
8582 	case CDP_RESET_SCAN_SPCL_VAP_STATS_ENABLE:
8583 		dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(pdev,
8584 				val.cdp_pdev_param_reset_scan_spcl_vap_stats_enable);
8585 		break;
8586 	default:
8587 		return QDF_STATUS_E_INVAL;
8588 	}
8589 	return QDF_STATUS_SUCCESS;
8590 }
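
/*
 * Illustrative sketch (editorial example, not driver code): pdev
 * parameters use the same union convention. Setting the monitor
 * frequency, for example, also derives the channel band, which
 * dp_monitor_set_chan_band() resolves to an LMAC id through the
 * ch_band_lmac_id_mapping table initialized above:
 *
 *	cdp_config_param_type val = {0};
 *
 *	val.cdp_pdev_param_mon_freq = 5180;	// 5 GHz, channel 36
 *	dp_set_pdev_param(cdp_soc, pdev_id, CDP_MONITOR_FREQUENCY, val);
 */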
8591 
8592 #ifdef QCA_PEER_EXT_STATS
8593 static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
8594 					  qdf_nbuf_t nbuf)
8595 {
8596 	struct dp_peer *peer = NULL;
8597 	uint16_t peer_id, ring_id;
8598 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
8599 	struct cdp_peer_ext_stats *pext_stats = NULL;
8600 
8601 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
8602 	if (peer_id > soc->max_peers)
8603 		return;
8604 
8605 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
8606 	if (qdf_unlikely(!peer))
8607 		return;
8608 
8609 	if (qdf_likely(peer->pext_stats)) {
8610 		pext_stats = peer->pext_stats;
8611 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
8612 		dp_rx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
8613 					nbuf);
8614 	}
8615 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8616 }
8617 #else
8618 static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
8619 						 qdf_nbuf_t nbuf)
8620 {
8621 }
8622 #endif
8623 
8624 /*
 * dp_calculate_delay_stats: function to compute rx delay stats
8626  * @cdp_soc: DP soc handle
8627  * @vdev_id: id of DP vdev handle
8628  * @nbuf: skb
8629  *
8630  * Return: QDF_STATUS
8631  */
8632 static QDF_STATUS
8633 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
8634 			 qdf_nbuf_t nbuf)
8635 {
8636 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
8637 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8638 						     DP_MOD_ID_CDP);
8639 
8640 	if (!vdev)
8641 		return QDF_STATUS_SUCCESS;
8642 
8643 	if (vdev->pdev->delay_stats_flag)
8644 		dp_rx_compute_delay(vdev, nbuf);
8645 	else
8646 		dp_rx_update_peer_delay_stats(soc, nbuf);
8647 
8648 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8649 	return QDF_STATUS_SUCCESS;
8650 }
8651 
8652 /*
8653  * dp_get_vdev_param: function to get parameters from vdev
 * @cdp_soc: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @param: parameter type to be retrieved
8657  * @val: buffer address
8658  *
8659  * return: status
8660  */
8661 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
8662 				    enum cdp_vdev_param_type param,
8663 				    cdp_config_param_type *val)
8664 {
8665 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
8666 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8667 						     DP_MOD_ID_CDP);
8668 
8669 	if (!vdev)
8670 		return QDF_STATUS_E_FAILURE;
8671 
8672 	switch (param) {
8673 	case CDP_ENABLE_WDS:
8674 		val->cdp_vdev_param_wds = vdev->wds_enabled;
8675 		break;
8676 	case CDP_ENABLE_MEC:
8677 		val->cdp_vdev_param_mec = vdev->mec_enabled;
8678 		break;
8679 	case CDP_ENABLE_DA_WAR:
8680 		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
8681 		break;
8682 	case CDP_ENABLE_IGMP_MCAST_EN:
8683 		val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en;
8684 		break;
8685 	case CDP_ENABLE_MCAST_EN:
8686 		val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en;
8687 		break;
8688 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
8689 		val->cdp_vdev_param_hlos_tid_override =
8690 			    dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev);
8691 		break;
8692 	case CDP_ENABLE_PEER_AUTHORIZE:
8693 		val->cdp_vdev_param_peer_authorize =
8694 			    vdev->peer_authorize;
8695 		break;
8696 #ifdef WLAN_SUPPORT_MESH_LATENCY
8697 	case CDP_ENABLE_PEER_TID_LATENCY:
8698 		val->cdp_vdev_param_peer_tid_latency_enable =
8699 			vdev->peer_tid_latency_enabled;
8700 		break;
8701 	case CDP_SET_VAP_MESH_TID:
8702 		val->cdp_vdev_param_mesh_tid =
8703 				vdev->mesh_tid_latency_config.latency_tid;
8704 		break;
8705 #endif
8706 	default:
8707 		dp_cdp_err("%pK: param value %d is wrong",
8708 			   soc, param);
8709 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8710 		return QDF_STATUS_E_FAILURE;
8711 	}
8712 
8713 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8714 	return QDF_STATUS_SUCCESS;
8715 }
8716 
8717 /*
8718  * dp_set_vdev_param: function to set parameters in vdev
 * @cdp_soc: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @param: parameter type to be set
 * @val: value of the parameter to be set
8723  *
8724  * return: QDF_STATUS
8725  */
8726 static QDF_STATUS
8727 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
8728 		  enum cdp_vdev_param_type param, cdp_config_param_type val)
8729 {
8730 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
8731 	struct dp_vdev *vdev =
8732 		dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP);
8733 	uint32_t var = 0;
8734 
8735 	if (!vdev)
8736 		return QDF_STATUS_E_FAILURE;
8737 
8738 	switch (param) {
8739 	case CDP_ENABLE_WDS:
8740 		dp_cdp_err("%pK: wds_enable %d for vdev(%pK) id(%d)\n",
8741 			   dsoc, val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
8742 		vdev->wds_enabled = val.cdp_vdev_param_wds;
8743 		break;
8744 	case CDP_ENABLE_MEC:
8745 		dp_cdp_err("%pK: mec_enable %d for vdev(%pK) id(%d)\n",
8746 			   dsoc, val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
8747 		vdev->mec_enabled = val.cdp_vdev_param_mec;
8748 		break;
8749 	case CDP_ENABLE_DA_WAR:
8750 		dp_cdp_err("%pK: da_war_enable %d for vdev(%pK) id(%d)\n",
8751 			   dsoc, val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
8752 		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
8753 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
8754 					     vdev->pdev->soc));
8755 		break;
8756 	case CDP_ENABLE_NAWDS:
8757 		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
8758 		break;
8759 	case CDP_ENABLE_MCAST_EN:
8760 		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
8761 		break;
8762 	case CDP_ENABLE_IGMP_MCAST_EN:
8763 		vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en;
8764 		break;
8765 	case CDP_ENABLE_PROXYSTA:
8766 		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
8767 		break;
8768 	case CDP_UPDATE_TDLS_FLAGS:
8769 		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
8770 		break;
8771 	case CDP_CFG_WDS_AGING_TIMER:
8772 		var = val.cdp_vdev_param_aging_tmr;
8773 		if (!var)
8774 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
8775 		else if (var != vdev->wds_aging_timer_val)
8776 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);
8777 
8778 		vdev->wds_aging_timer_val = var;
8779 		break;
8780 	case CDP_ENABLE_AP_BRIDGE:
8781 		if (wlan_op_mode_sta != vdev->opmode)
8782 			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
8783 		else
8784 			vdev->ap_bridge_enabled = false;
8785 		break;
8786 	case CDP_ENABLE_CIPHER:
8787 		vdev->sec_type = val.cdp_vdev_param_cipher_en;
8788 		break;
8789 	case CDP_ENABLE_QWRAP_ISOLATION:
8790 		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
8791 		break;
8792 	case CDP_UPDATE_MULTIPASS:
8793 		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
8794 		break;
8795 	case CDP_TX_ENCAP_TYPE:
8796 		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
8797 		break;
8798 	case CDP_RX_DECAP_TYPE:
8799 		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
8800 		break;
8801 	case CDP_TID_VDEV_PRTY:
8802 		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
8803 		break;
8804 	case CDP_TIDMAP_TBL_ID:
8805 		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
8806 		break;
8807 #ifdef MESH_MODE_SUPPORT
8808 	case CDP_MESH_RX_FILTER:
8809 		dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev,
8810 					   val.cdp_vdev_param_mesh_rx_filter);
8811 		break;
8812 	case CDP_MESH_MODE:
8813 		dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev,
8814 				      val.cdp_vdev_param_mesh_mode);
8815 		break;
8816 #endif
8817 	case CDP_ENABLE_CSUM:
8818 		dp_info("vdev_id %d enable Checksum %d", vdev_id,
8819 			val.cdp_enable_tx_checksum);
8820 		vdev->csum_enabled = val.cdp_enable_tx_checksum;
8821 		break;
8822 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
		dp_info("vdev_id %d enable hlos tid override %d", vdev_id,
			val.cdp_vdev_param_hlos_tid_override);
8825 		dp_vdev_set_hlos_tid_override(vdev,
8826 				val.cdp_vdev_param_hlos_tid_override);
8827 		break;
8828 #ifdef QCA_SUPPORT_WDS_EXTENDED
8829 	case CDP_CFG_WDS_EXT:
8830 		vdev->wds_ext_enabled = val.cdp_vdev_param_wds_ext;
8831 		break;
8832 #endif
8833 	case CDP_ENABLE_PEER_AUTHORIZE:
8834 		vdev->peer_authorize = val.cdp_vdev_param_peer_authorize;
8835 		break;
8836 #ifdef WLAN_SUPPORT_MESH_LATENCY
8837 	case CDP_ENABLE_PEER_TID_LATENCY:
8838 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
8839 			val.cdp_vdev_param_peer_tid_latency_enable);
8840 		vdev->peer_tid_latency_enabled =
8841 			val.cdp_vdev_param_peer_tid_latency_enable;
8842 		break;
8843 	case CDP_SET_VAP_MESH_TID:
		dp_info("vdev_id %d set mesh tid %d", vdev_id,
			val.cdp_vdev_param_mesh_tid);
8846 		vdev->mesh_tid_latency_config.latency_tid
8847 				= val.cdp_vdev_param_mesh_tid;
8848 		break;
8849 #endif
8850 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
8851 	case CDP_SKIP_BAR_UPDATE_AP:
8852 		dp_info("vdev_id %d skip BAR update: %u", vdev_id,
8853 			val.cdp_skip_bar_update);
8854 		vdev->skip_bar_update = val.cdp_skip_bar_update;
8855 		vdev->skip_bar_update_last_ts = 0;
8856 		break;
8857 #endif
8858 	default:
8859 		break;
8860 	}
8861 
8862 	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
8863 	dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP);
8864 
8865 	return QDF_STATUS_SUCCESS;
8866 }
8867 
8868 /*
8869  * dp_set_psoc_param: function to set parameters in psoc
 * @cdp_soc: DP soc handle
8871  * @param: parameter type to be set
8872  * @val: value of parameter to be set
8873  *
8874  * return: QDF_STATUS
8875  */
8876 static QDF_STATUS
8877 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
8878 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
8879 {
8880 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
8881 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
8882 
8883 	switch (param) {
8884 	case CDP_ENABLE_RATE_STATS:
8885 		soc->rdkstats_enabled = val.cdp_psoc_param_en_rate_stats;
8886 		break;
8887 	case CDP_SET_NSS_CFG:
8888 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
8889 					    val.cdp_psoc_param_en_nss_cfg);
8890 		/*
		 * TODO: mask this out based on each offloaded radio
8892 		 */
8893 		switch (val.cdp_psoc_param_en_nss_cfg) {
8894 		case dp_nss_cfg_default:
8895 			break;
8896 		case dp_nss_cfg_first_radio:
		/*
		 * This configuration is valid for a single-band radio
		 * that is also NSS offloaded.
		 */
8901 		case dp_nss_cfg_dbdc:
8902 		case dp_nss_cfg_dbtc:
8903 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
8904 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
8905 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
8906 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
8907 			break;
8908 		default:
8909 			dp_cdp_err("%pK: Invalid offload config %d",
8910 				   soc, val.cdp_psoc_param_en_nss_cfg);
8911 		}
8912 
		dp_cdp_err("%pK: nss-wifi<0> nss config is enabled", soc);
8915 		break;
8916 	case CDP_SET_PREFERRED_HW_MODE:
8917 		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
8918 		break;
8919 	default:
8920 		break;
8921 	}
8922 
8923 	return QDF_STATUS_SUCCESS;
8924 }
8925 
8926 /*
8927  * dp_get_psoc_param: function to get parameters in soc
8928  * @cdp_soc : DP soc handle
8929  * @param: parameter type to be set
8930  * @val: address of buffer
8931  *
8932  * return: status
8933  */
8934 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
8935 				    enum cdp_psoc_param_type param,
8936 				    cdp_config_param_type *val)
8937 {
8938 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
8939 
8940 	if (!soc)
8941 		return QDF_STATUS_E_FAILURE;
8942 
8943 	switch (param) {
8944 	case CDP_CFG_PEER_EXT_STATS:
8945 		val->cdp_psoc_param_pext_stats =
8946 			wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
8947 		break;
8948 	default:
8949 		dp_warn("Invalid param");
8950 		break;
8951 	}
8952 
8953 	return QDF_STATUS_SUCCESS;
8954 }
8955 
8956 /*
8957  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
8958  * @cdp_soc: CDP soc handle
8959  * @vdev_id: id of DP_VDEV handle
8960  * @map_id: ID of map that needs to be updated
8961  *
8962  * Return: QDF_STATUS
8963  */
8964 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
8965 						 uint8_t vdev_id,
8966 						 uint8_t map_id)
8967 {
8968 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
8969 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8970 						     DP_MOD_ID_CDP);
8971 	if (vdev) {
8972 		vdev->dscp_tid_map_id = map_id;
8973 		/* Update flag for transmit tid classification */
8974 		if (vdev->dscp_tid_map_id < soc->num_hw_dscp_tid_map)
8975 			vdev->skip_sw_tid_classification |=
8976 				DP_TX_HW_DSCP_TID_MAP_VALID;
8977 		else
8978 			vdev->skip_sw_tid_classification &=
8979 				~DP_TX_HW_DSCP_TID_MAP_VALID;
8980 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8981 		return QDF_STATUS_SUCCESS;
8982 	}
8983 
8984 	return QDF_STATUS_E_FAILURE;
8985 }
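
/*
 * Illustrative usage sketch (hypothetical caller, handles and IDs are
 * assumptions): selecting a DSCP-TID map for a vdev. Map IDs below
 * soc->num_hw_dscp_tid_map keep TID classification in HW
 * (DP_TX_HW_DSCP_TID_MAP_VALID is set); higher IDs fall back to SW
 * classification.
 *
 *	if (dp_set_vdev_dscp_tid_map_wifi3(cdp_soc, vdev_id, 1) !=
 *	    QDF_STATUS_SUCCESS)
 *		handle_error();	// hypothetical error path
 */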
8986 
8987 #ifdef DP_RATETABLE_SUPPORT
8988 static int dp_txrx_get_ratekbps(int preamb, int mcs,
8989 				int htflag, int gintval)
8990 {
8991 	uint32_t rix;
8992 	uint16_t ratecode;
8993 
8994 	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
8995 			       (uint8_t)preamb, 1, &rix, &ratecode);
8996 }
8997 #else
8998 static int dp_txrx_get_ratekbps(int preamb, int mcs,
8999 				int htflag, int gintval)
9000 {
9001 	return 0;
9002 }
9003 #endif
9004 
9005 /* dp_txrx_get_pdev_stats - copy cdp_pdev_stats into caller's buffer
9006  * @soc: DP soc handle
9007  * @pdev_id: id of DP pdev handle
9008  * @pdev_stats: buffer to copy to
9009  *
9010  * return : status success/failure
9011  */
9012 static QDF_STATUS
9013 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
9014 		       struct cdp_pdev_stats *pdev_stats)
9015 {
9016 	struct dp_pdev *pdev =
9017 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9018 						   pdev_id);
9019 	if (!pdev)
9020 		return QDF_STATUS_E_FAILURE;
9021 
9022 	dp_aggregate_pdev_stats(pdev);
9023 
9024 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
9025 	return QDF_STATUS_SUCCESS;
9026 }
9027 
9028 /* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
9029  * @vdev: DP vdev handle
9030  * @buf: buffer containing specific stats structure
9031  *
9032  * Returns: void
9033  */
9034 static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
9035 					 void *buf)
9036 {
9037 	struct cdp_tx_ingress_stats *host_stats = NULL;
9038 
9039 	if (!buf) {
9040 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
9041 		return;
9042 	}
9043 	host_stats = (struct cdp_tx_ingress_stats *)buf;
9044 
9045 	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
9046 			 host_stats->mcast_en.mcast_pkt.num,
9047 			 host_stats->mcast_en.mcast_pkt.bytes);
9048 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
9049 		     host_stats->mcast_en.dropped_map_error);
9050 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
9051 		     host_stats->mcast_en.dropped_self_mac);
9052 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
9053 		     host_stats->mcast_en.dropped_send_fail);
9054 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
9055 		     host_stats->mcast_en.ucast);
9056 	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
9057 		     host_stats->mcast_en.fail_seg_alloc);
9058 	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
9059 		     host_stats->mcast_en.clone_fail);
9060 }
9061 
9062 /* dp_txrx_update_vdev_igmp_me_stats(): Update vdev IGMP ME stats sent from CDP
9063  * @vdev: DP vdev handle
9064  * @buf: buffer containing specific stats structure
9065  *
9066  * Returns: void
9067  */
9068 static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
9069 					      void *buf)
9070 {
9071 	struct cdp_tx_ingress_stats *host_stats = NULL;
9072 
9073 	if (!buf) {
9074 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
9075 		return;
9076 	}
9077 	host_stats = (struct cdp_tx_ingress_stats *)buf;
9078 
9079 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_rcvd,
9080 		     host_stats->igmp_mcast_en.igmp_rcvd);
9081 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted,
9082 		     host_stats->igmp_mcast_en.igmp_ucast_converted);
9083 }
9084 
9085 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
9086  * @soc_hdl: DP soc handle
9087  * @vdev_id: id of DP vdev handle
9088  * @buf: buffer containing specific stats structure
9089  * @stats_id: stats type
9090  *
9091  * Returns: QDF_STATUS
9092  */
9093 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
9094 						 uint8_t vdev_id,
9095 						 void *buf,
9096 						 uint16_t stats_id)
9097 {
9098 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9099 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9100 						     DP_MOD_ID_CDP);
9101 
9102 	if (!vdev) {
9103 		dp_cdp_err("%pK: Invalid vdev handle", soc);
9104 		return QDF_STATUS_E_FAILURE;
9105 	}
9106 
9107 	switch (stats_id) {
9108 	case DP_VDEV_STATS_PKT_CNT_ONLY:
9109 		break;
9110 	case DP_VDEV_STATS_TX_ME:
9111 		dp_txrx_update_vdev_me_stats(vdev, buf);
9112 		dp_txrx_update_vdev_igmp_me_stats(vdev, buf);
9113 		break;
9114 	default:
9115 		qdf_info("Invalid stats_id %d", stats_id);
9116 		break;
9117 	}
9118 
9119 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9120 	return QDF_STATUS_SUCCESS;
9121 }
9122 
9123 /* dp_txrx_get_soc_stats - fill buffer with cdp_soc_stats
9124  * @soc_hdl: soc handle
9125  * @soc_stats: buffer to hold the values
9126  *
9127  * return: status success/failure
9128  */
9129 static QDF_STATUS
9130 dp_txrx_get_soc_stats(struct cdp_soc_t *soc_hdl,
9131 		      struct cdp_soc_stats *soc_stats)
9132 {
9133 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9134 
9135 	soc_stats->tx.egress = soc->stats.tx.egress;
9136 	soc_stats->rx.ingress = soc->stats.rx.ingress;
9137 	soc_stats->rx.err_ring_pkts = soc->stats.rx.err_ring_pkts;
9138 	soc_stats->rx.rx_frags = soc->stats.rx.rx_frags;
9139 	soc_stats->rx.reo_reinject = soc->stats.rx.reo_reinject;
9140 	soc_stats->rx.bar_frame = soc->stats.rx.bar_frame;
9141 	soc_stats->rx.err.rx_rejected = soc->stats.rx.err.rejected;
9142 	soc_stats->rx.err.rx_raw_frm_drop = soc->stats.rx.err.raw_frm_drop;
9143 
9144 	return QDF_STATUS_SUCCESS;
9145 }
9146 
9147 /* dp_txrx_get_peer_stats - copy cdp_peer_stats into caller's buffer
9148  * @soc: soc handle
9149  * @vdev_id: id of vdev handle
9150  * @peer_mac: mac of DP_PEER handle
9151  * @peer_stats: buffer to copy to
9152  * return : status success/failure
9153  */
9154 static QDF_STATUS
9155 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
9156 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
9157 {
9158 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9159 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
9160 						       peer_mac, 0, vdev_id,
9161 						       DP_MOD_ID_CDP);
9162 
9163 	if (!peer)
9164 		return QDF_STATUS_E_FAILURE;
9165 
9166 	qdf_mem_copy(peer_stats, &peer->stats,
9167 		     sizeof(struct cdp_peer_stats));
9168 
9169 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9170 
9171 	return status;
9172 }
9173 
9174 /* dp_txrx_get_peer_stats_param - return the specified cdp_peer_stats field
9175  * @param soc - soc handle
9176  * @param vdev_id - vdev_id of vdev object
9177  * @param peer_mac - mac address of the peer
9178  * @param type - enum of required stats
9179  * @param buf - buffer to hold the value
9180  * return : status success/failure
9181  */
9182 static QDF_STATUS
9183 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
9184 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
9185 			     cdp_peer_stats_param_t *buf)
9186 {
9187 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
9188 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
9189 						      peer_mac, 0, vdev_id,
9190 						      DP_MOD_ID_CDP);
9191 
9192 	if (!peer) {
9193 		dp_peer_err("%pK: Invalid Peer for Mac " QDF_MAC_ADDR_FMT,
9194 			    soc, QDF_MAC_ADDR_REF(peer_mac));
9195 		return QDF_STATUS_E_FAILURE;
9196 	} else if (type < cdp_peer_stats_max) {
9197 		switch (type) {
9198 		case cdp_peer_tx_ucast:
9199 			buf->tx_ucast = peer->stats.tx.ucast;
9200 			break;
9201 		case cdp_peer_tx_mcast:
9202 			buf->tx_mcast = peer->stats.tx.mcast;
9203 			break;
9204 		case cdp_peer_tx_rate:
9205 			buf->tx_rate = peer->stats.tx.tx_rate;
9206 			break;
9207 		case cdp_peer_tx_last_tx_rate:
9208 			buf->last_tx_rate = peer->stats.tx.last_tx_rate;
9209 			break;
9210 		case cdp_peer_tx_inactive_time:
9211 			buf->tx_inactive_time = peer->stats.tx.inactive_time;
9212 			break;
9213 		case cdp_peer_tx_ratecode:
9214 			buf->tx_ratecode = peer->stats.tx.tx_ratecode;
9215 			break;
9216 		case cdp_peer_tx_flags:
9217 			buf->tx_flags = peer->stats.tx.tx_flags;
9218 			break;
9219 		case cdp_peer_tx_power:
9220 			buf->tx_power = peer->stats.tx.tx_power;
9221 			break;
9222 		case cdp_peer_rx_rate:
9223 			buf->rx_rate = peer->stats.rx.rx_rate;
9224 			break;
9225 		case cdp_peer_rx_last_rx_rate:
9226 			buf->last_rx_rate = peer->stats.rx.last_rx_rate;
9227 			break;
9228 		case cdp_peer_rx_ratecode:
9229 			buf->rx_ratecode = peer->stats.rx.rx_ratecode;
9230 			break;
9231 		case cdp_peer_rx_ucast:
9232 			buf->rx_ucast = peer->stats.rx.unicast;
9233 			break;
9234 		case cdp_peer_rx_flags:
9235 			buf->rx_flags = peer->stats.rx.rx_flags;
9236 			break;
9237 		case cdp_peer_rx_avg_snr:
9238 			buf->rx_avg_snr = peer->stats.rx.avg_snr;
9239 			break;
9240 		default:
9241 			dp_peer_err("%pK: Invalid value", soc);
9242 			ret = QDF_STATUS_E_FAILURE;
9243 			break;
9244 		}
9245 	} else {
9246 		dp_peer_err("%pK: Invalid value", soc);
9247 		ret = QDF_STATUS_E_FAILURE;
9248 	}
9249 
9250 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9251 
9252 	return ret;
9253 }
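
/*
 * Illustrative usage sketch (hypothetical caller; report() and the
 * handles are assumptions): fetching a single field instead of copying
 * the whole cdp_peer_stats block.
 *
 *	cdp_peer_stats_param_t buf = {0};
 *
 *	if (dp_txrx_get_peer_stats_param(soc, vdev_id, peer_mac,
 *					 cdp_peer_tx_ucast, &buf) ==
 *	    QDF_STATUS_SUCCESS)
 *		report(buf.tx_ucast.num, buf.tx_ucast.bytes);
 */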
9254 
9255 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
9256  * @soc: soc handle
9257  * @vdev_id: id of vdev handle
9258  * @peer_mac: mac of DP_PEER handle
9259  *
9260  * return : QDF_STATUS
9261  */
9262 static QDF_STATUS
9263 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
9264 			 uint8_t *peer_mac)
9265 {
9266 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9267 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
9268 						      peer_mac, 0, vdev_id,
9269 						      DP_MOD_ID_CDP);
9270 
9271 	if (!peer)
9272 		return QDF_STATUS_E_FAILURE;
9273 
9274 	qdf_mem_zero(&peer->stats, sizeof(peer->stats));
9275 
9276 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9277 
9278 	return status;
9279 }
9280 
9281 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
9282  * @soc_hdl: DP soc handle, @vdev_id: id of DP_VDEV handle
9283  * @buf: buffer for vdev stats, @is_aggregate: aggregate or per-vdev
9284  *
9285  * return : int
9286  */
9287 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9288 				  void *buf, bool is_aggregate)
9289 {
9290 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9291 	struct cdp_vdev_stats *vdev_stats;
9292 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9293 						     DP_MOD_ID_CDP);
9294 
9295 	if (!vdev)
9296 		return 1;
9297 
9298 	vdev_stats = (struct cdp_vdev_stats *)buf;
9299 
9300 	if (is_aggregate) {
9301 		dp_aggregate_vdev_stats(vdev, buf);
9302 	} else {
9303 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
9304 	}
9305 
9306 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9307 	return 0;
9308 }
9309 
9310 /*
9311  * dp_get_total_per(): get total packet error rate (PER)
9312  * @soc: DP soc handle
9313  * @pdev_id: id of DP_PDEV handle
9314  *
9315  * Return: % error rate using retries per packet and success packets
9316  */
9317 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
9318 {
9319 	struct dp_pdev *pdev =
9320 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9321 						   pdev_id);
9322 
9323 	if (!pdev)
9324 		return 0;
9325 
9326 	dp_aggregate_pdev_stats(pdev);
9327 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
9328 		return 0;
9329 	return ((pdev->stats.tx.retries * 100) /
9330 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
9331 }
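
/*
 * Worked example with illustrative numbers: if tx_success.num = 900 and
 * retries = 100, the function returns (100 * 100) / (900 + 100) = 10,
 * i.e. a 10% error rate derived from retries versus successful packets.
 */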
9332 
9333 /*
9334  * dp_txrx_stats_publish(): publish pdev stats into a buffer
9335  * @soc: DP soc handle
9336  * @pdev_id: id of DP_PDEV handle
9337  * @buf: to hold pdev_stats
9338  *
9339  * Return: int
9340  */
9341 static int
9342 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
9343 		      struct cdp_stats_extd *buf)
9344 {
9345 	struct cdp_txrx_stats_req req = {0,};
9346 	struct dp_pdev *pdev =
9347 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9348 						   pdev_id);
9349 
9350 	if (!pdev)
9351 		return TXRX_STATS_LEVEL_OFF;
9352 
9353 	dp_aggregate_pdev_stats(pdev);
9354 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
9355 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
9356 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
9357 				req.param1, req.param2, req.param3, 0,
9358 				req.cookie_val, 0);
9359 
9360 	msleep(DP_MAX_SLEEP_TIME);
9361 
9362 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
9363 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
9364 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
9365 				req.param1, req.param2, req.param3, 0,
9366 				req.cookie_val, 0);
9367 
9368 	msleep(DP_MAX_SLEEP_TIME);
9369 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
9370 
9371 	return TXRX_STATS_LEVEL;
9372 }
9373 
9374 /**
9375  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
9376  * @soc: soc handle
9377  * @pdev_id: id of DP_PDEV handle
9378  * @map_id: ID of map that needs to be updated
9379  * @tos: index value in map
9380  * @tid: tid value passed by the user
9381  *
9382  * Return: QDF_STATUS
9383  */
9384 static QDF_STATUS
9385 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
9386 			       uint8_t pdev_id,
9387 			       uint8_t map_id,
9388 			       uint8_t tos, uint8_t tid)
9389 {
9390 	uint8_t dscp;
9391 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9392 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
9393 
9394 	if (!pdev)
9395 		return QDF_STATUS_E_FAILURE;
9396 
9397 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
9398 	pdev->dscp_tid_map[map_id][dscp] = tid;
9399 
9400 	if (map_id < soc->num_hw_dscp_tid_map)
9401 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
9402 				       map_id, dscp);
9403 	else
9404 		return QDF_STATUS_E_FAILURE;
9405 
9406 	return QDF_STATUS_SUCCESS;
9407 }
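
/*
 * Worked example, assuming DP_IP_DSCP_SHIFT == 2 and DP_IP_DSCP_MASK ==
 * 0x3f: a TOS byte of 0xB8 (DSCP EF) yields dscp = (0xB8 >> 2) & 0x3f =
 * 0x2E, so entry 0x2E of the selected map is set to the given tid; when
 * map_id is below soc->num_hw_dscp_tid_map the HW copy is updated too.
 */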
9408 
9409 /**
9410  * dp_fw_stats_process(): Process TxRX FW stats request
9411  * @vdev: DP VDEV handle
9412  * @req: stats request
9413  *
9414  * return: int
9415  */
9416 static int dp_fw_stats_process(struct dp_vdev *vdev,
9417 			       struct cdp_txrx_stats_req *req)
9418 {
9419 	struct dp_pdev *pdev = NULL;
9420 	uint32_t stats = req->stats;
9421 	uint8_t mac_id = req->mac_id;
9422 
9423 	if (!vdev) {
9424 		DP_TRACE(NONE, "VDEV not found");
9425 		return 1;
9426 	}
9427 	pdev = vdev->pdev;
9428 
9429 	/*
9430 	 * For the HTT_DBG_EXT_STATS_RESET command, the FW needs
9431 	 * config_param0 to config_param3 set according to the rule below:
9432 	 *
9433 	 * PARAM:
9434 	 *   - config_param0 : start_offset (stats type)
9435 	 *   - config_param1 : stats bmask from start offset
9436 	 *   - config_param2 : stats bmask from start offset + 32
9437 	 *   - config_param3 : stats bmask from start offset + 64
9438 	 */
9439 	if (req->stats == CDP_TXRX_STATS_0) {
9440 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
9441 		req->param1 = 0xFFFFFFFF;
9442 		req->param2 = 0xFFFFFFFF;
9443 		req->param3 = 0xFFFFFFFF;
9444 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
9445 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
9446 	}
9447 
9448 	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
9449 		return dp_h2t_ext_stats_msg_send(pdev,
9450 				HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
9451 				req->param0, req->param1, req->param2,
9452 				req->param3, 0, DBG_STATS_COOKIE_DEFAULT,
9453 				mac_id);
9454 	} else {
9455 		return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
9456 				req->param1, req->param2, req->param3,
9457 				0, DBG_STATS_COOKIE_DEFAULT, mac_id);
9458 	}
9459 }
9460 
9461 /**
9462  * dp_txrx_stats_request - function to map to firmware and host stats
9463  * @soc: soc handle
9464  * @vdev_id: virtual device ID
9465  * @req: stats request
9466  *
9467  * Return: QDF_STATUS
9468  */
9469 static
9470 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
9471 				 uint8_t vdev_id,
9472 				 struct cdp_txrx_stats_req *req)
9473 {
9474 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
9475 	int host_stats;
9476 	int fw_stats;
9477 	enum cdp_stats stats;
9478 	int num_stats;
9479 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9480 						     DP_MOD_ID_CDP);
9481 	QDF_STATUS status = QDF_STATUS_E_INVAL;
9482 
9483 	if (!vdev || !req) {
9484 		dp_cdp_err("%pK: Invalid vdev/req instance", soc);
9485 		status = QDF_STATUS_E_INVAL;
9486 		goto fail0;
9487 	}
9488 
9489 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
9490 		dp_err("Invalid mac id request");
9491 		status = QDF_STATUS_E_INVAL;
9492 		goto fail0;
9493 	}
9494 
9495 	stats = req->stats;
9496 	if (stats >= CDP_TXRX_MAX_STATS) {
9497 		status = QDF_STATUS_E_INVAL;
9498 		goto fail0;
9499 	}
9500 
9501 	/*
9502 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
9503 	 *			has to be updated if new FW HTT stats added
9504 	 */
9505 	if (stats > CDP_TXRX_STATS_HTT_MAX)
9506 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
9507 
9508 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
9509 
9510 	if (stats >= num_stats) {
9511 		dp_cdp_err("%pK : Invalid stats option: %d", soc, stats);
9512 		status = QDF_STATUS_E_INVAL;
9513 		goto fail0;
9514 	}
9515 
9516 	req->stats = stats;
9517 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
9518 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
9519 
9520 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
9521 		stats, fw_stats, host_stats);
9522 
9523 	if (fw_stats != TXRX_FW_STATS_INVALID) {
9524 		/* update request with FW stats type */
9525 		req->stats = fw_stats;
9526 		status = dp_fw_stats_process(vdev, req);
9527 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
9528 			(host_stats <= TXRX_HOST_STATS_MAX))
9529 		status = dp_print_host_stats(vdev, req, soc);
9530 	else
9531 		dp_cdp_info("%pK: Wrong Input for TxRx Stats", soc);
9532 fail0:
9533 	if (vdev)
9534 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9535 	return status;
9536 }
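
/*
 * Illustrative request sketch (hypothetical caller; handles assumed):
 * CDP_TXRX_STATS_0 is routed to the FW path, where dp_fw_stats_process()
 * above fills param0-param3 with the full HTT_DBG_EXT_STATS_PDEV_TX
 * bitmask.
 *
 *	struct cdp_txrx_stats_req req = {0};
 *
 *	req.stats = CDP_TXRX_STATS_0;
 *	req.mac_id = 0;		// must be < WLAN_CFG_MAC_PER_TARGET
 *	dp_txrx_stats_request(soc_handle, vdev_id, &req);
 */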
9537 
9538 /*
9539  * dp_txrx_dump_stats() -  Dump statistics
9540  * @value - Statistics option
9541  */
9542 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
9543 				     enum qdf_stats_verbosity_level level)
9544 {
9545 	struct dp_soc *soc =
9546 		(struct dp_soc *)psoc;
9547 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9548 
9549 	if (!soc) {
9550 		dp_cdp_err("%pK: soc is NULL", soc);
9551 		return QDF_STATUS_E_INVAL;
9552 	}
9553 
9554 	switch (value) {
9555 	case CDP_TXRX_PATH_STATS:
9556 		dp_txrx_path_stats(soc);
9557 		dp_print_soc_interrupt_stats(soc);
9558 		hal_dump_reg_write_stats(soc->hal_soc);
9559 		break;
9560 
9561 	case CDP_RX_RING_STATS:
9562 		dp_print_per_ring_stats(soc);
9563 		break;
9564 
9565 	case CDP_TXRX_TSO_STATS:
9566 		dp_print_tso_stats(soc, level);
9567 		break;
9568 
9569 	case CDP_DUMP_TX_FLOW_POOL_INFO:
9570 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
9571 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
9572 		else
9573 			dp_tx_dump_flow_pool_info_compact(soc);
9574 		break;
9575 
9576 	case CDP_DP_NAPI_STATS:
9577 		dp_print_napi_stats(soc);
9578 		break;
9579 
9580 	case CDP_TXRX_DESC_STATS:
9581 		/* TODO: NOT IMPLEMENTED */
9582 		break;
9583 
9584 	case CDP_DP_RX_FISA_STATS:
9585 		dp_rx_dump_fisa_stats(soc);
9586 		break;
9587 
9588 	case CDP_DP_SWLM_STATS:
9589 		dp_print_swlm_stats(soc);
9590 		break;
9591 
9592 	default:
9593 		status = QDF_STATUS_E_INVAL;
9594 		break;
9595 	}
9596 
9597 	return status;
9598 
9599 }
9600 
9601 /**
9602  * dp_txrx_clear_dump_stats() - clear the dumped stats
9603  * @soc_hdl: soc handle, @pdev_id: pdev ID
9604  * @value: stats option
9605  *
9606  * Return: 0 - Success, non-zero - failure
9607  */
9608 static
9609 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9610 				    uint8_t value)
9611 {
9612 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9613 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9614 
9615 	if (!soc) {
9616 		dp_err("soc is NULL");
9617 		return QDF_STATUS_E_INVAL;
9618 	}
9619 
9620 	switch (value) {
9621 	case CDP_TXRX_TSO_STATS:
9622 		dp_txrx_clear_tso_stats(soc);
9623 		break;
9624 
9625 	default:
9626 		status = QDF_STATUS_E_INVAL;
9627 		break;
9628 	}
9629 
9630 	return status;
9631 }
9632 
9633 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
9634 /**
9635  * dp_update_flow_control_parameters() - API to store datapath
9636  *                            config parameters
9637  * @soc: soc handle
9638  * @params: ini parameter handle
9639  *
9640  * Return: void
9641  */
9642 static inline
9643 void dp_update_flow_control_parameters(struct dp_soc *soc,
9644 				struct cdp_config_params *params)
9645 {
9646 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
9647 					params->tx_flow_stop_queue_threshold;
9648 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
9649 					params->tx_flow_start_queue_offset;
9650 }
9651 #else
9652 static inline
9653 void dp_update_flow_control_parameters(struct dp_soc *soc,
9654 				struct cdp_config_params *params)
9655 {
9656 }
9657 #endif
9658 
9659 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
9660 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
9661 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
9662 
9663 /* Max packet limit for RX REAP Loop (dp_rx_process) */
9664 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
9665 
9666 static
9667 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
9668 					struct cdp_config_params *params)
9669 {
9670 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
9671 				params->tx_comp_loop_pkt_limit;
9672 
9673 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
9674 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
9675 	else
9676 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
9677 
9678 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
9679 				params->rx_reap_loop_pkt_limit;
9680 
9681 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
9682 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
9683 	else
9684 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
9685 
9686 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
9687 				params->rx_hp_oos_update_limit;
9688 
9689 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
9690 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
9691 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
9692 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
9693 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
9694 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
9695 }
9696 
9697 static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
9698 				      uint32_t rx_limit)
9699 {
9700 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
9701 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
9702 }
9703 
9704 #else
9705 static inline
9706 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
9707 					struct cdp_config_params *params)
9708 { }
9709 
9710 static inline
9711 void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
9712 			       uint32_t rx_limit)
9713 {
9714 }
9715 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
9716 
9717 /**
9718  * dp_update_config_parameters() - API to store datapath
9719  *                            config parameters
9720  * @soc: soc handle
9721  * @params: ini parameter handle
9722  *
9723  * Return: status
9724  */
9725 static
9726 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
9727 				struct cdp_config_params *params)
9728 {
9729 	struct dp_soc *soc = (struct dp_soc *)psoc;
9730 
9731 	if (!(soc)) {
9732 		dp_cdp_err("%pK: Invalid handle", soc);
9733 		return QDF_STATUS_E_INVAL;
9734 	}
9735 
9736 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
9737 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
9738 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
9739 	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
9740 				params->p2p_tcp_udp_checksumoffload;
9741 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
9742 				params->nan_tcp_udp_checksumoffload;
9743 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
9744 				params->tcp_udp_checksumoffload;
9745 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
9746 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
9747 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
9748 
9749 	dp_update_rx_soft_irq_limit_params(soc, params);
9750 	dp_update_flow_control_parameters(soc, params);
9751 
9752 	return QDF_STATUS_SUCCESS;
9753 }
9754 
9755 static struct cdp_wds_ops dp_ops_wds = {
9756 	.vdev_set_wds = dp_vdev_set_wds,
9757 #ifdef WDS_VENDOR_EXTENSION
9758 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
9759 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
9760 #endif
9761 };
9762 
9763 /*
9764  * dp_txrx_data_tx_cb_set(): set the callback for non-standard tx
9765  * @soc_hdl: datapath soc handle
9766  * @vdev_id: virtual interface id
9767  * @callback: callback function
9768  * @ctxt: callback context
9769  *
9770  */
9771 static void
9772 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9773 		       ol_txrx_data_tx_cb callback, void *ctxt)
9774 {
9775 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9776 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9777 						     DP_MOD_ID_CDP);
9778 
9779 	if (!vdev)
9780 		return;
9781 
9782 	vdev->tx_non_std_data_callback.func = callback;
9783 	vdev->tx_non_std_data_callback.ctxt = ctxt;
9784 
9785 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9786 }
9787 
9788 /**
9789  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
9790  * @soc: datapath soc handle
9791  * @pdev_id: id of datapath pdev handle
9792  *
9793  * Return: opaque pointer to dp txrx handle
9794  */
9795 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
9796 {
9797 	struct dp_pdev *pdev =
9798 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9799 						   pdev_id);
9800 	if (qdf_unlikely(!pdev))
9801 		return NULL;
9802 
9803 	return pdev->dp_txrx_handle;
9804 }
9805 
9806 /**
9807  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
9808  * @soc: datapath soc handle
9809  * @pdev_id: id of datapath pdev handle
9810  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
9811  *
9812  * Return: void
9813  */
9814 static void
9815 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
9816 			   void *dp_txrx_hdl)
9817 {
9818 	struct dp_pdev *pdev =
9819 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9820 						   pdev_id);
9821 
9822 	if (!pdev)
9823 		return;
9824 
9825 	pdev->dp_txrx_handle = dp_txrx_hdl;
9826 }
9827 
9828 /**
9829  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
9830  * @soc: datapath soc handle
9831  * @vdev_id: vdev id
9832  *
9833  * Return: opaque pointer to dp txrx handle
9834  */
9835 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
9836 				       uint8_t vdev_id)
9837 {
9838 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9839 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9840 						     DP_MOD_ID_CDP);
9841 	void *dp_ext_handle;
9842 
9843 	if (!vdev)
9844 		return NULL;
9845 	dp_ext_handle = vdev->vdev_dp_ext_handle;
9846 
9847 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9848 	return dp_ext_handle;
9849 }
9850 
9851 /**
9852  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
9853  * @soc: datapath soc handle
9854  * @vdev_id: vdev id
9855  * @size: size of advance dp handle
9856  *
9857  * Return: QDF_STATUS
9858  */
9859 static QDF_STATUS
9860 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
9861 			  uint16_t size)
9862 {
9863 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9864 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9865 						     DP_MOD_ID_CDP);
9866 	void *dp_ext_handle;
9867 
9868 	if (!vdev)
9869 		return QDF_STATUS_E_FAILURE;
9870 
9871 	dp_ext_handle = qdf_mem_malloc(size);
9872 
9873 	if (!dp_ext_handle) {
9874 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9875 		return QDF_STATUS_E_FAILURE;
9876 	}
9877 
9878 	vdev->vdev_dp_ext_handle = dp_ext_handle;
9879 
9880 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9881 	return QDF_STATUS_SUCCESS;
9882 }
9883 
9884 /**
9885  * dp_vdev_inform_ll_conn() - Inform vdev to add/delete a latency critical
9886  *			      connection for this vdev
9887  * @soc_hdl: CDP soc handle
9888  * @vdev_id: vdev ID
9889  * @action: Add/Delete action
9890  *
9891  * Returns: QDF_STATUS.
9892  */
9893 static QDF_STATUS
9894 dp_vdev_inform_ll_conn(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9895 		       enum vdev_ll_conn_actions action)
9896 {
9897 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9898 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9899 						     DP_MOD_ID_CDP);
9900 
9901 	if (!vdev) {
9902 		dp_err("LL connection action for invalid vdev %d", vdev_id);
9903 		return QDF_STATUS_E_FAILURE;
9904 	}
9905 
9906 	switch (action) {
9907 	case CDP_VDEV_LL_CONN_ADD:
9908 		vdev->num_latency_critical_conn++;
9909 		break;
9910 
9911 	case CDP_VDEV_LL_CONN_DEL:
9912 		vdev->num_latency_critical_conn--;
9913 		break;
9914 
9915 	default:
9916 		dp_err("LL connection action invalid %d", action);
9917 		break;
9918 	}
9919 
9920 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9921 	return QDF_STATUS_SUCCESS;
9922 }
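
/*
 * Illustrative usage sketch (hypothetical caller): bracketing a
 * latency-critical flow so the vdev's counter tracks active low-latency
 * connections.
 *
 *	dp_vdev_inform_ll_conn(soc_hdl, vdev_id, CDP_VDEV_LL_CONN_ADD);
 *	... run the latency-critical traffic ...
 *	dp_vdev_inform_ll_conn(soc_hdl, vdev_id, CDP_VDEV_LL_CONN_DEL);
 */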
9923 
9924 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
9925 /**
9926  * dp_soc_set_swlm_enable() - Enable/Disable SWLM if initialized.
9927  * @soc_hdl: CDP Soc handle
9928  * @value: Enable/Disable value
9929  *
9930  * Returns: QDF_STATUS
9931  */
9932 static QDF_STATUS dp_soc_set_swlm_enable(struct cdp_soc_t *soc_hdl,
9933 					 uint8_t value)
9934 {
9935 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9936 
9937 	if (!soc->swlm.is_init) {
9938 		dp_err("SWLM is not initialized");
9939 		return QDF_STATUS_E_FAILURE;
9940 	}
9941 
9942 	soc->swlm.is_enabled = !!value;
9943 
9944 	return QDF_STATUS_SUCCESS;
9945 }
9946 
9947 /**
9948  * dp_soc_is_swlm_enabled() - Check if SWLM is enabled.
9949  * @soc_hdl: CDP Soc handle
9950  *
9951  * Returns: QDF_STATUS
9952  */
9953 static uint8_t dp_soc_is_swlm_enabled(struct cdp_soc_t *soc_hdl)
9954 {
9955 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9956 
9957 	return soc->swlm.is_enabled;
9958 }
9959 #endif
9960 
9961 /**
9962  * dp_display_srng_info() - Dump the srng HP TP info
9963  * @soc_hdl: CDP Soc handle
9964  *
9965  * This function dumps the SW hp/tp values for the important rings.
9966  * HW hp/tp values are not being dumped, since it can lead to
9967  * READ NOC error when UMAC is in low power state. MCC does not have
9968  * device force wake working yet.
9969  *
9970  * Return: none
9971  */
9972 static void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
9973 {
9974 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9975 	hal_soc_handle_t hal_soc = soc->hal_soc;
9976 	uint32_t hp, tp, i;
9977 
9978 	dp_info("SRNG HP-TP data:");
9979 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
9980 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
9981 				&hp, &tp);
9982 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
9983 
9984 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
9985 				&hp, &tp);
9986 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
9987 	}
9988 
9989 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
9990 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
9991 				&hp, &tp);
9992 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
9993 	}
9994 
9995 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &hp, &tp);
9996 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
9997 
9998 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &hp, &tp);
9999 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
10000 
10001 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &hp, &tp);
10002 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
10003 }
10004 
10005 /**
10006  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
10007  * @soc_handle: datapath soc handle
10008  *
10009  * Return: opaque pointer to external dp (non-core DP)
10010  */
10011 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
10012 {
10013 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10014 
10015 	return soc->external_txrx_handle;
10016 }
10017 
10018 /**
10019  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
10020  * @soc_handle: datapath soc handle
10021  * @txrx_handle: opaque pointer to external dp (non-core DP)
10022  *
10023  * Return: void
10024  */
10025 static void
10026 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
10027 {
10028 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10029 
10030 	soc->external_txrx_handle = txrx_handle;
10031 }
10032 
10033 /**
10034  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
10035  * @soc_hdl: datapath soc handle
10036  * @pdev_id: id of the datapath pdev handle
10037  * @lmac_id: lmac id
10038  *
10039  * Return: QDF_STATUS
10040  */
10041 static QDF_STATUS
10042 dp_soc_map_pdev_to_lmac
10043 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10044 	 uint32_t lmac_id)
10045 {
10046 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10047 
10048 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
10049 				pdev_id,
10050 				lmac_id);
10051 
10052 	/* Set host PDEV ID for lmac_id */
10053 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
10054 			      pdev_id,
10055 			      lmac_id);
10056 
10057 	return QDF_STATUS_SUCCESS;
10058 }
10059 
10060 /**
10061  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
10062  * @soc_hdl: datapath soc handle
10063  * @pdev_id: id of the datapath pdev handle
10064  * @lmac_id: lmac id
10065  *
10066  * In the event of a dynamic mode change, update the pdev to lmac mapping
10067  *
10068  * Return: QDF_STATUS
10069  */
10070 static QDF_STATUS
10071 dp_soc_handle_pdev_mode_change
10072 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10073 	 uint32_t lmac_id)
10074 {
10075 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10076 	struct dp_vdev *vdev = NULL;
10077 	uint8_t hw_pdev_id, mac_id;
10078 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
10079 								  pdev_id);
10080 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
10081 
10082 	if (qdf_unlikely(!pdev))
10083 		return QDF_STATUS_E_FAILURE;
10084 
10085 	pdev->lmac_id = lmac_id;
10086 	pdev->target_pdev_id =
10087 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
10088 	dp_info(" mode change %d %d\n", pdev->pdev_id, pdev->lmac_id);
10089 
10090 	/* Set host PDEV ID for lmac_id */
10091 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
10092 			      pdev->pdev_id,
10093 			      lmac_id);
10094 
10095 	hw_pdev_id =
10096 		dp_get_target_pdev_id_for_host_pdev_id(soc,
10097 						       pdev->pdev_id);
10098 
10099 	/*
10100 	 * When NSS offload is enabled, send pdev_id->lmac_id
10101 	 * and pdev_id to hw_pdev_id to NSS FW
10102 	 */
10103 	if (nss_config) {
10104 		mac_id = pdev->lmac_id;
10105 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
10106 			soc->cdp_soc.ol_ops->
10107 				pdev_update_lmac_n_target_pdev_id(
10108 				soc->ctrl_psoc,
10109 				&pdev_id, &mac_id, &hw_pdev_id);
10110 	}
10111 
10112 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
10113 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
10114 		HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
10115 						hw_pdev_id);
10116 		vdev->lmac_id = pdev->lmac_id;
10117 	}
10118 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
10119 
10120 	return QDF_STATUS_SUCCESS;
10121 }
10122 
10123 /**
10124  * dp_soc_set_pdev_status_down() - set pdev down/up status
10125  * @soc: datapath soc handle
10126  * @pdev_id: id of datapath pdev handle
10127  * @is_pdev_down: pdev down/up status
10128  *
10129  * Return: QDF_STATUS
10130  */
10131 static QDF_STATUS
10132 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
10133 			    bool is_pdev_down)
10134 {
10135 	struct dp_pdev *pdev =
10136 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10137 						   pdev_id);
10138 	if (!pdev)
10139 		return QDF_STATUS_E_FAILURE;
10140 
10141 	pdev->is_pdev_down = is_pdev_down;
10142 	return QDF_STATUS_SUCCESS;
10143 }
10144 
10145 /**
10146  * dp_get_cfg_capabilities() - get dp capabilities
10147  * @soc_handle: datapath soc handle
10148  * @dp_caps: enum for dp capabilities
10149  *
10150  * Return: bool to determine if dp caps is enabled
10151  */
10152 static bool
10153 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
10154 			enum cdp_capabilities dp_caps)
10155 {
10156 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10157 
10158 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
10159 }
10160 
10161 #ifdef FEATURE_AST
10162 static QDF_STATUS
10163 dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
10164 		       uint8_t *peer_mac)
10165 {
10166 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10167 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10168 	struct dp_peer *peer =
10169 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
10170 					       DP_MOD_ID_CDP);
10171 
10172 	/* Peer can be null for monitor vap mac address */
10173 	if (!peer) {
10174 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
10175 			  "%s: Invalid peer\n", __func__);
10176 		return QDF_STATUS_E_FAILURE;
10177 	}
10178 
10179 	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
10180 
10181 	qdf_spin_lock_bh(&soc->ast_lock);
10182 	dp_peer_delete_ast_entries(soc, peer);
10183 	qdf_spin_unlock_bh(&soc->ast_lock);
10184 
10185 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10186 	return status;
10187 }
10188 #endif
10189 
10190 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
10191 /**
10192  * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
10193  * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
10194  * @soc: cdp_soc handle
10195  * @pdev_id: id of cdp_pdev handle
10196  * @protocol_type: protocol type for which stats should be displayed
10197  *
10198  * Return: none
10199  */
10200 static inline void
10201 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
10202 				   uint16_t protocol_type)
10203 {
10204 }
10205 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
10206 
10207 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
10208 /**
10209  * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
10210  * applied to the desired protocol type packets
10211  * @soc: soc handle
10212  * @pdev_id: id of cdp_pdev handle
10213  * @enable_rx_protocol_tag: bitmask that indicates which protocol types
10214  * are enabled for tagging; zero disables the feature, non-zero
10215  * enables it
10216  * @protocol_type: new protocol type for which the tag is being added
10217  * @tag: user configured tag for the new protocol
10218  *
10219  * Return: Success
10220  */
10221 static inline QDF_STATUS
10222 dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
10223 			       uint32_t enable_rx_protocol_tag,
10224 			       uint16_t protocol_type,
10225 			       uint16_t tag)
10226 {
10227 	return QDF_STATUS_SUCCESS;
10228 }
10229 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
10230 
10231 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
10232 /**
10233  * dp_set_rx_flow_tag - add/delete a flow
10234  * @soc: soc handle
10235  * @pdev_id: id of cdp_pdev handle
10236  * @flow_info: flow tuple that is to be added to/deleted from flow search table
10237  *
10238  * Return: Success
10239  */
10240 static inline QDF_STATUS
10241 dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10242 		   struct cdp_rx_flow_info *flow_info)
10243 {
10244 	return QDF_STATUS_SUCCESS;
10245 }
10246 /**
10247  * dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
10248  * given flow 5-tuple
10249  * @cdp_soc: soc handle
10250  * @pdev_id: id of cdp_pdev handle
10251  * @flow_info: flow 5-tuple for which stats should be displayed
10252  *
10253  * Return: Success
10254  */
10255 static inline QDF_STATUS
10256 dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10257 			  struct cdp_rx_flow_info *flow_info)
10258 {
10259 	return QDF_STATUS_SUCCESS;
10260 }
10261 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
10262 
10263 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
10264 					   uint32_t max_peers,
10265 					   uint32_t max_ast_index,
10266 					   bool peer_map_unmap_v2)
10267 {
10268 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10269 
10270 	soc->max_peers = max_peers;
10271 
10272 	qdf_print("%s max_peers %u, max_ast_index: %u\n",
10273 		   __func__, max_peers, max_ast_index);
10274 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
10275 
10276 	if (dp_peer_find_attach(soc))
10277 		return QDF_STATUS_E_FAILURE;
10278 
10279 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
10280 	soc->peer_map_attach_success = TRUE;
10281 
10282 	return QDF_STATUS_SUCCESS;
10283 }
10284 
10285 static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
10286 				   enum cdp_soc_param_t param,
10287 				   uint32_t value)
10288 {
10289 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10290 
10291 	switch (param) {
10292 	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
10293 		soc->num_msdu_exception_desc = value;
10294 		dp_info("num_msdu exception_desc %u",
10295 			value);
10296 		break;
10297 	case DP_SOC_PARAM_CMEM_FSE_SUPPORT:
10298 		if (wlan_cfg_is_fst_in_cmem_enabled(soc->wlan_cfg_ctx))
10299 			soc->fst_in_cmem = !!value;
10300 		dp_info("FW supports CMEM FSE %u", value);
10301 		break;
10302 	case DP_SOC_PARAM_MAX_AST_AGEOUT:
10303 		soc->max_ast_ageout_count = value;
10304 		dp_info("Max ast ageout count %u", soc->max_ast_ageout_count);
10305 		break;
10306 	case DP_SOC_PARAM_EAPOL_OVER_CONTROL_PORT:
10307 		soc->eapol_over_control_port = value;
10308 		dp_info("Eapol over control_port:%d",
10309 			soc->eapol_over_control_port);
10310 		break;
10311 	default:
10312 		dp_info("not handled param %d ", param);
10313 		break;
10314 	}
10315 
10316 	return QDF_STATUS_SUCCESS;
10317 }
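
/*
 * Illustrative usage sketch (the value 1024 is an example only): the
 * target layer pushing a soc-level tunable at attach time.
 *
 *	dp_soc_set_param(soc_hdl, DP_SOC_PARAM_MSDU_EXCEPTION_DESC, 1024);
 */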
10318 
10319 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
10320 				      void *stats_ctx)
10321 {
10322 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10323 
10324 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
10325 }
10326 
10327 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
10328 /**
10329  * dp_peer_flush_rate_stats_req(): Flush peer rate stats
10330  * @soc: Datapath SOC handle
10331  * @peer: Datapath peer
10332  * @arg: argument to iter function
10333  *
10334  * Return: void
10335  */
10336 static void
10337 dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
10338 			     void *arg)
10339 {
10340 	if (peer->bss_peer)
10341 		return;
10342 
10343 	dp_wdi_event_handler(
10344 		WDI_EVENT_FLUSH_RATE_STATS_REQ,
10345 		soc, peer->rdkstats_ctx,
10346 		peer->peer_id,
10347 		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
10348 }
10349 
10350 /**
10351  * dp_flush_rate_stats_req(): Flush peer rate stats in pdev
10352  * @soc_hdl: Datapath SOC handle
10353  * @pdev_id: pdev_id
10354  *
10355  * Return: QDF_STATUS
10356  */
10357 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
10358 					  uint8_t pdev_id)
10359 {
10360 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10361 	struct dp_pdev *pdev =
10362 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10363 						   pdev_id);
10364 	if (!pdev)
10365 		return QDF_STATUS_E_FAILURE;
10366 
10367 	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
10368 			     DP_MOD_ID_CDP);
10369 
10370 	return QDF_STATUS_SUCCESS;
10371 }
10372 #else
10373 static inline QDF_STATUS
10374 dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
10375 			uint8_t pdev_id)
10376 {
10377 	return QDF_STATUS_SUCCESS;
10378 }
10379 #endif
10380 
10381 static void *dp_peer_get_rdkstats_ctx(struct cdp_soc_t *soc_hdl,
10382 				      uint8_t vdev_id,
10383 				      uint8_t *mac_addr)
10384 {
10385 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10386 	struct dp_peer *peer;
10387 	void *rdkstats_ctx = NULL;
10388 
10389 	if (mac_addr) {
10390 		peer = dp_peer_find_hash_find(soc, mac_addr,
10391 					      0, vdev_id,
10392 					      DP_MOD_ID_CDP);
10393 		if (!peer)
10394 			return NULL;
10395 
10396 		rdkstats_ctx = peer->rdkstats_ctx;
10397 
10398 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10399 	}
10400 
10401 	return rdkstats_ctx;
10402 }
10403 
10404 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
10405 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
10406 					   uint8_t pdev_id,
10407 					   void *buf)
10408 {
10409 	 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
10410 			      (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
10411 			      WDI_NO_VAL, pdev_id);
10412 	return QDF_STATUS_SUCCESS;
10413 }
10414 #else
10415 static inline QDF_STATUS
10416 dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
10417 			 uint8_t pdev_id,
10418 			 void *buf)
10419 {
10420 	return QDF_STATUS_SUCCESS;
10421 }
10422 #endif
10423 
10424 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
10425 {
10426 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10427 
10428 	return soc->rate_stats_ctx;
10429 }
10430 
10431 /*
10432  * dp_get_cfg() - get dp cfg
10433  * @soc: cdp soc handle
10434  * @cfg: cfg enum
10435  *
10436  * Return: cfg value
10437  */
10438 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
10439 {
10440 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
10441 	uint32_t value = 0;
10442 
10443 	switch (cfg) {
10444 	case cfg_dp_enable_data_stall:
10445 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
10446 		break;
10447 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
10448 		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
10449 		break;
10450 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
10451 		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
10452 		break;
10453 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
10454 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
10455 		break;
10456 	case cfg_dp_disable_legacy_mode_csum_offload:
10457 		value = dpsoc->wlan_cfg_ctx->
10458 					legacy_mode_checksumoffload_disable;
10459 		break;
10460 	case cfg_dp_tso_enable:
10461 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
10462 		break;
10463 	case cfg_dp_lro_enable:
10464 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
10465 		break;
10466 	case cfg_dp_gro_enable:
10467 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
10468 		break;
10469 	case cfg_dp_sg_enable:
10470 		value = dpsoc->wlan_cfg_ctx->sg_enabled;
10471 		break;
10472 	case cfg_dp_tx_flow_start_queue_offset:
10473 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
10474 		break;
10475 	case cfg_dp_tx_flow_stop_queue_threshold:
10476 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
10477 		break;
10478 	case cfg_dp_disable_intra_bss_fwd:
10479 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
10480 		break;
10481 	case cfg_dp_pktlog_buffer_size:
10482 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
10483 		break;
10484 	case cfg_dp_wow_check_rx_pending:
10485 		value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable;
10486 		break;
10487 	default:
10488 		value =  0;
10489 	}
10490 
10491 	return value;
10492 }
10493 
10494 #ifdef PEER_FLOW_CONTROL
10495 /**
10496  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
10497  * @soc_handle: datapath soc handle
10498  * @pdev_id: id of datapath pdev handle
10499  * @param: ol ath params
10500  * @value: value of the flag
10501  * @buff: Buffer to be passed
10502  *
10503  * This function mirrors the legacy implementation, in which a single
10504  * function both displays stats and updates pdev params.
10505  *
10506  * Return: 0 for success. nonzero for failure.
10507  */
10508 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
10509 					       uint8_t pdev_id,
10510 					       enum _dp_param_t param,
10511 					       uint32_t value, void *buff)
10512 {
10513 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10514 	struct dp_pdev *pdev =
10515 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10516 						   pdev_id);
10517 
10518 	if (qdf_unlikely(!pdev))
10519 		return 1;
10520 
10521 	soc = pdev->soc;
10522 	if (!soc)
10523 		return 1;
10524 
10525 	switch (param) {
10526 #ifdef QCA_ENH_V3_STATS_SUPPORT
10527 	case DP_PARAM_VIDEO_DELAY_STATS_FC:
10528 		if (value)
10529 			pdev->delay_stats_flag = true;
10530 		else
10531 			pdev->delay_stats_flag = false;
10532 		break;
10533 	case DP_PARAM_VIDEO_STATS_FC:
10534 		qdf_print("------- TID Stats ------\n");
10535 		dp_pdev_print_tid_stats(pdev);
10536 		qdf_print("------ Delay Stats ------\n");
10537 		dp_pdev_print_delay_stats(pdev);
10538 		qdf_print("------ Rx Error Stats ------\n");
10539 		dp_pdev_print_rx_error_stats(pdev);
10540 		break;
10541 #endif
10542 	case DP_PARAM_TOTAL_Q_SIZE:
10543 		{
10544 			uint32_t tx_min, tx_max;
10545 
10546 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
10547 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
10548 
10549 			if (!buff) {
10550 				if ((value >= tx_min) && (value <= tx_max)) {
10551 					pdev->num_tx_allowed = value;
10552 				} else {
10553 					dp_tx_info("%pK: Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
10554 						   soc, tx_min, tx_max);
10555 					break;
10556 				}
10557 			} else {
10558 				*(int *)buff = pdev->num_tx_allowed;
10559 			}
10560 		}
10561 		break;
10562 	default:
10563 		dp_tx_info("%pK: not handled param %d ", soc, param);
10564 		break;
10565 	}
10566 
10567 	return 0;
10568 }
10569 #endif
10570 
10571 /**
10572  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
10573  * @psoc: dp soc handle
10574  * @pdev_id: id of DP_PDEV handle
10575  * @pcp: pcp value
10576  * @tid: tid value passed by the user
10577  *
10578  * Return: QDF_STATUS_SUCCESS on success
10579  */
10580 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
10581 						uint8_t pdev_id,
10582 						uint8_t pcp, uint8_t tid)
10583 {
10584 	struct dp_soc *soc = (struct dp_soc *)psoc;
10585 
10586 	soc->pcp_tid_map[pcp] = tid;
10587 
10588 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
10589 	return QDF_STATUS_SUCCESS;
10590 }
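
/*
 * Illustrative usage sketch (values are examples only): mapping 802.1p
 * PCP 6, commonly voice, to TID 6 in both the soc shadow map and the HW
 * map via hal_tx_update_pcp_tid_map().
 *
 *	dp_set_pdev_pcp_tid_map_wifi3(psoc, pdev_id, 6, 6);
 */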
10591 
10592 /**
10593  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
10594  * @soc: DP soc handle
10595  * @vdev_id: id of DP_VDEV handle
10596  * @pcp: pcp value
10597  * @tid: tid value passed by the user
10598  *
10599  * Return: QDF_STATUS_SUCCESS on success
10600  */
10601 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
10602 						uint8_t vdev_id,
10603 						uint8_t pcp, uint8_t tid)
10604 {
10605 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10606 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10607 						     DP_MOD_ID_CDP);
10608 
10609 	if (!vdev)
10610 		return QDF_STATUS_E_FAILURE;
10611 
10612 	vdev->pcp_tid_map[pcp] = tid;
10613 
10614 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10615 	return QDF_STATUS_SUCCESS;
10616 }
10617 
10618 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
10619 static void dp_drain_txrx(struct cdp_soc_t *soc_handle)
10620 {
10621 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10622 	uint32_t cur_tx_limit, cur_rx_limit;
10623 	uint32_t budget = 0xffff;
10624 	uint32_t val;
10625 	int i;
10626 
10627 	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
10628 	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;
10629 
10630 	/* Temporarily increase soft irq limits when going to drain
10631 	 * the UMAC/LMAC SRNGs and restore them after polling.
10632 	 * Though the budget is on the higher side, the TX/RX reaping loops
10633 	 * will not run for long, as both TX and RX would be suspended
10634 	 * by the time this API is called.
10635 	 */
10636 	dp_update_soft_irq_limits(soc, budget, budget);
10637 
10638 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
10639 		dp_service_srngs(&soc->intr_ctx[i], budget);
10640 
10641 	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);
10642 
10643 	/* Do a dummy read at offset 0; this will ensure all
10644 	 * pending writes (HP/TP) are flushed before the read returns.
10645 	 */
10646 	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
10647 	dp_debug("Register value at offset 0: %u\n", val);
10648 }
10649 #endif
10650 
10651 static struct cdp_cmn_ops dp_ops_cmn = {
10652 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
10653 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
10654 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
10655 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
10656 	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
10657 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
10658 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
10659 	.txrx_peer_create = dp_peer_create_wifi3,
10660 	.txrx_peer_setup = dp_peer_setup_wifi3,
10661 #ifdef FEATURE_AST
10662 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
10663 #else
10664 	.txrx_peer_teardown = NULL,
10665 #endif
10666 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
10667 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
10668 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
10669 	.txrx_peer_get_ast_info_by_pdev =
10670 		dp_peer_get_ast_info_by_pdevid_wifi3,
10671 	.txrx_peer_ast_delete_by_soc =
10672 		dp_peer_ast_entry_del_by_soc,
10673 	.txrx_peer_ast_delete_by_pdev =
10674 		dp_peer_ast_entry_del_by_pdev,
10675 	.txrx_peer_delete = dp_peer_delete_wifi3,
10676 	.txrx_vdev_register = dp_vdev_register_wifi3,
10677 	.txrx_soc_detach = dp_soc_detach_wifi3,
10678 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
10679 	.txrx_soc_init = dp_soc_init_wifi3,
10680 #ifndef QCA_HOST_MODE_WIFI_DISABLED
10681 	.txrx_tso_soc_attach = dp_tso_soc_attach,
10682 	.txrx_tso_soc_detach = dp_tso_soc_detach,
10683 	.tx_send = dp_tx_send,
10684 	.tx_send_exc = dp_tx_send_exception,
10685 #endif
10686 	.txrx_pdev_init = dp_pdev_init_wifi3,
10687 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
10688 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
10689 	.txrx_ath_getstats = dp_get_device_stats,
10690 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
10691 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
10692 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
10693 	.delba_process = dp_delba_process_wifi3,
10694 	.set_addba_response = dp_set_addba_response,
10695 	.flush_cache_rx_queue = NULL,
10696 	/* TODO: get API's for dscp-tid need to be added*/
10697 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
10698 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
10699 	.txrx_get_total_per = dp_get_total_per,
10700 	.txrx_stats_request = dp_txrx_stats_request,
10701 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
10702 	.display_stats = dp_txrx_dump_stats,
10703 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
10704 	.txrx_intr_detach = dp_soc_interrupt_detach,
10705 	.set_pn_check = dp_set_pn_check_wifi3,
10706 	.set_key_sec_type = dp_set_key_sec_type_wifi3,
10707 	.update_config_parameters = dp_update_config_parameters,
10708 	/* TODO: Add other functions */
10709 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
10710 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
10711 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
10712 	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
10713 	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
10714 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
10715 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
10716 	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
10717 	.handle_mode_change = dp_soc_handle_pdev_mode_change,
10718 	.set_pdev_status_down = dp_soc_set_pdev_status_down,
10719 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
10720 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
10721 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
10722 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
10723 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
10724 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
10725 	.set_soc_param = dp_soc_set_param,
10726 	.txrx_get_os_rx_handles_from_vdev =
10727 					dp_get_os_rx_handles_from_vdev_wifi3,
10728 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
10729 	.get_dp_capabilities = dp_get_cfg_capabilities,
10730 	.txrx_get_cfg = dp_get_cfg,
10731 	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
10732 	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
10733 	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
10734 	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
10735 	.txrx_peer_get_rdkstats_ctx = dp_peer_get_rdkstats_ctx,
10736 
10737 	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
10738 	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
10739 
10740 	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
10741 #ifdef QCA_MULTIPASS_SUPPORT
10742 	.set_vlan_groupkey = dp_set_vlan_groupkey,
10743 #endif
10744 	.get_peer_mac_list = dp_get_peer_mac_list,
10745 #ifdef QCA_SUPPORT_WDS_EXTENDED
10746 	.get_wds_ext_peer_id = dp_wds_ext_get_peer_id,
10747 	.set_wds_ext_peer_rx = dp_wds_ext_set_peer_rx,
10748 #endif /* QCA_SUPPORT_WDS_EXTENDED */
10749 
10750 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
10751 	.txrx_drain = dp_drain_txrx,
10752 #endif
10753 };
10754 
10755 static struct cdp_ctrl_ops dp_ops_ctrl = {
10756 	.txrx_peer_authorize = dp_peer_authorize,
10757 #ifdef VDEV_PEER_PROTOCOL_COUNT
10758 	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
10759 	.txrx_set_peer_protocol_drop_mask =
10760 		dp_enable_vdev_peer_protocol_drop_mask,
10761 	.txrx_is_peer_protocol_count_enabled =
10762 		dp_is_vdev_peer_protocol_count_enabled,
10763 	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
10764 #endif
10765 	.txrx_set_vdev_param = dp_set_vdev_param,
10766 	.txrx_set_psoc_param = dp_set_psoc_param,
10767 	.txrx_get_psoc_param = dp_get_psoc_param,
10768 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
10769 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
10770 	.txrx_get_sec_type = dp_get_sec_type,
10771 	.txrx_wdi_event_sub = dp_wdi_event_sub,
10772 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
10773 	.txrx_set_pdev_param = dp_set_pdev_param,
10774 	.txrx_get_pdev_param = dp_get_pdev_param,
10775 	.txrx_set_peer_param = dp_set_peer_param,
10776 	.txrx_get_peer_param = dp_get_peer_param,
10777 #ifdef VDEV_PEER_PROTOCOL_COUNT
10778 	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
10779 #endif
10780 #ifdef WLAN_SUPPORT_MSCS
10781 	.txrx_record_mscs_params = dp_record_mscs_params,
10782 #endif
10783 #ifdef WLAN_SUPPORT_SCS
10784 	.txrx_enable_scs_params = dp_enable_scs_params,
10785 	.txrx_record_scs_params = dp_record_scs_params,
10786 #endif
10787 	.set_key = dp_set_michael_key,
10788 	.txrx_get_vdev_param = dp_get_vdev_param,
10789 	.calculate_delay_stats = dp_calculate_delay_stats,
10790 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
10791 	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
10792 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
10793 	.txrx_dump_pdev_rx_protocol_tag_stats =
10794 				dp_dump_pdev_rx_protocol_tag_stats,
10795 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
10796 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
10797 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
10798 	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
10799 	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
10800 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
10801 #ifdef QCA_MULTIPASS_SUPPORT
10802 	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
10803 #endif /*QCA_MULTIPASS_SUPPORT*/
10804 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
10805 	.txrx_set_delta_tsf = dp_set_delta_tsf,
10806 	.txrx_set_tsf_ul_delay_report = dp_set_tsf_ul_delay_report,
10807 	.txrx_get_uplink_delay = dp_get_uplink_delay,
10808 #endif
10809 };
10810 
10811 static struct cdp_me_ops dp_ops_me = {
10812 #ifndef QCA_HOST_MODE_WIFI_DISABLED
10813 #ifdef ATH_SUPPORT_IQUE
10814 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
10815 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
10816 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
10817 #endif
10818 #endif
10819 };
10820 
10821 static struct cdp_host_stats_ops dp_ops_host_stats = {
10822 	.txrx_per_peer_stats = dp_get_host_peer_stats,
10823 	.get_fw_peer_stats = dp_get_fw_peer_stats,
10824 	.get_htt_stats = dp_get_htt_stats,
10825 	.txrx_stats_publish = dp_txrx_stats_publish,
10826 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
10827 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
10828 	.txrx_get_soc_stats = dp_txrx_get_soc_stats,
10829 	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
10830 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
10831 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
10832 	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
10833 	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
10834 	/* TODO */
10835 };
10836 
10837 static struct cdp_raw_ops dp_ops_raw = {
10838 	/* TODO */
10839 };
10840 
10841 #ifdef PEER_FLOW_CONTROL
10842 static struct cdp_pflow_ops dp_ops_pflow = {
10843 	dp_tx_flow_ctrl_configure_pdev,
10844 };
#endif /* PEER_FLOW_CONTROL */
10846 
10847 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
10848 static struct cdp_cfr_ops dp_ops_cfr = {
10849 	.txrx_cfr_filter = NULL,
10850 	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
10851 	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
10852 	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
10853 	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
10854 	.txrx_enable_mon_reap_timer = NULL,
10855 };
10856 #endif
10857 
10858 #ifdef WLAN_SUPPORT_MSCS
10859 static struct cdp_mscs_ops dp_ops_mscs = {
10860 	.mscs_peer_lookup_n_get_priority = dp_mscs_peer_lookup_n_get_priority,
10861 };
10862 #endif
10863 
10864 #ifdef WLAN_SUPPORT_MESH_LATENCY
10865 static struct cdp_mesh_latency_ops dp_ops_mesh_latency = {
10866 	.mesh_latency_update_peer_parameter =
10867 		dp_mesh_latency_update_peer_parameter,
10868 };
10869 #endif
10870 
10871 #ifdef FEATURE_RUNTIME_PM
10872 /**
 * dp_flush_ring_hptp() - Update ring shadow register HP/TP
 *			  address on runtime resume
 * @soc: DP soc context
 * @hal_srng: srng to be flushed
10877  *
10878  * Return: None
10879  */
10880 static
10881 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
10882 {
10883 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
10884 						 HAL_SRNG_FLUSH_EVENT)) {
10885 		/* Acquire the lock */
10886 		hal_srng_access_start(soc->hal_soc, hal_srng);
10887 
10888 		hal_srng_access_end(soc->hal_soc, hal_srng);
10889 
10890 		hal_srng_set_flush_last_ts(hal_srng);
10891 		dp_debug("flushed");
10892 	}
10893 }
10894 
10895 /**
10896  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
10897  * @soc_hdl: Datapath soc handle
10898  * @pdev_id: id of data path pdev handle
10899  *
10900  * DP is ready to runtime suspend if there are no pending TX packets.
10901  *
10902  * Return: QDF_STATUS
10903  */
10904 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10905 {
10906 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10907 	struct dp_pdev *pdev;
10908 	uint8_t i;
10909 	int32_t tx_pending;
10910 
10911 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10912 	if (!pdev) {
10913 		dp_err("pdev is NULL");
10914 		return QDF_STATUS_E_INVAL;
10915 	}
10916 
10917 	/* Abort if there are any pending TX packets */
10918 	tx_pending = dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev));
10919 	if (tx_pending) {
10920 		dp_init_info("%pK: Abort suspend due to pending TX packets %d",
10921 			     soc, tx_pending);
10922 
10923 		/* perform a force flush if tx is pending */
10924 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
10925 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
10926 					   HAL_SRNG_FLUSH_EVENT);
10927 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
10928 		}
10929 
10930 		return QDF_STATUS_E_AGAIN;
10931 	}
10932 
10933 	if (dp_runtime_get_refcount(soc)) {
10934 		dp_init_info("refcount: %d", dp_runtime_get_refcount(soc));
10935 
10936 		return QDF_STATUS_E_AGAIN;
10937 	}
10938 
10939 	if (soc->intr_mode == DP_INTR_POLL)
10940 		qdf_timer_stop(&soc->int_timer);
10941 
10942 	dp_rx_fst_update_pm_suspend_status(soc, true);
10943 
10944 	return QDF_STATUS_SUCCESS;
10945 }
10946 
10947 #define DP_FLUSH_WAIT_CNT 10
10948 #define DP_RUNTIME_SUSPEND_WAIT_MS 10
10949 /**
10950  * dp_runtime_resume() - ensure DP is ready to runtime resume
10951  * @soc_hdl: Datapath soc handle
10952  * @pdev_id: id of data path pdev handle
10953  *
10954  * Resume DP for runtime PM.
10955  *
10956  * Return: QDF_STATUS
10957  */
10958 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10959 {
10960 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10961 	int i, suspend_wait = 0;
10962 
10963 	if (soc->intr_mode == DP_INTR_POLL)
10964 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
10965 
10966 	/*
10967 	 * Wait until dp runtime refcount becomes zero or time out, then flush
10968 	 * pending tx for runtime suspend.
10969 	 */
10970 	while (dp_runtime_get_refcount(soc) &&
10971 	       suspend_wait < DP_FLUSH_WAIT_CNT) {
10972 		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
10973 		suspend_wait++;
10974 	}
10975 
10976 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
10977 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
10978 	}
10979 
10980 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
10981 	dp_rx_fst_update_pm_suspend_status(soc, false);
10982 
10983 	return QDF_STATUS_SUCCESS;
10984 }
10985 #endif /* FEATURE_RUNTIME_PM */
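
/*
 * Illustrative runtime PM flow (a minimal sketch, not driver code; the
 * cdp wrapper names are assumptions about the caller layer): the bus
 * layer is expected to retry, since dp_runtime_suspend() only flags the
 * TCL rings for flush and returns QDF_STATUS_E_AGAIN while TX is
 * pending, rather than blocking.
 *
 *   if (cdp_runtime_suspend(cdp_soc, pdev_id) != QDF_STATUS_SUCCESS)
 *           return -EAGAIN;  // PM framework retries suspend later
 *   ...
 *   cdp_runtime_resume(cdp_soc, pdev_id);  // flushes HP/TP shadow regs
 */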
10986 
10987 /**
10988  * dp_tx_get_success_ack_stats() - get tx success completion count
10989  * @soc_hdl: Datapath soc handle
 * @vdev_id: vdev identifier
10991  *
10992  * Return: tx success ack count
10993  */
10994 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
10995 					    uint8_t vdev_id)
10996 {
10997 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10998 	struct cdp_vdev_stats *vdev_stats = NULL;
10999 	uint32_t tx_success;
11000 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11001 						     DP_MOD_ID_CDP);
11002 
11003 	if (!vdev) {
11004 		dp_cdp_err("%pK: Invalid vdev id %d", soc, vdev_id);
11005 		return 0;
11006 	}
11007 
11008 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
11009 	if (!vdev_stats) {
11010 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats", soc);
11011 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11012 		return 0;
11013 	}
11014 
11015 	dp_aggregate_vdev_stats(vdev, vdev_stats);
11016 
11017 	tx_success = vdev_stats->tx.tx_success.num;
11018 	qdf_mem_free(vdev_stats);
11019 
11020 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11021 	return tx_success;
11022 }
11023 
11024 #ifdef WLAN_SUPPORT_DATA_STALL
11025 /**
11026  * dp_register_data_stall_detect_cb() - register data stall callback
11027  * @soc_hdl: Datapath soc handle
11028  * @pdev_id: id of data path pdev handle
11029  * @data_stall_detect_callback: data stall callback function
11030  *
11031  * Return: QDF_STATUS Enumeration
11032  */
11033 static
11034 QDF_STATUS dp_register_data_stall_detect_cb(
11035 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11036 			data_stall_detect_cb data_stall_detect_callback)
11037 {
11038 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11039 	struct dp_pdev *pdev;
11040 
11041 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11042 	if (!pdev) {
11043 		dp_err("pdev NULL!");
11044 		return QDF_STATUS_E_INVAL;
11045 	}
11046 
11047 	pdev->data_stall_detect_callback = data_stall_detect_callback;
11048 	return QDF_STATUS_SUCCESS;
11049 }
11050 
11051 /**
11052  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
11053  * @soc_hdl: Datapath soc handle
11054  * @pdev_id: id of data path pdev handle
11055  * @data_stall_detect_callback: data stall callback function
11056  *
11057  * Return: QDF_STATUS Enumeration
11058  */
11059 static
11060 QDF_STATUS dp_deregister_data_stall_detect_cb(
11061 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11062 			data_stall_detect_cb data_stall_detect_callback)
11063 {
11064 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11065 	struct dp_pdev *pdev;
11066 
11067 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11068 	if (!pdev) {
11069 		dp_err("pdev NULL!");
11070 		return QDF_STATUS_E_INVAL;
11071 	}
11072 
11073 	pdev->data_stall_detect_callback = NULL;
11074 	return QDF_STATUS_SUCCESS;
11075 }
11076 
11077 /**
11078  * dp_txrx_post_data_stall_event() - post data stall event
11079  * @soc_hdl: Datapath soc handle
11080  * @indicator: Module triggering data stall
11081  * @data_stall_type: data stall event type
11082  * @pdev_id: pdev id
11083  * @vdev_id_bitmap: vdev id bitmap
11084  * @recovery_type: data stall recovery type
11085  *
11086  * Return: None
11087  */
11088 static void
11089 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
11090 			      enum data_stall_log_event_indicator indicator,
11091 			      enum data_stall_log_event_type data_stall_type,
11092 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
11093 			      enum data_stall_log_recovery_type recovery_type)
11094 {
11095 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11096 	struct data_stall_event_info data_stall_info;
11097 	struct dp_pdev *pdev;
11098 
11099 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11100 	if (!pdev) {
11101 		dp_err("pdev NULL!");
11102 		return;
11103 	}
11104 
11105 	if (!pdev->data_stall_detect_callback) {
11106 		dp_err("data stall cb not registered!");
11107 		return;
11108 	}
11109 
11110 	dp_info("data_stall_type: %x pdev_id: %d",
11111 		data_stall_type, pdev_id);
11112 
11113 	data_stall_info.indicator = indicator;
11114 	data_stall_info.data_stall_type = data_stall_type;
11115 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
11116 	data_stall_info.pdev_id = pdev_id;
11117 	data_stall_info.recovery_type = recovery_type;
11118 
11119 	pdev->data_stall_detect_callback(&data_stall_info);
11120 }
11121 #endif /* WLAN_SUPPORT_DATA_STALL */
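
/*
 * Illustrative data stall usage (a minimal sketch; the callback below
 * is hypothetical): a consumer registers one detect callback per pdev,
 * after which any module may post a stall event that is delivered
 * through that callback.
 *
 *   static void my_stall_cb(struct data_stall_event_info *info)
 *   {
 *           // inspect info->data_stall_type / info->recovery_type
 *   }
 *
 *   dp_register_data_stall_detect_cb(soc_hdl, pdev_id, my_stall_cb);
 *   dp_txrx_post_data_stall_event(soc_hdl, indicator, stall_type,
 *                                 pdev_id, vdev_id_bitmap, recovery);
 *   ...
 *   dp_deregister_data_stall_detect_cb(soc_hdl, pdev_id, my_stall_cb);
 */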
11122 
11123 #ifdef WLAN_FEATURE_STATS_EXT
11124 /* rx hw stats event wait timeout in ms */
11125 #define DP_REO_STATUS_STATS_TIMEOUT 1500
11126 /**
 * dp_txrx_ext_stats_request - request dp txrx extended stats
11128  * @soc_hdl: soc handle
11129  * @pdev_id: pdev id
11130  * @req: stats request
11131  *
11132  * Return: QDF_STATUS
11133  */
11134 static QDF_STATUS
11135 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11136 			  struct cdp_txrx_ext_stats *req)
11137 {
11138 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11139 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11140 
11141 	if (!pdev) {
11142 		dp_err("pdev is null");
11143 		return QDF_STATUS_E_INVAL;
11144 	}
11145 
11146 	dp_aggregate_pdev_stats(pdev);
11147 
11148 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
11149 	req->tx_msdu_overflow = pdev->stats.tx_i.dropped.ring_full;
11150 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
11151 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
11152 	req->rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
11153 	/* only count error source from RXDMA */
11154 	req->rx_mpdu_error = pdev->stats.err.rxdma_error;
11155 
11156 	return QDF_STATUS_SUCCESS;
11157 }
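
/*
 * Example of consuming the aggregate (a minimal sketch; the caller-side
 * variable is hypothetical): the request synchronously fills a
 * caller-provided cdp_txrx_ext_stats from pdev/soc counters, so no
 * event wait is needed here.
 *
 *   struct cdp_txrx_ext_stats ext = {0};
 *
 *   if (dp_txrx_ext_stats_request(soc_hdl, pdev_id, &ext) ==
 *       QDF_STATUS_SUCCESS)
 *           dp_info("enq %u ovfl %u", ext.tx_msdu_enqueue,
 *                   ext.tx_msdu_overflow);
 */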
11158 
11159 /**
 * dp_rx_hw_stats_cb - callback for the rx hw stats REO command response
11161  * @soc: soc handle
11162  * @cb_ctxt: callback context
11163  * @reo_status: reo command response status
11164  *
11165  * Return: None
11166  */
11167 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
11168 			      union hal_reo_status *reo_status)
11169 {
11170 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
11171 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
11172 	bool is_query_timeout;
11173 
11174 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
11175 	is_query_timeout = rx_hw_stats->is_query_timeout;
	/* free the cb_ctxt once all pending tid stats queries are received */
11177 	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
11178 		if (!is_query_timeout) {
11179 			qdf_event_set(&soc->rx_hw_stats_event);
11180 			soc->is_last_stats_ctx_init = false;
11181 		}
11182 
11183 		qdf_mem_free(rx_hw_stats);
11184 	}
11185 
11186 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
11187 		dp_info("REO stats failure %d",
11188 			queue_status->header.status);
11189 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
11190 		return;
11191 	}
11192 
11193 	if (!is_query_timeout) {
11194 		soc->ext_stats.rx_mpdu_received +=
11195 					queue_status->mpdu_frms_cnt;
11196 		soc->ext_stats.rx_mpdu_missed +=
11197 					queue_status->hole_cnt;
11198 	}
11199 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
11200 }
11201 
11202 /**
11203  * dp_request_rx_hw_stats - request rx hardware stats
11204  * @soc_hdl: soc handle
11205  * @vdev_id: vdev id
11206  *
 * Return: QDF_STATUS
11208  */
11209 static QDF_STATUS
11210 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
11211 {
11212 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11213 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11214 						     DP_MOD_ID_CDP);
11215 	struct dp_peer *peer = NULL;
11216 	QDF_STATUS status;
11217 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
11218 	int rx_stats_sent_cnt = 0;
11219 	uint32_t last_rx_mpdu_received;
11220 	uint32_t last_rx_mpdu_missed;
11221 
11222 	if (!vdev) {
11223 		dp_err("vdev is null for vdev_id: %u", vdev_id);
11224 		status = QDF_STATUS_E_INVAL;
11225 		goto out;
11226 	}
11227 
11228 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
11229 
11230 	if (!peer) {
11231 		dp_err("Peer is NULL");
11232 		status = QDF_STATUS_E_INVAL;
11233 		goto out;
11234 	}
11235 
11236 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
11237 
11238 	if (!rx_hw_stats) {
11239 		dp_err("malloc failed for hw stats structure");
11240 		status = QDF_STATUS_E_INVAL;
11241 		goto out;
11242 	}
11243 
11244 	qdf_event_reset(&soc->rx_hw_stats_event);
11245 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
11246 	/* save the last soc cumulative stats and reset it to 0 */
11247 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
11248 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
11249 	soc->ext_stats.rx_mpdu_received = 0;
11250 	soc->ext_stats.rx_mpdu_missed = 0;
11251 
11252 	rx_stats_sent_cnt =
11253 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
11254 	if (!rx_stats_sent_cnt) {
11255 		dp_err("no tid stats sent successfully");
11256 		qdf_mem_free(rx_hw_stats);
11257 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
11258 		status = QDF_STATUS_E_INVAL;
11259 		goto out;
11260 	}
11261 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
11262 		       rx_stats_sent_cnt);
11263 	rx_hw_stats->is_query_timeout = false;
11264 	soc->is_last_stats_ctx_init = true;
11265 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
11266 
11267 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
11268 				       DP_REO_STATUS_STATS_TIMEOUT);
11269 
11270 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
11271 	if (status != QDF_STATUS_SUCCESS) {
11272 		dp_info("rx hw stats event timeout");
11273 		if (soc->is_last_stats_ctx_init)
11274 			rx_hw_stats->is_query_timeout = true;
		/*
		 * If the query timed out, use the last saved stats
		 * for this query.
		 */
11279 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
11280 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
11281 	}
11282 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
11283 
11284 out:
11285 	if (peer)
11286 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11287 	if (vdev)
11288 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11289 
11290 	return status;
11291 }
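
/*
 * Lifecycle note with a worked example: if dp_peer_rxtid_stats() queues
 * N REO queue-stats commands, pending_tid_stats_cnt starts at N and
 * dp_rx_hw_stats_cb() decrements it once per response; the callback
 * that brings it to zero sets rx_hw_stats_event (unless the waiter
 * already timed out) and frees the shared rx_hw_stats context. E.g.
 * with N = 8 and one response delayed beyond
 * DP_REO_STATUS_STATS_TIMEOUT ms, the waiter times out, marks
 * is_query_timeout and restores the saved counters, and the 8th late
 * callback still performs the final free.
 */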
11292 
11293 /**
11294  * dp_reset_rx_hw_ext_stats - Reset rx hardware ext stats
11295  * @soc_hdl: soc handle
11296  *
11297  * Return: None
11298  */
11299 static
11300 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
11301 {
11302 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11303 
11304 	soc->ext_stats.rx_mpdu_received = 0;
11305 	soc->ext_stats.rx_mpdu_missed = 0;
11306 }
11307 #endif /* WLAN_FEATURE_STATS_EXT */
11308 
11309 #ifdef DP_PEER_EXTENDED_API
11310 static struct cdp_misc_ops dp_ops_misc = {
11311 #ifdef FEATURE_WLAN_TDLS
11312 	.tx_non_std = dp_tx_non_std,
11313 #endif /* FEATURE_WLAN_TDLS */
11314 	.get_opmode = dp_get_opmode,
11315 #ifdef FEATURE_RUNTIME_PM
11316 	.runtime_suspend = dp_runtime_suspend,
11317 	.runtime_resume = dp_runtime_resume,
11318 #endif /* FEATURE_RUNTIME_PM */
11319 	.get_num_rx_contexts = dp_get_num_rx_contexts,
11320 	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
11321 #ifdef WLAN_SUPPORT_DATA_STALL
11322 	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
11323 	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
11324 	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
11325 #endif
11326 
11327 #ifdef WLAN_FEATURE_STATS_EXT
11328 	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
11329 	.request_rx_hw_stats = dp_request_rx_hw_stats,
11330 	.reset_rx_hw_ext_stats = dp_reset_rx_hw_ext_stats,
11331 #endif /* WLAN_FEATURE_STATS_EXT */
11332 	.vdev_inform_ll_conn = dp_vdev_inform_ll_conn,
11333 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
11334 	.set_swlm_enable = dp_soc_set_swlm_enable,
11335 	.is_swlm_enabled = dp_soc_is_swlm_enabled,
11336 #endif
11337 	.display_txrx_hw_info = dp_display_srng_info,
11338 };
11339 #endif
11340 
11341 #ifdef DP_FLOW_CTL
11342 static struct cdp_flowctl_ops dp_ops_flowctl = {
11343 	/* WIFI 3.0 DP implement as required. */
11344 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
11345 	.flow_pool_map_handler = dp_tx_flow_pool_map,
11346 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
11347 	.register_pause_cb = dp_txrx_register_pause_cb,
11348 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
11349 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
11350 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
11351 };
11352 
11353 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
11354 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
11355 };
11356 #endif
11357 
11358 #ifdef IPA_OFFLOAD
11359 static struct cdp_ipa_ops dp_ops_ipa = {
11360 	.ipa_get_resource = dp_ipa_get_resource,
11361 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
11362 	.ipa_op_response = dp_ipa_op_response,
11363 	.ipa_register_op_cb = dp_ipa_register_op_cb,
11364 	.ipa_deregister_op_cb = dp_ipa_deregister_op_cb,
11365 	.ipa_get_stat = dp_ipa_get_stat,
11366 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
11367 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
11368 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
11369 	.ipa_setup = dp_ipa_setup,
11370 	.ipa_cleanup = dp_ipa_cleanup,
11371 	.ipa_setup_iface = dp_ipa_setup_iface,
11372 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
11373 	.ipa_enable_pipes = dp_ipa_enable_pipes,
11374 	.ipa_disable_pipes = dp_ipa_disable_pipes,
11375 	.ipa_set_perf_level = dp_ipa_set_perf_level,
11376 	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
11377 	.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping,
11378 	.ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping
11379 };
11380 #endif
11381 
11382 #ifdef DP_POWER_SAVE
11383 static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
11384 {
11385 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11386 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11387 	int timeout = SUSPEND_DRAIN_WAIT;
11388 	int drain_wait_delay = 50; /* 50 ms */
11389 	int32_t tx_pending;
11390 
11391 	if (qdf_unlikely(!pdev)) {
11392 		dp_err("pdev is NULL");
11393 		return QDF_STATUS_E_INVAL;
11394 	}
11395 
11396 	/* Abort if there are any pending TX packets */
11397 	while ((tx_pending = dp_get_tx_pending((struct cdp_pdev *)pdev))) {
11398 		qdf_sleep(drain_wait_delay);
11399 		if (timeout <= 0) {
11400 			dp_info("TX frames are pending %d, abort suspend",
11401 				tx_pending);
11402 			return QDF_STATUS_E_TIMEOUT;
11403 		}
11404 		timeout = timeout - drain_wait_delay;
11405 	}
11406 
11407 	if (soc->intr_mode == DP_INTR_POLL)
11408 		qdf_timer_stop(&soc->int_timer);
11409 
11410 	/* Stop monitor reap timer and reap any pending frames in ring */
11411 	dp_monitor_pktlog_reap_pending_frames(pdev);
11412 
11413 	dp_suspend_fse_cache_flush(soc);
11414 
11415 	return QDF_STATUS_SUCCESS;
11416 }
11417 
11418 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
11419 {
11420 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11421 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11422 
11423 	if (qdf_unlikely(!pdev)) {
11424 		dp_err("pdev is NULL");
11425 		return QDF_STATUS_E_INVAL;
11426 	}
11427 
11428 	if (soc->intr_mode == DP_INTR_POLL)
11429 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
11430 
11431 	/* Start monitor reap timer */
11432 	dp_monitor_pktlog_start_reap_timer(pdev);
11433 
11434 	dp_resume_fse_cache_flush(soc);
11435 
11436 	return QDF_STATUS_SUCCESS;
11437 }
11438 
11439 /**
11440  * dp_process_wow_ack_rsp() - process wow ack response
11441  * @soc_hdl: datapath soc handle
11442  * @pdev_id: data path pdev handle id
11443  *
11444  * Return: none
11445  */
11446 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
11447 {
11448 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11449 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11450 
11451 	if (qdf_unlikely(!pdev)) {
11452 		dp_err("pdev is NULL");
11453 		return;
11454 	}
11455 
11456 	/*
	 * As part of wow enable, FW disables the mon status ring; on the
	 * wow ack response from FW, reap the mon status ring to make sure
	 * no packets are left pending in the ring.
11460 	 */
11461 	dp_monitor_pktlog_reap_pending_frames(pdev);
11462 }
11463 
11464 /**
11465  * dp_process_target_suspend_req() - process target suspend request
11466  * @soc_hdl: datapath soc handle
11467  * @pdev_id: data path pdev handle id
11468  *
11469  * Return: none
11470  */
11471 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
11472 					  uint8_t pdev_id)
11473 {
11474 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11475 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11476 
11477 	if (qdf_unlikely(!pdev)) {
11478 		dp_err("pdev is NULL");
11479 		return;
11480 	}
11481 
11482 	/* Stop monitor reap timer and reap any pending frames in ring */
11483 	dp_monitor_pktlog_reap_pending_frames(pdev);
11484 }
11485 
11486 static struct cdp_bus_ops dp_ops_bus = {
11487 	.bus_suspend = dp_bus_suspend,
11488 	.bus_resume = dp_bus_resume,
11489 	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
11490 	.process_target_suspend_req = dp_process_target_suspend_req
11491 };
11492 #endif
11493 
11494 #ifdef DP_FLOW_CTL
11495 static struct cdp_throttle_ops dp_ops_throttle = {
11496 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
11497 };
11498 
11499 static struct cdp_cfg_ops dp_ops_cfg = {
11500 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
11501 };
11502 #endif
11503 
11504 #ifdef DP_PEER_EXTENDED_API
11505 static struct cdp_ocb_ops dp_ops_ocb = {
11506 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
11507 };
11508 
11509 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
11510 	.clear_stats = dp_txrx_clear_dump_stats,
11511 };
11512 
11513 static struct cdp_peer_ops dp_ops_peer = {
11514 	.register_peer = dp_register_peer,
11515 	.clear_peer = dp_clear_peer,
11516 	.find_peer_exist = dp_find_peer_exist,
11517 	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
11518 	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
11519 	.peer_state_update = dp_peer_state_update,
11520 	.get_vdevid = dp_get_vdevid,
11521 	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
11522 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
11523 	.get_peer_state = dp_get_peer_state,
11524 	.peer_flush_frags = dp_peer_flush_frags,
11525 };
11526 #endif
11527 
11528 static struct cdp_ops dp_txrx_ops = {
11529 	.cmn_drv_ops = &dp_ops_cmn,
11530 	.ctrl_ops = &dp_ops_ctrl,
11531 	.me_ops = &dp_ops_me,
11532 	.host_stats_ops = &dp_ops_host_stats,
11533 	.wds_ops = &dp_ops_wds,
11534 	.raw_ops = &dp_ops_raw,
11535 #ifdef PEER_FLOW_CONTROL
11536 	.pflow_ops = &dp_ops_pflow,
11537 #endif /* PEER_FLOW_CONTROL */
11538 #ifdef DP_PEER_EXTENDED_API
11539 	.misc_ops = &dp_ops_misc,
11540 	.ocb_ops = &dp_ops_ocb,
11541 	.peer_ops = &dp_ops_peer,
11542 	.mob_stats_ops = &dp_ops_mob_stats,
11543 #endif
11544 #ifdef DP_FLOW_CTL
11545 	.cfg_ops = &dp_ops_cfg,
11546 	.flowctl_ops = &dp_ops_flowctl,
11547 	.l_flowctl_ops = &dp_ops_l_flowctl,
11548 	.throttle_ops = &dp_ops_throttle,
11549 #endif
11550 #ifdef IPA_OFFLOAD
11551 	.ipa_ops = &dp_ops_ipa,
11552 #endif
11553 #ifdef DP_POWER_SAVE
11554 	.bus_ops = &dp_ops_bus,
11555 #endif
11556 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
11557 	.cfr_ops = &dp_ops_cfr,
11558 #endif
11559 #ifdef WLAN_SUPPORT_MSCS
11560 	.mscs_ops = &dp_ops_mscs,
11561 #endif
11562 #ifdef WLAN_SUPPORT_MESH_LATENCY
11563 	.mesh_latency_ops = &dp_ops_mesh_latency,
11564 #endif
11565 };
11566 
11567 /*
 * dp_soc_set_txrx_ring_map() - fill the per-cpu TX ring map for the soc
 * @soc: DP handler for soc
11570  *
11571  * Return: Void
11572  */
11573 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
11574 {
	uint32_t i;

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
11577 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
11578 	}
11579 }
11580 
11581 qdf_export_symbol(dp_soc_set_txrx_ring_map);
11582 
11583 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
11584 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574)
11585 /**
11586  * dp_soc_attach_wifi3() - Attach txrx SOC
11587  * @ctrl_psoc: Opaque SOC handle from control plane
11588  * @htc_handle: Opaque HTC handle
11589  * @hif_handle: Opaque HIF handle
11590  * @qdf_osdev: QDF device
11591  * @ol_ops: Offload Operations
11592  * @device_id: Device ID
11593  *
11594  * Return: DP SOC handle on success, NULL on failure
11595  */
11596 struct cdp_soc_t *
11597 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
11598 		    struct hif_opaque_softc *hif_handle,
11599 		    HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
11600 		    struct ol_if_ops *ol_ops, uint16_t device_id)
11601 {
11602 	struct dp_soc *dp_soc = NULL;
11603 
11604 	dp_soc = dp_soc_attach(ctrl_psoc, hif_handle, htc_handle, qdf_osdev,
11605 			       ol_ops, device_id);
11606 	return dp_soc_to_cdp_soc_t(dp_soc);
11607 }
11608 
11609 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
11610 {
11611 	int lmac_id;
11612 
11613 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
		/* Set default host PDEV ID for lmac_id */
11615 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
11616 				      INVALID_PDEV_ID, lmac_id);
11617 	}
11618 }
11619 
11620 static uint32_t
11621 dp_get_link_desc_id_start(uint16_t arch_id)
11622 {
11623 	switch (arch_id) {
11624 	case CDP_ARCH_TYPE_LI:
11625 		return LINK_DESC_ID_START_21_BITS_COOKIE;
11626 	case CDP_ARCH_TYPE_BE:
11627 		return LINK_DESC_ID_START_20_BITS_COOKIE;
11628 	default:
		dp_err("unknown arch_id 0x%x", arch_id);
11630 		QDF_BUG(0);
11631 		return LINK_DESC_ID_START_21_BITS_COOKIE;
11632 	}
11633 }
11634 
11635 /**
11636  * dp_soc_attach() - Attach txrx SOC
11637  * @ctrl_psoc: Opaque SOC handle from control plane
11638  * @hif_handle: Opaque HIF handle
11639  * @htc_handle: Opaque HTC handle
11640  * @qdf_osdev: QDF device
11641  * @ol_ops: Offload Operations
11642  * @device_id: Device ID
11643  *
11644  * Return: DP SOC handle on success, NULL on failure
11645  */
11646 static struct dp_soc *
11647 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
11648 	      struct hif_opaque_softc *hif_handle, HTC_HANDLE htc_handle,
11649 	      qdf_device_t qdf_osdev, struct ol_if_ops *ol_ops,
11650 	      uint16_t device_id)
11651 {
11652 	int int_ctx;
11653 	struct dp_soc *soc =  NULL;
11654 	uint16_t arch_id;
11655 
11656 	if (!hif_handle) {
11657 		dp_err("HIF handle is NULL");
11658 		goto fail0;
11659 	}
11660 	arch_id = cdp_get_arch_type_from_devid(device_id);
11661 	soc = qdf_mem_malloc(dp_get_soc_context_size(device_id));
11662 	if (!soc) {
11663 		dp_err("DP SOC memory allocation failed");
11664 		goto fail0;
11665 	}
11666 
	dp_info("soc memory allocated %pK", soc);
11668 	soc->hif_handle = hif_handle;
11669 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
11670 	if (!soc->hal_soc)
11671 		goto fail1;
11672 
11673 	hif_get_cmem_info(soc->hif_handle,
11674 			  &soc->cmem_base,
11675 			  &soc->cmem_size);
11676 	int_ctx = 0;
11677 	soc->device_id = device_id;
11678 	soc->cdp_soc.ops = &dp_txrx_ops;
11679 	soc->cdp_soc.ol_ops = ol_ops;
11680 	soc->ctrl_psoc = ctrl_psoc;
11681 	soc->osdev = qdf_osdev;
11682 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
11683 	hal_rx_get_tlv_size(soc->hal_soc, &soc->rx_pkt_tlv_size,
11684 			    &soc->rx_mon_pkt_tlv_size);
11685 
11686 	soc->arch_id = arch_id;
11687 	soc->link_desc_id_start =
11688 			dp_get_link_desc_id_start(soc->arch_id);
11689 	dp_configure_arch_ops(soc);
11690 
11691 	/* Reset wbm sg list and flags */
11692 	dp_rx_wbm_sg_list_reset(soc);
11693 
11694 	dp_soc_tx_hw_desc_history_attach(soc);
11695 	dp_soc_rx_history_attach(soc);
11696 	dp_soc_tx_history_attach(soc);
11697 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
11698 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
11699 	if (!soc->wlan_cfg_ctx) {
		dp_err("wlan_cfg_ctx failed");
11701 		goto fail1;
11702 	}
11703 
11704 	dp_soc_cfg_attach(soc);
11705 
11706 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
11707 		dp_err("failed to allocate link desc pool banks");
11708 		goto fail2;
11709 	}
11710 
11711 	if (dp_hw_link_desc_ring_alloc(soc)) {
11712 		dp_err("failed to allocate link_desc_ring");
11713 		goto fail3;
11714 	}
11715 
11716 	if (dp_soc_srng_alloc(soc)) {
11717 		dp_err("failed to allocate soc srng rings");
11718 		goto fail4;
11719 	}
11720 
11721 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
11722 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
11723 		goto fail5;
11724 	}
11725 
11726 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_attach(soc))) {
11727 		dp_err("unable to do target specific attach");
11728 		goto fail6;
11729 	}
11730 
11731 	if (!dp_monitor_modularized_enable()) {
11732 		if (dp_mon_soc_attach_wrapper(soc)) {
11733 			dp_err("failed to attach monitor");
11734 			goto fail6;
11735 		}
11736 	}
11737 
11738 	dp_soc_swlm_attach(soc);
11739 	dp_soc_set_interrupt_mode(soc);
11740 	dp_soc_set_def_pdev(soc);
11741 
11742 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
11743 		qdf_dma_mem_stats_read(),
11744 		qdf_heap_mem_stats_read(),
11745 		qdf_skb_total_mem_stats_read());
11746 
11747 	return soc;
11748 fail6:
11749 	dp_soc_tx_desc_sw_pools_free(soc);
11750 fail5:
11751 	dp_soc_srng_free(soc);
11752 fail4:
11753 	dp_hw_link_desc_ring_free(soc);
11754 fail3:
11755 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
11756 fail2:
11757 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
11758 fail1:
11759 	qdf_mem_free(soc);
11760 fail0:
11761 	return NULL;
11762 }
11763 
11764 /**
11765  * dp_soc_init() - Initialize txrx SOC
11766  * @dp_soc: Opaque DP SOC handle
11767  * @htc_handle: Opaque HTC handle
11768  * @hif_handle: Opaque HIF handle
11769  *
11770  * Return: DP SOC handle on success, NULL on failure
11771  */
11772 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
11773 		  struct hif_opaque_softc *hif_handle)
11774 {
11775 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
11776 	bool is_monitor_mode = false;
11777 	struct hal_reo_params reo_params;
11778 	uint8_t i;
11779 	int num_dp_msi;
11780 
11781 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
11782 			  WLAN_MD_DP_SOC, "dp_soc");
11783 
11784 	soc->hif_handle = hif_handle;
11785 
11786 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
11787 	if (!soc->hal_soc)
11788 		goto fail0;
11789 
11790 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_init(soc))) {
11791 		dp_err("unable to do target specific init");
11792 		goto fail0;
11793 	}
11794 
11795 	htt_soc = htt_soc_attach(soc, htc_handle);
11796 	if (!htt_soc)
11797 		goto fail1;
11798 
11799 	soc->htt_handle = htt_soc;
11800 
11801 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
11802 		goto fail2;
11803 
11804 	htt_set_htc_handle(htt_soc, htc_handle);
11805 
11806 	dp_soc_cfg_init(soc);
11807 
11808 	dp_monitor_soc_cfg_init(soc);
11809 	/* Reset/Initialize wbm sg list and flags */
11810 	dp_rx_wbm_sg_list_reset(soc);
11811 
11812 	/* Note: Any SRNG ring initialization should happen only after
11813 	 * Interrupt mode is set and followed by filling up the
11814 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
11815 	 */
11816 	dp_soc_set_interrupt_mode(soc);
11817 	if (soc->cdp_soc.ol_ops->get_con_mode &&
11818 	    soc->cdp_soc.ol_ops->get_con_mode() ==
11819 	    QDF_GLOBAL_MONITOR_MODE)
11820 		is_monitor_mode = true;
11821 
11822 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
11823 	if (num_dp_msi < 0) {
11824 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
11825 		goto fail3;
11826 	}
11827 
11828 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
11829 				     soc->intr_mode, is_monitor_mode);
11830 
11831 	/* initialize WBM_IDLE_LINK ring */
11832 	if (dp_hw_link_desc_ring_init(soc)) {
11833 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
11834 		goto fail3;
11835 	}
11836 
11837 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
11838 
11839 	if (dp_soc_srng_init(soc)) {
11840 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
11841 		goto fail4;
11842 	}
11843 
11844 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
11845 			       htt_get_htc_handle(htt_soc),
11846 			       soc->hal_soc, soc->osdev) == NULL)
11847 		goto fail5;
11848 
11849 	/* Initialize descriptors in TCL Rings */
11850 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
11851 		hal_tx_init_data_ring(soc->hal_soc,
11852 				      soc->tcl_data_ring[i].hal_srng);
11853 	}
11854 
11855 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
11856 		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
11857 		goto fail6;
11858 	}
11859 
11860 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
11861 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
11862 	soc->cce_disable = false;
11863 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
11864 
11865 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
11866 	qdf_spinlock_create(&soc->vdev_map_lock);
11867 	qdf_atomic_init(&soc->num_tx_outstanding);
11868 	qdf_atomic_init(&soc->num_tx_exception);
11869 	soc->num_tx_allowed =
11870 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
11871 
11872 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
11873 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
11874 				CDP_CFG_MAX_PEER_ID);
11875 
11876 		if (ret != -EINVAL)
11877 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
11878 
11879 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
11880 				CDP_CFG_CCE_DISABLE);
11881 		if (ret == 1)
11882 			soc->cce_disable = true;
11883 	}
11884 
11885 	/*
	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
	 * and IPQ5018, as WMAC2 is not present on these platforms.
11888 	 */
11889 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
11890 	    soc->disable_mac2_intr)
11891 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
11892 
11893 	/*
	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018,
	 * as WMAC1 is not present on this platform.
11896 	 */
11897 	if (soc->disable_mac1_intr)
11898 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
11899 
11900 	/* Setup HW REO */
11901 	qdf_mem_zero(&reo_params, sizeof(reo_params));
11902 
11903 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
11904 		/*
11905 		 * Reo ring remap is not required if both radios
11906 		 * are offloaded to NSS
11907 		 */
11908 		if (dp_reo_remap_config(soc,
11909 					&reo_params.remap1,
11910 					&reo_params.remap2))
11911 			reo_params.rx_hash_enabled = true;
11912 		else
11913 			reo_params.rx_hash_enabled = false;
11914 	}
11915 
11916 	/* setup the global rx defrag waitlist */
11917 	TAILQ_INIT(&soc->rx.defrag.waitlist);
11918 	soc->rx.defrag.timeout_ms =
11919 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
11920 	soc->rx.defrag.next_flush_ms = 0;
11921 	soc->rx.flags.defrag_timeout_check =
11922 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
11923 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
11924 
11925 	/*
11926 	 * set the fragment destination ring
11927 	 */
11928 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
11929 
11930 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
11931 		reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE;
11932 
11933 	hal_reo_setup(soc->hal_soc, &reo_params);
11934 
11935 	hal_reo_set_err_dst_remap(soc->hal_soc);
11936 
11937 	soc->features.pn_in_reo_dest = hal_reo_enable_pn_in_dest(soc->hal_soc);
11938 
11939 	qdf_atomic_set(&soc->cmn_init_done, 1);
11940 
11941 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
11942 
11943 	qdf_spinlock_create(&soc->ast_lock);
11944 	dp_peer_mec_spinlock_create(soc);
11945 
11946 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
11947 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
11948 	INIT_RX_HW_STATS_LOCK(soc);
11949 
11950 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
	/* fill the tx/rx cpu ring map */
11952 	dp_soc_set_txrx_ring_map(soc);
11953 
11954 	TAILQ_INIT(&soc->inactive_peer_list);
11955 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
11956 	TAILQ_INIT(&soc->inactive_vdev_list);
11957 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
11958 	qdf_spinlock_create(&soc->htt_stats.lock);
11959 	/* initialize work queue for stats processing */
11960 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
11961 
11962 	dp_reo_desc_deferred_freelist_create(soc);
11963 
11964 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
11965 		qdf_dma_mem_stats_read(),
11966 		qdf_heap_mem_stats_read(),
11967 		qdf_skb_total_mem_stats_read());
11968 
11969 	return soc;
11970 fail6:
11971 	htt_soc_htc_dealloc(soc->htt_handle);
11972 fail5:
11973 	dp_soc_srng_deinit(soc);
11974 fail4:
11975 	dp_hw_link_desc_ring_deinit(soc);
11976 fail3:
11977 	htt_htc_pkt_pool_free(htt_soc);
11978 fail2:
11979 	htt_soc_detach(htt_soc);
11980 fail1:
11981 	soc->arch_ops.txrx_soc_deinit(soc);
11982 fail0:
11983 	return NULL;
11984 }
11985 
11986 /**
11987  * dp_soc_init_wifi3() - Initialize txrx SOC
11988  * @soc: Opaque DP SOC handle
11989  * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
11990  * @hif_handle: Opaque HIF handle
11991  * @htc_handle: Opaque HTC handle
11992  * @qdf_osdev: QDF device (Unused)
11993  * @ol_ops: Offload Operations (Unused)
11994  * @device_id: Device ID (Unused)
11995  *
11996  * Return: DP SOC handle on success, NULL on failure
11997  */
11998 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
11999 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
12000 			struct hif_opaque_softc *hif_handle,
12001 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
12002 			struct ol_if_ops *ol_ops, uint16_t device_id)
12003 {
12004 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
12005 }
12006 
12007 #endif
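
/*
 * Illustrative bring-up order (a minimal sketch; the calling layer is
 * an assumption): allocation and initialization are split, so a soc is
 * first attached and only initialized later, once HTC is available.
 *
 *   struct cdp_soc_t *cdp_soc;
 *
 *   cdp_soc = dp_soc_attach_wifi3(ctrl_psoc, hif_handle, htc_handle,
 *                                 qdf_osdev, ol_ops, device_id);
 *   if (!cdp_soc)
 *           return QDF_STATUS_E_NOMEM;
 *   if (!dp_soc_init_wifi3(cdp_soc, ctrl_psoc, hif_handle, htc_handle,
 *                          qdf_osdev, ol_ops, device_id))
 *           return QDF_STATUS_E_FAILURE;  // caller detaches the soc
 */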
12008 
12009 /*
 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
 *
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * Return: pdev corresponding to the MAC id
12016  */
12017 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
12018 {
12019 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
12020 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
12021 
	/* Typically for MCL, as there is only 1 PDEV */
12023 	return soc->pdev_list[0];
12024 }
12025 
12026 /*
 * dp_is_hw_dbs_enable() - Check if DBS is supported and adjust
 *			   the number of MAC rings accordingly
 * @soc:		DP SoC context
 * @max_mac_rings:	Pointer to the number of MAC rings; clamped
 *			to 1 when DBS is not supported
12030  *
12031  * Return: None
12032  */
12033 void dp_is_hw_dbs_enable(struct dp_soc *soc,
12034 				int *max_mac_rings)
12035 {
	bool dbs_enable = false;

	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
		dbs_enable = soc->cdp_soc.ol_ops->
			is_hw_dbs_2x2_capable((void *)soc->ctrl_psoc);

	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
12042 }
12043 
12044 qdf_export_symbol(dp_is_hw_dbs_enable);
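
/*
 * Example (hypothetical values): a caller seeds max_mac_rings with its
 * configured maximum and lets this helper clamp it on non-DBS targets.
 *
 *   int max_mac_rings = 2;
 *
 *   dp_is_hw_dbs_enable(soc, &max_mac_rings);
 *   // stays 2 only if is_hw_dbs_2x2_capable() reported true,
 *   // otherwise clamped to 1
 */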
12045 
12046 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
12047 /**
12048  * dp_get_cfr_rcc() - get cfr rcc config
12049  * @soc_hdl: Datapath soc handle
12050  * @pdev_id: id of objmgr pdev
12051  *
12052  * Return: true/false based on cfr mode setting
12053  */
12054 static
12055 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
12056 {
12057 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12058 	struct dp_pdev *pdev = NULL;
12059 
12060 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12061 	if (!pdev) {
12062 		dp_err("pdev is NULL");
12063 		return false;
12064 	}
12065 
12066 	return pdev->cfr_rcc_mode;
12067 }
12068 
12069 /**
12070  * dp_set_cfr_rcc() - enable/disable cfr rcc config
12071  * @soc_hdl: Datapath soc handle
12072  * @pdev_id: id of objmgr pdev
12073  * @enable: Enable/Disable cfr rcc mode
12074  *
12075  * Return: none
12076  */
12077 static
12078 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
12079 {
12080 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12081 	struct dp_pdev *pdev = NULL;
12082 
12083 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12084 	if (!pdev) {
12085 		dp_err("pdev is NULL");
12086 		return;
12087 	}
12088 
12089 	pdev->cfr_rcc_mode = enable;
12090 }
12091 
12092 /*
12093  * dp_get_cfr_dbg_stats - Get the debug statistics for CFR
12094  * @soc_hdl: Datapath soc handle
12095  * @pdev_id: id of data path pdev handle
12096  * @cfr_rcc_stats: CFR RCC debug statistics buffer
12097  *
12098  * Return: none
12099  */
12100 static inline void
12101 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12102 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
12103 {
12104 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12105 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12106 
12107 	if (!pdev) {
12108 		dp_err("Invalid pdev");
12109 		return;
12110 	}
12111 
12112 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
12113 		     sizeof(struct cdp_cfr_rcc_stats));
12114 }
12115 
12116 /*
12117  * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR
12118  * @soc_hdl: Datapath soc handle
12119  * @pdev_id: id of data path pdev handle
12120  *
12121  * Return: none
12122  */
12123 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
12124 				   uint8_t pdev_id)
12125 {
12126 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12127 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12128 
12129 	if (!pdev) {
12130 		dp_err("dp pdev is NULL");
12131 		return;
12132 	}
12133 
12134 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
12135 }
12136 #endif
12137 
12138 /**
 * dp_bucket_index() - Return delay bucket index for a measured delay
12140  *
12141  * @delay: delay measured
12142  * @array: array used to index corresponding delay
12143  *
12144  * Return: index
12145  */
12146 static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
12147 {
12148 	uint8_t i = CDP_DELAY_BUCKET_0;
12149 
12150 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
12151 		if (delay >= array[i] && delay <= array[i + 1])
12152 			return i;
12153 	}
12154 
12155 	return (CDP_DELAY_BUCKET_MAX - 1);
12156 }
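
/*
 * Worked example: with the fw-to-hw ranges
 * {0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500}, a delay of
 * 35 ms satisfies 30 <= 35 <= 40 at i = 3 and lands in bucket 3; any
 * delay above 500 ms falls through the loop and is counted in the
 * last bucket (CDP_DELAY_BUCKET_MAX - 1).
 */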
12157 
12158 /**
12159  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
12160  *				type of delay
12161  *
12162  * @pdev: pdev handle
12163  * @delay: delay in ms
12164  * @tid: tid value
12165  * @mode: type of tx delay mode
12166  * @ring_id: ring number
12167  * Return: pointer to cdp_delay_stats structure
12168  */
12169 static struct cdp_delay_stats *
12170 dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
12171 		      uint8_t tid, uint8_t mode, uint8_t ring_id)
12172 {
12173 	uint8_t delay_index = 0;
12174 	struct cdp_tid_tx_stats *tstats =
12175 		&pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
12176 	struct cdp_tid_rx_stats *rstats =
12177 		&pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
12178 	/*
12179 	 * cdp_fw_to_hw_delay_range
12180 	 * Fw to hw delay ranges in milliseconds
12181 	 */
12182 	uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
12183 		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
12184 
12185 	/*
12186 	 * cdp_sw_enq_delay_range
12187 	 * Software enqueue delay ranges in milliseconds
12188 	 */
12189 	uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
12190 		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
12191 
12192 	/*
12193 	 * cdp_intfrm_delay_range
12194 	 * Interframe delay ranges in milliseconds
12195 	 */
12196 	uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
12197 		0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
12198 
12199 	/*
12200 	 * Update delay stats in proper bucket
12201 	 */
12202 	switch (mode) {
12203 	/* Software Enqueue delay ranges */
12204 	case CDP_DELAY_STATS_SW_ENQ:
12205 
12206 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
12207 		tstats->swq_delay.delay_bucket[delay_index]++;
12208 		return &tstats->swq_delay;
12209 
12210 	/* Tx Completion delay ranges */
12211 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
12212 
12213 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
12214 		tstats->hwtx_delay.delay_bucket[delay_index]++;
12215 		return &tstats->hwtx_delay;
12216 
12217 	/* Interframe tx delay ranges */
12218 	case CDP_DELAY_STATS_TX_INTERFRAME:
12219 
12220 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
12221 		tstats->intfrm_delay.delay_bucket[delay_index]++;
12222 		return &tstats->intfrm_delay;
12223 
12224 	/* Interframe rx delay ranges */
12225 	case CDP_DELAY_STATS_RX_INTERFRAME:
12226 
12227 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
12228 		rstats->intfrm_delay.delay_bucket[delay_index]++;
12229 		return &rstats->intfrm_delay;
12230 
12231 	/* Ring reap to indication to network stack */
12232 	case CDP_DELAY_STATS_REAP_STACK:
12233 
12234 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
12235 		rstats->to_stack_delay.delay_bucket[delay_index]++;
12236 		return &rstats->to_stack_delay;
12237 	default:
12238 		dp_debug("Incorrect delay mode: %d", mode);
12239 	}
12240 
12241 	return NULL;
12242 }
12243 
12244 /**
12245  * dp_update_delay_stats() - Update delay statistics in structure
12246  *				and fill min, max and avg delay
12247  *
12248  * @pdev: pdev handle
12249  * @delay: delay in ms
12250  * @tid: tid value
12251  * @mode: type of tx delay mode
 * @ring_id: ring number
12253  * Return: none
12254  */
12255 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
12256 			   uint8_t tid, uint8_t mode, uint8_t ring_id)
12257 {
12258 	struct cdp_delay_stats *dstats = NULL;
12259 
12260 	/*
12261 	 * Delay ranges are different for different delay modes
12262 	 * Get the correct index to update delay bucket
12263 	 */
12264 	dstats = dp_fill_delay_buckets(pdev, delay, tid, mode, ring_id);
12265 	if (qdf_unlikely(!dstats))
12266 		return;
12267 
12268 	if (delay != 0) {
12269 		/*
		 * Compute minimum, average and maximum delay
12272 		 */
12273 		if (delay < dstats->min_delay)
12274 			dstats->min_delay = delay;
12275 
12276 		if (delay > dstats->max_delay)
12277 			dstats->max_delay = delay;
12278 
12279 		/*
12280 		 * Average over delay measured till now
12281 		 */
12282 		if (!dstats->avg_delay)
12283 			dstats->avg_delay = delay;
12284 		else
12285 			dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
12286 	}
12287 }
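
/*
 * Worked example for the running average: avg is folded as
 * avg = (delay + avg) / 2, an exponentially weighted mean that favours
 * recent samples. Starting from avg = 0, samples of 10, 20 and 30 ms
 * give avg = 10, then (20 + 10) / 2 = 15, then (30 + 15) / 2 = 22
 * (integer division).
 */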
12288 
12289 /**
12290  * dp_get_peer_mac_list(): function to get peer mac list of vdev
12291  * @soc: Datapath soc handle
12292  * @vdev_id: vdev id
 * @newmac: Table of the clients' MAC addresses
12294  * @mac_cnt: No. of MACs required
12295  * @limit: Limit the number of clients
12296  *
 * Return: number of clients
12298  */
12299 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
12300 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
12301 			      u_int16_t mac_cnt, bool limit)
12302 {
12303 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
12304 	struct dp_vdev *vdev =
12305 		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
12306 	struct dp_peer *peer;
12307 	uint16_t new_mac_cnt = 0;
12308 
12309 	if (!vdev)
12310 		return new_mac_cnt;
12311 
12312 	if (limit && (vdev->num_peers > mac_cnt))
12313 		return 0;
12314 
12315 	qdf_spin_lock_bh(&vdev->peer_list_lock);
12316 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
12317 		if (peer->bss_peer)
12318 			continue;
12319 		if (new_mac_cnt < mac_cnt) {
12320 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
12321 			new_mac_cnt++;
12322 		}
12323 	}
12324 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
12325 	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
12326 	return new_mac_cnt;
12327 }
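
/*
 * Example usage (a minimal sketch; the buffer size is the caller's
 * choice): fetch up to 8 client MACs of a vdev, skipping the bss peer,
 * without the strict-limit check.
 *
 *   u_int8_t macs[8][QDF_MAC_ADDR_SIZE];
 *   uint16_t n;
 *
 *   n = dp_get_peer_mac_list(soc, vdev_id, macs, 8, false);
 *   // n <= 8; with limit=true the call instead returns 0 whenever
 *   // the vdev holds more than 8 peers
 */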
12328 
12329 #ifdef QCA_SUPPORT_WDS_EXTENDED
12330 uint16_t dp_wds_ext_get_peer_id(ol_txrx_soc_handle soc,
12331 				uint8_t vdev_id,
12332 				uint8_t *mac)
12333 {
12334 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
12335 						       mac, 0, vdev_id,
12336 						       DP_MOD_ID_CDP);
12337 	uint16_t peer_id = HTT_INVALID_PEER;
12338 
12339 	if (!peer) {
12340 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
12341 		return peer_id;
12342 	}
12343 
12344 	peer_id = peer->peer_id;
12345 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12346 	return peer_id;
12347 }
12348 
12349 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
12350 				  uint8_t vdev_id,
12351 				  uint8_t *mac,
12352 				  ol_txrx_rx_fp rx,
12353 				  ol_osif_peer_handle osif_peer)
12354 {
12355 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
12356 						       mac, 0, vdev_id,
12357 						       DP_MOD_ID_CDP);
12358 	QDF_STATUS status = QDF_STATUS_E_INVAL;
12359 
12360 	if (!peer) {
12361 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
12362 		return status;
12363 	}
12364 
	if (rx) {
		if (peer->osif_rx) {
			status = QDF_STATUS_E_ALREADY;
		} else {
			peer->osif_rx = rx;
			status = QDF_STATUS_SUCCESS;
		}
	} else {
		if (peer->osif_rx) {
			peer->osif_rx = NULL;
			status = QDF_STATUS_SUCCESS;
		} else {
			status = QDF_STATUS_E_ALREADY;
		}
	}
12380 
12381 	peer->wds_ext.osif_peer = osif_peer;
12382 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12383 
12384 	return status;
12385 }
12386 #endif /* QCA_SUPPORT_WDS_EXTENDED */
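
/*
 * Illustrative WDS-ext registration (a minimal sketch; osif_rx_fn and
 * osif_peer are hypothetical caller objects): the osif layer resolves
 * the peer id for a client MAC and installs its per-peer rx handler
 * once; a second install without an intervening NULL reset returns
 * QDF_STATUS_E_ALREADY.
 *
 *   uint16_t peer_id = dp_wds_ext_get_peer_id(soc, vdev_id, mac);
 *
 *   if (peer_id != HTT_INVALID_PEER)
 *           status = dp_wds_ext_set_peer_rx(soc, vdev_id, mac,
 *                                           osif_rx_fn, osif_peer);
 */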
12387 
12388 /**
12389  * dp_pdev_srng_deinit() - de-initialize all pdev srng ring including
 * dp_pdev_srng_deinit() - de-initialize all pdev srng rings including
12391  * @pdev: Datapath pdev handle
12392  *
12393  */
12394 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
12395 {
12396 	struct dp_soc *soc = pdev->soc;
12397 	uint8_t i;
12398 
12399 	dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], RXDMA_BUF,
12400 		       pdev->lmac_id);
12401 
12402 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
12403 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
12404 		dp_ipa_deinit_alt_tx_ring(soc);
12405 	}
12406 
12407 	if (!soc->rxdma2sw_rings_not_supported) {
12408 		for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12409 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
12410 								 pdev->pdev_id);
12411 
12412 			wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].
12413 							base_vaddr_unaligned,
12414 					     soc->rxdma_err_dst_ring[lmac_id].
12415 								alloc_size,
12416 					     soc->ctrl_psoc,
12417 					     WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
12418 					     "rxdma_err_dst");
12419 			dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
12420 				       RXDMA_DST, lmac_id);
12421 		}
12422 	}
12425 }
12426 
12427 /**
12428  * dp_pdev_srng_init() - initialize all pdev srng rings including
12429  *			   monitor rings
12430  * @pdev: Datapath pdev handle
12431  *
12432  * return: QDF_STATUS_SUCCESS on success
12433  *	   QDF_STATUS_E_NOMEM on failure
12434  */
12435 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
12436 {
12437 	struct dp_soc *soc = pdev->soc;
12438 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12439 	uint32_t i;
12440 
12441 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12442 
12443 	if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
12444 			 RXDMA_BUF, 0, pdev->lmac_id)) {
12445 		dp_init_err("%pK: dp_srng_init failed rx refill ring", soc);
12446 		goto fail1;
12447 	}
12448 
12449 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
12450 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
12451 			goto fail1;
12452 
12453 		if (dp_ipa_init_alt_tx_ring(soc))
12454 			goto fail1;
12455 	}
12456 
12457 	/* LMAC RxDMA to SW Rings configuration */
12458 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
12459 		/* Only valid for MCL */
12460 		pdev = soc->pdev_list[0];
12461 
12462 	if (!soc->rxdma2sw_rings_not_supported) {
12463 		for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12464 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
12465 								 pdev->pdev_id);
12466 			struct dp_srng *srng =
12467 				&soc->rxdma_err_dst_ring[lmac_id];
12468 
12469 			if (srng->hal_srng)
12470 				continue;
12471 
12472 			if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
12473 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
12474 					    soc);
12475 				goto fail1;
12476 			}
12477 			wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].
12478 						base_vaddr_unaligned,
12479 					  soc->rxdma_err_dst_ring[lmac_id].
12480 						alloc_size,
12481 					  soc->ctrl_psoc,
12482 					  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
12483 					  "rxdma_err_dst");
12484 		}
12485 	}
12486 	return QDF_STATUS_SUCCESS;
12487 
12488 fail1:
12489 	dp_pdev_srng_deinit(pdev);
12490 	return QDF_STATUS_E_NOMEM;
12491 }
12492 
12493 /**
12494  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
 * @pdev: Datapath pdev handle
12496  *
12497  */
12498 static void dp_pdev_srng_free(struct dp_pdev *pdev)
12499 {
12500 	struct dp_soc *soc = pdev->soc;
12501 	uint8_t i;
12502 
12503 	dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
12504 
12505 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
12506 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
12507 		dp_ipa_free_alt_tx_ring(soc);
12508 	}
12509 
12510 	if (!soc->rxdma2sw_rings_not_supported) {
12511 		for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12512 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
12513 								 pdev->pdev_id);
12514 
12515 			dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
12516 		}
12517 	}
12518 }
12519 
12520 /**
12521  * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
12522  *			  monitor rings
 * @pdev: Datapath pdev handle
12524  *
12525  * return: QDF_STATUS_SUCCESS on success
12526  *	   QDF_STATUS_E_NOMEM on failure
12527  */
12528 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
12529 {
12530 	struct dp_soc *soc = pdev->soc;
12531 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12532 	uint32_t ring_size;
12533 	uint32_t i;
12534 
12535 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12536 
12537 	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
12538 	if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
12539 			  RXDMA_BUF, ring_size, 0)) {
12540 		dp_init_err("%pK: dp_srng_alloc failed rx refill ring", soc);
12541 		goto fail1;
12542 	}
12543 
12544 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
12545 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
12546 			goto fail1;
12547 
12548 		if (dp_ipa_alloc_alt_tx_ring(soc))
12549 			goto fail1;
12550 	}
12551 
12552 	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
12553 	/* LMAC RxDMA to SW Rings configuration */
12554 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
12555 		/* Only valid for MCL */
12556 		pdev = soc->pdev_list[0];
12557 
12558 	if (!soc->rxdma2sw_rings_not_supported) {
12559 		for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12560 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
12561 								 pdev->pdev_id);
12562 			struct dp_srng *srng =
12563 				&soc->rxdma_err_dst_ring[lmac_id];
12564 
12565 			if (srng->base_vaddr_unaligned)
12566 				continue;
12567 
12568 			if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
12569 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
12570 					    soc);
12571 				goto fail1;
12572 			}
12573 		}
12574 	}
12575 
12576 	return QDF_STATUS_SUCCESS;
12577 fail1:
12578 	dp_pdev_srng_free(pdev);
12579 	return QDF_STATUS_E_NOMEM;
12580 }
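
/*
 * Note: the pdev srng helpers are strictly paired. Memory is carved out
 * by dp_pdev_srng_alloc() and handed to HAL by dp_pdev_srng_init();
 * teardown mirrors this as dp_pdev_srng_deinit() followed by
 * dp_pdev_srng_free(). A sketch of the expected ordering (error
 * handling elided):
 *
 *	dp_pdev_srng_alloc(pdev);
 *	dp_pdev_srng_init(pdev);
 *	...
 *	dp_pdev_srng_deinit(pdev);
 *	dp_pdev_srng_free(pdev);
 */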
12581 
12582 /**
12583  * dp_soc_srng_deinit() - de-initialize soc srng rings
12584  * @soc: Datapath soc handle
12585  *
12586  */
12587 static void dp_soc_srng_deinit(struct dp_soc *soc)
12588 {
12589 	uint32_t i;
12590 
12591 	if (soc->arch_ops.txrx_soc_srng_deinit)
12592 		soc->arch_ops.txrx_soc_srng_deinit(soc);
12593 
12594 	/* Free the ring memories */
12595 	/* Common rings */
12596 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
12597 			     soc->wbm_desc_rel_ring.alloc_size,
12598 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
12599 			     "wbm_desc_rel_ring");
12600 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
12601 
12602 	/* Tx data rings */
12603 	for (i = 0; i < soc->num_tcl_data_rings; i++)
12604 		dp_deinit_tx_pair_by_index(soc, i);
12605 
12606 	/* TCL command and status rings */
12607 	if (soc->init_tcl_cmd_cred_ring) {
12608 		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
12609 				     soc->tcl_cmd_credit_ring.alloc_size,
12610 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
				     "tcl_cmd_credit_ring");
12612 		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
12613 			       TCL_CMD_CREDIT, 0);
12614 	}
12615 
12616 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
12617 			     soc->tcl_status_ring.alloc_size,
12618 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
			     "tcl_status_ring");
12620 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
12621 
12622 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
12623 		/* TODO: Get number of rings and ring sizes
12624 		 * from wlan_cfg
12625 		 */
12626 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
12627 				     soc->reo_dest_ring[i].alloc_size,
12628 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
12629 				     "reo_dest_ring");
12630 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
12631 	}
12632 
12633 	/* REO reinjection ring */
12634 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
12635 			     soc->reo_reinject_ring.alloc_size,
12636 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
12637 			     "reo_reinject_ring");
12638 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
12639 
12640 	/* Rx release ring */
12641 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
12642 			     soc->rx_rel_ring.alloc_size,
12643 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
12644 			     "reo_release_ring");
12645 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
12646 
12647 	/* Rx exception ring */
12648 	/* TODO: Better to store ring_type and ring_num in
12649 	 * dp_srng during setup
12650 	 */
12651 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
12652 			     soc->reo_exception_ring.alloc_size,
12653 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
12654 			     "reo_exception_ring");
12655 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
12656 
12657 	/* REO command and status rings */
12658 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
12659 			     soc->reo_cmd_ring.alloc_size,
12660 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
12661 			     "reo_cmd_ring");
12662 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
12663 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
12664 			     soc->reo_status_ring.alloc_size,
12665 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
12666 			     "reo_status_ring");
12667 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
12668 }
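
/*
 * Note on ordering: each wlan_minidump_remove() above runs before the
 * corresponding dp_srng_deinit(), so the minidump table is never left
 * pointing at ring memory that has already been torn down.
 */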
12669 
12670 /**
12671  * dp_soc_srng_init() - Initialize soc level srng rings
12672  * @soc: Datapath soc handle
12673  *
12674  * return: QDF_STATUS_SUCCESS on success
12675  *	   QDF_STATUS_E_FAILURE on failure
12676  */
12677 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
12678 {
12679 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12680 	uint8_t i;
12681 
12682 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12683 
12684 	dp_enable_verbose_debug(soc);
12685 
12686 	/* WBM descriptor release ring */
12687 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
12688 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
12689 		goto fail1;
12690 	}
12691 
12692 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
12693 			  soc->wbm_desc_rel_ring.alloc_size,
12694 			  soc->ctrl_psoc,
12695 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
12696 			  "wbm_desc_rel_ring");
12697 
12698 	if (soc->init_tcl_cmd_cred_ring) {
12699 		/* TCL command and status rings */
12700 		if (dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
12701 				 TCL_CMD_CREDIT, 0, 0)) {
12702 			dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
12703 			goto fail1;
12704 		}
12705 
12706 		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
12707 				  soc->tcl_cmd_credit_ring.alloc_size,
12708 				  soc->ctrl_psoc,
12709 				  WLAN_MD_DP_SRNG_TCL_CMD,
				  "tcl_cmd_credit_ring");
12711 	}
12712 
12713 	if (dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0)) {
12714 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
12715 		goto fail1;
12716 	}
12717 
12718 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
12719 			  soc->tcl_status_ring.alloc_size,
12720 			  soc->ctrl_psoc,
12721 			  WLAN_MD_DP_SRNG_TCL_STATUS,
			  "tcl_status_ring");
12723 
12724 	/* REO reinjection ring */
12725 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
12726 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
12727 		goto fail1;
12728 	}
12729 
12730 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
12731 			  soc->reo_reinject_ring.alloc_size,
12732 			  soc->ctrl_psoc,
12733 			  WLAN_MD_DP_SRNG_REO_REINJECT,
12734 			  "reo_reinject_ring");
12735 
12736 	/* Rx release ring */
12737 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
12738 			 WBM2SW_REL_ERR_RING_NUM, 0)) {
12739 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
12740 		goto fail1;
12741 	}
12742 
12743 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
12744 			  soc->rx_rel_ring.alloc_size,
12745 			  soc->ctrl_psoc,
12746 			  WLAN_MD_DP_SRNG_RX_REL,
12747 			  "reo_release_ring");
12748 
12749 	/* Rx exception ring */
12750 	if (dp_srng_init(soc, &soc->reo_exception_ring,
12751 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
12752 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
12753 		goto fail1;
12754 	}
12755 
12756 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
12757 			  soc->reo_exception_ring.alloc_size,
12758 			  soc->ctrl_psoc,
12759 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
12760 			  "reo_exception_ring");
12761 
12762 	/* REO command and status rings */
12763 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
12764 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
12765 		goto fail1;
12766 	}
12767 
12768 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
12769 			  soc->reo_cmd_ring.alloc_size,
12770 			  soc->ctrl_psoc,
12771 			  WLAN_MD_DP_SRNG_REO_CMD,
12772 			  "reo_cmd_ring");
12773 
12774 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
12775 	TAILQ_INIT(&soc->rx.reo_cmd_list);
12776 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
12777 
12778 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
12779 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
12780 		goto fail1;
12781 	}
12782 
12783 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
12784 			  soc->reo_status_ring.alloc_size,
12785 			  soc->ctrl_psoc,
12786 			  WLAN_MD_DP_SRNG_REO_STATUS,
12787 			  "reo_status_ring");
12788 
12789 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
12790 		if (dp_init_tx_ring_pair_by_index(soc, i))
12791 			goto fail1;
12792 	}
12793 
12794 	dp_create_ext_stats_event(soc);
12795 
12796 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
12797 		/* Initialize REO destination ring */
12798 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
			dp_init_err("%pK: dp_srng_init failed for reo_dest_ring", soc);
12800 			goto fail1;
12801 		}
12802 
12803 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
12804 				  soc->reo_dest_ring[i].alloc_size,
12805 				  soc->ctrl_psoc,
12806 				  WLAN_MD_DP_SRNG_REO_DEST,
12807 				  "reo_dest_ring");
12808 	}
12809 
12810 	if (soc->arch_ops.txrx_soc_srng_init) {
12811 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
12812 			dp_init_err("%pK: dp_srng_init failed for arch rings",
12813 				    soc);
12814 			goto fail1;
12815 		}
12816 	}
12817 
12818 	return QDF_STATUS_SUCCESS;
12819 fail1:
12820 	/*
12821 	 * Cleanup will be done as part of soc_detach, which will
12822 	 * be called on pdev attach failure
12823 	 */
12824 	dp_soc_srng_deinit(soc);
12825 	return QDF_STATUS_E_FAILURE;
12826 }
12827 
12828 /**
12829  * dp_soc_srng_free() - free soc level srng rings
12830  * @soc: Datapath soc handle
12831  *
12832  */
12833 static void dp_soc_srng_free(struct dp_soc *soc)
12834 {
12835 	uint32_t i;
12836 
12837 	if (soc->arch_ops.txrx_soc_srng_free)
12838 		soc->arch_ops.txrx_soc_srng_free(soc);
12839 
12840 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
12841 
12842 	for (i = 0; i < soc->num_tcl_data_rings; i++)
12843 		dp_free_tx_ring_pair_by_index(soc, i);
12844 
12845 	if (soc->init_tcl_cmd_cred_ring)
12846 		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
12847 
12848 	dp_srng_free(soc, &soc->tcl_status_ring);
12849 
12850 	for (i = 0; i < soc->num_reo_dest_rings; i++)
12851 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
12852 
12853 	dp_srng_free(soc, &soc->reo_reinject_ring);
12854 	dp_srng_free(soc, &soc->rx_rel_ring);
12855 
12856 	dp_srng_free(soc, &soc->reo_exception_ring);
12857 
12858 	dp_srng_free(soc, &soc->reo_cmd_ring);
12859 	dp_srng_free(soc, &soc->reo_status_ring);
12860 }
12861 
12862 /**
12863  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
12864  * @soc: Datapath soc handle
12865  *
12866  * return: QDF_STATUS_SUCCESS on success
12867  *	   QDF_STATUS_E_NOMEM on failure
12868  */
12869 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
12870 {
12871 	uint32_t entries;
12872 	uint32_t i;
12873 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12874 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
	uint32_t reo_dst_ring_size;
12876 
12877 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12878 
12879 	/* sw2wbm link descriptor release ring */
12880 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
12881 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
12882 			  entries, 0)) {
12883 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
12884 		goto fail1;
12885 	}
12886 
12887 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
12888 	/* TCL command and status rings */
12889 	if (soc->init_tcl_cmd_cred_ring) {
12890 		if (dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
12891 				  TCL_CMD_CREDIT, entries, 0)) {
12892 			dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
12893 			goto fail1;
12894 		}
12895 	}
12896 
12897 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
12898 	if (dp_srng_alloc(soc, &soc->tcl_status_ring, TCL_STATUS, entries,
12899 			  0)) {
12900 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
12901 		goto fail1;
12902 	}
12903 
12904 	/* REO reinjection ring */
12905 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
12906 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
12907 			  entries, 0)) {
12908 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
12909 		goto fail1;
12910 	}
12911 
12912 	/* Rx release ring */
12913 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
12914 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
12915 			  entries, 0)) {
12916 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
12917 		goto fail1;
12918 	}
12919 
12920 	/* Rx exception ring */
12921 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
12922 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
12923 			  entries, 0)) {
12924 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
12925 		goto fail1;
12926 	}
12927 
12928 	/* REO command and status rings */
12929 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
12930 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
12931 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
12932 		goto fail1;
12933 	}
12934 
12935 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
12936 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
12937 			  entries, 0)) {
12938 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
12939 		goto fail1;
12940 	}
12941 
12944 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
12945 
12946 	/* Disable cached desc if NSS offload is enabled */
12947 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
12948 		cached = 0;
12949 
12950 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
12951 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
12952 			goto fail1;
12953 	}
12954 
12955 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
12956 		/* Setup REO destination ring */
12957 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
12958 				  reo_dst_ring_size, cached)) {
12959 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
12960 			goto fail1;
12961 		}
12962 	}
12963 
12964 	if (soc->arch_ops.txrx_soc_srng_alloc) {
12965 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
12966 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
12967 				    soc);
12968 			goto fail1;
12969 		}
12970 	}
12971 
12972 	return QDF_STATUS_SUCCESS;
12973 
12974 fail1:
12975 	dp_soc_srng_free(soc);
12976 	return QDF_STATUS_E_NOMEM;
12977 }
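
/*
 * Design note: every alloc/init routine above unwinds a partial setup
 * through its matching free/deinit routine on the fail1 path. The srng
 * helpers are expected to tolerate rings that were never allocated or
 * initialized, so a single cleanup call is safe regardless of how far
 * setup progressed.
 */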
12978 
12979 static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
12980 {
12981 	dp_init_info("DP soc Dump for Target = %d", target_type);
12982 	dp_init_info("ast_override_support = %d, da_war_enabled = %d,",
	dp_init_info("ast_override_support = %d, da_war_enabled = %d",
12984 
12985 	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
12986 }
12987 
12988 /**
12989  * dp_soc_cfg_init() - initialize target specific configuration
12990  *		       during dp_soc_init
12991  * @soc: dp soc handle
12992  */
12993 static void dp_soc_cfg_init(struct dp_soc *soc)
12994 {
12995 	uint32_t target_type;
12996 
12997 	target_type = hal_get_target_type(soc->hal_soc);
12998 	switch (target_type) {
12999 	case TARGET_TYPE_QCA6290:
13000 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
13001 					       REO_DST_RING_SIZE_QCA6290);
13002 		soc->ast_override_support = 1;
13003 		soc->da_war_enabled = false;
13004 		break;
13005 	case TARGET_TYPE_QCA6390:
13006 	case TARGET_TYPE_QCA6490:
13007 	case TARGET_TYPE_QCA6750:
13008 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
13009 					       REO_DST_RING_SIZE_QCA6290);
13010 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
13011 		soc->ast_override_support = 1;
13012 		if (soc->cdp_soc.ol_ops->get_con_mode &&
13013 		    soc->cdp_soc.ol_ops->get_con_mode() ==
13014 		    QDF_GLOBAL_MONITOR_MODE) {
13015 			int int_ctx;
13016 
13017 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
13018 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
13019 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
13020 			}
13021 		}
13022 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
13023 		break;
13024 	case TARGET_TYPE_WCN7850:
13025 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
13026 					       REO_DST_RING_SIZE_QCA6290);
13027 		soc->ast_override_support = 1;
13028 
13029 		if (soc->cdp_soc.ol_ops->get_con_mode &&
13030 		    soc->cdp_soc.ol_ops->get_con_mode() ==
13031 		    QDF_GLOBAL_MONITOR_MODE) {
13032 			int int_ctx;
13033 
13034 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
13035 			     int_ctx++) {
13036 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
13037 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
13038 			}
13039 		}
13040 
13041 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
13042 		break;
13043 	case TARGET_TYPE_QCA8074:
13044 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
13045 		soc->da_war_enabled = true;
13046 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
13047 		break;
13048 	case TARGET_TYPE_QCA8074V2:
13049 	case TARGET_TYPE_QCA6018:
13050 	case TARGET_TYPE_QCA9574:
13051 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
13052 		soc->ast_override_support = 1;
13053 		soc->per_tid_basize_max_tid = 8;
13054 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
13055 		soc->da_war_enabled = false;
13056 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
13057 		break;
13058 	case TARGET_TYPE_QCN9000:
13059 		soc->ast_override_support = 1;
13060 		soc->da_war_enabled = false;
13061 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
13062 		soc->per_tid_basize_max_tid = 8;
13063 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
13064 		soc->lmac_polled_mode = 0;
13065 		soc->wbm_release_desc_rx_sg_support = 1;
13066 		break;
13067 	case TARGET_TYPE_QCA5018:
13068 	case TARGET_TYPE_QCN6122:
13069 		soc->ast_override_support = 1;
13070 		soc->da_war_enabled = false;
13071 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
13072 		soc->per_tid_basize_max_tid = 8;
13073 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
13074 		soc->disable_mac1_intr = 1;
13075 		soc->disable_mac2_intr = 1;
13076 		soc->wbm_release_desc_rx_sg_support = 1;
13077 		break;
13078 	case TARGET_TYPE_QCN9224:
13079 		soc->ast_override_support = 1;
13080 		soc->da_war_enabled = false;
13081 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
13082 		soc->per_tid_basize_max_tid = 8;
13083 		soc->wbm_release_desc_rx_sg_support = 1;
13084 		soc->rxdma2sw_rings_not_supported = 1;
13086 		break;
13087 	default:
13088 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
13089 		qdf_assert_always(0);
13090 		break;
13091 	}
13092 	dp_soc_cfg_dump(soc, target_type);
13093 }
13094 
13095 /**
13096  * dp_soc_cfg_attach() - set target specific configuration in
13097  *			 dp soc cfg.
13098  * @soc: dp soc handle
13099  */
13100 static void dp_soc_cfg_attach(struct dp_soc *soc)
13101 {
13102 	int target_type;
13103 	int nss_cfg = 0;
13104 
13105 	target_type = hal_get_target_type(soc->hal_soc);
13106 	switch (target_type) {
13107 	case TARGET_TYPE_QCA6290:
13108 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
13109 					       REO_DST_RING_SIZE_QCA6290);
13110 		break;
13111 	case TARGET_TYPE_QCA6390:
13112 	case TARGET_TYPE_QCA6490:
13113 	case TARGET_TYPE_QCA6750:
13114 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
13115 					       REO_DST_RING_SIZE_QCA6290);
13116 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
13117 		break;
13118 	case TARGET_TYPE_WCN7850:
13119 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
13120 					       REO_DST_RING_SIZE_QCA6290);
13121 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
13122 		break;
13123 	case TARGET_TYPE_QCA8074:
13124 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
13125 		break;
13126 	case TARGET_TYPE_QCA8074V2:
13127 	case TARGET_TYPE_QCA6018:
13128 	case TARGET_TYPE_QCA9574:
13129 	case TARGET_TYPE_QCN6122:
13130 	case TARGET_TYPE_QCA5018:
13131 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
13132 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
13133 		break;
13134 	case TARGET_TYPE_QCN9000:
13135 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
13136 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
13137 		break;
13138 	case TARGET_TYPE_QCN9224:
13139 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
13140 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
13141 		break;
13142 	default:
13143 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
13144 		qdf_assert_always(0);
13145 		break;
13146 	}
13147 
13148 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
13149 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
13150 
13151 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
13152 
13153 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
13154 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
13155 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
13156 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
13157 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
13158 		soc->init_tcl_cmd_cred_ring = false;
13159 		soc->num_tcl_data_rings =
13160 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
13161 		soc->num_reo_dest_rings =
13162 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
13163 
13164 	} else {
13165 		soc->init_tcl_cmd_cred_ring = true;
13166 		soc->num_tcl_data_rings =
13167 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
13168 		soc->num_reo_dest_rings =
13169 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
13170 	}
13171 
13172 	soc->arch_ops.soc_cfg_attach(soc);
13173 }
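
/*
 * Note: when the radio is NSS-offloaded (nss_cfg non-zero), the host
 * datapath gives up its tx descriptor pools (sizes forced to zero),
 * skips the TCL command/credit ring, and takes its TCL/REO ring counts
 * from the NSS-specific cfg getters instead of the host defaults.
 */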
13174 
13175 static inline  void dp_pdev_set_default_reo(struct dp_pdev *pdev)
13176 {
13177 	struct dp_soc *soc = pdev->soc;
13178 
13179 	switch (pdev->pdev_id) {
13180 	case 0:
13181 		pdev->reo_dest =
13182 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
13183 		break;
13184 
13185 	case 1:
13186 		pdev->reo_dest =
13187 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
13188 		break;
13189 
13190 	case 2:
13191 		pdev->reo_dest =
13192 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
13193 		break;
13194 
13195 	default:
13196 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
13197 			    soc, pdev->pdev_id);
13198 		break;
13199 	}
13200 }
13201 
13202 static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
13203 				      HTC_HANDLE htc_handle,
13204 				      qdf_device_t qdf_osdev,
13205 				      uint8_t pdev_id)
13206 {
13207 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
13208 	int nss_cfg;
13209 	void *sojourn_buf;
13210 	QDF_STATUS ret;
13211 
13212 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
13213 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
13214 
13215 	soc_cfg_ctx = soc->wlan_cfg_ctx;
13216 	pdev->soc = soc;
13217 	pdev->pdev_id = pdev_id;
13218 
13219 	/*
13220 	 * Variable to prevent double pdev deinitialization during
	 * radio detach execution, i.e. in the absence of any vdev.
13222 	 */
13223 	pdev->pdev_deinit = 0;
13224 
13225 	if (dp_wdi_event_attach(pdev)) {
13226 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "dp_wdi_event_attach failed");
13228 		goto fail0;
13229 	}
13230 
13231 	if (dp_pdev_srng_init(pdev)) {
13232 		dp_init_err("%pK: Failed to initialize pdev srng rings", soc);
13233 		goto fail1;
13234 	}
13235 
13236 	/* Initialize descriptors in TCL Rings used by IPA */
13237 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
13238 		hal_tx_init_data_ring(soc->hal_soc,
13239 				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
13240 		dp_ipa_hal_tx_init_alt_data_ring(soc);
13241 	}
13242 
13243 	/*
13244 	 * Initialize command/credit ring descriptor
13245 	 * Command/CREDIT ring also used for sending DATA cmds
13246 	 */
13247 	if (soc->init_tcl_cmd_cred_ring)
13248 		hal_tx_init_cmd_credit_ring(soc->hal_soc,
13249 					    soc->tcl_cmd_credit_ring.hal_srng);
13250 
13251 	dp_tx_pdev_init(pdev);
	/*
	 * Allocate the invalid peer placeholder, used while processing
	 * rx packets whose peer lookup fails.
	 */
13256 	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
13257 
13258 	if (!pdev->invalid_peer) {
13259 		dp_init_err("%pK: Invalid peer memory allocation failed", soc);
13260 		goto fail2;
13261 	}
13262 
13263 	/*
13264 	 * set nss pdev config based on soc config
13265 	 */
13266 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
13267 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
13268 					 (nss_cfg & (1 << pdev_id)));
13269 	pdev->target_pdev_id =
13270 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
13271 
13272 	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
13273 	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
13274 		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
13275 	}
13276 
13277 	/* Reset the cpu ring map if radio is NSS offloaded */
13278 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
13279 		dp_soc_reset_cpu_ring_map(soc);
13280 		dp_soc_reset_intr_mask(soc);
13281 	}
13282 
13283 	TAILQ_INIT(&pdev->vdev_list);
13284 	qdf_spinlock_create(&pdev->vdev_list_lock);
13285 	pdev->vdev_count = 0;
13286 
13287 	qdf_spinlock_create(&pdev->tx_mutex);
13288 	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
13289 	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
13290 	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;
13291 
13292 	DP_STATS_INIT(pdev);
13293 
13294 	dp_local_peer_id_pool_init(pdev);
13295 
13296 	dp_dscp_tid_map_setup(pdev);
13297 	dp_pcp_tid_map_setup(pdev);
13298 
13299 	/* set the reo destination during initialization */
13300 	dp_pdev_set_default_reo(pdev);
13301 
13302 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
13303 
13304 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
13305 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
13306 			      TRUE);
13307 
13308 	if (!pdev->sojourn_buf) {
13309 		dp_init_err("%pK: Failed to allocate sojourn buf", soc);
13310 		goto fail3;
13311 	}
13312 	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
13313 	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
13314 
13315 	qdf_event_create(&pdev->fw_peer_stats_event);
13316 
13317 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
13318 
13319 	if (dp_rxdma_ring_setup(soc, pdev)) {
13320 		dp_init_err("%pK: RXDMA ring config failed", soc);
13321 		goto fail4;
13322 	}
13323 
13324 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
13325 		goto fail5;
13326 
13327 	if (dp_ipa_ring_resource_setup(soc, pdev))
13328 		goto fail6;
13329 
13330 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
13331 		dp_init_err("%pK: dp_ipa_uc_attach failed", soc);
13332 		goto fail6;
13333 	}
13334 
13335 	ret = dp_rx_fst_attach(soc, pdev);
13336 	if ((ret != QDF_STATUS_SUCCESS) &&
13337 	    (ret != QDF_STATUS_E_NOSUPPORT)) {
13338 		dp_init_err("%pK: RX Flow Search Table attach failed: pdev %d err %d",
13339 			    soc, pdev_id, ret);
13340 		goto fail7;
13341 	}
13342 
13343 	if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
13344 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
13345 			  FL("dp_pdev_bkp_stats_attach failed"));
13346 		goto fail8;
13347 	}
13348 
13349 	if (dp_monitor_pdev_init(pdev)) {
		dp_init_err("%pK: dp_monitor_pdev_init failed", soc);
13351 		goto fail9;
13352 	}
13353 
13354 	/* initialize sw rx descriptors */
13355 	dp_rx_pdev_desc_pool_init(pdev);
13356 	/* allocate buffers and replenish the RxDMA ring */
13357 	dp_rx_pdev_buffers_alloc(pdev);
13358 
13359 	dp_init_tso_stats(pdev);
13360 
13361 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
13362 		qdf_dma_mem_stats_read(),
13363 		qdf_heap_mem_stats_read(),
13364 		qdf_skb_total_mem_stats_read());
13365 
13366 	return QDF_STATUS_SUCCESS;
13367 fail9:
13368 	dp_pdev_bkp_stats_detach(pdev);
13369 fail8:
13370 	dp_rx_fst_detach(soc, pdev);
13371 fail7:
13372 	dp_ipa_uc_detach(soc, pdev);
13373 fail6:
13374 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
13375 fail5:
13376 	dp_rxdma_ring_cleanup(soc, pdev);
13377 fail4:
13378 	qdf_nbuf_free(pdev->sojourn_buf);
13379 fail3:
13380 	qdf_spinlock_destroy(&pdev->tx_mutex);
13381 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
13382 	qdf_mem_free(pdev->invalid_peer);
13383 fail2:
13384 	dp_pdev_srng_deinit(pdev);
13385 fail1:
13386 	dp_wdi_event_detach(pdev);
13387 fail0:
13388 	return QDF_STATUS_E_FAILURE;
13389 }
13390 
/*
 * dp_pdev_init_wifi3() - Init txrx pdev
 * @txrx_soc: Datapath soc handle
 * @htc_handle: HTC handle for host-target interface
 * @qdf_osdev: QDF OS device
 * @pdev_id: pdev id
 *
 * Return: QDF_STATUS
 */
13399 static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
13400 				     HTC_HANDLE htc_handle,
13401 				     qdf_device_t qdf_osdev,
13402 				     uint8_t pdev_id)
13403 {
13404 	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
13405 }
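
/*
 * Illustrative sketch (assumes the cdp_pdev_init() wrapper from
 * cdp_txrx_cmn.h): control-path code reaches dp_pdev_init_wifi3()
 * through the common soc ops rather than calling it directly.
 *
 *	if (cdp_pdev_init(cdp_soc, htc_handle, qdf_osdev, pdev_id) !=
 *	    QDF_STATUS_SUCCESS)
 *		... handle pdev init failure ...
 */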
13406 
13407