xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_types.h (revision b62151f8dd0743da724a4533988c78d2c7385d4f)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _DP_TYPES_H_
21 #define _DP_TYPES_H_
22 
23 #include <qdf_types.h>
24 #include <qdf_nbuf.h>
25 #include <qdf_lock.h>
26 #include <qdf_atomic.h>
27 #include <qdf_util.h>
28 #include <qdf_list.h>
29 #include <qdf_lro.h>
30 #include <queue.h>
31 #include <htt_common.h>
32 #include <htt.h>
33 #include <htt_stats.h>
34 #include <cdp_txrx_cmn.h>
35 #ifdef DP_MOB_DEFS
36 #include <cds_ieee80211_common.h>
37 #endif
38 #include <wdi_event_api.h>    /* WDI subscriber event list */
39 
40 #include "hal_hw_headers.h"
41 #include <hal_tx.h>
42 #include <hal_reo.h>
43 #include "wlan_cfg.h"
44 #include "hal_rx.h"
45 #include <hal_api.h>
46 #include <hal_api_mon.h>
48 
49 #define dp_init_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_INIT, params)
50 #define dp_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)
51 #define dp_init_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_INIT, params)
52 #define dp_init_info(params...) \
53 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_INIT, ## params)
54 #define dp_init_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_INIT, params)
55 
56 #define dp_vdev_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_VDEV, params)
57 #define dp_vdev_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_VDEV, params)
58 #define dp_vdev_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_VDEV, params)
59 #define dp_vdev_info(params...) \
60 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_VDEV, ## params)
61 #define dp_vdev_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_VDEV, params)
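
/*
 * Example (illustrative only, not part of the original header): the wrappers
 * above bind the DP_INIT/DP_VDEV module ids and severity, so callers pass
 * only the format string and arguments, e.g.
 *
 *	dp_init_err("%pK: tx descriptor pool allocation failed", soc);
 */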
62 
63 #define MAX_BW 8
64 #define MAX_RETRIES 4
65 #define MAX_RECEPTION_TYPES 4
66 
67 #define MINIDUMP_STR_SIZE 25
68 #include <dp_umac_reset.h>
69 
70 #define REPT_MU_MIMO 1
71 #define REPT_MU_OFDMA_MIMO 3
72 #define DP_VO_TID 6
73  /** MAX TID MAPS AVAILABLE PER PDEV */
74 #define DP_MAX_TID_MAPS 16
75 /** pad DSCP_TID_MAP_MAX with 6 to fix oob issue */
76 #define DSCP_TID_MAP_MAX (64 + 6)
77 #define DP_IP_DSCP_SHIFT 2
78 #define DP_IP_DSCP_MASK 0x3f
79 #define DP_FC0_SUBTYPE_QOS 0x80
80 #define DP_QOS_TID 0x0f
81 #define DP_IPV6_PRIORITY_SHIFT 20
82 #define MAX_MON_LINK_DESC_BANKS 2
83 #define DP_VDEV_ALL CDP_VDEV_ALL
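
/*
 * Example (illustrative only, not part of the original header): extracting
 * the DSCP code point from an IPv4 TOS byte with the DP_IP_DSCP_* values
 * above; the variable names are hypothetical.
 *
 *	uint8_t tos  = ip_hdr->tos;
 *	uint8_t dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
 *
 * DSCP occupies the upper six bits of the TOS byte, so the shift drops the
 * two ECN bits and the mask keeps the remaining six bits.
 */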
84 
85 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
86 #define WLAN_DP_RESET_MON_BUF_RING_FILTER
87 #define MAX_TXDESC_POOLS 6
88 #else
89 #define MAX_TXDESC_POOLS 4
90 #endif
91 
92 /* Max no of descriptors to handle special frames like EAPOL */
93 #define MAX_TX_SPL_DESC 1024
94 
95 #define MAX_RXDESC_POOLS 4
96 #define MAX_PPE_TXDESC_POOLS 1
97 
98 /* Max no. of VDEV per PSOC */
99 #ifdef WLAN_PSOC_MAX_VDEVS
100 #define MAX_VDEV_CNT WLAN_PSOC_MAX_VDEVS
101 #else
102 #define MAX_VDEV_CNT 51
103 #endif
104 
105 /* Max no. of VDEVs a PDEV can support */
106 #ifdef WLAN_PDEV_MAX_VDEVS
107 #define DP_PDEV_MAX_VDEVS WLAN_PDEV_MAX_VDEVS
108 #else
109 #define DP_PDEV_MAX_VDEVS 17
110 #endif
111 
112 #define EXCEPTION_DEST_RING_ID 0
113 #define MAX_IDLE_SCATTER_BUFS 16
114 #define DP_MAX_IRQ_PER_CONTEXT 12
115 #define DEFAULT_HW_PEER_ID 0xffff
116 
117 #define MAX_AST_AGEOUT_COUNT 128
118 
119 #ifdef TX_ADDR_INDEX_SEARCH
120 #define DP_TX_ADDR_SEARCH_ADDR_POLICY HAL_TX_ADDR_INDEX_SEARCH
121 #else
122 #define DP_TX_ADDR_SEARCH_ADDR_POLICY HAL_TX_ADDR_SEARCH_DEFAULT
123 #endif
124 
125 #define WBM_INT_ERROR_ALL 0
126 #define WBM_INT_ERROR_REO_NULL_BUFFER 1
127 #define WBM_INT_ERROR_REO_NULL_LINK_DESC 2
128 #define WBM_INT_ERROR_REO_NULL_MSDU_BUFF 3
129 #define WBM_INT_ERROR_REO_BUFF_REAPED 4
130 #define MAX_WBM_INT_ERROR_REASONS 5
131 
132 #define MAX_TX_HW_QUEUES MAX_TCL_DATA_RINGS
133 /* Maximum retries for Delba per tid per peer */
134 #define DP_MAX_DELBA_RETRY 3
135 
136 #ifdef AST_OFFLOAD_ENABLE
137 #define AST_OFFLOAD_ENABLE_STATUS 1
138 #else
139 #define AST_OFFLOAD_ENABLE_STATUS 0
140 #endif
141 
142 #ifdef FEATURE_MEC_OFFLOAD
143 #define FW_MEC_FW_OFFLOAD_ENABLED 1
144 #else
145 #define FW_MEC_FW_OFFLOAD_ENABLED 0
146 #endif
147 
148 #define PCP_TID_MAP_MAX 8
149 #define MAX_MU_USERS 37
150 
151 #define REO_CMD_EVENT_HIST_MAX 64
152 
153 #define DP_MAX_SRNGS 64
154 
155 /* 2G PHYB */
156 #define PHYB_2G_LMAC_ID 2
157 #define PHYB_2G_TARGET_PDEV_ID 2
158 
159 /* Flags for skipping s/w tid classification */
160 #define DP_TX_HW_DSCP_TID_MAP_VALID 0x1
161 #define DP_TXRX_HLOS_TID_OVERRIDE_ENABLED 0x2
162 #define DP_TX_MESH_ENABLED 0x4
163 #define DP_TX_INVALID_QOS_TAG 0xf
164 
165 #ifdef WLAN_SUPPORT_RX_FISA
166 #define FISA_FLOW_MAX_AGGR_COUNT        16 /* max flow aggregate count */
167 #endif
168 
169 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
170 #define DP_RX_REFILL_BUFF_POOL_SIZE  2048
171 #define DP_RX_REFILL_BUFF_POOL_BURST 64
172 #define DP_RX_REFILL_THRD_THRESHOLD  512
173 #endif
174 
175 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
176 #define DP_SKIP_BAR_UPDATE_TIMEOUT 5000
177 #endif
178 
179 #define DP_TX_MAGIC_PATTERN_INUSE	0xABCD1234
180 #define DP_TX_MAGIC_PATTERN_FREE	0xDEADBEEF
181 
182 #define DP_INTR_POLL_TIMER_MS	5
183 
184 #ifdef IPA_OFFLOAD
185 #define DP_PEER_REO_STATS_TID_SHIFT 16
186 #define DP_PEER_REO_STATS_TID_MASK 0xFFFF0000
187 #define DP_PEER_REO_STATS_PEER_ID_MASK 0x0000FFFF
188 #define DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid) \
189 	((comb_peer_id_tid & DP_PEER_REO_STATS_TID_MASK) >> \
190 	DP_PEER_REO_STATS_TID_SHIFT)
191 #define DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid) \
192 	(comb_peer_id_tid & DP_PEER_REO_STATS_PEER_ID_MASK)
193 #endif
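
/*
 * Example (illustrative only, not part of the original header): the REO
 * stats helpers above pack a peer id and a TID into one 32-bit word with
 * the TID in the upper 16 bits. A hypothetical round trip:
 *
 *	uint32_t comb = (tid << DP_PEER_REO_STATS_TID_SHIFT) | peer_id;
 *
 *	DP_PEER_GET_REO_STATS_TID(comb)     == tid
 *	DP_PEER_GET_REO_STATS_PEER_ID(comb) == peer_id
 */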
194 
195 typedef void dp_ptnr_soc_iter_func(struct dp_soc *ptnr_soc, void *arg,
196 				   int chip_id);
197 
198 enum rx_pktlog_mode {
199 	DP_RX_PKTLOG_DISABLED = 0,
200 	DP_RX_PKTLOG_FULL,
201 	DP_RX_PKTLOG_LITE,
202 };
203 
204 /* enum m_copy_mode - Available mcopy modes */
207 enum m_copy_mode {
208 	M_COPY_DISABLED = 0,
209 	M_COPY = 2,
210 	M_COPY_EXTENDED = 4,
211 };
212 
213 struct msdu_list {
214 	qdf_nbuf_t head;
215 	qdf_nbuf_t tail;
216 	uint32_t sum_len;
217 };
218 
219 struct dp_soc_cmn;
220 struct dp_pdev;
221 struct dp_vdev;
222 struct dp_tx_desc_s;
223 struct dp_soc;
224 union dp_rx_desc_list_elem_t;
225 struct cdp_peer_rate_stats_ctx;
226 struct cdp_soc_rate_stats_ctx;
227 struct dp_rx_fst;
228 struct dp_mon_filter;
229 struct dp_mon_mpdu;
230 #ifdef BE_PKTLOG_SUPPORT
231 struct dp_mon_filter_be;
232 #endif
233 struct dp_peer;
234 struct dp_txrx_peer;
235 
236 /**
237  * enum dp_peer_state - DP peer states
238  * @DP_PEER_STATE_NONE:
239  * @DP_PEER_STATE_INIT:
240  * @DP_PEER_STATE_ACTIVE:
241  * @DP_PEER_STATE_LOGICAL_DELETE:
242  * @DP_PEER_STATE_INACTIVE:
243  * @DP_PEER_STATE_FREED:
244  * @DP_PEER_STATE_INVALID:
245  */
246 enum dp_peer_state {
247 	DP_PEER_STATE_NONE,
248 	DP_PEER_STATE_INIT,
249 	DP_PEER_STATE_ACTIVE,
250 	DP_PEER_STATE_LOGICAL_DELETE,
251 	DP_PEER_STATE_INACTIVE,
252 	DP_PEER_STATE_FREED,
253 	DP_PEER_STATE_INVALID,
254 };
255 
256 /**
257  * enum dp_mod_id - DP module IDs
258  * @DP_MOD_ID_TX_RX:
259  * @DP_MOD_ID_TX_COMP:
260  * @DP_MOD_ID_RX:
261  * @DP_MOD_ID_HTT_COMP:
262  * @DP_MOD_ID_RX_ERR:
263  * @DP_MOD_ID_TX_PPDU_STATS:
264  * @DP_MOD_ID_RX_PPDU_STATS:
265  * @DP_MOD_ID_CDP:
266  * @DP_MOD_ID_GENERIC_STATS:
267  * @DP_MOD_ID_TX_MULTIPASS:
268  * @DP_MOD_ID_TX_CAPTURE:
269  * @DP_MOD_ID_NSS_OFFLOAD:
270  * @DP_MOD_ID_CONFIG:
271  * @DP_MOD_ID_HTT:
272  * @DP_MOD_ID_IPA:
273  * @DP_MOD_ID_AST:
274  * @DP_MOD_ID_MCAST2UCAST:
275  * @DP_MOD_ID_CHILD:
276  * @DP_MOD_ID_MESH:
277  * @DP_MOD_ID_TX_EXCEPTION:
278  * @DP_MOD_ID_TDLS:
279  * @DP_MOD_ID_MISC:
280  * @DP_MOD_ID_MSCS:
281  * @DP_MOD_ID_TX:
282  * @DP_MOD_ID_SAWF:
283  * @DP_MOD_ID_REINJECT:
284  * @DP_MOD_ID_SCS:
285  * @DP_MOD_ID_UMAC_RESET:
286  * @DP_MOD_ID_TX_MCAST:
287  * @DP_MOD_ID_DS:
288  * @DP_MOD_ID_MAX:
289  */
290 enum dp_mod_id {
291 	DP_MOD_ID_TX_RX,
292 	DP_MOD_ID_TX_COMP,
293 	DP_MOD_ID_RX,
294 	DP_MOD_ID_HTT_COMP,
295 	DP_MOD_ID_RX_ERR,
296 	DP_MOD_ID_TX_PPDU_STATS,
297 	DP_MOD_ID_RX_PPDU_STATS,
298 	DP_MOD_ID_CDP,
299 	DP_MOD_ID_GENERIC_STATS,
300 	DP_MOD_ID_TX_MULTIPASS,
301 	DP_MOD_ID_TX_CAPTURE,
302 	DP_MOD_ID_NSS_OFFLOAD,
303 	DP_MOD_ID_CONFIG,
304 	DP_MOD_ID_HTT,
305 	DP_MOD_ID_IPA,
306 	DP_MOD_ID_AST,
307 	DP_MOD_ID_MCAST2UCAST,
308 	DP_MOD_ID_CHILD,
309 	DP_MOD_ID_MESH,
310 	DP_MOD_ID_TX_EXCEPTION,
311 	DP_MOD_ID_TDLS,
312 	DP_MOD_ID_MISC,
313 	DP_MOD_ID_MSCS,
314 	DP_MOD_ID_TX,
315 	DP_MOD_ID_SAWF,
316 	DP_MOD_ID_REINJECT,
317 	DP_MOD_ID_SCS,
318 	DP_MOD_ID_UMAC_RESET,
319 	DP_MOD_ID_TX_MCAST,
320 	DP_MOD_ID_DS,
321 	DP_MOD_ID_MAX,
322 };
323 
324 #define DP_PDEV_ITERATE_VDEV_LIST(_pdev, _vdev) \
325 	TAILQ_FOREACH((_vdev), &(_pdev)->vdev_list, vdev_list_elem)
326 
327 #define DP_VDEV_ITERATE_PEER_LIST(_vdev, _peer) \
328 	TAILQ_FOREACH((_peer), &(_vdev)->peer_list, peer_list_elem)
329 
330 #define DP_PEER_ITERATE_ASE_LIST(_peer, _ase, _temp_ase) \
331 	TAILQ_FOREACH_SAFE((_ase), &(_peer)->ast_entry_list, ase_list_elem, (_temp_ase))
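
/*
 * Example (illustrative only, not part of the original header): the
 * iterators above are thin wrappers around TAILQ_FOREACH, so they are used
 * as loop headers; locking of the underlying list is assumed to be the
 * caller's responsibility.
 *
 *	struct dp_vdev *vdev;
 *
 *	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
 *		// visit each vdev attached to this pdev
 *	}
 */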
332 
333 #define DP_MUTEX_TYPE qdf_spinlock_t
334 
335 #define DP_FRAME_IS_MULTICAST(_a)  (*(_a) & 0x01)
336 #define DP_FRAME_IS_IPV4_MULTICAST(_a)  (*(_a) == 0x01)
337 
338 #define DP_FRAME_IS_IPV6_MULTICAST(_a)         \
339     ((_a)[0] == 0x33 &&                         \
340      (_a)[1] == 0x33)
341 
342 #define DP_FRAME_IS_BROADCAST(_a)              \
343     ((_a)[0] == 0xff &&                         \
344      (_a)[1] == 0xff &&                         \
345      (_a)[2] == 0xff &&                         \
346      (_a)[3] == 0xff &&                         \
347      (_a)[4] == 0xff &&                         \
348      (_a)[5] == 0xff)
349 #define DP_FRAME_IS_SNAP(_llc) ((_llc)->llc_dsap == 0xaa && \
350 		(_llc)->llc_ssap == 0xaa && \
351 		(_llc)->llc_un.type_snap.control == 0x3)
352 #define DP_FRAME_IS_LLC(typeorlen) ((typeorlen) >= 0x600)
353 #define DP_FRAME_FC0_TYPE_MASK 0x0c
354 #define DP_FRAME_FC0_TYPE_DATA 0x08
355 #define DP_FRAME_IS_DATA(_frame) \
356 	(((_frame)->i_fc[0] & DP_FRAME_FC0_TYPE_MASK) == DP_FRAME_FC0_TYPE_DATA)
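
/*
 * Example (illustrative only, not part of the original header): classifying
 * a frame by its destination MAC address with the helpers above, assuming
 * "da" points at the 6-byte destination address. The handle_*() callees are
 * hypothetical placeholders.
 *
 *	if (DP_FRAME_IS_BROADCAST(da))
 *		handle_bcast();
 *	else if (DP_FRAME_IS_IPV4_MULTICAST(da) || DP_FRAME_IS_IPV6_MULTICAST(da))
 *		handle_ip_mcast();
 *	else if (DP_FRAME_IS_MULTICAST(da))
 *		handle_other_mcast();
 *
 * The broadcast check must come first, since ff:ff:ff:ff:ff:ff also has the
 * group bit set and would otherwise match the multicast check.
 */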
357 
358 /*
359  * macros to convert hw mac id to sw mac id:
360  * mac ids used by hardware start from a value of 1 while
361  * those in host software start from a value of 0. Use the
362  * macros below to convert between mac ids used by software and
363  * hardware
364  */
365 #define DP_SW2HW_MACID(id) ((id) + 1)
366 #define DP_HW2SW_MACID(id) ((id) > 0 ? ((id) - 1) : 0)
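
/*
 * Example (illustrative only, not part of the original header):
 *
 *	DP_SW2HW_MACID(0) == 1   // host mac id 0 maps to hardware mac id 1
 *	DP_HW2SW_MACID(1) == 0   // and back again
 *	DP_HW2SW_MACID(0) == 0   // hw mac id 0 is clamped rather than underflowing
 */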
367 
368 /*
369  * Number of Tx Queues
370  * enum and macros that define how many threshold levels are used
371  * for AC-based flow control
372  */
373 #ifdef QCA_AC_BASED_FLOW_CONTROL
374 enum dp_fl_ctrl_threshold {
375 	DP_TH_BE_BK = 0,
376 	DP_TH_VI,
377 	DP_TH_VO,
378 	DP_TH_HI,
379 };
380 
381 #define FL_TH_MAX (4)
382 #define FL_TH_VI_PERCENTAGE (80)
383 #define FL_TH_VO_PERCENTAGE (60)
384 #define FL_TH_HI_PERCENTAGE (40)
385 #endif
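
/*
 * Example (illustrative only, not part of the original header): a sketch of
 * how the per-AC stop thresholds could be derived from a base threshold
 * using the percentages above; base_stop_th is hypothetical and the actual
 * derivation lives in the Tx flow control code.
 *
 *	stop_th[DP_TH_BE_BK] = base_stop_th;
 *	stop_th[DP_TH_VI]    = (base_stop_th * FL_TH_VI_PERCENTAGE) / 100;
 *	stop_th[DP_TH_VO]    = (base_stop_th * FL_TH_VO_PERCENTAGE) / 100;
 *	stop_th[DP_TH_HI]    = (base_stop_th * FL_TH_HI_PERCENTAGE) / 100;
 *
 * Higher-priority access categories get a smaller stop threshold, so they
 * can still obtain descriptors after lower-priority traffic is paused.
 */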
386 
387 /**
388  * enum dp_intr_mode
389  * @DP_INTR_INTEGRATED: Line interrupts
390  * @DP_INTR_MSI: MSI interrupts
391  * @DP_INTR_POLL: Polling
392  * @DP_INTR_LEGACY_VIRTUAL_IRQ:
393  */
394 enum dp_intr_mode {
395 	DP_INTR_INTEGRATED = 0,
396 	DP_INTR_MSI,
397 	DP_INTR_POLL,
398 	DP_INTR_LEGACY_VIRTUAL_IRQ,
399 };
400 
401 /**
402  * enum dp_tx_frm_type
403  * @dp_tx_frm_std: Regular frame, no added header fragments
404  * @dp_tx_frm_tso: TSO segment, with a modified IP header added
405  * @dp_tx_frm_sg: SG segment
406  * @dp_tx_frm_audio: Audio frames, a custom LLC/SNAP header added
407  * @dp_tx_frm_me: Multicast to Unicast Converted frame
408  * @dp_tx_frm_raw: Raw Frame
409  * @dp_tx_frm_rmnet:
410  */
411 enum dp_tx_frm_type {
412 	dp_tx_frm_std = 0,
413 	dp_tx_frm_tso,
414 	dp_tx_frm_sg,
415 	dp_tx_frm_audio,
416 	dp_tx_frm_me,
417 	dp_tx_frm_raw,
418 	dp_tx_frm_rmnet,
419 };
420 
421 /**
422  * enum dp_ast_type
423  * @dp_ast_type_wds: WDS peer AST type
424  * @dp_ast_type_static: static ast entry type
425  * @dp_ast_type_mec: Multicast echo ast entry type
426  */
427 enum dp_ast_type {
428 	dp_ast_type_wds = 0,
429 	dp_ast_type_static,
430 	dp_ast_type_mec,
431 };
432 
433 /**
434  * enum dp_nss_cfg
435  * @dp_nss_cfg_default: No radios are offloaded
436  * @dp_nss_cfg_first_radio: First radio offloaded
437  * @dp_nss_cfg_second_radio: Second radio offloaded
438  * @dp_nss_cfg_dbdc: Dual radios offloaded
439  * @dp_nss_cfg_dbtc: Three radios offloaded
440  * @dp_nss_cfg_max: max value
441  */
442 enum dp_nss_cfg {
443 	dp_nss_cfg_default = 0x0,
444 	dp_nss_cfg_first_radio = 0x1,
445 	dp_nss_cfg_second_radio = 0x2,
446 	dp_nss_cfg_dbdc = 0x3,
447 	dp_nss_cfg_dbtc = 0x7,
448 	dp_nss_cfg_max
449 };
450 
451 #ifdef WLAN_TX_PKT_CAPTURE_ENH
452 #define DP_CPU_RING_MAP_1 1
453 #endif
454 
455 /**
456  * enum dp_cpu_ring_map_types - dp tx cpu ring map
457  * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
458  * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded
459  * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
460  * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
461  * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
462  * @DP_SINGLE_TX_RING_MAP: all CPUs mapped to a single ring to avoid reordering
463  * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
464  */
465 enum dp_cpu_ring_map_types {
466 	DP_NSS_DEFAULT_MAP,
467 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
468 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
469 	DP_NSS_DBDC_OFFLOADED_MAP,
470 	DP_NSS_DBTC_OFFLOADED_MAP,
471 #ifdef WLAN_TX_PKT_CAPTURE_ENH
472 	DP_SINGLE_TX_RING_MAP,
473 #endif
474 	DP_NSS_CPU_RING_MAP_MAX
475 };
476 
477 /**
478  * struct dp_rx_nbuf_frag_info - Hold vaddr and paddr for a buffer
479  *
480  * @paddr: Physical address of buffer allocated.
481  * @virt_addr: union of virtual address representations
482  * @nbuf: Allocated nbuf in case of nbuf approach.
483  * @vaddr: Virtual address of frag allocated in case of frag approach.
484  */
485 struct dp_rx_nbuf_frag_info {
486 	qdf_dma_addr_t paddr;
487 	union {
488 		qdf_nbuf_t nbuf;
489 		qdf_frag_t vaddr;
490 	} virt_addr;
491 };
492 
493 /**
494  * enum dp_ctxt_type - context type
495  * @DP_PDEV_TYPE: PDEV context
496  * @DP_RX_RING_HIST_TYPE: Datapath rx ring history
497  * @DP_RX_ERR_RING_HIST_TYPE: Datapath rx error ring history
498  * @DP_RX_REINJECT_RING_HIST_TYPE: Datapath reinject ring history
499  * @DP_TX_TCL_HIST_TYPE:
500  * @DP_TX_COMP_HIST_TYPE:
501  * @DP_FISA_RX_FT_TYPE:
502  * @DP_RX_REFILL_RING_HIST_TYPE: Datapath rx refill ring history
503  * @DP_TX_HW_DESC_HIST_TYPE: Datapath TX HW descriptor history
504  * @DP_MON_SOC_TYPE: Datapath monitor soc context
505  * @DP_MON_PDEV_TYPE: Datapath monitor pdev context
506  * @DP_MON_STATUS_BUF_HIST_TYPE: DP monitor status buffer history
507  * @DP_CFG_EVENT_HIST_TYPE: DP config events history
508  */
509 enum dp_ctxt_type {
510 	DP_PDEV_TYPE,
511 	DP_RX_RING_HIST_TYPE,
512 	DP_RX_ERR_RING_HIST_TYPE,
513 	DP_RX_REINJECT_RING_HIST_TYPE,
514 	DP_TX_TCL_HIST_TYPE,
515 	DP_TX_COMP_HIST_TYPE,
516 	DP_FISA_RX_FT_TYPE,
517 	DP_RX_REFILL_RING_HIST_TYPE,
518 	DP_TX_HW_DESC_HIST_TYPE,
519 	DP_MON_SOC_TYPE,
520 	DP_MON_PDEV_TYPE,
521 	DP_MON_STATUS_BUF_HIST_TYPE,
522 	DP_CFG_EVENT_HIST_TYPE,
523 };
524 
525 /**
526  * enum dp_desc_type - source type for multiple pages allocation
527  * @DP_TX_DESC_TYPE: DP SW TX descriptor
528  * @DP_TX_PPEDS_DESC_TYPE: DP PPE-DS Tx descriptor
529  * @DP_TX_EXT_DESC_TYPE: DP TX msdu extension descriptor
530  * @DP_TX_EXT_DESC_LINK_TYPE: DP link descriptor for msdu ext_desc
531  * @DP_TX_TSO_DESC_TYPE: DP TX TSO descriptor
532  * @DP_TX_TSO_NUM_SEG_TYPE: DP TX number of segments
533  * @DP_RX_DESC_BUF_TYPE: DP RX SW descriptor
534  * @DP_RX_DESC_STATUS_TYPE: DP RX SW descriptor for monitor status
535  * @DP_HW_LINK_DESC_TYPE: DP HW link descriptor
536  * @DP_HW_CC_SPT_PAGE_TYPE: DP pages for HW CC secondary page table
537  * @DP_TX_TCL_DESC_TYPE: DP TCL descriptor
538  */
539 enum dp_desc_type {
540 	DP_TX_DESC_TYPE,
541 	DP_TX_PPEDS_DESC_TYPE,
542 	DP_TX_EXT_DESC_TYPE,
543 	DP_TX_EXT_DESC_LINK_TYPE,
544 	DP_TX_TSO_DESC_TYPE,
545 	DP_TX_TSO_NUM_SEG_TYPE,
546 	DP_RX_DESC_BUF_TYPE,
547 	DP_RX_DESC_STATUS_TYPE,
548 	DP_HW_LINK_DESC_TYPE,
549 	DP_HW_CC_SPT_PAGE_TYPE,
550 	DP_TX_TCL_DESC_TYPE,
551 };
552 
553 /**
554  * struct rx_desc_pool
555  * @pool_size: number of RX descriptor in the pool
556  * @elem_size: Element size
557  * @desc_pages: Multi page descriptors
558  * @array: pointer to array of RX descriptor
559  * @freelist: pointer to free RX descriptor link list
560  * @lock: Protection for the RX descriptor pool
561  * @owner: owner for nbuf
562  * @buf_size: Buffer size
563  * @buf_alignment: Buffer alignment
564  * @rx_mon_dest_frag_enable: Enable frag processing for mon dest buffer
565  * @desc_type: type of desc this pool serves
566  */
567 struct rx_desc_pool {
568 	uint32_t pool_size;
569 #ifdef RX_DESC_MULTI_PAGE_ALLOC
570 	uint16_t elem_size;
571 	struct qdf_mem_multi_page_t desc_pages;
572 #else
573 	union dp_rx_desc_list_elem_t *array;
574 #endif
575 	union dp_rx_desc_list_elem_t *freelist;
576 	qdf_spinlock_t lock;
577 	uint8_t owner;
578 	uint16_t buf_size;
579 	uint8_t buf_alignment;
580 	bool rx_mon_dest_frag_enable;
581 	enum dp_desc_type desc_type;
582 };
583 
584 /**
585  * struct dp_tx_ext_desc_elem_s
586  * @next: next extension descriptor pointer
587  * @vaddr: hlos virtual address pointer
588  * @paddr: physical address pointer for descriptor
589  * @flags: mark features for extension descriptor
590  * @me_buffer: Pointer to ME buffer - store this so that it can be freed on
591  *		Tx completion of ME packet
592  * @tso_desc: Pointer to Tso desc
593  * @tso_num_desc: Pointer to tso_num_desc
594  */
595 struct dp_tx_ext_desc_elem_s {
596 	struct dp_tx_ext_desc_elem_s *next;
597 	void *vaddr;
598 	qdf_dma_addr_t paddr;
599 	uint16_t flags;
600 	struct dp_tx_me_buf_t *me_buffer;
601 	struct qdf_tso_seg_elem_t *tso_desc;
602 	struct qdf_tso_num_seg_elem_t *tso_num_desc;
603 };
604 
605 /*
606  * NB: intentionally not using kernel-doc comment because the kernel-doc
607  *     script does not handle the qdf_dma_mem_context macro
608  * struct dp_tx_ext_desc_pool_s - Tx Extension Descriptor Pool
609  * @elem_count: Number of descriptors in the pool
610  * @elem_size: Size of each descriptor
611  * @num_free: Number of free descriptors
612  * @desc_pages: multiple page allocation information for actual descriptors
613  * @link_elem_size: size of the link descriptor in cacheable memory used for
614  * 		    chaining the extension descriptors
615  * @desc_link_pages: multiple page allocation information for link descriptors
616  * @freelist:
617  * @lock:
618  * @memctx:
619  */
620 struct dp_tx_ext_desc_pool_s {
621 	uint16_t elem_count;
622 	int elem_size;
623 	uint16_t num_free;
624 	struct qdf_mem_multi_page_t desc_pages;
625 	int link_elem_size;
626 	struct qdf_mem_multi_page_t desc_link_pages;
627 	struct dp_tx_ext_desc_elem_s *freelist;
628 	qdf_spinlock_t lock;
629 	qdf_dma_mem_context(memctx);
630 };
631 
632 /**
633  * struct dp_tx_desc_s - Tx Descriptor
634  * @next: Next in the chain of descriptors in freelist or in the completion list
635  * @nbuf: Buffer Address
636  * @length:
637  * @magic:
638  * @timestamp_tick:
639  * @flags: Flags to track the state of descriptor and special frame handling
640  * @id: Descriptor ID
641  * @dma_addr:
642  * @vdev_id: vdev_id of vdev over which the packet was transmitted
643  * @tx_status:
644  * @peer_id:
645  * @pdev: Handle to pdev
646  * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet).
647  * 		   This is maintained in descriptor to allow more efficient
648  * 		   processing in completion event processing code.
649  * 		   This field is filled in with the htt_pkt_type enum.
650  * @buffer_src: buffer source TQM, REO, FW etc.
651  * @reserved:
652  * @frm_type: Frame Type - ToDo check if this is redundant
653  * @pkt_offset: Offset from which the actual packet data starts
654  * @pool_id: Pool ID - used when releasing the descriptor
655  * @shinfo_addr:
656  * @msdu_ext_desc: MSDU extension descriptor
657  * @timestamp:
658  * @comp:
659  * @tcl_cmd_vaddr: VADDR of the TCL descriptor, valid for soft-umac arch
660  * @tcl_cmd_paddr: PADDR of the TCL descriptor, valid for soft-umac arch
661  */
662 struct dp_tx_desc_s {
663 	struct dp_tx_desc_s *next;
664 	qdf_nbuf_t nbuf;
665 	uint16_t length;
666 #ifdef DP_TX_TRACKING
667 	uint32_t magic;
668 	uint64_t timestamp_tick;
669 #endif
670 	uint16_t flags;
671 	uint32_t id;
672 	qdf_dma_addr_t dma_addr;
673 	uint8_t vdev_id;
674 	uint8_t tx_status;
675 	uint16_t peer_id;
676 	struct dp_pdev *pdev;
677 	uint8_t tx_encap_type:2,
678 		buffer_src:3,
679 		reserved:3;
680 	uint8_t frm_type;
681 	uint8_t pkt_offset;
682 	uint8_t  pool_id;
683 	unsigned char *shinfo_addr;
684 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
685 	qdf_ktime_t timestamp;
686 	struct hal_tx_desc_comp_s comp;
687 #ifdef WLAN_SOFTUMAC_SUPPORT
688 	void *tcl_cmd_vaddr;
689 	qdf_dma_addr_t tcl_cmd_paddr;
690 #endif
691 };
692 
693 #ifdef QCA_AC_BASED_FLOW_CONTROL
694 /**
695  * enum flow_pool_status - flow pool status
696  * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
697  *				and network queues are unpaused
698  * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
699  *			   and network queues are paused
700  * @FLOW_POOL_BE_BK_PAUSED:
701  * @FLOW_POOL_VI_PAUSED:
702  * @FLOW_POOL_VO_PAUSED:
703  * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
704  * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
705  * @FLOW_POOL_ACTIVE_UNPAUSED_REATTACH: pool is reattached but network
706  *					queues are not paused
707  */
708 enum flow_pool_status {
709 	FLOW_POOL_ACTIVE_UNPAUSED = 0,
710 	FLOW_POOL_ACTIVE_PAUSED = 1,
711 	FLOW_POOL_BE_BK_PAUSED = 2,
712 	FLOW_POOL_VI_PAUSED = 3,
713 	FLOW_POOL_VO_PAUSED = 4,
714 	FLOW_POOL_INVALID = 5,
715 	FLOW_POOL_INACTIVE = 6,
716 	FLOW_POOL_ACTIVE_UNPAUSED_REATTACH = 7,
717 };
718 
719 #else
720 /**
721  * enum flow_pool_status - flow pool status
722  * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
723  *				and network queues are unpaused
724  * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
725  *			   and network queues are paused
726  * @FLOW_POOL_BE_BK_PAUSED:
727  * @FLOW_POOL_VI_PAUSED:
728  * @FLOW_POOL_VO_PAUSED:
729  * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
730  * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
731  */
732 enum flow_pool_status {
733 	FLOW_POOL_ACTIVE_UNPAUSED = 0,
734 	FLOW_POOL_ACTIVE_PAUSED = 1,
735 	FLOW_POOL_BE_BK_PAUSED = 2,
736 	FLOW_POOL_VI_PAUSED = 3,
737 	FLOW_POOL_VO_PAUSED = 4,
738 	FLOW_POOL_INVALID = 5,
739 	FLOW_POOL_INACTIVE = 6,
740 };
741 
742 #endif
743 
744 /**
745  * struct dp_tx_tso_seg_pool_s
746  * @pool_size: total number of pool elements
747  * @num_free: free element count
748  * @freelist: first free element pointer
749  * @desc_pages: multiple page allocation information for actual descriptors
750  * @lock: lock for accessing the pool
751  */
752 struct dp_tx_tso_seg_pool_s {
753 	uint16_t pool_size;
754 	uint16_t num_free;
755 	struct qdf_tso_seg_elem_t *freelist;
756 	struct qdf_mem_multi_page_t desc_pages;
757 	qdf_spinlock_t lock;
758 };
759 
760 /**
761  * struct dp_tx_tso_num_seg_pool_s - TSO Num seg pool
762  * @num_seg_pool_size: total number of pool elements
763  * @num_free: free element count
764  * @freelist: first free element pointer
765  * @desc_pages: multiple page allocation information for actual descriptors
766  * @lock: lock for accessing the pool
767  */
768 
769 struct dp_tx_tso_num_seg_pool_s {
770 	uint16_t num_seg_pool_size;
771 	uint16_t num_free;
772 	struct qdf_tso_num_seg_elem_t *freelist;
773 	struct qdf_mem_multi_page_t desc_pages;
774 	/*tso mutex */
775 	qdf_spinlock_t lock;
776 };
777 
778 /**
779  * struct dp_tx_desc_pool_s - Tx Descriptor pool information
780  * @elem_size: Size of each descriptor in the pool
781  * @num_allocated: Number of used descriptors
782  * @freelist: Chain of free descriptors
783  * @desc_pages: multiple page allocation information for actual descriptors
784  * @pool_size: Total number of descriptors in the pool
785  * @flow_pool_id:
786  * @num_invalid_bin: Deleted pool with pending Tx completions.
787  * @avail_desc:
788  * @status:
789  * @flow_type:
790  * @stop_th:
791  * @start_th:
792  * @max_pause_time:
793  * @latest_pause_time:
794  * @pkt_drop_no_desc:
795  * @flow_pool_lock:
796  * @pool_create_cnt:
797  * @pool_owner_ctx:
798  * @elem_count:
799  * @num_free: Number of free descriptors
800  * @lock: Lock for descriptor allocation/free from/to the pool
801  */
802 struct dp_tx_desc_pool_s {
803 	uint16_t elem_size;
804 	uint32_t num_allocated;
805 	struct dp_tx_desc_s *freelist;
806 	struct qdf_mem_multi_page_t desc_pages;
807 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
808 	uint16_t pool_size;
809 	uint8_t flow_pool_id;
810 	uint8_t num_invalid_bin;
811 	uint16_t avail_desc;
812 	enum flow_pool_status status;
813 	enum htt_flow_type flow_type;
814 #ifdef QCA_AC_BASED_FLOW_CONTROL
815 	uint16_t stop_th[FL_TH_MAX];
816 	uint16_t start_th[FL_TH_MAX];
817 	qdf_time_t max_pause_time[FL_TH_MAX];
818 	qdf_time_t latest_pause_time[FL_TH_MAX];
819 #else
820 	uint16_t stop_th;
821 	uint16_t start_th;
822 #endif
823 	uint16_t pkt_drop_no_desc;
824 	qdf_spinlock_t flow_pool_lock;
825 	uint8_t pool_create_cnt;
826 	void *pool_owner_ctx;
827 #else
828 	uint16_t elem_count;
829 	uint32_t num_free;
830 	qdf_spinlock_t lock;
831 #endif
832 };
833 
834 /**
835  * struct dp_txrx_pool_stats - flow pool related statistics
836  * @pool_map_count: flow pool map received
837  * @pool_unmap_count: flow pool unmap received
838  * @pkt_drop_no_pool: packets dropped due to unavailability of pool
839  */
840 struct dp_txrx_pool_stats {
841 	uint16_t pool_map_count;
842 	uint16_t pool_unmap_count;
843 	uint16_t pkt_drop_no_pool;
844 };
845 
846 /**
847  * struct dp_srng - DP srng structure
848  * @hal_srng: hal_srng handle
849  * @base_vaddr_unaligned: un-aligned virtual base address of the srng ring
850  * @base_vaddr_aligned: aligned virtual base address of the srng ring
851  * @base_paddr_unaligned: un-aligned physical base address of the srng ring
852  * @base_paddr_aligned: aligned physical base address of the srng ring
853  * @alloc_size: size of the srng ring
854  * @cached: is the srng ring memory cached or un-cached memory
855  * @irq: irq number of the srng ring
856  * @num_entries: number of entries in the srng ring
857  * @is_mem_prealloc: Is this srng memory pre-allocated
858  * @crit_thresh: Critical threshold for near-full processing of this srng
859  * @safe_thresh: Safe threshold for near-full processing of this srng
860  * @near_full: Flag to indicate srng is near-full
861  */
862 struct dp_srng {
863 	hal_ring_handle_t hal_srng;
864 	void *base_vaddr_unaligned;
865 	void *base_vaddr_aligned;
866 	qdf_dma_addr_t base_paddr_unaligned;
867 	qdf_dma_addr_t base_paddr_aligned;
868 	uint32_t alloc_size;
869 	uint8_t cached;
870 	int irq;
871 	uint32_t num_entries;
872 #ifdef DP_MEM_PRE_ALLOC
873 	uint8_t is_mem_prealloc;
874 #endif
875 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
876 	uint16_t crit_thresh;
877 	uint16_t safe_thresh;
878 	qdf_atomic_t near_full;
879 #endif
880 };
881 
882 struct dp_rx_reorder_array_elem {
883 	qdf_nbuf_t head;
884 	qdf_nbuf_t tail;
885 };
886 
887 #define DP_RX_BA_INACTIVE 0
888 #define DP_RX_BA_ACTIVE 1
889 #define DP_RX_BA_IN_PROGRESS 2
890 struct dp_reo_cmd_info {
891 	uint16_t cmd;
892 	enum hal_reo_cmd_type cmd_type;
893 	void *data;
894 	void (*handler)(struct dp_soc *, void *, union hal_reo_status *);
895 	TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
896 };
897 
898 struct dp_peer_delay_stats {
899 	struct cdp_delay_tid_stats delay_tid_stats[CDP_MAX_DATA_TIDS]
900 						  [CDP_MAX_TXRX_CTX];
901 };
902 
903 /* Rx TID defrag */
904 struct dp_rx_tid_defrag {
905 	/* TID */
906 	int tid;
907 
908 	/* only used for defrag right now */
909 	TAILQ_ENTRY(dp_rx_tid_defrag) defrag_waitlist_elem;
910 
911 	/* Store dst desc for reinjection */
912 	hal_ring_desc_t dst_ring_desc;
913 	struct dp_rx_desc *head_frag_desc;
914 
915 	/* Sequence and fragments that are being processed currently */
916 	uint32_t curr_seq_num;
917 	uint32_t curr_frag_num;
918 
919 	/* TODO: Check the following while adding defragmentation support */
920 	struct dp_rx_reorder_array_elem *array;
921 	/* base - single rx reorder element used for non-aggr cases */
922 	struct dp_rx_reorder_array_elem base;
923 	/* rx_tid lock */
924 	qdf_spinlock_t defrag_tid_lock;
925 
926 	/* head PN number */
927 	uint64_t pn128[2];
928 
929 	uint32_t defrag_timeout_ms;
930 
931 	/* defrag usage only, dp_peer pointer related with this tid */
932 	struct dp_txrx_peer *defrag_peer;
933 };
934 
935 /* Rx TID */
936 struct dp_rx_tid {
937 	/* TID */
938 	int tid;
939 
940 	/* Num of addba requests */
941 	uint32_t num_of_addba_req;
942 
943 	/* Num of addba responses */
944 	uint32_t num_of_addba_resp;
945 
946 	/* Num of delba requests */
947 	uint32_t num_of_delba_req;
948 
949 	/* Num of addba responses successful */
950 	uint32_t num_addba_rsp_success;
951 
952 	/* Num of addba responses failed */
953 	uint32_t num_addba_rsp_failed;
954 
955 	/* pn size */
956 	uint8_t pn_size;
957 	/* REO TID queue descriptors */
958 	void *hw_qdesc_vaddr_unaligned;
959 	void *hw_qdesc_vaddr_aligned;
960 	qdf_dma_addr_t hw_qdesc_paddr_unaligned;
961 	qdf_dma_addr_t hw_qdesc_paddr;
962 	uint32_t hw_qdesc_alloc_size;
963 
964 	/* RX ADDBA session state */
965 	int ba_status;
966 
967 	/* RX BA window size */
968 	uint16_t ba_win_size;
969 
970 	/* Starting sequence number in Addba request */
971 	uint16_t startseqnum;
972 	uint16_t dialogtoken;
973 	uint16_t statuscode;
974 	/* user defined ADDBA response status code */
975 	uint16_t userstatuscode;
976 
977 	/* rx_tid lock */
978 	qdf_spinlock_t tid_lock;
979 
980 	/* Store ppdu_id when 2k exception is received */
981 	uint32_t ppdu_id_2k;
982 
983 	/* Delba Tx completion status */
984 	uint8_t delba_tx_status;
985 
986 	/* Delba Tx retry count */
987 	uint8_t delba_tx_retry;
988 
989 	/* Delba stats */
990 	uint32_t delba_tx_success_cnt;
991 	uint32_t delba_tx_fail_cnt;
992 
993 	/* Delba reason code for retries */
994 	uint8_t delba_rcode;
995 
996 	/* Coex override preserved window size (1-based) */
997 	uint16_t rx_ba_win_size_override;
998 #ifdef IPA_OFFLOAD
999 	/* rx msdu count per tid */
1000 	struct cdp_pkt_info rx_msdu_cnt;
1001 #endif
1002 
1003 };
1004 
1005 /**
1006  * struct dp_intr_stats - DP Interrupt Stats for an interrupt context
1007  * @num_tx_ring_masks: interrupts with tx_ring_mask set
1008  * @num_rx_ring_masks: interrupts with rx_ring_mask set
1009  * @num_rx_mon_ring_masks: interrupts with rx_mon_ring_mask set
1010  * @num_rx_err_ring_masks: interrupts with rx_err_ring_mask set
1011  * @num_rx_wbm_rel_ring_masks: interrupts with rx_wbm_rel_ring_mask set
1012  * @num_reo_status_ring_masks: interrupts with reo_status_ring_mask set
1013  * @num_rxdma2host_ring_masks: interrupts with rxdma2host_ring_mask set
1014  * @num_host2rxdma_ring_masks: interrupts with host2rxdma_ring_mask set
1015  * @num_host2rxdma_mon_ring_masks: interrupts with host2rxdma_mon_ring_mask set
1016  * @num_rx_ring_near_full_masks: Near-full interrupts for REO DST ring
1017  * @num_tx_comp_ring_near_full_masks: Near-full interrupts for TX completion
1018  * @num_rx_wbm_rel_ring_near_full_masks: total number of times the wbm rel ring
1019  *                                       near full interrupt was received
1020  * @num_reo_status_ring_near_full_masks: total number of times the reo status
1021  *                                       near full interrupt was received
1022  * @num_near_full_masks: total number of times the near full interrupt
1023  *                       was received
1024  * @num_masks: total number of times the interrupt was received
1025  * @num_host2txmon_ring__masks: interrupts with host2txmon_ring_mask set
1028  * @num_tx_mon_ring_masks: interrupts with tx_mon_ring_mask set
1029  *
1030  * Counter for individual masks are incremented only if there are any packets
1031  * on that ring.
1032  */
1033 struct dp_intr_stats {
1034 	uint32_t num_tx_ring_masks[MAX_TCL_DATA_RINGS];
1035 	uint32_t num_rx_ring_masks[MAX_REO_DEST_RINGS];
1036 	uint32_t num_rx_mon_ring_masks;
1037 	uint32_t num_rx_err_ring_masks;
1038 	uint32_t num_rx_wbm_rel_ring_masks;
1039 	uint32_t num_reo_status_ring_masks;
1040 	uint32_t num_rxdma2host_ring_masks;
1041 	uint32_t num_host2rxdma_ring_masks;
1042 	uint32_t num_host2rxdma_mon_ring_masks;
1043 	uint32_t num_rx_ring_near_full_masks[MAX_REO_DEST_RINGS];
1044 	uint32_t num_tx_comp_ring_near_full_masks[MAX_TCL_DATA_RINGS];
1045 	uint32_t num_rx_wbm_rel_ring_near_full_masks;
1046 	uint32_t num_reo_status_ring_near_full_masks;
1047 	uint32_t num_host2txmon_ring__masks;
1048 	uint32_t num_near_full_masks;
1049 	uint32_t num_masks;
1050 	uint32_t num_tx_mon_ring_masks;
1051 };
1052 
1053 #ifdef DP_UMAC_HW_RESET_SUPPORT
1054 /**
1055  * struct dp_intr_bkp - DP per interrupt context ring masks old state
1056  * @tx_ring_mask: WBM Tx completion rings (0-2) associated with this napi ctxt
1057  * @rx_ring_mask: Rx REO rings (0-3) associated with this interrupt context
1058  * @rx_mon_ring_mask: Rx monitor ring mask (0-2)
1059  * @rx_err_ring_mask: REO Exception Ring
1060  * @rx_wbm_rel_ring_mask: WBM2SW Rx Release Ring
1061  * @reo_status_ring_mask: REO command response ring
1062  * @rxdma2host_ring_mask: RXDMA to host destination ring
1063  * @host2rxdma_ring_mask: Host to RXDMA buffer ring
1064  * @host2rxdma_mon_ring_mask: Host to RXDMA monitor  buffer ring
1065  * @host2txmon_ring_mask: Tx monitor buffer ring
1066  * @tx_mon_ring_mask: Tx monitor ring mask (0-2)
1067  *
1068  */
1069 struct dp_intr_bkp {
1070 	uint8_t tx_ring_mask;
1071 	uint8_t rx_ring_mask;
1072 	uint8_t rx_mon_ring_mask;
1073 	uint8_t rx_err_ring_mask;
1074 	uint8_t rx_wbm_rel_ring_mask;
1075 	uint8_t reo_status_ring_mask;
1076 	uint8_t rxdma2host_ring_mask;
1077 	uint8_t host2rxdma_ring_mask;
1078 	uint8_t host2rxdma_mon_ring_mask;
1079 	uint8_t host2txmon_ring_mask;
1080 	uint8_t tx_mon_ring_mask;
1081 };
1082 #endif
1083 
1084 /* per interrupt context  */
1085 struct dp_intr {
1086 	uint8_t tx_ring_mask;   /* WBM Tx completion rings (0-2)
1087 				associated with this napi context */
1088 	uint8_t rx_ring_mask;   /* Rx REO rings (0-3) associated
1089 				with this interrupt context */
1090 	uint8_t rx_mon_ring_mask;  /* Rx monitor ring mask (0-2) */
1091 	uint8_t rx_err_ring_mask; /* REO Exception Ring */
1092 	uint8_t rx_wbm_rel_ring_mask; /* WBM2SW Rx Release Ring */
1093 	uint8_t reo_status_ring_mask; /* REO command response ring */
1094 	uint8_t rxdma2host_ring_mask; /* RXDMA to host destination ring */
1095 	uint8_t host2rxdma_ring_mask; /* Host to RXDMA buffer ring */
1096 	/* Host to RXDMA monitor  buffer ring */
1097 	uint8_t host2rxdma_mon_ring_mask;
1098 	/* RX REO rings near full interrupt mask */
1099 	uint8_t rx_near_full_grp_1_mask;
1100 	/* RX REO rings near full interrupt mask */
1101 	uint8_t rx_near_full_grp_2_mask;
1102 	/* WBM TX completion rings near full interrupt mask */
1103 	uint8_t tx_ring_near_full_mask;
1104 	uint8_t host2txmon_ring_mask; /* Tx monitor buffer ring */
1105 	uint8_t tx_mon_ring_mask;  /* Tx monitor ring mask (0-2) */
1106 	struct dp_soc *soc;    /* Reference to SoC structure,
1107 				to get DMA ring handles */
1108 	qdf_lro_ctx_t lro_ctx;
1109 	uint8_t dp_intr_id;
1110 
1111 	/* Interrupt Stats for individual masks */
1112 	struct dp_intr_stats intr_stats;
1113 	uint8_t umac_reset_intr_mask;  /* UMAC reset interrupt mask */
1114 };
1115 
1116 #define REO_DESC_FREELIST_SIZE 64
1117 #define REO_DESC_FREE_DEFER_MS 1000
1118 struct reo_desc_list_node {
1119 	qdf_list_node_t node;
1120 	unsigned long free_ts;
1121 	struct dp_rx_tid rx_tid;
1122 	bool resend_update_reo_cmd;
1123 	uint32_t pending_ext_desc_size;
1124 #ifdef REO_QDESC_HISTORY
1125 	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
1126 #endif
1127 };
1128 
1129 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
1130 #define REO_DESC_DEFERRED_FREELIST_SIZE 256
1131 #define REO_DESC_DEFERRED_FREE_MS 30000
1132 
1133 struct reo_desc_deferred_freelist_node {
1134 	qdf_list_node_t node;
1135 	unsigned long free_ts;
1136 	void *hw_qdesc_vaddr_unaligned;
1137 	qdf_dma_addr_t hw_qdesc_paddr;
1138 	uint32_t hw_qdesc_alloc_size;
1139 #ifdef REO_QDESC_HISTORY
1140 	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
1141 #endif /* REO_QDESC_HISTORY */
1142 };
1143 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
1144 
1145 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1146 /**
1147  * struct reo_cmd_event_record: Elements to record for each reo command
1148  * @cmd_type: reo command type
1149  * @cmd_return_status: reo command post status
1150  * @timestamp: record timestamp for the reo command
1151  */
1152 struct reo_cmd_event_record {
1153 	enum hal_reo_cmd_type cmd_type;
1154 	uint8_t cmd_return_status;
1155 	uint64_t timestamp;
1156 };
1157 
1158 /**
1159  * struct reo_cmd_event_history: Account for reo cmd events
1160  * @index: record number
1161  * @cmd_record: list of records
1162  */
1163 struct reo_cmd_event_history {
1164 	qdf_atomic_t index;
1165 	struct reo_cmd_event_record cmd_record[REO_CMD_EVENT_HIST_MAX];
1166 };
1167 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
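
/*
 * Example (illustrative only, not part of the original header): recording an
 * entry in the history above. REO_CMD_EVENT_HIST_MAX is a power of two, so
 * the atomically incremented index can wrap with a mask (sketch, assuming
 * the same pattern used by the other DP histories):
 *
 *	int idx = qdf_atomic_inc_return(&hist->index) &
 *		  (REO_CMD_EVENT_HIST_MAX - 1);
 *
 *	hist->cmd_record[idx].cmd_type = cmd_type;
 *	hist->cmd_record[idx].cmd_return_status = status;
 *	hist->cmd_record[idx].timestamp = qdf_get_log_timestamp();
 */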
1168 
1169 /* SoC level data path statistics */
1170 struct dp_soc_stats {
1171 	struct {
1172 		uint32_t added;
1173 		uint32_t deleted;
1174 		uint32_t aged_out;
1175 		uint32_t map_err;
1176 		uint32_t ast_mismatch;
1177 	} ast;
1178 
1179 	struct {
1180 		uint32_t added;
1181 		uint32_t deleted;
1182 	} mec;
1183 
1184 	/* SOC level TX stats */
1185 	struct {
1186 		/* Total packets transmitted */
1187 		struct cdp_pkt_info egress[MAX_TCL_DATA_RINGS];
1188 		/* Enqueues per tcl ring */
1189 		uint32_t tcl_enq[MAX_TCL_DATA_RINGS];
1190 		/* packets dropped on tx because of no peer */
1191 		struct cdp_pkt_info tx_invalid_peer;
1192 		/* descriptors in each tcl ring */
1193 		uint32_t tcl_ring_full[MAX_TCL_DATA_RINGS];
1194 		/* Descriptors in use at soc */
1195 		uint32_t desc_in_use;
1196 		/* tqm_release_reason == FW removed */
1197 		uint32_t dropped_fw_removed;
1198 		/* tx completion release_src != TQM or FW */
1199 		uint32_t invalid_release_source;
1200 		/* TX descriptor from completion ring Desc is not valid */
1201 		uint32_t invalid_tx_comp_desc;
1202 		/* tx completion wbm_internal_error */
1203 		uint32_t wbm_internal_error[MAX_WBM_INT_ERROR_REASONS];
1204 		/* tx completion non_wbm_internal_error */
1205 		uint32_t non_wbm_internal_err;
1206 		/* TX Comp loop packet limit hit */
1207 		uint32_t tx_comp_loop_pkt_limit_hit;
1208 		/* Head pointer Out of sync at the end of dp_tx_comp_handler */
1209 		uint32_t hp_oos2;
1210 		/* tx desc freed as part of vdev detach */
1211 		uint32_t tx_comp_exception;
1212 		/* TQM drops after/during peer delete */
1213 		uint64_t tqm_drop_no_peer;
1214 		/* Number of tx completions reaped per WBM2SW release ring */
1215 		uint32_t tx_comp[MAX_TCL_DATA_RINGS];
1216 		/* Number of tx completions force freed */
1217 		uint32_t tx_comp_force_freed;
1218 		/* Tx completion ring near full */
1219 		uint32_t near_full;
1220 		/* Tx drops with buffer src as HAL_TX_COMP_RELEASE_SOURCE_FW */
1221 		uint32_t fw2wbm_tx_drop;
1222 	} tx;
1223 
1224 	/* SOC level RX stats */
1225 	struct {
1226 		/* Total rx packets count */
1227 		struct cdp_pkt_info ingress;
1228 		/* Rx errors */
1229 		/* Total Packets in Rx Error ring */
1230 		uint32_t err_ring_pkts;
1231 		/* No of Fragments */
1232 		uint32_t rx_frags;
1233 		/* No of incomplete fragments in waitlist */
1234 		uint32_t rx_frag_wait;
1235 		/* Fragments dropped due to errors */
1236 		uint32_t rx_frag_err;
1237 		/* Fragments received OOR causing sequence num mismatch */
1238 		uint32_t rx_frag_oor;
1239 		/* Fragments dropped due to len errors in skb */
1240 		uint32_t rx_frag_err_len_error;
1241 		/* Fragments dropped due to no peer found */
1242 		uint32_t rx_frag_err_no_peer;
1243 		/* No of reinjected packets */
1244 		uint32_t reo_reinject;
1245 		/* Reap loop packet limit hit */
1246 		uint32_t reap_loop_pkt_limit_hit;
1247 		/* Head pointer Out of sync at the end of dp_rx_process */
1248 		uint32_t hp_oos2;
1249 		/* Rx ring near full */
1250 		uint32_t near_full;
1251 		/* Break ring reaping as not all scattered msdu received */
1252 		uint32_t msdu_scatter_wait_break;
1253 		/* Number of bar frames received */
1254 		uint32_t bar_frame;
1255 		/* Number of frames routed from rxdma */
1256 		uint32_t rxdma2rel_route_drop;
1257 		/* Number of frames routed from reo */
1258 		uint32_t reo2rel_route_drop;
1259 		uint64_t fast_recycled;
1260 		/* Number of hw stats requested */
1261 		uint32_t rx_hw_stats_requested;
1262 		/* Number of hw stats request timeout */
1263 		uint32_t rx_hw_stats_timeout;
1264 
1265 		struct {
1266 			/* Invalid RBM error count */
1267 			uint32_t invalid_rbm;
1268 			/* Invalid VDEV Error count */
1269 			uint32_t invalid_vdev;
1270 			/* Invalid PDEV error count */
1271 			uint32_t invalid_pdev;
1272 
1273 			/* Packets delivered to stack that have no related peer */
1274 			uint32_t pkt_delivered_no_peer;
1275 			/* Defrag peer uninit error count */
1276 			uint32_t defrag_peer_uninit;
1277 			/* Invalid sa_idx or da_idx */
1278 			uint32_t invalid_sa_da_idx;
1279 			/* MSDU DONE failures */
1280 			uint32_t msdu_done_fail;
1281 			/* Invalid PEER Error count */
1282 			struct cdp_pkt_info rx_invalid_peer;
1283 			/* Invalid PEER ID count */
1284 			struct cdp_pkt_info rx_invalid_peer_id;
1285 			/* Invalid packet length */
1286 			struct cdp_pkt_info rx_invalid_pkt_len;
1287 			/* HAL ring access Fail error count */
1288 			uint32_t hal_ring_access_fail;
1289 			/* HAL ring access full Fail error count */
1290 			uint32_t hal_ring_access_full_fail;
1291 			/* RX DMA error count */
1292 			uint32_t rxdma_error[HAL_RXDMA_ERR_MAX];
1293 			/* RX REO DEST Desc Invalid Magic count */
1294 			uint32_t rx_desc_invalid_magic;
1295 			/* REO Error count */
1296 			uint32_t reo_error[HAL_REO_ERR_MAX];
1297 			/* HAL REO ERR Count */
1298 			uint32_t hal_reo_error[MAX_REO_DEST_RINGS];
1299 			/* HAL REO DEST Duplicate count */
1300 			uint32_t hal_reo_dest_dup;
1301 			/* HAL WBM RELEASE Duplicate count */
1302 			uint32_t hal_wbm_rel_dup;
1303 			/* HAL RXDMA error Duplicate count */
1304 			uint32_t hal_rxdma_err_dup;
1305 			/* ipa smmu map duplicate count */
1306 			uint32_t ipa_smmu_map_dup;
1307 			/* ipa smmu unmap duplicate count */
1308 			uint32_t ipa_smmu_unmap_dup;
1309 			/* ipa smmu unmap while ipa pipes is disabled */
1310 			uint32_t ipa_unmap_no_pipe;
1311 			/* REO cmd send fail/requeue count */
1312 			uint32_t reo_cmd_send_fail;
1313 			/* REO cmd send drain count */
1314 			uint32_t reo_cmd_send_drain;
1315 			/* RX msdu drop count due to scatter */
1316 			uint32_t scatter_msdu;
1317 			/* RX msdu drop count due to invalid cookie */
1318 			uint32_t invalid_cookie;
1319 			/* Count of stale cookie read in RX path */
1320 			uint32_t stale_cookie;
1321 			/* Delba sent count due to RX 2k jump */
1322 			uint32_t rx_2k_jump_delba_sent;
1323 			/* RX 2k jump msdu indicated to stack count */
1324 			uint32_t rx_2k_jump_to_stack;
1325 			/* RX 2k jump msdu dropped count */
1326 			uint32_t rx_2k_jump_drop;
1327 			/* REO ERR msdu buffer received */
1328 			uint32_t reo_err_msdu_buf_rcved;
1329 			/* REO ERR msdu buffer with invalid cookie received */
1330 			uint32_t reo_err_msdu_buf_invalid_cookie;
1331 			/* REO OOR msdu drop count */
1332 			uint32_t reo_err_oor_drop;
1333 			/* REO OOR msdu indicated to stack count */
1334 			uint32_t reo_err_oor_to_stack;
1335 			/* REO OOR scattered msdu count */
1336 			uint32_t reo_err_oor_sg_count;
1337 			/* RX msdu rejected count on delivery to vdev stack_fn */
1338 			uint32_t rejected;
1339 			/* Incorrect msdu count in MPDU desc info */
1340 			uint32_t msdu_count_mismatch;
1341 			/* RX raw frame dropped count */
1342 			uint32_t raw_frm_drop;
1343 			/* Stale link desc cookie count */
1344 			uint32_t invalid_link_cookie;
1345 			/* Nbuf sanity failure */
1346 			uint32_t nbuf_sanity_fail;
1347 			/* Duplicate link desc refilled */
1348 			uint32_t dup_refill_link_desc;
1349 			/* Incorrect msdu continuation bit in MSDU desc */
1350 			uint32_t msdu_continuation_err;
1351 			/* count of start sequence (ssn) updates */
1352 			uint32_t ssn_update_count;
1353 			/* count of bar handling fail */
1354 			uint32_t bar_handle_fail_count;
1355 			/* EAPOL drop count in intrabss scenario */
1356 			uint32_t intrabss_eapol_drop;
1357 			/* PN check failed for 2K-jump or OOR error */
1358 			uint32_t pn_in_dest_check_fail;
1359 			/* MSDU len err count */
1360 			uint32_t msdu_len_err;
1361 			/* Rx flush count */
1362 			uint32_t rx_flush_count;
1363 			/* Rx invalid tid count */
1364 			uint32_t rx_invalid_tid_err;
1365 			/* Invalid address1 in defrag path */
1366 			uint32_t defrag_ad1_invalid;
1367 			/* decrypt error drop */
1368 			uint32_t decrypt_err_drop;
1369 		} err;
1370 
1371 		/* packet count per core - per ring */
1372 		uint64_t ring_packets[NR_CPUS][MAX_REO_DEST_RINGS];
1373 	} rx;
1374 
1375 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1376 	struct reo_cmd_event_history cmd_event_history;
1377 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1378 };
1379 
1380 union dp_align_mac_addr {
1381 	uint8_t raw[QDF_MAC_ADDR_SIZE];
1382 	struct {
1383 		uint16_t bytes_ab;
1384 		uint16_t bytes_cd;
1385 		uint16_t bytes_ef;
1386 	} align2;
1387 	struct {
1388 		uint32_t bytes_abcd;
1389 		uint16_t bytes_ef;
1390 	} align4;
1391 	struct __attribute__((__packed__)) {
1392 		uint16_t bytes_ab;
1393 		uint32_t bytes_cdef;
1394 	} align4_2;
1395 };
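
/*
 * Example (illustrative only, not part of the original header): because the
 * union overlays the 6-byte MAC address with aligned integer views, two
 * addresses can be compared with two loads instead of a byte-wise memcmp:
 *
 *	bool match = (a->align4.bytes_abcd == b->align4.bytes_abcd) &&
 *		     (a->align4.bytes_ef == b->align4.bytes_ef);
 */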
1396 
1397 /**
1398  * struct dp_ast_free_cb_params - HMWDS free callback cookie
1399  * @mac_addr: ast mac address
1400  * @peer_mac_addr: mac address of peer
1401  * @type: ast entry type
1402  * @vdev_id: vdev_id
1403  * @flags: ast flags
1404  */
1405 struct dp_ast_free_cb_params {
1406 	union dp_align_mac_addr mac_addr;
1407 	union dp_align_mac_addr peer_mac_addr;
1408 	enum cdp_txrx_ast_entry_type type;
1409 	uint8_t vdev_id;
1410 	uint32_t flags;
1411 };
1412 
1413 /**
1414  * struct dp_ast_entry - AST entry
1415  *
1416  * @ast_idx: Hardware AST Index
1417  * @peer_id: Next Hop peer_id (for non-WDS nodes, this will point to the
1418  *           peer associated with this MAC address)
1419  * @mac_addr:  MAC Address for this AST entry
1420  * @next_hop: Set to 1 if this is for a WDS node
1421  * @is_active: flag to indicate active data traffic on this node
1422  *             (used for aging out/expiry)
1423  * @ase_list_elem: node in peer AST list
1424  * @is_bss: flag to indicate if entry corresponds to bss peer
1425  * @is_mapped: flag to indicate that we have mapped the AST entry
1426  *             in ast_table
1427  * @pdev_id: pdev ID
1428  * @vdev_id: vdev ID
1429  * @ast_hash_value: hash value in HW
1430  * @ref_cnt: reference count
1431  * @type: flag to indicate type of the entry(static/WDS/MEC)
1432  * @delete_in_progress: Flag to indicate that a delete command was sent to FW
1433  *                      and the host is waiting for the response from FW
1434  * @callback: ast free/unmap callback
1435  * @cookie: argument to callback
1436  * @hash_list_elem: node in soc AST hash list (mac address used as hash)
1437  */
1438 struct dp_ast_entry {
1439 	uint16_t ast_idx;
1440 	uint16_t peer_id;
1441 	union dp_align_mac_addr mac_addr;
1442 	bool next_hop;
1443 	bool is_active;
1444 	bool is_mapped;
1445 	uint8_t pdev_id;
1446 	uint8_t vdev_id;
1447 	uint16_t ast_hash_value;
1448 	qdf_atomic_t ref_cnt;
1449 	enum cdp_txrx_ast_entry_type type;
1450 	bool delete_in_progress;
1451 	txrx_ast_free_cb callback;
1452 	void *cookie;
1453 	TAILQ_ENTRY(dp_ast_entry) ase_list_elem;
1454 	TAILQ_ENTRY(dp_ast_entry) hash_list_elem;
1455 };
1456 
1457 /**
1458  * struct dp_mec_entry - MEC entry
1459  *
1460  * @mac_addr:  MAC Address for this MEC entry
1461  * @is_active: flag to indicate active data traffic on this node
1462  *             (used for aging out/expiry)
1463  * @pdev_id: pdev ID
1464  * @vdev_id: vdev ID
1465  * @hash_list_elem: node in soc MEC hash list (mac address used as hash)
1466  */
1467 struct dp_mec_entry {
1468 	union dp_align_mac_addr mac_addr;
1469 	bool is_active;
1470 	uint8_t pdev_id;
1471 	uint8_t vdev_id;
1472 
1473 	TAILQ_ENTRY(dp_mec_entry) hash_list_elem;
1474 };
1475 
1476 /* SOC level htt stats */
1477 struct htt_t2h_stats {
1478 	/* lock to protect htt_stats_msg update */
1479 	qdf_spinlock_t lock;
1480 
1481 	/* work queue to process htt stats */
1482 	qdf_work_t work;
1483 
1484 	/* T2H Ext stats message queue */
1485 	qdf_nbuf_queue_t msg;
1486 
1487 	/* number of completed stats in htt_stats_msg */
1488 	uint32_t num_stats;
1489 };
1490 
1491 struct link_desc_bank {
1492 	void *base_vaddr_unaligned;
1493 	void *base_vaddr;
1494 	qdf_dma_addr_t base_paddr_unaligned;
1495 	qdf_dma_addr_t base_paddr;
1496 	uint32_t size;
1497 };
1498 
1499 struct rx_buff_pool {
1500 	qdf_nbuf_queue_head_t emerg_nbuf_q;
1501 	uint32_t nbuf_fail_cnt;
1502 	bool is_initialized;
1503 };
1504 
1505 struct rx_refill_buff_pool {
1506 	bool is_initialized;
1507 	uint16_t head;
1508 	uint16_t tail;
1509 	struct dp_pdev *dp_pdev;
1510 	uint16_t max_bufq_len;
1511 	qdf_nbuf_t buf_elem[2048];
1512 };
1513 
1514 #ifdef DP_TX_HW_DESC_HISTORY
1515 #define DP_TX_HW_DESC_HIST_MAX 6144
1516 #define DP_TX_HW_DESC_HIST_PER_SLOT_MAX 2048
1517 #define DP_TX_HW_DESC_HIST_MAX_SLOTS 3
1518 #define DP_TX_HW_DESC_HIST_SLOT_SHIFT 11
1519 
1520 struct dp_tx_hw_desc_evt {
1521 	uint8_t tcl_desc[HAL_TX_DESC_LEN_BYTES];
1522 	uint8_t tcl_ring_id;
1523 	uint64_t posted;
1524 	uint32_t hp;
1525 	uint32_t tp;
1526 };
1527 
1528 /* struct dp_tx_hw_desc_history - TX HW desc history
1529  * @index: Index where the last entry is written
1530  * @entry: history entries
1531  */
1532 struct dp_tx_hw_desc_history {
1533 	qdf_atomic_t index;
1534 	uint16_t num_entries_per_slot;
1535 	uint16_t allocated;
1536 	struct dp_tx_hw_desc_evt *entry[DP_TX_HW_DESC_HIST_MAX_SLOTS];
1537 };
1538 #endif
1539 
1540 /**
1541  * enum dp_mon_status_process_event - Events for monitor status buffer record
1542  * @DP_MON_STATUS_BUF_REAP: Monitor status buffer is reaped from ring
1543  * @DP_MON_STATUS_BUF_ENQUEUE: Status buffer is enqueued to local queue
1544  * @DP_MON_STATUS_BUF_DEQUEUE: Status buffer is dequeued from local queue
1545  */
1546 enum dp_mon_status_process_event {
1547 	DP_MON_STATUS_BUF_REAP,
1548 	DP_MON_STATUS_BUF_ENQUEUE,
1549 	DP_MON_STATUS_BUF_DEQUEUE,
1550 };
1551 
1552 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
1553 #define DP_MON_STATUS_HIST_MAX	2048
1554 
1555 /**
1556  * struct dp_mon_stat_info_record - monitor stat ring buffer info
1557  * @hbi: HW ring buffer info
1558  * @timestamp: timestamp when this entry was recorded
1559  * @event: monitor status buffer processing event
1560  * @rx_desc: RX descriptor corresponding to the received buffer
1561  * @nbuf: buffer attached to rx_desc, if event is REAP, else the buffer
1562  *	  which was enqueued or dequeued.
1563  * @rx_desc_nbuf_data: nbuf data pointer.
1564  */
1565 struct dp_mon_stat_info_record {
1566 	struct hal_buf_info hbi;
1567 	uint64_t timestamp;
1568 	enum dp_mon_status_process_event event;
1569 	void *rx_desc;
1570 	qdf_nbuf_t nbuf;
1571 	uint8_t *rx_desc_nbuf_data;
1572 };
1573 
1574 /* struct dp_mon_status_ring_history - monitor status ring history
1575  * @index: Index where the last entry is written
1576  * @entry: history entries
1577  */
1578 struct dp_mon_status_ring_history {
1579 	qdf_atomic_t index;
1580 	struct dp_mon_stat_info_record entry[DP_MON_STATUS_HIST_MAX];
1581 };
1582 #endif
1583 
1584 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1585 /*
1586  * The logic used to get the current index of these histories depends on
1587  * the size being a power of 2.
1588  */
1589 #define DP_RX_HIST_MAX 2048
1590 #define DP_RX_ERR_HIST_MAX 2048
1591 #define DP_RX_REINJECT_HIST_MAX 1024
1592 #define DP_RX_REFILL_HIST_MAX 2048
1593 
1594 QDF_COMPILE_TIME_ASSERT(rx_history_size,
1595 			(DP_RX_HIST_MAX &
1596 			 (DP_RX_HIST_MAX - 1)) == 0);
1597 QDF_COMPILE_TIME_ASSERT(rx_err_history_size,
1598 			(DP_RX_ERR_HIST_MAX &
1599 			 (DP_RX_ERR_HIST_MAX - 1)) == 0);
1600 QDF_COMPILE_TIME_ASSERT(rx_reinject_history_size,
1601 			(DP_RX_REINJECT_HIST_MAX &
1602 			 (DP_RX_REINJECT_HIST_MAX - 1)) == 0);
1603 QDF_COMPILE_TIME_ASSERT(rx_refill_history_size,
1604 			(DP_RX_REFILL_HIST_MAX &
1605 			(DP_RX_REFILL_HIST_MAX - 1)) == 0);
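
/*
 * Example (illustrative only, not part of the original header): with a
 * power-of-two depth the write index wraps with a bitwise AND instead of a
 * modulo, which is what the compile-time asserts above guarantee (sketch,
 * variable names are hypothetical):
 *
 *	uint32_t idx = qdf_atomic_inc_return(&history->index) &
 *		       (DP_RX_HIST_MAX - 1);
 *
 *	history->entry[idx].timestamp = qdf_get_log_timestamp();
 */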
1606 
1607 
1608 /**
1609  * struct dp_buf_info_record - ring buffer info
1610  * @hbi: HW ring buffer info
1611  * @timestamp: timestamp when this entry was recorded
1612  */
1613 struct dp_buf_info_record {
1614 	struct hal_buf_info hbi;
1615 	uint64_t timestamp;
1616 };
1617 
1618 /**
1619  * struct dp_refill_info_record - ring refill buffer info
1620  * @hp: HP value after refill
1621  * @tp: cached tail value during refill
1622  * @num_req: number of buffers requested to refill
1623  * @num_refill: number of buffers refilled to ring
1624  * @timestamp: timestamp when this entry was recorded
1625  */
1626 struct dp_refill_info_record {
1627 	uint32_t hp;
1628 	uint32_t tp;
1629 	uint32_t num_req;
1630 	uint32_t num_refill;
1631 	uint64_t timestamp;
1632 };
1633 
1634 /**
1635  * struct dp_rx_history - rx ring history
1636  * @index: Index where the last entry is written
1637  * @entry: history entries
1638  */
1639 struct dp_rx_history {
1640 	qdf_atomic_t index;
1641 	struct dp_buf_info_record entry[DP_RX_HIST_MAX];
1642 };
1643 
1644 /**
1645  * struct dp_rx_err_history - rx err ring history
1646  * @index: Index where the last entry is written
1647  * @entry: history entries
1648  */
1649 struct dp_rx_err_history {
1650 	qdf_atomic_t index;
1651 	struct dp_buf_info_record entry[DP_RX_ERR_HIST_MAX];
1652 };
1653 
1654 /**
1655  * struct dp_rx_reinject_history - rx reinject ring history
1656  * @index: Index where the last entry is written
1657  * @entry: history entries
1658  */
1659 struct dp_rx_reinject_history {
1660 	qdf_atomic_t index;
1661 	struct dp_buf_info_record entry[DP_RX_REINJECT_HIST_MAX];
1662 };
1663 
1664 /**
1665  * struct dp_rx_refill_history - rx buf refill history
1666  * @index: Index where the last entry is written
1667  * @entry: history entries
1668  */
1669 struct dp_rx_refill_history {
1670 	qdf_atomic_t index;
1671 	struct dp_refill_info_record entry[DP_RX_REFILL_HIST_MAX];
1672 };
1673 
1674 #endif
1675 
1676 /**
1677  * enum dp_cfg_event_type - Datapath config events type
1678  * @DP_CFG_EVENT_VDEV_ATTACH: vdev attach
1679  * @DP_CFG_EVENT_VDEV_DETACH: vdev detach
1680  * @DP_CFG_EVENT_VDEV_UNREF_DEL: vdev memory free after last ref is released
1681  * @DP_CFG_EVENT_PEER_CREATE: peer create
1682  * @DP_CFG_EVENT_PEER_DELETE: peer delete
1683  * @DP_CFG_EVENT_PEER_UNREF_DEL: peer memory free after last ref is released
1684  * @DP_CFG_EVENT_PEER_SETUP: peer setup
1685  * @DP_CFG_EVENT_MLO_ADD_LINK: add link peer to mld peer
1686  * @DP_CFG_EVENT_MLO_DEL_LINK: delete link peer from mld peer
1687  * @DP_CFG_EVENT_MLO_SETUP: MLO peer setup
1688  * @DP_CFG_EVENT_MLO_SETUP_VDEV_UPDATE: MLD peer vdev update
1689  * @DP_CFG_EVENT_PEER_MAP: peer map
1690  * @DP_CFG_EVENT_PEER_UNMAP: peer unmap
1691  * @DP_CFG_EVENT_MLO_PEER_MAP: MLD peer map
1692  * @DP_CFG_EVENT_MLO_PEER_UNMAP: MLD peer unmap
1693  */
1694 enum dp_cfg_event_type {
1695 	DP_CFG_EVENT_VDEV_ATTACH,
1696 	DP_CFG_EVENT_VDEV_DETACH,
1697 	DP_CFG_EVENT_VDEV_UNREF_DEL,
1698 	DP_CFG_EVENT_PEER_CREATE,
1699 	DP_CFG_EVENT_PEER_DELETE,
1700 	DP_CFG_EVENT_PEER_UNREF_DEL,
1701 	DP_CFG_EVENT_PEER_SETUP,
1702 	DP_CFG_EVENT_MLO_ADD_LINK,
1703 	DP_CFG_EVENT_MLO_DEL_LINK,
1704 	DP_CFG_EVENT_MLO_SETUP,
1705 	DP_CFG_EVENT_MLO_SETUP_VDEV_UPDATE,
1706 	DP_CFG_EVENT_PEER_MAP,
1707 	DP_CFG_EVENT_PEER_UNMAP,
1708 	DP_CFG_EVENT_MLO_PEER_MAP,
1709 	DP_CFG_EVENT_MLO_PEER_UNMAP,
1710 };
1711 
1712 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY
1713 /* Size must be a power of 2, for bitwise index rotation */
1714 #define DP_CFG_EVT_HISTORY_SIZE 0x800
1715 #define DP_CFG_EVT_HIST_PER_SLOT_MAX 256
1716 #define DP_CFG_EVT_HIST_MAX_SLOTS 8
1717 #define DP_CFG_EVT_HIST_SLOT_SHIFT 8
1718 
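/*
 * The 0x800 (2048) entry history is split across DP_CFG_EVT_HIST_MAX_SLOTS
 * pointer slots of DP_CFG_EVT_HIST_PER_SLOT_MAX entries each, so a wrapped
 * global index decomposes into a slot and an offset with shifts and masks.
 * A hedged sketch of the lookup (the actual recording helpers live
 * elsewhere in the DP code):
 *
 *	uint32_t idx  = qdf_atomic_inc_return(&hist->index) &
 *			(DP_CFG_EVT_HISTORY_SIZE - 1);
 *	uint32_t slot = idx >> DP_CFG_EVT_HIST_SLOT_SHIFT;
 *	uint32_t off  = idx & (DP_CFG_EVT_HIST_PER_SLOT_MAX - 1);
 *	struct dp_cfg_event *evt = &hist->entry[slot][off];
 *
 * The TX TCL/completion histories further below use the same slotted layout
 * with their own SIZE/PER_SLOT/SHIFT values.
 */
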
1719 /**
1720  * struct dp_vdev_attach_detach_desc - vdev ops descriptor
1721  * @vdev: DP vdev handle
1722  * @mac_addr: vdev mac address
1723  * @vdev_id: vdev id
1724  * @ref_count: vdev ref count
1725  */
1726 struct dp_vdev_attach_detach_desc {
1727 	struct dp_vdev *vdev;
1728 	union dp_align_mac_addr mac_addr;
1729 	uint8_t vdev_id;
1730 	int32_t ref_count;
1731 };
1732 
1733 /**
1734  * struct dp_peer_cmn_ops_desc - peer events descriptor
1735  * @vdev_id: vdev_id of the vdev on which peer exists
1736  * @is_reuse: indicates if it is a peer reuse case during peer create
1737  * @peer: DP peer handle
1738  * @vdev: DP vdev handle on which peer exists
1739  * @mac_addr: peer mac address
1740  * @vdev_mac_addr: vdev mac address
1741  * @vdev_ref_count: vdev ref count
1742  * @peer_ref_count: peer ref count
1743  */
1744 struct dp_peer_cmn_ops_desc {
1745 	uint8_t vdev_id : 5,
1746 		is_reuse : 1;
1747 	struct dp_peer *peer;
1748 	struct dp_vdev *vdev;
1749 	union dp_align_mac_addr mac_addr;
1750 	union dp_align_mac_addr vdev_mac_addr;
1751 	int32_t vdev_ref_count;
1752 	int32_t peer_ref_count;
1753 };
1754 
1755 /**
1756  * struct dp_mlo_add_del_link_desc - MLO add/del link event descriptor
1757  * @idx: index at which link peer got added in MLD peer's list
1758  * @num_links: num links added in the MLD peer's list
1759  * @action_result: add/del was success or not
1760  * @reserved: reserved bit
1761  * @link_peer: link peer handle
1762  * @mld_peer: MLD peer handle
1763  * @link_mac_addr: link peer mac address
1764  * @mld_mac_addr: MLD peer mac address
1765  */
1766 struct dp_mlo_add_del_link_desc {
1767 	uint8_t idx : 3,
1768 		num_links : 3,
1769 		action_result : 1,
1770 		reserved : 1;
1771 	struct dp_peer *link_peer;
1772 	struct dp_peer *mld_peer;
1773 	union dp_align_mac_addr link_mac_addr;
1774 	union dp_align_mac_addr mld_mac_addr;
1775 };
1776 
1777 /**
1778  * struct dp_mlo_setup_vdev_update_desc - MLD peer vdev update event desc
1779  * @mld_peer: MLD peer handle
1780  * @prev_vdev: previous vdev handle
1781  * @new_vdev: new vdev handle
1782  */
1783 struct dp_mlo_setup_vdev_update_desc {
1784 	struct dp_peer *mld_peer;
1785 	struct dp_vdev *prev_vdev;
1786 	struct dp_vdev *new_vdev;
1787 };
1788 
1789 /**
1790  * struct dp_rx_peer_map_unmap_desc - peer map/unmap event descriptor
1791  * @peer_id: peer id
1792  * @ml_peer_id: ML peer id, if it is an MLD peer
1793  * @hw_peer_id: hw peer id
1794  * @vdev_id: vdev id of the peer
1795  * @is_ml_peer: is this MLD peer
1796  * @mac_addr: mac address of the peer
1797  * @peer: peer handle
1798  */
1799 struct dp_rx_peer_map_unmap_desc {
1800 	uint16_t peer_id;
1801 	uint16_t ml_peer_id;
1802 	uint16_t hw_peer_id;
1803 	uint8_t vdev_id;
1804 	uint8_t is_ml_peer;
1805 	union dp_align_mac_addr mac_addr;
1806 	struct dp_peer *peer;
1807 };
1808 
1809 /**
1810  * struct dp_peer_setup_desc - peer setup event descriptor
1811  * @peer: DP peer handle
1812  * @vdev: vdev handle on which peer exists
1813  * @vdev_ref_count: vdev ref count
1814  * @mac_addr: peer mac address
1815  * @mld_mac_addr: MLD mac address
1816  * @is_first_link: is the current link the first link created
1817  * @is_primary_link: is the current link primary link
1818  * @vdev_id: vdev id of the vdev on which the current link peer exists
1819  * @reserved: reserved bit
1820  */
1821 struct dp_peer_setup_desc {
1822 	struct dp_peer *peer;
1823 	struct dp_vdev *vdev;
1824 	int32_t vdev_ref_count;
1825 	union dp_align_mac_addr mac_addr;
1826 	union dp_align_mac_addr mld_mac_addr;
1827 	uint8_t is_first_link : 1,
1828 		is_primary_link : 1,
1829 		vdev_id : 5,
1830 		reserved : 1;
1831 };
1832 
1833 /**
1834  * union dp_cfg_event_desc - DP config event descriptor
1835  * @vdev_evt: vdev events desc
1836  * @peer_cmn_evt: common peer events desc
1837  * @peer_setup_evt: peer setup event desc
1838  * @mlo_link_delink_evt: MLO link/delink event desc
1839  * @mlo_setup_vdev_update: MLD peer vdev update event desc
1840  * @peer_map_unmap_evt: peer map/unmap event desc
1841  */
1842 union dp_cfg_event_desc {
1843 	struct dp_vdev_attach_detach_desc vdev_evt;
1844 	struct dp_peer_cmn_ops_desc peer_cmn_evt;
1845 	struct dp_peer_setup_desc peer_setup_evt;
1846 	struct dp_mlo_add_del_link_desc mlo_link_delink_evt;
1847 	struct dp_mlo_setup_vdev_update_desc mlo_setup_vdev_update;
1848 	struct dp_rx_peer_map_unmap_desc peer_map_unmap_evt;
1849 };
1850 
1851 /**
1852  * struct dp_cfg_event - DP config event descriptor
1853  * @timestamp: timestamp at which event was recorded
1854  * @type: event type
1855  * @event_desc: event descriptor
1856  */
1857 struct dp_cfg_event {
1858 	uint64_t timestamp;
1859 	enum dp_cfg_event_type type;
1860 	union dp_cfg_event_desc event_desc;
1861 };
1862 
1863 /**
1864  * struct dp_cfg_event_history - DP config event history
1865  * @index: current index
1866  * @num_entries_per_slot: number of entries per slot
1867  * @allocated: Is the history allocated or not
1868  * @entry: event history descriptors
1869  */
1870 struct dp_cfg_event_history {
1871 	qdf_atomic_t index;
1872 	uint16_t num_entries_per_slot;
1873 	uint16_t allocated;
1874 	struct dp_cfg_event *entry[DP_CFG_EVT_HIST_MAX_SLOTS];
1875 };
1876 #endif
1877 
1878 enum dp_tx_event_type {
1879 	DP_TX_DESC_INVAL_EVT = 0,
1880 	DP_TX_DESC_MAP,
1881 	DP_TX_DESC_COOKIE,
1882 	DP_TX_DESC_FLUSH,
1883 	DP_TX_DESC_UNMAP,
1884 	DP_TX_COMP_UNMAP,
1885 	DP_TX_COMP_UNMAP_ERR,
1886 	DP_TX_COMP_MSDU_EXT,
1887 };
1888 
1889 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
1890 /* Size must be a power of 2, for bitwise index rotation */
1891 #define DP_TX_TCL_HISTORY_SIZE 0x4000
1892 #define DP_TX_TCL_HIST_PER_SLOT_MAX 2048
1893 #define DP_TX_TCL_HIST_MAX_SLOTS 8
1894 #define DP_TX_TCL_HIST_SLOT_SHIFT 11
1895 
1896 /* Size must be a power of 2, for bitwise index rotation */
1897 #define DP_TX_COMP_HISTORY_SIZE 0x4000
1898 #define DP_TX_COMP_HIST_PER_SLOT_MAX 2048
1899 #define DP_TX_COMP_HIST_MAX_SLOTS 8
1900 #define DP_TX_COMP_HIST_SLOT_SHIFT 11
1901 
1902 struct dp_tx_desc_event {
1903 	qdf_nbuf_t skb;
1904 	dma_addr_t paddr;
1905 	uint32_t sw_cookie;
1906 	enum dp_tx_event_type type;
1907 	uint64_t ts;
1908 };
1909 
1910 struct dp_tx_tcl_history {
1911 	qdf_atomic_t index;
1912 	uint16_t num_entries_per_slot;
1913 	uint16_t allocated;
1914 	struct dp_tx_desc_event *entry[DP_TX_TCL_HIST_MAX_SLOTS];
1915 };
1916 
1917 struct dp_tx_comp_history {
1918 	qdf_atomic_t index;
1919 	uint16_t num_entries_per_slot;
1920 	uint16_t allocated;
1921 	struct dp_tx_desc_event *entry[DP_TX_COMP_HIST_MAX_SLOTS];
1922 };
1923 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
1924 
1925 /* structure to record recent operation related variable */
1926 struct dp_last_op_info {
1927 	/* last link desc buf info through WBM release ring */
1928 	struct hal_buf_info wbm_rel_link_desc;
1929 	/* last link desc buf info through REO reinject ring */
1930 	struct hal_buf_info reo_reinject_link_desc;
1931 };
1932 
1933 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1934 
1935 /**
1936  * struct dp_swlm_tcl_data - params for tcl register write coalescing
1937  *			     decision making
1938  * @nbuf: TX packet
1939  * @tid: tid for transmitting the current packet
1940  * @num_ll_connections: Number of low latency connections on this vdev
1941  * @ring_id: TCL ring id
1942  * @pkt_len: Packet length
1943  *
1944  * This structure contains the information required by the software
1945  * latency manager to decide on whether to coalesce the current TCL
1946  * register write or not.
1947  */
1948 struct dp_swlm_tcl_data {
1949 	qdf_nbuf_t nbuf;
1950 	uint8_t tid;
1951 	uint8_t num_ll_connections;
1952 	uint8_t ring_id;
1953 	uint32_t pkt_len;
1954 };
1955 
1956 /**
1957  * union swlm_data - SWLM query data
1958  * @tcl_data: data for TCL query in SWLM
1959  */
1960 union swlm_data {
1961 	struct dp_swlm_tcl_data *tcl_data;
1962 };
1963 
1964 /**
1965  * struct dp_swlm_ops - SWLM ops
1966  * @tcl_wr_coalesce_check: handler to check if the current TCL register
1967  *			   write can be coalesced or not
1968  */
1969 struct dp_swlm_ops {
1970 	int (*tcl_wr_coalesce_check)(struct dp_soc *soc,
1971 				     struct dp_swlm_tcl_data *tcl_data);
1972 };
1973 
1974 /**
1975  * struct dp_swlm_stats - Stats for Software Latency manager.
1976  * @tcl: TCL stats
1977  * @tcl.timer_flush_success: Num TCL HP writes success from timer context
1978  * @tcl.timer_flush_fail: Num TCL HP writes failure from timer context
1979  * @tcl.tid_fail: Num TCL register write coalescing skips, since the pkt
1980  *		 was being transmitted on a TID above coalescing threshold
1981  * @tcl.sp_frames: Num TCL register write coalescing skips, since the pkt
1982  *		  being transmitted was a special frame
1983  * @tcl.ll_connection: Num TCL register write coalescing skips, since the
1984  *		       vdev has low latency connections
1985  * @tcl.bytes_thresh_reached: Num TCL HP writes flush after the coalescing
1986  *			     bytes threshold was reached
1987  * @tcl.time_thresh_reached: Num TCL HP writes flush after the coalescing
1988  *			    session time expired
1989  * @tcl.tput_criteria_fail: Num TCL HP writes coalescing fails, since the
1990  *			   throughput did not meet session threshold
1991  * @tcl.coalesce_success: Num of TCL HP writes coalesced successfully.
1992  * @tcl.coalesce_fail: Num of TCL HP write coalesces that failed
1993  */
1994 struct dp_swlm_stats {
1995 	struct {
1996 		uint32_t timer_flush_success;
1997 		uint32_t timer_flush_fail;
1998 		uint32_t tid_fail;
1999 		uint32_t sp_frames;
2000 		uint32_t ll_connection;
2001 		uint32_t bytes_thresh_reached;
2002 		uint32_t time_thresh_reached;
2003 		uint32_t tput_criteria_fail;
2004 		uint32_t coalesce_success;
2005 		uint32_t coalesce_fail;
2006 	} tcl[MAX_TCL_DATA_RINGS];
2007 };
2008 
2009 /**
2010  * struct dp_swlm_tcl_params - Per TCL ring parameters for different modules
2011  *			       in the Software latency manager.
2012  * @soc: DP soc reference
2013  * @ring_id: TCL ring id
2014  * @flush_timer: Timer for flushing the coalesced TCL HP writes
2015  * @sampling_session_tx_bytes: Num bytes transmitted in the sampling time
2016  * @bytes_flush_thresh: Bytes threshold to flush the TCL HP register write
2017  * @coalesce_end_time: End timestamp for current coalescing session
2018  * @bytes_coalesced: Num bytes coalesced in the current session
2019  * @prev_tx_packets: Previous TX packets accounted
2020  * @prev_tx_bytes: Previous TX bytes accounted
2021  * @prev_rx_bytes: Previous RX bytes accounted
2022  * @expire_time: expiry time for sample
2023  * @tput_pass_cnt: threshold throughput pass counter
2024  */
2025 struct dp_swlm_tcl_params {
2026 	struct dp_soc *soc;
2027 	uint32_t ring_id;
2028 	qdf_timer_t flush_timer;
2029 	uint32_t sampling_session_tx_bytes;
2030 	uint32_t bytes_flush_thresh;
2031 	uint64_t coalesce_end_time;
2032 	uint32_t bytes_coalesced;
2033 	uint32_t prev_tx_packets;
2034 	uint32_t prev_tx_bytes;
2035 	uint32_t prev_rx_bytes;
2036 	uint64_t expire_time;
2037 	uint32_t tput_pass_cnt;
2038 };
2039 
2040 /**
2041  * struct dp_swlm_params - Parameters for different modules in the
2042  *			   Software latency manager.
2043  * @rx_traffic_thresh: Threshold for RX traffic, to begin TCL register
2044  *			   write coalescing
2045  * @tx_traffic_thresh: Threshold for TX traffic, to begin TCL register
2046  *			   write coalescing
2047  * @sampling_time: Sampling time to test the throughput threshold
2048  * @time_flush_thresh: Time threshold to flush the TCL HP register write
2049  * @tx_thresh_multiplier: Multiplier to deduce the bytes threshold after
2050  *			      which the TCL HP register is written, thereby
2051  *			      ending the coalescing.
2052  * @tx_pkt_thresh: Threshold for TX packet count, to begin TCL register
2053  *		       write coalescing
2054  * @tcl: TCL ring specific params
2055  */
2056 
2057 struct dp_swlm_params {
2058 	uint32_t rx_traffic_thresh;
2059 	uint32_t tx_traffic_thresh;
2060 	uint32_t sampling_time;
2061 	uint32_t time_flush_thresh;
2062 	uint32_t tx_thresh_multiplier;
2063 	uint32_t tx_pkt_thresh;
2064 	struct dp_swlm_tcl_params tcl[MAX_TCL_DATA_RINGS];
2065 };
2066 
2067 /**
2068  * struct dp_swlm - Software latency manager context
2069  * @ops: SWLM ops pointers
2070  * @is_enabled: SWLM enabled/disabled
2071  * @is_init: SWLM module initialized
2072  * @stats: SWLM stats
2073  * @params: SWLM SRNG params
2075  */
2076 struct dp_swlm {
2077 	struct dp_swlm_ops *ops;
2078 	uint8_t is_enabled:1,
2079 		is_init:1;
2080 	struct dp_swlm_stats stats;
2081 	struct dp_swlm_params params;
2082 };
2083 #endif
2084 
2085 #ifdef IPA_OFFLOAD
2086 /* IPA uC datapath offload Wlan Tx resources */
2087 struct ipa_dp_tx_rsc {
2088 	/* Resource info to be passed to IPA */
2089 	qdf_dma_addr_t ipa_tcl_ring_base_paddr;
2090 	void *ipa_tcl_ring_base_vaddr;
2091 	uint32_t ipa_tcl_ring_size;
2092 	qdf_dma_addr_t ipa_tcl_hp_paddr;
2093 	uint32_t alloc_tx_buf_cnt;
2094 
2095 	qdf_dma_addr_t ipa_wbm_ring_base_paddr;
2096 	void *ipa_wbm_ring_base_vaddr;
2097 	uint32_t ipa_wbm_ring_size;
2098 	qdf_dma_addr_t ipa_wbm_tp_paddr;
2099 	/* WBM2SW HP shadow paddr */
2100 	qdf_dma_addr_t ipa_wbm_hp_shadow_paddr;
2101 
2102 	/* TX buffers populated into the WBM ring */
2103 	void **tx_buf_pool_vaddr_unaligned;
2104 	qdf_dma_addr_t *tx_buf_pool_paddr_unaligned;
2105 };
2106 
2107 /* IPA uC datapath offload Wlan Rx resources */
2108 struct ipa_dp_rx_rsc {
2109 	/* Resource info to be passed to IPA */
2110 	qdf_dma_addr_t ipa_reo_ring_base_paddr;
2111 	void *ipa_reo_ring_base_vaddr;
2112 	uint32_t ipa_reo_ring_size;
2113 	qdf_dma_addr_t ipa_reo_tp_paddr;
2114 
2115 	/* Resource info to be passed to firmware and IPA */
2116 	qdf_dma_addr_t ipa_rx_refill_buf_ring_base_paddr;
2117 	void *ipa_rx_refill_buf_ring_base_vaddr;
2118 	uint32_t ipa_rx_refill_buf_ring_size;
2119 	qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr;
2120 };
2121 #endif
2122 
2123 struct dp_tx_msdu_info_s;
2124 /**
2125  * enum dp_context_type - DP Context Type
2126  * @DP_CONTEXT_TYPE_SOC: Context type DP SOC
2127  * @DP_CONTEXT_TYPE_PDEV: Context type DP PDEV
2128  * @DP_CONTEXT_TYPE_VDEV: Context type DP VDEV
2129  * @DP_CONTEXT_TYPE_PEER: Context type DP PEER
2130  * @DP_CONTEXT_TYPE_MON_SOC: Context type DP MON SOC
2131  * @DP_CONTEXT_TYPE_MON_PDEV: Context type DP MON PDEV
2132  *
2133  * Helper enums to be used to retrieve the size of the corresponding
2134  * data structure by passing the type.
2135  */
2136 enum dp_context_type {
2137 	DP_CONTEXT_TYPE_SOC,
2138 	DP_CONTEXT_TYPE_PDEV,
2139 	DP_CONTEXT_TYPE_VDEV,
2140 	DP_CONTEXT_TYPE_PEER,
2141 	DP_CONTEXT_TYPE_MON_SOC,
2142 	DP_CONTEXT_TYPE_MON_PDEV
2143 };
2144 
2145 /**
2146  * struct dp_arch_ops - DP target specific arch ops
2147  * @txrx_soc_attach:
2148  * @txrx_soc_detach:
2149  * @txrx_soc_init:
2150  * @txrx_soc_deinit:
2151  * @txrx_soc_srng_alloc:
2152  * @txrx_soc_srng_init:
2153  * @txrx_soc_srng_deinit:
2154  * @txrx_soc_srng_free:
2155  * @txrx_pdev_attach:
2156  * @txrx_pdev_detach:
2157  * @txrx_vdev_attach:
2158  * @txrx_vdev_detach:
2159  * @txrx_peer_map_attach:
2160  * @txrx_peer_map_detach:
2161  * @dp_rxdma_ring_sel_cfg:
2162  * @soc_cfg_attach:
2163  * @txrx_peer_setup:
2164  * @peer_get_reo_hash:
2165  * @reo_remap_config:
2166  * @tx_hw_enqueue: enqueue TX data to HW
2167  * @tx_comp_get_params_from_hal_desc: get software tx descriptor and release
2168  * 				      source from HAL desc for wbm release ring
2169  * @dp_tx_mlo_mcast_send: Tx send handler for MLO multicast enhance
2170  * @dp_tx_process_htt_completion:
2171  * @dp_rx_process:
2172  * @dp_tx_send_fast:
2173  * @dp_tx_desc_pool_init:
2174  * @dp_tx_desc_pool_deinit:
2175  * @dp_rx_desc_pool_init:
2176  * @dp_rx_desc_pool_deinit:
2177  * @dp_wbm_get_rx_desc_from_hal_desc:
2178  * @dp_rx_intrabss_mcast_handler:
2179  * @dp_rx_word_mask_subscribe:
2180  * @dp_rx_desc_cookie_2_va:
2181  * @dp_service_near_full_srngs: Handler for servicing the near full IRQ
2182  * @tx_implicit_rbm_set:
2183  * @dp_rx_peer_metadata_peer_id_get:
2184  * @dp_rx_peer_mdata_link_id_get: Handle to get link id
2185  * @dp_rx_chain_msdus:
2186  * @txrx_set_vdev_param: target specific ops while setting vdev params
2187  * @txrx_get_vdev_mcast_param: target specific ops for getting vdev
2188  *			       params related to multicast
2189  * @txrx_get_context_size:
2190  * @txrx_get_mon_context_size:
2191  * @dp_srng_test_and_update_nf_params: Check if the srng is in near full state
2192  *				and set the near-full params.
2193  * @dp_tx_mcast_handler:
2194  * @dp_rx_mcast_handler:
2195  * @dp_tx_is_mcast_primary:
2196  * @dp_soc_get_by_idle_bm_id:
2197  * @mlo_peer_find_hash_detach:
2198  * @mlo_peer_find_hash_attach:
2199  * @mlo_peer_find_hash_add:
2200  * @mlo_peer_find_hash_remove:
2201  * @mlo_peer_find_hash_find:
2202  * @get_hw_link_id:
2203  * @get_reo_qdesc_addr:
2204  * @get_rx_hash_key:
2205  * @dp_set_rx_fst:
2206  * @dp_get_rx_fst:
2207  * @dp_rx_fst_deref:
2208  * @dp_rx_fst_ref:
2209  * @txrx_print_peer_stats:
2210  * @dp_peer_rx_reorder_queue_setup: Dp peer reorder queue setup
2211  * @dp_find_peer_by_destmac:
2212  * @dp_bank_reconfig:
2213  * @dp_rx_replenish_soc_get:
2214  * @dp_soc_get_num_soc:
2215  * @dp_reconfig_tx_vdev_mcast_ctrl:
2216  * @dp_cc_reg_cfg_init:
2217  * @dp_tx_compute_hw_delay:
2218  * @print_mlo_ast_stats:
2219  * @dp_partner_chips_map:
2220  * @dp_partner_chips_unmap:
2221  * @ipa_get_bank_id: Get TCL bank id used by IPA
2222  * @ipa_get_wdi_ver: Get WDI version
2223  * @dp_txrx_ppeds_rings_status:
2224  * @dp_tx_ppeds_inuse_desc:
2225  * @dp_tx_ppeds_cfg_astidx_cache_mapping:
2226  * @txrx_soc_ppeds_start:
2227  * @txrx_soc_ppeds_stop:
2228  * @dp_register_ppeds_interrupts:
2229  * @dp_free_ppeds_interrupts:
2230  * @dp_rx_wbm_err_reap_desc: Reap WBM Error Ring Descriptor
2231  * @dp_rx_null_q_desc_handle: Handle Null Queue Exception Error
2232  * @dp_tx_desc_pool_alloc: Allocate arch specific TX descriptor pool
2233  * @dp_tx_desc_pool_free: Free arch specific TX descriptor pool
2234  * @txrx_srng_init: Init txrx srng
2235  * @ppeds_handle_attached:
2236  * @txrx_soc_ppeds_interrupt_stop:
2237  * @txrx_soc_ppeds_interrupt_start:
2238  * @txrx_soc_ppeds_service_status_update:
2239  * @txrx_soc_ppeds_enabled_check:
2240  * @txrx_soc_ppeds_txdesc_pool_reset:
2241  * @dp_update_ring_hptp: Update rings hptp during suspend/resume
2242  */
2243 struct dp_arch_ops {
2244 	/* INIT/DEINIT Arch Ops */
2245 	QDF_STATUS (*txrx_soc_attach)(struct dp_soc *soc,
2246 				      struct cdp_soc_attach_params *params);
2247 	QDF_STATUS (*txrx_soc_detach)(struct dp_soc *soc);
2248 	void* (*txrx_soc_init)(struct dp_soc *soc, HTC_HANDLE htc_handle,
2249 			       struct hif_opaque_softc *hif_handle);
2250 	QDF_STATUS (*txrx_soc_deinit)(struct dp_soc *soc);
2251 	QDF_STATUS (*txrx_soc_srng_alloc)(struct dp_soc *soc);
2252 	QDF_STATUS (*txrx_soc_srng_init)(struct dp_soc *soc);
2253 	void (*txrx_soc_srng_deinit)(struct dp_soc *soc);
2254 	void (*txrx_soc_srng_free)(struct dp_soc *soc);
2255 	QDF_STATUS (*txrx_pdev_attach)(struct dp_pdev *pdev,
2256 				       struct cdp_pdev_attach_params *params);
2257 	QDF_STATUS (*txrx_pdev_detach)(struct dp_pdev *pdev);
2258 	QDF_STATUS (*txrx_vdev_attach)(struct dp_soc *soc,
2259 				       struct dp_vdev *vdev);
2260 	QDF_STATUS (*txrx_vdev_detach)(struct dp_soc *soc,
2261 				       struct dp_vdev *vdev);
2262 	QDF_STATUS (*txrx_peer_map_attach)(struct dp_soc *soc);
2263 	void (*txrx_peer_map_detach)(struct dp_soc *soc);
2264 	QDF_STATUS (*dp_rxdma_ring_sel_cfg)(struct dp_soc *soc);
2265 	void (*soc_cfg_attach)(struct dp_soc *soc);
2266 	QDF_STATUS (*txrx_peer_setup)(struct cdp_soc_t *soc_hdl,
2267 				      uint8_t vdev_id, uint8_t *peer_mac,
2268 				      struct cdp_peer_setup_info *setup_info);
2269 	void (*peer_get_reo_hash)(struct dp_vdev *vdev,
2270 				  struct cdp_peer_setup_info *setup_info,
2271 				  enum cdp_host_reo_dest_ring *reo_dest,
2272 				  bool *hash_based,
2273 				  uint8_t *lmac_peer_id_msb);
2274 	 bool (*reo_remap_config)(struct dp_soc *soc, uint32_t *remap0,
2275 				  uint32_t *remap1, uint32_t *remap2);
2276 
2277 	/* TX RX Arch Ops */
2278 	QDF_STATUS (*tx_hw_enqueue)(struct dp_soc *soc, struct dp_vdev *vdev,
2279 				    struct dp_tx_desc_s *tx_desc,
2280 				    uint16_t fw_metadata,
2281 				    struct cdp_tx_exception_metadata *metadata,
2282 				    struct dp_tx_msdu_info_s *msdu_info);
2283 
2284 	void (*tx_comp_get_params_from_hal_desc)(struct dp_soc *soc,
2285 						 void *tx_comp_hal_desc,
2286 						 struct dp_tx_desc_s **desc);
2287 
2288 	qdf_nbuf_t (*dp_tx_mlo_mcast_send)(struct dp_soc *soc,
2289 					   struct dp_vdev *vdev,
2290 					   qdf_nbuf_t nbuf,
2291 					   struct cdp_tx_exception_metadata
2292 					   *tx_exc_metadata);
2293 
2294 	void (*dp_tx_process_htt_completion)(struct dp_soc *soc,
2295 					     struct dp_tx_desc_s *tx_desc,
2296 					     uint8_t *status,
2297 					     uint8_t ring_id);
2298 
2299 	uint32_t (*dp_rx_process)(struct dp_intr *int_ctx,
2300 				  hal_ring_handle_t hal_ring_hdl,
2301 				  uint8_t reo_ring_num, uint32_t quota);
2302 
2303 	qdf_nbuf_t (*dp_tx_send_fast)(struct cdp_soc_t *soc_hdl,
2304 				      uint8_t vdev_id,
2305 				      qdf_nbuf_t nbuf);
2306 
2307 	QDF_STATUS (*dp_tx_desc_pool_init)(struct dp_soc *soc,
2308 					   uint32_t num_elem,
2309 					   uint8_t pool_id);
2310 	void (*dp_tx_desc_pool_deinit)(
2311 				struct dp_soc *soc,
2312 				struct dp_tx_desc_pool_s *tx_desc_pool,
2313 				uint8_t pool_id);
2314 
2315 	QDF_STATUS (*dp_rx_desc_pool_init)(struct dp_soc *soc,
2316 					   struct rx_desc_pool *rx_desc_pool,
2317 					   uint32_t pool_id);
2318 	void (*dp_rx_desc_pool_deinit)(struct dp_soc *soc,
2319 				       struct rx_desc_pool *rx_desc_pool,
2320 				       uint32_t pool_id);
2321 
2322 	QDF_STATUS (*dp_wbm_get_rx_desc_from_hal_desc)(
2323 						struct dp_soc *soc,
2324 						void *ring_desc,
2325 						struct dp_rx_desc **r_rx_desc);
2326 
2327 	bool
2328 	(*dp_rx_intrabss_mcast_handler)(struct dp_soc *soc,
2329 					struct dp_txrx_peer *ta_txrx_peer,
2330 					qdf_nbuf_t nbuf_copy,
2331 					struct cdp_tid_rx_stats *tid_stats,
2332 					uint8_t link_id);
2333 
2334 	void (*dp_rx_word_mask_subscribe)(
2335 				struct dp_soc *soc,
2336 				uint32_t *msg_word,
2337 				void *rx_filter);
2338 
2339 	struct dp_rx_desc *(*dp_rx_desc_cookie_2_va)(struct dp_soc *soc,
2340 						     uint32_t cookie);
2341 	uint32_t (*dp_service_near_full_srngs)(struct dp_soc *soc,
2342 					       struct dp_intr *int_ctx,
2343 					       uint32_t dp_budget);
2344 	void (*tx_implicit_rbm_set)(struct dp_soc *soc, uint8_t tx_ring_id,
2345 				    uint8_t bm_id);
2346 	uint16_t (*dp_rx_peer_metadata_peer_id_get)(struct dp_soc *soc,
2347 						    uint32_t peer_metadata);
2348 	uint8_t (*dp_rx_peer_mdata_link_id_get)(uint32_t peer_metadata);
2349 	bool (*dp_rx_chain_msdus)(struct dp_soc *soc, qdf_nbuf_t nbuf,
2350 				  uint8_t *rx_tlv_hdr, uint8_t mac_id);
2351 	/* Control Arch Ops */
2352 	QDF_STATUS (*txrx_set_vdev_param)(struct dp_soc *soc,
2353 					  struct dp_vdev *vdev,
2354 					  enum cdp_vdev_param_type param,
2355 					  cdp_config_param_type val);
2356 
2357 	QDF_STATUS (*txrx_get_vdev_mcast_param)(struct dp_soc *soc,
2358 						struct dp_vdev *vdev,
2359 						cdp_config_param_type *val);
2360 
2361 	/* Misc Arch Ops */
2362 	qdf_size_t (*txrx_get_context_size)(enum dp_context_type);
2363 #ifdef WIFI_MONITOR_SUPPORT
2364 	qdf_size_t (*txrx_get_mon_context_size)(enum dp_context_type);
2365 #endif
2366 	int (*dp_srng_test_and_update_nf_params)(struct dp_soc *soc,
2367 						 struct dp_srng *dp_srng,
2368 						 int *max_reap_limit);
2369 
2370 	/* MLO ops */
2371 #ifdef WLAN_FEATURE_11BE_MLO
2372 #ifdef WLAN_MCAST_MLO
2373 	void (*dp_tx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
2374 				    qdf_nbuf_t nbuf);
2375 	bool (*dp_rx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
2376 				    struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
2377 				    uint8_t link_id);
2378 	bool (*dp_tx_is_mcast_primary)(struct dp_soc *soc,
2379 				       struct dp_vdev *vdev);
2380 #endif
2381 	struct dp_soc * (*dp_soc_get_by_idle_bm_id)(struct dp_soc *soc,
2382 						    uint8_t bm_id);
2383 
2384 	void (*mlo_peer_find_hash_detach)(struct dp_soc *soc);
2385 	QDF_STATUS (*mlo_peer_find_hash_attach)(struct dp_soc *soc);
2386 	void (*mlo_peer_find_hash_add)(struct dp_soc *soc,
2387 				       struct dp_peer *peer);
2388 	void (*mlo_peer_find_hash_remove)(struct dp_soc *soc,
2389 					  struct dp_peer *peer);
2390 
2391 	struct dp_peer *(*mlo_peer_find_hash_find)(struct dp_soc *soc,
2392 						   uint8_t *peer_mac_addr,
2393 						   int mac_addr_is_aligned,
2394 						   enum dp_mod_id mod_id,
2395 						   uint8_t vdev_id);
2396 #endif
2397 	uint8_t (*get_hw_link_id)(struct dp_pdev *pdev);
2398 	uint64_t (*get_reo_qdesc_addr)(hal_soc_handle_t hal_soc_hdl,
2399 				       uint8_t *dst_ring_desc,
2400 				       uint8_t *buf,
2401 				       struct dp_txrx_peer *peer,
2402 				       unsigned int tid);
2403 	void (*get_rx_hash_key)(struct dp_soc *soc,
2404 				struct cdp_lro_hash_config *lro_hash);
2405 	void (*dp_set_rx_fst)(struct dp_rx_fst *fst);
2406 	struct dp_rx_fst *(*dp_get_rx_fst)(void);
2407 	uint32_t (*dp_rx_fst_deref)(void);
2408 	void (*dp_rx_fst_ref)(void);
2409 	void (*txrx_print_peer_stats)(struct cdp_peer_stats *peer_stats,
2410 				      enum peer_stats_type stats_type);
2411 	QDF_STATUS (*dp_peer_rx_reorder_queue_setup)(struct dp_soc *soc,
2412 						     struct dp_peer *peer,
2413 						     int tid,
2414 						     uint32_t ba_window_size);
2415 	struct dp_peer *(*dp_find_peer_by_destmac)(struct dp_soc *soc,
2416 						   uint8_t *dest_mac_addr,
2417 						   uint8_t vdev_id);
2418 	void (*dp_bank_reconfig)(struct dp_soc *soc, struct dp_vdev *vdev);
2419 
2420 	struct dp_soc * (*dp_rx_replenish_soc_get)(struct dp_soc *soc,
2421 						   uint8_t chip_id);
2422 
2423 	uint8_t (*dp_soc_get_num_soc)(struct dp_soc *soc);
2424 	void (*dp_reconfig_tx_vdev_mcast_ctrl)(struct dp_soc *soc,
2425 					       struct dp_vdev *vdev);
2426 
2427 	void (*dp_cc_reg_cfg_init)(struct dp_soc *soc, bool is_4k_align);
2428 
2429 	QDF_STATUS
2430 	(*dp_tx_compute_hw_delay)(struct dp_soc *soc,
2431 				  struct dp_vdev *vdev,
2432 				  struct hal_tx_completion_status *ts,
2433 				  uint32_t *delay_us);
2434 	void (*print_mlo_ast_stats)(struct dp_soc *soc);
2435 	void (*dp_partner_chips_map)(struct dp_soc *soc,
2436 				     struct dp_peer *peer,
2437 				     uint16_t peer_id);
2438 	void (*dp_partner_chips_unmap)(struct dp_soc *soc,
2439 				       uint16_t peer_id);
2440 
2441 #ifdef IPA_OFFLOAD
2442 	int8_t (*ipa_get_bank_id)(struct dp_soc *soc);
2443 	void (*ipa_get_wdi_ver)(uint8_t *wdi_ver);
2444 #endif
2445 #ifdef WLAN_SUPPORT_PPEDS
2446 	void (*dp_txrx_ppeds_rings_status)(struct dp_soc *soc);
2447 	void (*dp_tx_ppeds_inuse_desc)(struct dp_soc *soc);
2448 	void (*dp_tx_ppeds_cfg_astidx_cache_mapping)(struct dp_soc *soc,
2449 						     struct dp_vdev *vdev,
2450 						     bool peer_map);
2451 #endif
2452 	bool (*ppeds_handle_attached)(struct dp_soc *soc);
2453 	QDF_STATUS (*txrx_soc_ppeds_start)(struct dp_soc *soc);
2454 	void (*txrx_soc_ppeds_stop)(struct dp_soc *soc);
2455 	int (*dp_register_ppeds_interrupts)(struct dp_soc *soc,
2456 					    struct dp_srng *srng, int vector,
2457 					    int ring_type, int ring_num);
2458 	void (*dp_free_ppeds_interrupts)(struct dp_soc *soc,
2459 					 struct dp_srng *srng, int ring_type,
2460 					 int ring_num);
2461 	qdf_nbuf_t (*dp_rx_wbm_err_reap_desc)(struct dp_intr *int_ctx,
2462 					      struct dp_soc *soc,
2463 					      hal_ring_handle_t hal_ring_hdl,
2464 					      uint32_t quota,
2465 					      uint32_t *rx_bufs_used);
2466 	QDF_STATUS (*dp_rx_null_q_desc_handle)(struct dp_soc *soc,
2467 					       qdf_nbuf_t nbuf,
2468 					       uint8_t *rx_tlv_hdr,
2469 					       uint8_t pool_id,
2470 					       struct dp_txrx_peer *txrx_peer,
2471 					       bool is_reo_exception,
2472 					       uint8_t link_id);
2473 
2474 	QDF_STATUS (*dp_tx_desc_pool_alloc)(struct dp_soc *soc,
2475 					    uint32_t num_elem,
2476 					    uint8_t pool_id);
2477 	void (*dp_tx_desc_pool_free)(struct dp_soc *soc, uint8_t pool_id);
2478 
2479 	QDF_STATUS (*txrx_srng_init)(struct dp_soc *soc, struct dp_srng *srng,
2480 				     int ring_type, int ring_num, int mac_id);
2481 #ifdef WLAN_SUPPORT_PPEDS
2482 	void (*txrx_soc_ppeds_interrupt_stop)(struct dp_soc *soc);
2483 	void (*txrx_soc_ppeds_interrupt_start)(struct dp_soc *soc);
2484 	void (*txrx_soc_ppeds_service_status_update)(struct dp_soc *soc,
2485 						     bool enable);
2486 	bool (*txrx_soc_ppeds_enabled_check)(struct dp_soc *soc);
2487 	void (*txrx_soc_ppeds_txdesc_pool_reset)(struct dp_soc *soc,
2488 						 qdf_nbuf_t *nbuf_list);
2489 #endif
2490 	void (*dp_update_ring_hptp)(struct dp_soc *soc, bool force_flush_tx);
2491 };
2492 
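/*
 * All target specific behaviour is reached through soc->arch_ops rather than
 * compile time branching, so common DP code stays identical across targets.
 * A minimal, hedged example of such a dispatch (the surrounding flow is
 * illustrative only, not a real call site):
 *
 *	qdf_size_t soc_size =
 *		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_SOC);
 *
 *	if (soc->arch_ops.txrx_peer_map_attach(soc) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 */
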
2493 /**
2494  * struct dp_soc_features - Data structure holding the SOC level feature flags.
2495  * @pn_in_reo_dest: PN provided by hardware in the REO destination ring.
2496  * @dmac_cmn_src_rxbuf_ring_enabled: Flag to indicate DMAC mode common Rx
2497  *				     buffer source rings
2498  * @rssi_dbm_conv_support: Rssi dbm conversion support param.
2499  * @umac_hw_reset_support: UMAC HW reset support
2500  * @wds_ext_ast_override_enable:
2501  */
2502 struct dp_soc_features {
2503 	uint8_t pn_in_reo_dest:1,
2504 		dmac_cmn_src_rxbuf_ring_enabled:1;
2505 	bool rssi_dbm_conv_support;
2506 	bool umac_hw_reset_support;
2507 	bool wds_ext_ast_override_enable;
2508 };
2509 
2510 enum sysfs_printing_mode {
2511 	PRINTING_MODE_DISABLED = 0,
2512 	PRINTING_MODE_ENABLED
2513 };
2514 
2515 /**
2516  * typedef notify_pre_reset_fw_callback() - pre-reset callback
2517  * @soc: DP SoC
2518  */
2519 typedef void (*notify_pre_reset_fw_callback)(struct dp_soc *soc);
2520 
2521 #ifdef WLAN_SYSFS_DP_STATS
2522 /**
2523  * struct sysfs_stats_config - Data structure holding stats sysfs config.
2524  * @rw_stats_lock: Lock to read and write to stat_type and pdev_id.
2525  * @sysfs_read_lock: Lock held while another stat req is being executed.
2526  * @sysfs_write_user_buffer: Lock to change buff len, max buf len
2527  * and *buf.
2528  * @sysfs_txrx_fw_request_done: Event to wait for firmware response.
2529  * @stat_type_requested: stat type requested.
2530  * @mac_id: mac id for which stat type are requested.
2531  * @printing_mode: Should a print go through.
2532  * @process_id: Process allowed to write to buffer.
2533  * @curr_buffer_length: Curr length of buffer written
2534  * @max_buffer_length: Max buffer length.
2535  * @buf: Sysfs buffer.
2536  */
2537 struct sysfs_stats_config {
2538 	/* lock held to read stats */
2539 	qdf_spinlock_t rw_stats_lock;
2540 	qdf_mutex_t sysfs_read_lock;
2541 	qdf_spinlock_t sysfs_write_user_buffer;
2542 	qdf_event_t sysfs_txrx_fw_request_done;
2543 	uint32_t stat_type_requested;
2544 	uint32_t mac_id;
2545 	enum sysfs_printing_mode printing_mode;
2546 	int process_id;
2547 	uint16_t curr_buffer_length;
2548 	uint16_t max_buffer_length;
2549 	char *buf;
2550 };
2551 #endif
2552 
2553 /* SOC level structure for data path */
2554 struct dp_soc {
2555 	/**
2556 	 * re-use memory section starts
2557 	 */
2558 
2559 	/* Common base structure - Should be the first member */
2560 	struct cdp_soc_t cdp_soc;
2561 
2562 	/* SoC Obj */
2563 	struct cdp_ctrl_objmgr_psoc *ctrl_psoc;
2564 
2565 	/* OS device abstraction */
2566 	qdf_device_t osdev;
2567 
2568 	/*cce disable*/
2569 	bool cce_disable;
2570 
2571 	/* WLAN config context */
2572 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx;
2573 
2574 	/* HTT handle for host-fw interaction */
2575 	struct htt_soc *htt_handle;
2576 
2577 	/* Common init done */
2578 	qdf_atomic_t cmn_init_done;
2579 
2580 	/* Opaque hif handle */
2581 	struct hif_opaque_softc *hif_handle;
2582 
2583 	/* PDEVs on this SOC */
2584 	struct dp_pdev *pdev_list[MAX_PDEV_CNT];
2585 
2586 	/* Ring used to replenish rx buffers (maybe to the firmware or MAC) */
2587 	struct dp_srng rx_refill_buf_ring[MAX_PDEV_CNT];
2588 
2589 	struct dp_srng rxdma_mon_desc_ring[MAX_NUM_LMAC_HW];
2590 
2591 	/* RXDMA error destination ring */
2592 	struct dp_srng rxdma_err_dst_ring[MAX_NUM_LMAC_HW];
2593 
2594 	/* RXDMA monitor buffer replenish ring */
2595 	struct dp_srng rxdma_mon_buf_ring[MAX_NUM_LMAC_HW];
2596 
2597 	/* RXDMA monitor destination ring */
2598 	struct dp_srng rxdma_mon_dst_ring[MAX_NUM_LMAC_HW];
2599 
2600 	/* RXDMA monitor status ring. TBD: Check format of this ring */
2601 	struct dp_srng rxdma_mon_status_ring[MAX_NUM_LMAC_HW];
2602 
2603 	/* Number of PDEVs */
2604 	uint8_t pdev_count;
2605 
2606 	/*ast override support in HW*/
2607 	bool ast_override_support;
2608 
2609 	/*number of hw dscp tid map*/
2610 	uint8_t num_hw_dscp_tid_map;
2611 
2612 	/* HAL SOC handle */
2613 	hal_soc_handle_t hal_soc;
2614 
2615 	/* rx monitor pkt tlv size */
2616 	uint16_t rx_mon_pkt_tlv_size;
2617 	/* rx pkt tlv size */
2618 	uint16_t rx_pkt_tlv_size;
2619 	/* rx pkt tlv size in current operation mode */
2620 	uint16_t curr_rx_pkt_tlv_size;
2621 
2622 	struct dp_arch_ops arch_ops;
2623 
2624 	/* Device ID coming from Bus sub-system */
2625 	uint32_t device_id;
2626 
2627 	/* Link descriptor pages */
2628 	struct qdf_mem_multi_page_t link_desc_pages;
2629 
2630 	/* total link descriptors for regular RX and TX */
2631 	uint32_t total_link_descs;
2632 
2633 	/* Link descriptor Idle list for HW internal use (SRNG mode) */
2634 	struct dp_srng wbm_idle_link_ring;
2635 
2636 	/* Link descriptor Idle list for HW internal use (scatter buffer mode)
2637 	 */
2638 	qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS];
2639 	void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS];
2640 	uint32_t num_scatter_bufs;
2641 
2642 	/* Tx SW descriptor pool */
2643 	struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS];
2644 
2645 	/* Tx MSDU Extension descriptor pool */
2646 	struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS];
2647 
2648 	/* Tx TSO descriptor pool */
2649 	struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS];
2650 
2651 	/* Tx TSO Num of segments pool */
2652 	struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS];
2653 
2654 	/* REO destination rings */
2655 	struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS];
2656 
2657 	/* REO exception ring - see if this should be combined with reo_dest_ring */
2658 	struct dp_srng reo_exception_ring;
2659 
2660 	/* REO reinjection ring */
2661 	struct dp_srng reo_reinject_ring;
2662 
2663 	/* REO command ring */
2664 	struct dp_srng reo_cmd_ring;
2665 
2666 	/* REO command status ring */
2667 	struct dp_srng reo_status_ring;
2668 
2669 	/* WBM Rx release ring */
2670 	struct dp_srng rx_rel_ring;
2671 
2672 	/* TCL data ring */
2673 	struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS];
2674 
2675 	/* Number of Tx comp rings */
2676 	uint8_t num_tx_comp_rings;
2677 
2678 	/* Number of TCL data rings */
2679 	uint8_t num_tcl_data_rings;
2680 
2681 	/* TCL CMD_CREDIT ring */
2682 	bool init_tcl_cmd_cred_ring;
2683 
2684 	/* Used as a credit-based ring on QCN9000, else as a command ring */
2685 	struct dp_srng tcl_cmd_credit_ring;
2686 
2687 	/* TCL command status ring */
2688 	struct dp_srng tcl_status_ring;
2689 
2690 	/* WBM Tx completion rings */
2691 	struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS];
2692 
2693 	/* Common WBM link descriptor release ring (SW to WBM) */
2694 	struct dp_srng wbm_desc_rel_ring;
2695 
2696 	/* DP Interrupts */
2697 	struct dp_intr intr_ctx[WLAN_CFG_INT_NUM_CONTEXTS];
2698 
2699 	/* Monitor mode mac id to dp_intr_id map */
2700 	int mon_intr_id_lmac_map[MAX_NUM_LMAC_HW];
2701 	/* Rx SW descriptor pool for RXDMA monitor buffer */
2702 	struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS];
2703 
2704 	/* Rx SW descriptor pool for RXDMA status buffer */
2705 	struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS];
2706 
2707 	/* Rx SW descriptor pool for RXDMA buffer */
2708 	struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS];
2709 
2710 	/* Number of REO destination rings */
2711 	uint8_t num_reo_dest_rings;
2712 
2713 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2714 	/* lock to control access to soc TX descriptors */
2715 	qdf_spinlock_t flow_pool_array_lock;
2716 
2717 	/* pause callback to pause TX queues as per flow control */
2718 	tx_pause_callback pause_cb;
2719 
2720 	/* flow pool related statistics */
2721 	struct dp_txrx_pool_stats pool_stats;
2722 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
2723 
2724 	notify_pre_reset_fw_callback notify_fw_callback;
2725 
2726 	unsigned long service_rings_running;
2727 
2728 	uint32_t wbm_idle_scatter_buf_size;
2729 
2730 	/* VDEVs on this SOC */
2731 	struct dp_vdev *vdev_id_map[MAX_VDEV_CNT];
2732 
2733 	/* Tx H/W queues lock */
2734 	qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];
2735 
2736 	/* Tx ring map for interrupt processing */
2737 	uint8_t tx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
2738 
2739 	/* Rx ring map for interrupt processing */
2740 	uint8_t rx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
2741 
2742 	/* peer ID to peer object map (array of pointers to peer objects) */
2743 	struct dp_peer **peer_id_to_obj_map;
2744 
2745 	struct {
2746 		unsigned mask;
2747 		unsigned idx_bits;
2748 		TAILQ_HEAD(, dp_peer) * bins;
2749 	} peer_hash;
2750 
2751 	/* rx defrag state – TBD: do we need this per radio? */
2752 	struct {
2753 		struct {
2754 			TAILQ_HEAD(, dp_rx_tid_defrag) waitlist;
2755 			uint32_t timeout_ms;
2756 			uint32_t next_flush_ms;
2757 			qdf_spinlock_t defrag_lock;
2758 		} defrag;
2759 		struct {
2760 			int defrag_timeout_check;
2761 			int dup_check;
2762 		} flags;
2763 		TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list;
2764 		qdf_spinlock_t reo_cmd_lock;
2765 	} rx;
2766 
2767 	/* optional rx processing function */
2768 	void (*rx_opt_proc)(
2769 		struct dp_vdev *vdev,
2770 		struct dp_peer *peer,
2771 		unsigned tid,
2772 		qdf_nbuf_t msdu_list);
2773 
2774 	/* pool addr for mcast enhance buff */
2775 	struct {
2776 		int size;
2777 		uint32_t paddr;
2778 		uint32_t *vaddr;
2779 		struct dp_tx_me_buf_t *freelist;
2780 		int buf_in_use;
2781 		qdf_dma_mem_context(memctx);
2782 	} me_buf;
2783 
2784 	/* Protect peer hash table */
2785 	DP_MUTEX_TYPE peer_hash_lock;
2786 	/* Protect peer_id_to_objmap */
2787 	DP_MUTEX_TYPE peer_map_lock;
2788 
2789 	/* maximum number of supported peers */
2790 	uint32_t max_peers;
2791 	/* maximum value for peer_id */
2792 	uint32_t max_peer_id;
2793 
2794 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
2795 	uint32_t peer_id_shift;
2796 	uint32_t peer_id_mask;
2797 #endif
2798 
2799 	/* SoC level data path statistics */
2800 	struct dp_soc_stats stats;
2801 #ifdef WLAN_SYSFS_DP_STATS
2802 	/* sysfs config for DP stats */
2803 	struct sysfs_stats_config *sysfs_config;
2804 #endif
2805 	/* timestamp to keep track of msdu buffers received on reo err ring */
2806 	uint64_t rx_route_err_start_pkt_ts;
2807 
2808 	/* Num RX Route err in a given window to keep track of rate of errors */
2809 	uint32_t rx_route_err_in_window;
2810 
2811 	/* Enable processing of Tx completion status words */
2812 	bool process_tx_status;
2813 	bool process_rx_status;
2814 	struct dp_ast_entry **ast_table;
2815 	struct {
2816 		unsigned mask;
2817 		unsigned idx_bits;
2818 		TAILQ_HEAD(, dp_ast_entry) * bins;
2819 	} ast_hash;
2820 
2821 #ifdef DP_TX_HW_DESC_HISTORY
2822 	struct dp_tx_hw_desc_history tx_hw_desc_history;
2823 #endif
2824 
2825 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
2826 	struct dp_rx_history *rx_ring_history[MAX_REO_DEST_RINGS];
2827 	struct dp_rx_refill_history *rx_refill_ring_history[MAX_PDEV_CNT];
2828 	struct dp_rx_err_history *rx_err_ring_history;
2829 	struct dp_rx_reinject_history *rx_reinject_ring_history;
2830 #endif
2831 
2832 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
2833 	struct dp_mon_status_ring_history *mon_status_ring_history;
2834 #endif
2835 
2836 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
2837 	struct dp_tx_tcl_history tx_tcl_history;
2838 	struct dp_tx_comp_history tx_comp_history;
2839 #endif
2840 
2841 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY
2842 	struct dp_cfg_event_history cfg_event_history;
2843 #endif
2844 
2845 	qdf_spinlock_t ast_lock;
2846 	/*Timer for AST entry ageout maintenance */
2847 	qdf_timer_t ast_aging_timer;
2848 
2849 	/*Timer counter for WDS AST entry ageout*/
2850 	uint8_t wds_ast_aging_timer_cnt;
2851 	bool pending_ageout;
2852 	bool ast_offload_support;
2853 	bool host_ast_db_enable;
2854 	uint32_t max_ast_ageout_count;
2855 	uint8_t eapol_over_control_port;
2856 
2857 	uint8_t sta_mode_search_policy;
2858 	qdf_timer_t lmac_reap_timer;
2859 	uint8_t lmac_timer_init;
2860 	qdf_timer_t int_timer;
2861 	uint8_t intr_mode;
2862 	uint8_t lmac_polled_mode;
2863 
2864 	qdf_list_t reo_desc_freelist;
2865 	qdf_spinlock_t reo_desc_freelist_lock;
2866 
2867 	/* htt stats */
2868 	struct htt_t2h_stats htt_stats;
2869 
2870 	void *external_txrx_handle; /* External data path handle */
2871 #ifdef IPA_OFFLOAD
2872 	struct ipa_dp_tx_rsc ipa_uc_tx_rsc;
2873 #ifdef IPA_WDI3_TX_TWO_PIPES
2874 	/* Resources for the alternative IPA TX pipe */
2875 	struct ipa_dp_tx_rsc ipa_uc_tx_rsc_alt;
2876 #endif
2877 
2878 	struct ipa_dp_rx_rsc ipa_uc_rx_rsc;
2879 #ifdef IPA_WDI3_VLAN_SUPPORT
2880 	struct ipa_dp_rx_rsc ipa_uc_rx_rsc_alt;
2881 #endif
2882 	qdf_atomic_t ipa_pipes_enabled;
2883 	bool ipa_first_tx_db_access;
2884 	qdf_spinlock_t ipa_rx_buf_map_lock;
2885 	bool ipa_rx_buf_map_lock_initialized;
2886 	uint8_t ipa_reo_ctx_lock_required[MAX_REO_DEST_RINGS];
2887 #endif
2888 
2889 #ifdef WLAN_FEATURE_STATS_EXT
2890 	struct {
2891 		uint32_t rx_mpdu_received;
2892 		uint32_t rx_mpdu_missed;
2893 	} ext_stats;
2894 	qdf_event_t rx_hw_stats_event;
2895 	qdf_spinlock_t rx_hw_stats_lock;
2896 	bool is_last_stats_ctx_init;
2897 #endif /* WLAN_FEATURE_STATS_EXT */
2898 
2899 	/* Indicates HTT map/unmap versions */
2900 	uint8_t peer_map_unmap_versions;
2901 	/* Per peer per Tid ba window size support */
2902 	uint8_t per_tid_basize_max_tid;
2903 	/* Soc level flag to enable da_war */
2904 	uint8_t da_war_enabled;
2905 	/* number of active ast entries */
2906 	uint32_t num_ast_entries;
2907 	/* peer extended rate statistics context at soc level*/
2908 	struct cdp_soc_rate_stats_ctx *rate_stats_ctx;
2909 	/* peer extended rate statistics control flag */
2910 	bool peerstats_enabled;
2911 
2912 	/* 8021p PCP-TID map values */
2913 	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
2914 	/* TID map priority value */
2915 	uint8_t tidmap_prty;
2916 	/* Pointer to global per ring type specific configuration table */
2917 	struct wlan_srng_cfg *wlan_srng_cfg;
2918 	/* Num Tx outstanding on device */
2919 	qdf_atomic_t num_tx_outstanding;
2920 	/* Num Tx exception on device */
2921 	qdf_atomic_t num_tx_exception;
2922 	/* Num Tx allowed */
2923 	uint32_t num_tx_allowed;
2924 	/* Num Regular Tx allowed */
2925 	uint32_t num_reg_tx_allowed;
2926 	/* Num Tx allowed for special frames*/
2927 	uint32_t num_tx_spl_allowed;
2928 	/* Preferred HW mode */
2929 	uint8_t preferred_hw_mode;
2930 
2931 	/**
2932 	 * Flag to indicate whether WAR to address single cache entry
2933 	 * invalidation bug is enabled or not
2934 	 */
2935 	bool is_rx_fse_full_cache_invalidate_war_enabled;
2936 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
2937 	/**
2938 	 * Pointer to DP RX Flow FST at SOC level if
2939 	 * is_rx_flow_search_table_per_pdev is false
2940 	 * TBD: rx_fst[num_macs] if we decide to have per mac FST
2941 	 */
2942 	struct dp_rx_fst *rx_fst;
2943 #ifdef WLAN_SUPPORT_RX_FISA
2944 	uint8_t fisa_enable;
2945 	uint8_t fisa_lru_del_enable;
2946 	/**
2947 	 * Params used for controlling the fisa aggregation dynamically
2948 	 */
2949 	struct {
2950 		qdf_atomic_t skip_fisa;
2951 		uint8_t fisa_force_flush[MAX_REO_DEST_RINGS];
2952 	} skip_fisa_param;
2953 
2954 	/**
2955 	 * CMEM address and size for FST in CMEM. This is the address
2956 	 * shared during init time.
2957 	 */
2958 	uint64_t fst_cmem_base;
2959 	uint64_t fst_cmem_size;
2960 #endif
2961 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
2962 	/* SG supported for msdu continued packets from wbm release ring */
2963 	bool wbm_release_desc_rx_sg_support;
2964 	bool peer_map_attach_success;
2965 	/* Flag to disable mac1 ring interrupts */
2966 	bool disable_mac1_intr;
2967 	/* Flag to disable mac2 ring interrupts */
2968 	bool disable_mac2_intr;
2969 
2970 	struct {
2971 		/* 1st msdu in sg for msdu continued packets in wbm rel ring */
2972 		bool wbm_is_first_msdu_in_sg;
2973 		/* Wbm sg list head */
2974 		qdf_nbuf_t wbm_sg_nbuf_head;
2975 		/* Wbm sg list tail */
2976 		qdf_nbuf_t wbm_sg_nbuf_tail;
2977 		uint32_t wbm_sg_desc_msdu_len;
2978 	} wbm_sg_param;
2979 	/* Number of msdu exception descriptors */
2980 	uint32_t num_msdu_exception_desc;
2981 
2982 	/* RX buffer params */
2983 	struct rx_buff_pool rx_buff_pool[MAX_PDEV_CNT];
2984 	struct rx_refill_buff_pool rx_refill_buff_pool;
2985 	/* Save recent operation related variable */
2986 	struct dp_last_op_info last_op_info;
2987 	TAILQ_HEAD(, dp_peer) inactive_peer_list;
2988 	qdf_spinlock_t inactive_peer_list_lock;
2989 	TAILQ_HEAD(, dp_vdev) inactive_vdev_list;
2990 	qdf_spinlock_t inactive_vdev_list_lock;
2991 	/* lock to protect vdev_id_map table*/
2992 	qdf_spinlock_t vdev_map_lock;
2993 
2994 	/* Flow Search Table is in CMEM */
2995 	bool fst_in_cmem;
2996 
2997 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
2998 	struct dp_swlm swlm;
2999 #endif
3000 
3001 #ifdef FEATURE_RUNTIME_PM
3002 	/* DP Rx timestamp */
3003 	qdf_time_t rx_last_busy;
3004 	/* Dp runtime refcount */
3005 	qdf_atomic_t dp_runtime_refcount;
3006 	/* Dp tx pending count in RTPM */
3007 	qdf_atomic_t tx_pending_rtpm;
3008 #endif
3009 	/* Queue of invalid buffers that were allocated as RX buffers */
3010 	qdf_nbuf_queue_t invalid_buf_queue;
3011 
3012 #ifdef FEATURE_MEC
3013 	/** @mec_lock: spinlock for MEC table */
3014 	qdf_spinlock_t mec_lock;
3015 	/** @mec_cnt: number of active mec entries */
3016 	qdf_atomic_t mec_cnt;
3017 	struct {
3018 		/** @mask: mask bits */
3019 		uint32_t mask;
3020 		/** @idx_bits: index to shift bits */
3021 		uint32_t idx_bits;
3022 		/** @bins: MEC table */
3023 		TAILQ_HEAD(, dp_mec_entry) * bins;
3024 	} mec_hash;
3025 #endif
3026 
3027 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
3028 	qdf_list_t reo_desc_deferred_freelist;
3029 	qdf_spinlock_t reo_desc_deferred_freelist_lock;
3030 	bool reo_desc_deferred_freelist_init;
3031 #endif
3032 	/* BM id for first WBM2SW  ring */
3033 	uint32_t wbm_sw0_bm_id;
3034 
3035 	/* Store arch_id from device_id */
3036 	uint16_t arch_id;
3037 
3038 	/* link desc ID start per device type */
3039 	uint32_t link_desc_id_start;
3040 
3041 	/* CMEM buffer target reserved for host usage */
3042 	uint64_t cmem_base;
3043 	/* CMEM size in bytes */
3044 	uint64_t cmem_total_size;
3045 	/* CMEM free size in bytes */
3046 	uint64_t cmem_avail_size;
3047 
3048 	/* SOC level feature flags */
3049 	struct dp_soc_features features;
3050 
3051 #ifdef WIFI_MONITOR_SUPPORT
3052 	struct dp_mon_soc *monitor_soc;
3053 #endif
3054 	uint8_t rxdma2sw_rings_not_supported:1,
3055 		wbm_sg_last_msdu_war:1,
3056 		mec_fw_offload:1,
3057 		multi_peer_grp_cmd_supported:1;
3058 
3059 	/* Number of Rx refill rings */
3060 	uint8_t num_rx_refill_buf_rings;
3061 #ifdef FEATURE_RUNTIME_PM
3062 	/* Flag to indicate vote for runtime_pm for high tput case */
3063 	qdf_atomic_t rtpm_high_tput_flag;
3064 #endif
3065 	/* Buffer manager ID for idle link descs */
3066 	uint8_t idle_link_bm_id;
3067 	qdf_atomic_t ref_count;
3068 
3069 	unsigned long vdev_stats_id_map;
3070 	bool txmon_hw_support;
3071 
3072 #ifdef DP_UMAC_HW_RESET_SUPPORT
3073 	struct dp_soc_umac_reset_ctx umac_reset_ctx;
3074 #endif
3075 	/* PPDU to link_id mapping parameters */
3076 	uint8_t link_id_offset;
3077 	uint8_t link_id_bits;
3078 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
3079 	/* Flag used to decide whether to switch the rx link speed */
3080 	bool high_throughput;
3081 #endif
3082 	bool is_tx_pause;
3083 
3084 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
3085 	/* number of IPv4 flows inserted */
3086 	qdf_atomic_t ipv4_fse_cnt;
3087 	/* number of IPv6 flows inserted */
3088 	qdf_atomic_t ipv6_fse_cnt;
3089 #endif
3090 	/* Reo queue ref table items */
3091 	struct reo_queue_ref_table reo_qref;
3092 #ifdef DP_TX_PACKET_INSPECT_FOR_ILP
3093 	/* Flag to show if TX ILP is enabled */
3094 	bool tx_ilp_enable;
3095 #endif
3096 };
3097 
3098 #ifdef IPA_OFFLOAD
3099 /**
3100  * struct dp_ipa_resources - Resources needed for IPA
3101  * @tx_ring:
3102  * @tx_num_alloc_buffer:
3103  * @tx_comp_ring:
3104  * @rx_rdy_ring:
3105  * @rx_refill_ring:
3106  * @tx_comp_doorbell_paddr: IPA UC doorbell registers paddr
3107  * @tx_comp_doorbell_vaddr:
3108  * @rx_ready_doorbell_paddr:
3109  * @is_db_ddr_mapped:
3110  * @tx_alt_ring:
3111  * @tx_alt_ring_num_alloc_buffer:
3112  * @tx_alt_comp_ring:
3113  * @tx_alt_comp_doorbell_paddr: IPA UC doorbell registers paddr
3114  * @tx_alt_comp_doorbell_vaddr:
3115  * @rx_alt_rdy_ring:
3116  * @rx_alt_refill_ring:
3117  * @rx_alt_ready_doorbell_paddr:
3118  */
3119 struct dp_ipa_resources {
3120 	qdf_shared_mem_t tx_ring;
3121 	uint32_t tx_num_alloc_buffer;
3122 
3123 	qdf_shared_mem_t tx_comp_ring;
3124 	qdf_shared_mem_t rx_rdy_ring;
3125 	qdf_shared_mem_t rx_refill_ring;
3126 
3127 	/* IPA UC doorbell registers paddr */
3128 	qdf_dma_addr_t tx_comp_doorbell_paddr;
3129 	uint32_t *tx_comp_doorbell_vaddr;
3130 	qdf_dma_addr_t rx_ready_doorbell_paddr;
3131 
3132 	bool is_db_ddr_mapped;
3133 
3134 #ifdef IPA_WDI3_TX_TWO_PIPES
3135 	qdf_shared_mem_t tx_alt_ring;
3136 	uint32_t tx_alt_ring_num_alloc_buffer;
3137 	qdf_shared_mem_t tx_alt_comp_ring;
3138 
3139 	/* IPA UC doorbell registers paddr */
3140 	qdf_dma_addr_t tx_alt_comp_doorbell_paddr;
3141 	uint32_t *tx_alt_comp_doorbell_vaddr;
3142 #endif
3143 #ifdef IPA_WDI3_VLAN_SUPPORT
3144 	qdf_shared_mem_t rx_alt_rdy_ring;
3145 	qdf_shared_mem_t rx_alt_refill_ring;
3146 	qdf_dma_addr_t rx_alt_ready_doorbell_paddr;
3147 #endif
3148 };
3149 #endif
3150 
3151 #define MAX_RX_MAC_RINGS 2
3152 /* Same as NAC_MAX_CLIENT */
3153 #define DP_NAC_MAX_CLIENT  24
3154 
3155 /*
3156  * 24 bits cookie size
3157  * 10 bits page id 0 ~ 1023 for MCL
3158  * 3 bits page id 0 ~ 7 for WIN
3159  * WBM Idle List Desc size = 128,
3160  * Num descs per page = 4096/128 = 32 for MCL
3161  * Num descs per page = 2MB/128 = 16384 for WIN
3162  */
3163 /*
3164  * Macros to setup link descriptor cookies - for link descriptors, we just
3165  * need the first 3 bits to store the bank/page ID for WIN. The
3166  * remaining bits are used to set a unique ID, which is
3167  * useful in debugging
3168  */
3169 #ifdef MAX_ALLOC_PAGE_SIZE
3170 #if PAGE_SIZE == 4096
3171 #define LINK_DESC_PAGE_ID_MASK  0x007FE0
3172 #define LINK_DESC_ID_SHIFT      5
3173 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x8000
3174 #elif PAGE_SIZE == 65536
3175 #define LINK_DESC_PAGE_ID_MASK  0x007E00
3176 #define LINK_DESC_ID_SHIFT      9
3177 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x800
3178 #else
3179 #error "Unsupported kernel PAGE_SIZE"
3180 #endif
3181 #define LINK_DESC_COOKIE(_desc_id, _page_id, _desc_id_start) \
3182 	((((_page_id) + (_desc_id_start)) << LINK_DESC_ID_SHIFT) | (_desc_id))
3183 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
3184 	(((_cookie) & LINK_DESC_PAGE_ID_MASK) >> LINK_DESC_ID_SHIFT)
3185 #else
3186 #define LINK_DESC_PAGE_ID_MASK  0x7
3187 #define LINK_DESC_ID_SHIFT      3
3188 #define LINK_DESC_COOKIE(_desc_id, _page_id, _desc_id_start) \
3189 	((((_desc_id) + (_desc_id_start)) << LINK_DESC_ID_SHIFT) | (_page_id))
3190 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
3191 	((_cookie) & LINK_DESC_PAGE_ID_MASK)
3192 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x8000
3193 #endif
3194 #define LINK_DESC_ID_START_20_BITS_COOKIE 0x4000
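
/*
 * A hedged example of the cookie round trip (values are illustrative only).
 * On the WIN variant (the #else branch above), page id 5 with descriptor
 * id 42 and a start of LINK_DESC_ID_START_21_BITS_COOKIE encodes as:
 *
 *	uint32_t cookie = LINK_DESC_COOKIE(42, 5,
 *					   LINK_DESC_ID_START_21_BITS_COOKIE);
 *
 * and the page id is recovered with:
 *
 *	uint32_t page_id = LINK_DESC_COOKIE_PAGE_ID(cookie);
 *
 * which keeps only the page id field (the low LINK_DESC_ID_SHIFT bits on
 * the WIN layout).
 */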
3195 
3196 /* same as ieee80211_nac_param */
3197 enum dp_nac_param_cmd {
3198 	/* IEEE80211_NAC_PARAM_ADD */
3199 	DP_NAC_PARAM_ADD = 1,
3200 	/* IEEE80211_NAC_PARAM_DEL */
3201 	DP_NAC_PARAM_DEL,
3202 	/* IEEE80211_NAC_PARAM_LIST */
3203 	DP_NAC_PARAM_LIST,
3204 };
3205 
3206 /**
3207  * struct dp_neighbour_peer - neighbour peer list type for smart mesh
3208  * @neighbour_peers_macaddr: neighbour peer's mac address
3209  * @vdev: associated vdev
3210  * @ast_entry: ast_entry for neighbour peer
3211  * @rssi: rssi value
3212  * @neighbour_peer_list_elem: neighbour peer list TAILQ element
3213  */
3214 struct dp_neighbour_peer {
3215 	union dp_align_mac_addr neighbour_peers_macaddr;
3216 	struct dp_vdev *vdev;
3217 	struct dp_ast_entry *ast_entry;
3218 	uint8_t rssi;
3219 	TAILQ_ENTRY(dp_neighbour_peer) neighbour_peer_list_elem;
3220 };
3221 
3222 #ifdef WLAN_TX_PKT_CAPTURE_ENH
3223 #define WLAN_TX_PKT_CAPTURE_ENH 1
3224 #define DP_TX_PPDU_PROC_THRESHOLD 8
3225 #define DP_TX_PPDU_PROC_TIMEOUT 10
3226 #endif
3227 
3228 /**
3229  * struct ppdu_info - PPDU Status info descriptor
3230  * @ppdu_id: Unique ppduid assigned by firmware for every tx packet
3231  * @sched_cmdid: schedule command id, which will be same in a burst
3232  * @max_ppdu_id: wrap around for ppdu id
3233  * @tsf_l32:
3234  * @tlv_bitmap:
3235  * @last_tlv_cnt: Keep track for missing ppdu tlvs
3236  * @last_user: last ppdu processed for user
3237  * @is_ampdu: set if Ampdu aggregate
3238  * @nbuf: ppdu descriptor payload
3239  * @ppdu_desc: ppdu descriptor
3240  * @ulist: Union of lists
3241  * @ppdu_info_dlist_elem: linked list of ppdu tlvs
3242  * @ppdu_info_slist_elem: Singly linked list (queue) of ppdu tlvs
3243  * @ppdu_info_list_elem: linked list of ppdu tlvs
3244  * @ppdu_info_queue_elem: Singly linked list (queue) of ppdu tlvs
3245  * @compltn_common_tlv: Successful tlv counter from COMPLTN COMMON tlv
3246  * @ack_ba_tlv: Successful tlv counter from ACK BA tlv
3247  * @done:
3248  */
3249 struct ppdu_info {
3250 	uint32_t ppdu_id;
3251 	uint32_t sched_cmdid;
3252 	uint32_t max_ppdu_id;
3253 	uint32_t tsf_l32;
3254 	uint16_t tlv_bitmap;
3255 	uint16_t last_tlv_cnt;
3256 	uint16_t last_user:8,
3257 		 is_ampdu:1;
3258 	qdf_nbuf_t nbuf;
3259 	struct cdp_tx_completion_ppdu *ppdu_desc;
3260 #ifdef WLAN_TX_PKT_CAPTURE_ENH
3261 	union {
3262 		TAILQ_ENTRY(ppdu_info) ppdu_info_dlist_elem;
3263 		STAILQ_ENTRY(ppdu_info) ppdu_info_slist_elem;
3264 	} ulist;
3265 #define ppdu_info_list_elem ulist.ppdu_info_dlist_elem
3266 #define ppdu_info_queue_elem ulist.ppdu_info_slist_elem
3267 #else
3268 	TAILQ_ENTRY(ppdu_info) ppdu_info_list_elem;
3269 #endif
3270 	uint8_t compltn_common_tlv;
3271 	uint8_t ack_ba_tlv;
3272 	bool done;
3273 };
3274 
3275 /**
3276  * struct msdu_completion_info - wbm msdu completion info
3277  * @ppdu_id: Unique ppduid assigned by firmware for every tx packet
3278  * @peer_id: peer_id
3279  * @tid: tid which used during transmit
3280  * @first_msdu: first msdu indication
3281  * @last_msdu: last msdu indication
3282  * @msdu_part_of_amsdu: msdu part of amsdu
3283  * @transmit_cnt: retried count
3284  * @status: transmit status
3285  * @tsf: timestamp which it transmitted
3286  */
3287 struct msdu_completion_info {
3288 	uint32_t ppdu_id;
3289 	uint16_t peer_id;
3290 	uint8_t tid;
3291 	uint8_t first_msdu:1,
3292 		last_msdu:1,
3293 		msdu_part_of_amsdu:1;
3294 	uint8_t transmit_cnt;
3295 	uint8_t status;
3296 	uint32_t tsf;
3297 };
3298 
3299 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
3300 struct rx_protocol_tag_map {
3301 	/* This is the user configured tag for the said protocol type */
3302 	uint16_t tag;
3303 };
3304 
3305 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
3306 /**
3307  * struct rx_protocol_tag_stats - protocol statistics
3308  * @tag_ctr: number of rx msdus matching this tag
3309  */
3310 struct rx_protocol_tag_stats {
3311 	uint32_t tag_ctr;
3312 };
3313 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
3314 
3315 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
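
#if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) && \
	defined(WLAN_SUPPORT_RX_TAG_STATISTICS)
/*
 * Editor's sketch (illustrative only): how a user-configured protocol tag
 * would be looked up and the matching per-protocol counter incremented. The
 * map and stats arrays are passed explicitly because struct dp_pdev, which
 * owns them (rx_proto_tag_map / reo_proto_tag_stats), is defined further
 * below in this file.
 */
static inline uint16_t
dp_rx_proto_tag_lookup_sketch(struct rx_protocol_tag_map *map,
			      struct rx_protocol_tag_stats *stats,
			      uint32_t protocol_index)
{
	/* Count this MSDU against the matching protocol type */
	stats[protocol_index].tag_ctr++;

	/* Return the tag the user configured for this protocol type */
	return map[protocol_index].tag;
}
#endif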
3316 
3317 #ifdef WLAN_RX_PKT_CAPTURE_ENH
3318 /* Template data to be set for Enhanced RX Monitor packets */
3319 #define RX_MON_CAP_ENH_TRAILER 0xdeadc0dedeadda7a
3320 
3321 /**
3322  * struct dp_rx_mon_enh_trailer_data - Data structure to set a known pattern
3323  * at end of each MSDU in monitor-lite mode
3324  * @reserved1: reserved for future use
3325  * @reserved2: reserved for future use
3326  * @flow_tag: flow tag value read from skb->cb
3327  * @protocol_tag: protocol tag value read from skb->cb
3328  */
3329 struct dp_rx_mon_enh_trailer_data {
3330 	uint16_t reserved1;
3331 	uint16_t reserved2;
3332 	uint16_t flow_tag;
3333 	uint16_t protocol_tag;
3334 };
3335 #endif /* WLAN_RX_PKT_CAPTURE_ENH */
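
#ifdef WLAN_RX_PKT_CAPTURE_ENH
/*
 * Editor's sketch (assumption, not the driver's routine): in monitor-lite
 * mode a trailer of the shape above is appended at the tail of each MSDU,
 * with RX_MON_CAP_ENH_TRAILER providing the recognisable pattern in the
 * reserved words. Tailroom availability and the exact byte layout are
 * assumptions of this sketch.
 */
static inline void
dp_rx_mon_add_trailer_sketch(qdf_nbuf_t msdu, uint16_t protocol_tag,
			     uint16_t flow_tag)
{
	struct dp_rx_mon_enh_trailer_data *trailer;

	trailer = (struct dp_rx_mon_enh_trailer_data *)
			qdf_nbuf_put_tail(msdu, sizeof(*trailer));
	if (!trailer)
		return;

	/* Keep part of the known pattern so a parser can spot the trailer */
	trailer->reserved1 = (uint16_t)(RX_MON_CAP_ENH_TRAILER >> 48);
	trailer->reserved2 = (uint16_t)(RX_MON_CAP_ENH_TRAILER >> 32);
	trailer->flow_tag = flow_tag;
	trailer->protocol_tag = protocol_tag;
}
#endif /* WLAN_RX_PKT_CAPTURE_ENH */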
3336 
3337 #ifdef HTT_STATS_DEBUGFS_SUPPORT
3338 /* Number of debugfs entries created for HTT stats */
3339 #define PDEV_HTT_STATS_DBGFS_SIZE HTT_DBG_NUM_EXT_STATS
3340 
3341 /**
3342  * struct pdev_htt_stats_dbgfs_priv - Structure to maintain debugfs information
3343  * of HTT stats
3344  * @pdev: dp pdev of debugfs entry
3345  * @stats_id: stats id of debugfs entry
3346  */
3347 struct pdev_htt_stats_dbgfs_priv {
3348 	struct dp_pdev *pdev;
3349 	uint16_t stats_id;
3350 };
3351 
3352 /**
3353  * struct pdev_htt_stats_dbgfs_cfg - PDEV level data structure for debugfs
3354  * support for HTT stats
3355  * @debugfs_entry: qdf_debugfs directory entry
3356  * @m: qdf debugfs file handler
3357  * @pdev_htt_stats_dbgfs_ops: File operations of entry created
3358  * @priv: HTT stats debugfs private object
3359  * @htt_stats_dbgfs_event: HTT stats event for debugfs support
3360  * @lock: HTT stats debugfs lock
3361  * @htt_stats_dbgfs_msg_process: Function callback to print HTT stats
3362  */
3363 struct pdev_htt_stats_dbgfs_cfg {
3364 	qdf_dentry_t debugfs_entry[PDEV_HTT_STATS_DBGFS_SIZE];
3365 	qdf_debugfs_file_t m;
3366 	struct qdf_debugfs_fops
3367 			pdev_htt_stats_dbgfs_ops[PDEV_HTT_STATS_DBGFS_SIZE - 1];
3368 	struct pdev_htt_stats_dbgfs_priv priv[PDEV_HTT_STATS_DBGFS_SIZE - 1];
3369 	qdf_event_t htt_stats_dbgfs_event;
3370 	qdf_mutex_t lock;
3371 	void (*htt_stats_dbgfs_msg_process)(void *data, A_INT32 len);
3372 };
3373 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
3374 
3375 struct dp_srng_ring_state {
3376 	enum hal_ring_type ring_type;
3377 	uint32_t sw_head;
3378 	uint32_t sw_tail;
3379 	uint32_t hw_head;
3380 	uint32_t hw_tail;
3381 
3382 };
3383 
3384 struct dp_soc_srngs_state {
3385 	uint32_t seq_num;
3386 	uint32_t max_ring_id;
3387 	struct dp_srng_ring_state ring_state[DP_MAX_SRNGS];
3388 	TAILQ_ENTRY(dp_soc_srngs_state) list_elem;
3389 };
3390 
3391 #ifdef WLAN_FEATURE_11BE_MLO
3392 /**
3393  * struct dp_mlo_sync_timestamp - PDEV level MLO timestamp info from HTT msg
3394  * @msg_type: This would be set to HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND
3395  * @pdev_id: pdev_id
3396  * @chip_id: chip_id
3397  * @mac_clk_freq: mac clock frequency of the mac HW block in MHz
3398  * @sync_tstmp_lo_us: lower 32 bits of the WLAN global time stamp (in us) at
3399  *                    which last sync interrupt was received
3400  * @sync_tstmp_hi_us: upper 32 bits of the WLAN global time stamp (in us) at
3401  *                    which last sync interrupt was received
3402  * @mlo_offset_lo_us: lower 32 bits of the MLO time stamp offset in us
3403  * @mlo_offset_hi_us: upper 32 bits of the MLO time stamp offset in us
3404  * @mlo_offset_clks:  MLO time stamp offset in clock ticks for sub us
3405  * @mlo_comp_us:      MLO time stamp compensation applied in us
3406  * @mlo_comp_clks:    MLO time stamp compensation applied in clock ticks
3407  *                    for sub us resolution
3408  * @mlo_comp_timer:   period of MLO compensation timer at which compensation
3409  *                    is applied, in us
3410  */
3411 struct dp_mlo_sync_timestamp {
3412 	uint32_t msg_type:8,
3413 		 pdev_id:2,
3414 		 chip_id:2,
3415 		 rsvd1:4,
3416 		 mac_clk_freq:16;
3417 	uint32_t sync_tstmp_lo_us;
3418 	uint32_t sync_tstmp_hi_us;
3419 	uint32_t mlo_offset_lo_us;
3420 	uint32_t mlo_offset_hi_us;
3421 	uint32_t mlo_offset_clks;
3422 	uint32_t mlo_comp_us:16,
3423 		 mlo_comp_clks:10,
3424 		 rsvd2:6;
3425 	uint32_t mlo_comp_timer:22,
3426 		 rsvd3:10;
3427 };
3428 #endif
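
#ifdef WLAN_FEATURE_11BE_MLO
/*
 * Editor's sketch (not driver API): each hi/lo pair above carries the two
 * halves of a 64-bit microsecond value; a consumer recombines them as below.
 */
static inline uint64_t
dp_mlo_offset_us_sketch(const struct dp_mlo_sync_timestamp *ts)
{
	return ((uint64_t)ts->mlo_offset_hi_us << 32) | ts->mlo_offset_lo_us;
}
#endif /* WLAN_FEATURE_11BE_MLO */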
3429 
3430 /* PDEV level structure for data path */
3431 struct dp_pdev {
3432 	/**
3433 	 * Re-use Memory Section Starts
3434 	 */
3435 
3436 	/* PDEV Id */
3437 	uint8_t pdev_id;
3438 
3439 	/* LMAC Id */
3440 	uint8_t lmac_id;
3441 
3442 	/* Target pdev  Id */
3443 	uint8_t target_pdev_id;
3444 
3445 	bool pdev_deinit;
3446 
3447 	/* TXRX SOC handle */
3448 	struct dp_soc *soc;
3449 
3450 	/* pdev status down or up required to handle dynamic hw
3451 	 * mode switch between DBS and DBS_SBS.
3452 	 * 1 = down
3453 	 * 0 = up
3454 	 */
3455 	bool is_pdev_down;
3456 
3457 	/* Enhanced Stats is enabled */
3458 	uint8_t enhanced_stats_en:1,
3459 		link_peer_stats:1;
3460 
3461 	/* Flag to indicate fast RX */
3462 	bool rx_fast_flag;
3463 
3464 	/* Second ring used to replenish rx buffers */
3465 	struct dp_srng rx_refill_buf_ring2;
3466 #ifdef IPA_WDI3_VLAN_SUPPORT
3467 	/* Third ring used to replenish rx buffers */
3468 	struct dp_srng rx_refill_buf_ring3;
3469 #endif
3470 
3471 #ifdef FEATURE_DIRECT_LINK
3472 	/* Fourth ring used to replenish rx buffers */
3473 	struct dp_srng rx_refill_buf_ring4;
3474 #endif
3475 
3476 	/* Empty ring used by firmware to post rx buffers to the MAC */
3477 	struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];
3478 
3479 	int ch_band_lmac_id_mapping[REG_BAND_UNKNOWN];
3480 
3481 	/* wlan_cfg pdev ctxt */
3482 	struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx;
3483 
3484 	/**
3485 	 * TODO: See if we need a ring map here for LMAC rings.
3486 	 * 1. Monitor rings are currently planned to be processed on receiving
3487 	 * PPDU end interrupts and hence won't need ring based interrupts.
3488 	 * 2. Rx buffer rings will be replenished during REO destination
3489 	 * processing and don't require regular interrupt handling - we will
3490 	 * only handle low water mark interrupts, which are not expected
3491 	 * frequently.
3492 	 */
3493 
3494 	/* VDEV list */
3495 	TAILQ_HEAD(, dp_vdev) vdev_list;
3496 
3497 	/* vdev list lock */
3498 	qdf_spinlock_t vdev_list_lock;
3499 
3500 	/* Number of vdevs this device has */
3501 	uint16_t vdev_count;
3502 
3503 	/* PDEV transmit lock */
3504 	qdf_spinlock_t tx_lock;
3505 
3506 	/* tx_mutex for ME (mcast enhancement) */
3507 	DP_MUTEX_TYPE tx_mutex;
3508 
3509 	/* msdu chain head & tail */
3510 	qdf_nbuf_t invalid_peer_head_msdu;
3511 	qdf_nbuf_t invalid_peer_tail_msdu;
3512 
3513 	/* Band steering  */
3514 	/* TBD */
3515 
3516 	/* PDEV level data path statistics */
3517 	struct cdp_pdev_stats stats;
3518 
3519 	/* Global RX decap mode for the device */
3520 	enum htt_pkt_type rx_decap_mode;
3521 
3522 	qdf_atomic_t num_tx_outstanding;
3523 	int32_t tx_descs_max;
3524 
3525 	qdf_atomic_t num_tx_exception;
3526 
3527 	/* MCL specific local peer handle */
3528 	struct {
3529 		uint8_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
3530 		uint8_t freelist;
3531 		qdf_spinlock_t lock;
3532 		struct dp_peer *map[OL_TXRX_NUM_LOCAL_PEER_IDS];
3533 	} local_peer_ids;
3534 
3535 	/* dscp_tid_map */
3536 	uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX];
3537 
3538 	/* operating channel */
3539 	struct {
3540 		uint8_t num;
3541 		uint8_t band;
3542 		uint16_t freq;
3543 	} operating_channel;
3544 
3545 	/* pool addr for mcast enhance buff */
3546 	struct {
3547 		int size;
3548 		uint32_t paddr;
3549 		char *vaddr;
3550 		struct dp_tx_me_buf_t *freelist;
3551 		int buf_in_use;
3552 		qdf_dma_mem_context(memctx);
3553 	} me_buf;
3554 
3555 	bool hmmc_tid_override_en;
3556 	uint8_t hmmc_tid;
3557 
3558 	/* Number of VAPs with mcast enhancement enabled */
3559 	qdf_atomic_t mc_num_vap_attached;
3560 
3561 	qdf_atomic_t stats_cmd_complete;
3562 
3563 #ifdef IPA_OFFLOAD
3564 	ipa_uc_op_cb_type ipa_uc_op_cb;
3565 	void *usr_ctxt;
3566 	struct dp_ipa_resources ipa_resource;
3567 #endif
3568 
3569 	/* TBD */
3570 
3571 	/* map this pdev to a particular Reo Destination ring */
3572 	enum cdp_host_reo_dest_ring reo_dest;
3573 
3574 	/* WDI event handlers */
3575 	struct wdi_event_subscribe_t **wdi_event_list;
3576 
3577 	bool cfr_rcc_mode;
3578 
3579 	/* enable time latency check for tx completion */
3580 	bool latency_capture_enable;
3581 
3582 	/* enable calculation of delay stats */
3583 	bool delay_stats_flag;
3584 	void *dp_txrx_handle; /* Advanced data path handle */
3585 	uint32_t ppdu_id;
3586 	bool first_nbuf;
3587 	/* Current noise-floor reading for the pdev channel */
3588 	int16_t chan_noise_floor;
3589 
3590 	/*
3591 	 * For a multiradio device, this flag indicates whether
3592 	 * this radio is primary or secondary.
3593 	 *
3594 	 * For HK 1.0, this is used as a WAR for the AST issue.
3595 	 * HK 1.x mandates creation of only 1 AST entry with same MAC address
3596 	 * across 2 radios. is_primary indicates the radio on which DP should
3597 	 * install the HW AST entry if there is a request to add 2 AST entries
3598 	 * with same MAC address across 2 radios.
3599 	 */
3600 	uint8_t is_primary;
3601 	struct cdp_tx_sojourn_stats sojourn_stats;
3602 	qdf_nbuf_t sojourn_buf;
3603 
3604 	union dp_rx_desc_list_elem_t *free_list_head;
3605 	union dp_rx_desc_list_elem_t *free_list_tail;
3606 	/* Cached peer_id from htt_peer_details_tlv */
3607 	uint16_t fw_stats_peer_id;
3608 
3609 	/* qdf_event for fw_peer_stats */
3610 	qdf_event_t fw_peer_stats_event;
3611 
3612 	/* qdf_event for fw_stats */
3613 	qdf_event_t fw_stats_event;
3614 
3615 	/* qdf_event for fw_obss_stats */
3616 	qdf_event_t fw_obss_stats_event;
3617 
3618 	/* To check if request is already sent for obss stats */
3619 	bool pending_fw_obss_stats_response;
3620 
3621 	/* User configured max number of tx buffers */
3622 	uint32_t num_tx_allowed;
3623 
3624 	/*
3625 	 * User configured max number of tx buffers excluding the
3626 	 * number of buffers reserved for handling special frames
3627 	 */
3628 	uint32_t num_reg_tx_allowed;
3629 
3630 	/* User configured max number of tx buffers for the special frames */
3631 	uint32_t num_tx_spl_allowed;
3632 
3633 	/* unique cookie required for peer session */
3634 	uint32_t next_peer_cookie;
3635 
3636 	/*
3637 	 * Run time enabled when the first protocol tag is added,
3638 	 * run time disabled when the last protocol tag is deleted
3639 	 */
3640 	bool  is_rx_protocol_tagging_enabled;
3641 
3642 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
3643 	/*
3644 	 * The protocol type is used as array index to save
3645 	 * user provided tag info
3646 	 */
3647 	struct rx_protocol_tag_map rx_proto_tag_map[RX_PROTOCOL_TAG_MAX];
3648 
3649 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
3650 	/*
3651 	 * Track msdus received from each reo ring separately to avoid
3652 	 * simultaneous writes from different core
3653 	 */
3654 	struct rx_protocol_tag_stats
3655 		reo_proto_tag_stats[MAX_REO_DEST_RINGS][RX_PROTOCOL_TAG_MAX];
3656 	/* Track msdus received from exception ring separately */
3657 	struct rx_protocol_tag_stats
3658 		rx_err_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
3659 	struct rx_protocol_tag_stats
3660 		mon_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
3661 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
3662 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
3663 
3664 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
3665 	/**
3666 	 * Pointer to DP Flow FST at SOC level if
3667 	 * is_rx_flow_search_table_per_pdev is true
3668 	 */
3669 	struct dp_rx_fst *rx_fst;
3670 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
3671 
3672 #ifdef FEATURE_TSO_STATS
3673 	/* TSO Id to index into TSO packet information */
3674 	qdf_atomic_t tso_idx;
3675 #endif /* FEATURE_TSO_STATS */
3676 
3677 #ifdef WLAN_SUPPORT_DATA_STALL
3678 	data_stall_detect_cb data_stall_detect_callback;
3679 #endif /* WLAN_SUPPORT_DATA_STALL */
3680 
3681 	/* flag to indicate whether LRO hash command has been sent to FW */
3682 	uint8_t is_lro_hash_configured;
3683 
3684 #ifdef HTT_STATS_DEBUGFS_SUPPORT
3685 	/* HTT stats debugfs params */
3686 	struct pdev_htt_stats_dbgfs_cfg *dbgfs_cfg;
3687 #endif
3688 	struct {
3689 		qdf_work_t work;
3690 		qdf_workqueue_t *work_queue;
3691 		uint32_t seq_num;
3692 		uint8_t queue_depth;
3693 		qdf_spinlock_t list_lock;
3694 
3695 		TAILQ_HEAD(, dp_soc_srngs_state) list;
3696 	} bkp_stats;
3697 #ifdef WIFI_MONITOR_SUPPORT
3698 	struct dp_mon_pdev *monitor_pdev;
3699 #endif
3700 #ifdef WLAN_FEATURE_11BE_MLO
3701 	struct dp_mlo_sync_timestamp timestamp;
3702 #endif
3703 	/* Is isolation mode enabled */
3704 	bool  isolation;
3705 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
3706 	uint8_t is_first_wakeup_packet;
3707 #endif
3708 #ifdef CONNECTIVITY_PKTLOG
3709 	/* packetdump callback functions */
3710 	ol_txrx_pktdump_cb dp_tx_packetdump_cb;
3711 	ol_txrx_pktdump_cb dp_rx_packetdump_cb;
3712 #endif
3713 
3714 	/* Firmware Stats for TLV received from Firmware */
3715 	uint64_t fw_stats_tlv_bitmap_rcvd;
3716 
3717 	/* For Checking Pending Firmware Response */
3718 	bool pending_fw_stats_response;
3719 };
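
/*
 * Editor's sketch (illustrative): how the per-pdev DSCP-TID map above is
 * meant to be consumed. The map_id selects one of the DP_MAX_TID_MAPS
 * tables and the DSCP value (upper six bits of the IP TOS / traffic class)
 * indexes into it; the helper name is an assumption.
 */
static inline uint8_t
dp_pdev_dscp_to_tid_sketch(struct dp_pdev *pdev, uint8_t map_id, uint8_t tos)
{
	uint8_t dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;

	return pdev->dscp_tid_map[map_id][dscp];
}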
3720 
3721 struct dp_peer;
3722 
3723 #ifdef DP_RX_UDP_OVER_PEER_ROAM
3724 #define WLAN_ROAM_PEER_AUTH_STATUS_NONE 0x0
3725 /*
3726  * This macro is equivalent to macro ROAM_AUTH_STATUS_AUTHENTICATED used
3727  * in connection mgr
3728  */
3729 #define WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED 0x2
3730 #endif
3731 
3732 /* VDEV structure for data path state */
3733 struct dp_vdev {
3734 	/* OS device abstraction */
3735 	qdf_device_t osdev;
3736 
3737 	/* physical device that is the parent of this virtual device */
3738 	struct dp_pdev *pdev;
3739 
3740 	/* VDEV operating mode */
3741 	enum wlan_op_mode opmode;
3742 
3743 	/* VDEV subtype */
3744 	enum wlan_op_subtype subtype;
3745 
3746 	/* Tx encapsulation type for this VAP */
3747 	enum htt_cmn_pkt_type tx_encap_type;
3748 
3749 	/* Rx Decapsulation type for this VAP */
3750 	enum htt_cmn_pkt_type rx_decap_type;
3751 
3752 	/* WDS enabled */
3753 	bool wds_enabled;
3754 
3755 	/* MEC enabled */
3756 	bool mec_enabled;
3757 
3758 #ifdef QCA_SUPPORT_WDS_EXTENDED
3759 	bool wds_ext_enabled;
3760 	bool drop_tx_mcast;
3761 #endif /* QCA_SUPPORT_WDS_EXTENDED */
3762 	bool drop_3addr_mcast;
3763 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
3764 	bool skip_bar_update;
3765 	unsigned long skip_bar_update_last_ts;
3766 #endif
3767 	/* WDS Aging timer period */
3768 	uint32_t wds_aging_timer_val;
3769 
3770 	/* NAWDS enabled */
3771 	bool nawds_enabled;
3772 
3773 	/* Multicast enhancement enabled */
3774 	uint8_t mcast_enhancement_en;
3775 
3776 	/* IGMP multicast enhancement enabled */
3777 	uint8_t igmp_mcast_enhanc_en;
3778 
3779 	/* vdev_id - ID used to specify a particular vdev to the target */
3780 	uint8_t vdev_id;
3781 
3782 	/* Default HTT meta data for this VDEV */
3783 	/* TBD: check alignment constraints */
3784 	uint16_t htt_tcl_metadata;
3785 
3786 	/* vdev lmac_id */
3787 	uint8_t lmac_id;
3788 
3789 	/* vdev bank_id */
3790 	uint8_t bank_id;
3791 
3792 	/* Mesh mode vdev */
3793 	uint32_t mesh_vdev;
3794 
3795 	/* Mesh mode rx filter setting */
3796 	uint32_t mesh_rx_filter;
3797 
3798 	/* DSCP-TID mapping table ID */
3799 	uint8_t dscp_tid_map_id;
3800 
3801 	/* Address search type to be set in TX descriptor */
3802 	uint8_t search_type;
3803 
3804 	/*
3805 	 * Flag to indicate if s/w tid classification should be
3806 	 * skipped
3807 	 */
3808 	uint8_t skip_sw_tid_classification;
3809 
3810 	/* Flag to enable peer authorization */
3811 	uint8_t peer_authorize;
3812 
3813 	/* AST hash value for BSS peer in HW, valid for STA VAP */
3814 	uint16_t bss_ast_hash;
3815 
3816 	/* AST hash index for BSS peer in HW, valid for STA VAP */
3817 	uint16_t bss_ast_idx;
3818 
3819 	bool multipass_en;
3820 
3821 	/* Address search flags to be configured in HAL descriptor */
3822 	uint8_t hal_desc_addr_search_flags;
3823 
3824 	/* Handle to the OS shim SW's virtual device */
3825 	ol_osif_vdev_handle osif_vdev;
3826 
3827 	/* MAC address */
3828 	union dp_align_mac_addr mac_addr;
3829 
3830 #ifdef WLAN_FEATURE_11BE_MLO
3831 	/* MLO MAC address corresponding to vdev */
3832 	union dp_align_mac_addr mld_mac_addr;
3833 #if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
3834 	bool mlo_vdev;
3835 #endif
3836 #endif
3837 
3838 	/* node in the pdev's list of vdevs */
3839 	TAILQ_ENTRY(dp_vdev) vdev_list_elem;
3840 
3841 	/* dp_peer list */
3842 	TAILQ_HEAD(, dp_peer) peer_list;
3843 	/* to protect peer_list */
3844 	DP_MUTEX_TYPE peer_list_lock;
3845 
3846 	/* RX callback function to flush GRO packets */
3847 	ol_txrx_rx_gro_flush_ind_fp osif_gro_flush;
3848 	/* default RX call back function called by dp */
3849 	ol_txrx_rx_fp osif_rx;
3850 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
3851 	/* callback to receive eapol frames */
3852 	ol_txrx_rx_fp osif_rx_eapol;
3853 #endif
3854 	/* callback to deliver rx frames to the OS */
3855 	ol_txrx_rx_fp osif_rx_stack;
3856 	/* Callback to handle rx fisa frames */
3857 	ol_txrx_fisa_rx_fp osif_fisa_rx;
3858 	ol_txrx_fisa_flush_fp osif_fisa_flush;
3859 
3860 	/* callback function to flush out queued rx packets */
3861 	ol_txrx_rx_flush_fp osif_rx_flush;
3862 	ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap;
3863 	ol_txrx_get_key_fp osif_get_key;
3864 	ol_txrx_tx_free_ext_fp osif_tx_free_ext;
3865 
3866 #ifdef notyet
3867 	/* callback to check if the msdu is an WAI (WAPI) frame */
3868 	ol_rx_check_wai_fp osif_check_wai;
3869 #endif
3870 
3871 	/* proxy arp function */
3872 	ol_txrx_proxy_arp_fp osif_proxy_arp;
3873 
3874 	ol_txrx_mcast_me_fp me_convert;
3875 
3876 	/* completion function used by this vdev*/
3877 	ol_txrx_completion_fp tx_comp;
3878 
3879 	ol_txrx_get_tsf_time get_tsf_time;
3880 
3881 	/* callback to classify critical packets */
3882 	ol_txrx_classify_critical_pkt_fp tx_classify_critical_pkt_cb;
3883 
3884 	/* deferred vdev deletion state */
3885 	struct {
3886 		/* VDEV delete pending */
3887 		int pending;
3888 		/*
3889 		 * callback and a context argument to provide a
3890 		 * notification for when the vdev is deleted.
3891 		 */
3892 		ol_txrx_vdev_delete_cb callback;
3893 		void *context;
3894 	} delete;
3895 
3896 	/* tx data delivery notification callback function */
3897 	struct {
3898 		ol_txrx_data_tx_cb func;
3899 		void *ctxt;
3900 	} tx_non_std_data_callback;
3901 
3902 
3903 	/* safe mode control to bypass the encrypt and decipher process */
3904 	uint32_t safemode;
3905 
3906 	/* rx filter related */
3907 	uint32_t drop_unenc;
3908 #ifdef notyet
3909 	privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
3910 	uint32_t filters_num;
3911 #endif
3912 	/* TDLS Link status */
3913 	bool tdls_link_connected;
3914 	bool is_tdls_frame;
3915 
3916 	/* per vdev rx nbuf queue */
3917 	qdf_nbuf_queue_t rxq;
3918 
3919 	uint8_t tx_ring_id;
3920 	struct dp_tx_desc_pool_s *tx_desc;
3921 	struct dp_tx_ext_desc_pool_s *tx_ext_desc;
3922 
3923 	/* Capture timestamp of previous tx packet enqueued */
3924 	uint64_t prev_tx_enq_tstamp;
3925 
3926 	/* Capture timestamp of previous rx packet delivered */
3927 	uint64_t prev_rx_deliver_tstamp;
3928 
3929 	/* VDEV Stats */
3930 	struct cdp_vdev_stats stats;
3931 
3932 	/* Is this a proxySTA VAP */
3933 	uint8_t proxysta_vdev : 1, /* Is this a proxySTA VAP */
3934 		wrap_vdev : 1, /* Is this a QWRAP AP VAP */
3935 		isolation_vdev : 1, /* Is this an isolation VAP */
3936 		reserved : 5; /* Reserved */
3937 
3938 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3939 	struct dp_tx_desc_pool_s *pool;
3940 #endif
3941 	/* AP BRIDGE enabled */
3942 	bool ap_bridge_enabled;
3943 
3944 	enum cdp_sec_type  sec_type;
3945 
3946 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
3947 	bool raw_mode_war;
3948 
3949 
3950 	/* 8021p PCP-TID mapping table ID */
3951 	uint8_t tidmap_tbl_id;
3952 
3953 	/* 8021p PCP-TID map values */
3954 	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
3955 
3956 	/* TIDmap priority */
3957 	uint8_t tidmap_prty;
3958 
3959 #ifdef QCA_MULTIPASS_SUPPORT
3960 	uint16_t *iv_vlan_map;
3961 
3962 	/* dp_peer special list */
3963 	TAILQ_HEAD(, dp_txrx_peer) mpass_peer_list;
3964 	DP_MUTEX_TYPE mpass_peer_mutex;
3965 #endif
3966 	/* Extended data path handle */
3967 	struct cdp_ext_vdev *vdev_dp_ext_handle;
3968 #ifdef VDEV_PEER_PROTOCOL_COUNT
3969 	/*
3970 	 * Rx-Ingress and Tx-Egress are handled in the lower level DP layer,
3971 	 * while Rx-Egress and Tx-Ingress are handled in the OSIF layer.
3972 	 * Hence,
3973 	 * the Rx-Egress and Tx-Ingress mask definitions live in the OSIF layer
3974 	 * and the Rx-Ingress and Tx-Egress mask definitions are here below.
3975 	 */
3976 #define VDEV_PEER_PROTOCOL_RX_INGRESS_MASK 1
3977 #define VDEV_PEER_PROTOCOL_TX_INGRESS_MASK 2
3978 #define VDEV_PEER_PROTOCOL_RX_EGRESS_MASK 4
3979 #define VDEV_PEER_PROTOCOL_TX_EGRESS_MASK 8
3980 	bool peer_protocol_count_track;
3981 	int peer_protocol_count_dropmask;
3982 #endif
3983 	/* callback to collect connectivity stats */
3984 	ol_txrx_stats_rx_fp stats_cb;
3985 	uint32_t num_peers;
3986 	/* entry to inactive_list*/
3987 	TAILQ_ENTRY(dp_vdev) inactive_list_elem;
3988 
3989 #ifdef WLAN_SUPPORT_RX_FISA
3990 	/**
3991 	 * Params used for controlling the fisa aggregation dynamically
3992 	 */
3993 	uint8_t fisa_disallowed[MAX_REO_DEST_RINGS];
3994 	uint8_t fisa_force_flushed[MAX_REO_DEST_RINGS];
3995 #endif
3996 	/*
3997 	 * Refcount for VDEV currently incremented when
3998 	 * peer is created for VDEV
3999 	 */
4000 	qdf_atomic_t ref_cnt;
4001 	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
4002 	uint8_t num_latency_critical_conn;
4003 #ifdef WLAN_SUPPORT_MESH_LATENCY
4004 	uint8_t peer_tid_latency_enabled;
4005 	/* tid latency configuration parameters */
4006 	struct {
4007 		uint32_t service_interval;
4008 		uint32_t burst_size;
4009 		uint8_t latency_tid;
4010 	} mesh_tid_latency_config;
4011 #endif
4012 #ifdef WIFI_MONITOR_SUPPORT
4013 	struct dp_mon_vdev *monitor_vdev;
4014 #endif
4015 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
4016 	/* Delta between TQM clock and TSF clock */
4017 	uint32_t delta_tsf;
4018 #endif
4019 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
4020 	/* Indicate if uplink delay report is enabled or not */
4021 	qdf_atomic_t ul_delay_report;
4022 	/* accumulative delay for every TX completion */
4023 	qdf_atomic_t ul_delay_accum;
4024 	/* accumulative number of packets delay has accumulated */
4025 	qdf_atomic_t ul_pkts_accum;
4026 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
4027 
4028 	/* vdev_stats_id - ID used for stats collection by FW from HW*/
4029 	uint8_t vdev_stats_id;
4030 #ifdef HW_TX_DELAY_STATS_ENABLE
4031 	/* hw tx delay stats enable */
4032 	uint8_t hw_tx_delay_stats_enabled;
4033 #endif
4034 #ifdef DP_RX_UDP_OVER_PEER_ROAM
4035 	uint32_t roaming_peer_status;
4036 	union dp_align_mac_addr roaming_peer_mac;
4037 #endif
4038 #ifdef DP_TRAFFIC_END_INDICATION
4039 	/* per vdev feature enable/disable status */
4040 	bool traffic_end_ind_en;
4041 	/* per vdev nbuf queue for traffic end indication packets */
4042 	qdf_nbuf_queue_t end_ind_pkt_q;
4043 #endif
4044 #ifdef FEATURE_DIRECT_LINK
4045 	/* Flag to indicate if to_fw should be set for tx pkts on this vdev */
4046 	bool to_fw;
4047 #endif
4048 };
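
/*
 * Editor's sketch (illustrative, not the driver's iterator): the pdev's
 * vdev_list declared earlier in this file is expected to be walked under
 * vdev_list_lock; the counting helper below only demonstrates the locking
 * and TAILQ usage.
 */
static inline uint16_t
dp_pdev_count_opmode_vdevs_sketch(struct dp_pdev *pdev,
				  enum wlan_op_mode opmode)
{
	struct dp_vdev *vdev;
	uint16_t count = 0;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (vdev->opmode == opmode)
			count++;
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	return count;
}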
4049 
4050 enum {
4051 	dp_sec_mcast = 0,
4052 	dp_sec_ucast
4053 };
4054 
4055 #ifdef WDS_VENDOR_EXTENSION
4056 typedef struct {
4057 	uint8_t	wds_tx_mcast_4addr:1,
4058 		wds_tx_ucast_4addr:1,
4059 		wds_rx_filter:1,      /* enforce rx filter */
4060 		wds_rx_ucast_4addr:1, /* when set, accept 4addr unicast frames    */
4061 		wds_rx_mcast_4addr:1;  /* when set, accept 4addr multicast frames  */
4062 
4063 } dp_ecm_policy;
4064 #endif
4065 
4066 /**
4067  * struct dp_peer_cached_bufq - cached_bufq to enqueue rx packets
4068  * @cached_bufq: nbuff list to enqueue rx packets
4069  * @bufq_lock: spinlock for nbuff list access
4070  * @thresh: maximum threshold for number of rx buff to enqueue
4071  * @entries: number of entries
4072  * @dropped: number of packets dropped
4073  */
4074 struct dp_peer_cached_bufq {
4075 	qdf_list_t cached_bufq;
4076 	qdf_spinlock_t bufq_lock;
4077 	uint32_t thresh;
4078 	uint32_t entries;
4079 	uint32_t dropped;
4080 };
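
/*
 * Editor's sketch (assumption, not the driver's enqueue routine): rx nbufs
 * are cached in cached_bufq under bufq_lock until the peer is registered;
 * once thresh entries are queued, further packets are accounted as dropped.
 * The node type wrapping the nbuf is a local assumption.
 */
struct dp_cached_buf_sketch {
	qdf_list_node_t node;
	qdf_nbuf_t buf;
};

static inline bool
dp_peer_cached_bufq_add_sketch(struct dp_peer_cached_bufq *bufq_info,
			       struct dp_cached_buf_sketch *cached_buf)
{
	bool queued = false;

	qdf_spin_lock_bh(&bufq_info->bufq_lock);
	if (bufq_info->entries < bufq_info->thresh) {
		qdf_list_insert_back(&bufq_info->cached_bufq,
				     &cached_buf->node);
		bufq_info->entries++;
		queued = true;
	} else {
		bufq_info->dropped++;
	}
	qdf_spin_unlock_bh(&bufq_info->bufq_lock);

	return queued;
}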
4081 
4082 /**
4083  * enum dp_peer_ast_flowq
4084  * @DP_PEER_AST_FLOWQ_HI_PRIO: Hi Priority flow queue
4085  * @DP_PEER_AST_FLOWQ_LOW_PRIO: Low priority flow queue
4086  * @DP_PEER_AST_FLOWQ_UDP: flow queue type is UDP
4087  * @DP_PEER_AST_FLOWQ_NON_UDP: flow queue type is Non UDP
4088  * @DP_PEER_AST_FLOWQ_MAX: max value
4089  */
4090 enum dp_peer_ast_flowq {
4091 	DP_PEER_AST_FLOWQ_HI_PRIO,
4092 	DP_PEER_AST_FLOWQ_LOW_PRIO,
4093 	DP_PEER_AST_FLOWQ_UDP,
4094 	DP_PEER_AST_FLOWQ_NON_UDP,
4095 	DP_PEER_AST_FLOWQ_MAX,
4096 };
4097 
4098 /**
4099  * struct dp_ast_flow_override_info - ast override info
4100  * @ast_idx: ast indexes in peer map message
4101  * @ast_valid_mask: ast valid mask for each ast index
4102  * @ast_flow_mask: ast flow mask for each ast index
4103  * @tid_valid_low_pri_mask: per tid mask for low priority flow
4104  * @tid_valid_hi_pri_mask: per tid mask for hi priority flow
4105  */
4106 struct dp_ast_flow_override_info {
4107 	uint16_t ast_idx[DP_PEER_AST_FLOWQ_MAX];
4108 	uint8_t ast_valid_mask;
4109 	uint8_t ast_flow_mask[DP_PEER_AST_FLOWQ_MAX];
4110 	uint8_t tid_valid_low_pri_mask;
4111 	uint8_t tid_valid_hi_pri_mask;
4112 };
4113 
4114 /**
4115  * struct dp_peer_ast_params - ast parameters for a msdu flow-queue
4116  * @ast_idx: ast index populated by FW
4117  * @is_valid: ast flow valid mask
4118  * @valid_tid_mask: per tid mask for this ast index
4119  * @flowQ: flow queue id associated with this ast index
4120  */
4121 struct dp_peer_ast_params {
4122 	uint16_t ast_idx;
4123 	uint8_t is_valid;
4124 	uint8_t valid_tid_mask;
4125 	uint8_t flowQ;
4126 };
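
/*
 * Editor's sketch (illustrative): relationship between the peer-map fields
 * in dp_ast_flow_override_info and the per-peer dp_peer_ast_params array.
 * The real population logic lives in the peer map handler; only the field
 * mapping is shown, and the helper name is an assumption.
 */
static inline void
dp_peer_ast_params_fill_sketch(struct dp_peer_ast_params *params,
			       const struct dp_ast_flow_override_info *info)
{
	uint8_t flowq;

	for (flowq = 0; flowq < DP_PEER_AST_FLOWQ_MAX; flowq++) {
		params[flowq].flowQ = flowq;
		params[flowq].ast_idx = info->ast_idx[flowq];
		/* one valid bit per flow queue in ast_valid_mask */
		params[flowq].is_valid =
			!!(info->ast_valid_mask & (1 << flowq));
	}
}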
4127 
4128 #define DP_MLO_FLOW_INFO_MAX	3
4129 
4130 /**
4131  * struct dp_mlo_flow_override_info - Flow override info
4132  * @ast_idx: Primary TCL AST Index
4133  * @ast_idx_valid: Is AST index valid
4134  * @chip_id: CHIP ID
4135  * @tidmask: tidmask
4136  * @cache_set_num: Cache set number
4137  */
4138 struct dp_mlo_flow_override_info {
4139 	uint16_t ast_idx;
4140 	uint8_t ast_idx_valid;
4141 	uint8_t chip_id;
4142 	uint8_t tidmask;
4143 	uint8_t cache_set_num;
4144 };
4145 
4146 /**
4147  * struct dp_mlo_link_info - Link info
4148  * @peer_chip_id: Peer Chip ID
4149  * @vdev_id: Vdev ID
4150  */
4151 struct dp_mlo_link_info {
4152 	uint8_t peer_chip_id;
4153 	uint8_t vdev_id;
4154 };
4155 
4156 #ifdef WLAN_SUPPORT_MSCS
4157 /* MSCS Procedure based macros */
4158 #define IEEE80211_MSCS_MAX_ELEM_SIZE    5
4159 #define IEEE80211_TCLAS_MASK_CLA_TYPE_4  4
4160 /**
4161  * struct dp_peer_mscs_parameter - MSCS database obtained from
4162  * MSCS Request and Response in the control path. This data is used
4163  * by the AP to find out what priority to set based on the tuple
4164  * classification during packet processing.
4165  * @user_priority_bitmap: User priority bitmap obtained during
4166  * handshake
4167  * @user_priority_limit: User priority limit obtained during
4168  * handshake
4169  * @classifier_mask: params to be compared during processing
4170  */
4171 struct dp_peer_mscs_parameter {
4172 	uint8_t user_priority_bitmap;
4173 	uint8_t user_priority_limit;
4174 	uint8_t classifier_mask;
4175 };
4176 #endif
4177 
4178 #ifdef QCA_SUPPORT_WDS_EXTENDED
4179 #define WDS_EXT_PEER_INIT_BIT 0
4180 
4181 /**
4182  * struct dp_wds_ext_peer - wds ext peer structure
4183  * This is used when the wds extended feature is enabled
4184  * both at compile time and run time. It is created
4185  * when the first 4-address frame is received from
4186  * the wds backhaul.
4187  * @osif_peer: Handle to the OS shim SW's virtual device
4188  * @init: wds ext netdev state
4189  */
4190 struct dp_wds_ext_peer {
4191 	ol_osif_peer_handle osif_peer;
4192 	unsigned long init;
4193 };
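
/*
 * Editor's sketch (illustrative): WDS_EXT_PEER_INIT_BIT is intended to be
 * used with the atomic bit helpers on @init so the wds ext netdev is set up
 * exactly once per peer; the helper name is an assumption.
 */
static inline bool
dp_wds_ext_peer_init_once_sketch(struct dp_wds_ext_peer *wds_ext)
{
	/* true only for the first caller; later callers see the bit set */
	return !qdf_atomic_test_and_set_bit(WDS_EXT_PEER_INIT_BIT,
					    &wds_ext->init);
}
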
4194 #endif /* QCA_SUPPORT_WDS_EXTENDED */
4195 
4196 #ifdef WLAN_SUPPORT_MESH_LATENCY
4197 /* Advanced Mesh latency feature based macros */
4198 
4199 /**
4200  * struct dp_peer_mesh_latency_parameter - Mesh latency related
4201  * parameters. This data is updated per peer per TID based on
4202  * the flow tuple classification in external rule database
4203  * during packet processing.
4204  * @service_interval_dl: Service interval associated with TID in DL
4205  * @burst_size_dl: Burst size additive over multiple flows in DL
4206  * @service_interval_ul: Service interval associated with TID in UL
4207  * @burst_size_ul: Burst size additive over multiple flows in UL
4208  * @ac: custom ac derived from service interval
4209  * @msduq: MSDU queue number within TID
4210  */
4211 struct dp_peer_mesh_latency_parameter {
4212 	uint32_t service_interval_dl;
4213 	uint32_t burst_size_dl;
4214 	uint32_t service_interval_ul;
4215 	uint32_t burst_size_ul;
4216 	uint8_t ac;
4217 	uint8_t msduq;
4218 };
4219 #endif
4220 
4221 #ifdef WLAN_FEATURE_11BE_MLO
4222 /* Max number of links for MLO connection */
4223 #define DP_MAX_MLO_LINKS 4
4224 
4225 /**
4226  * struct dp_peer_link_info - link peer information for MLO
4227  * @mac_addr: Mac address
4228  * @vdev_id: Vdev ID for current link peer
4229  * @is_valid: flag for link peer info valid or not
4230  * @chip_id: chip id
4231  */
4232 struct dp_peer_link_info {
4233 	union dp_align_mac_addr mac_addr;
4234 	uint8_t vdev_id;
4235 	uint8_t is_valid;
4236 	uint8_t chip_id;
4237 };
4238 
4239 /**
4240  * struct dp_mld_link_peers - this structure is used to get link peers
4241  *			      pointer from mld peer
4242  * @link_peers: link peers pointer array
4243  * @num_links: number of link peers fetched
4244  */
4245 struct dp_mld_link_peers {
4246 	struct dp_peer *link_peers[DP_MAX_MLO_LINKS];
4247 	uint8_t num_links;
4248 };
4249 #else
4250 #define DP_MAX_MLO_LINKS 0
4251 #endif
4252 
4253 typedef void *dp_txrx_ref_handle;
4254 
4255 /**
4256  * struct dp_peer_per_pkt_tx_stats - Peer Tx stats updated in per pkt
4257  *				Tx completion path
4258  * @ucast: Unicast Packet Count
4259  * @mcast: Multicast Packet Count
4260  * @bcast: Broadcast Packet Count
4261  * @nawds_mcast: NAWDS Multicast Packet Count
4262  * @tx_success: Successful Tx Packets
4263  * @nawds_mcast_drop: NAWDS Multicast Drop Count
4264  * @ofdma: Total Packets as ofdma
4265  * @non_amsdu_cnt: Number of MSDUs with no MSDU level aggregation
4266  * @amsdu_cnt: Number of MSDUs part of AMSDU
4267  * @dropped: Dropped packet statistics
4268  * @dropped.fw_rem: Discarded by firmware
4269  * @dropped.fw_rem_notx: firmware_discard_untransmitted
4270  * @dropped.fw_rem_tx: firmware_discard_transmitted
4271  * @dropped.age_out: aged out in mpdu/msdu queues
4272  * @dropped.fw_reason1: discarded by firmware reason 1
4273  * @dropped.fw_reason2: discarded by firmware reason 2
4274  * @dropped.fw_reason3: discarded by firmware reason 3
4275  * @dropped.fw_rem_no_match: dropped due to fw no match command
4276  * @dropped.drop_threshold: dropped due to HW threshold
4277  * @dropped.drop_link_desc_na: dropped due to resource not available in HW
4278  * @dropped.invalid_drop: Invalid msdu drop
4279  * @dropped.mcast_vdev_drop: MCAST drop configured for VDEV in HW
4280  * @dropped.invalid_rr: Invalid TQM release reason
4281  * @failed_retry_count: packets failed due to retry above 802.11 retry limit
4282  * @retry_count: packets successfully send after one or more retry
4283  * @multiple_retry_count: packets successfully sent after more than one retry
4284  * @no_ack_count: no ack pkt count for different protocols
4285  * @tx_success_twt: Successful Tx Packets in TWT session
4286  * @last_tx_ts: last timestamp in jiffies when tx comp occurred
4287  * @avg_sojourn_msdu: Avg sojourn msdu stat
4288  * @protocol_trace_cnt: per-peer protocol counter
4289  * @release_src_not_tqm: Counter to keep track of release source is not TQM
4290  *			 in TX completion status processing
4291  * @inval_link_id_pkt_cnt: Counter to capture Invalid Link Id
4292  */
4293 struct dp_peer_per_pkt_tx_stats {
4294 	struct cdp_pkt_info ucast;
4295 	struct cdp_pkt_info mcast;
4296 	struct cdp_pkt_info bcast;
4297 	struct cdp_pkt_info nawds_mcast;
4298 	struct cdp_pkt_info tx_success;
4299 	uint32_t nawds_mcast_drop;
4300 	uint32_t ofdma;
4301 	uint32_t non_amsdu_cnt;
4302 	uint32_t amsdu_cnt;
4303 	struct {
4304 		struct cdp_pkt_info fw_rem;
4305 		uint32_t fw_rem_notx;
4306 		uint32_t fw_rem_tx;
4307 		uint32_t age_out;
4308 		uint32_t fw_reason1;
4309 		uint32_t fw_reason2;
4310 		uint32_t fw_reason3;
4311 		uint32_t fw_rem_queue_disable;
4312 		uint32_t fw_rem_no_match;
4313 		uint32_t drop_threshold;
4314 		uint32_t drop_link_desc_na;
4315 		uint32_t invalid_drop;
4316 		uint32_t mcast_vdev_drop;
4317 		uint32_t invalid_rr;
4318 	} dropped;
4319 	uint32_t failed_retry_count;
4320 	uint32_t retry_count;
4321 	uint32_t multiple_retry_count;
4322 	uint32_t no_ack_count[QDF_PROTO_SUBTYPE_MAX];
4323 	struct cdp_pkt_info tx_success_twt;
4324 	unsigned long last_tx_ts;
4325 	qdf_ewma_tx_lag avg_sojourn_msdu[CDP_DATA_TID_MAX];
4326 #ifdef VDEV_PEER_PROTOCOL_COUNT
4327 	struct protocol_trace_count protocol_trace_cnt[CDP_TRACE_MAX];
4328 #endif
4329 	uint32_t release_src_not_tqm;
4330 	uint32_t inval_link_id_pkt_cnt;
4331 };
4332 
4333 /**
4334  * struct dp_peer_extd_tx_stats - Peer Tx stats updated in either
4335  *	per pkt Tx completion path when macro QCA_ENHANCED_STATS_SUPPORT is
4336  *	disabled or in HTT Tx PPDU completion path when macro is enabled
4337  * @stbc: Packets in STBC
4338  * @ldpc: Packets in LDPC
4339  * @retries: Packet retries
4340  * @pkt_type: pkt count for different .11 modes
4341  * @wme_ac_type: Wireless Multimedia type Count
4342  * @excess_retries_per_ac: Excess frame retries per WMM access category
4343  * @ampdu_cnt: completion of aggregation
4344  * @non_ampdu_cnt: tx completion not aggregated
4345  * @num_ppdu_cookie_valid: no. of valid ppdu cookies rcvd from FW
4346  * @tx_ppdus: ppdus in tx
4347  * @tx_mpdus_success: mpdus successful in tx
4348  * @tx_mpdus_tried: mpdus tried in tx
4349  * @tx_rate: Tx Rate in kbps
4350  * @last_tx_rate: Last tx rate for unicast packets
4351  * @last_tx_rate_mcs: Tx rate mcs for unicast packets
4352  * @mcast_last_tx_rate: Last tx rate for multicast packets
4353  * @mcast_last_tx_rate_mcs: Last tx rate mcs for multicast
4354  * @rnd_avg_tx_rate: Rounded average tx rate
4355  * @avg_tx_rate: Average TX rate
4356  * @tx_ratecode: Tx rate code of last frame
4357  * @pream_punct_cnt: Preamble Punctured count
4358  * @sgi_count: SGI count
4359  * @nss: Packet count for different num_spatial_stream values
4360  * @bw: Packet Count for different bandwidths
4361  * @ru_start: RU start index
4362  * @ru_tones: RU tones size
4363  * @ru_loc: pkt info for RU location 26/ 52/ 106/ 242/ 484 counter
4364  * @transmit_type: pkt info for tx transmit type
4365  * @mu_group_id: mumimo mu group id
4366  * @last_ack_rssi: RSSI of last acked packet
4367  * @nss_info: NSS 1,2, ...8
4368  * @mcs_info: MCS index
4369  * @bw_info: Bandwidth
4370  *       <enum 0 bw_20_MHz>
4371  *       <enum 1 bw_40_MHz>
4372  *       <enum 2 bw_80_MHz>
4373  *       <enum 3 bw_160_MHz>
4374  * @gi_info: <enum 0     0_8_us_sgi > Legacy normal GI
4375  *       <enum 1     0_4_us_sgi > Legacy short GI
4376  *       <enum 2     1_6_us_sgi > HE related GI
4377  *       <enum 3     3_2_us_sgi > HE
4378  * @preamble_info: preamble
4379  * @tx_ucast_total: total ucast count
4380  * @tx_ucast_success: total ucast success count
4381  * @retries_mpdu: number of MPDUs transmitted successfully after retries
4382  * @mpdu_success_with_retries: mpdu retry count in case of successful tx
4383  * @su_be_ppdu_cnt: SU Tx packet count for 11BE
4384  * @mu_be_ppdu_cnt: MU Tx packet count for 11BE
4385  * @punc_bw: MSDU count for punctured bw
4386  * @rts_success: RTS success count
4387  * @rts_failure: RTS failure count
4388  * @bar_cnt: Block ACK Request frame count
4389  * @ndpa_cnt: NDP announcement frame count
4390  * @rssi_chain: rssi chain
4391  * @wme_ac_type_bytes: Wireless Multimedia bytes Count
4392  */
4393 struct dp_peer_extd_tx_stats {
4394 	uint32_t stbc;
4395 	uint32_t ldpc;
4396 	uint32_t retries;
4397 	struct cdp_pkt_type pkt_type[DOT11_MAX];
4398 	uint32_t wme_ac_type[WME_AC_MAX];
4399 	uint32_t excess_retries_per_ac[WME_AC_MAX];
4400 	uint32_t ampdu_cnt;
4401 	uint32_t non_ampdu_cnt;
4402 	uint32_t num_ppdu_cookie_valid;
4403 	uint32_t tx_ppdus;
4404 	uint32_t tx_mpdus_success;
4405 	uint32_t tx_mpdus_tried;
4406 
4407 	uint32_t tx_rate;
4408 	uint32_t last_tx_rate;
4409 	uint32_t last_tx_rate_mcs;
4410 	uint32_t mcast_last_tx_rate;
4411 	uint32_t mcast_last_tx_rate_mcs;
4412 	uint64_t rnd_avg_tx_rate;
4413 	uint64_t avg_tx_rate;
4414 	uint16_t tx_ratecode;
4415 
4416 	uint32_t sgi_count[MAX_GI];
4417 	uint32_t pream_punct_cnt;
4418 	uint32_t nss[SS_COUNT];
4419 	uint32_t bw[MAX_BW];
4420 	uint32_t ru_start;
4421 	uint32_t ru_tones;
4422 	struct cdp_tx_pkt_info ru_loc[MAX_RU_LOCATIONS];
4423 
4424 	struct cdp_tx_pkt_info transmit_type[MAX_TRANSMIT_TYPES];
4425 	uint32_t mu_group_id[MAX_MU_GROUP_ID];
4426 
4427 	uint32_t last_ack_rssi;
4428 
4429 	uint32_t nss_info:4,
4430 		 mcs_info:4,
4431 		 bw_info:4,
4432 		 gi_info:4,
4433 		 preamble_info:4;
4434 
4435 	uint32_t retries_mpdu;
4436 	uint32_t mpdu_success_with_retries;
4437 	struct cdp_pkt_info tx_ucast_total;
4438 	struct cdp_pkt_info tx_ucast_success;
4439 #ifdef WLAN_FEATURE_11BE
4440 	struct cdp_pkt_type su_be_ppdu_cnt;
4441 	struct cdp_pkt_type mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX];
4442 	uint32_t punc_bw[MAX_PUNCTURED_MODE];
4443 #endif
4444 	uint32_t rts_success;
4445 	uint32_t rts_failure;
4446 	uint32_t bar_cnt;
4447 	uint32_t ndpa_cnt;
4448 	int32_t rssi_chain[CDP_RSSI_CHAIN_LEN];
4449 	uint64_t wme_ac_type_bytes[WME_AC_MAX];
4450 };
4451 
4452 /**
4453  * struct dp_peer_per_pkt_rx_stats - Peer Rx stats updated in per pkt Rx path
4454  * @rcvd_reo: Packets received on the reo ring
4455  * @rx_lmac: Packets received on each lmac
4456  * @unicast: Total unicast packets
4457  * @multicast: Total multicast packets
4458  * @bcast:  Broadcast Packet Count
4459  * @raw: Raw Packets received
4460  * @nawds_mcast_drop: Total NAWDS multicast packets dropped
4461  * @mec_drop: Total MEC packets dropped
4462  * @ppeds_drop: Total DS packets dropped
4463  * @last_rx_ts: last timestamp in jiffies when RX happened
4464  * @intra_bss: Intra BSS statistics
4465  * @intra_bss.pkts: Intra BSS packets received
4466  * @intra_bss.fail: Intra BSS packets failed
4467  * @intra_bss.mdns_no_fwd: Intra BSS MDNS packets not forwarded
4468  * @err: error counters
4469  * @err.mic_err: Rx MIC errors CCMP
4470  * @err.decrypt_err: Rx Decryption Errors CRC
4471  * @err.fcserr: Rx FCS check failures
4472  * @err.pn_err: pn check failed
4473  * @err.oor_err: Rx OOR errors
4474  * @err.jump_2k_err: 2k jump errors
4475  * @err.rxdma_wifi_parse_err: rxdma wifi parse errors
4476  * @non_amsdu_cnt: Number of MSDUs with no MSDU level aggregation
4477  * @amsdu_cnt: Number of MSDUs part of AMSDU
4478  * @rx_retries: retries of packet in rx
4479  * @multipass_rx_pkt_drop: Dropped multipass rx pkt
4480  * @peer_unauth_rx_pkt_drop: Unauth rx packet drops
4481  * @policy_check_drop: policy check drops
4482  * @to_stack_twt: Total packets sent up the stack in TWT session
4483  * @protocol_trace_cnt: per-peer protocol counters
4484  * @mcast_3addr_drop: multicast packets dropped due to 3-address mcast drop
4485  * @rx_total: total rx count
4486  * @inval_link_id_pkt_cnt: Counter to capture Invalid Link Id
4487  */
4488 struct dp_peer_per_pkt_rx_stats {
4489 	struct cdp_pkt_info rcvd_reo[CDP_MAX_RX_RINGS];
4490 	struct cdp_pkt_info rx_lmac[CDP_MAX_LMACS];
4491 	struct cdp_pkt_info unicast;
4492 	struct cdp_pkt_info multicast;
4493 	struct cdp_pkt_info bcast;
4494 	struct cdp_pkt_info raw;
4495 	uint32_t nawds_mcast_drop;
4496 	struct cdp_pkt_info mec_drop;
4497 	struct cdp_pkt_info ppeds_drop;
4498 	unsigned long last_rx_ts;
4499 	struct {
4500 		struct cdp_pkt_info pkts;
4501 		struct cdp_pkt_info fail;
4502 		uint32_t mdns_no_fwd;
4503 	} intra_bss;
4504 	struct {
4505 		uint32_t mic_err;
4506 		uint32_t decrypt_err;
4507 		uint32_t fcserr;
4508 		uint32_t pn_err;
4509 		uint32_t oor_err;
4510 		uint32_t jump_2k_err;
4511 		uint32_t rxdma_wifi_parse_err;
4512 	} err;
4513 	uint32_t non_amsdu_cnt;
4514 	uint32_t amsdu_cnt;
4515 	uint32_t rx_retries;
4516 	uint32_t multipass_rx_pkt_drop;
4517 	uint32_t peer_unauth_rx_pkt_drop;
4518 	uint32_t policy_check_drop;
4519 	struct cdp_pkt_info to_stack_twt;
4520 #ifdef VDEV_PEER_PROTOCOL_COUNT
4521 	struct protocol_trace_count protocol_trace_cnt[CDP_TRACE_MAX];
4522 #endif
4523 	uint32_t mcast_3addr_drop;
4524 #ifdef IPA_OFFLOAD
4525 	struct cdp_pkt_info rx_total;
4526 #endif
4527 	uint32_t inval_link_id_pkt_cnt;
4528 };
4529 
4530 /**
4531  * struct dp_peer_extd_rx_stats - Peer Rx stats updated in either
4532  *	per pkt Rx path when macro QCA_ENHANCED_STATS_SUPPORT is disabled or in
4533  *	Rx monitor path when macro is enabled
4534  * @pkt_type: pkt counter for different .11 modes
4535  * @wme_ac_type: Wireless Multimedia type Count
4536  * @mpdu_cnt_fcs_ok: SU Rx success mpdu count
4537  * @mpdu_cnt_fcs_err: SU Rx fail mpdu count
4538  * @non_ampdu_cnt: Number of MSDUs with no MPDU level aggregation
4539  * @ampdu_cnt: Number of MSDUs part of AMPDU
4540  * @rx_mpdus: mpdu in rx
4541  * @rx_ppdus: ppdu in rx
4542  * @su_ax_ppdu_cnt: SU Rx packet count for .11ax
4543  * @rx_mu: Rx MU stats
4544  * @reception_type: Reception type of packets
4545  * @ppdu_cnt: PPDU packet count in reception type
4546  * @sgi_count: sgi count
4547  * @nss: packet count in spatial streams
4548  * @ppdu_nss: PPDU packet count in spatial streams
4549  * @bw: Packet Count in different bandwidths
4550  * @rx_mpdu_cnt: rx mpdu count per MCS rate
4551  * @rx_rate: Rx rate
4552  * @last_rx_rate: Previous rx rate
4553  * @rnd_avg_rx_rate: Rounded average rx rate
4554  * @avg_rx_rate: Average Rx rate
4555  * @rx_ratecode: Rx rate code of last frame
4556  * @avg_snr: Average snr
4557  * @rx_snr_measured_time: Time at which snr is measured
4558  * @snr: SNR of received signal
4559  * @last_snr: Previous snr
4560  * @nss_info: NSS 1,2, ...8
4561  * @mcs_info: MCS index
4562  * @bw_info: Bandwidth
4563  *       <enum 0 bw_20_MHz>
4564  *       <enum 1 bw_40_MHz>
4565  *       <enum 2 bw_80_MHz>
4566  *       <enum 3 bw_160_MHz>
4567  * @gi_info: <enum 0     0_8_us_sgi > Legacy normal GI
4568  *       <enum 1     0_4_us_sgi > Legacy short GI
4569  *       <enum 2     1_6_us_sgi > HE related GI
4570  *       <enum 3     3_2_us_sgi > HE
4571  * @preamble_info: preamble
4572  * @mpdu_retry_cnt: retries of mpdu in rx
4573  * @su_be_ppdu_cnt: SU Rx packet count for BE
4574  * @mu_be_ppdu_cnt: MU rx packet count for BE
4575  * @punc_bw: MSDU count for punctured bw
4576  * @bar_cnt: Block ACK Request frame count
4577  * @ndpa_cnt: NDP announcement frame count
4578  * @wme_ac_type_bytes: Wireless Multimedia type Bytes Count
4579  */
4580 struct dp_peer_extd_rx_stats {
4581 	struct cdp_pkt_type pkt_type[DOT11_MAX];
4582 	uint32_t wme_ac_type[WME_AC_MAX];
4583 	uint32_t mpdu_cnt_fcs_ok;
4584 	uint32_t mpdu_cnt_fcs_err;
4585 	uint32_t non_ampdu_cnt;
4586 	uint32_t ampdu_cnt;
4587 	uint32_t rx_mpdus;
4588 	uint32_t rx_ppdus;
4589 
4590 	struct cdp_pkt_type su_ax_ppdu_cnt;
4591 	struct cdp_rx_mu rx_mu[TXRX_TYPE_MU_MAX];
4592 	uint32_t reception_type[MAX_RECEPTION_TYPES];
4593 	uint32_t ppdu_cnt[MAX_RECEPTION_TYPES];
4594 
4595 	uint32_t sgi_count[MAX_GI];
4596 	uint32_t nss[SS_COUNT];
4597 	uint32_t ppdu_nss[SS_COUNT];
4598 	uint32_t bw[MAX_BW];
4599 	uint32_t rx_mpdu_cnt[MAX_MCS];
4600 
4601 	uint32_t rx_rate;
4602 	uint32_t last_rx_rate;
4603 	uint32_t rnd_avg_rx_rate;
4604 	uint32_t avg_rx_rate;
4605 	uint32_t rx_ratecode;
4606 
4607 	uint32_t avg_snr;
4608 	unsigned long rx_snr_measured_time;
4609 	uint8_t snr;
4610 	uint8_t last_snr;
4611 
4612 	uint32_t nss_info:4,
4613 		 mcs_info:4,
4614 		 bw_info:4,
4615 		 gi_info:4,
4616 		 preamble_info:4;
4617 
4618 	uint32_t mpdu_retry_cnt;
4619 #ifdef WLAN_FEATURE_11BE
4620 	struct cdp_pkt_type su_be_ppdu_cnt;
4621 	struct cdp_pkt_type mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX];
4622 	uint32_t punc_bw[MAX_PUNCTURED_MODE];
4623 #endif
4624 	uint32_t bar_cnt;
4625 	uint32_t ndpa_cnt;
4626 	uint64_t wme_ac_type_bytes[WME_AC_MAX];
4627 };
4628 
4629 /**
4630  * struct dp_peer_per_pkt_stats - Per pkt stats for peer
4631  * @tx: Per pkt Tx stats
4632  * @rx: Per pkt Rx stats
4633  */
4634 struct dp_peer_per_pkt_stats {
4635 	struct dp_peer_per_pkt_tx_stats tx;
4636 	struct dp_peer_per_pkt_rx_stats rx;
4637 };
4638 
4639 /**
4640  * struct dp_peer_extd_stats - Stats from extended path for peer
4641  * @tx: Extended path tx stats
4642  * @rx: Extended path rx stats
4643  */
4644 struct dp_peer_extd_stats {
4645 	struct dp_peer_extd_tx_stats tx;
4646 	struct dp_peer_extd_rx_stats rx;
4647 };
4648 
4649 /**
4650  * struct dp_peer_stats - Peer stats
4651  * @per_pkt_stats: Per packet path stats
4652  * @extd_stats: Extended path stats
4653  */
4654 struct dp_peer_stats {
4655 	struct dp_peer_per_pkt_stats per_pkt_stats;
4656 #ifndef QCA_ENHANCED_STATS_SUPPORT
4657 	struct dp_peer_extd_stats extd_stats;
4658 #endif
4659 };
4660 
4661 /**
4662  * struct dp_txrx_peer - DP txrx_peer structure used in per pkt path
4663  * @vdev: VDEV to which this peer is associated
4664  * @peer_id: peer ID for this peer
4665  * @authorize: Set when authorized
4666  * @in_twt: in TWT session
4667  * @hw_txrx_stats_en: Indicate HW offload vdev stats
4668  * @is_mld_peer: flag to indicate MLD peer
4669  * @tx_failed: Total Tx failure
4670  * @comp_pkt: Pkt Info for which completions were received
4671  * @to_stack: Total packets sent up the stack
4672  * @delay_stats: Peer delay stats
4673  * @jitter_stats: Peer jitter stats
4674  * @security: Security credentials
4675  * @nawds_enabled: NAWDS flag
4676  * @bss_peer: set for bss peer
4677  * @isolation: enable peer isolation for this peer
4678  * @wds_enabled: WDS peer
4679  * @wds_ecm: WDS vendor extension ECM policy
4680  * @flush_in_progress: flag to indicate cached rx buffer flush in progress
4681  * @bufq_info: cached rx packet queue info
4682  * @mpass_peer_list_elem: node in the special peer list element
4683  * @vlan_id: vlan id for key
4684  * @wds_ext: WDS extended peer info
4685  * @osif_rx: rx callback for the WDS extended peer netdev
4686  * @rx_tid: per-TID rx defrag state
4687  * @sawf_stats: SAWF stats for this peer
4688  * @bw: bandwidth of peer connection
4689  * @mpdu_retry_threshold: MPDU retry threshold to increment tx bad count
4690  * @stats_arr_size: peer stats array size
4691  * @stats: Peer link and mld statistics
4692  */
4693 struct dp_txrx_peer {
4694 	struct dp_vdev *vdev;
4695 	uint16_t peer_id;
4696 	uint8_t authorize:1,
4697 		in_twt:1,
4698 		hw_txrx_stats_en:1,
4699 		is_mld_peer:1;
4700 	uint32_t tx_failed;
4701 	struct cdp_pkt_info comp_pkt;
4702 	struct cdp_pkt_info to_stack;
4703 
4704 	struct dp_peer_delay_stats *delay_stats;
4705 
4706 	struct cdp_peer_tid_stats *jitter_stats;
4707 
4708 	struct {
4709 		enum cdp_sec_type sec_type;
4710 		u_int32_t michael_key[2]; /* relevant for TKIP */
4711 	} security[2]; /* 0 -> multicast, 1 -> unicast */
4712 
4713 	uint16_t nawds_enabled:1,
4714 		bss_peer:1,
4715 		isolation:1,
4716 		wds_enabled:1;
4717 #ifdef WDS_VENDOR_EXTENSION
4718 	dp_ecm_policy wds_ecm;
4719 #endif
4720 #ifdef PEER_CACHE_RX_PKTS
4721 	qdf_atomic_t flush_in_progress;
4722 	struct dp_peer_cached_bufq bufq_info;
4723 #endif
4724 #ifdef QCA_MULTIPASS_SUPPORT
4725 	TAILQ_ENTRY(dp_txrx_peer) mpass_peer_list_elem;
4726 	uint16_t vlan_id;
4727 #endif
4728 #ifdef QCA_SUPPORT_WDS_EXTENDED
4729 	struct dp_wds_ext_peer wds_ext;
4730 	ol_txrx_rx_fp osif_rx;
4731 #endif
4732 	struct dp_rx_tid_defrag rx_tid[DP_MAX_TIDS];
4733 #ifdef CONFIG_SAWF
4734 	struct dp_peer_sawf_stats *sawf_stats;
4735 #endif
4736 #ifdef DP_PEER_EXTENDED_API
4737 	enum cdp_peer_bw bw;
4738 	uint8_t mpdu_retry_threshold;
4739 #endif
4740 	uint8_t stats_arr_size;
4741 
4742 	/* dp_peer_stats should be the last member in the structure */
4743 	struct dp_peer_stats stats[];
4744 };
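
/*
 * Editor's sketch (not the driver's allocator): since stats[] is a flexible
 * array member sized by @stats_arr_size, a txrx peer must be allocated with
 * room for the whole stats array in one allocation, roughly as below.
 */
static inline struct dp_txrx_peer *
dp_txrx_peer_alloc_sketch(uint8_t stats_arr_size)
{
	struct dp_txrx_peer *txrx_peer;

	txrx_peer = qdf_mem_malloc(sizeof(*txrx_peer) +
				   stats_arr_size *
				   sizeof(struct dp_peer_stats));
	if (!txrx_peer)
		return NULL;

	txrx_peer->stats_arr_size = stats_arr_size;

	return txrx_peer;
}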
4745 
4746 /* Peer structure for data path state */
4747 struct dp_peer {
4748 	struct dp_txrx_peer *txrx_peer;
4749 #ifdef WIFI_MONITOR_SUPPORT
4750 	struct dp_mon_peer *monitor_peer;
4751 #endif
4752 	/* peer ID for this peer */
4753 	uint16_t peer_id;
4754 
4755 	/* VDEV to which this peer is associated */
4756 	struct dp_vdev *vdev;
4757 
4758 	struct dp_ast_entry *self_ast_entry;
4759 
4760 	qdf_atomic_t ref_cnt;
4761 
4762 	union dp_align_mac_addr mac_addr;
4763 
4764 	/* node in the vdev's list of peers */
4765 	TAILQ_ENTRY(dp_peer) peer_list_elem;
4766 	/* node in the hash table bin's list of peers */
4767 	TAILQ_ENTRY(dp_peer) hash_list_elem;
4768 
4769 	/* TID structures pointer */
4770 	struct dp_rx_tid *rx_tid;
4771 
4772 	/* TBD: No transmit TID state required? */
4773 
4774 	struct {
4775 		enum cdp_sec_type sec_type;
4776 		u_int32_t michael_key[2]; /* relevant for TKIP */
4777 	} security[2]; /* 0 -> multicast, 1 -> unicast */
4778 
4779 	/* NAWDS Flag and Bss Peer bit */
4780 	uint16_t bss_peer:1, /* set for bss peer */
4781 		authorize:1, /* Set when authorized */
4782 		valid:1, /* valid bit */
4783 		delete_in_progress:1, /* Indicate kickout sent */
4784 		sta_self_peer:1, /* Indicate STA self peer */
4785 		is_tdls_peer:1; /* Indicate TDLS peer */
4786 
4787 #ifdef WLAN_FEATURE_11BE_MLO
4788 	uint8_t first_link:1, /* first link peer for MLO */
4789 		primary_link:1; /* primary link for MLO */
4790 #endif
4791 
4792 	/* MCL specific peer local id */
4793 	uint16_t local_id;
4794 	enum ol_txrx_peer_state state;
4795 	qdf_spinlock_t peer_info_lock;
4796 
4797 	/* Peer calibrated stats */
4798 	struct cdp_calibr_stats stats;
4799 
4800 	TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
4801 	/* TBD */
4802 
4803 	/* Active Block ack sessions */
4804 	uint16_t active_ba_session_cnt;
4805 
4806 	/* Current HW buffersize setting */
4807 	uint16_t hw_buffer_size;
4808 
4809 	/*
4810 	 * Flag to check if sessions with 256 buffersize
4811 	 * should be terminated.
4812 	 */
4813 	uint8_t kill_256_sessions;
4814 	qdf_atomic_t is_default_route_set;
4815 
4816 #ifdef QCA_PEER_MULTIQ_SUPPORT
4817 	struct dp_peer_ast_params peer_ast_flowq_idx[DP_PEER_AST_FLOWQ_MAX];
4818 #endif
4819 	/* entry to inactive_list*/
4820 	TAILQ_ENTRY(dp_peer) inactive_list_elem;
4821 
4822 	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
4823 
4824 	uint8_t peer_state;
4825 	qdf_spinlock_t peer_state_lock;
4826 #ifdef WLAN_SUPPORT_MSCS
4827 	struct dp_peer_mscs_parameter mscs_ipv4_parameter, mscs_ipv6_parameter;
4828 	bool mscs_active;
4829 #endif
4830 #ifdef WLAN_SUPPORT_MESH_LATENCY
4831 	struct dp_peer_mesh_latency_parameter mesh_latency_params[DP_MAX_TIDS];
4832 #endif
4833 #ifdef WLAN_FEATURE_11BE_MLO
4834 	/* peer type */
4835 	enum cdp_peer_type peer_type;
4836 	/*---------for link peer---------*/
4837 	struct dp_peer *mld_peer;
4838 
4839 	/*Link ID of link peer*/
4840 	uint8_t link_id;
4841 	bool link_id_valid;
4842 
4843 	/*---------for mld peer----------*/
4844 	struct dp_peer_link_info link_peers[DP_MAX_MLO_LINKS];
4845 	uint8_t num_links;
4846 	DP_MUTEX_TYPE link_peers_info_lock;
4847 #endif
4848 #ifdef CONFIG_SAWF_DEF_QUEUES
4849 	struct dp_peer_sawf *sawf;
4850 #endif
4851 	/* AST hash index for peer in HW */
4852 	uint16_t ast_idx;
4853 
4854 	/* AST hash value for peer in HW */
4855 	uint16_t ast_hash;
4856 };
4857 
4858 /**
4859  * struct dp_invalid_peer_msg - Invalid peer message
4860  * @nbuf: data buffer
4861  * @wh: 802.11 header
4862  * @vdev_id: id of vdev
4863  */
4864 struct dp_invalid_peer_msg {
4865 	qdf_nbuf_t nbuf;
4866 	struct ieee80211_frame *wh;
4867 	uint8_t vdev_id;
4868 };
4869 
4870 /**
4871  * struct dp_tx_me_buf_t - ME buffer
4872  * @next: pointer to next buffer
4873  * @data: Destination Mac address
4874  * @paddr_macbuf: physical address for dest_mac
4875  */
4876 struct dp_tx_me_buf_t {
4877 	/* Note: ME buf pool initialization logic expects next pointer to
4878 	 * be the first element. Don't add anything before next */
4879 	struct dp_tx_me_buf_t *next;
4880 	uint8_t data[QDF_MAC_ADDR_SIZE];
4881 	qdf_dma_addr_t paddr_macbuf;
4882 };
4883 
4884 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
4885 struct hal_rx_fst;
4886 
4887 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
4888 struct dp_rx_fse {
4889 	/* HAL Rx Flow Search Entry which matches HW definition */
4890 	void *hal_rx_fse;
4891 	/* Toeplitz hash value */
4892 	uint32_t flow_hash;
4893 	/* Flow index, equivalent to hash value truncated to FST size */
4894 	uint32_t flow_id;
4895 	/* Stats tracking for this flow */
4896 	struct cdp_flow_stats stats;
4897 	/* Flag indicating whether flow is IPv4 address tuple */
4898 	uint8_t is_ipv4_addr_entry;
4899 	/* Flag indicating whether flow is valid */
4900 	uint8_t is_valid;
4901 };
4902 
4903 struct dp_rx_fst {
4904 	/* Software (DP) FST */
4905 	uint8_t *base;
4906 	/* Pointer to HAL FST */
4907 	struct hal_rx_fst *hal_rx_fst;
4908 	/* Base physical address of HAL RX HW FST */
4909 	uint64_t hal_rx_fst_base_paddr;
4910 	/* Maximum number of flows FSE supports */
4911 	uint16_t max_entries;
4912 	/* Num entries in flow table */
4913 	uint16_t num_entries;
4914 	/* SKID Length */
4915 	uint16_t max_skid_length;
4916 	/* Hash mask to obtain legitimate hash entry */
4917 	uint32_t hash_mask;
4918 	/* Timer for bundling of flows */
4919 	qdf_timer_t cache_invalidate_timer;
4920 	/**
4921 	 * Flag which tracks whether cache update
4922 	 * is needed on timer expiry
4923 	 */
4924 	qdf_atomic_t is_cache_update_pending;
4925 	/* Flag to indicate completion of FSE setup in HW/FW */
4926 	bool fse_setup_done;
4927 };
4928 
4929 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_rx_fse)
4930 #elif WLAN_SUPPORT_RX_FISA
4931 
4932 /**
4933  * struct dp_fisa_reo_mismatch_stats - reo mismatch sub-case stats for FISA
4934  * @allow_cce_match: packet allowed due to cce match
4935  * @allow_fse_metdata_mismatch: packet allowed since it belongs to the same
4936  *			flow and only the fse_metadata differs.
4937  * @allow_non_aggr: packet allowed due to any other reason.
4938  */
4939 struct dp_fisa_reo_mismatch_stats {
4940 	uint32_t allow_cce_match;
4941 	uint32_t allow_fse_metdata_mismatch;
4942 	uint32_t allow_non_aggr;
4943 };
4944 
4945 struct dp_fisa_stats {
4946 	/* flow index invalid from RX HW TLV */
4947 	uint32_t invalid_flow_index;
4948 	/* workqueue deferred due to suspend */
4949 	uint32_t update_deferred;
4950 	struct dp_fisa_reo_mismatch_stats reo_mismatch;
4951 	uint32_t incorrect_rdi;
4952 };
4953 
4954 enum fisa_aggr_ret {
4955 	FISA_AGGR_DONE,
4956 	FISA_AGGR_NOT_ELIGIBLE,
4957 	FISA_FLUSH_FLOW
4958 };
4959 
4960 /**
4961  * struct fisa_pkt_hist - FISA Packet history structure
4962  * @tlv_hist: array of TLV history
4963  * @ts_hist: array of timestamps of fisa packets
4964  * @idx: index indicating the next location to be used in the array.
4965  */
4966 struct fisa_pkt_hist {
4967 	uint8_t *tlv_hist;
4968 	qdf_time_t ts_hist[FISA_FLOW_MAX_AGGR_COUNT];
4969 	uint32_t idx;
4970 };
4971 
4972 struct dp_fisa_rx_sw_ft {
4973 	/* HAL Rx Flow Search Entry which matches HW definition */
4974 	void *hw_fse;
4975 	/* hash value */
4976 	uint32_t flow_hash;
4977 	/* toeplitz hash value*/
4978 	uint32_t flow_id_toeplitz;
4979 	/* Flow index, equivalent to hash value truncated to FST size */
4980 	uint32_t flow_id;
4981 	/* Stats tracking for this flow */
4982 	struct cdp_flow_stats stats;
4983 	/* Flag indicating whether flow is IPv4 address tuple */
4984 	uint8_t is_ipv4_addr_entry;
4985 	/* Flag indicating whether flow is valid */
4986 	uint8_t is_valid;
4987 	uint8_t is_populated;
4988 	uint8_t is_flow_udp;
4989 	uint8_t is_flow_tcp;
4990 	qdf_nbuf_t head_skb;
4991 	uint16_t cumulative_l4_checksum;
4992 	uint16_t adjusted_cumulative_ip_length;
4993 	uint16_t cur_aggr;
4994 	uint16_t napi_flush_cumulative_l4_checksum;
4995 	uint16_t napi_flush_cumulative_ip_length;
4996 	qdf_nbuf_t last_skb;
4997 	uint32_t head_skb_ip_hdr_offset;
4998 	uint32_t head_skb_l4_hdr_offset;
4999 	struct cdp_rx_flow_tuple_info rx_flow_tuple_info;
5000 	uint8_t napi_id;
5001 	struct dp_vdev *vdev;
5002 	uint64_t bytes_aggregated;
5003 	uint32_t flush_count;
5004 	uint32_t aggr_count;
5005 	uint8_t do_not_aggregate;
5006 	uint16_t hal_cumultive_ip_len;
5007 	struct dp_soc *soc_hdl;
5008 	/* last aggregate count fetched from RX PKT TLV */
5009 	uint32_t last_hal_aggr_count;
5010 	uint32_t cur_aggr_gso_size;
5011 	qdf_net_udphdr_t *head_skb_udp_hdr;
5012 	uint16_t frags_cumulative_len;
5013 	/* CMEM parameters */
5014 	uint32_t cmem_offset;
5015 	uint32_t metadata;
5016 	uint32_t reo_dest_indication;
5017 	qdf_time_t flow_init_ts;
5018 	qdf_time_t last_accessed_ts;
5019 #ifdef WLAN_SUPPORT_RX_FISA_HIST
5020 	struct fisa_pkt_hist pkt_hist;
5021 #endif
5022 };
5023 
5024 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_fisa_rx_sw_ft)
5025 #define MAX_FSE_CACHE_FL_HST 10
5026 /**
5027  * struct fse_cache_flush_history - Debug history cache flush
5028  * @timestamp: Entry update timestamp
5029  * @flows_added: Number of flows added for this flush
5030  * @flows_deleted: Number of flows deleted for this flush
5031  */
5032 struct fse_cache_flush_history {
5033 	uint64_t timestamp;
5034 	uint32_t flows_added;
5035 	uint32_t flows_deleted;
5036 };
5037 
5038 struct dp_rx_fst {
5039 	/* Software (DP) FST */
5040 	uint8_t *base;
5041 	/* Pointer to HAL FST */
5042 	struct hal_rx_fst *hal_rx_fst;
5043 	/* Base physical address of HAL RX HW FST */
5044 	uint64_t hal_rx_fst_base_paddr;
5045 	/* Maximum number of flows FSE supports */
5046 	uint16_t max_entries;
5047 	/* Num entries in flow table */
5048 	uint16_t num_entries;
5049 	/* SKID Length */
5050 	uint16_t max_skid_length;
5051 	/* Hash mask to obtain legitimate hash entry */
5052 	uint32_t hash_mask;
5053 	/* Lock for adding/deleting entries of FST */
5054 	qdf_spinlock_t dp_rx_fst_lock;
5055 	uint32_t add_flow_count;
5056 	uint32_t del_flow_count;
5057 	uint32_t hash_collision_cnt;
5058 	struct dp_soc *soc_hdl;
5059 	qdf_atomic_t fse_cache_flush_posted;
5060 	qdf_timer_t fse_cache_flush_timer;
5061 	/* Allow FSE cache flush cmd to FW */
5062 	bool fse_cache_flush_allow;
5063 	struct fse_cache_flush_history cache_fl_rec[MAX_FSE_CACHE_FL_HST];
5064 	/* FISA DP stats */
5065 	struct dp_fisa_stats stats;
5066 
5067 	/* CMEM params */
5068 	qdf_work_t fst_update_work;
5069 	qdf_workqueue_t *fst_update_wq;
5070 	qdf_list_t fst_update_list;
5071 	uint32_t meta_counter;
5072 	uint32_t cmem_ba;
5073 	qdf_spinlock_t dp_rx_sw_ft_lock[MAX_REO_DEST_RINGS];
5074 	qdf_event_t cmem_resp_event;
5075 	bool flow_deletion_supported;
5076 	bool fst_in_cmem;
5077 	qdf_atomic_t pm_suspended;
5078 	bool fst_wq_defer;
5079 };
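/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * locating the software flow-table entry for a computed flow hash. As the
 * fields above describe, base holds max_entries dp_fisa_rx_sw_ft entries and
 * hash_mask truncates the hash to a legitimate table index.
 */
static inline struct dp_fisa_rx_sw_ft *
dp_example_fst_entry_from_hash(struct dp_rx_fst *fst, uint32_t flow_hash)
{
	/* truncate the 32-bit hash to a valid index into the SW table */
	uint32_t flow_id = flow_hash & fst->hash_mask;

	return &((struct dp_fisa_rx_sw_ft *)fst->base)[flow_id];
}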
5080 
5081 #endif /* WLAN_SUPPORT_RX_FISA */
5082 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
5083 
5084 #ifdef WLAN_FEATURE_STATS_EXT
5085 /**
5086  * struct dp_req_rx_hw_stats_t - RX peer HW stats query structure
5087  * @pending_tid_stats_cnt: number of pending TID stats queries awaiting REO status
5088  * @is_query_timeout: flag indicating whether the stats query timed out
5089  */
5090 struct dp_req_rx_hw_stats_t {
5091 	qdf_atomic_t pending_tid_stats_cnt;
5092 	bool is_query_timeout;
5093 };
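/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * typical use of the request structure above -- pending_tid_stats_cnt is set
 * to the number of TID stats queries issued, each REO status callback
 * decrements it, and is_query_timeout is set by the requester if the count
 * does not drop to zero in time.
 */
static inline void
dp_example_rx_hw_stats_query_done(struct dp_req_rx_hw_stats_t *rx_hw_stats)
{
	/* one outstanding TID stats query has completed */
	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt) &&
	    !rx_hw_stats->is_query_timeout) {
		/* all REO statuses arrived in time; wake the waiter here */
	}
}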
5094 #endif
5095 /* soc level structure to declare arch specific ops for DP */
5096 
5097 #ifndef WLAN_SOFTUMAC_SUPPORT
5098 /**
5099  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
5100  * @soc: DP SOC handle
5101  * @mac_id: mac id
5102  *
5103  * Return: none
5104  */
5105 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id);
5106 
5107 /**
5108  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
5109  * @soc: DP SOC handle
5110  * @mac_id: mac id
5111  *
5112  * Allocates memory pages for link descriptors; the page size is 4K for
5113  * MCL and 2MB for WIN. If the mac_id is invalid, link descriptor pages are
5114  * allocated for regular RX/TX, and if a valid mac_id is given, link
5115  * descriptors are allocated for RX monitor mode.
5116  *
5117  * Return: QDF_STATUS_SUCCESS: Success
5118  *	   QDF_STATUS_E_FAILURE: Failure
5119  */
5120 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc,
5121 					    uint32_t mac_id);
5122 
5123 /**
5124  * dp_link_desc_ring_replenish() - Replenish hw link desc rings
5125  * @soc: DP SOC handle
5126  * @mac_id: mac id
5127  *
5128  * Return: None
5129  */
5130 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id);
5131 #else
5132 static inline void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc,
5133 						   uint32_t mac_id)
5134 {
5135 }
5136 
5137 static inline QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc,
5138 							  uint32_t mac_id)
5139 {
5140 	return QDF_STATUS_SUCCESS;
5141 }
5142 
5143 static inline void dp_link_desc_ring_replenish(struct dp_soc *soc,
5144 					       uint32_t mac_id)
5145 {
5146 }
5147 #endif
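/*
 * Illustrative usage sketch (hypothetical wrapper, not part of the driver):
 * the expected ordering of the link descriptor bank APIs above during attach
 * is allocate then replenish; dp_hw_link_desc_pool_banks_free() is the
 * counterpart on the detach path.
 */
static inline QDF_STATUS
dp_example_link_desc_attach(struct dp_soc *soc, uint32_t mac_id)
{
	QDF_STATUS status;

	/* allocate the memory pages backing the HW link descriptor banks */
	status = dp_hw_link_desc_pool_banks_alloc(soc, mac_id);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* post the allocated link descriptors to the HW ring */
	dp_link_desc_ring_replenish(soc, mac_id);

	return QDF_STATUS_SUCCESS;
}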
5148 
5149 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
5150 void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc);
5151 #else
5152 static inline void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc) {}
5153 #endif
5154 
5155 /**
5156  * dp_srng_alloc() - Allocate memory for SRNG
5157  * @soc: Data path soc handle
5158  * @srng: SRNG pointer
5159  * @ring_type: Ring Type
5160  * @num_entries: Number of entries
5161  * @cached: flag to request allocation from cacheable memory
5162  *
5163  * Return: QDF_STATUS
5164  */
5165 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
5166 			 int ring_type, uint32_t num_entries,
5167 			 bool cached);
5168 
5169 /**
5170  * dp_srng_free() - Free SRNG memory
5171  * @soc: Data path soc handle
5172  * @srng: SRNG pointer
5173  *
5174  * Return: None
5175  */
5176 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng);
5177 
5178 /**
5179  * dp_srng_init() - Initialize SRNG
5180  * @soc: Data path soc handle
5181  * @srng: SRNG pointer
5182  * @ring_type: Ring Type
5183  * @ring_num: Ring number
5184  * @mac_id: mac id
5185  *
5186  * Return: QDF_STATUS
5187  */
5188 QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
5189 			int ring_type, int ring_num, int mac_id);
5190 
5191 /**
5192  * dp_srng_init_idx() - Initialize SRNG from a given ring index
5193  * @soc: Data path soc handle
5194  * @srng: SRNG pointer
5195  * @ring_type: Ring Type
5196  * @ring_num: Ring number
5197  * @mac_id: mac id
5198  * @idx: ring index to initialize from
5199  *
5200  * Return: QDF_STATUS
5201  */
5202 QDF_STATUS dp_srng_init_idx(struct dp_soc *soc, struct dp_srng *srng,
5203 			    int ring_type, int ring_num, int mac_id,
5204 			    uint32_t idx);
5205 
5206 /**
5207  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
5208  * @soc: DP SOC handle
5209  * @srng: source ring structure
5210  * @ring_type: type of ring
5211  * @ring_num: ring number
5212  *
5213  * Return: None
5214  */
5215 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
5216 		    int ring_type, int ring_num);
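/*
 * Illustrative usage sketch (hypothetical wrappers, not part of the driver):
 * the SRNG APIs above are expected to be used as alloc -> init on the attach
 * path and deinit -> free on the detach path; uncached memory is requested
 * here purely as an example.
 */
static inline QDF_STATUS
dp_example_srng_attach(struct dp_soc *soc, struct dp_srng *srng,
		       int ring_type, int ring_num, int mac_id,
		       uint32_t num_entries)
{
	QDF_STATUS status;

	/* reserve memory for the ring (uncached in this example) */
	status = dp_srng_alloc(soc, srng, ring_type, num_entries, false);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* program the ring into HW; release the memory on failure */
	status = dp_srng_init(soc, srng, ring_type, ring_num, mac_id);
	if (QDF_IS_STATUS_ERROR(status))
		dp_srng_free(soc, srng);

	return status;
}

static inline void
dp_example_srng_detach(struct dp_soc *soc, struct dp_srng *srng,
		       int ring_type, int ring_num)
{
	dp_srng_deinit(soc, srng, ring_type, ring_num);
	dp_srng_free(soc, srng);
}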
5217 
5218 void dp_print_peer_txrx_stats_be(struct cdp_peer_stats *peer_stats,
5219 				 enum peer_stats_type stats_type);
5220 void dp_print_peer_txrx_stats_li(struct cdp_peer_stats *peer_stats,
5221 				 enum peer_stats_type stats_type);
5222 
5223 void dp_print_peer_txrx_stats_rh(struct cdp_peer_stats *peer_stats,
5224 				 enum peer_stats_type stats_type);
5225 
5226 /**
5227  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
5228  * @soc: DP soc handle
5229  * @work_done: work done in softirq context
5230  * @start_time: start time for the softirq
5231  *
5232  * Return: enum with yield code
5233  */
5234 enum timer_yield_status
5235 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
5236 			  uint64_t start_time);
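/*
 * Illustrative sketch only (hypothetical poll loop, not part of the driver):
 * how a timer/softirq handler might consult dp_should_timer_irq_yield().
 * DP_TIMER_NO_YIELD is assumed to be one of the timer_yield_status
 * enumerators defined earlier in this header, and dp_example_process_ring()
 * is a stand-in for the real ring processing routine.
 */
static inline uint32_t dp_example_process_ring(struct dp_soc *soc)
{
	/* stand-in: pretend no descriptors were reaped */
	return 0;
}

static inline void
dp_example_timer_poll(struct dp_soc *soc, uint64_t start_time)
{
	uint32_t work_done = 0;
	enum timer_yield_status yield = DP_TIMER_NO_YIELD;

	while (yield == DP_TIMER_NO_YIELD) {
		work_done += dp_example_process_ring(soc);
		yield = dp_should_timer_irq_yield(soc, work_done, start_time);
	}
}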
5237 
5238 /**
5239  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
5240  * @vdev: Datapath VDEV handle
5241  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5242  * @hash_based: pointer to hash based steering flag to be populated
5243  *
5244  * Return: None
5245  */
5246 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
5247 				  enum cdp_host_reo_dest_ring *reo_dest,
5248 				  bool *hash_based);
5249 
5250 /**
5251  * dp_reo_remap_config() - configure reo remap register value based on
5252  *                         nss configuration.
5253  * @soc: DP soc handle
5254  * @remap0: output parameter indicates reo remap 0 register value
5255  * @remap1: output parameter indicates reo remap 1 register value
5256  * @remap2: output parameter indicates reo remap 2 register value
5257  *
5258  * Based on the offload_radio value, the remap configuration below
5259  * gets applied:
5260  *	0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
5261  *	1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
5262  *	2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
5263  *	3 - both Radios handled by NSS (remap not required)
5264  *	4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
5265  *
5266  * Return: bool type, true if remap is configured else false.
5267  */
5268 
5269 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
5270 			 uint32_t *remap1, uint32_t *remap2);
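/*
 * Illustrative usage sketch (hypothetical, not part of the driver): querying
 * the remap values derived from the NSS/IPA offload configuration. The actual
 * programming of the REO destination remap registers is outside this sketch.
 */
static inline void dp_example_apply_reo_remap(struct dp_soc *soc)
{
	uint32_t remap0 = 0, remap1 = 0, remap2 = 0;

	if (dp_reo_remap_config(soc, &remap0, &remap1, &remap2)) {
		/*
		 * remap0/remap1/remap2 now hold the register values to be
		 * written to the REO remap registers via the HAL layer.
		 */
	}
}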
5271 
5272 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
5273 /**
5274  * dp_tx_comp_get_prefetched_params_from_hal_desc() - Get prefetched TX desc
5275  * @soc: DP soc handle
5276  * @tx_comp_hal_desc: HAL TX Comp Descriptor
5277  * @r_tx_desc: pointer populated with the SW Tx descriptor from the HAL desc.
5278  *
5279  * Return: None
5280  */
5281 void dp_tx_comp_get_prefetched_params_from_hal_desc(
5282 					struct dp_soc *soc,
5283 					void *tx_comp_hal_desc,
5284 					struct dp_tx_desc_s **r_tx_desc);
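/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * retrieving the prefetched SW descriptor for one completion entry; the HAL
 * completion descriptor is assumed to have been reaped from the TX completion
 * ring by the caller.
 */
static inline struct dp_tx_desc_s *
dp_example_tx_comp_to_sw_desc(struct dp_soc *soc, void *tx_comp_hal_desc)
{
	struct dp_tx_desc_s *tx_desc = NULL;

	dp_tx_comp_get_prefetched_params_from_hal_desc(soc, tx_comp_hal_desc,
						       &tx_desc);
	return tx_desc;
}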
5285 #endif
5286 #endif /* _DP_TYPES_H_ */
5287