1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _DP_TYPES_H_
21 #define _DP_TYPES_H_
22 
23 #include <qdf_types.h>
24 #include <qdf_nbuf.h>
25 #include <qdf_lock.h>
26 #include <qdf_atomic.h>
27 #include <qdf_util.h>
28 #include <qdf_list.h>
29 #include <qdf_lro.h>
30 #include <queue.h>
31 #include <htt_common.h>
32 #include <htt.h>
33 #include <htt_stats.h>
34 #include <cdp_txrx_cmn.h>
35 #ifdef DP_MOB_DEFS
36 #include <cds_ieee80211_common.h>
37 #endif
38 #include <wdi_event_api.h>    /* WDI subscriber event list */
39 
40 #include "hal_hw_headers.h"
41 #include <hal_tx.h>
42 #include <hal_reo.h>
43 #include "wlan_cfg.h"
44 #include "hal_rx.h"
45 #include <hal_api.h>
46 #include <hal_api_mon.h>
47 #include "hal_rx.h"
48 
49 #define dp_init_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_INIT, params)
50 #define dp_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)
51 #define dp_init_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_INIT, params)
52 #define dp_init_info(params...) \
53 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_INIT, ## params)
54 #define dp_init_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_INIT, params)
55 
56 #define dp_vdev_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_VDEV, params)
57 #define dp_vdev_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_VDEV, params)
58 #define dp_vdev_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_VDEV, params)
59 #define dp_vdev_info(params...) \
60 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_VDEV, ## params)
61 #define dp_vdev_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_VDEV, params)
62 
63 #define MAX_BW 8
64 #define MAX_RETRIES 4
65 #define MAX_RECEPTION_TYPES 4
66 
67 #define MINIDUMP_STR_SIZE 25
68 #include <dp_umac_reset.h>
69 
70 #define REPT_MU_MIMO 1
71 #define REPT_MU_OFDMA_MIMO 3
72 #define DP_VO_TID 6
73  /** MAX TID MAPS AVAILABLE PER PDEV */
74 #define DP_MAX_TID_MAPS 16
75 /** pad DSCP_TID_MAP_MAX with 6 to fix oob issue */
76 #define DSCP_TID_MAP_MAX (64 + 6)
77 #define DP_IP_DSCP_SHIFT 2
78 #define DP_IP_DSCP_MASK 0x3f
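/*
 * Example (illustrative sketch, not part of the driver): deriving the DSCP
 * value used to index a DSCP-to-TID map from an IPv4 TOS byte; dscp_tid_map
 * here is a hypothetical lookup table of size DSCP_TID_MAP_MAX.
 *
 *	uint8_t dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
 *	uint8_t tid  = dscp_tid_map[dscp];
 */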
79 #define DP_FC0_SUBTYPE_QOS 0x80
80 #define DP_QOS_TID 0x0f
81 #define DP_IPV6_PRIORITY_SHIFT 20
82 #define MAX_MON_LINK_DESC_BANKS 2
83 #define DP_VDEV_ALL CDP_VDEV_ALL
84 
85 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
86 #define WLAN_DP_RESET_MON_BUF_RING_FILTER
87 #define MAX_TXDESC_POOLS 6
88 #else
89 #define MAX_TXDESC_POOLS 4
90 #endif
91 
92 /* Max no of descriptors to handle special frames like EAPOL */
93 #define MAX_TX_SPL_DESC 1024
94 
95 #define MAX_RXDESC_POOLS 4
96 #define MAX_PPE_TXDESC_POOLS 1
97 
98 /* Max no. of VDEV per PSOC */
99 #ifdef WLAN_PSOC_MAX_VDEVS
100 #define MAX_VDEV_CNT WLAN_PSOC_MAX_VDEVS
101 #else
102 #define MAX_VDEV_CNT 51
103 #endif
104 
105 /* Max no. of VDEVs, a PDEV can support */
106 #ifdef WLAN_PDEV_MAX_VDEVS
107 #define DP_PDEV_MAX_VDEVS WLAN_PDEV_MAX_VDEVS
108 #else
109 #define DP_PDEV_MAX_VDEVS 17
110 #endif
111 
112 #define EXCEPTION_DEST_RING_ID 0
113 #define MAX_IDLE_SCATTER_BUFS 16
114 #define DP_MAX_IRQ_PER_CONTEXT 12
115 #define DEFAULT_HW_PEER_ID 0xffff
116 
117 #define MAX_AST_AGEOUT_COUNT 128
118 
119 #ifdef TX_ADDR_INDEX_SEARCH
120 #define DP_TX_ADDR_SEARCH_ADDR_POLICY HAL_TX_ADDR_INDEX_SEARCH
121 #else
122 #define DP_TX_ADDR_SEARCH_ADDR_POLICY HAL_TX_ADDR_SEARCH_DEFAULT
123 #endif
124 
125 #define WBM_INT_ERROR_ALL 0
126 #define WBM_INT_ERROR_REO_NULL_BUFFER 1
127 #define WBM_INT_ERROR_REO_NULL_LINK_DESC 2
128 #define WBM_INT_ERROR_REO_NULL_MSDU_BUFF 3
129 #define WBM_INT_ERROR_REO_BUFF_REAPED 4
130 #define MAX_WBM_INT_ERROR_REASONS 5
131 
132 #define MAX_TX_HW_QUEUES MAX_TCL_DATA_RINGS
133 /* Maximum retries for Delba per tid per peer */
134 #define DP_MAX_DELBA_RETRY 3
135 
136 #ifdef AST_OFFLOAD_ENABLE
137 #define AST_OFFLOAD_ENABLE_STATUS 1
138 #else
139 #define AST_OFFLOAD_ENABLE_STATUS 0
140 #endif
141 
142 #ifdef FEATURE_MEC_OFFLOAD
143 #define FW_MEC_FW_OFFLOAD_ENABLED 1
144 #else
145 #define FW_MEC_FW_OFFLOAD_ENABLED 0
146 #endif
147 
148 #define PCP_TID_MAP_MAX 8
149 #define MAX_MU_USERS 37
150 
151 #define REO_CMD_EVENT_HIST_MAX 64
152 
153 #define DP_MAX_SRNGS 64
154 
155 /* 2G PHYB */
156 #define PHYB_2G_LMAC_ID 2
157 #define PHYB_2G_TARGET_PDEV_ID 2
158 
159 /* Flags for skipping s/w tid classification */
160 #define DP_TX_HW_DSCP_TID_MAP_VALID 0x1
161 #define DP_TXRX_HLOS_TID_OVERRIDE_ENABLED 0x2
162 #define DP_TX_MESH_ENABLED 0x4
163 #define DP_TX_INVALID_QOS_TAG 0xf
164 
165 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
166 #define DP_RX_REFILL_BUFF_POOL_SIZE  2048
167 #define DP_RX_REFILL_BUFF_POOL_BURST 64
168 #define DP_RX_REFILL_THRD_THRESHOLD  512
169 #endif
170 
171 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
172 #define DP_RX_FSE_FLOW_MATCH_SFE 0xAAAA
173 #endif
174 
175 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
176 #define DP_SKIP_BAR_UPDATE_TIMEOUT 5000
177 #endif
178 
179 #define DP_TX_MAGIC_PATTERN_INUSE	0xABCD1234
180 #define DP_TX_MAGIC_PATTERN_FREE	0xDEADBEEF
181 
182 #define DP_INTR_POLL_TIMER_MS	5
183 
184 #ifdef IPA_OFFLOAD
185 #define DP_PEER_REO_STATS_TID_SHIFT 16
186 #define DP_PEER_REO_STATS_TID_MASK 0xFFFF0000
187 #define DP_PEER_REO_STATS_PEER_ID_MASK 0x0000FFFF
188 #define DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid) \
189 	((comb_peer_id_tid & DP_PEER_REO_STATS_TID_MASK) >> \
190 	DP_PEER_REO_STATS_TID_SHIFT)
191 #define DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid) \
192 	(comb_peer_id_tid & DP_PEER_REO_STATS_PEER_ID_MASK)
193 #endif
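/*
 * Example (illustrative only, assumes IPA_OFFLOAD): the combined value packs
 * the tid into the upper 16 bits and the peer_id into the lower 16 bits, so a
 * pack/unpack round trip looks like:
 *
 *	uint32_t comb = (tid << DP_PEER_REO_STATS_TID_SHIFT) | peer_id;
 *	uint16_t t = DP_PEER_GET_REO_STATS_TID(comb);       // == tid
 *	uint16_t p = DP_PEER_GET_REO_STATS_PEER_ID(comb);   // == peer_id
 */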
194 
195 typedef void dp_ptnr_soc_iter_func(struct dp_soc *ptnr_soc, void *arg,
196 				   int chip_id);
197 
198 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
199 #define DP_MLD_MODE_UNIFIED_NONBOND 0
200 #define DP_MLD_MODE_UNIFIED_BOND    1
201 #define DP_MLD_MODE_HYBRID_NONBOND  2
202 #define DP_MLD_MODE_MAX             DP_MLD_MODE_HYBRID_NONBOND
203 
204 #define DP_LINK_VDEV_ITER 1
205 #define DP_BRIDGE_VDEV_ITER 2
206 #define DP_ALL_VDEV_ITER 3
207 #define IS_LINK_VDEV_ITER_REQUIRED(type) (type & DP_LINK_VDEV_ITER)
208 #define IS_BRIDGE_VDEV_ITER_REQUIRED(type) (type & DP_BRIDGE_VDEV_ITER)
209 #endif
210 
211 enum rx_pktlog_mode {
212 	DP_RX_PKTLOG_DISABLED = 0,
213 	DP_RX_PKTLOG_FULL,
214 	DP_RX_PKTLOG_LITE,
215 };
216 
217 /* enum m_copy_mode - Available mcopy mode
218  *
219  */
220 enum m_copy_mode {
221 	M_COPY_DISABLED = 0,
222 	M_COPY = 2,
223 	M_COPY_EXTENDED = 4,
224 };
225 
226 struct msdu_list {
227 	qdf_nbuf_t head;
228 	qdf_nbuf_t tail;
229 	uint32_t sum_len;
230 };
231 
232 struct dp_soc_cmn;
233 struct dp_pdev;
234 struct dp_vdev;
235 struct dp_tx_desc_s;
236 struct dp_soc;
237 union dp_rx_desc_list_elem_t;
238 struct cdp_peer_rate_stats_ctx;
239 struct cdp_soc_rate_stats_ctx;
240 struct dp_rx_fst;
241 struct dp_mon_filter;
242 struct dp_mon_mpdu;
243 #ifdef BE_PKTLOG_SUPPORT
244 struct dp_mon_filter_be;
245 #endif
246 struct dp_peer;
247 struct dp_txrx_peer;
248 
249 /**
250  * enum dp_peer_state - DP peer states
251  * @DP_PEER_STATE_NONE:
252  * @DP_PEER_STATE_INIT:
253  * @DP_PEER_STATE_ACTIVE:
254  * @DP_PEER_STATE_LOGICAL_DELETE:
255  * @DP_PEER_STATE_INACTIVE:
256  * @DP_PEER_STATE_FREED:
257  * @DP_PEER_STATE_INVALID:
258  */
259 enum dp_peer_state {
260 	DP_PEER_STATE_NONE,
261 	DP_PEER_STATE_INIT,
262 	DP_PEER_STATE_ACTIVE,
263 	DP_PEER_STATE_LOGICAL_DELETE,
264 	DP_PEER_STATE_INACTIVE,
265 	DP_PEER_STATE_FREED,
266 	DP_PEER_STATE_INVALID,
267 };
268 
269 /**
270  * enum dp_mod_id - DP module IDs
271  * @DP_MOD_ID_TX_RX:
272  * @DP_MOD_ID_TX_COMP:
273  * @DP_MOD_ID_RX:
274  * @DP_MOD_ID_HTT_COMP:
275  * @DP_MOD_ID_RX_ERR:
276  * @DP_MOD_ID_TX_PPDU_STATS:
277  * @DP_MOD_ID_RX_PPDU_STATS:
278  * @DP_MOD_ID_CDP:
279  * @DP_MOD_ID_GENERIC_STATS:
280  * @DP_MOD_ID_TX_MULTIPASS:
281  * @DP_MOD_ID_TX_CAPTURE:
282  * @DP_MOD_ID_NSS_OFFLOAD:
283  * @DP_MOD_ID_CONFIG:
284  * @DP_MOD_ID_HTT:
285  * @DP_MOD_ID_IPA:
286  * @DP_MOD_ID_AST:
287  * @DP_MOD_ID_MCAST2UCAST:
288  * @DP_MOD_ID_CHILD:
289  * @DP_MOD_ID_MESH:
290  * @DP_MOD_ID_TX_EXCEPTION:
291  * @DP_MOD_ID_TDLS:
292  * @DP_MOD_ID_MISC:
293  * @DP_MOD_ID_MSCS:
294  * @DP_MOD_ID_TX:
295  * @DP_MOD_ID_SAWF:
296  * @DP_MOD_ID_REINJECT:
297  * @DP_MOD_ID_SCS:
298  * @DP_MOD_ID_UMAC_RESET:
299  * @DP_MOD_ID_TX_MCAST:
300  * @DP_MOD_ID_DS:
301  * @DP_MOD_ID_MLO_DEV:
302  * @DP_MOD_ID_MAX:
303  */
304 enum dp_mod_id {
305 	DP_MOD_ID_TX_RX,
306 	DP_MOD_ID_TX_COMP,
307 	DP_MOD_ID_RX,
308 	DP_MOD_ID_HTT_COMP,
309 	DP_MOD_ID_RX_ERR,
310 	DP_MOD_ID_TX_PPDU_STATS,
311 	DP_MOD_ID_RX_PPDU_STATS,
312 	DP_MOD_ID_CDP,
313 	DP_MOD_ID_GENERIC_STATS,
314 	DP_MOD_ID_TX_MULTIPASS,
315 	DP_MOD_ID_TX_CAPTURE,
316 	DP_MOD_ID_NSS_OFFLOAD,
317 	DP_MOD_ID_CONFIG,
318 	DP_MOD_ID_HTT,
319 	DP_MOD_ID_IPA,
320 	DP_MOD_ID_AST,
321 	DP_MOD_ID_MCAST2UCAST,
322 	DP_MOD_ID_CHILD,
323 	DP_MOD_ID_MESH,
324 	DP_MOD_ID_TX_EXCEPTION,
325 	DP_MOD_ID_TDLS,
326 	DP_MOD_ID_MISC,
327 	DP_MOD_ID_MSCS,
328 	DP_MOD_ID_TX,
329 	DP_MOD_ID_SAWF,
330 	DP_MOD_ID_REINJECT,
331 	DP_MOD_ID_SCS,
332 	DP_MOD_ID_UMAC_RESET,
333 	DP_MOD_ID_TX_MCAST,
334 	DP_MOD_ID_DS,
335 	DP_MOD_ID_MLO_DEV,
336 	DP_MOD_ID_MAX,
337 };
338 
339 /**
340  * enum dp_peer_type - DP peer type
341  * @DP_PEER_TYPE_LEGACY:
342  * @DP_PEER_TYPE_MLO_LINK:
343  * @DP_PEER_TYPE_MLO:
344  */
345 enum dp_peer_type {
346 	DP_PEER_TYPE_LEGACY,
347 	DP_PEER_TYPE_MLO_LINK,
348 	DP_PEER_TYPE_MLO,
349 };
350 
351 #define DP_PDEV_ITERATE_VDEV_LIST(_pdev, _vdev) \
352 	TAILQ_FOREACH((_vdev), &(_pdev)->vdev_list, vdev_list_elem)
353 
354 #define DP_VDEV_ITERATE_PEER_LIST(_vdev, _peer) \
355 	TAILQ_FOREACH((_peer), &(_vdev)->peer_list, peer_list_elem)
356 
357 #define DP_PEER_ITERATE_ASE_LIST(_peer, _ase, _temp_ase) \
358 	TAILQ_FOREACH_SAFE((_ase), &_peer->ast_entry_list, ase_list_elem, (_temp_ase))
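/*
 * Example (illustrative sketch): walking the vdev list of a pdev with the
 * iterator above. The caller is assumed to hold the pdev's vdev list lock;
 * vdev_id is a member of struct dp_vdev defined later in this file.
 *
 *	struct dp_vdev *vdev;
 *
 *	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
 *		dp_vdev_debug("vdev_id %u", vdev->vdev_id);
 */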
359 
360 #define DP_MUTEX_TYPE qdf_spinlock_t
361 
362 #define DP_FRAME_IS_MULTICAST(_a)  (*(_a) & 0x01)
363 #define DP_FRAME_IS_IPV4_MULTICAST(_a)  (*(_a) == 0x01)
364 
365 #define DP_FRAME_IS_IPV6_MULTICAST(_a)         \
366     ((_a)[0] == 0x33 &&                         \
367      (_a)[1] == 0x33)
368 
369 #define DP_FRAME_IS_BROADCAST(_a)              \
370     ((_a)[0] == 0xff &&                         \
371      (_a)[1] == 0xff &&                         \
372      (_a)[2] == 0xff &&                         \
373      (_a)[3] == 0xff &&                         \
374      (_a)[4] == 0xff &&                         \
375      (_a)[5] == 0xff)
376 #define DP_FRAME_IS_SNAP(_llc) ((_llc)->llc_dsap == 0xaa && \
377 		(_llc)->llc_ssap == 0xaa && \
378 		(_llc)->llc_un.type_snap.control == 0x3)
379 #define DP_FRAME_IS_LLC(typeorlen) ((typeorlen) >= 0x600)
380 #define DP_FRAME_FC0_TYPE_MASK 0x0c
381 #define DP_FRAME_FC0_TYPE_DATA 0x08
382 #define DP_FRAME_IS_DATA(_frame) \
383 	(((_frame)->i_fc[0] & DP_FRAME_FC0_TYPE_MASK) == DP_FRAME_FC0_TYPE_DATA)
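/*
 * Example (illustrative only): classifying a destination MAC address with the
 * helpers above, where da is assumed to point at the 6-byte destination
 * address of the frame.
 *
 *	bool is_bcast = DP_FRAME_IS_BROADCAST(da);
 *	bool is_mcast = DP_FRAME_IS_MULTICAST(da);	// also true for broadcast
 */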
384 
385 /*
386  * macros to convert hw mac id to sw mac id:
387  * mac ids used by hardware start from a value of 1 while
388  * those in host software start from a value of 0. Use the
389  * macros below to convert between mac ids used by software and
390  * hardware
391  */
392 #define DP_SW2HW_MACID(id) ((id) + 1)
393 #define DP_HW2SW_MACID(id) ((id) > 0 ? ((id) - 1) : 0)
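/*
 * Example (illustrative only): a HW mac id of 1 (first mac as seen by the
 * target) maps to SW mac id 0 in host tables, and back again.
 *
 *	uint8_t sw_id = DP_HW2SW_MACID(1);	// == 0
 *	uint8_t hw_id = DP_SW2HW_MACID(sw_id);	// == 1
 */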
394 
395 /*
396  * Number of Tx Queues
397  * enum and macros to define how many threshold levels are used
398  * for the AC based flow control
399  */
400 #ifdef QCA_AC_BASED_FLOW_CONTROL
401 enum dp_fl_ctrl_threshold {
402 	DP_TH_BE_BK = 0,
403 	DP_TH_VI,
404 	DP_TH_VO,
405 	DP_TH_HI,
406 };
407 
408 #define FL_TH_MAX (4)
409 #define FL_TH_VI_PERCENTAGE (80)
410 #define FL_TH_VO_PERCENTAGE (60)
411 #define FL_TH_HI_PERCENTAGE (40)
412 #endif
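/*
 * Example (illustrative sketch): how per-AC stop thresholds could be derived
 * from a base threshold using the percentages above; stop_th stands for the
 * per-pool threshold array indexed by enum dp_fl_ctrl_threshold.
 *
 *	stop_th[DP_TH_BE_BK] = base_stop_th;
 *	stop_th[DP_TH_VI] = (base_stop_th * FL_TH_VI_PERCENTAGE) / 100;
 *	stop_th[DP_TH_VO] = (base_stop_th * FL_TH_VO_PERCENTAGE) / 100;
 *	stop_th[DP_TH_HI] = (base_stop_th * FL_TH_HI_PERCENTAGE) / 100;
 */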
413 
414 /**
415  * enum dp_intr_mode
416  * @DP_INTR_INTEGRATED: Line interrupts
417  * @DP_INTR_MSI: MSI interrupts
418  * @DP_INTR_POLL: Polling
419  * @DP_INTR_LEGACY_VIRTUAL_IRQ:
420  */
421 enum dp_intr_mode {
422 	DP_INTR_INTEGRATED = 0,
423 	DP_INTR_MSI,
424 	DP_INTR_POLL,
425 	DP_INTR_LEGACY_VIRTUAL_IRQ,
426 };
427 
428 /**
429  * enum dp_tx_frm_type
430  * @dp_tx_frm_std: Regular frame, no added header fragments
431  * @dp_tx_frm_tso: TSO segment, with a modified IP header added
432  * @dp_tx_frm_sg: SG segment
433  * @dp_tx_frm_audio: Audio frames, a custom LLC/SNAP header added
434  * @dp_tx_frm_me: Multicast to Unicast Converted frame
435  * @dp_tx_frm_raw: Raw Frame
436  * @dp_tx_frm_rmnet:
437  */
438 enum dp_tx_frm_type {
439 	dp_tx_frm_std = 0,
440 	dp_tx_frm_tso,
441 	dp_tx_frm_sg,
442 	dp_tx_frm_audio,
443 	dp_tx_frm_me,
444 	dp_tx_frm_raw,
445 	dp_tx_frm_rmnet,
446 };
447 
448 /**
449  * enum dp_ast_type
450  * @dp_ast_type_wds: WDS peer AST type
451  * @dp_ast_type_static: static ast entry type
452  * @dp_ast_type_mec: Multicast echo ast entry type
453  */
454 enum dp_ast_type {
455 	dp_ast_type_wds = 0,
456 	dp_ast_type_static,
457 	dp_ast_type_mec,
458 };
459 
460 /**
461  * enum dp_nss_cfg
462  * @dp_nss_cfg_default: No radios are offloaded
463  * @dp_nss_cfg_first_radio: First radio offloaded
464  * @dp_nss_cfg_second_radio: Second radio offloaded
465  * @dp_nss_cfg_dbdc: Dual radios offloaded
466  * @dp_nss_cfg_dbtc: Three radios offloaded
467  * @dp_nss_cfg_max: max value
468  */
469 enum dp_nss_cfg {
470 	dp_nss_cfg_default = 0x0,
471 	dp_nss_cfg_first_radio = 0x1,
472 	dp_nss_cfg_second_radio = 0x2,
473 	dp_nss_cfg_dbdc = 0x3,
474 	dp_nss_cfg_dbtc = 0x7,
475 	dp_nss_cfg_max
476 };
477 
478 #ifdef WLAN_TX_PKT_CAPTURE_ENH
479 #define DP_CPU_RING_MAP_1 1
480 #endif
481 
482 /**
483  * enum dp_cpu_ring_map_types - dp tx cpu ring map
484  * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
485  * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded
486  * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
487  * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
488  * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
489  * @DP_SINGLE_TX_RING_MAP: all CPUs mapped to a single ring to avoid out-of-order delivery
490  * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
491  */
492 enum dp_cpu_ring_map_types {
493 	DP_NSS_DEFAULT_MAP,
494 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
495 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
496 	DP_NSS_DBDC_OFFLOADED_MAP,
497 	DP_NSS_DBTC_OFFLOADED_MAP,
498 #ifdef WLAN_TX_PKT_CAPTURE_ENH
499 	DP_SINGLE_TX_RING_MAP,
500 #endif
501 	DP_NSS_CPU_RING_MAP_MAX
502 };
503 
504 /**
505  * struct dp_rx_nbuf_frag_info - Hold vaddr and paddr for a buffer
506  *
507  * @paddr: Physical address of buffer allocated.
508  * @virt_addr: union of virtual address representations
509  * @nbuf: Allocated nbuf in case of nbuf approach.
510  * @vaddr: Virtual address of frag allocated in case of frag approach.
511  */
512 struct dp_rx_nbuf_frag_info {
513 	qdf_dma_addr_t paddr;
514 	union {
515 		qdf_nbuf_t nbuf;
516 		qdf_frag_t vaddr;
517 	} virt_addr;
518 };
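/*
 * Example (illustrative only): filling the frag info for the nbuf (non-frag)
 * approach, assuming the nbuf is already DMA mapped so that
 * qdf_nbuf_get_frag_paddr() returns a valid physical address.
 *
 *	struct dp_rx_nbuf_frag_info info = { 0 };
 *
 *	info.virt_addr.nbuf = nbuf;
 *	info.paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
 */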
519 
520 /**
521  * enum dp_ctxt_type - context type
522  * @DP_PDEV_TYPE: PDEV context
523  * @DP_RX_RING_HIST_TYPE: Datapath rx ring history
524  * @DP_RX_ERR_RING_HIST_TYPE: Datapath rx error ring history
525  * @DP_RX_REINJECT_RING_HIST_TYPE: Datapath reinject ring history
526  * @DP_TX_TCL_HIST_TYPE:
527  * @DP_TX_COMP_HIST_TYPE:
528  * @DP_FISA_RX_FT_TYPE:
529  * @DP_RX_REFILL_RING_HIST_TYPE: Datapath rx refill ring history
530  * @DP_TX_HW_DESC_HIST_TYPE: Datapath TX HW descriptor history
531  * @DP_MON_SOC_TYPE: Datapath monitor soc context
532  * @DP_MON_PDEV_TYPE: Datapath monitor pdev context
533  * @DP_MON_STATUS_BUF_HIST_TYPE: DP monitor status buffer history
534  * @DP_CFG_EVENT_HIST_TYPE: DP config events history
535  * @DP_MON_TX_DESC_POOL_TYPE: DP TX desc pool buffer
536  * @DP_MON_RX_DESC_POOL_TYPE: DP RX desc pool buffer
537  */
538 enum dp_ctxt_type {
539 	DP_PDEV_TYPE,
540 	DP_RX_RING_HIST_TYPE,
541 	DP_RX_ERR_RING_HIST_TYPE,
542 	DP_RX_REINJECT_RING_HIST_TYPE,
543 	DP_TX_TCL_HIST_TYPE,
544 	DP_TX_COMP_HIST_TYPE,
545 	DP_FISA_RX_FT_TYPE,
546 	DP_RX_REFILL_RING_HIST_TYPE,
547 	DP_TX_HW_DESC_HIST_TYPE,
548 	DP_MON_SOC_TYPE,
549 	DP_MON_PDEV_TYPE,
550 	DP_MON_STATUS_BUF_HIST_TYPE,
551 	DP_CFG_EVENT_HIST_TYPE,
552 	DP_MON_TX_DESC_POOL_TYPE,
553 	DP_MON_RX_DESC_POOL_TYPE,
554 };
555 
556 /**
557  * struct rx_desc_pool
558  * @pool_size: number of RX descriptor in the pool
559  * @elem_size: Element size
560  * @desc_pages: Multi page descriptors
561  * @array: pointer to array of RX descriptor
562  * @freelist: pointer to free RX descriptor link list
563  * @lock: Protection for the RX descriptor pool
564  * @owner: owner for nbuf
565  * @buf_size: Buffer size
566  * @buf_alignment: Buffer alignment
567  * @rx_mon_dest_frag_enable: Enable frag processing for mon dest buffer
568  * @desc_type: type of desc this pool serves
569  */
570 struct rx_desc_pool {
571 	uint32_t pool_size;
572 #ifdef RX_DESC_MULTI_PAGE_ALLOC
573 	uint16_t elem_size;
574 	struct qdf_mem_multi_page_t desc_pages;
575 #else
576 	union dp_rx_desc_list_elem_t *array;
577 #endif
578 	union dp_rx_desc_list_elem_t *freelist;
579 	qdf_spinlock_t lock;
580 	uint8_t owner;
581 	uint16_t buf_size;
582 	uint8_t buf_alignment;
583 	bool rx_mon_dest_frag_enable;
584 	enum qdf_dp_desc_type desc_type;
585 };
586 
587 /**
588  * struct dp_tx_ext_desc_elem_s
589  * @next: next extension descriptor pointer
590  * @vaddr: hlos virtual address pointer
591  * @paddr: physical address pointer for descriptor
592  * @flags: mark features for extension descriptor
593  * @me_buffer: Pointer to ME buffer - store this so that it can be freed on
594  *		Tx completion of ME packet
595  * @tso_desc: Pointer to Tso desc
596  * @tso_num_desc: Pointer to tso_num_desc
597  */
598 struct dp_tx_ext_desc_elem_s {
599 	struct dp_tx_ext_desc_elem_s *next;
600 	void *vaddr;
601 	qdf_dma_addr_t paddr;
602 	uint16_t flags;
603 	struct dp_tx_me_buf_t *me_buffer;
604 	struct qdf_tso_seg_elem_t *tso_desc;
605 	struct qdf_tso_num_seg_elem_t *tso_num_desc;
606 };
607 
608 /*
609  * NB: intentionally not using kernel-doc comment because the kernel-doc
610  *     script does not handle the qdf_dma_mem_context macro
611  * struct dp_tx_ext_desc_pool_s - Tx Extension Descriptor Pool
612  * @elem_count: Number of descriptors in the pool
613  * @elem_size: Size of each descriptor
614  * @num_free: Number of free descriptors
615  * @desc_pages: multiple page allocation information for actual descriptors
616  * @link_elem_size: size of the link descriptor in cacheable memory used for
617  * 		    chaining the extension descriptors
618  * @desc_link_pages: multiple page allocation information for link descriptors
619  * @freelist:
620  * @lock:
621  * @memctx:
622  */
623 struct dp_tx_ext_desc_pool_s {
624 	uint16_t elem_count;
625 	int elem_size;
626 	uint16_t num_free;
627 	struct qdf_mem_multi_page_t desc_pages;
628 	int link_elem_size;
629 	struct qdf_mem_multi_page_t desc_link_pages;
630 	struct dp_tx_ext_desc_elem_s *freelist;
631 	qdf_spinlock_t lock;
632 	qdf_dma_mem_context(memctx);
633 };
634 
635 /**
636  * struct dp_tx_desc_s - Tx Descriptor
637  * @next: Next in the chain of descriptors in freelist or in the completion list
638  * @nbuf: Buffer Address
639  * @length:
640  * @magic:
641  * @timestamp_tick:
642  * @flags: Flags to track the state of descriptor and special frame handling
643  * @id: Descriptor ID
644  * @dma_addr:
645  * @vdev_id: vdev_id of vdev over which the packet was transmitted
646  * @tx_status:
647  * @peer_id:
648  * @pdev: Handle to pdev
649  * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet).
650  * 		   This is maintained in descriptor to allow more efficient
651  * 		   processing in completion event processing code.
652  * 		   This field is filled in with the htt_pkt_type enum.
653  * @buffer_src: buffer source TQM, REO, FW etc.
654  * @reserved:
655  * @frm_type: Frame Type - ToDo check if this is redundant
656  * @pkt_offset: Offset from which the actual packet data starts
657  * @pool_id: Pool ID - used when releasing the descriptor
658  * @shinfo_addr:
659  * @msdu_ext_desc: MSDU extension descriptor
660  * @timestamp:
661  * @comp:
662  * @tcl_cmd_vaddr: VADDR of the TCL descriptor, valid for soft-umac arch
663  * @tcl_cmd_paddr: PADDR of the TCL descriptor, valid for soft-umac arch
664  */
665 struct dp_tx_desc_s {
666 	struct dp_tx_desc_s *next;
667 	qdf_nbuf_t nbuf;
668 	uint16_t length;
669 #ifdef DP_TX_TRACKING
670 	uint32_t magic;
671 	uint64_t timestamp_tick;
672 #endif
673 	uint16_t flags;
674 	uint32_t id;
675 	qdf_dma_addr_t dma_addr;
676 	uint8_t vdev_id;
677 	uint8_t tx_status;
678 	uint16_t peer_id;
679 	struct dp_pdev *pdev;
680 	uint8_t tx_encap_type:2,
681 		buffer_src:3,
682 		reserved:3;
683 	uint8_t frm_type;
684 	uint8_t pkt_offset;
685 	uint8_t  pool_id;
686 	unsigned char *shinfo_addr;
687 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
688 	qdf_ktime_t timestamp;
689 	struct hal_tx_desc_comp_s comp;
690 #ifdef WLAN_SOFTUMAC_SUPPORT
691 	void *tcl_cmd_vaddr;
692 	qdf_dma_addr_t tcl_cmd_paddr;
693 #endif
694 };
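/*
 * Example (illustrative only, assumes DP_TX_TRACKING): the magic member is
 * stamped with the in-use/free patterns defined earlier in this file so that
 * debug code can spot a descriptor completed twice or used after free.
 *
 *	tx_desc->magic = DP_TX_MAGIC_PATTERN_INUSE;	// when handed out
 *	...
 *	tx_desc->magic = DP_TX_MAGIC_PATTERN_FREE;	// when returned to pool
 */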
695 
696 #ifdef QCA_AC_BASED_FLOW_CONTROL
697 /**
698  * enum flow_pool_status - flow pool status
699  * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
700  *				and network queues are unpaused
701  * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
702  *			   and network queues are paused
703  * @FLOW_POOL_BE_BK_PAUSED:
704  * @FLOW_POOL_VI_PAUSED:
705  * @FLOW_POOL_VO_PAUSED:
706  * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
707  * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
708  * @FLOW_POOL_ACTIVE_UNPAUSED_REATTACH: pool is reattached but network
709  *					queues are not paused
710  */
711 enum flow_pool_status {
712 	FLOW_POOL_ACTIVE_UNPAUSED = 0,
713 	FLOW_POOL_ACTIVE_PAUSED = 1,
714 	FLOW_POOL_BE_BK_PAUSED = 2,
715 	FLOW_POOL_VI_PAUSED = 3,
716 	FLOW_POOL_VO_PAUSED = 4,
717 	FLOW_POOL_INVALID = 5,
718 	FLOW_POOL_INACTIVE = 6,
719 	FLOW_POOL_ACTIVE_UNPAUSED_REATTACH = 7,
720 };
721 
722 #else
723 /**
724  * enum flow_pool_status - flow pool status
725  * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
726  *				and network queues are unpaused
727  * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
728  *			   and network queues are paused
729  * @FLOW_POOL_BE_BK_PAUSED:
730  * @FLOW_POOL_VI_PAUSED:
731  * @FLOW_POOL_VO_PAUSED:
732  * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
733  * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
734  */
735 enum flow_pool_status {
736 	FLOW_POOL_ACTIVE_UNPAUSED = 0,
737 	FLOW_POOL_ACTIVE_PAUSED = 1,
738 	FLOW_POOL_BE_BK_PAUSED = 2,
739 	FLOW_POOL_VI_PAUSED = 3,
740 	FLOW_POOL_VO_PAUSED = 4,
741 	FLOW_POOL_INVALID = 5,
742 	FLOW_POOL_INACTIVE = 6,
743 };
744 
745 #endif
746 
747 /**
748  * struct dp_tx_tso_seg_pool_s
749  * @pool_size: total number of pool elements
750  * @num_free: free element count
751  * @freelist: first free element pointer
752  * @desc_pages: multiple page allocation information for actual descriptors
753  * @lock: lock for accessing the pool
754  */
755 struct dp_tx_tso_seg_pool_s {
756 	uint16_t pool_size;
757 	uint16_t num_free;
758 	struct qdf_tso_seg_elem_t *freelist;
759 	struct qdf_mem_multi_page_t desc_pages;
760 	qdf_spinlock_t lock;
761 };
762 
763 /**
764  * struct dp_tx_tso_num_seg_pool_s - TSO Num seg pool
765  * @num_seg_pool_size: total number of pool elements
766  * @num_free: free element count
767  * @freelist: first free element pointer
768  * @desc_pages: multiple page allocation information for actual descriptors
769  * @lock: lock for accessing the pool
770  */
771 
772 struct dp_tx_tso_num_seg_pool_s {
773 	uint16_t num_seg_pool_size;
774 	uint16_t num_free;
775 	struct qdf_tso_num_seg_elem_t *freelist;
776 	struct qdf_mem_multi_page_t desc_pages;
777 	/*tso mutex */
778 	qdf_spinlock_t lock;
779 };
780 
781 /**
782  * struct dp_tx_desc_pool_s - Tx Descriptor pool information
783  * @elem_size: Size of each descriptor in the pool
784  * @num_allocated: Number of used descriptors
785  * @freelist: Chain of free descriptors
786  * @desc_pages: multiple page allocation information for actual descriptors
787  * @pool_size: Total number of descriptors in the pool
788  * @flow_pool_id:
789  * @num_invalid_bin: Deleted pool with pending Tx completions.
790  * @avail_desc:
791  * @status:
792  * @flow_type:
793  * @stop_th:
794  * @start_th:
795  * @max_pause_time:
796  * @latest_pause_time:
797  * @pkt_drop_no_desc:
798  * @flow_pool_lock:
799  * @pool_create_cnt:
800  * @pool_owner_ctx:
801  * @elem_count:
802  * @num_free: Number of free descriptors
803  * @lock: Lock for descriptor allocation/free from/to the pool
804  */
805 struct dp_tx_desc_pool_s {
806 	uint16_t elem_size;
807 	uint32_t num_allocated;
808 	struct dp_tx_desc_s *freelist;
809 	struct qdf_mem_multi_page_t desc_pages;
810 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
811 	uint16_t pool_size;
812 	uint8_t flow_pool_id;
813 	uint8_t num_invalid_bin;
814 	uint16_t avail_desc;
815 	enum flow_pool_status status;
816 	enum htt_flow_type flow_type;
817 #ifdef QCA_AC_BASED_FLOW_CONTROL
818 	uint16_t stop_th[FL_TH_MAX];
819 	uint16_t start_th[FL_TH_MAX];
820 	qdf_time_t max_pause_time[FL_TH_MAX];
821 	qdf_time_t latest_pause_time[FL_TH_MAX];
822 #else
823 	uint16_t stop_th;
824 	uint16_t start_th;
825 #endif
826 	uint16_t pkt_drop_no_desc;
827 	qdf_spinlock_t flow_pool_lock;
828 	uint8_t pool_create_cnt;
829 	void *pool_owner_ctx;
830 #else
831 	uint16_t elem_count;
832 	uint32_t num_free;
833 	qdf_spinlock_t lock;
834 #endif
835 };
836 
837 /**
838  * struct dp_txrx_pool_stats - flow pool related statistics
839  * @pool_map_count: flow pool map received
840  * @pool_unmap_count: flow pool unmap received
841  * @pkt_drop_no_pool: packets dropped due to unavailability of pool
842  */
843 struct dp_txrx_pool_stats {
844 	uint16_t pool_map_count;
845 	uint16_t pool_unmap_count;
846 	uint16_t pkt_drop_no_pool;
847 };
848 
849 /**
850  * struct dp_srng - DP srng structure
851  * @hal_srng: hal_srng handle
852  * @base_vaddr_unaligned: un-aligned virtual base address of the srng ring
853  * @base_vaddr_aligned: aligned virtual base address of the srng ring
854  * @base_paddr_unaligned: un-aligned physical base address of the srng ring
855  * @base_paddr_aligned: aligned physical base address of the srng ring
856  * @alloc_size: size of the srng ring
857  * @cached: is the srng ring memory cached or un-cached memory
858  * @irq: irq number of the srng ring
859  * @num_entries: number of entries in the srng ring
860  * @stats: Structure to track the ring utilization stats
861  * @is_mem_prealloc: Is this srng memory pre-allocated
862  * @crit_thresh: Critical threshold for near-full processing of this srng
863  * @safe_thresh: Safe threshold for near-full processing of this srng
864  * @near_full: Flag to indicate srng is near-full
865  */
866 struct dp_srng {
867 	hal_ring_handle_t hal_srng;
868 	void *base_vaddr_unaligned;
869 	void *base_vaddr_aligned;
870 	qdf_dma_addr_t base_paddr_unaligned;
871 	qdf_dma_addr_t base_paddr_aligned;
872 	uint32_t alloc_size;
873 	uint8_t cached;
874 	int irq;
875 	uint32_t num_entries;
876 	struct ring_util_stats stats;
877 #ifdef DP_MEM_PRE_ALLOC
878 	uint8_t is_mem_prealloc;
879 #endif
880 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
881 	uint16_t crit_thresh;
882 	uint16_t safe_thresh;
883 	qdf_atomic_t near_full;
884 #endif
885 };
886 
887 struct dp_rx_reorder_array_elem {
888 	qdf_nbuf_t head;
889 	qdf_nbuf_t tail;
890 };
891 
892 #define DP_RX_BA_INACTIVE 0
893 #define DP_RX_BA_ACTIVE 1
894 #define DP_RX_BA_IN_PROGRESS 2
895 struct dp_reo_cmd_info {
896 	uint16_t cmd;
897 	enum hal_reo_cmd_type cmd_type;
898 	void *data;
899 	void (*handler)(struct dp_soc *, void *, union hal_reo_status *);
900 	TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
901 };
902 
903 struct dp_peer_delay_stats {
904 	struct cdp_delay_tid_stats delay_tid_stats[CDP_MAX_DATA_TIDS]
905 						  [CDP_MAX_TXRX_CTX];
906 };
907 
908 /* Rx TID defrag*/
909 struct dp_rx_tid_defrag {
910 	/* TID */
911 	int tid;
912 
913 	/* only used for defrag right now */
914 	TAILQ_ENTRY(dp_rx_tid_defrag) defrag_waitlist_elem;
915 
916 	/* Store dst desc for reinjection */
917 	hal_ring_desc_t dst_ring_desc;
918 	struct dp_rx_desc *head_frag_desc;
919 
920 	/* Sequence and fragments that are being processed currently */
921 	uint32_t curr_seq_num;
922 	uint32_t curr_frag_num;
923 
924 	/* TODO: Check the following while adding defragmentation support */
925 	struct dp_rx_reorder_array_elem *array;
926 	/* base - single rx reorder element used for non-aggr cases */
927 	struct dp_rx_reorder_array_elem base;
928 	/* rx_tid lock */
929 	qdf_spinlock_t defrag_tid_lock;
930 
931 	/* head PN number */
932 	uint64_t pn128[2];
933 
934 	uint32_t defrag_timeout_ms;
935 
936 	/* defrag usage only, dp_peer pointer related with this tid */
937 	struct dp_txrx_peer *defrag_peer;
938 };
939 
940 /* Rx TID */
941 struct dp_rx_tid {
942 	/* TID */
943 	int tid;
944 
945 	/* Num of addba requests */
946 	uint32_t num_of_addba_req;
947 
948 	/* Num of addba responses */
949 	uint32_t num_of_addba_resp;
950 
951 	/* Num of delba requests */
952 	uint32_t num_of_delba_req;
953 
954 	/* Num of addba responses successful */
955 	uint32_t num_addba_rsp_success;
956 
957 	/* Num of addba responses failed */
958 	uint32_t num_addba_rsp_failed;
959 
960 	/* pn size */
961 	uint8_t pn_size;
962 	/* REO TID queue descriptors */
963 	void *hw_qdesc_vaddr_unaligned;
964 	void *hw_qdesc_vaddr_aligned;
965 	qdf_dma_addr_t hw_qdesc_paddr_unaligned;
966 	qdf_dma_addr_t hw_qdesc_paddr;
967 	uint32_t hw_qdesc_alloc_size;
968 
969 	/* RX ADDBA session state */
970 	int ba_status;
971 
972 	/* RX BA window size */
973 	uint16_t ba_win_size;
974 
975 	/* Starting sequence number in Addba request */
976 	uint16_t startseqnum;
977 	uint16_t dialogtoken;
978 	uint16_t statuscode;
979 	/* user defined ADDBA response status code */
980 	uint16_t userstatuscode;
981 
982 	/* rx_tid lock */
983 	qdf_spinlock_t tid_lock;
984 
985 	/* Store ppdu_id when 2k exception is received */
986 	uint32_t ppdu_id_2k;
987 
988 	/* Delba Tx completion status */
989 	uint8_t delba_tx_status;
990 
991 	/* Delba Tx retry count */
992 	uint8_t delba_tx_retry;
993 
994 	/* Delba stats */
995 	uint32_t delba_tx_success_cnt;
996 	uint32_t delba_tx_fail_cnt;
997 
998 	/* Delba reason code for retries */
999 	uint8_t delba_rcode;
1000 
1001 	/* Coex Override preserved window size, 1-based */
1002 	uint16_t rx_ba_win_size_override;
1003 #ifdef IPA_OFFLOAD
1004 	/* rx msdu count per tid */
1005 	struct cdp_pkt_info rx_msdu_cnt;
1006 #endif
1007 
1008 };
1009 
1010 /**
1011  * struct dp_intr_stats - DP Interrupt Stats for an interrupt context
1012  * @num_tx_ring_masks: interrupts with tx_ring_mask set
1013  * @num_rx_ring_masks: interrupts with rx_ring_mask set
1014  * @num_rx_mon_ring_masks: interrupts with rx_mon_ring_mask set
1015  * @num_rx_err_ring_masks: interrupts with rx_err_ring_mask set
1016  * @num_rx_wbm_rel_ring_masks: interrupts with rx_wbm_rel_ring_mask set
1017  * @num_reo_status_ring_masks: interrupts with reo_status_ring_mask set
1018  * @num_rxdma2host_ring_masks: interrupts with rxdma2host_ring_mask set
1019  * @num_host2rxdma_ring_masks: interrupts with host2rxdma_ring_mask set
1020  * @num_host2rxdma_mon_ring_masks: interrupts with host2rxdma_mon_ring_mask set
1021  * @num_rx_ring_near_full_masks: Near-full interrupts for REO DST ring
1022  * @num_tx_comp_ring_near_full_masks: Near-full interrupts for TX completion
1023  * @num_rx_wbm_rel_ring_near_full_masks: total number of times the wbm rel ring
1024  *                                       near full interrupt was received
1025  * @num_reo_status_ring_near_full_masks: total number of times the reo status
1026  *                                       near full interrupt was received
1027  * @num_near_full_masks: total number of times the near full interrupt
1028  *                       was received
1029  * @num_masks: total number of times the interrupt was received
1030  * @num_host2txmon_ring__masks: interrupts with host2txmon_ring_mask set
1033  * @num_tx_mon_ring_masks: interrupts with tx_mon_ring_mask set
1034  *
1035  * Counters for individual masks are incremented only if there are any packets
1036  * on that ring.
1037  */
1038 struct dp_intr_stats {
1039 	uint32_t num_tx_ring_masks[MAX_TCL_DATA_RINGS];
1040 	uint32_t num_rx_ring_masks[MAX_REO_DEST_RINGS];
1041 	uint32_t num_rx_mon_ring_masks;
1042 	uint32_t num_rx_err_ring_masks;
1043 	uint32_t num_rx_wbm_rel_ring_masks;
1044 	uint32_t num_reo_status_ring_masks;
1045 	uint32_t num_rxdma2host_ring_masks;
1046 	uint32_t num_host2rxdma_ring_masks;
1047 	uint32_t num_host2rxdma_mon_ring_masks;
1048 	uint32_t num_rx_ring_near_full_masks[MAX_REO_DEST_RINGS];
1049 	uint32_t num_tx_comp_ring_near_full_masks[MAX_TCL_DATA_RINGS];
1050 	uint32_t num_rx_wbm_rel_ring_near_full_masks;
1051 	uint32_t num_reo_status_ring_near_full_masks;
1052 	uint32_t num_host2txmon_ring__masks;
1053 	uint32_t num_near_full_masks;
1054 	uint32_t num_masks;
1055 	uint32_t num_tx_mon_ring_masks;
1056 };
1057 
1058 #ifdef DP_UMAC_HW_RESET_SUPPORT
1059 /**
1060  * struct dp_intr_bkp - DP per interrupt context ring masks old state
1061  * @tx_ring_mask: WBM Tx completion rings (0-2) associated with this napi ctxt
1062  * @rx_ring_mask: Rx REO rings (0-3) associated with this interrupt context
1063  * @rx_mon_ring_mask: Rx monitor ring mask (0-2)
1064  * @rx_err_ring_mask: REO Exception Ring
1065  * @rx_wbm_rel_ring_mask: WBM2SW Rx Release Ring
1066  * @reo_status_ring_mask: REO command response ring
1067  * @rxdma2host_ring_mask: RXDMA to host destination ring
1068  * @host2rxdma_ring_mask: Host to RXDMA buffer ring
1069  * @host2rxdma_mon_ring_mask: Host to RXDMA monitor  buffer ring
1070  * @host2txmon_ring_mask: Tx monitor buffer ring
1071  * @tx_mon_ring_mask: Tx monitor ring mask (0-2)
1072  *
1073  */
1074 struct dp_intr_bkp {
1075 	uint8_t tx_ring_mask;
1076 	uint8_t rx_ring_mask;
1077 	uint8_t rx_mon_ring_mask;
1078 	uint8_t rx_err_ring_mask;
1079 	uint8_t rx_wbm_rel_ring_mask;
1080 	uint8_t reo_status_ring_mask;
1081 	uint8_t rxdma2host_ring_mask;
1082 	uint8_t host2rxdma_ring_mask;
1083 	uint8_t host2rxdma_mon_ring_mask;
1084 	uint8_t host2txmon_ring_mask;
1085 	uint8_t tx_mon_ring_mask;
1086 };
1087 #endif
1088 
1089 /* per interrupt context  */
1090 struct dp_intr {
1091 	uint8_t tx_ring_mask;   /* WBM Tx completion rings (0-2)
1092 				associated with this napi context */
1093 	uint8_t rx_ring_mask;   /* Rx REO rings (0-3) associated
1094 				with this interrupt context */
1095 	uint8_t rx_mon_ring_mask;  /* Rx monitor ring mask (0-2) */
1096 	uint8_t rx_err_ring_mask; /* REO Exception Ring */
1097 	uint8_t rx_wbm_rel_ring_mask; /* WBM2SW Rx Release Ring */
1098 	uint8_t reo_status_ring_mask; /* REO command response ring */
1099 	uint8_t rxdma2host_ring_mask; /* RXDMA to host destination ring */
1100 	uint8_t host2rxdma_ring_mask; /* Host to RXDMA buffer ring */
1101 	/* Host to RXDMA monitor  buffer ring */
1102 	uint8_t host2rxdma_mon_ring_mask;
1103 	/* RX REO rings near full interrupt mask */
1104 	uint8_t rx_near_full_grp_1_mask;
1105 	/* RX REO rings near full interrupt mask */
1106 	uint8_t rx_near_full_grp_2_mask;
1107 	/* WBM TX completion rings near full interrupt mask */
1108 	uint8_t tx_ring_near_full_mask;
1109 	uint8_t host2txmon_ring_mask; /* Tx monitor buffer ring */
1110 	uint8_t tx_mon_ring_mask;  /* Tx monitor ring mask (0-2) */
1111 	struct dp_soc *soc;    /* Reference to SoC structure,
1112 				to get DMA ring handles */
1113 	qdf_lro_ctx_t lro_ctx;
1114 	uint8_t dp_intr_id;
1115 
1116 	/* Interrupt Stats for individual masks */
1117 	struct dp_intr_stats intr_stats;
1118 	uint8_t umac_reset_intr_mask;  /* UMAC reset interrupt mask */
1119 };
1120 
1121 #define REO_DESC_FREELIST_SIZE 64
1122 #define REO_DESC_FREE_DEFER_MS 1000
1123 struct reo_desc_list_node {
1124 	qdf_list_node_t node;
1125 	unsigned long free_ts;
1126 	struct dp_rx_tid rx_tid;
1127 	bool resend_update_reo_cmd;
1128 	uint32_t pending_ext_desc_size;
1129 #ifdef REO_QDESC_HISTORY
1130 	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
1131 #endif
1132 };
1133 
1134 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
1135 #define REO_DESC_DEFERRED_FREELIST_SIZE 256
1136 #define REO_DESC_DEFERRED_FREE_MS 30000
1137 
1138 struct reo_desc_deferred_freelist_node {
1139 	qdf_list_node_t node;
1140 	unsigned long free_ts;
1141 	void *hw_qdesc_vaddr_unaligned;
1142 	qdf_dma_addr_t hw_qdesc_paddr;
1143 	uint32_t hw_qdesc_alloc_size;
1144 #ifdef REO_QDESC_HISTORY
1145 	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
1146 #endif /* REO_QDESC_HISTORY */
1147 };
1148 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
1149 
1150 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1151 /**
1152  * struct reo_cmd_event_record: Elements to record for each reo command
1153  * @cmd_type: reo command type
1154  * @cmd_return_status: reo command post status
1155  * @timestamp: record timestamp for the reo command
1156  */
1157 struct reo_cmd_event_record {
1158 	enum hal_reo_cmd_type cmd_type;
1159 	uint8_t cmd_return_status;
1160 	uint64_t timestamp;
1161 };
1162 
1163 /**
1164  * struct reo_cmd_event_history: Account for reo cmd events
1165  * @index: record number
1166  * @cmd_record: list of records
1167  */
1168 struct reo_cmd_event_history {
1169 	qdf_atomic_t index;
1170 	struct reo_cmd_event_record cmd_record[REO_CMD_EVENT_HIST_MAX];
1171 };
1172 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1173 
1174 /* SoC level data path statistics */
1175 struct dp_soc_stats {
1176 	struct {
1177 		uint32_t added;
1178 		uint32_t deleted;
1179 		uint32_t aged_out;
1180 		uint32_t map_err;
1181 		uint32_t ast_mismatch;
1182 	} ast;
1183 
1184 	struct {
1185 		uint32_t added;
1186 		uint32_t deleted;
1187 	} mec;
1188 
1189 	/* SOC level TX stats */
1190 	struct {
1191 		/* Total packets transmitted */
1192 		struct cdp_pkt_info egress[MAX_TCL_DATA_RINGS];
1193 		/* Enqueues per tcl ring */
1194 		uint32_t tcl_enq[MAX_TCL_DATA_RINGS];
1195 		/* packets dropped on tx because of no peer */
1196 		struct cdp_pkt_info tx_invalid_peer;
1197 		/* descriptors in each tcl ring */
1198 		uint32_t tcl_ring_full[MAX_TCL_DATA_RINGS];
1199 		/* Descriptors in use at soc */
1200 		uint32_t desc_in_use;
1201 		/* tqm_release_reason == FW removed */
1202 		uint32_t dropped_fw_removed;
1203 		/* tx completion release_src != TQM or FW */
1204 		uint32_t invalid_release_source;
1205 		/* TX descriptor from completion ring Desc is not valid */
1206 		uint32_t invalid_tx_comp_desc;
1207 		/* tx completion wbm_internal_error */
1208 		uint32_t wbm_internal_error[MAX_WBM_INT_ERROR_REASONS];
1209 		/* tx completion non_wbm_internal_error */
1210 		uint32_t non_wbm_internal_err;
1211 		/* TX Comp loop packet limit hit */
1212 		uint32_t tx_comp_loop_pkt_limit_hit;
1213 		/* Head pointer Out of sync at the end of dp_tx_comp_handler */
1214 		uint32_t hp_oos2;
1215 		/* tx desc freed as part of vdev detach */
1216 		uint32_t tx_comp_exception;
1217 		/* TQM drops after/during peer delete */
1218 		uint64_t tqm_drop_no_peer;
1219 		/* Number of tx completions reaped per WBM2SW release ring */
1220 		uint32_t tx_comp[MAX_TCL_DATA_RINGS];
1221 		/* Number of tx completions force freed */
1222 		uint32_t tx_comp_force_freed;
1223 		/* Tx completion ring near full */
1224 		uint32_t near_full;
1225 		/* Tx drops with buffer src as HAL_TX_COMP_RELEASE_SOURCE_FW */
1226 		uint32_t fw2wbm_tx_drop;
1227 	} tx;
1228 
1229 	/* SOC level RX stats */
1230 	struct {
1231 		/* Total rx packets count */
1232 		struct cdp_pkt_info ingress;
1233 		/* Rx errors */
1234 		/* Total Packets in Rx Error ring */
1235 		uint32_t err_ring_pkts;
1236 		/* No of Fragments */
1237 		uint32_t rx_frags;
1238 		/* No of incomplete fragments in waitlist */
1239 		uint32_t rx_frag_wait;
1240 		/* Fragments dropped due to errors */
1241 		uint32_t rx_frag_err;
1242 		/* Fragments received OOR causing sequence num mismatch */
1243 		uint32_t rx_frag_oor;
1244 		/* Fragments dropped due to len errors in skb */
1245 		uint32_t rx_frag_err_len_error;
1246 		/* Fragments dropped due to no peer found */
1247 		uint32_t rx_frag_err_no_peer;
1248 		/* No of reinjected packets */
1249 		uint32_t reo_reinject;
1250 		/* Reap loop packet limit hit */
1251 		uint32_t reap_loop_pkt_limit_hit;
1252 		/* Head pointer Out of sync at the end of dp_rx_process */
1253 		uint32_t hp_oos2;
1254 		/* Rx ring near full */
1255 		uint32_t near_full;
1256 		/* Break ring reaping as not all scattered msdu received */
1257 		uint32_t msdu_scatter_wait_break;
1258 		/* Number of bar frames received */
1259 		uint32_t bar_frame;
1260 		/* Number of frames routed from rxdma */
1261 		uint32_t rxdma2rel_route_drop;
1262 		/* Number of frames routed from reo*/
1263 		uint32_t reo2rel_route_drop;
1264 		uint64_t fast_recycled;
1265 		/* Number of hw stats requested */
1266 		uint32_t rx_hw_stats_requested;
1267 		/* Number of hw stats request timeout */
1268 		uint32_t rx_hw_stats_timeout;
1269 
1270 		struct {
1271 			/* Invalid RBM error count */
1272 			uint32_t invalid_rbm;
1273 			/* Invalid VDEV Error count */
1274 			uint32_t invalid_vdev;
1275 			/* Invalid PDEV error count */
1276 			uint32_t invalid_pdev;
1277 
1278 			/* Packets delivered to stack that have no related peer */
1279 			uint32_t pkt_delivered_no_peer;
1280 			/* Defrag peer uninit error count */
1281 			uint32_t defrag_peer_uninit;
1282 			/* Invalid sa_idx or da_idx*/
1283 			uint32_t invalid_sa_da_idx;
1284 			/* MSDU DONE failures */
1285 			uint32_t msdu_done_fail;
1286 			/* Invalid PEER Error count */
1287 			struct cdp_pkt_info rx_invalid_peer;
1288 			/* Invalid PEER ID count */
1289 			struct cdp_pkt_info rx_invalid_peer_id;
1290 			/* Invalid packet length */
1291 			struct cdp_pkt_info rx_invalid_pkt_len;
1292 			/* HAL ring access Fail error count */
1293 			uint32_t hal_ring_access_fail;
1294 			/* HAL ring access full Fail error count */
1295 			uint32_t hal_ring_access_full_fail;
1296 			/* RX DMA error count */
1297 			uint32_t rxdma_error[HAL_RXDMA_ERR_MAX];
1298 			/* RX REO DEST Desc Invalid Magic count */
1299 			uint32_t rx_desc_invalid_magic;
1300 			/* REO Error count */
1301 			uint32_t reo_error[HAL_REO_ERR_MAX];
1302 			/* HAL REO ERR Count */
1303 			uint32_t hal_reo_error[MAX_REO_DEST_RINGS];
1304 			/* HAL REO DEST Duplicate count */
1305 			uint32_t hal_reo_dest_dup;
1306 			/* HAL WBM RELEASE Duplicate count */
1307 			uint32_t hal_wbm_rel_dup;
1308 			/* HAL RXDMA error Duplicate count */
1309 			uint32_t hal_rxdma_err_dup;
1310 			/* ipa smmu map duplicate count */
1311 			uint32_t ipa_smmu_map_dup;
1312 			/* ipa smmu unmap duplicate count */
1313 			uint32_t ipa_smmu_unmap_dup;
1314 			/* ipa smmu unmap while ipa pipes is disabled */
1315 			uint32_t ipa_unmap_no_pipe;
1316 			/* REO cmd send fail/requeue count */
1317 			uint32_t reo_cmd_send_fail;
1318 			/* REO cmd send drain count */
1319 			uint32_t reo_cmd_send_drain;
1320 			/* RX msdu drop count due to scatter */
1321 			uint32_t scatter_msdu;
1322 			/* RX msdu drop count due to invalid cookie */
1323 			uint32_t invalid_cookie;
1324 			/* Count of stale cookie read in RX path */
1325 			uint32_t stale_cookie;
1326 			/* Delba sent count due to RX 2k jump */
1327 			uint32_t rx_2k_jump_delba_sent;
1328 			/* RX 2k jump msdu indicated to stack count */
1329 			uint32_t rx_2k_jump_to_stack;
1330 			/* RX 2k jump msdu dropped count */
1331 			uint32_t rx_2k_jump_drop;
1332 			/* REO ERR msdu buffer received */
1333 			uint32_t reo_err_msdu_buf_rcved;
1334 			/* REO ERR msdu buffer with invalid cookie received */
1335 			uint32_t reo_err_msdu_buf_invalid_cookie;
1336 			/* REO OOR msdu drop count */
1337 			uint32_t reo_err_oor_drop;
1338 			/* REO OOR msdu indicated to stack count */
1339 			uint32_t reo_err_oor_to_stack;
1340 			/* REO OOR scattered msdu count */
1341 			uint32_t reo_err_oor_sg_count;
1342 			/* RX msdu rejected count on delivery to vdev stack_fn*/
1343 			uint32_t rejected;
1344 			/* Incorrect msdu count in MPDU desc info */
1345 			uint32_t msdu_count_mismatch;
1346 			/* RX raw frame dropped count */
1347 			uint32_t raw_frm_drop;
1348 			/* Stale link desc cookie count*/
1349 			uint32_t invalid_link_cookie;
1350 			/* Nbuf sanity failure */
1351 			uint32_t nbuf_sanity_fail;
1352 			/* Duplicate link desc refilled */
1353 			uint32_t dup_refill_link_desc;
1354 			/* Incorrect msdu continuation bit in MSDU desc */
1355 			uint32_t msdu_continuation_err;
1356 			/* count of start sequence (ssn) updates */
1357 			uint32_t ssn_update_count;
1358 			/* count of bar handling fail */
1359 			uint32_t bar_handle_fail_count;
1360 			/* EAPOL drop count in intrabss scenario */
1361 			uint32_t intrabss_eapol_drop;
1362 			/* PN check failed for 2K-jump or OOR error */
1363 			uint32_t pn_in_dest_check_fail;
1364 			/* MSDU len err count */
1365 			uint32_t msdu_len_err;
1366 			/* Rx flush count */
1367 			uint32_t rx_flush_count;
1368 			/* Rx invalid tid count */
1369 			uint32_t rx_invalid_tid_err;
1370 			/* Invalid address1 in defrag path*/
1371 			uint32_t defrag_ad1_invalid;
1372 			/* decrypt error drop */
1373 			uint32_t decrypt_err_drop;
1374 		} err;
1375 
1376 		/* packet count per core - per ring */
1377 		uint64_t ring_packets[NR_CPUS][MAX_REO_DEST_RINGS];
1378 	} rx;
1379 
1380 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1381 	struct reo_cmd_event_history cmd_event_history;
1382 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1383 };
1384 
1385 union dp_align_mac_addr {
1386 	uint8_t raw[QDF_MAC_ADDR_SIZE];
1387 	struct {
1388 		uint16_t bytes_ab;
1389 		uint16_t bytes_cd;
1390 		uint16_t bytes_ef;
1391 	} align2;
1392 	struct {
1393 		uint32_t bytes_abcd;
1394 		uint16_t bytes_ef;
1395 	} align4;
1396 	struct __attribute__((__packed__)) {
1397 		uint16_t bytes_ab;
1398 		uint32_t bytes_cdef;
1399 	} align4_2;
1400 };
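/*
 * Example (illustrative sketch, hypothetical helper): the aligned views let a
 * MAC address compare use two integer loads instead of a per-byte loop.
 *
 *	static inline bool dp_mac_addr_equal(union dp_align_mac_addr *a,
 *					     union dp_align_mac_addr *b)
 *	{
 *		return a->align4.bytes_abcd == b->align4.bytes_abcd &&
 *		       a->align4.bytes_ef == b->align4.bytes_ef;
 *	}
 */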
1401 
1402 /**
1403  * struct dp_ast_free_cb_params - HMWDS free callback cookie
1404  * @mac_addr: ast mac address
1405  * @peer_mac_addr: mac address of peer
1406  * @type: ast entry type
1407  * @vdev_id: vdev_id
1408  * @flags: ast flags
1409  */
1410 struct dp_ast_free_cb_params {
1411 	union dp_align_mac_addr mac_addr;
1412 	union dp_align_mac_addr peer_mac_addr;
1413 	enum cdp_txrx_ast_entry_type type;
1414 	uint8_t vdev_id;
1415 	uint32_t flags;
1416 };
1417 
1418 /**
1419  * struct dp_ast_entry - AST entry
1420  *
1421  * @ast_idx: Hardware AST Index
1422  * @peer_id: Next Hop peer_id (for non-WDS nodes, this will point to the
1423  *           associated peer with this MAC address)
1424  * @mac_addr:  MAC Address for this AST entry
1425  * @next_hop: Set to 1 if this is for a WDS node
1426  * @is_active: flag to indicate active data traffic on this node
1427  *             (used for aging out/expiry)
1428  * @ase_list_elem: node in peer AST list
1429  * @is_bss: flag to indicate if entry corresponds to bss peer
1430  * @is_mapped: flag to indicate that we have mapped the AST entry
1431  *             in ast_table
1432  * @pdev_id: pdev ID
1433  * @vdev_id: vdev ID
1434  * @ast_hash_value: hash value in HW
1435  * @ref_cnt: reference count
1436  * @type: flag to indicate type of the entry(static/WDS/MEC)
1437  * @delete_in_progress: Flag to indicate that a delete command has been sent
1438  *                      to FW and the host is waiting for a response from FW
1439  * @callback: ast free/unmap callback
1440  * @cookie: argument to callback
1441  * @hash_list_elem: node in soc AST hash list (mac address used as hash)
1442  */
1443 struct dp_ast_entry {
1444 	uint16_t ast_idx;
1445 	uint16_t peer_id;
1446 	union dp_align_mac_addr mac_addr;
1447 	bool next_hop;
1448 	bool is_active;
1449 	bool is_mapped;
1450 	uint8_t pdev_id;
1451 	uint8_t vdev_id;
1452 	uint16_t ast_hash_value;
1453 	qdf_atomic_t ref_cnt;
1454 	enum cdp_txrx_ast_entry_type type;
1455 	bool delete_in_progress;
1456 	txrx_ast_free_cb callback;
1457 	void *cookie;
1458 	TAILQ_ENTRY(dp_ast_entry) ase_list_elem;
1459 	TAILQ_ENTRY(dp_ast_entry) hash_list_elem;
1460 };
1461 
1462 /**
1463  * struct dp_mec_entry - MEC entry
1464  *
1465  * @mac_addr:  MAC Address for this MEC entry
1466  * @is_active: flag to indicate active data traffic on this node
1467  *             (used for aging out/expiry)
1468  * @pdev_id: pdev ID
1469  * @vdev_id: vdev ID
1470  * @hash_list_elem: node in soc MEC hash list (mac address used as hash)
1471  */
1472 struct dp_mec_entry {
1473 	union dp_align_mac_addr mac_addr;
1474 	bool is_active;
1475 	uint8_t pdev_id;
1476 	uint8_t vdev_id;
1477 
1478 	TAILQ_ENTRY(dp_mec_entry) hash_list_elem;
1479 };
1480 
1481 /* SOC level htt stats */
1482 struct htt_t2h_stats {
1483 	/* lock to protect htt_stats_msg update */
1484 	qdf_spinlock_t lock;
1485 
1486 	/* work queue to process htt stats */
1487 	qdf_work_t work;
1488 
1489 	/* T2H Ext stats message queue */
1490 	qdf_nbuf_queue_t msg;
1491 
1492 	/* number of completed stats in htt_stats_msg */
1493 	uint32_t num_stats;
1494 };
1495 
1496 struct link_desc_bank {
1497 	void *base_vaddr_unaligned;
1498 	void *base_vaddr;
1499 	qdf_dma_addr_t base_paddr_unaligned;
1500 	qdf_dma_addr_t base_paddr;
1501 	uint32_t size;
1502 };
1503 
1504 struct rx_buff_pool {
1505 	qdf_nbuf_queue_head_t emerg_nbuf_q;
1506 	uint32_t nbuf_fail_cnt;
1507 	bool is_initialized;
1508 };
1509 
1510 struct rx_refill_buff_pool {
1511 	bool is_initialized;
1512 	uint16_t head;
1513 	uint16_t tail;
1514 	struct dp_pdev *dp_pdev;
1515 	uint16_t max_bufq_len;
1516 	qdf_nbuf_t buf_elem[2048];
1517 };
1518 
1519 #ifdef DP_TX_HW_DESC_HISTORY
1520 #define DP_TX_HW_DESC_HIST_MAX 6144
1521 #define DP_TX_HW_DESC_HIST_PER_SLOT_MAX 2048
1522 #define DP_TX_HW_DESC_HIST_MAX_SLOTS 3
1523 #define DP_TX_HW_DESC_HIST_SLOT_SHIFT 11
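/*
 * Example (illustrative only): since DP_TX_HW_DESC_HIST_PER_SLOT_MAX equals
 * 1 << DP_TX_HW_DESC_HIST_SLOT_SHIFT, a running history index splits into a
 * slot and an offset within that slot as follows:
 *
 *	uint16_t slot = idx >> DP_TX_HW_DESC_HIST_SLOT_SHIFT;
 *	uint16_t offset = idx & (DP_TX_HW_DESC_HIST_PER_SLOT_MAX - 1);
 */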
1524 
1525 struct dp_tx_hw_desc_evt {
1526 	uint8_t tcl_desc[HAL_TX_DESC_LEN_BYTES];
1527 	uint8_t tcl_ring_id;
1528 	uint64_t posted;
1529 	uint32_t hp;
1530 	uint32_t tp;
1531 };
1532 
1533 /* struct dp_tx_hw_desc_history - TX HW desc history
1534  * @index: Index where the last entry is written
1535  * @entry: history entries
1536  */
1537 struct dp_tx_hw_desc_history {
1538 	qdf_atomic_t index;
1539 	uint16_t num_entries_per_slot;
1540 	uint16_t allocated;
1541 	struct dp_tx_hw_desc_evt *entry[DP_TX_HW_DESC_HIST_MAX_SLOTS];
1542 };
1543 #endif
1544 
1545 /**
1546  * enum dp_mon_status_process_event - Events for monitor status buffer record
1547  * @DP_MON_STATUS_BUF_REAP: Monitor status buffer is reaped from ring
1548  * @DP_MON_STATUS_BUF_ENQUEUE: Status buffer is enqueued to local queue
1549  * @DP_MON_STATUS_BUF_DEQUEUE: Status buffer is dequeued from local queue
1550  */
1551 enum dp_mon_status_process_event {
1552 	DP_MON_STATUS_BUF_REAP,
1553 	DP_MON_STATUS_BUF_ENQUEUE,
1554 	DP_MON_STATUS_BUF_DEQUEUE,
1555 };
1556 
1557 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
1558 #define DP_MON_STATUS_HIST_MAX	2048
1559 
1560 /**
1561  * struct dp_mon_stat_info_record - monitor stat ring buffer info
1562  * @hbi: HW ring buffer info
1563  * @timestamp: timestamp when this entry was recorded
1564  * @event: event
1565  * @rx_desc: RX descriptor corresponding to the received buffer
1566  * @nbuf: buffer attached to rx_desc, if event is REAP, else the buffer
1567  *	  which was enqueued or dequeued.
1568  * @rx_desc_nbuf_data: nbuf data pointer.
1569  */
1570 struct dp_mon_stat_info_record {
1571 	struct hal_buf_info hbi;
1572 	uint64_t timestamp;
1573 	enum dp_mon_status_process_event event;
1574 	void *rx_desc;
1575 	qdf_nbuf_t nbuf;
1576 	uint8_t *rx_desc_nbuf_data;
1577 };
1578 
1579 /* struct dp_mon_status_ring_history - monitor status ring history
1580  * @index: Index where the last entry is written
1581  * @entry: history entries
1582  */
1583 struct dp_mon_status_ring_history {
1584 	qdf_atomic_t index;
1585 	struct dp_mon_stat_info_record entry[DP_MON_STATUS_HIST_MAX];
1586 };
1587 #endif
1588 
1589 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1590 /*
1591  * The logic for getting the current index of these histories depends on
1592  * these values being powers of 2.
1593  */
1594 #define DP_RX_HIST_MAX 2048
1595 #define DP_RX_ERR_HIST_MAX 2048
1596 #define DP_RX_REINJECT_HIST_MAX 1024
1597 #define DP_RX_REFILL_HIST_MAX 2048
1598 
1599 QDF_COMPILE_TIME_ASSERT(rx_history_size,
1600 			(DP_RX_HIST_MAX &
1601 			 (DP_RX_HIST_MAX - 1)) == 0);
1602 QDF_COMPILE_TIME_ASSERT(rx_err_history_size,
1603 			(DP_RX_ERR_HIST_MAX &
1604 			 (DP_RX_ERR_HIST_MAX - 1)) == 0);
1605 QDF_COMPILE_TIME_ASSERT(rx_reinject_history_size,
1606 			(DP_RX_REINJECT_HIST_MAX &
1607 			 (DP_RX_REINJECT_HIST_MAX - 1)) == 0);
1608 QDF_COMPILE_TIME_ASSERT(rx_refill_history_size,
1609 			(DP_RX_REFILL_HIST_MAX &
1610 			(DP_RX_REFILL_HIST_MAX - 1)) == 0);
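/*
 * Example (illustrative only): because the depths above are powers of 2, a
 * recorder can wrap its write index with a mask instead of a modulo.
 *
 *	uint32_t idx = qdf_atomic_inc_return(&history->index) &
 *		       (DP_RX_HIST_MAX - 1);
 *	history->entry[idx].timestamp = qdf_get_log_timestamp();
 */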
1611 
1612 
1613 /**
1614  * struct dp_buf_info_record - ring buffer info
1615  * @hbi: HW ring buffer info
1616  * @timestamp: timestamp when this entry was recorded
1617  */
1618 struct dp_buf_info_record {
1619 	struct hal_buf_info hbi;
1620 	uint64_t timestamp;
1621 };
1622 
1623 /**
1624  * struct dp_refill_info_record - ring refill buffer info
1625  * @hp: HP value after refill
1626  * @tp: cached tail value during refill
1627  * @num_req: number of buffers requested to refill
1628  * @num_refill: number of buffers refilled to ring
1629  * @timestamp: timestamp when this entry was recorded
1630  */
1631 struct dp_refill_info_record {
1632 	uint32_t hp;
1633 	uint32_t tp;
1634 	uint32_t num_req;
1635 	uint32_t num_refill;
1636 	uint64_t timestamp;
1637 };
1638 
1639 /**
1640  * struct dp_rx_history - rx ring history
1641  * @index: Index where the last entry is written
1642  * @entry: history entries
1643  */
1644 struct dp_rx_history {
1645 	qdf_atomic_t index;
1646 	struct dp_buf_info_record entry[DP_RX_HIST_MAX];
1647 };
1648 
1649 /**
1650  * struct dp_rx_err_history - rx err ring history
1651  * @index: Index where the last entry is written
1652  * @entry: history entries
1653  */
1654 struct dp_rx_err_history {
1655 	qdf_atomic_t index;
1656 	struct dp_buf_info_record entry[DP_RX_ERR_HIST_MAX];
1657 };
1658 
1659 /**
1660  * struct dp_rx_reinject_history - rx reinject ring history
1661  * @index: Index where the last entry is written
1662  * @entry: history entries
1663  */
1664 struct dp_rx_reinject_history {
1665 	qdf_atomic_t index;
1666 	struct dp_buf_info_record entry[DP_RX_REINJECT_HIST_MAX];
1667 };
1668 
1669 /**
1670  * struct dp_rx_refill_history - rx buf refill history
1671  * @index: Index where the last entry is written
1672  * @entry: history entries
1673  */
1674 struct dp_rx_refill_history {
1675 	qdf_atomic_t index;
1676 	struct dp_refill_info_record entry[DP_RX_REFILL_HIST_MAX];
1677 };
1678 
1679 #endif
1680 
1681 /**
1682  * enum dp_cfg_event_type - Datapath config events type
1683  * @DP_CFG_EVENT_VDEV_ATTACH: vdev attach
1684  * @DP_CFG_EVENT_VDEV_DETACH: vdev detach
1685  * @DP_CFG_EVENT_VDEV_UNREF_DEL: vdev memory free after last ref is released
1686  * @DP_CFG_EVENT_PEER_CREATE: peer create
1687  * @DP_CFG_EVENT_PEER_DELETE: peer delete
1688  * @DP_CFG_EVENT_PEER_UNREF_DEL: peer memory free after last ref is released
1689  * @DP_CFG_EVENT_PEER_SETUP: peer setup
1690  * @DP_CFG_EVENT_MLO_ADD_LINK: add link peer to mld peer
1691  * @DP_CFG_EVENT_MLO_DEL_LINK: delete link peer from mld peer
1692  * @DP_CFG_EVENT_MLO_SETUP: MLO peer setup
1693  * @DP_CFG_EVENT_MLO_SETUP_VDEV_UPDATE: MLD peer vdev update
1694  * @DP_CFG_EVENT_PEER_MAP: peer map
1695  * @DP_CFG_EVENT_PEER_UNMAP: peer unmap
1696  * @DP_CFG_EVENT_MLO_PEER_MAP: MLD peer map
1697  * @DP_CFG_EVENT_MLO_PEER_UNMAP: MLD peer unmap
1698  */
1699 enum dp_cfg_event_type {
1700 	DP_CFG_EVENT_VDEV_ATTACH,
1701 	DP_CFG_EVENT_VDEV_DETACH,
1702 	DP_CFG_EVENT_VDEV_UNREF_DEL,
1703 	DP_CFG_EVENT_PEER_CREATE,
1704 	DP_CFG_EVENT_PEER_DELETE,
1705 	DP_CFG_EVENT_PEER_UNREF_DEL,
1706 	DP_CFG_EVENT_PEER_SETUP,
1707 	DP_CFG_EVENT_MLO_ADD_LINK,
1708 	DP_CFG_EVENT_MLO_DEL_LINK,
1709 	DP_CFG_EVENT_MLO_SETUP,
1710 	DP_CFG_EVENT_MLO_SETUP_VDEV_UPDATE,
1711 	DP_CFG_EVENT_PEER_MAP,
1712 	DP_CFG_EVENT_PEER_UNMAP,
1713 	DP_CFG_EVENT_MLO_PEER_MAP,
1714 	DP_CFG_EVENT_MLO_PEER_UNMAP,
1715 };
1716 
1717 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY
1718 /* Size must be a power of 2, for bitwise index rotation */
1719 #define DP_CFG_EVT_HISTORY_SIZE 0x800
1720 #define DP_CFG_EVT_HIST_PER_SLOT_MAX 256
1721 #define DP_CFG_EVT_HIST_MAX_SLOTS 8
1722 #define DP_CFG_EVT_HIST_SLOT_SHIFT 8
1723 
1724 /**
1725  * struct dp_vdev_attach_detach_desc - vdev ops descriptor
1726  * @vdev: DP vdev handle
1727  * @mac_addr: vdev mac address
1728  * @vdev_id: vdev id
1729  * @ref_count: vdev ref count
1730  */
1731 struct dp_vdev_attach_detach_desc {
1732 	struct dp_vdev *vdev;
1733 	union dp_align_mac_addr mac_addr;
1734 	uint8_t vdev_id;
1735 	int32_t ref_count;
1736 };
1737 
1738 /**
1739  * struct dp_peer_cmn_ops_desc - peer events descriptor
1740  * @vdev_id: vdev_id of the vdev on which peer exists
1741  * @is_reuse: indicates if it is a peer reuse case, during peer create
1742  * @peer: DP peer handle
1743  * @vdev: DP vdev handle on which peer exists
1744  * @mac_addr: peer mac address
1745  * @vdev_mac_addr: vdev mac address
1746  * @vdev_ref_count: vdev ref count
1747  * @peer_ref_count: peer ref count
1748  */
1749 struct dp_peer_cmn_ops_desc {
1750 	uint8_t vdev_id : 5,
1751 		is_reuse : 1;
1752 	struct dp_peer *peer;
1753 	struct dp_vdev *vdev;
1754 	union dp_align_mac_addr mac_addr;
1755 	union dp_align_mac_addr vdev_mac_addr;
1756 	int32_t vdev_ref_count;
1757 	int32_t peer_ref_count;
1758 };
1759 
1760 /**
1761  * struct dp_mlo_add_del_link_desc - MLO add/del link event descriptor
1762  * @idx: index at which link peer got added in MLD peer's list
1763  * @num_links: num links added in the MLD peer's list
1764  * @action_result: indicates whether the add/del was successful
1765  * @reserved: reserved bit
1766  * @link_peer: link peer handle
1767  * @mld_peer: MLD peer handle
1768  * @link_mac_addr: link peer mac address
1769  * @mld_mac_addr: MLD peer mac address
1770  */
1771 struct dp_mlo_add_del_link_desc {
1772 	uint8_t idx : 3,
1773 		num_links : 3,
1774 		action_result : 1,
1775 		reserved : 1;
1776 	struct dp_peer *link_peer;
1777 	struct dp_peer *mld_peer;
1778 	union dp_align_mac_addr link_mac_addr;
1779 	union dp_align_mac_addr mld_mac_addr;
1780 };
1781 
1782 /**
1783  * struct dp_mlo_setup_vdev_update_desc - MLD peer vdev update event desc
1784  * @mld_peer: MLD peer handle
1785  * @prev_vdev: previous vdev handle
1786  * @new_vdev: new vdev handle
1787  */
1788 struct dp_mlo_setup_vdev_update_desc {
1789 	struct dp_peer *mld_peer;
1790 	struct dp_vdev *prev_vdev;
1791 	struct dp_vdev *new_vdev;
1792 };
1793 
1794 /**
1795  * struct dp_rx_peer_map_unmap_desc - peer map/unmap event descriptor
1796  * @peer_id: peer id
1797  * @ml_peer_id: ML peer id, if it is an MLD peer
1798  * @hw_peer_id: hw peer id
1799  * @vdev_id: vdev id of the peer
1800  * @is_ml_peer: is this MLD peer
1801  * @mac_addr: mac address of the peer
1802  * @peer: peer handle
1803  */
1804 struct dp_rx_peer_map_unmap_desc {
1805 	uint16_t peer_id;
1806 	uint16_t ml_peer_id;
1807 	uint16_t hw_peer_id;
1808 	uint8_t vdev_id;
1809 	uint8_t is_ml_peer;
1810 	union dp_align_mac_addr mac_addr;
1811 	struct dp_peer *peer;
1812 };
1813 
1814 /**
1815  * struct dp_peer_setup_desc - peer setup event descriptor
1816  * @peer: DP peer handle
1817  * @vdev: vdev handle on which peer exists
1818  * @vdev_ref_count: vdev ref count
1819  * @mac_addr: peer mac address
1820  * @mld_mac_addr: MLD mac address
1821  * @is_first_link: is the current link the first link created
1822  * @is_primary_link: is the current link primary link
1823  * @vdev_id: vdev id of the vdev on which the current link peer exists
1824  * @reserved: reserved bit
1825  */
1826 struct dp_peer_setup_desc {
1827 	struct dp_peer *peer;
1828 	struct dp_vdev *vdev;
1829 	int32_t vdev_ref_count;
1830 	union dp_align_mac_addr mac_addr;
1831 	union dp_align_mac_addr mld_mac_addr;
1832 	uint8_t is_first_link : 1,
1833 		is_primary_link : 1,
1834 		vdev_id : 5,
1835 		reserved : 1;
1836 };
1837 
1838 /**
1839  * union dp_cfg_event_desc - DP config event descriptor
1840  * @vdev_evt: vdev events desc
1841  * @peer_cmn_evt: common peer events desc
1842  * @peer_setup_evt: peer setup event desc
1843  * @mlo_link_delink_evt: MLO link/delink event desc
1844  * @mlo_setup_vdev_update: MLD peer vdev update event desc
1845  * @peer_map_unmap_evt: peer map/unmap event desc
1846  */
1847 union dp_cfg_event_desc {
1848 	struct dp_vdev_attach_detach_desc vdev_evt;
1849 	struct dp_peer_cmn_ops_desc peer_cmn_evt;
1850 	struct dp_peer_setup_desc peer_setup_evt;
1851 	struct dp_mlo_add_del_link_desc mlo_link_delink_evt;
1852 	struct dp_mlo_setup_vdev_update_desc mlo_setup_vdev_update;
1853 	struct dp_rx_peer_map_unmap_desc peer_map_unmap_evt;
1854 };
1855 
1856 /**
1857  * struct dp_cfg_event - DP config event descriptor
1858  * @timestamp: timestamp at which event was recorded
1859  * @type: event type
1860  * @event_desc: event descriptor
1861  */
1862 struct dp_cfg_event {
1863 	uint64_t timestamp;
1864 	enum dp_cfg_event_type type;
1865 	union dp_cfg_event_desc event_desc;
1866 };
1867 
1868 /**
1869  * struct dp_cfg_event_history - DP config event history
1870  * @index: current index
1871  * @num_entries_per_slot: number of entries per slot
1872  * @allocated: Is the history allocated or not
1873  * @entry: event history descriptors
1874  */
1875 struct dp_cfg_event_history {
1876 	qdf_atomic_t index;
1877 	uint16_t num_entries_per_slot;
1878 	uint16_t allocated;
1879 	struct dp_cfg_event *entry[DP_CFG_EVT_HIST_MAX_SLOTS];
1880 };
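
/*
 * Illustrative sketch (assumed indexing, not the driver's recorder): the
 * slot macros above split a wrapped global index into a slot and an offset
 * within that slot, so the per-slot entry arrays can be allocated
 * independently:
 *
 *	uint32_t idx    = qdf_atomic_inc_return(&hist->index) &
 *			  (DP_CFG_EVT_HISTORY_SIZE - 1);
 *	uint16_t slot   = idx >> DP_CFG_EVT_HIST_SLOT_SHIFT;
 *	uint16_t offset = idx & (DP_CFG_EVT_HIST_PER_SLOT_MAX - 1);
 *	struct dp_cfg_event *evt = &hist->entry[slot][offset];
 *
 * "hist" is a hypothetical pointer to the soc-level dp_cfg_event_history.
 */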
1881 #endif
1882 
1883 enum dp_tx_event_type {
1884 	DP_TX_DESC_INVAL_EVT = 0,
1885 	DP_TX_DESC_MAP,
1886 	DP_TX_DESC_COOKIE,
1887 	DP_TX_DESC_FLUSH,
1888 	DP_TX_DESC_UNMAP,
1889 	DP_TX_COMP_UNMAP,
1890 	DP_TX_COMP_UNMAP_ERR,
1891 	DP_TX_COMP_MSDU_EXT,
1892 };
1893 
1894 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
1895 /* Size must be a power of 2, for bitwise index rotation */
1896 #define DP_TX_TCL_HISTORY_SIZE 0x4000
1897 #define DP_TX_TCL_HIST_PER_SLOT_MAX 2048
1898 #define DP_TX_TCL_HIST_MAX_SLOTS 8
1899 #define DP_TX_TCL_HIST_SLOT_SHIFT 11
1900 
1901 /* Size must be a power of 2, for bitwise index rotation */
1902 #define DP_TX_COMP_HISTORY_SIZE 0x4000
1903 #define DP_TX_COMP_HIST_PER_SLOT_MAX 2048
1904 #define DP_TX_COMP_HIST_MAX_SLOTS 8
1905 #define DP_TX_COMP_HIST_SLOT_SHIFT 11
1906 
1907 struct dp_tx_desc_event {
1908 	qdf_nbuf_t skb;
1909 	dma_addr_t paddr;
1910 	uint32_t sw_cookie;
1911 	enum dp_tx_event_type type;
1912 	uint64_t ts;
1913 };
1914 
1915 struct dp_tx_tcl_history {
1916 	qdf_atomic_t index;
1917 	uint16_t num_entries_per_slot;
1918 	uint16_t allocated;
1919 	struct dp_tx_desc_event *entry[DP_TX_TCL_HIST_MAX_SLOTS];
1920 };
1921 
1922 struct dp_tx_comp_history {
1923 	qdf_atomic_t index;
1924 	uint16_t num_entries_per_slot;
1925 	uint16_t allocated;
1926 	struct dp_tx_desc_event *entry[DP_TX_COMP_HIST_MAX_SLOTS];
1927 };
1928 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
1929 
1930 /* structure to record recent operation related variable */
1931 struct dp_last_op_info {
1932 	/* last link desc buf info through WBM release ring */
1933 	struct hal_buf_info wbm_rel_link_desc;
1934 	/* last link desc buf info through REO reinject ring */
1935 	struct hal_buf_info reo_reinject_link_desc;
1936 };
1937 
1938 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1939 
1940 /**
1941  * struct dp_swlm_tcl_data - params for tcl register write coalescing
1942  *			     decision making
1943  * @nbuf: TX packet
1944  * @tid: tid for transmitting the current packet
1945  * @num_ll_connections: Number of low latency connections on this vdev
1946  * @ring_id: TCL ring id
1947  * @pkt_len: Packet length
1948  *
1949  * This structure contains the information required by the software
1950  * latency manager to decide whether to coalesce the current TCL
1951  * register write or not.
1952  */
1953 struct dp_swlm_tcl_data {
1954 	qdf_nbuf_t nbuf;
1955 	uint8_t tid;
1956 	uint8_t num_ll_connections;
1957 	uint8_t ring_id;
1958 	uint32_t pkt_len;
1959 };
1960 
1961 /**
1962  * union swlm_data - SWLM query data
1963  * @tcl_data: data for TCL query in SWLM
1964  */
1965 union swlm_data {
1966 	struct dp_swlm_tcl_data *tcl_data;
1967 };
1968 
1969 /**
1970  * struct dp_swlm_ops - SWLM ops
1971  * @tcl_wr_coalesce_check: handler to check if the current TCL register
1972  *			   write can be coalesced or not
1973  */
1974 struct dp_swlm_ops {
1975 	int (*tcl_wr_coalesce_check)(struct dp_soc *soc,
1976 				     struct dp_swlm_tcl_data *tcl_data);
1977 };
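
/*
 * Illustrative sketch (assumed call site, not the actual TX path): before
 * writing the TCL head pointer, the TX path can consult this hook to decide
 * whether the register write may be deferred and flushed later by the
 * per-ring flush timer:
 *
 *	struct dp_swlm_tcl_data tcl_data = {
 *		.nbuf = nbuf,
 *		.tid = tid,
 *		.ring_id = ring_id,
 *		.pkt_len = qdf_nbuf_len(nbuf),
 *	};
 *	int coalesce = soc->swlm.ops->tcl_wr_coalesce_check(soc, &tcl_data);
 *
 * "nbuf", "tid" and "ring_id" are hypothetical locals; a non-zero return
 * would mean the TCL HP write can be coalesced.
 */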
1978 
1979 /**
1980  * struct dp_swlm_stats - Stats for Software Latency manager.
1981  * @tcl: TCL stats
1982  * @tcl.timer_flush_success: Num TCL HP writes success from timer context
1983  * @tcl.timer_flush_fail: Num TCL HP writes failure from timer context
1984  * @tcl.tid_fail: Num TCL register write coalescing skips, since the pkt
1985  *		 was being transmitted on a TID above coalescing threshold
1986  * @tcl.sp_frames: Num TCL register write coalescing skips, since the pkt
1987  *		  being transmitted was a special frame
1988  * @tcl.ll_connection: Num TCL register write coalescing skips, since the
1989  *		       vdev has low latency connections
1990  * @tcl.bytes_thresh_reached: Num TCL HP writes flush after the coalescing
1991  *			     bytes threshold was reached
1992  * @tcl.time_thresh_reached: Num TCL HP writes flush after the coalescing
1993  *			    session time expired
1994  * @tcl.tput_criteria_fail: Num TCL HP writes coalescing fails, since the
1995  *			   throughput did not meet session threshold
1996  * @tcl.coalesce_success: Num of TCL HP writes coalesced successfully.
1997  * @tcl.coalesce_fail: Num of TCL HP writes that failed to coalesce
1998  */
1999 struct dp_swlm_stats {
2000 	struct {
2001 		uint32_t timer_flush_success;
2002 		uint32_t timer_flush_fail;
2003 		uint32_t tid_fail;
2004 		uint32_t sp_frames;
2005 		uint32_t ll_connection;
2006 		uint32_t bytes_thresh_reached;
2007 		uint32_t time_thresh_reached;
2008 		uint32_t tput_criteria_fail;
2009 		uint32_t coalesce_success;
2010 		uint32_t coalesce_fail;
2011 	} tcl[MAX_TCL_DATA_RINGS];
2012 };
2013 
2014 /**
2015  * struct dp_swlm_tcl_params: Parameters based on TCL for different modules
2016  *			      in the Software latency manager.
2017  * @soc: DP soc reference
2018  * @ring_id: TCL ring id
2019  * @flush_timer: Timer for flushing the coalesced TCL HP writes
2020  * @sampling_session_tx_bytes: Num bytes transmitted in the sampling time
2021  * @bytes_flush_thresh: Bytes threshold to flush the TCL HP register write
2022  * @coalesce_end_time: End timestamp for current coalescing session
2023  * @bytes_coalesced: Num bytes coalesced in the current session
2024  * @prev_tx_packets: Previous TX packets accounted
2025  * @prev_tx_bytes: Previous TX bytes accounted
2026  * @prev_rx_bytes: Previous RX bytes accounted
2027  * @expire_time: expiry time for sample
2028  * @tput_pass_cnt: threshold throughput pass counter
2029  */
2030 struct dp_swlm_tcl_params {
2031 	struct dp_soc *soc;
2032 	uint32_t ring_id;
2033 	qdf_timer_t flush_timer;
2034 	uint32_t sampling_session_tx_bytes;
2035 	uint32_t bytes_flush_thresh;
2036 	uint64_t coalesce_end_time;
2037 	uint32_t bytes_coalesced;
2038 	uint32_t prev_tx_packets;
2039 	uint32_t prev_tx_bytes;
2040 	uint32_t prev_rx_bytes;
2041 	uint64_t expire_time;
2042 	uint32_t tput_pass_cnt;
2043 };
2044 
2045 /**
2046  * struct dp_swlm_params: Parameters for different modules in the
2047  *			  Software latency manager.
2048  * @rx_traffic_thresh: Threshold for RX traffic, to begin TCL register
2049  *			   write coalescing
2050  * @tx_traffic_thresh: Threshold for TX traffic, to begin TCL register
2051  *			   write coalescing
2052  * @sampling_time: Sampling time to test the throughput threshold
2053  * @time_flush_thresh: Time threshold to flush the TCL HP register write
2054  * @tx_thresh_multiplier: Multiplier to deduce the bytes threshold after
2055  *			      which the TCL HP register is written, thereby
2056  *			      ending the coalescing.
2057  * @tx_pkt_thresh: Threshold for TX packet count, to begin TCL register
2058  *		       write coalescing
2059  * @tcl: TCL ring specific params
2060  */
2061 
2062 struct dp_swlm_params {
2063 	uint32_t rx_traffic_thresh;
2064 	uint32_t tx_traffic_thresh;
2065 	uint32_t sampling_time;
2066 	uint32_t time_flush_thresh;
2067 	uint32_t tx_thresh_multiplier;
2068 	uint32_t tx_pkt_thresh;
2069 	struct dp_swlm_tcl_params tcl[MAX_TCL_DATA_RINGS];
2070 };
2071 
2072 /**
2073  * struct dp_swlm - Software latency manager context
2074  * @ops: SWLM ops pointers
2075  * @is_enabled: SWLM enabled/disabled
2076  * @is_init: SWLM module initialized
2077  * @stats: SWLM stats
2078  * @params: SWLM SRNG params
2080  */
2081 struct dp_swlm {
2082 	struct dp_swlm_ops *ops;
2083 	uint8_t is_enabled:1,
2084 		is_init:1;
2085 	struct dp_swlm_stats stats;
2086 	struct dp_swlm_params params;
2087 };
2088 #endif
2089 
2090 #ifdef IPA_OFFLOAD
2091 /* IPA uC datapath offload Wlan Tx resources */
2092 struct ipa_dp_tx_rsc {
2093 	/* Resource info to be passed to IPA */
2094 	qdf_dma_addr_t ipa_tcl_ring_base_paddr;
2095 	void *ipa_tcl_ring_base_vaddr;
2096 	uint32_t ipa_tcl_ring_size;
2097 	qdf_dma_addr_t ipa_tcl_hp_paddr;
2098 	uint32_t alloc_tx_buf_cnt;
2099 
2100 	qdf_dma_addr_t ipa_wbm_ring_base_paddr;
2101 	void *ipa_wbm_ring_base_vaddr;
2102 	uint32_t ipa_wbm_ring_size;
2103 	qdf_dma_addr_t ipa_wbm_tp_paddr;
2104 	/* WBM2SW HP shadow paddr */
2105 	qdf_dma_addr_t ipa_wbm_hp_shadow_paddr;
2106 
2107 	/* TX buffers populated into the WBM ring */
2108 	void **tx_buf_pool_vaddr_unaligned;
2109 	qdf_dma_addr_t *tx_buf_pool_paddr_unaligned;
2110 };
2111 
2112 /* IPA uC datapath offload Wlan Rx resources */
2113 struct ipa_dp_rx_rsc {
2114 	/* Resource info to be passed to IPA */
2115 	qdf_dma_addr_t ipa_reo_ring_base_paddr;
2116 	void *ipa_reo_ring_base_vaddr;
2117 	uint32_t ipa_reo_ring_size;
2118 	qdf_dma_addr_t ipa_reo_tp_paddr;
2119 
2120 	/* Resource info to be passed to firmware and IPA */
2121 	qdf_dma_addr_t ipa_rx_refill_buf_ring_base_paddr;
2122 	void *ipa_rx_refill_buf_ring_base_vaddr;
2123 	uint32_t ipa_rx_refill_buf_ring_size;
2124 	qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr;
2125 };
2126 #endif
2127 
2128 struct dp_tx_msdu_info_s;
2129 /**
2130  * enum dp_context_type- DP Context Type
2131  * @DP_CONTEXT_TYPE_SOC: Context type DP SOC
2132  * @DP_CONTEXT_TYPE_PDEV: Context type DP PDEV
2133  * @DP_CONTEXT_TYPE_VDEV: Context type DP VDEV
2134  * @DP_CONTEXT_TYPE_PEER: Context type DP PEER
2135  * @DP_CONTEXT_TYPE_MON_SOC: Context type DP MON SOC
2136  * @DP_CONTEXT_TYPE_MON_PDEV: Context type DP MON PDEV
2137  *
2138  * Helper enums to be used to retrieve the size of the corresponding
2139  * data structure by passing the type.
2140  */
2141 enum dp_context_type {
2142 	DP_CONTEXT_TYPE_SOC,
2143 	DP_CONTEXT_TYPE_PDEV,
2144 	DP_CONTEXT_TYPE_VDEV,
2145 	DP_CONTEXT_TYPE_PEER,
2146 	DP_CONTEXT_TYPE_MON_SOC,
2147 	DP_CONTEXT_TYPE_MON_PDEV
2148 };
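
/*
 * Illustrative sketch (assumed usage): the enum above lets common code ask
 * the target specific layer for the size of an opaque context before
 * allocating it, e.g. via the txrx_get_context_size arch op declared below:
 *
 *	qdf_size_t sz = arch_ops->txrx_get_context_size(DP_CONTEXT_TYPE_PDEV);
 *	struct dp_pdev *pdev = qdf_mem_malloc(sz);
 *
 * "arch_ops" is a hypothetical pointer to the struct dp_arch_ops in use.
 */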
2149 
2150 /**
2151  * struct dp_arch_ops - DP target specific arch ops
2152  * @txrx_soc_attach:
2153  * @txrx_soc_detach:
2154  * @txrx_soc_init:
2155  * @txrx_soc_deinit:
2156  * @txrx_soc_srng_alloc:
2157  * @txrx_soc_srng_init:
2158  * @txrx_soc_srng_deinit:
2159  * @txrx_soc_srng_free:
2160  * @txrx_pdev_attach:
2161  * @txrx_pdev_detach:
2162  * @txrx_vdev_attach:
2163  * @txrx_vdev_detach:
2164  * @txrx_peer_map_attach:
2165  * @txrx_peer_map_detach:
2166  * @dp_rxdma_ring_sel_cfg:
2167  * @soc_cfg_attach:
2168  * @txrx_peer_setup:
2169  * @peer_get_reo_hash:
2170  * @reo_remap_config:
2171  * @tx_hw_enqueue: enqueue TX data to HW
2172  * @tx_comp_get_params_from_hal_desc: get software tx descriptor and release
2173  * 				      source from HAL desc for wbm release ring
2174  * @dp_tx_mlo_mcast_send: Tx send handler for MLO multicast enhance
2175  * @dp_tx_process_htt_completion:
2176  * @dp_rx_process:
2177  * @dp_tx_send_fast:
2178  * @dp_tx_desc_pool_init:
2179  * @dp_tx_desc_pool_deinit:
2180  * @dp_rx_desc_pool_init:
2181  * @dp_rx_desc_pool_deinit:
2182  * @dp_wbm_get_rx_desc_from_hal_desc:
2183  * @dp_rx_intrabss_mcast_handler:
2184  * @dp_rx_word_mask_subscribe:
2185  * @dp_rx_desc_cookie_2_va:
2186  * @dp_service_near_full_srngs: Handler for servicing the near full IRQ
2187  * @tx_implicit_rbm_set:
2188  * @dp_rx_peer_metadata_peer_id_get:
2189  * @dp_rx_chain_msdus:
2190  * @txrx_set_vdev_param: target specific ops while setting vdev params
2191  * @txrx_get_vdev_mcast_param: target specific ops for getting vdev
2192  *			       params related to multicast
2193  * @txrx_get_context_size:
2194  * @txrx_get_mon_context_size:
2195  * @dp_srng_test_and_update_nf_params: Check if the srng is in near full state
2196  *				and set the near-full params.
2197  * @dp_tx_mcast_handler:
2198  * @dp_rx_mcast_handler:
2199  * @dp_tx_is_mcast_primary:
2200  * @dp_soc_get_by_idle_bm_id:
2201  * @mlo_peer_find_hash_detach:
2202  * @mlo_peer_find_hash_attach:
2203  * @mlo_peer_find_hash_add:
2204  * @mlo_peer_find_hash_remove:
2205  * @mlo_peer_find_hash_find:
2206  * @get_hw_link_id:
2207  * @dp_rx_peer_set_link_id: set link id in nbuf cb
2208  * @get_reo_qdesc_addr:
2209  * @get_rx_hash_key:
2210  * @dp_set_rx_fst:
2211  * @dp_get_rx_fst:
2212  * @dp_rx_fst_deref:
2213  * @dp_rx_fst_ref:
2214  * @txrx_print_peer_stats:
2215  * @dp_peer_rx_reorder_queue_setup: Dp peer reorder queue setup
2216  * @dp_bank_reconfig:
2217  * @dp_get_soc_by_chip_id: Get soc by chip id
2218  * @dp_soc_get_num_soc:
2219  * @dp_reconfig_tx_vdev_mcast_ctrl:
2220  * @dp_cc_reg_cfg_init:
2221  * @dp_tx_compute_hw_delay:
2222  * @print_mlo_ast_stats:
2223  * @dp_partner_chips_map:
2224  * @dp_partner_chips_unmap:
2225  * @ipa_get_bank_id: Get TCL bank id used by IPA
2226  * @ipa_get_wdi_ver: Get WDI version
2227  * @dp_txrx_ppeds_rings_status:
2228  * @dp_tx_ppeds_inuse_desc:
2229  * @dp_ppeds_clear_stats: Clear ppeds related stats
2230  * @dp_tx_ppeds_cfg_astidx_cache_mapping:
2231  * @dp_txrx_ppeds_rings_stats: Printing the util stats of ring
2232  * @dp_txrx_ppeds_clear_rings_stats: Clearing the ring util stats
2233  * @txrx_soc_ppeds_start:
2234  * @txrx_soc_ppeds_stop:
2235  * @dp_register_ppeds_interrupts:
2236  * @dp_free_ppeds_interrupts:
2237  * @dp_rx_wbm_err_reap_desc: Reap WBM Error Ring Descriptor
2238  * @dp_rx_null_q_desc_handle: Handle Null Queue Exception Error
2239  * @dp_tx_desc_pool_alloc: Allocate arch specific TX descriptor pool
2240  * @dp_tx_desc_pool_free: Free arch specific TX descriptor pool
2241  * @txrx_srng_init: Init txrx srng
2242  * @dp_get_vdev_stats_for_unmap_peer: Get vdev stats pointer for unmap peer
2243  * @dp_get_interface_stats: Get interface stats
2244  * @ppeds_handle_attached:
2245  * @txrx_soc_ppeds_interrupt_stop:
2246  * @txrx_soc_ppeds_interrupt_start:
2247  * @txrx_soc_ppeds_service_status_update:
2248  * @txrx_soc_ppeds_enabled_check:
2249  * @txrx_soc_ppeds_txdesc_pool_reset:
2250  * @dp_update_ring_hptp: Update rings hptp during suspend/resume
2251  * @dp_get_fst_cmem_base: Get CMEM base address for FISA
2252  * @dp_flush_tx_ring: Flush TCL ring HP
2253  * @dp_mlo_print_ptnr_info: print partner vdev info
2254  */
2255 struct dp_arch_ops {
2256 	/* INIT/DEINIT Arch Ops */
2257 	QDF_STATUS (*txrx_soc_attach)(struct dp_soc *soc,
2258 				      struct cdp_soc_attach_params *params);
2259 	QDF_STATUS (*txrx_soc_detach)(struct dp_soc *soc);
2260 	void* (*txrx_soc_init)(struct dp_soc *soc, HTC_HANDLE htc_handle,
2261 			       struct hif_opaque_softc *hif_handle);
2262 	QDF_STATUS (*txrx_soc_deinit)(struct dp_soc *soc);
2263 	QDF_STATUS (*txrx_soc_srng_alloc)(struct dp_soc *soc);
2264 	QDF_STATUS (*txrx_soc_srng_init)(struct dp_soc *soc);
2265 	void (*txrx_soc_srng_deinit)(struct dp_soc *soc);
2266 	void (*txrx_soc_srng_free)(struct dp_soc *soc);
2267 	QDF_STATUS (*txrx_pdev_attach)(struct dp_pdev *pdev,
2268 				       struct cdp_pdev_attach_params *params);
2269 	QDF_STATUS (*txrx_pdev_detach)(struct dp_pdev *pdev);
2270 	QDF_STATUS (*txrx_vdev_attach)(struct dp_soc *soc,
2271 				       struct dp_vdev *vdev);
2272 	QDF_STATUS (*txrx_vdev_detach)(struct dp_soc *soc,
2273 				       struct dp_vdev *vdev);
2274 	QDF_STATUS (*txrx_peer_map_attach)(struct dp_soc *soc);
2275 	void (*txrx_peer_map_detach)(struct dp_soc *soc);
2276 	QDF_STATUS (*dp_rxdma_ring_sel_cfg)(struct dp_soc *soc);
2277 	void (*soc_cfg_attach)(struct dp_soc *soc);
2278 	QDF_STATUS (*txrx_peer_setup)(struct cdp_soc_t *soc_hdl,
2279 				      uint8_t vdev_id, uint8_t *peer_mac,
2280 				      struct cdp_peer_setup_info *setup_info);
2281 	void (*peer_get_reo_hash)(struct dp_vdev *vdev,
2282 				  struct cdp_peer_setup_info *setup_info,
2283 				  enum cdp_host_reo_dest_ring *reo_dest,
2284 				  bool *hash_based,
2285 				  uint8_t *lmac_peer_id_msb);
2286 	 bool (*reo_remap_config)(struct dp_soc *soc, uint32_t *remap0,
2287 				  uint32_t *remap1, uint32_t *remap2);
2288 
2289 	/* TX RX Arch Ops */
2290 	QDF_STATUS (*tx_hw_enqueue)(struct dp_soc *soc, struct dp_vdev *vdev,
2291 				    struct dp_tx_desc_s *tx_desc,
2292 				    uint16_t fw_metadata,
2293 				    struct cdp_tx_exception_metadata *metadata,
2294 				    struct dp_tx_msdu_info_s *msdu_info);
2295 
2296 	void (*tx_comp_get_params_from_hal_desc)(struct dp_soc *soc,
2297 						 void *tx_comp_hal_desc,
2298 						 struct dp_tx_desc_s **desc);
2299 
2300 	qdf_nbuf_t (*dp_tx_mlo_mcast_send)(struct dp_soc *soc,
2301 					   struct dp_vdev *vdev,
2302 					   qdf_nbuf_t nbuf,
2303 					   struct cdp_tx_exception_metadata
2304 					   *tx_exc_metadata);
2305 
2306 	void (*dp_tx_process_htt_completion)(struct dp_soc *soc,
2307 					     struct dp_tx_desc_s *tx_desc,
2308 					     uint8_t *status,
2309 					     uint8_t ring_id);
2310 
2311 	uint32_t (*dp_rx_process)(struct dp_intr *int_ctx,
2312 				  hal_ring_handle_t hal_ring_hdl,
2313 				  uint8_t reo_ring_num, uint32_t quota);
2314 
2315 	qdf_nbuf_t (*dp_tx_send_fast)(struct cdp_soc_t *soc_hdl,
2316 				      uint8_t vdev_id,
2317 				      qdf_nbuf_t nbuf);
2318 
2319 	QDF_STATUS (*dp_tx_desc_pool_init)(struct dp_soc *soc,
2320 					   uint32_t num_elem,
2321 					   uint8_t pool_id);
2322 	void (*dp_tx_desc_pool_deinit)(
2323 				struct dp_soc *soc,
2324 				struct dp_tx_desc_pool_s *tx_desc_pool,
2325 				uint8_t pool_id);
2326 
2327 	QDF_STATUS (*dp_rx_desc_pool_init)(struct dp_soc *soc,
2328 					   struct rx_desc_pool *rx_desc_pool,
2329 					   uint32_t pool_id);
2330 	void (*dp_rx_desc_pool_deinit)(struct dp_soc *soc,
2331 				       struct rx_desc_pool *rx_desc_pool,
2332 				       uint32_t pool_id);
2333 
2334 	QDF_STATUS (*dp_wbm_get_rx_desc_from_hal_desc)(
2335 						struct dp_soc *soc,
2336 						void *ring_desc,
2337 						struct dp_rx_desc **r_rx_desc);
2338 
2339 	bool
2340 	(*dp_rx_intrabss_mcast_handler)(struct dp_soc *soc,
2341 					struct dp_txrx_peer *ta_txrx_peer,
2342 					qdf_nbuf_t nbuf_copy,
2343 					struct cdp_tid_rx_stats *tid_stats,
2344 					uint8_t link_id);
2345 
2346 	void (*dp_rx_word_mask_subscribe)(
2347 				struct dp_soc *soc,
2348 				uint32_t *msg_word,
2349 				void *rx_filter);
2350 
2351 	struct dp_rx_desc *(*dp_rx_desc_cookie_2_va)(struct dp_soc *soc,
2352 						     uint32_t cookie);
2353 	uint32_t (*dp_service_near_full_srngs)(struct dp_soc *soc,
2354 					       struct dp_intr *int_ctx,
2355 					       uint32_t dp_budget);
2356 	void (*tx_implicit_rbm_set)(struct dp_soc *soc, uint8_t tx_ring_id,
2357 				    uint8_t bm_id);
2358 	uint16_t (*dp_rx_peer_metadata_peer_id_get)(struct dp_soc *soc,
2359 						    uint32_t peer_metadata);
2360 	bool (*dp_rx_chain_msdus)(struct dp_soc *soc, qdf_nbuf_t nbuf,
2361 				  uint8_t *rx_tlv_hdr, uint8_t mac_id);
2362 	/* Control Arch Ops */
2363 	QDF_STATUS (*txrx_set_vdev_param)(struct dp_soc *soc,
2364 					  struct dp_vdev *vdev,
2365 					  enum cdp_vdev_param_type param,
2366 					  cdp_config_param_type val);
2367 
2368 	QDF_STATUS (*txrx_get_vdev_mcast_param)(struct dp_soc *soc,
2369 						struct dp_vdev *vdev,
2370 						cdp_config_param_type *val);
2371 
2372 	/* Misc Arch Ops */
2373 	qdf_size_t (*txrx_get_context_size)(enum dp_context_type);
2374 #ifdef WIFI_MONITOR_SUPPORT
2375 	qdf_size_t (*txrx_get_mon_context_size)(enum dp_context_type);
2376 #endif
2377 	int (*dp_srng_test_and_update_nf_params)(struct dp_soc *soc,
2378 						 struct dp_srng *dp_srng,
2379 						 int *max_reap_limit);
2380 
2381 	/* MLO ops */
2382 #ifdef WLAN_FEATURE_11BE_MLO
2383 #ifdef WLAN_MCAST_MLO
2384 	void (*dp_tx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
2385 				    qdf_nbuf_t nbuf);
2386 	bool (*dp_rx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
2387 				    struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
2388 				    uint8_t link_id);
2389 	bool (*dp_tx_is_mcast_primary)(struct dp_soc *soc,
2390 				       struct dp_vdev *vdev);
2391 #endif
2392 	struct dp_soc * (*dp_soc_get_by_idle_bm_id)(struct dp_soc *soc,
2393 						    uint8_t bm_id);
2394 
2395 	void (*mlo_peer_find_hash_detach)(struct dp_soc *soc);
2396 	QDF_STATUS (*mlo_peer_find_hash_attach)(struct dp_soc *soc);
2397 	void (*mlo_peer_find_hash_add)(struct dp_soc *soc,
2398 				       struct dp_peer *peer);
2399 	void (*mlo_peer_find_hash_remove)(struct dp_soc *soc,
2400 					  struct dp_peer *peer);
2401 
2402 	struct dp_peer *(*mlo_peer_find_hash_find)(struct dp_soc *soc,
2403 						   uint8_t *peer_mac_addr,
2404 						   int mac_addr_is_aligned,
2405 						   enum dp_mod_id mod_id,
2406 						   uint8_t vdev_id);
2407 #endif
2408 	uint8_t (*get_hw_link_id)(struct dp_pdev *pdev);
2409 	void (*dp_rx_peer_set_link_id)(qdf_nbuf_t nbuf, uint32_t peer_mdata);
2410 	uint64_t (*get_reo_qdesc_addr)(hal_soc_handle_t hal_soc_hdl,
2411 				       uint8_t *dst_ring_desc,
2412 				       uint8_t *buf,
2413 				       struct dp_txrx_peer *peer,
2414 				       unsigned int tid);
2415 	void (*get_rx_hash_key)(struct dp_soc *soc,
2416 				struct cdp_lro_hash_config *lro_hash);
2417 	void (*dp_set_rx_fst)(struct dp_rx_fst *fst);
2418 	struct dp_rx_fst *(*dp_get_rx_fst)(void);
2419 	uint32_t (*dp_rx_fst_deref)(void);
2420 	void (*dp_rx_fst_ref)(void);
2421 	void (*txrx_print_peer_stats)(struct cdp_peer_stats *peer_stats,
2422 				      enum peer_stats_type stats_type);
2423 	QDF_STATUS (*dp_peer_rx_reorder_queue_setup)(struct dp_soc *soc,
2424 						     struct dp_peer *peer,
2425 						     int tid,
2426 						     uint32_t ba_window_size);
2427 	void (*dp_bank_reconfig)(struct dp_soc *soc, struct dp_vdev *vdev);
2428 
2429 	struct dp_soc * (*dp_get_soc_by_chip_id)(struct dp_soc *soc,
2430 						 uint8_t chip_id);
2431 
2432 	uint8_t (*dp_soc_get_num_soc)(struct dp_soc *soc);
2433 	void (*dp_reconfig_tx_vdev_mcast_ctrl)(struct dp_soc *soc,
2434 					       struct dp_vdev *vdev);
2435 
2436 	void (*dp_cc_reg_cfg_init)(struct dp_soc *soc, bool is_4k_align);
2437 
2438 	QDF_STATUS
2439 	(*dp_tx_compute_hw_delay)(struct dp_soc *soc,
2440 				  struct dp_vdev *vdev,
2441 				  struct hal_tx_completion_status *ts,
2442 				  uint32_t *delay_us);
2443 	void (*print_mlo_ast_stats)(struct dp_soc *soc);
2444 	void (*dp_partner_chips_map)(struct dp_soc *soc,
2445 				     struct dp_peer *peer,
2446 				     uint16_t peer_id);
2447 	void (*dp_partner_chips_unmap)(struct dp_soc *soc,
2448 				       uint16_t peer_id);
2449 
2450 #ifdef IPA_OFFLOAD
2451 	int8_t (*ipa_get_bank_id)(struct dp_soc *soc);
2452 	void (*ipa_get_wdi_ver)(uint8_t *wdi_ver);
2453 #endif
2454 #ifdef WLAN_SUPPORT_PPEDS
2455 	void (*dp_txrx_ppeds_rings_status)(struct dp_soc *soc);
2456 	void (*dp_tx_ppeds_inuse_desc)(struct dp_soc *soc);
2457 	void (*dp_ppeds_clear_stats)(struct dp_soc *soc);
2458 	void (*dp_tx_ppeds_cfg_astidx_cache_mapping)(struct dp_soc *soc,
2459 						     struct dp_vdev *vdev,
2460 						     bool peer_map);
2461 	void (*dp_txrx_ppeds_rings_stats)(struct dp_soc *soc);
2462 	void (*dp_txrx_ppeds_clear_rings_stats)(struct dp_soc *soc);
2463 #endif
2464 	bool (*ppeds_handle_attached)(struct dp_soc *soc);
2465 	QDF_STATUS (*txrx_soc_ppeds_start)(struct dp_soc *soc);
2466 	void (*txrx_soc_ppeds_stop)(struct dp_soc *soc);
2467 	int (*dp_register_ppeds_interrupts)(struct dp_soc *soc,
2468 					    struct dp_srng *srng, int vector,
2469 					    int ring_type, int ring_num);
2470 	void (*dp_free_ppeds_interrupts)(struct dp_soc *soc,
2471 					 struct dp_srng *srng, int ring_type,
2472 					 int ring_num);
2473 	qdf_nbuf_t (*dp_rx_wbm_err_reap_desc)(struct dp_intr *int_ctx,
2474 					      struct dp_soc *soc,
2475 					      hal_ring_handle_t hal_ring_hdl,
2476 					      uint32_t quota,
2477 					      uint32_t *rx_bufs_used);
2478 	QDF_STATUS (*dp_rx_null_q_desc_handle)(struct dp_soc *soc,
2479 					       qdf_nbuf_t nbuf,
2480 					       uint8_t *rx_tlv_hdr,
2481 					       uint8_t pool_id,
2482 					       struct dp_txrx_peer *txrx_peer,
2483 					       bool is_reo_exception,
2484 					       uint8_t link_id);
2485 
2486 	QDF_STATUS (*dp_tx_desc_pool_alloc)(struct dp_soc *soc,
2487 					    uint32_t num_elem,
2488 					    uint8_t pool_id);
2489 	void (*dp_tx_desc_pool_free)(struct dp_soc *soc, uint8_t pool_id);
2490 
2491 	QDF_STATUS (*txrx_srng_init)(struct dp_soc *soc, struct dp_srng *srng,
2492 				     int ring_type, int ring_num, int mac_id);
2493 
2494 	void (*dp_get_vdev_stats_for_unmap_peer)(
2495 					struct dp_vdev *vdev,
2496 					struct dp_peer *peer,
2497 					struct cdp_vdev_stats **vdev_stats);
2498 	QDF_STATUS (*dp_get_interface_stats)(struct cdp_soc_t *soc_hdl,
2499 					     uint8_t vdev_id,
2500 					     void *buf,
2501 					     bool is_aggregate);
2502 #ifdef WLAN_SUPPORT_PPEDS
2503 	void (*txrx_soc_ppeds_interrupt_stop)(struct dp_soc *soc);
2504 	void (*txrx_soc_ppeds_interrupt_start)(struct dp_soc *soc);
2505 	void (*txrx_soc_ppeds_service_status_update)(struct dp_soc *soc,
2506 						     bool enable);
2507 	bool (*txrx_soc_ppeds_enabled_check)(struct dp_soc *soc);
2508 	void (*txrx_soc_ppeds_txdesc_pool_reset)(struct dp_soc *soc,
2509 						 qdf_nbuf_t *nbuf_list);
2510 #endif
2511 	void (*dp_update_ring_hptp)(struct dp_soc *soc, bool force_flush_tx);
2512 	uint64_t (*dp_get_fst_cmem_base)(struct dp_soc *soc, uint64_t size);
2513 	int (*dp_flush_tx_ring)(struct dp_pdev *pdev, int ring_id);
2514 	void (*dp_mlo_print_ptnr_info)(struct dp_vdev *vdev);
2515 };
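
/*
 * Illustrative sketch (assumed dispatch, not the actual service loop): the
 * per-target implementation fills this table at soc attach time and common
 * code dispatches through it, for example when servicing a REO destination
 * ring:
 *
 *	work_done = soc->arch_ops.dp_rx_process(int_ctx, hal_ring_hdl,
 *						reo_ring_num, remaining_quota);
 *
 * "int_ctx", "hal_ring_hdl", "reo_ring_num" and "remaining_quota" are
 * hypothetical locals matching the dp_rx_process signature above.
 */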
2516 
2517 /**
2518  * struct dp_soc_features: Data structure holding the SOC level feature flags.
2519  * @pn_in_reo_dest: PN provided by hardware in the REO destination ring.
2520  * @dmac_cmn_src_rxbuf_ring_enabled: Flag to indicate DMAC mode common Rx
2521  *				     buffer source rings
2522  * @rssi_dbm_conv_support: Rssi dbm conversion support param.
2523  * @umac_hw_reset_support: UMAC HW reset support
2524  * @wds_ext_ast_override_enable:
2525  */
2526 struct dp_soc_features {
2527 	uint8_t pn_in_reo_dest:1,
2528 		dmac_cmn_src_rxbuf_ring_enabled:1;
2529 	bool rssi_dbm_conv_support;
2530 	bool umac_hw_reset_support;
2531 	bool wds_ext_ast_override_enable;
2532 };
2533 
2534 enum sysfs_printing_mode {
2535 	PRINTING_MODE_DISABLED = 0,
2536 	PRINTING_MODE_ENABLED
2537 };
2538 
2539 /**
2540  * typedef notify_pre_reset_fw_callback() - pre-reset callback
2541  * @soc: DP SoC
2542  */
2543 typedef void (*notify_pre_reset_fw_callback)(struct dp_soc *soc);
2544 
2545 #ifdef WLAN_SYSFS_DP_STATS
2546 /**
2547  * struct sysfs_stats_config: Data structure holding stats sysfs config.
2548  * @rw_stats_lock: Lock to read and write to stat_type and pdev_id.
2549  * @sysfs_read_lock: Lock held while another stat req is being executed.
2550  * @sysfs_write_user_buffer: Lock to change buff len, max buf len
2551  * and *buf.
2552  * @sysfs_txrx_fw_request_done: Event to wait for firmware response.
2553  * @stat_type_requested: stat type requested.
2554  * @mac_id: mac id for which stat type are requested.
2555  * @printing_mode: Should a print go through.
2556  * @process_id: Process allowed to write to buffer.
2557  * @curr_buffer_length: Curr length of buffer written
2558  * @max_buffer_length: Max buffer length.
2559  * @buf: Sysfs buffer.
2560  */
2561 struct sysfs_stats_config {
2562 	/* lock held to read stats */
2563 	qdf_spinlock_t rw_stats_lock;
2564 	qdf_mutex_t sysfs_read_lock;
2565 	qdf_spinlock_t sysfs_write_user_buffer;
2566 	qdf_event_t sysfs_txrx_fw_request_done;
2567 	uint32_t stat_type_requested;
2568 	uint32_t mac_id;
2569 	enum sysfs_printing_mode printing_mode;
2570 	int process_id;
2571 	uint16_t curr_buffer_length;
2572 	uint16_t max_buffer_length;
2573 	char *buf;
2574 };
2575 #endif
2576 
2577 struct test_mem_free {
2578 	unsigned long ts_qdesc_mem_hdl;
2579 	qdf_dma_addr_t hw_qdesc_paddr;
2580 	void *hw_qdesc_vaddr_align;
2581 	void *hw_qdesc_vaddr_unalign;
2582 	uint32_t peer_id;
2583 	uint32_t tid;
2584 	uint8_t chip_id;
2585 	unsigned long ts_hw_flush_back;
2586 };
2587 
2588 struct test_qaddr_del {
2589 	unsigned long ts_qaddr_del;
2590 	uint32_t peer_id;
2591 	uint32_t paddr;
2592 	uint32_t tid;
2593 	uint8_t chip_id;
2594 };
2595 
2596 /* SOC level structure for data path */
2597 struct dp_soc {
2598 	/**
2599 	 * re-use memory section starts
2600 	 */
2601 
2602 	/* Common base structure - Should be the first member */
2603 	struct cdp_soc_t cdp_soc;
2604 
2605 	/* SoC Obj */
2606 	struct cdp_ctrl_objmgr_psoc *ctrl_psoc;
2607 
2608 	/* OS device abstraction */
2609 	qdf_device_t osdev;
2610 
2611 	/*cce disable*/
2612 	bool cce_disable;
2613 
2614 	/* WLAN config context */
2615 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx;
2616 
2617 	/* HTT handle for host-fw interaction */
2618 	struct htt_soc *htt_handle;
2619 
2620 	/* Common init done */
2621 	qdf_atomic_t cmn_init_done;
2622 
2623 	/* Opaque hif handle */
2624 	struct hif_opaque_softc *hif_handle;
2625 
2626 	/* PDEVs on this SOC */
2627 	struct dp_pdev *pdev_list[MAX_PDEV_CNT];
2628 
2629 	/* Ring used to replenish rx buffers (maybe to the firmware or MAC) */
2630 	struct dp_srng rx_refill_buf_ring[MAX_PDEV_CNT];
2631 
2632 	struct dp_srng rxdma_mon_desc_ring[MAX_NUM_LMAC_HW];
2633 
2634 	/* RXDMA error destination ring */
2635 	struct dp_srng rxdma_err_dst_ring[MAX_NUM_LMAC_HW];
2636 
2637 	/* RXDMA monitor buffer replenish ring */
2638 	struct dp_srng rxdma_mon_buf_ring[MAX_NUM_LMAC_HW];
2639 
2640 	/* RXDMA monitor destination ring */
2641 	struct dp_srng rxdma_mon_dst_ring[MAX_NUM_LMAC_HW];
2642 
2643 	/* RXDMA monitor status ring. TBD: Check format of this ring */
2644 	struct dp_srng rxdma_mon_status_ring[MAX_NUM_LMAC_HW];
2645 
2646 	/* Ring to handover links to hw in monitor mode for SOFTUMAC arch */
2647 	struct dp_srng sw2rxdma_link_ring[MAX_NUM_LMAC_HW];
2648 
2649 	/* Number of PDEVs */
2650 	uint8_t pdev_count;
2651 
2652 	/*ast override support in HW*/
2653 	bool ast_override_support;
2654 
2655 	/*number of hw dscp tid map*/
2656 	uint8_t num_hw_dscp_tid_map;
2657 
2658 	/* HAL SOC handle */
2659 	hal_soc_handle_t hal_soc;
2660 
2661 	/* rx monitor pkt tlv size */
2662 	uint16_t rx_mon_pkt_tlv_size;
2663 	/* rx pkt tlv size */
2664 	uint16_t rx_pkt_tlv_size;
2665 	/* rx pkt tlv size in current operation mode */
2666 	uint16_t curr_rx_pkt_tlv_size;
2667 
2668 	struct dp_arch_ops arch_ops;
2669 
2670 	/* Device ID coming from Bus sub-system */
2671 	uint32_t device_id;
2672 
2673 	/* Link descriptor pages */
2674 	struct qdf_mem_multi_page_t link_desc_pages;
2675 
2676 	/* total link descriptors for regular RX and TX */
2677 	uint32_t total_link_descs;
2678 
2679 	/* Link descriptor Idle list for HW internal use (SRNG mode) */
2680 	struct dp_srng wbm_idle_link_ring;
2681 
2682 	/* Link descriptor Idle list for HW internal use (scatter buffer mode)
2683 	 */
2684 	qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS];
2685 	void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS];
2686 	uint32_t num_scatter_bufs;
2687 
2688 	/* Tx SW descriptor pool */
2689 	struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS];
2690 
2691 	/* Tx MSDU Extension descriptor pool */
2692 	struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS];
2693 
2694 	/* Tx TSO descriptor pool */
2695 	struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS];
2696 
2697 	/* Tx TSO Num of segments pool */
2698 	struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS];
2699 
2700 	/* REO destination rings */
2701 	struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS];
2702 
2703 	/* REO exception ring - See if this should be combined with reo_dest_ring */
2704 	struct dp_srng reo_exception_ring;
2705 
2706 	/* REO reinjection ring */
2707 	struct dp_srng reo_reinject_ring;
2708 
2709 	/* REO command ring */
2710 	struct dp_srng reo_cmd_ring;
2711 
2712 	/* REO command status ring */
2713 	struct dp_srng reo_status_ring;
2714 
2715 	/* WBM Rx release ring */
2716 	struct dp_srng rx_rel_ring;
2717 
2718 	/* TCL data ring */
2719 	struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS];
2720 
2721 	/* Number of Tx comp rings */
2722 	uint8_t num_tx_comp_rings;
2723 
2724 	/* Number of TCL data rings */
2725 	uint8_t num_tcl_data_rings;
2726 
2727 	/* TCL CMD_CREDIT ring */
2728 	bool init_tcl_cmd_cred_ring;
2729 
2730 	/* Used as a credit based ring on QCN9000, else as a command ring */
2731 	struct dp_srng tcl_cmd_credit_ring;
2732 
2733 	/* TCL command status ring */
2734 	struct dp_srng tcl_status_ring;
2735 
2736 	/* WBM Tx completion rings */
2737 	struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS];
2738 
2739 	/* Common WBM link descriptor release ring (SW to WBM) */
2740 	struct dp_srng wbm_desc_rel_ring;
2741 
2742 	/* DP Interrupts */
2743 	struct dp_intr intr_ctx[WLAN_CFG_INT_NUM_CONTEXTS];
2744 
2745 	/* Monitor mode mac id to dp_intr_id map */
2746 	int mon_intr_id_lmac_map[MAX_NUM_LMAC_HW];
2747 	/* Rx SW descriptor pool for RXDMA monitor buffer */
2748 	struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS];
2749 
2750 	/* Rx SW descriptor pool for RXDMA status buffer */
2751 	struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS];
2752 
2753 	/* Rx SW descriptor pool for RXDMA buffer */
2754 	struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS];
2755 
2756 	/* Number of REO destination rings */
2757 	uint8_t num_reo_dest_rings;
2758 
2759 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2760 	/* lock to control access to soc TX descriptors */
2761 	qdf_spinlock_t flow_pool_array_lock;
2762 
2763 	/* pause callback to pause TX queues as per flow control */
2764 	tx_pause_callback pause_cb;
2765 
2766 	/* flow pool related statistics */
2767 	struct dp_txrx_pool_stats pool_stats;
2768 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
2769 
2770 	notify_pre_reset_fw_callback notify_fw_callback;
2771 
2772 	unsigned long service_rings_running;
2773 
2774 	uint32_t wbm_idle_scatter_buf_size;
2775 
2776 	/* VDEVs on this SOC */
2777 	struct dp_vdev *vdev_id_map[MAX_VDEV_CNT];
2778 
2779 	/* Tx H/W queues lock */
2780 	qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];
2781 
2782 	/* Tx ring map for interrupt processing */
2783 	uint8_t tx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
2784 
2785 	/* Rx ring map for interrupt processing */
2786 	uint8_t rx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
2787 
2788 	/* peer ID to peer object map (array of pointers to peer objects) */
2789 	struct dp_peer **peer_id_to_obj_map;
2790 
2791 	struct {
2792 		unsigned mask;
2793 		unsigned idx_bits;
2794 		TAILQ_HEAD(, dp_peer) * bins;
2795 	} peer_hash;
2796 
2797 	/* rx defrag state - TBD: do we need this per radio? */
2798 	struct {
2799 		struct {
2800 			TAILQ_HEAD(, dp_rx_tid_defrag) waitlist;
2801 			uint32_t timeout_ms;
2802 			uint32_t next_flush_ms;
2803 			qdf_spinlock_t defrag_lock;
2804 		} defrag;
2805 		struct {
2806 			int defrag_timeout_check;
2807 			int dup_check;
2808 		} flags;
2809 		TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list;
2810 		qdf_spinlock_t reo_cmd_lock;
2811 	} rx;
2812 
2813 	/* optional rx processing function */
2814 	void (*rx_opt_proc)(
2815 		struct dp_vdev *vdev,
2816 		struct dp_peer *peer,
2817 		unsigned tid,
2818 		qdf_nbuf_t msdu_list);
2819 
2820 	/* pool addr for mcast enhance buff */
2821 	struct {
2822 		int size;
2823 		uint32_t paddr;
2824 		uint32_t *vaddr;
2825 		struct dp_tx_me_buf_t *freelist;
2826 		int buf_in_use;
2827 		qdf_dma_mem_context(memctx);
2828 	} me_buf;
2829 
2830 	/* Protect peer hash table */
2831 	DP_MUTEX_TYPE peer_hash_lock;
2832 	/* Protect peer_id_to_objmap */
2833 	DP_MUTEX_TYPE peer_map_lock;
2834 
2835 	/* maximum number of supported peers */
2836 	uint32_t max_peers;
2837 	/* maximum value for peer_id */
2838 	uint32_t max_peer_id;
2839 
2840 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
2841 	uint32_t peer_id_shift;
2842 	uint32_t peer_id_mask;
2843 #endif
2844 
2845 	/* rx peer metadata field shift and mask configuration */
2846 	uint8_t htt_peer_id_s;
2847 	uint32_t htt_peer_id_m;
2848 	uint8_t htt_vdev_id_s;
2849 	uint32_t htt_vdev_id_m;
2850 	uint8_t htt_mld_peer_valid_s;
2851 	uint32_t htt_mld_peer_valid_m;
2852 	/* rx peer metadata version */
2853 	uint8_t rx_peer_metadata_ver;
2854 
2855 	/* SoC level data path statistics */
2856 	struct dp_soc_stats stats;
2857 #ifdef WLAN_SYSFS_DP_STATS
2858 	/* sysfs config for DP stats */
2859 	struct sysfs_stats_config *sysfs_config;
2860 #endif
2861 	/* timestamp to keep track of msdu buffers received on reo err ring */
2862 	uint64_t rx_route_err_start_pkt_ts;
2863 
2864 	/* Num RX Route err in a given window to keep track of rate of errors */
2865 	uint32_t rx_route_err_in_window;
2866 
2867 	/* Enable processing of Tx completion status words */
2868 	bool process_tx_status;
2869 	bool process_rx_status;
2870 	struct dp_ast_entry **ast_table;
2871 	struct {
2872 		unsigned mask;
2873 		unsigned idx_bits;
2874 		TAILQ_HEAD(, dp_ast_entry) * bins;
2875 	} ast_hash;
2876 
2877 #ifdef DP_TX_HW_DESC_HISTORY
2878 	struct dp_tx_hw_desc_history tx_hw_desc_history;
2879 #endif
2880 
2881 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
2882 	struct dp_rx_history *rx_ring_history[MAX_REO_DEST_RINGS];
2883 	struct dp_rx_refill_history *rx_refill_ring_history[MAX_PDEV_CNT];
2884 	struct dp_rx_err_history *rx_err_ring_history;
2885 	struct dp_rx_reinject_history *rx_reinject_ring_history;
2886 #endif
2887 
2888 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
2889 	struct dp_mon_status_ring_history *mon_status_ring_history;
2890 #endif
2891 
2892 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
2893 	struct dp_tx_tcl_history tx_tcl_history;
2894 	struct dp_tx_comp_history tx_comp_history;
2895 #endif
2896 
2897 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY
2898 	struct dp_cfg_event_history cfg_event_history;
2899 #endif
2900 
2901 	qdf_spinlock_t ast_lock;
2902 	/*Timer for AST entry ageout maintenance */
2903 	qdf_timer_t ast_aging_timer;
2904 
2905 	/*Timer counter for WDS AST entry ageout*/
2906 	uint8_t wds_ast_aging_timer_cnt;
2907 	bool pending_ageout;
2908 	bool ast_offload_support;
2909 	bool host_ast_db_enable;
2910 	uint32_t max_ast_ageout_count;
2911 	uint8_t eapol_over_control_port;
2912 
2913 	uint8_t sta_mode_search_policy;
2914 	qdf_timer_t lmac_reap_timer;
2915 	uint8_t lmac_timer_init;
2916 	qdf_timer_t int_timer;
2917 	uint8_t intr_mode;
2918 	uint8_t lmac_polled_mode;
2919 
2920 	qdf_list_t reo_desc_freelist;
2921 	qdf_spinlock_t reo_desc_freelist_lock;
2922 
2923 	/* htt stats */
2924 	struct htt_t2h_stats htt_stats;
2925 
2926 	void *external_txrx_handle; /* External data path handle */
2927 	qdf_atomic_t ipa_mapped;
2928 #ifdef IPA_OFFLOAD
2929 	struct ipa_dp_tx_rsc ipa_uc_tx_rsc;
2930 #ifdef IPA_WDI3_TX_TWO_PIPES
2931 	/* Resources for the alternative IPA TX pipe */
2932 	struct ipa_dp_tx_rsc ipa_uc_tx_rsc_alt;
2933 #endif
2934 
2935 	struct ipa_dp_rx_rsc ipa_uc_rx_rsc;
2936 #ifdef IPA_WDI3_VLAN_SUPPORT
2937 	struct ipa_dp_rx_rsc ipa_uc_rx_rsc_alt;
2938 #endif
2939 	qdf_atomic_t ipa_pipes_enabled;
2940 	bool ipa_first_tx_db_access;
2941 	qdf_spinlock_t ipa_rx_buf_map_lock;
2942 	bool ipa_rx_buf_map_lock_initialized;
2943 	uint8_t ipa_reo_ctx_lock_required[MAX_REO_DEST_RINGS];
2944 #endif
2945 
2946 #ifdef WLAN_FEATURE_STATS_EXT
2947 	struct {
2948 		uint32_t rx_mpdu_received;
2949 		uint32_t rx_mpdu_missed;
2950 	} ext_stats;
2951 	qdf_event_t rx_hw_stats_event;
2952 	qdf_spinlock_t rx_hw_stats_lock;
2953 	bool is_last_stats_ctx_init;
2954 #endif /* WLAN_FEATURE_STATS_EXT */
2955 
2956 	/* Indicates HTT map/unmap versions*/
2957 	uint8_t peer_map_unmap_versions;
2958 	/* Per peer per Tid ba window size support */
2959 	uint8_t per_tid_basize_max_tid;
2960 	/* Soc level flag to enable da_war */
2961 	uint8_t da_war_enabled;
2962 	/* number of active ast entries */
2963 	uint32_t num_ast_entries;
2964 	/* peer extended rate statistics context at soc level*/
2965 	struct cdp_soc_rate_stats_ctx *rate_stats_ctx;
2966 	/* peer extended rate statistics control flag */
2967 	bool peerstats_enabled;
2968 
2969 	/* 8021p PCP-TID map values */
2970 	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
2971 	/* TID map priority value */
2972 	uint8_t tidmap_prty;
2973 	/* Pointer to global per ring type specific configuration table */
2974 	struct wlan_srng_cfg *wlan_srng_cfg;
2975 	/* Num Tx outstanding on device */
2976 	qdf_atomic_t num_tx_outstanding;
2977 	/* Num Tx exception on device */
2978 	qdf_atomic_t num_tx_exception;
2979 	/* Num Tx allowed */
2980 	uint32_t num_tx_allowed;
2981 	/* Num Regular Tx allowed */
2982 	uint32_t num_reg_tx_allowed;
2983 	/* Num Tx allowed for special frames*/
2984 	uint32_t num_tx_spl_allowed;
2985 	/* Preferred HW mode */
2986 	uint8_t preferred_hw_mode;
2987 
2988 	/**
2989 	 * Flag to indicate whether WAR to address single cache entry
2990 	 * invalidation bug is enabled or not
2991 	 */
2992 	bool is_rx_fse_full_cache_invalidate_war_enabled;
2993 #if defined(WLAN_SUPPORT_RX_FLOW_TAG)
2994 	/**
2995 	 * Pointer to DP RX Flow FST at SOC level if
2996 	 * is_rx_flow_search_table_per_pdev is false
2997 	 * TBD: rx_fst[num_macs] if we decide to have per mac FST
2998 	 */
2999 	struct dp_rx_fst *rx_fst;
3000 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
3001 	/* SG supported for msdu continued packets from wbm release ring */
3002 	bool wbm_release_desc_rx_sg_support;
3003 	bool peer_map_attach_success;
3004 	/* Flag to disable mac1 ring interrupts */
3005 	bool disable_mac1_intr;
3006 	/* Flag to disable mac2 ring interrupts */
3007 	bool disable_mac2_intr;
3008 
3009 	struct {
3010 		/* 1st msdu in sg for msdu continued packets in wbm rel ring */
3011 		bool wbm_is_first_msdu_in_sg;
3012 		/* Wbm sg list head */
3013 		qdf_nbuf_t wbm_sg_nbuf_head;
3014 		/* Wbm sg list tail */
3015 		qdf_nbuf_t wbm_sg_nbuf_tail;
3016 		uint32_t wbm_sg_desc_msdu_len;
3017 	} wbm_sg_param;
3018 	/* Number of msdu exception descriptors */
3019 	uint32_t num_msdu_exception_desc;
3020 
3021 	/* RX buffer params */
3022 	struct rx_buff_pool rx_buff_pool[MAX_PDEV_CNT];
3023 	struct rx_refill_buff_pool rx_refill_buff_pool;
3024 	/* Save recent operation related variable */
3025 	struct dp_last_op_info last_op_info;
3026 	TAILQ_HEAD(, dp_peer) inactive_peer_list;
3027 	qdf_spinlock_t inactive_peer_list_lock;
3028 	TAILQ_HEAD(, dp_vdev) inactive_vdev_list;
3029 	qdf_spinlock_t inactive_vdev_list_lock;
3030 	/* lock to protect vdev_id_map table*/
3031 	qdf_spinlock_t vdev_map_lock;
3032 
3033 	/* Flow Search Table is in CMEM */
3034 	bool fst_in_cmem;
3035 
3036 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
3037 	struct dp_swlm swlm;
3038 #endif
3039 
3040 #ifdef FEATURE_RUNTIME_PM
3041 	/* DP Rx timestamp */
3042 	qdf_time_t rx_last_busy;
3043 	/* Dp runtime refcount */
3044 	qdf_atomic_t dp_runtime_refcount;
3045 	/* Dp tx pending count in RTPM */
3046 	qdf_atomic_t tx_pending_rtpm;
3047 #endif
3048 	/* Invalid buffers that were allocated for RX buffers */
3049 	qdf_nbuf_queue_t invalid_buf_queue;
3050 
3051 #ifdef FEATURE_MEC
3052 	/** @mec_lock: spinlock for MEC table */
3053 	qdf_spinlock_t mec_lock;
3054 	/** @mec_cnt: number of active mec entries */
3055 	qdf_atomic_t mec_cnt;
3056 	struct {
3057 		/** @mask: mask bits */
3058 		uint32_t mask;
3059 		/** @idx_bits: index to shift bits */
3060 		uint32_t idx_bits;
3061 		/** @bins: MEC table */
3062 		TAILQ_HEAD(, dp_mec_entry) * bins;
3063 	} mec_hash;
3064 #endif
3065 
3066 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
3067 	qdf_list_t reo_desc_deferred_freelist;
3068 	qdf_spinlock_t reo_desc_deferred_freelist_lock;
3069 	bool reo_desc_deferred_freelist_init;
3070 #endif
3071 	/* BM id for first WBM2SW  ring */
3072 	uint32_t wbm_sw0_bm_id;
3073 
3074 	/* Store arch_id from device_id */
3075 	uint16_t arch_id;
3076 
3077 	/* link desc ID start per device type */
3078 	uint32_t link_desc_id_start;
3079 
3080 	/* CMEM buffer target reserved for host usage */
3081 	uint64_t cmem_base;
3082 	/* CMEM size in bytes */
3083 	uint64_t cmem_total_size;
3084 	/* CMEM free size in bytes */
3085 	uint64_t cmem_avail_size;
3086 
3087 	/* SOC level feature flags */
3088 	struct dp_soc_features features;
3089 
3090 #ifdef WIFI_MONITOR_SUPPORT
3091 	struct dp_mon_soc *monitor_soc;
3092 #endif
3093 	uint8_t rxdma2sw_rings_not_supported:1,
3094 		wbm_sg_last_msdu_war:1,
3095 		mec_fw_offload:1,
3096 		multi_peer_grp_cmd_supported:1,
3097 		umac_reset_supported:1;
3098 
3099 	/* Number of Rx refill rings */
3100 	uint8_t num_rx_refill_buf_rings;
3101 #ifdef FEATURE_RUNTIME_PM
3102 	/* flag to indicate vote for runtime_pm for high tput case */
3103 	qdf_atomic_t rtpm_high_tput_flag;
3104 #endif
3105 	/* Buffer manager ID for idle link descs */
3106 	uint8_t idle_link_bm_id;
3107 	qdf_atomic_t ref_count;
3108 
3109 	unsigned long vdev_stats_id_map;
3110 	bool txmon_hw_support;
3111 
3112 #ifdef DP_UMAC_HW_RESET_SUPPORT
3113 	struct dp_soc_umac_reset_ctx umac_reset_ctx;
3114 #endif
3115 	/* PPDU to link_id mapping parameters */
3116 	uint8_t link_id_offset;
3117 	uint8_t link_id_bits;
3118 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
3119 	/* A flag used to decide switching of the rx link speed */
3120 	bool high_throughput;
3121 #endif
3122 	bool is_tx_pause;
3123 
3124 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
3125 	/* number of IPv4 flows inserted */
3126 	qdf_atomic_t ipv4_fse_cnt;
3127 	/* number of IPv6 flows inserted */
3128 	qdf_atomic_t ipv6_fse_cnt;
3129 #endif
3130 	/* Reo queue ref table items */
3131 	struct reo_queue_ref_table reo_qref;
3132 #ifdef DP_TX_PACKET_INSPECT_FOR_ILP
3133 	/* Flag to show if TX ILP is enabled */
3134 	bool tx_ilp_enable;
3135 #endif
3136 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
3137 	uint8_t mld_mode_ap;
3138 #endif
3139 	struct test_qaddr_del *list_shared_qaddr_del;
3140 	struct test_qaddr_del *reo_write_list;
3141 	struct test_mem_free *list_qdesc_addr_free;
3142 	struct test_mem_free *list_qdesc_addr_alloc;
3143 	uint64_t free_addr_list_idx;
3144 	uint64_t alloc_addr_list_idx;
3145 	uint64_t shared_qaddr_del_idx;
3146 	uint64_t write_paddr_list_idx;
3147 };
3148 
3149 #ifdef IPA_OFFLOAD
3150 /**
3151  * struct dp_ipa_resources - Resources needed for IPA
3152  * @tx_ring:
3153  * @tx_num_alloc_buffer:
3154  * @tx_comp_ring:
3155  * @rx_rdy_ring:
3156  * @rx_refill_ring:
3157  * @tx_comp_doorbell_paddr: IPA UC doorbell registers paddr
3158  * @tx_comp_doorbell_vaddr:
3159  * @rx_ready_doorbell_paddr:
3160  * @is_db_ddr_mapped:
3161  * @tx_alt_ring:
3162  * @tx_alt_ring_num_alloc_buffer:
3163  * @tx_alt_comp_ring:
3164  * @tx_alt_comp_doorbell_paddr: IPA UC doorbell registers paddr
3165  * @tx_alt_comp_doorbell_vaddr:
3166  * @rx_alt_rdy_ring:
3167  * @rx_alt_refill_ring:
3168  * @rx_alt_ready_doorbell_paddr:
3169  */
3170 struct dp_ipa_resources {
3171 	qdf_shared_mem_t tx_ring;
3172 	uint32_t tx_num_alloc_buffer;
3173 
3174 	qdf_shared_mem_t tx_comp_ring;
3175 	qdf_shared_mem_t rx_rdy_ring;
3176 	qdf_shared_mem_t rx_refill_ring;
3177 
3178 	/* IPA UC doorbell registers paddr */
3179 	qdf_dma_addr_t tx_comp_doorbell_paddr;
3180 	uint32_t *tx_comp_doorbell_vaddr;
3181 	qdf_dma_addr_t rx_ready_doorbell_paddr;
3182 
3183 	bool is_db_ddr_mapped;
3184 
3185 #ifdef IPA_WDI3_TX_TWO_PIPES
3186 	qdf_shared_mem_t tx_alt_ring;
3187 	uint32_t tx_alt_ring_num_alloc_buffer;
3188 	qdf_shared_mem_t tx_alt_comp_ring;
3189 
3190 	/* IPA UC doorbell registers paddr */
3191 	qdf_dma_addr_t tx_alt_comp_doorbell_paddr;
3192 	uint32_t *tx_alt_comp_doorbell_vaddr;
3193 #endif
3194 #ifdef IPA_WDI3_VLAN_SUPPORT
3195 	qdf_shared_mem_t rx_alt_rdy_ring;
3196 	qdf_shared_mem_t rx_alt_refill_ring;
3197 	qdf_dma_addr_t rx_alt_ready_doorbell_paddr;
3198 #endif
3199 };
3200 #endif
3201 
3202 #define MAX_RX_MAC_RINGS 2
3203 /* Same as NAC_MAX_CLENT */
3204 #define DP_NAC_MAX_CLIENT  24
3205 
3206 /*
3207  * 24 bits cookie size
3208  * 10 bits page id 0 ~ 1023 for MCL
3209  * 3 bits page id 0 ~ 7 for WIN
3210  * WBM Idle List Desc size = 128,
3211  * Num descs per page = 4096/128 = 32 for MCL
3212  * Num descs per page = 2MB/128 = 16384 for WIN
3213  */
3214 /*
3215  * Macros to setup link descriptor cookies - for link descriptors, we just
3216  * need first 3 bits to store bank/page ID for WIN. The
3217  * remaining bytes will be used to set a unique ID, which will
3218  * be useful in debugging
3219  */
3220 #ifdef MAX_ALLOC_PAGE_SIZE
3221 #if PAGE_SIZE == 4096
3222 #define LINK_DESC_PAGE_ID_MASK  0x007FE0
3223 #define LINK_DESC_ID_SHIFT      5
3224 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x8000
3225 #elif PAGE_SIZE == 65536
3226 #define LINK_DESC_PAGE_ID_MASK  0x007E00
3227 #define LINK_DESC_ID_SHIFT      9
3228 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x800
3229 #else
3230 #error "Unsupported kernel PAGE_SIZE"
3231 #endif
3232 #define LINK_DESC_COOKIE(_desc_id, _page_id, _desc_id_start) \
3233 	((((_page_id) + (_desc_id_start)) << LINK_DESC_ID_SHIFT) | (_desc_id))
3234 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
3235 	(((_cookie) & LINK_DESC_PAGE_ID_MASK) >> LINK_DESC_ID_SHIFT)
3236 #else
3237 #define LINK_DESC_PAGE_ID_MASK  0x7
3238 #define LINK_DESC_ID_SHIFT      3
3239 #define LINK_DESC_COOKIE(_desc_id, _page_id, _desc_id_start) \
3240 	((((_desc_id) + (_desc_id_start)) << LINK_DESC_ID_SHIFT) | (_page_id))
3241 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
3242 	((_cookie) & LINK_DESC_PAGE_ID_MASK)
3243 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x8000
3244 #endif
3245 #define LINK_DESC_ID_START_20_BITS_COOKIE 0x4000
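
/*
 * Illustrative example for the 3-bit page-id (WIN) layout above: for
 * descriptor index 10 on page 5 with a 21-bit id start,
 *
 *	cookie  = LINK_DESC_COOKIE(10, 5, LINK_DESC_ID_START_21_BITS_COOKIE)
 *		= ((10 + 0x8000) << 3) | 5 = 0x40055
 *	page_id = LINK_DESC_COOKIE_PAGE_ID(0x40055) = 0x40055 & 0x7 = 5
 */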
3246 
3247 /* same as ieee80211_nac_param */
3248 enum dp_nac_param_cmd {
3249 	/* IEEE80211_NAC_PARAM_ADD */
3250 	DP_NAC_PARAM_ADD = 1,
3251 	/* IEEE80211_NAC_PARAM_DEL */
3252 	DP_NAC_PARAM_DEL,
3253 	/* IEEE80211_NAC_PARAM_LIST */
3254 	DP_NAC_PARAM_LIST,
3255 };
3256 
3257 /**
3258  * struct dp_neighbour_peer - neighbour peer list type for smart mesh
3259  * @neighbour_peers_macaddr: neighbour peer's mac address
3260  * @vdev: associated vdev
3261  * @ast_entry: ast_entry for neighbour peer
3262  * @rssi: rssi value
3263  * @neighbour_peer_list_elem: neighbour peer list TAILQ element
3264  */
3265 struct dp_neighbour_peer {
3266 	union dp_align_mac_addr neighbour_peers_macaddr;
3267 	struct dp_vdev *vdev;
3268 	struct dp_ast_entry *ast_entry;
3269 	uint8_t rssi;
3270 	TAILQ_ENTRY(dp_neighbour_peer) neighbour_peer_list_elem;
3271 };
3272 
3273 #ifdef WLAN_TX_PKT_CAPTURE_ENH
3274 #define WLAN_TX_PKT_CAPTURE_ENH 1
3275 #define DP_TX_PPDU_PROC_THRESHOLD 8
3276 #define DP_TX_PPDU_PROC_TIMEOUT 10
3277 #endif
3278 
3279 /**
3280  * struct ppdu_info - PPDU Status info descriptor
3281  * @ppdu_id: Unique ppduid assigned by firmware for every tx packet
3282  * @sched_cmdid: schedule command id, which will be same in a burst
3283  * @max_ppdu_id: wrap around for ppdu id
3284  * @tsf_l32:
3285  * @tlv_bitmap:
3286  * @last_tlv_cnt: Keep track for missing ppdu tlvs
3287  * @last_user: last ppdu processed for user
3288  * @is_ampdu: set if Ampdu aggregate
3289  * @nbuf: ppdu descriptor payload
3290  * @ppdu_desc: ppdu descriptor
3291  * @ulist: Union of lists
3292  * @ppdu_info_dlist_elem: linked list of ppdu tlvs
3293  * @ppdu_info_slist_elem: Singly linked list (queue) of ppdu tlvs
3294  * @ppdu_info_list_elem: linked list of ppdu tlvs
3295  * @ppdu_info_queue_elem: Singly linked list (queue) of ppdu tlvs
3296  * @compltn_common_tlv: Successful tlv counter from COMPLTN COMMON tlv
3297  * @ack_ba_tlv: Successful tlv counter from ACK BA tlv
3298  * @done:
3299  */
3300 struct ppdu_info {
3301 	uint32_t ppdu_id;
3302 	uint32_t sched_cmdid;
3303 	uint32_t max_ppdu_id;
3304 	uint32_t tsf_l32;
3305 	uint16_t tlv_bitmap;
3306 	uint16_t last_tlv_cnt;
3307 	uint16_t last_user:8,
3308 		 is_ampdu:1;
3309 	qdf_nbuf_t nbuf;
3310 	struct cdp_tx_completion_ppdu *ppdu_desc;
3311 #ifdef WLAN_TX_PKT_CAPTURE_ENH
3312 	union {
3313 		TAILQ_ENTRY(ppdu_info) ppdu_info_dlist_elem;
3314 		STAILQ_ENTRY(ppdu_info) ppdu_info_slist_elem;
3315 	} ulist;
3316 #define ppdu_info_list_elem ulist.ppdu_info_dlist_elem
3317 #define ppdu_info_queue_elem ulist.ppdu_info_slist_elem
3318 #else
3319 	TAILQ_ENTRY(ppdu_info) ppdu_info_list_elem;
3320 #endif
3321 	uint8_t compltn_common_tlv;
3322 	uint8_t ack_ba_tlv;
3323 	bool done;
3324 };
3325 
3326 /**
3327  * struct msdu_completion_info - wbm msdu completion info
3328  * @ppdu_id: Unique ppduid assigned by firmware for every tx packet
3329  * @peer_id: peer_id
3330  * @tid: tid used during transmit
3331  * @first_msdu: first msdu indication
3332  * @last_msdu: last msdu indication
3333  * @msdu_part_of_amsdu: msdu part of amsdu
3334  * @transmit_cnt: retried count
3335  * @status: transmit status
3336  * @tsf: timestamp at which the msdu was transmitted
3337  */
3338 struct msdu_completion_info {
3339 	uint32_t ppdu_id;
3340 	uint16_t peer_id;
3341 	uint8_t tid;
3342 	uint8_t first_msdu:1,
3343 		last_msdu:1,
3344 		msdu_part_of_amsdu:1;
3345 	uint8_t transmit_cnt;
3346 	uint8_t status;
3347 	uint32_t tsf;
3348 };
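
/*
 * Illustrative sketch: filling struct msdu_completion_info from Tx
 * completion parameters. The parameter values and the assumption of a
 * single, non-AMSDU msdu sent in one attempt are hypothetical.
 */
static inline void
dp_example_fill_msdu_completion_info(struct msdu_completion_info *info,
				     uint32_t ppdu_id, uint16_t peer_id,
				     uint8_t tid, uint8_t status, uint32_t tsf)
{
	info->ppdu_id = ppdu_id;
	info->peer_id = peer_id;
	info->tid = tid;
	info->first_msdu = 1;		/* only msdu of the mpdu */
	info->last_msdu = 1;
	info->msdu_part_of_amsdu = 0;
	info->transmit_cnt = 1;		/* transmitted on the first attempt */
	info->status = status;
	info->tsf = tsf;
}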
3349 
3350 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
3351 struct rx_protocol_tag_map {
3352 	/* This is the user configured tag for the said protocol type */
3353 	uint16_t tag;
3354 };
3355 
3356 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
3357 /**
3358  * struct rx_protocol_tag_stats - protocol statistics
3359  * @tag_ctr: number of rx msdus matching this tag
3360  */
3361 struct rx_protocol_tag_stats {
3362 	uint32_t tag_ctr;
3363 };
3364 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
3365 
3366 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
3367 
3368 #ifdef WLAN_RX_PKT_CAPTURE_ENH
3369 /* Template data to be set for Enhanced RX Monitor packets */
3370 #define RX_MON_CAP_ENH_TRAILER 0xdeadc0dedeadda7a
3371 
3372 /**
3373  * struct dp_rx_mon_enh_trailer_data - Data structure to set a known pattern
3374  * at end of each MSDU in monitor-lite mode
3375  * @reserved1: reserved for future use
3376  * @reserved2: reserved for future use
3377  * @flow_tag: flow tag value read from skb->cb
3378  * @protocol_tag: protocol tag value read from skb->cb
3379  */
3380 struct dp_rx_mon_enh_trailer_data {
3381 	uint16_t reserved1;
3382 	uint16_t reserved2;
3383 	uint16_t flow_tag;
3384 	uint16_t protocol_tag;
3385 };
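
/*
 * Illustrative sketch: writing the enhanced RX monitor trailer at the end
 * of an MSDU in monitor-lite mode, assuming the 8-byte trailer is seeded
 * with the known pattern and the tag fields are then overwritten. The
 * destination pointer and tag values are hypothetical.
 */
static inline void
dp_example_write_rx_mon_trailer(uint8_t *msdu_end, uint16_t protocol_tag,
				uint16_t flow_tag)
{
	uint64_t pattern = RX_MON_CAP_ENH_TRAILER;
	struct dp_rx_mon_enh_trailer_data trailer;

	/* Start from the known 64-bit pattern, then fill in the tags */
	qdf_mem_copy(&trailer, &pattern, sizeof(trailer));
	trailer.protocol_tag = protocol_tag;
	trailer.flow_tag = flow_tag;

	qdf_mem_copy(msdu_end, &trailer, sizeof(trailer));
}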
3386 #endif /* WLAN_RX_PKT_CAPTURE_ENH */
3387 
3388 #ifdef HTT_STATS_DEBUGFS_SUPPORT
3389 /* Number of debugfs entries created for HTT stats */
3390 #define PDEV_HTT_STATS_DBGFS_SIZE HTT_DBG_NUM_EXT_STATS
3391 
3392 /**
3393  * struct pdev_htt_stats_dbgfs_priv - Structure to maintain debugfs information
3394  * of HTT stats
3395  * @pdev: dp pdev of debugfs entry
3396  * @stats_id: stats id of debugfs entry
3397  */
3398 struct pdev_htt_stats_dbgfs_priv {
3399 	struct dp_pdev *pdev;
3400 	uint16_t stats_id;
3401 };
3402 
3403 /**
3404  * struct pdev_htt_stats_dbgfs_cfg - PDEV level data structure for debugfs
3405  * support for HTT stats
3406  * @debugfs_entry: qdf_debugfs directory entry
3407  * @m: qdf debugfs file handler
3408  * @pdev_htt_stats_dbgfs_ops: File operations of entry created
3409  * @priv: HTT stats debugfs private object
3410  * @htt_stats_dbgfs_event: HTT stats event for debugfs support
3411  * @lock: HTT stats debugfs lock
3412  * @htt_stats_dbgfs_msg_process: Function callback to print HTT stats
3413  */
3414 struct pdev_htt_stats_dbgfs_cfg {
3415 	qdf_dentry_t debugfs_entry[PDEV_HTT_STATS_DBGFS_SIZE];
3416 	qdf_debugfs_file_t m;
3417 	struct qdf_debugfs_fops
3418 			pdev_htt_stats_dbgfs_ops[PDEV_HTT_STATS_DBGFS_SIZE - 1];
3419 	struct pdev_htt_stats_dbgfs_priv priv[PDEV_HTT_STATS_DBGFS_SIZE - 1];
3420 	qdf_event_t htt_stats_dbgfs_event;
3421 	qdf_mutex_t lock;
3422 	void (*htt_stats_dbgfs_msg_process)(void *data, A_INT32 len);
3423 };
3424 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
3425 
3426 struct dp_srng_ring_state {
3427 	enum hal_ring_type ring_type;
3428 	uint32_t sw_head;
3429 	uint32_t sw_tail;
3430 	uint32_t hw_head;
3431 	uint32_t hw_tail;
3432 
3433 };
3434 
3435 struct dp_soc_srngs_state {
3436 	uint32_t seq_num;
3437 	uint32_t max_ring_id;
3438 	struct dp_srng_ring_state ring_state[DP_MAX_SRNGS];
3439 	TAILQ_ENTRY(dp_soc_srngs_state) list_elem;
3440 };
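
/*
 * Illustrative sketch: recording one ring's head/tail indices into a
 * dp_soc_srngs_state snapshot. The head/tail values are assumed to have
 * been read from HAL by the caller, and max_ring_id is assumed to track
 * the number of populated entries.
 */
static inline void
dp_example_record_ring_state(struct dp_soc_srngs_state *snapshot,
			     uint32_t idx, enum hal_ring_type ring_type,
			     uint32_t sw_head, uint32_t sw_tail,
			     uint32_t hw_head, uint32_t hw_tail)
{
	struct dp_srng_ring_state *state = &snapshot->ring_state[idx];

	state->ring_type = ring_type;
	state->sw_head = sw_head;
	state->sw_tail = sw_tail;
	state->hw_head = hw_head;
	state->hw_tail = hw_tail;

	if (idx + 1 > snapshot->max_ring_id)
		snapshot->max_ring_id = idx + 1;
}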
3441 
3442 #ifdef WLAN_FEATURE_11BE_MLO
3443 /**
3444  * struct dp_mlo_sync_timestamp - PDEV level MLO timestamp info from HTT msg
3445  * @msg_type: This would be set to HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND
3446  * @pdev_id: pdev_id
3447  * @chip_id: chip_id
3448  * @mac_clk_freq: mac clock frequency of the mac HW block in MHz
3449  * @sync_tstmp_lo_us: lower 32 bits of the WLAN global time stamp (in us) at
3450  *                   which last sync interrupt was received
3451  * @sync_tstmp_hi_us: upper 32 bits of the WLAN global time stamp (in us) at
3452  *                   which last sync interrupt was received
3453  * @mlo_offset_lo_us: lower 32 bits of the MLO time stamp offset in us
3454  * @mlo_offset_hi_us: upper 32 bits of the MLO time stamp offset in us
3455  * @mlo_offset_clks:  MLO time stamp offset in clock ticks for sub us
3456  * @mlo_comp_us:      MLO time stamp compensation applied in us
3457  * @mlo_comp_clks:    MLO time stamp compensation applied in clock ticks
3458  *                   for sub us resolution
3459  * @mlo_comp_timer:   period of MLO compensation timer at which compensation
3460  *                   is applied, in us
3461  */
3462 struct dp_mlo_sync_timestamp {
3463 	uint32_t msg_type:8,
3464 		 pdev_id:2,
3465 		 chip_id:2,
3466 		 rsvd1:4,
3467 		 mac_clk_freq:16;
3468 	uint32_t sync_tstmp_lo_us;
3469 	uint32_t sync_tstmp_hi_us;
3470 	uint32_t mlo_offset_lo_us;
3471 	uint32_t mlo_offset_hi_us;
3472 	uint32_t mlo_offset_clks;
3473 	uint32_t mlo_comp_us:16,
3474 		 mlo_comp_clks:10,
3475 		 rsvd2:6;
3476 	uint32_t mlo_comp_timer:22,
3477 		 rsvd3:10;
3478 };
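
/*
 * Illustrative sketch: recombining the 32-bit halves carried in
 * dp_mlo_sync_timestamp into 64-bit microsecond values.
 */
static inline uint64_t
dp_example_mlo_offset_us(struct dp_mlo_sync_timestamp *ts)
{
	return ((uint64_t)ts->mlo_offset_hi_us << 32) | ts->mlo_offset_lo_us;
}

static inline uint64_t
dp_example_mlo_sync_tstamp_us(struct dp_mlo_sync_timestamp *ts)
{
	return ((uint64_t)ts->sync_tstmp_hi_us << 32) | ts->sync_tstmp_lo_us;
}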
3479 #endif
3480 
3481 /* PDEV level structure for data path */
3482 struct dp_pdev {
3483 	/**
3484 	 * Re-use Memory Section Starts
3485 	 */
3486 
3487 	/* PDEV Id */
3488 	uint8_t pdev_id;
3489 
3490 	/* LMAC Id */
3491 	uint8_t lmac_id;
3492 
3493 	/* Target pdev  Id */
3494 	uint8_t target_pdev_id;
3495 
3496 	bool pdev_deinit;
3497 
3498 	/* TXRX SOC handle */
3499 	struct dp_soc *soc;
3500 
3501 	/* pdev status down or up required to handle dynamic hw
3502 	 * mode switch between DBS and DBS_SBS.
3503 	 * 1 = down
3504 	 * 0 = up
3505 	 */
3506 	bool is_pdev_down;
3507 
3508 	/* Enhanced Stats is enabled */
3509 	uint8_t enhanced_stats_en:1,
3510 		link_peer_stats:1;
3511 
3512 	/* Flag to indicate fast RX */
3513 	bool rx_fast_flag;
3514 
3515 	/* Second ring used to replenish rx buffers */
3516 	struct dp_srng rx_refill_buf_ring2;
3517 #ifdef IPA_WDI3_VLAN_SUPPORT
3518 	/* Third ring used to replenish rx buffers */
3519 	struct dp_srng rx_refill_buf_ring3;
3520 #endif
3521 
3522 #ifdef FEATURE_DIRECT_LINK
3523 	/* Fourth ring used to replenish rx buffers */
3524 	struct dp_srng rx_refill_buf_ring4;
3525 #endif
3526 
3527 	/* Empty ring used by firmware to post rx buffers to the MAC */
3528 	struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];
3529 
3530 	int ch_band_lmac_id_mapping[REG_BAND_UNKNOWN];
3531 
3532 	/* wlan_cfg pdev ctxt */
3533 	struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx;
3534 
3535 	/**
3536 	 * TODO: See if we need a ring map here for LMAC rings.
3537 	 * 1. Monitor rings are currently planning to be processed on receiving
3538 	 * PPDU end interrupts and hence won't need ring based interrupts.
3539 	 * 2. Rx buffer rings will be replenished during REO destination
3540 	 * processing and don't require regular interrupt handling - we will
3541 	 * only handle low water mark interrupts, which are not expected
3542 	 * frequently
3543 	 */
3544 
3545 	/* VDEV list */
3546 	TAILQ_HEAD(, dp_vdev) vdev_list;
3547 
3548 	/* vdev list lock */
3549 	qdf_spinlock_t vdev_list_lock;
3550 
3551 	/* Number of vdevs this device has */
3552 	uint16_t vdev_count;
3553 
3554 	/* PDEV transmit lock */
3555 	qdf_spinlock_t tx_lock;
3556 
3557 	/* tx_mutex for multicast enhancement (ME) */
3558 	DP_MUTEX_TYPE tx_mutex;
3559 
3560 	/* msdu chain head & tail */
3561 	qdf_nbuf_t invalid_peer_head_msdu;
3562 	qdf_nbuf_t invalid_peer_tail_msdu;
3563 
3564 	/* Band steering  */
3565 	/* TBD */
3566 
3567 	/* PDEV level data path statistics */
3568 	struct cdp_pdev_stats stats;
3569 
3570 	/* Global RX decap mode for the device */
3571 	enum htt_pkt_type rx_decap_mode;
3572 
3573 	qdf_atomic_t num_tx_outstanding;
3574 	int32_t tx_descs_max;
3575 
3576 	qdf_atomic_t num_tx_exception;
3577 
3578 	/* MCL specific local peer handle */
3579 	struct {
3580 		uint8_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
3581 		uint8_t freelist;
3582 		qdf_spinlock_t lock;
3583 		struct dp_peer *map[OL_TXRX_NUM_LOCAL_PEER_IDS];
3584 	} local_peer_ids;
3585 
3586 	/* dscp_tid_map_*/
3587 	uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX];
3588 
3589 	/* operating channel */
3590 	struct {
3591 		uint8_t num;
3592 		uint8_t band;
3593 		uint16_t freq;
3594 	} operating_channel;
3595 
3596 	/* pool addr for mcast enhance buff */
3597 	struct {
3598 		int size;
3599 		uint32_t paddr;
3600 		char *vaddr;
3601 		struct dp_tx_me_buf_t *freelist;
3602 		int buf_in_use;
3603 		qdf_dma_mem_context(memctx);
3604 	} me_buf;
3605 
3606 	bool hmmc_tid_override_en;
3607 	uint8_t hmmc_tid;
3608 
3609 	/* Number of VAPs with mcast enhancement enabled */
3610 	qdf_atomic_t mc_num_vap_attached;
3611 
3612 	qdf_atomic_t stats_cmd_complete;
3613 
3614 #ifdef IPA_OFFLOAD
3615 	ipa_uc_op_cb_type ipa_uc_op_cb;
3616 	void *usr_ctxt;
3617 	struct dp_ipa_resources ipa_resource;
3618 #endif
3619 
3620 	/* TBD */
3621 
3622 	/* map this pdev to a particular Reo Destination ring */
3623 	enum cdp_host_reo_dest_ring reo_dest;
3624 
3625 	/* WDI event handlers */
3626 	struct wdi_event_subscribe_t **wdi_event_list;
3627 
3628 	bool cfr_rcc_mode;
3629 
3630 	/* enable time latency check for tx completion */
3631 	bool latency_capture_enable;
3632 
3633 	/* enable calculation of delay stats*/
3634 	bool delay_stats_flag;
3635 	void *dp_txrx_handle; /* Advanced data path handle */
3636 	uint32_t ppdu_id;
3637 	bool first_nbuf;
3638 	/* Current noise-floor reading for the pdev channel */
3639 	int16_t chan_noise_floor;
3640 
3641 	/*
3642 	 * For multiradio device, this flag indicates if
3643 	 * this radio is primary or secondary.
3644 	 *
3645 	 * For HK 1.0, this is used for WAR for the AST issue.
3646 	 * HK 1.x mandates creation of only 1 AST entry with same MAC address
3647 	 * across 2 radios. is_primary indicates the radio on which DP should
3648 	 * install HW AST entry if there is a request to add 2 AST entries
3649 	 * with same MAC address across 2 radios
3650 	 */
3651 	uint8_t is_primary;
3652 	struct cdp_tx_sojourn_stats sojourn_stats;
3653 	qdf_nbuf_t sojourn_buf;
3654 
3655 	union dp_rx_desc_list_elem_t *free_list_head;
3656 	union dp_rx_desc_list_elem_t *free_list_tail;
3657 	/* Cached peer_id from htt_peer_details_tlv */
3658 	uint16_t fw_stats_peer_id;
3659 
3660 	/* qdf_event for fw_peer_stats */
3661 	qdf_event_t fw_peer_stats_event;
3662 
3663 	/* qdf_event for fw_stats */
3664 	qdf_event_t fw_stats_event;
3665 
3666 	/* qdf_event for fw_obss_stats */
3667 	qdf_event_t fw_obss_stats_event;
3668 
3669 	/* To check if request is already sent for obss stats */
3670 	bool pending_fw_obss_stats_response;
3671 
3672 	/* User configured max number of tx buffers */
3673 	uint32_t num_tx_allowed;
3674 
3675 	/*
3676 	 * User configured max num of tx buffers excluding the
3677 	 * number of buffers reserved for handling special frames
3678 	 */
3679 	uint32_t num_reg_tx_allowed;
3680 
3681 	/* User configured max number of tx buffers for the special frames*/
3682 	uint32_t num_tx_spl_allowed;
3683 
3684 	/* unique cookie required for peer session */
3685 	uint32_t next_peer_cookie;
3686 
3687 	/*
3688 	 * Run time enabled when the first protocol tag is added,
3689 	 * run time disabled when the last protocol tag is deleted
3690 	 */
3691 	bool  is_rx_protocol_tagging_enabled;
3692 
3693 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
3694 	/*
3695 	 * The protocol type is used as array index to save
3696 	 * user provided tag info
3697 	 */
3698 	struct rx_protocol_tag_map rx_proto_tag_map[RX_PROTOCOL_TAG_MAX];
3699 
3700 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
3701 	/*
3702 	 * Track msdus received from each reo ring separately to avoid
3703 	 * simultaneous writes from different core
3704 	 */
3705 	struct rx_protocol_tag_stats
3706 		reo_proto_tag_stats[MAX_REO_DEST_RINGS][RX_PROTOCOL_TAG_MAX];
3707 	/* Track msdus received from exception ring separately */
3708 	struct rx_protocol_tag_stats
3709 		rx_err_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
3710 	struct rx_protocol_tag_stats
3711 		mon_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
3712 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
3713 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
3714 
3715 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
3716 	/**
3717 	 * Pointer to DP Flow FST at SOC level if
3718 	 * is_rx_flow_search_table_per_pdev is true
3719 	 */
3720 	struct dp_rx_fst *rx_fst;
3721 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
3722 
3723 #ifdef FEATURE_TSO_STATS
3724 	/* TSO Id to index into TSO packet information */
3725 	qdf_atomic_t tso_idx;
3726 #endif /* FEATURE_TSO_STATS */
3727 
3728 #ifdef WLAN_SUPPORT_DATA_STALL
3729 	data_stall_detect_cb data_stall_detect_callback;
3730 #endif /* WLAN_SUPPORT_DATA_STALL */
3731 
3732 	/* flag to indicate whether LRO hash command has been sent to FW */
3733 	uint8_t is_lro_hash_configured;
3734 
3735 #ifdef HTT_STATS_DEBUGFS_SUPPORT
3736 	/* HTT stats debugfs params */
3737 	struct pdev_htt_stats_dbgfs_cfg *dbgfs_cfg;
3738 #endif
3739 	struct {
3740 		qdf_work_t work;
3741 		qdf_workqueue_t *work_queue;
3742 		uint32_t seq_num;
3743 		uint8_t queue_depth;
3744 		qdf_spinlock_t list_lock;
3745 
3746 		TAILQ_HEAD(, dp_soc_srngs_state) list;
3747 	} bkp_stats;
3748 #ifdef WIFI_MONITOR_SUPPORT
3749 	struct dp_mon_pdev *monitor_pdev;
3750 #endif
3751 #ifdef WLAN_FEATURE_11BE_MLO
3752 	struct dp_mlo_sync_timestamp timestamp;
3753 #endif
3754 	/* Is isolation mode enabled */
3755 	bool  isolation;
3756 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
3757 	uint8_t is_first_wakeup_packet;
3758 #endif
3759 #ifdef CONNECTIVITY_PKTLOG
3760 	/* packetdump callback functions */
3761 	ol_txrx_pktdump_cb dp_tx_packetdump_cb;
3762 	ol_txrx_pktdump_cb dp_rx_packetdump_cb;
3763 #endif
3764 
3765 	/* Firmware Stats for TLV received from Firmware */
3766 	uint64_t fw_stats_tlv_bitmap_rcvd;
3767 
3768 	/* For Checking Pending Firmware Response */
3769 	bool pending_fw_stats_response;
3770 };
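
/*
 * Illustrative sketch: resolving a TID from the pdev DSCP-TID map for an
 * IPv4 packet. The map_id and ToS values are hypothetical; the DSCP is
 * extracted from the ToS byte with DP_IP_DSCP_SHIFT/DP_IP_DSCP_MASK and
 * used to index dscp_tid_map.
 */
static inline uint8_t
dp_example_dscp_to_tid(struct dp_pdev *pdev, uint8_t map_id, uint8_t ip_tos)
{
	uint8_t dscp = (ip_tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;

	return pdev->dscp_tid_map[map_id][dscp];
}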
3771 
3772 struct dp_peer;
3773 
3774 #ifdef DP_RX_UDP_OVER_PEER_ROAM
3775 #define WLAN_ROAM_PEER_AUTH_STATUS_NONE 0x0
3776 /*
3777  * This macro is equivalent to macro ROAM_AUTH_STATUS_AUTHENTICATED used
3778  * in connection mgr
3779  */
3780 #define WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED 0x2
3781 #endif
3782 
3783 /* VDEV structure for data path state */
3784 struct dp_vdev {
3785 	/* OS device abstraction */
3786 	qdf_device_t osdev;
3787 
3788 	/* physical device that is the parent of this virtual device */
3789 	struct dp_pdev *pdev;
3790 
3791 	/* VDEV operating mode */
3792 	enum wlan_op_mode opmode;
3793 
3794 	/* VDEV subtype */
3795 	enum wlan_op_subtype subtype;
3796 
3797 	/* Tx encapsulation type for this VAP */
3798 	enum htt_cmn_pkt_type tx_encap_type;
3799 
3800 	/* Rx Decapsulation type for this VAP */
3801 	enum htt_cmn_pkt_type rx_decap_type;
3802 
3803 	/* WDS enabled */
3804 	bool wds_enabled;
3805 
3806 	/* MEC enabled */
3807 	bool mec_enabled;
3808 
3809 #ifdef QCA_SUPPORT_WDS_EXTENDED
3810 	bool wds_ext_enabled;
3811 	bool drop_tx_mcast;
3812 #endif /* QCA_SUPPORT_WDS_EXTENDED */
3813 	bool drop_3addr_mcast;
3814 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
3815 	bool skip_bar_update;
3816 	unsigned long skip_bar_update_last_ts;
3817 #endif
3818 	/* WDS Aging timer period */
3819 	uint32_t wds_aging_timer_val;
3820 
3821 	/* NAWDS enabled */
3822 	bool nawds_enabled;
3823 
3824 	/* Multicast enhancement enabled */
3825 	uint8_t mcast_enhancement_en;
3826 
3827 	/* IGMP multicast enhancement enabled */
3828 	uint8_t igmp_mcast_enhanc_en;
3829 
3830 	/* vdev_id - ID used to specify a particular vdev to the target */
3831 	uint8_t vdev_id;
3832 
3833 	/* Default HTT meta data for this VDEV */
3834 	/* TBD: check alignment constraints */
3835 	uint16_t htt_tcl_metadata;
3836 
3837 	/* vdev lmac_id */
3838 	uint8_t lmac_id;
3839 
3840 	/* vdev bank_id */
3841 	uint8_t bank_id;
3842 
3843 	/* Mesh mode vdev */
3844 	uint32_t mesh_vdev;
3845 
3846 	/* Mesh mode rx filter setting */
3847 	uint32_t mesh_rx_filter;
3848 
3849 	/* DSCP-TID mapping table ID */
3850 	uint8_t dscp_tid_map_id;
3851 
3852 	/* Address search type to be set in TX descriptor */
3853 	uint8_t search_type;
3854 
3855 	/*
3856 	 * Flag to indicate if s/w tid classification should be
3857 	 * skipped
3858 	 */
3859 	uint8_t skip_sw_tid_classification;
3860 
3861 	/* Flag to enable peer authorization */
3862 	uint8_t peer_authorize;
3863 
3864 	/* AST hash value for BSS peer in HW valid for STA VAP*/
3865 	uint16_t bss_ast_hash;
3866 
3867 	/* AST hash index for BSS peer in HW valid for STA VAP*/
3868 	uint16_t bss_ast_idx;
3869 
3870 	bool multipass_en;
3871 
3872 	/* Address search flags to be configured in HAL descriptor */
3873 	uint8_t hal_desc_addr_search_flags;
3874 
3875 	/* Handle to the OS shim SW's virtual device */
3876 	ol_osif_vdev_handle osif_vdev;
3877 
3878 	/* MAC address */
3879 	union dp_align_mac_addr mac_addr;
3880 
3881 #ifdef WLAN_FEATURE_11BE_MLO
3882 	/* MLO MAC address corresponding to vdev */
3883 	union dp_align_mac_addr mld_mac_addr;
3884 #if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
3885 	uint8_t mlo_vdev:1,
3886 		is_bridge_vdev:1,
3887 		reserved_1:6;
3888 #endif
3889 #endif
3890 
3891 	/* node in the pdev's list of vdevs */
3892 	TAILQ_ENTRY(dp_vdev) vdev_list_elem;
3893 
3894 	/* dp_peer list */
3895 	TAILQ_HEAD(, dp_peer) peer_list;
3896 	/* to protect peer_list */
3897 	DP_MUTEX_TYPE peer_list_lock;
3898 
3899 	/* RX call back function to flush GRO packets*/
3900 	ol_txrx_rx_gro_flush_ind_fp osif_gro_flush;
3901 	/* default RX call back function called by dp */
3902 	ol_txrx_rx_fp osif_rx;
3903 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
3904 	/* callback to receive eapol frames */
3905 	ol_txrx_rx_fp osif_rx_eapol;
3906 #endif
3907 	/* callback to deliver rx frames to the OS */
3908 	ol_txrx_rx_fp osif_rx_stack;
3909 	/* Callback to handle rx fisa frames */
3910 	ol_txrx_fisa_rx_fp osif_fisa_rx;
3911 	ol_txrx_fisa_flush_fp osif_fisa_flush;
3912 
3913 	/* call back function to flush out queued rx packets*/
3914 	ol_txrx_rx_flush_fp osif_rx_flush;
3915 	ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap;
3916 	ol_txrx_get_key_fp osif_get_key;
3917 	ol_txrx_tx_free_ext_fp osif_tx_free_ext;
3918 
3919 #ifdef notyet
3920 	/* callback to check if the msdu is an WAI (WAPI) frame */
3921 	ol_rx_check_wai_fp osif_check_wai;
3922 #endif
3923 
3924 	/* proxy arp function */
3925 	ol_txrx_proxy_arp_fp osif_proxy_arp;
3926 
3927 	ol_txrx_mcast_me_fp me_convert;
3928 
3929 	/* completion function used by this vdev*/
3930 	ol_txrx_completion_fp tx_comp;
3931 
3932 	ol_txrx_get_tsf_time get_tsf_time;
3933 
3934 	/* callback to classify critical packets */
3935 	ol_txrx_classify_critical_pkt_fp tx_classify_critical_pkt_cb;
3936 
3937 	/* deferred vdev deletion state */
3938 	struct {
3939 		/* VDEV delete pending */
3940 		int pending;
3941 		/*
3942 		* callback and a context argument to provide a
3943 		* notification for when the vdev is deleted.
3944 		*/
3945 		ol_txrx_vdev_delete_cb callback;
3946 		void *context;
3947 	} delete;
3948 
3949 	/* tx data delivery notification callback function */
3950 	struct {
3951 		ol_txrx_data_tx_cb func;
3952 		void *ctxt;
3953 	} tx_non_std_data_callback;
3954 
3955 
3956 	/* safe mode control to bypass the encrypt and decipher process*/
3957 	uint32_t safemode;
3958 
3959 	/* rx filter related */
3960 	uint32_t drop_unenc;
3961 #ifdef notyet
3962 	privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
3963 	uint32_t filters_num;
3964 #endif
3965 	/* TDLS Link status */
3966 	bool tdls_link_connected;
3967 	bool is_tdls_frame;
3968 
3969 	/* per vdev rx nbuf queue */
3970 	qdf_nbuf_queue_t rxq;
3971 
3972 	uint8_t tx_ring_id;
3973 	struct dp_tx_desc_pool_s *tx_desc;
3974 	struct dp_tx_ext_desc_pool_s *tx_ext_desc;
3975 
3976 	/* Capture timestamp of previous tx packet enqueued */
3977 	uint64_t prev_tx_enq_tstamp;
3978 
3979 	/* Capture timestamp of previous rx packet delivered */
3980 	uint64_t prev_rx_deliver_tstamp;
3981 
3982 	/* VDEV Stats */
3983 	struct cdp_vdev_stats stats;
3984 
3985 	/* Is this a proxySTA VAP */
3986 	uint8_t proxysta_vdev : 1, /* Is this a proxySTA VAP */
3987 		wrap_vdev : 1, /* Is this a QWRAP AP VAP */
3988 		isolation_vdev : 1, /* Is isolation mode enabled on this VAP */
3989 		reserved : 5; /* Reserved */
3990 
3991 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3992 	struct dp_tx_desc_pool_s *pool;
3993 #endif
3994 	/* AP BRIDGE enabled */
3995 	bool ap_bridge_enabled;
3996 
3997 	enum cdp_sec_type  sec_type;
3998 
3999 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
4000 	bool raw_mode_war;
4001 
4002 
4003 	/* 8021p PCP-TID mapping table ID */
4004 	uint8_t tidmap_tbl_id;
4005 
4006 	/* 8021p PCP-TID map values */
4007 	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
4008 
4009 	/* TIDmap priority */
4010 	uint8_t tidmap_prty;
4011 
4012 #ifdef QCA_MULTIPASS_SUPPORT
4013 	uint16_t *iv_vlan_map;
4014 
4015 	/* dp_peer special list */
4016 	TAILQ_HEAD(, dp_txrx_peer) mpass_peer_list;
4017 	DP_MUTEX_TYPE mpass_peer_mutex;
4018 #endif
4019 	/* Extended data path handle */
4020 	struct cdp_ext_vdev *vdev_dp_ext_handle;
4021 #ifdef VDEV_PEER_PROTOCOL_COUNT
4022 	/*
4023 	 * Rx-Ingress and Tx-Egress are handled in the lower level DP layer,
4024 	 * while Rx-Egress and Tx-Ingress are handled in the OSIF layer.
4025 	 * Hence the Rx-Egress and Tx-Ingress mask definitions are in the
4026 	 * OSIF layer, and the Rx-Ingress and Tx-Egress mask definitions
4027 	 * are here below
4028 	 */
4029 #define VDEV_PEER_PROTOCOL_RX_INGRESS_MASK 1
4030 #define VDEV_PEER_PROTOCOL_TX_INGRESS_MASK 2
4031 #define VDEV_PEER_PROTOCOL_RX_EGRESS_MASK 4
4032 #define VDEV_PEER_PROTOCOL_TX_EGRESS_MASK 8
4033 	bool peer_protocol_count_track;
4034 	int peer_protocol_count_dropmask;
4035 #endif
4036 	/* callback to collect connectivity stats */
4037 	ol_txrx_stats_rx_fp stats_cb;
4038 	uint32_t num_peers;
4039 	/* entry to inactive_list*/
4040 	TAILQ_ENTRY(dp_vdev) inactive_list_elem;
4041 
4042 #ifdef WLAN_SUPPORT_RX_FISA
4043 	/**
4044 	 * Params used for controlling the fisa aggregation dynamically
4045 	 */
4046 	uint8_t fisa_disallowed[MAX_REO_DEST_RINGS];
4047 	uint8_t fisa_force_flushed[MAX_REO_DEST_RINGS];
4048 #endif
4049 	/*
4050 	 * Refcount for VDEV currently incremented when
4051 	 * peer is created for VDEV
4052 	 */
4053 	qdf_atomic_t ref_cnt;
4054 	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
4055 	uint8_t num_latency_critical_conn;
4056 #ifdef WLAN_SUPPORT_MESH_LATENCY
4057 	uint8_t peer_tid_latency_enabled;
4058 	/* tid latency configuration parameters */
4059 	struct {
4060 		uint32_t service_interval;
4061 		uint32_t burst_size;
4062 		uint8_t latency_tid;
4063 	} mesh_tid_latency_config;
4064 #endif
4065 #ifdef WIFI_MONITOR_SUPPORT
4066 	struct dp_mon_vdev *monitor_vdev;
4067 #endif
4068 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
4069 	/* Delta between TQM clock and TSF clock */
4070 	uint32_t delta_tsf;
4071 #endif
4072 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
4073 	/* Indicate if uplink delay report is enabled or not */
4074 	qdf_atomic_t ul_delay_report;
4075 	/* accumulative delay for every TX completion */
4076 	qdf_atomic_t ul_delay_accum;
4077 	/* accumulative number of packets delay has accumulated */
4078 	qdf_atomic_t ul_pkts_accum;
4079 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
4080 
4081 	/* vdev_stats_id - ID used for stats collection by FW from HW*/
4082 	uint8_t vdev_stats_id;
4083 #ifdef HW_TX_DELAY_STATS_ENABLE
4084 	/* hw tx delay stats enable */
4085 	uint8_t hw_tx_delay_stats_enabled;
4086 #endif
4087 #ifdef DP_RX_UDP_OVER_PEER_ROAM
4088 	uint32_t roaming_peer_status;
4089 	union dp_align_mac_addr roaming_peer_mac;
4090 #endif
4091 #ifdef DP_TRAFFIC_END_INDICATION
4092 	/* per vdev feature enable/disable status */
4093 	bool traffic_end_ind_en;
4094 	/* per vdev nbuf queue for traffic end indication packets */
4095 	qdf_nbuf_queue_t end_ind_pkt_q;
4096 #endif
4097 #ifdef FEATURE_DIRECT_LINK
4098 	/* Flag to indicate if to_fw should be set for tx pkts on this vdev */
4099 	bool to_fw;
4100 #endif
4101 	/* QDF VDEV operating mode  */
4102 	enum QDF_OPMODE qdf_opmode;
4103 
4104 #ifdef WLAN_TX_PKT_CAPTURE_ENH
4105 	/* TX capture feature to over ride return buffer manager */
4106 	bool is_override_rbm_id;
4107 	/* Return buffer manager ID */
4108 	uint8_t rbm_id;
4109 #endif
4110 };
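
/*
 * Illustrative sketch: mapping an 802.1p PCP value to a TID through the
 * per-vdev PCP-TID table. The PCP value is hypothetical; it is masked to
 * 3 bits since PCP is a 3-bit field.
 */
static inline uint8_t
dp_example_pcp_to_tid(struct dp_vdev *vdev, uint8_t pcp)
{
	return vdev->pcp_tid_map[pcp & 0x7];
}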
4111 
4112 enum {
4113 	dp_sec_mcast = 0,
4114 	dp_sec_ucast
4115 };
4116 
4117 #ifdef WDS_VENDOR_EXTENSION
4118 typedef struct {
4119 	uint8_t	wds_tx_mcast_4addr:1,
4120 		wds_tx_ucast_4addr:1,
4121 		wds_rx_filter:1,      /* enforce rx filter */
4122 		wds_rx_ucast_4addr:1, /* when set, accept 4addr unicast frames    */
4123 		wds_rx_mcast_4addr:1;  /* when set, accept 4addr multicast frames  */
4124 
4125 } dp_ecm_policy;
4126 #endif
4127 
4128 /**
4129  * struct dp_peer_cached_bufq - cached_bufq to enqueue rx packets
4130  * @cached_bufq: nbuff list to enqueue rx packets
4131  * @bufq_lock: spinlock for nbuff list access
4132  * @thresh: maximum threshold for number of rx buff to enqueue
4133  * @entries: number of entries
4134  * @dropped: number of packets dropped
4135  */
4136 struct dp_peer_cached_bufq {
4137 	qdf_list_t cached_bufq;
4138 	qdf_spinlock_t bufq_lock;
4139 	uint32_t thresh;
4140 	uint32_t entries;
4141 	uint32_t dropped;
4142 };
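
/*
 * Illustrative sketch: threshold accounting for dp_peer_cached_bufq before
 * caching an rx nbuf. The actual enqueue (qdf_list insertion under
 * bufq_lock) is assumed to be done by the caller when this returns true.
 */
static inline bool
dp_example_can_cache_rx_buf(struct dp_peer_cached_bufq *bufq)
{
	if (bufq->entries >= bufq->thresh) {
		bufq->dropped++;
		return false;
	}

	bufq->entries++;
	return true;
}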
4143 
4144 /**
4145  * enum dp_peer_ast_flowq
4146  * @DP_PEER_AST_FLOWQ_HI_PRIO: Hi Priority flow queue
4147  * @DP_PEER_AST_FLOWQ_LOW_PRIO: Low priority flow queue
4148  * @DP_PEER_AST_FLOWQ_UDP: flow queue type is UDP
4149  * @DP_PEER_AST_FLOWQ_NON_UDP: flow queue type is Non UDP
4150  * @DP_PEER_AST_FLOWQ_MAX: max value
4151  */
4152 enum dp_peer_ast_flowq {
4153 	DP_PEER_AST_FLOWQ_HI_PRIO,
4154 	DP_PEER_AST_FLOWQ_LOW_PRIO,
4155 	DP_PEER_AST_FLOWQ_UDP,
4156 	DP_PEER_AST_FLOWQ_NON_UDP,
4157 	DP_PEER_AST_FLOWQ_MAX,
4158 };
4159 
4160 /**
4161  * struct dp_ast_flow_override_info - ast override info
4162  * @ast_idx: ast indexes in peer map message
4163  * @ast_valid_mask: ast valid mask for each ast index
4164  * @ast_flow_mask: ast flow mask for each ast index
4165  * @tid_valid_low_pri_mask: per tid mask for low priority flow
4166  * @tid_valid_hi_pri_mask: per tid mask for hi priority flow
4167  */
4168 struct dp_ast_flow_override_info {
4169 	uint16_t ast_idx[DP_PEER_AST_FLOWQ_MAX];
4170 	uint8_t ast_valid_mask;
4171 	uint8_t ast_flow_mask[DP_PEER_AST_FLOWQ_MAX];
4172 	uint8_t tid_valid_low_pri_mask;
4173 	uint8_t tid_valid_hi_pri_mask;
4174 };
4175 
4176 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
4177 /**
4178  * struct dp_peer_ext_evt_info - peer extended event info
4179  * @peer_id: peer_id from firmware
4180  * @vdev_id: vdev ID
4181  * @link_id: Link ID
4182  * @link_id_valid: link_id_valid
4183  * @peer_mac_addr: mac address of the peer
4184  */
4185 struct dp_peer_ext_evt_info {
4186 	uint16_t peer_id;
4187 	uint8_t vdev_id;
4188 	uint8_t link_id;
4189 	bool link_id_valid;
4190 	uint8_t *peer_mac_addr;
4191 };
4192 #endif
4193 
4194 /**
4195  * struct dp_peer_ast_params - ast parameters for a msdu flow-queue
4196  * @ast_idx: ast index populated by FW
4197  * @is_valid: ast flow valid mask
4198  * @valid_tid_mask: per tid mask for this ast index
4199  * @flowQ: flow queue id associated with this ast index
4200  */
4201 struct dp_peer_ast_params {
4202 	uint16_t ast_idx;
4203 	uint8_t is_valid;
4204 	uint8_t valid_tid_mask;
4205 	uint8_t flowQ;
4206 };
4207 
4208 #define DP_MLO_FLOW_INFO_MAX	3
4209 
4210 /**
4211  * struct dp_mlo_flow_override_info - Flow override info
4212  * @ast_idx: Primary TCL AST Index
4213  * @ast_idx_valid: Is AST index valid
4214  * @chip_id: CHIP ID
4215  * @tidmask: tidmask
4216  * @cache_set_num: Cache set number
4217  */
4218 struct dp_mlo_flow_override_info {
4219 	uint16_t ast_idx;
4220 	uint8_t ast_idx_valid;
4221 	uint8_t chip_id;
4222 	uint8_t tidmask;
4223 	uint8_t cache_set_num;
4224 };
4225 
4226 /**
4227  * struct dp_mlo_link_info - Link info
4228  * @peer_chip_id: Peer Chip ID
4229  * @vdev_id: Vdev ID
4230  */
4231 struct dp_mlo_link_info {
4232 	uint8_t peer_chip_id;
4233 	uint8_t vdev_id;
4234 };
4235 
4236 #ifdef WLAN_SUPPORT_MSCS
4237 /*MSCS Procedure based macros */
4238 #define IEEE80211_MSCS_MAX_ELEM_SIZE    5
4239 #define IEEE80211_TCLAS_MASK_CLA_TYPE_4  4
4240 /**
4241  * struct dp_peer_mscs_parameter - MSCS database obtained from
4242  * MSCS Request and Response in the control path. This data is used
4243  * by the AP to find out what priority to set based on the tuple
4244  * classification during packet processing.
4245  * @user_priority_bitmap: User priority bitmap obtained during
4246  * handshake
4247  * @user_priority_limit: User priority limit obtained during
4248  * handshake
4249  * @classifier_mask: params to be compared during processing
4250  */
4251 struct dp_peer_mscs_parameter {
4252 	uint8_t user_priority_bitmap;
4253 	uint8_t user_priority_limit;
4254 	uint8_t classifier_mask;
4255 };
4256 #endif
4257 
4258 #ifdef QCA_SUPPORT_WDS_EXTENDED
4259 #define WDS_EXT_PEER_INIT_BIT 0
4260 
4261 /**
4262  * struct dp_wds_ext_peer - wds ext peer structure
4263  * This is used when wds extended feature is enabled
4264  * both at compile time and at run time. It is created
4265  * when the first 4-address frame is received from the
4266  * wds backhaul.
4267  * @osif_peer: Handle to the OS shim SW's virtual device
4268  * @init: wds ext netdev state
4269  */
4270 struct dp_wds_ext_peer {
4271 	ol_osif_peer_handle osif_peer;
4272 	unsigned long init;
4273 };
4274 #endif /* QCA_SUPPORT_WDS_EXTENDED */
4275 
4276 #ifdef WLAN_SUPPORT_MESH_LATENCY
4277 /*Advanced Mesh latency feature based macros */
4278 
4279 /**
4280  * struct dp_peer_mesh_latency_parameter - Mesh latency related
4281  * parameters. This data is updated per peer per TID based on
4282  * the flow tuple classification in external rule database
4283  * during packet processing.
4284  * @service_interval_dl: Service interval associated with TID in DL
4285  * @burst_size_dl: Burst size additive over multiple flows in DL
4286  * @service_interval_ul: Service interval associated with TID in UL
4287  * @burst_size_ul: Burst size additive over multiple flows in UL
4288  * @ac: custom ac derived from service interval
4289  * @msduq: MSDU queue number within TID
4290  */
4291 struct dp_peer_mesh_latency_parameter {
4292 	uint32_t service_interval_dl;
4293 	uint32_t burst_size_dl;
4294 	uint32_t service_interval_ul;
4295 	uint32_t burst_size_ul;
4296 	uint8_t ac;
4297 	uint8_t msduq;
4298 };
4299 #endif
4300 
4301 #ifdef WLAN_FEATURE_11BE_MLO
4302 /* Max number of links for MLO connection */
4303 #define DP_MAX_MLO_LINKS 4
4304 
4305 /**
4306  * struct dp_peer_link_info - link peer information for MLO
4307  * @mac_addr: Mac address
4308  * @vdev_id: Vdev ID for current link peer
4309  * @is_valid: flag for link peer info valid or not
4310  * @chip_id: chip id
4311  */
4312 struct dp_peer_link_info {
4313 	union dp_align_mac_addr mac_addr;
4314 	uint8_t vdev_id;
4315 	uint8_t is_valid;
4316 	uint8_t chip_id;
4317 };
4318 
4319 /**
4320  * struct dp_mld_link_peers - this structure is used to get link peers
4321  *			      pointer from mld peer
4322  * @link_peers: link peers pointer array
4323  * @num_links: number of link peers fetched
4324  */
4325 struct dp_mld_link_peers {
4326 	struct dp_peer *link_peers[DP_MAX_MLO_LINKS];
4327 	uint8_t num_links;
4328 };
4329 #else
4330 #define DP_MAX_MLO_LINKS 0
4331 #endif
4332 
4333 typedef void *dp_txrx_ref_handle;
4334 
4335 /**
4336  * struct dp_peer_per_pkt_tx_stats- Peer Tx stats updated in per pkt
4337  *				Tx completion path
4338  * @ucast: Unicast Packet Count
4339  * @mcast: Multicast Packet Count
4340  * @bcast: Broadcast Packet Count
4341  * @nawds_mcast: NAWDS Multicast Packet Count
4342  * @tx_success: Successful Tx Packets
4343  * @nawds_mcast_drop: NAWDS Multicast Drop Count
4344  * @ofdma: Total Packets as ofdma
4345  * @non_amsdu_cnt: Number of MSDUs with no MSDU level aggregation
4346  * @amsdu_cnt: Number of MSDUs part of AMSDU
4347  * @dropped: Dropped packet statistics
4348  * @dropped.fw_rem: Discarded by firmware
4349  * @dropped.fw_rem_notx: firmware_discard_untransmitted
4350  * @dropped.fw_rem_tx: firmware_discard_transmitted
4351  * @dropped.age_out: aged out in mpdu/msdu queues
4352  * @dropped.fw_reason1: discarded by firmware reason 1
4353  * @dropped.fw_reason2: discarded by firmware reason 2
4354  * @dropped.fw_reason3: discarded by firmware reason  3
4355  * @dropped.fw_rem_no_match: dropped due to fw no match command
4356  * @dropped.drop_threshold: dropped due to HW threshold
4357  * @dropped.drop_link_desc_na: dropped due to resource not available in HW
4358  * @dropped.invalid_drop: Invalid msdu drop
4359  * @dropped.mcast_vdev_drop: MCAST drop configured for VDEV in HW
4360  * @dropped.invalid_rr: Invalid TQM release reason
4361  * @failed_retry_count: packets failed due to retry above 802.11 retry limit
4362  * @retry_count: packets successfully send after one or more retry
4363  * @multiple_retry_count: packets successfully sent after more than one retry
4364  * @no_ack_count: no ack pkt count for different protocols
4365  * @tx_success_twt: Successful Tx Packets in TWT session
4366  * @last_tx_ts: last timestamp in jiffies when tx comp occurred
4367  * @avg_sojourn_msdu: Avg sojourn msdu stat
4368  * @protocol_trace_cnt: per-peer protocol counter
4369  * @release_src_not_tqm: Counter tracking TX completions whose release
4370  *			 source is not TQM
4371  * @inval_link_id_pkt_cnt: Counter to capture Invalid Link Id
4372  */
4373 struct dp_peer_per_pkt_tx_stats {
4374 	struct cdp_pkt_info ucast;
4375 	struct cdp_pkt_info mcast;
4376 	struct cdp_pkt_info bcast;
4377 	struct cdp_pkt_info nawds_mcast;
4378 	struct cdp_pkt_info tx_success;
4379 	uint32_t nawds_mcast_drop;
4380 	uint32_t ofdma;
4381 	uint32_t non_amsdu_cnt;
4382 	uint32_t amsdu_cnt;
4383 	struct {
4384 		struct cdp_pkt_info fw_rem;
4385 		uint32_t fw_rem_notx;
4386 		uint32_t fw_rem_tx;
4387 		uint32_t age_out;
4388 		uint32_t fw_reason1;
4389 		uint32_t fw_reason2;
4390 		uint32_t fw_reason3;
4391 		uint32_t fw_rem_queue_disable;
4392 		uint32_t fw_rem_no_match;
4393 		uint32_t drop_threshold;
4394 		uint32_t drop_link_desc_na;
4395 		uint32_t invalid_drop;
4396 		uint32_t mcast_vdev_drop;
4397 		uint32_t invalid_rr;
4398 	} dropped;
4399 	uint32_t failed_retry_count;
4400 	uint32_t retry_count;
4401 	uint32_t multiple_retry_count;
4402 	uint32_t no_ack_count[QDF_PROTO_SUBTYPE_MAX];
4403 	struct cdp_pkt_info tx_success_twt;
4404 	unsigned long last_tx_ts;
4405 	qdf_ewma_tx_lag avg_sojourn_msdu[CDP_DATA_TID_MAX];
4406 #ifdef VDEV_PEER_PROTOCOL_COUNT
4407 	struct protocol_trace_count protocol_trace_cnt[CDP_TRACE_MAX];
4408 #endif
4409 	uint32_t release_src_not_tqm;
4410 	uint32_t inval_link_id_pkt_cnt;
4411 };
4412 
4413 /**
4414  * struct dp_peer_extd_tx_stats - Peer Tx stats updated in either
4415  *	per pkt Tx completion path when macro QCA_ENHANCED_STATS_SUPPORT is
4416  *	disabled or in HTT Tx PPDU completion path when macro is enabled
4417  * @stbc: Packets in STBC
4418  * @ldpc: Packets in LDPC
4419  * @retries: Packet retries
4420  * @pkt_type: pkt count for different .11 modes
4421  * @wme_ac_type: Wireless Multimedia type Count
4422  * @excess_retries_per_ac: Excess retry count per Wireless Multimedia AC
4423  * @ampdu_cnt: completion of aggregation
4424  * @non_ampdu_cnt: tx completion not aggregated
4425  * @num_ppdu_cookie_valid: no. of valid ppdu cookies rcvd from FW
4426  * @tx_ppdus: ppdus in tx
4427  * @tx_mpdus_success: mpdus successful in tx
4428  * @tx_mpdus_tried: mpdus tried in tx
4429  * @tx_rate: Tx Rate in kbps
4430  * @last_tx_rate: Last tx rate for unicast packets
4431  * @last_tx_rate_mcs: Tx rate mcs for unicast packets
4432  * @mcast_last_tx_rate: Last tx rate for multicast packets
4433  * @mcast_last_tx_rate_mcs: Last tx rate mcs for multicast
4434  * @rnd_avg_tx_rate: Rounded average tx rate
4435  * @avg_tx_rate: Average TX rate
4436  * @tx_ratecode: Tx rate code of last frame
4437  * @pream_punct_cnt: Preamble Punctured count
4438  * @sgi_count: SGI count
4439  * @nss: Packet count for different num_spatial_stream values
4440  * @bw: Packet Count for different bandwidths
4441  * @ru_start: RU start index
4442  * @ru_tones: RU tones size
4443  * @ru_loc: pkt info for RU location 26/ 52/ 106/ 242/ 484 counter
4444  * @transmit_type: pkt info for tx transmit type
4445  * @mu_group_id: mumimo mu group id
4446  * @last_ack_rssi: RSSI of last acked packet
4447  * @nss_info: NSS 1,2, ...8
4448  * @mcs_info: MCS index
4449  * @bw_info: Bandwidth
4450  *       <enum 0 bw_20_MHz>
4451  *       <enum 1 bw_40_MHz>
4452  *       <enum 2 bw_80_MHz>
4453  *       <enum 3 bw_160_MHz>
4454  * @gi_info: <enum 0     0_8_us_sgi > Legacy normal GI
4455  *       <enum 1     0_4_us_sgi > Legacy short GI
4456  *       <enum 2     1_6_us_sgi > HE related GI
4457  *       <enum 3     3_2_us_sgi > HE
4458  * @preamble_info: preamble
4459  * @tx_ucast_total: total ucast count
4460  * @tx_ucast_success: total ucast success count
4461  * @retries_mpdu: number of MPDUs successfully transmitted after retries
4462  * @mpdu_success_with_retries: mpdu retry count in case of successful tx
4463  * @su_be_ppdu_cnt: SU Tx packet count for 11BE
4464  * @mu_be_ppdu_cnt: MU Tx packet count for 11BE
4465  * @punc_bw: MSDU count for punctured bw
4466  * @rts_success: RTS success count
4467  * @rts_failure: RTS failure count
4468  * @bar_cnt: Block ACK Request frame count
4469  * @ndpa_cnt: NDP announcement frame count
4470  * @rssi_chain: rssi chain
4471  * @wme_ac_type_bytes: Wireless Multimedia bytes Count
4472  */
4473 struct dp_peer_extd_tx_stats {
4474 	uint32_t stbc;
4475 	uint32_t ldpc;
4476 	uint32_t retries;
4477 	struct cdp_pkt_type pkt_type[DOT11_MAX];
4478 	uint32_t wme_ac_type[WME_AC_MAX];
4479 	uint32_t excess_retries_per_ac[WME_AC_MAX];
4480 	uint32_t ampdu_cnt;
4481 	uint32_t non_ampdu_cnt;
4482 	uint32_t num_ppdu_cookie_valid;
4483 	uint32_t tx_ppdus;
4484 	uint32_t tx_mpdus_success;
4485 	uint32_t tx_mpdus_tried;
4486 
4487 	uint32_t tx_rate;
4488 	uint32_t last_tx_rate;
4489 	uint32_t last_tx_rate_mcs;
4490 	uint32_t mcast_last_tx_rate;
4491 	uint32_t mcast_last_tx_rate_mcs;
4492 	uint64_t rnd_avg_tx_rate;
4493 	uint64_t avg_tx_rate;
4494 	uint16_t tx_ratecode;
4495 
4496 	uint32_t sgi_count[MAX_GI];
4497 	uint32_t pream_punct_cnt;
4498 	uint32_t nss[SS_COUNT];
4499 	uint32_t bw[MAX_BW];
4500 	uint32_t ru_start;
4501 	uint32_t ru_tones;
4502 	struct cdp_tx_pkt_info ru_loc[MAX_RU_LOCATIONS];
4503 
4504 	struct cdp_tx_pkt_info transmit_type[MAX_TRANSMIT_TYPES];
4505 	uint32_t mu_group_id[MAX_MU_GROUP_ID];
4506 
4507 	uint32_t last_ack_rssi;
4508 
4509 	uint32_t nss_info:4,
4510 		 mcs_info:4,
4511 		 bw_info:4,
4512 		 gi_info:4,
4513 		 preamble_info:4;
4514 
4515 	uint32_t retries_mpdu;
4516 	uint32_t mpdu_success_with_retries;
4517 	struct cdp_pkt_info tx_ucast_total;
4518 	struct cdp_pkt_info tx_ucast_success;
4519 #ifdef WLAN_FEATURE_11BE
4520 	struct cdp_pkt_type su_be_ppdu_cnt;
4521 	struct cdp_pkt_type mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX];
4522 	uint32_t punc_bw[MAX_PUNCTURED_MODE];
4523 #endif
4524 	uint32_t rts_success;
4525 	uint32_t rts_failure;
4526 	uint32_t bar_cnt;
4527 	uint32_t ndpa_cnt;
4528 	int32_t rssi_chain[CDP_RSSI_CHAIN_LEN];
4529 	uint64_t wme_ac_type_bytes[WME_AC_MAX];
4530 };
4531 
4532 /**
4533  * struct dp_peer_per_pkt_rx_stats - Peer Rx stats updated in per pkt Rx path
4534  * @rcvd_reo: Packets received on the reo ring
4535  * @rx_lmac: Packets received on each lmac
4536  * @unicast: Total unicast packets
4537  * @multicast: Total multicast packets
4538  * @bcast:  Broadcast Packet Count
4539  * @raw: Raw packets received
4540  * @nawds_mcast_drop: Total NAWDS multicast packets dropped
4541  * @mec_drop: Total MEC packets dropped
4542  * @ppeds_drop: Total DS packets dropped
4543  * @last_rx_ts: last timestamp in jiffies when RX happened
4544  * @intra_bss: Intra BSS statistics
4545  * @intra_bss.pkts: Intra BSS packets received
4546  * @intra_bss.fail: Intra BSS packets failed
4547  * @intra_bss.mdns_no_fwd: Intra BSS MDNS packets not forwarded
4548  * @err: error counters
4549  * @err.mic_err: Rx MIC errors CCMP
4550  * @err.decrypt_err: Rx Decryption Errors CRC
4551  * @err.fcserr: Rx FCS check failed
4552  * @err.pn_err: pn check failed
4553  * @err.oor_err: Rx OOR errors
4554  * @err.jump_2k_err: 2k jump errors
4555  * @err.rxdma_wifi_parse_err: rxdma wifi parse errors
4556  * @non_amsdu_cnt: Number of MSDUs with no MSDU level aggregation
4557  * @amsdu_cnt: Number of MSDUs part of AMSDU
4558  * @rx_retries: retries of packet in rx
4559  * @multipass_rx_pkt_drop: Dropped multipass rx pkt
4560  * @peer_unauth_rx_pkt_drop: Unauth rx packet drops
4561  * @policy_check_drop: policy check drops
4562  * @to_stack_twt: Total packets sent up the stack in TWT session
4563  * @protocol_trace_cnt: per-peer protocol counters
4564  * @mcast_3addr_drop:
4565  * @rx_total: total rx count
4566  * @inval_link_id_pkt_cnt: Counter to capture Invalid Link Id
4567  */
4568 struct dp_peer_per_pkt_rx_stats {
4569 	struct cdp_pkt_info rcvd_reo[CDP_MAX_RX_RINGS];
4570 	struct cdp_pkt_info rx_lmac[CDP_MAX_LMACS];
4571 	struct cdp_pkt_info unicast;
4572 	struct cdp_pkt_info multicast;
4573 	struct cdp_pkt_info bcast;
4574 	struct cdp_pkt_info raw;
4575 	uint32_t nawds_mcast_drop;
4576 	struct cdp_pkt_info mec_drop;
4577 	struct cdp_pkt_info ppeds_drop;
4578 	unsigned long last_rx_ts;
4579 	struct {
4580 		struct cdp_pkt_info pkts;
4581 		struct cdp_pkt_info fail;
4582 		uint32_t mdns_no_fwd;
4583 	} intra_bss;
4584 	struct {
4585 		uint32_t mic_err;
4586 		uint32_t decrypt_err;
4587 		uint32_t fcserr;
4588 		uint32_t pn_err;
4589 		uint32_t oor_err;
4590 		uint32_t jump_2k_err;
4591 		uint32_t rxdma_wifi_parse_err;
4592 	} err;
4593 	uint32_t non_amsdu_cnt;
4594 	uint32_t amsdu_cnt;
4595 	uint32_t rx_retries;
4596 	uint32_t multipass_rx_pkt_drop;
4597 	uint32_t peer_unauth_rx_pkt_drop;
4598 	uint32_t policy_check_drop;
4599 	struct cdp_pkt_info to_stack_twt;
4600 #ifdef VDEV_PEER_PROTOCOL_COUNT
4601 	struct protocol_trace_count protocol_trace_cnt[CDP_TRACE_MAX];
4602 #endif
4603 	uint32_t mcast_3addr_drop;
4604 #ifdef IPA_OFFLOAD
4605 	struct cdp_pkt_info rx_total;
4606 #endif
4607 	uint32_t inval_link_id_pkt_cnt;
4608 };
4609 
4610 /**
4611  * struct dp_peer_extd_rx_stats - Peer Rx stats updated in either
4612  *	per pkt Rx path when macro QCA_ENHANCED_STATS_SUPPORT is disabled or in
4613  *	Rx monitor path when macro is enabled
4614  * @pkt_type: pkt counter for different .11 modes
4615  * @wme_ac_type: Wireless Multimedia type Count
4616  * @mpdu_cnt_fcs_ok: SU Rx success mpdu count
4617  * @mpdu_cnt_fcs_err: SU Rx fail mpdu count
4618  * @non_ampdu_cnt: Number of MSDUs with no MPDU level aggregation
4619  * @ampdu_cnt: Number of MSDUs part of AMPDU
4620  * @rx_mpdus: mpdu in rx
4621  * @rx_ppdus: ppdu in rx
4622  * @su_ax_ppdu_cnt: SU Rx packet count for .11ax
4623  * @rx_mu: Rx MU stats
4624  * @reception_type: Reception type of packets
4625  * @ppdu_cnt: PPDU packet count in reception type
4626  * @sgi_count: sgi count
4627  * @nss: packet count in spatial streams
4628  * @ppdu_nss: PPDU packet count in spatial streams
4629  * @bw: Packet Count in different bandwidths
4630  * @rx_mpdu_cnt: rx mpdu count per MCS rate
4631  * @rx_rate: Rx rate
4632  * @last_rx_rate: Previous rx rate
4633  * @rnd_avg_rx_rate: Rounded average rx rate
4634  * @avg_rx_rate: Average Rx rate
4635  * @rx_ratecode: Rx rate code of last frame
4636  * @avg_snr: Average snr
4637  * @rx_snr_measured_time: Time at which snr is measured
4638  * @snr: SNR of received signal
4639  * @last_snr: Previous snr
4640  * @nss_info: NSS 1,2, ...8
4641  * @mcs_info: MCS index
4642  * @bw_info: Bandwidth
4643  *       <enum 0 bw_20_MHz>
4644  *       <enum 1 bw_40_MHz>
4645  *       <enum 2 bw_80_MHz>
4646  *       <enum 3 bw_160_MHz>
4647  * @gi_info: <enum 0     0_8_us_sgi > Legacy normal GI
4648  *       <enum 1     0_4_us_sgi > Legacy short GI
4649  *       <enum 2     1_6_us_sgi > HE related GI
4650  *       <enum 3     3_2_us_sgi > HE
4651  * @preamble_info: preamble
4652  * @mpdu_retry_cnt: retries of mpdu in rx
4653  * @su_be_ppdu_cnt: SU Rx packet count for BE
4654  * @mu_be_ppdu_cnt: MU rx packet count for BE
4655  * @punc_bw: MSDU count for punctured bw
4656  * @bar_cnt: Block ACK Request frame count
4657  * @ndpa_cnt: NDP announcement frame count
4658  * @wme_ac_type_bytes: Wireless Multimedia type Bytes Count
4659  */
4660 struct dp_peer_extd_rx_stats {
4661 	struct cdp_pkt_type pkt_type[DOT11_MAX];
4662 	uint32_t wme_ac_type[WME_AC_MAX];
4663 	uint32_t mpdu_cnt_fcs_ok;
4664 	uint32_t mpdu_cnt_fcs_err;
4665 	uint32_t non_ampdu_cnt;
4666 	uint32_t ampdu_cnt;
4667 	uint32_t rx_mpdus;
4668 	uint32_t rx_ppdus;
4669 
4670 	struct cdp_pkt_type su_ax_ppdu_cnt;
4671 	struct cdp_rx_mu rx_mu[TXRX_TYPE_MU_MAX];
4672 	uint32_t reception_type[MAX_RECEPTION_TYPES];
4673 	uint32_t ppdu_cnt[MAX_RECEPTION_TYPES];
4674 
4675 	uint32_t sgi_count[MAX_GI];
4676 	uint32_t nss[SS_COUNT];
4677 	uint32_t ppdu_nss[SS_COUNT];
4678 	uint32_t bw[MAX_BW];
4679 	uint32_t rx_mpdu_cnt[MAX_MCS];
4680 
4681 	uint32_t rx_rate;
4682 	uint32_t last_rx_rate;
4683 	uint32_t rnd_avg_rx_rate;
4684 	uint32_t avg_rx_rate;
4685 	uint32_t rx_ratecode;
4686 
4687 	uint32_t avg_snr;
4688 	unsigned long rx_snr_measured_time;
4689 	uint8_t snr;
4690 	uint8_t last_snr;
4691 
4692 	uint32_t nss_info:4,
4693 		 mcs_info:4,
4694 		 bw_info:4,
4695 		 gi_info:4,
4696 		 preamble_info:4;
4697 
4698 	uint32_t mpdu_retry_cnt;
4699 #ifdef WLAN_FEATURE_11BE
4700 	struct cdp_pkt_type su_be_ppdu_cnt;
4701 	struct cdp_pkt_type mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX];
4702 	uint32_t punc_bw[MAX_PUNCTURED_MODE];
4703 #endif
4704 	uint32_t bar_cnt;
4705 	uint32_t ndpa_cnt;
4706 	uint64_t wme_ac_type_bytes[WME_AC_MAX];
4707 };
4708 
4709 /**
4710  * struct dp_peer_per_pkt_stats - Per pkt stats for peer
4711  * @tx: Per pkt Tx stats
4712  * @rx: Per pkt Rx stats
4713  */
4714 struct dp_peer_per_pkt_stats {
4715 	struct dp_peer_per_pkt_tx_stats tx;
4716 	struct dp_peer_per_pkt_rx_stats rx;
4717 };
4718 
4719 /**
4720  * struct dp_peer_extd_stats - Stats from extended path for peer
4721  * @tx: Extended path tx stats
4722  * @rx: Extended path rx stats
4723  */
4724 struct dp_peer_extd_stats {
4725 	struct dp_peer_extd_tx_stats tx;
4726 	struct dp_peer_extd_rx_stats rx;
4727 };
4728 
4729 /**
4730  * struct dp_peer_stats - Peer stats
4731  * @per_pkt_stats: Per packet path stats
4732  * @extd_stats: Extended path stats
4733  */
4734 struct dp_peer_stats {
4735 	struct dp_peer_per_pkt_stats per_pkt_stats;
4736 #ifndef QCA_ENHANCED_STATS_SUPPORT
4737 	struct dp_peer_extd_stats extd_stats;
4738 #endif
4739 };
4740 
4741 /**
4742  * struct dp_txrx_peer: DP txrx_peer structure used in per pkt path
4743  * @vdev: VDEV to which this peer is associated
4744  * @peer_id: peer ID for this peer
4745  * @authorize: Set when authorized
4746  * @in_twt: in TWT session
4747  * @hw_txrx_stats_en: Indicate HW offload vdev stats
4748  * @is_mld_peer: MLD peer flag
4749  * @tx_failed: Total Tx failure
4750  * @comp_pkt: Pkt Info for which completions were received
4751  * @to_stack: Total packets sent up the stack
4752  * @delay_stats: Peer delay stats
4753  * @jitter_stats: Peer jitter stats
4754  * @security: Security credentials
4755  * @nawds_enabled: NAWDS flag
4756  * @bss_peer: set for bss peer
4757  * @isolation: enable peer isolation for this peer
4758  * @wds_enabled: WDS peer
4759  * @wds_ecm:
4760  * @flush_in_progress:
4761  * @bufq_info:
4762  * @mpass_peer_list_elem: node in the special peer list element
4763  * @vlan_id: vlan id for key
4764  * @wds_ext:
4765  * @osif_rx:
4766  * @rx_tid:
4767  * @sawf_stats:
4768  * @bw: bandwidth of peer connection
4769  * @mpdu_retry_threshold: MPDU retry threshold to increment tx bad count
4770  * @stats_arr_size: peer stats array size
4771  * @stats: Peer link and mld statistics
4772  */
4773 struct dp_txrx_peer {
4774 	struct dp_vdev *vdev;
4775 	uint16_t peer_id;
4776 	uint8_t authorize:1,
4777 		in_twt:1,
4778 		hw_txrx_stats_en:1,
4779 		is_mld_peer:1;
4780 	uint32_t tx_failed;
4781 	struct cdp_pkt_info comp_pkt;
4782 	struct cdp_pkt_info to_stack;
4783 
4784 	struct dp_peer_delay_stats *delay_stats;
4785 
4786 	struct cdp_peer_tid_stats *jitter_stats;
4787 
4788 	struct {
4789 		enum cdp_sec_type sec_type;
4790 		u_int32_t michael_key[2]; /* relevant for TKIP */
4791 	} security[2]; /* 0 -> multicast, 1 -> unicast */
4792 
4793 	uint16_t nawds_enabled:1,
4794 		bss_peer:1,
4795 		isolation:1,
4796 		wds_enabled:1;
4797 #ifdef WDS_VENDOR_EXTENSION
4798 	dp_ecm_policy wds_ecm;
4799 #endif
4800 #ifdef PEER_CACHE_RX_PKTS
4801 	qdf_atomic_t flush_in_progress;
4802 	struct dp_peer_cached_bufq bufq_info;
4803 #endif
4804 #ifdef QCA_MULTIPASS_SUPPORT
4805 	TAILQ_ENTRY(dp_txrx_peer) mpass_peer_list_elem;
4806 	uint16_t vlan_id;
4807 #endif
4808 #ifdef QCA_SUPPORT_WDS_EXTENDED
4809 	struct dp_wds_ext_peer wds_ext;
4810 	ol_txrx_rx_fp osif_rx;
4811 #endif
4812 	struct dp_rx_tid_defrag rx_tid[DP_MAX_TIDS];
4813 #ifdef CONFIG_SAWF
4814 	struct dp_peer_sawf_stats *sawf_stats;
4815 #endif
4816 #ifdef DP_PEER_EXTENDED_API
4817 	enum cdp_peer_bw bw;
4818 	uint8_t mpdu_retry_threshold;
4819 #endif
4820 	uint8_t stats_arr_size;
4821 
4822 	/* dp_peer_stats should be the last member in the structure */
4823 	struct dp_peer_stats stats[];
4824 };
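
/*
 * Illustrative sketch: allocating a dp_txrx_peer together with its trailing
 * flexible stats array. The choice of stats_arr_size (e.g. 1 for a legacy
 * peer) is an assumption of the caller; dp_peer_stats must stay the last
 * member of the structure for this to work.
 */
static inline struct dp_txrx_peer *
dp_example_txrx_peer_alloc(uint8_t stats_arr_size)
{
	struct dp_txrx_peer *txrx_peer;

	txrx_peer = qdf_mem_malloc(sizeof(*txrx_peer) +
				   stats_arr_size *
				   sizeof(struct dp_peer_stats));
	if (!txrx_peer)
		return NULL;

	txrx_peer->stats_arr_size = stats_arr_size;

	return txrx_peer;
}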
4825 
4826 /* Peer structure for data path state */
4827 struct dp_peer {
4828 	struct dp_txrx_peer *txrx_peer;
4829 #ifdef WIFI_MONITOR_SUPPORT
4830 	struct dp_mon_peer *monitor_peer;
4831 #endif
4832 	/* peer ID for this peer */
4833 	uint16_t peer_id;
4834 
4835 	/* VDEV to which this peer is associated */
4836 	struct dp_vdev *vdev;
4837 
4838 	struct dp_ast_entry *self_ast_entry;
4839 
4840 	qdf_atomic_t ref_cnt;
4841 
4842 	union dp_align_mac_addr mac_addr;
4843 
4844 	/* node in the vdev's list of peers */
4845 	TAILQ_ENTRY(dp_peer) peer_list_elem;
4846 	/* node in the hash table bin's list of peers */
4847 	TAILQ_ENTRY(dp_peer) hash_list_elem;
4848 
4849 	/* TID structures pointer */
4850 	struct dp_rx_tid *rx_tid;
4851 
4852 	/* TBD: No transmit TID state required? */
4853 
4854 	struct {
4855 		enum cdp_sec_type sec_type;
4856 		u_int32_t michael_key[2]; /* relevant for TKIP */
4857 	} security[2]; /* 0 -> multicast, 1 -> unicast */
4858 
4859 	/* NAWDS Flag and Bss Peer bit */
4860 	uint16_t bss_peer:1, /* set for bss peer */
4861 		authorize:1, /* Set when authorized */
4862 		valid:1, /* valid bit */
4863 		delete_in_progress:1, /* Indicate kickout sent */
4864 		sta_self_peer:1, /* Indicate STA self peer */
4865 		is_tdls_peer:1; /* Indicate TDLS peer */
4866 
4867 #ifdef WLAN_FEATURE_11BE_MLO
4868 	uint8_t first_link:1, /* first link peer for MLO */
4869 		primary_link:1; /* primary link for MLO */
4870 #endif
4871 
4872 	/* MCL specific peer local id */
4873 	uint16_t local_id;
4874 	enum ol_txrx_peer_state state;
4875 	qdf_spinlock_t peer_info_lock;
4876 
4877 	/* Peer calibrated stats */
4878 	struct cdp_calibr_stats stats;
4879 
4880 	TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
4881 	/* TBD */
4882 
4883 	/* Active Block ack sessions */
4884 	uint16_t active_ba_session_cnt;
4885 
4886 	/* Current HW buffersize setting */
4887 	uint16_t hw_buffer_size;
4888 
4889 	/*
4890 	 * Flag to check if sessions with 256 buffersize
4891 	 * should be terminated.
4892 	 */
4893 	uint8_t kill_256_sessions;
4894 	qdf_atomic_t is_default_route_set;
4895 
4896 #ifdef QCA_PEER_MULTIQ_SUPPORT
4897 	struct dp_peer_ast_params peer_ast_flowq_idx[DP_PEER_AST_FLOWQ_MAX];
4898 #endif
4899 	/* entry to inactive_list*/
4900 	TAILQ_ENTRY(dp_peer) inactive_list_elem;
4901 
4902 	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
4903 
4904 	uint8_t peer_state;
4905 	qdf_spinlock_t peer_state_lock;
4906 #ifdef WLAN_SUPPORT_MSCS
4907 	struct dp_peer_mscs_parameter mscs_ipv4_parameter, mscs_ipv6_parameter;
4908 	bool mscs_active;
4909 #endif
4910 #ifdef WLAN_SUPPORT_MESH_LATENCY
4911 	struct dp_peer_mesh_latency_parameter mesh_latency_params[DP_MAX_TIDS];
4912 #endif
4913 #ifdef WLAN_FEATURE_11BE_MLO
4914 	/* peer type */
4915 	enum cdp_peer_type peer_type;
4916 	/*---------for link peer---------*/
4917 	struct dp_peer *mld_peer;
4918 
4919 	/*Link ID of link peer*/
4920 	uint8_t link_id;
4921 	bool link_id_valid;
4922 
4923 	/*---------for mld peer----------*/
4924 	struct dp_peer_link_info link_peers[DP_MAX_MLO_LINKS];
4925 	uint8_t num_links;
4926 	DP_MUTEX_TYPE link_peers_info_lock;
4927 #endif
4928 #ifdef CONFIG_SAWF_DEF_QUEUES
4929 	struct dp_peer_sawf *sawf;
4930 #endif
4931 	/* AST hash index for peer in HW */
4932 	uint16_t ast_idx;
4933 
4934 	/* AST hash value for peer in HW */
4935 	uint16_t ast_hash;
4936 };
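
#ifdef WLAN_FEATURE_11BE_MLO
/*
 * Illustrative sketch: counting the valid link peer records stored on an
 * MLD peer. The caller is assumed to hold link_peers_info_lock and to pass
 * an MLD (not link) peer.
 */
static inline uint8_t
dp_example_count_valid_link_peers(struct dp_peer *mld_peer)
{
	uint8_t i, count = 0;

	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		if (mld_peer->link_peers[i].is_valid)
			count++;
	}

	return count;
}
#endif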
4937 
4938 /**
4939  * struct dp_invalid_peer_msg - Invalid peer message
4940  * @nbuf: data buffer
4941  * @wh: 802.11 header
4942  * @vdev_id: id of vdev
4943  */
4944 struct dp_invalid_peer_msg {
4945 	qdf_nbuf_t nbuf;
4946 	struct ieee80211_frame *wh;
4947 	uint8_t vdev_id;
4948 };
4949 
4950 /**
4951  * struct dp_tx_me_buf_t - ME buffer
4952  * @next: pointer to next buffer
4953  * @data: Destination Mac address
4954  * @paddr_macbuf: physical address for dest_mac
4955  */
4956 struct dp_tx_me_buf_t {
4957 	/* Note: ME buf pool initialization logic expects next pointer to
4958 	 * be the first element. Don't add anything before 'next'. */
4959 	struct dp_tx_me_buf_t *next;
4960 	uint8_t data[QDF_MAC_ADDR_SIZE];
4961 	qdf_dma_addr_t paddr_macbuf;
4962 };
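
/*
 * Illustrative sketch: pushing and popping ME buffers on a freelist such as
 * pdev->me_buf.freelist. Any required locking (e.g. the pdev tx_mutex used
 * for ME) is assumed to be handled by the caller.
 */
static inline struct dp_tx_me_buf_t *
dp_example_me_buf_pop(struct dp_tx_me_buf_t **freelist)
{
	struct dp_tx_me_buf_t *buf = *freelist;

	if (buf)
		*freelist = buf->next;

	return buf;
}

static inline void
dp_example_me_buf_push(struct dp_tx_me_buf_t **freelist,
		       struct dp_tx_me_buf_t *buf)
{
	buf->next = *freelist;
	*freelist = buf;
}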
4963 
4964 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
4965 struct hal_rx_fst;
4966 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
4967 
4968 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
4969 struct dp_rx_fse {
4970 	/* HAL Rx Flow Search Entry which matches HW definition */
4971 	void *hal_rx_fse;
4972 	/* Toeplitz hash value */
4973 	uint32_t flow_hash;
4974 	/* Flow index, equivalent to hash value truncated to FST size */
4975 	uint32_t flow_id;
4976 	/* Stats tracking for this flow */
4977 	struct cdp_flow_stats stats;
4978 	/* Flag indicating whether flow is IPv4 address tuple */
4979 	uint8_t is_ipv4_addr_entry;
4980 	/* Flag indicating whether flow is valid */
4981 	uint8_t is_valid;
4982 };
4983 
4984 struct dp_rx_fst {
4985 	/* Software (DP) FST */
4986 	uint8_t *base;
4987 	/* Pointer to HAL FST */
4988 	struct hal_rx_fst *hal_rx_fst;
4989 	/* Base physical address of HAL RX HW FST */
4990 	uint64_t hal_rx_fst_base_paddr;
4991 	/* Maximum number of flows FSE supports */
4992 	uint16_t max_entries;
4993 	/* Num entries in flow table */
4994 	uint16_t num_entries;
4995 	/* SKID Length */
4996 	uint16_t max_skid_length;
4997 	/* Hash mask to obtain legitimate hash entry */
4998 	uint32_t hash_mask;
4999 	/* Timer for bundling of flows */
5000 	qdf_timer_t cache_invalidate_timer;
5001 	/**
5002 	 * Flag which tracks whether cache update
5003 	 * is needed on timer expiry
5004 	 */
5005 	qdf_atomic_t is_cache_update_pending;
5006 	/* Flag to indicate completion of FSE setup in HW/FW */
5007 	bool fse_setup_done;
5008 	/* Last ring id used to add a flow */
5009 	uint8_t ring_id;
5010 };
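
/*
 * Illustrative sketch: deriving the FST flow index from a Toeplitz hash,
 * i.e. the hash truncated to the FST size via hash_mask as described for
 * dp_rx_fse/dp_rx_fst above.
 */
static inline uint32_t
dp_example_fst_flow_id(struct dp_rx_fst *fst, uint32_t flow_hash)
{
	return flow_hash & fst->hash_mask;
}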
5011 
5012 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_rx_fse)
5013 
5014 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
5015 
5016 #ifdef WLAN_FEATURE_STATS_EXT
5017 /**
5018  * struct dp_req_rx_hw_stats_t - RX peer HW stats query structure
5019  * @pending_tid_stats_cnt: number of pending per-TID stats awaiting REO status
5020  * @is_query_timeout: flag to indicate whether the stats query timed out
5021  */
5022 struct dp_req_rx_hw_stats_t {
5023 	qdf_atomic_t pending_tid_stats_cnt;
5024 	bool is_query_timeout;
5025 };
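
/*
 * Illustrative sketch only (dp_rx_hw_stats_tid_done_example() is a
 * hypothetical helper, not part of the driver): shows how the pending
 * per-TID count is expected to be consumed. Each REO status callback
 * decrements the count; the callback that brings it to zero wakes the
 * requester, unless the requester has already marked the query as
 * timed out. The actual signalling primitive is elided.
 */
static inline void
dp_rx_hw_stats_tid_done_example(struct dp_req_rx_hw_stats_t *rx_hw_stats)
{
	/* One more TID reported its stats via REO status */
	if (!qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt))
		return;

	/* All TIDs reported; only wake the requester if it is still waiting */
	if (!rx_hw_stats->is_query_timeout) {
		/* signal the stats completion event here */
	}
}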
5026 #endif
5027 /* soc level structure to declare arch specific ops for DP */
5028 
5029 #ifndef WLAN_SOFTUMAC_SUPPORT
5030 /**
5031  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
5032  * @soc: DP SOC handle
5033  * @mac_id: mac id
5034  *
5035  * Return: none
5036  */
5037 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id);
5038 
5039 /**
5040  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
5041  * @soc: DP SOC handle
5042  * @mac_id: mac id
5043  *
5044  * Allocates memory pages for link descriptors. The page size is 4K for
5045  * MCL and 2MB for WIN. If the mac_id is invalid, link descriptor pages
5046  * are allocated for regular RX/TX; if a valid mac_id is given, link
5047  * descriptors are allocated for RX monitor mode.
5048  *
5049  * Return: QDF_STATUS_SUCCESS: Success
5050  *	   QDF_STATUS_E_FAILURE: Failure
5051  */
5052 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc,
5053 					    uint32_t mac_id);
5054 #else
5055 static inline void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc,
5056 						   uint32_t mac_id)
5057 {
5058 }
5059 
5060 static inline QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc,
5061 							  uint32_t mac_id)
5062 {
5063 	return QDF_STATUS_SUCCESS;
5064 }
5065 #endif
5066 
5067 /**
5068  * dp_link_desc_ring_replenish() - Replenish hw link desc rings
5069  * @soc: DP SOC handle
5070  * @mac_id: mac id
5071  *
5072  * Return: None
5073  */
5074 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id);
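
/*
 * Illustrative sketch only (dp_link_desc_setup_example() is a
 * hypothetical helper, not part of the driver): typical ordering of the
 * link descriptor APIs above - allocate the banks for a mac_id, then
 * replenish the HW link descriptor ring with them; ring setup steps in
 * between are elided. dp_hw_link_desc_pool_banks_free() undoes the
 * allocation on teardown.
 */
static inline QDF_STATUS dp_link_desc_setup_example(struct dp_soc *soc,
						    uint32_t mac_id)
{
	QDF_STATUS status;

	status = dp_hw_link_desc_pool_banks_alloc(soc, mac_id);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	dp_link_desc_ring_replenish(soc, mac_id);

	return QDF_STATUS_SUCCESS;
}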
5075 
5076 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
5077 void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc);
5078 #else
5079 static inline void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc) {}
5080 #endif
5081 
5082 /**
5083  * dp_srng_alloc() - Allocate memory for SRNG
5084  * @soc: Data path soc handle
5085  * @srng: SRNG pointer
5086  * @ring_type: Ring type
5087  * @num_entries: Number of entries
5088  * @cached: flag indicating whether the ring memory should be cacheable
5089  *
5090  * Return: QDF_STATUS
5091  */
5092 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
5093 			 int ring_type, uint32_t num_entries,
5094 			 bool cached);
5095 
5096 /**
5097  * dp_srng_free() - Free SRNG memory
5098  * @soc: Data path soc handle
5099  * @srng: SRNG pointer
5100  *
5101  * Return: None
5102  */
5103 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng);
5104 
5105 /**
5106  * dp_srng_init() - Initialize SRNG
5107  * @soc: Data path soc handle
5108  * @srng: SRNG pointer
5109  * @ring_type: Ring type
5110  * @ring_num: Ring number
5111  * @mac_id: mac_id
5112  *
5113  * Return: QDF_STATUS
5114  */
5115 QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
5116 			int ring_type, int ring_num, int mac_id);
5117 
5118 /**
5119  * dp_srng_init_idx() - Initialize SRNG
5120  * @soc: Data path soc handle
5121  * @srng: SRNG pointer
5122  * @ring_type: Ring type
5123  * @ring_num: Ring number
5124  * @mac_id: mac_id
5125  * @idx: ring index
5126  *
5127  * Return: QDF_STATUS
5128  */
5129 QDF_STATUS dp_srng_init_idx(struct dp_soc *soc, struct dp_srng *srng,
5130 			    int ring_type, int ring_num, int mac_id,
5131 			    uint32_t idx);
5132 
5133 /**
5134  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
5135  * @soc: DP SOC handle
5136  * @srng: source ring structure
5137  * @ring_type: type of ring
5138  * @ring_num: ring number
5139  *
5140  * Return: None
5141  */
5142 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
5143 		    int ring_type, int ring_num);
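
/*
 * Illustrative sketch only (dp_srng_lifecycle_example() is a
 * hypothetical helper, not part of the driver): the expected pairing of
 * the SRNG APIs above - alloc/init on setup, deinit/free on teardown,
 * in reverse order. Ring type, ring number and mac_id values here are
 * placeholders.
 */
static inline QDF_STATUS dp_srng_lifecycle_example(struct dp_soc *soc,
						   struct dp_srng *srng,
						   int ring_type, int ring_num,
						   uint32_t num_entries)
{
	QDF_STATUS status;

	/* Allocate ring memory; 'false' asks for non-cacheable (coherent) memory */
	status = dp_srng_alloc(soc, srng, ring_type, num_entries, false);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* Program the ring in HW and SW state */
	status = dp_srng_init(soc, srng, ring_type, ring_num, 0);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_srng_free(soc, srng);
		return status;
	}

	/* ... ring is usable here ... */

	dp_srng_deinit(soc, srng, ring_type, ring_num);
	dp_srng_free(soc, srng);

	return QDF_STATUS_SUCCESS;
}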
5144 
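/*
 * Per-target-architecture variants (_be/_li/_rh) of the peer TX/RX
 * stats print helper.
 */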
5145 void dp_print_peer_txrx_stats_be(struct cdp_peer_stats *peer_stats,
5146 				 enum peer_stats_type stats_type);
5147 void dp_print_peer_txrx_stats_li(struct cdp_peer_stats *peer_stats,
5148 				 enum peer_stats_type stats_type);
5149 
5150 void dp_print_peer_txrx_stats_rh(struct cdp_peer_stats *peer_stats,
5151 				 enum peer_stats_type stats_type);
5152 
5153 /**
5154  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
5155  * @soc: DP soc handle
5156  * @work_done: work done in softirq context
5157  * @start_time: start time for the softirq
5158  *
5159  * Return: enum with yield code
5160  */
5161 enum timer_yield_status
5162 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
5163 			  uint64_t start_time);
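
/*
 * Illustrative sketch only (dp_timer_poll_example() is a hypothetical
 * helper, not part of the driver): how a timer/softirq poll loop is
 * expected to consult dp_should_timer_irq_yield() after each batch of
 * work. The batch-processing step is elided; DP_TIMER_NO_YIELD is the
 * "keep polling" status from timer_yield_status defined earlier in
 * this file.
 */
static inline uint32_t dp_timer_poll_example(struct dp_soc *soc,
					     uint64_t start_time)
{
	uint32_t total_work = 0;
	bool more_work = true;

	while (more_work) {
		uint32_t work_done = 0;

		/* ... service rings here and update work_done ... */
		total_work += work_done;
		more_work = (work_done != 0);

		if (dp_should_timer_irq_yield(soc, total_work, start_time) !=
		    DP_TIMER_NO_YIELD)
			break;
	}

	return total_work;
}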
5164 
5165 /**
5166  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
5167  * @vdev: Datapath VDEV handle
5168  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5169  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5170  *
5171  * Return: None
5172  */
5173 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
5174 				  enum cdp_host_reo_dest_ring *reo_dest,
5175 				  bool *hash_based);
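
/*
 * Illustrative sketch only (dp_vdev_reo_defaults_example() is a
 * hypothetical helper, not part of the driver): both values are
 * returned through the out parameters and can then seed per-peer REO
 * destination/hash configuration.
 */
static inline void dp_vdev_reo_defaults_example(struct dp_vdev *vdev)
{
	enum cdp_host_reo_dest_ring reo_dest;
	bool hash_based = false;

	dp_vdev_get_default_reo_hash(vdev, &reo_dest, &hash_based);

	/* use reo_dest / hash_based when setting up peer rx queues */
}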
5176 
5177 /**
5178  * dp_reo_remap_config() - configure reo remap register value based
5179  *                         on nss configuration.
5180  * @soc: DP soc handle
5181  * @remap0: output parameter indicates reo remap 0 register value
5182  * @remap1: output parameter indicates reo remap 1 register value
5183  * @remap2: output parameter indicates reo remap 2 register value
5184  *
5185  * Based on the offload_radio value, the remap configuration
5186  * below is applied:
5187  *	0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
5188  *	1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
5189  *	2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
5190  *	3 - both Radios handled by NSS (remap not required)
5191  *	4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
5192  *
5193  * Return: bool type, true if remap is configured else false.
5194  */
5196 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
5197 			 uint32_t *remap1, uint32_t *remap2);
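
/*
 * Illustrative sketch only (dp_reo_remap_example() is a hypothetical
 * helper, not part of the driver): fetch the remap values and, only
 * when the current offload_radio configuration requires remapping,
 * hand them to the HAL layer; the actual register write helper is
 * elided here.
 */
static inline void dp_reo_remap_example(struct dp_soc *soc)
{
	uint32_t remap0 = 0, remap1 = 0, remap2 = 0;

	if (!dp_reo_remap_config(soc, &remap0, &remap1, &remap2))
		return;

	/* program the REO destination remap registers via HAL here */
}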
5198 
5199 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
5200 /**
5201  * dp_tx_comp_get_prefetched_params_from_hal_desc() - Get prefetched TX desc
5202  * @soc: DP soc handle
5203  * @tx_comp_hal_desc: HAL TX Comp Descriptor
5204  * @r_tx_desc: SW Tx Descriptor retrieved from HAL desc.
5205  *
5206  * Return: None
5207  */
5208 void dp_tx_comp_get_prefetched_params_from_hal_desc(
5209 					struct dp_soc *soc,
5210 					void *tx_comp_hal_desc,
5211 					struct dp_tx_desc_s **r_tx_desc);
5212 #endif
5213 #endif /* _DP_TYPES_H_ */
5214