xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_types.h (revision 2888b71da71bce103343119fa1b31f4a0cee07c8)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _DP_TYPES_H_
21 #define _DP_TYPES_H_
22 
23 #include <qdf_types.h>
24 #include <qdf_nbuf.h>
25 #include <qdf_lock.h>
26 #include <qdf_atomic.h>
27 #include <qdf_util.h>
28 #include <qdf_list.h>
29 #include <qdf_lro.h>
30 #include <queue.h>
31 #include <htt_common.h>
32 #include <htt.h>
33 #include <htt_stats.h>
34 #include <cdp_txrx_cmn.h>
35 #ifdef DP_MOB_DEFS
36 #include <cds_ieee80211_common.h>
37 #endif
38 #include <wdi_event_api.h>    /* WDI subscriber event list */
39 
40 #include "hal_hw_headers.h"
41 #include <hal_tx.h>
42 #include <hal_reo.h>
43 #include "wlan_cfg.h"
44 #include "hal_rx.h"
45 #include <hal_api.h>
46 #include <hal_api_mon.h>
47 #include "hal_rx.h"
48 //#include "hal_rx_flow.h"
49 
50 #define MAX_BW 8
51 #define MAX_RETRIES 4
52 #define MAX_RECEPTION_TYPES 4
53 
54 #define MINIDUMP_STR_SIZE 25
55 #ifndef REMOVE_PKT_LOG
56 #include <pktlog.h>
57 #endif
58 #include <dp_umac_reset.h>
59 
60 //#include "dp_tx.h"
61 
62 #define REPT_MU_MIMO 1
63 #define REPT_MU_OFDMA_MIMO 3
64 #define DP_VO_TID 6
65 /* MAX TID MAPS AVAILABLE PER PDEV */
66 #define DP_MAX_TID_MAPS 16
67 /** pad DSCP_TID_MAP_MAX with 6 to fix oob issue */
68 #define DSCP_TID_MAP_MAX (64 + 6)
69 #define DP_IP_DSCP_SHIFT 2
70 #define DP_IP_DSCP_MASK 0x3f
71 #define DP_FC0_SUBTYPE_QOS 0x80
72 #define DP_QOS_TID 0x0f
73 #define DP_IPV6_PRIORITY_SHIFT 20
74 #define MAX_MON_LINK_DESC_BANKS 2
75 #define DP_VDEV_ALL 0xff
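/*
 * Illustrative usage (editor's sketch, not part of the original header):
 * DSCP occupies the upper six bits of the IPv4 ToS byte, so for a
 * hypothetical "tos" value the DSCP macros above would be used as:
 *
 *   uint8_t dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
 *
 * For IPv6, the traffic class sits above the 20-bit flow label in the first
 * header word, hence DP_IPV6_PRIORITY_SHIFT.
 */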
76 
77 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
78 #define WLAN_DP_RESET_MON_BUF_RING_FILTER
79 #define MAX_TXDESC_POOLS 6
80 #else
81 #define MAX_TXDESC_POOLS 4
82 #endif
83 
84 #define MAX_RXDESC_POOLS 4
85 
86 /* Max no. of VDEV per PSOC */
87 #ifdef WLAN_PSOC_MAX_VDEVS
88 #define MAX_VDEV_CNT WLAN_PSOC_MAX_VDEVS
89 #else
90 #define MAX_VDEV_CNT 51
91 #endif
92 
93 /* Max no. of VDEVs, a PDEV can support */
94 #ifdef WLAN_PDEV_MAX_VDEVS
95 #define DP_PDEV_MAX_VDEVS WLAN_PDEV_MAX_VDEVS
96 #else
97 #define DP_PDEV_MAX_VDEVS 17
98 #endif
99 
100 #define EXCEPTION_DEST_RING_ID 0
101 #define MAX_IDLE_SCATTER_BUFS 16
102 #define DP_MAX_IRQ_PER_CONTEXT 12
103 #define DEFAULT_HW_PEER_ID 0xffff
104 
105 #define MAX_AST_AGEOUT_COUNT 128
106 
107 #ifdef TX_ADDR_INDEX_SEARCH
108 #define DP_TX_ADDR_SEARCH_ADDR_POLICY HAL_TX_ADDR_INDEX_SEARCH
109 #else
110 #define DP_TX_ADDR_SEARCH_ADDR_POLICY HAL_TX_ADDR_SEARCH_DEFAULT
111 #endif
112 
113 #define WBM_INT_ERROR_ALL 0
114 #define WBM_INT_ERROR_REO_NULL_BUFFER 1
115 #define WBM_INT_ERROR_REO_NULL_LINK_DESC 2
116 #define WBM_INT_ERROR_REO_NULL_MSDU_BUFF 3
117 #define WBM_INT_ERROR_REO_BUFF_REAPED 4
118 #define MAX_WBM_INT_ERROR_REASONS 5
119 
120 #define MAX_TX_HW_QUEUES MAX_TCL_DATA_RINGS
121 /* Maximum retries for Delba per tid per peer */
122 #define DP_MAX_DELBA_RETRY 3
123 
124 #ifdef AST_OFFLOAD_ENABLE
125 #define AST_OFFLOAD_ENABLE_STATUS 1
126 #else
127 #define AST_OFFLOAD_ENABLE_STATUS 0
128 #endif
129 
130 #ifdef FEATURE_MEC_OFFLOAD
131 #define FW_MEC_FW_OFFLOAD_ENABLED 1
132 #else
133 #define FW_MEC_FW_OFFLOAD_ENABLED 0
134 #endif
135 
136 #define PCP_TID_MAP_MAX 8
137 #define MAX_MU_USERS 37
138 
139 #define REO_CMD_EVENT_HIST_MAX 64
140 
141 #define DP_MAX_SRNGS 64
142 
143 /* 2G PHYB */
144 #define PHYB_2G_LMAC_ID 2
145 #define PHYB_2G_TARGET_PDEV_ID 2
146 
147 /* Flags for skipping s/w tid classification */
148 #define DP_TX_HW_DSCP_TID_MAP_VALID 0x1
149 #define DP_TXRX_HLOS_TID_OVERRIDE_ENABLED 0x2
150 #define DP_TX_MESH_ENABLED 0x4
151 #define DP_TX_INVALID_QOS_TAG 0xf
152 
153 #ifdef WLAN_SUPPORT_RX_FISA
154 #define FISA_FLOW_MAX_AGGR_COUNT        16 /* max flow aggregate count */
155 #endif
156 
157 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
158 #define DP_RX_REFILL_BUFF_POOL_SIZE  2048
159 #define DP_RX_REFILL_BUFF_POOL_BURST 64
160 #define DP_RX_REFILL_THRD_THRESHOLD  512
161 #endif
162 
163 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
164 #define DP_SKIP_BAR_UPDATE_TIMEOUT 5000
165 #endif
166 
167 #define DP_TX_MAGIC_PATTERN_INUSE	0xABCD1234
168 #define DP_TX_MAGIC_PATTERN_FREE	0xDEADBEEF
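/*
 * Editor's sketch (assumption about how the patterns above are used when
 * DP_TX_TRACKING is enabled): the magic word in a Tx descriptor is set to
 * the INUSE pattern on allocation and to the FREE pattern on release, so a
 * stale or double-freed descriptor can be caught with a simple check:
 *
 *   if (tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE)
 *           // descriptor is not in use; flag it and skip processing
 */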
169 
170 #ifdef IPA_OFFLOAD
171 #define DP_PEER_REO_STATS_TID_SHIFT 16
172 #define DP_PEER_REO_STATS_TID_MASK 0xFFFF0000
173 #define DP_PEER_REO_STATS_PEER_ID_MASK 0x0000FFFF
174 #define DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid) \
175 	((comb_peer_id_tid & DP_PEER_REO_STATS_TID_MASK) >> \
176 	DP_PEER_REO_STATS_TID_SHIFT)
177 #define DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid) \
178 	(comb_peer_id_tid & DP_PEER_REO_STATS_PEER_ID_MASK)
179 #endif
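/*
 * Illustrative values (editor's note, not part of the original header):
 * the combined peer_id/tid word carries the TID in the upper 16 bits and
 * the peer id in the lower 16 bits, so for comb_peer_id_tid = 0x00050023:
 *
 *   DP_PEER_GET_REO_STATS_TID(0x00050023)     == 0x0005  (tid 5)
 *   DP_PEER_GET_REO_STATS_PEER_ID(0x00050023) == 0x0023  (peer id 0x23)
 */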
180 
181 enum rx_pktlog_mode {
182 	DP_RX_PKTLOG_DISABLED = 0,
183 	DP_RX_PKTLOG_FULL,
184 	DP_RX_PKTLOG_LITE,
185 };
186 
187 /**
188  * enum m_copy_mode - Available mcopy modes
189  */
190 enum m_copy_mode {
191 	M_COPY_DISABLED = 0,
192 	M_COPY = 2,
193 	M_COPY_EXTENDED = 4,
194 };
195 
196 struct msdu_list {
197 	qdf_nbuf_t head;
198 	qdf_nbuf_t tail;
199 	uint32_t sum_len;
200 };
201 
202 struct dp_soc_cmn;
203 struct dp_pdev;
204 struct dp_vdev;
205 struct dp_tx_desc_s;
206 struct dp_soc;
207 union dp_rx_desc_list_elem_t;
208 struct cdp_peer_rate_stats_ctx;
209 struct cdp_soc_rate_stats_ctx;
210 struct dp_rx_fst;
211 struct dp_mon_filter;
212 struct dp_mon_mpdu;
213 #ifdef BE_PKTLOG_SUPPORT
214 struct dp_mon_filter_be;
215 #endif
216 struct dp_peer;
217 struct dp_txrx_peer;
218 
219 /**
220  * enum dp_peer_state - DP peer states
221  */
222 enum dp_peer_state {
223 	DP_PEER_STATE_NONE,
224 	DP_PEER_STATE_INIT,
225 	DP_PEER_STATE_ACTIVE,
226 	DP_PEER_STATE_LOGICAL_DELETE,
227 	DP_PEER_STATE_INACTIVE,
228 	DP_PEER_STATE_FREED,
229 	DP_PEER_STATE_INVALID,
230 };
231 
232 /**
233  * enum dp_mod_id - DP module IDs
234  */
235 enum dp_mod_id {
236 	DP_MOD_ID_TX_RX,
237 	DP_MOD_ID_TX_COMP,
238 	DP_MOD_ID_RX,
239 	DP_MOD_ID_HTT_COMP,
240 	DP_MOD_ID_RX_ERR,
241 	DP_MOD_ID_TX_PPDU_STATS,
242 	DP_MOD_ID_RX_PPDU_STATS,
243 	DP_MOD_ID_CDP,
244 	DP_MOD_ID_GENERIC_STATS,
245 	DP_MOD_ID_TX_MULTIPASS,
246 	DP_MOD_ID_TX_CAPTURE,
247 	DP_MOD_ID_NSS_OFFLOAD,
248 	DP_MOD_ID_CONFIG,
249 	DP_MOD_ID_HTT,
250 	DP_MOD_ID_IPA,
251 	DP_MOD_ID_AST,
252 	DP_MOD_ID_MCAST2UCAST,
253 	DP_MOD_ID_CHILD,
254 	DP_MOD_ID_MESH,
255 	DP_MOD_ID_TX_EXCEPTION,
256 	DP_MOD_ID_TDLS,
257 	DP_MOD_ID_MISC,
258 	DP_MOD_ID_MSCS,
259 	DP_MOD_ID_TX,
260 	DP_MOD_ID_SAWF,
261 	DP_MOD_ID_REINJECT,
262 	DP_MOD_ID_SCS,
263 	DP_MOD_ID_UMAC_RESET,
264 	DP_MOD_ID_MAX,
265 };
266 
267 #define DP_PDEV_ITERATE_VDEV_LIST(_pdev, _vdev) \
268 	TAILQ_FOREACH((_vdev), &(_pdev)->vdev_list, vdev_list_elem)
269 
270 #define DP_VDEV_ITERATE_PEER_LIST(_vdev, _peer) \
271 	TAILQ_FOREACH((_peer), &(_vdev)->peer_list, peer_list_elem)
272 
273 #define DP_PEER_ITERATE_ASE_LIST(_peer, _ase, _temp_ase) \
274 	TAILQ_FOREACH_SAFE((_ase), &(_peer)->ast_entry_list, ase_list_elem, (_temp_ase))
275 
276 #define DP_MUTEX_TYPE qdf_spinlock_t
277 
278 #define DP_FRAME_IS_MULTICAST(_a)  (*(_a) & 0x01)
279 #define DP_FRAME_IS_IPV4_MULTICAST(_a)  (*(_a) == 0x01)
280 
281 #define DP_FRAME_IS_IPV6_MULTICAST(_a)         \
282     ((_a)[0] == 0x33 &&                         \
283      (_a)[1] == 0x33)
284 
285 #define DP_FRAME_IS_BROADCAST(_a)              \
286     ((_a)[0] == 0xff &&                         \
287      (_a)[1] == 0xff &&                         \
288      (_a)[2] == 0xff &&                         \
289      (_a)[3] == 0xff &&                         \
290      (_a)[4] == 0xff &&                         \
291      (_a)[5] == 0xff)
292 #define DP_FRAME_IS_SNAP(_llc) ((_llc)->llc_dsap == 0xaa && \
293 		(_llc)->llc_ssap == 0xaa && \
294 		(_llc)->llc_un.type_snap.control == 0x3)
295 #define DP_FRAME_IS_LLC(typeorlen) ((typeorlen) >= 0x600)
296 #define DP_FRAME_FC0_TYPE_MASK 0x0c
297 #define DP_FRAME_FC0_TYPE_DATA 0x08
298 #define DP_FRAME_IS_DATA(_frame) \
299 	(((_frame)->i_fc[0] & DP_FRAME_FC0_TYPE_MASK) == DP_FRAME_FC0_TYPE_DATA)
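/*
 * Illustrative usage (editor's sketch; the handle_* callees are hypothetical):
 * classifying the destination address "da" of a received frame with the
 * macros above:
 *
 *   if (DP_FRAME_IS_BROADCAST(da))            // ff:ff:ff:ff:ff:ff
 *           handle_bcast(da);
 *   else if (DP_FRAME_IS_IPV6_MULTICAST(da))  // 33:33:xx:xx:xx:xx
 *           handle_ipv6_mcast(da);
 *   else if (DP_FRAME_IS_MULTICAST(da))       // group bit set in first octet
 *           handle_mcast(da);
 */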
300 
301 /**
302  * Macros to convert between hw and sw mac ids:
303  * mac ids used by hardware start from a value of 1, while
304  * those in host software start from a value of 0. Use the
305  * macros below to convert between mac ids used by
306  * software and hardware.
307  */
308 #define DP_SW2HW_MACID(id) ((id) + 1)
309 #define DP_HW2SW_MACID(id) ((id) > 0 ? ((id) - 1) : 0)
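/*
 * Illustrative values (editor's note): host mac id 0 corresponds to
 * hardware mac id 1 and vice versa:
 *
 *   DP_SW2HW_MACID(0) == 1
 *   DP_HW2SW_MACID(1) == 0
 *   DP_HW2SW_MACID(0) == 0   // clamped, never negative
 */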
310 
311 /**
312  * Number of Tx Queues
313  * enum and macro to define how many threshold levels are used
314  * for the AC based flow control
315  */
316 #ifdef QCA_AC_BASED_FLOW_CONTROL
317 enum dp_fl_ctrl_threshold {
318 	DP_TH_BE_BK = 0,
319 	DP_TH_VI,
320 	DP_TH_VO,
321 	DP_TH_HI,
322 };
323 
324 #define FL_TH_MAX (4)
325 #define FL_TH_VI_PERCENTAGE (80)
326 #define FL_TH_VO_PERCENTAGE (60)
327 #define FL_TH_HI_PERCENTAGE (40)
328 #endif
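/*
 * Editor's sketch (assumption about how the percentages above are applied):
 * per-AC stop thresholds are typically derived from the BE/BK threshold of
 * a flow pool, e.g. for a hypothetical be_bk_stop_th:
 *
 *   stop_th[DP_TH_BE_BK] = be_bk_stop_th;
 *   stop_th[DP_TH_VI]    = (be_bk_stop_th * FL_TH_VI_PERCENTAGE) / 100;
 *   stop_th[DP_TH_VO]    = (be_bk_stop_th * FL_TH_VO_PERCENTAGE) / 100;
 *   stop_th[DP_TH_HI]    = (be_bk_stop_th * FL_TH_HI_PERCENTAGE) / 100;
 */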
329 
330 /**
331  * enum dp_intr_mode
332  * @DP_INTR_INTEGRATED: Line interrupts
333  * @DP_INTR_MSI: MSI interrupts
334  * @DP_INTR_POLL: Polling
335  */
336 enum dp_intr_mode {
337 	DP_INTR_INTEGRATED = 0,
338 	DP_INTR_MSI,
339 	DP_INTR_POLL,
340 	DP_INTR_LEGACY_VIRTUAL_IRQ,
341 };
342 
343 /**
344  * enum dp_tx_frm_type
345  * @dp_tx_frm_std: Regular frame, no added header fragments
346  * @dp_tx_frm_tso: TSO segment, with a modified IP header added
347  * @dp_tx_frm_sg: SG segment
348  * @dp_tx_frm_audio: Audio frames, a custom LLC/SNAP header added
349  * @dp_tx_frm_me: Multicast to Unicast Converted frame
350  * @dp_tx_frm_raw: Raw Frame
351  */
352 enum dp_tx_frm_type {
353 	dp_tx_frm_std = 0,
354 	dp_tx_frm_tso,
355 	dp_tx_frm_sg,
356 	dp_tx_frm_audio,
357 	dp_tx_frm_me,
358 	dp_tx_frm_raw,
359 };
360 
361 /**
362  * enum dp_ast_type
363  * @dp_ast_type_wds: WDS peer AST type
364  * @dp_ast_type_static: static ast entry type
365  * @dp_ast_type_mec: Multicast echo ast entry type
366  */
367 enum dp_ast_type {
368 	dp_ast_type_wds = 0,
369 	dp_ast_type_static,
370 	dp_ast_type_mec,
371 };
372 
373 /**
374  * enum dp_nss_cfg
375  * @dp_nss_cfg_default: No radios are offloaded
376  * @dp_nss_cfg_first_radio: First radio offloaded
377  * @dp_nss_cfg_second_radio: Second radio offloaded
378  * @dp_nss_cfg_dbdc: Dual radios offloaded
379  * @dp_nss_cfg_dbtc: Three radios offloaded
380  */
381 enum dp_nss_cfg {
382 	dp_nss_cfg_default = 0x0,
383 	dp_nss_cfg_first_radio = 0x1,
384 	dp_nss_cfg_second_radio = 0x2,
385 	dp_nss_cfg_dbdc = 0x3,
386 	dp_nss_cfg_dbtc = 0x7,
387 	dp_nss_cfg_max
388 };
389 
390 #ifdef WLAN_TX_PKT_CAPTURE_ENH
391 #define DP_CPU_RING_MAP_1 1
392 #endif
393 
394 /**
395  * enum dp_cpu_ring_map_types - dp tx cpu ring map
396  * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
397  * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded
398  * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
399  * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
400  * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
401  * @DP_SINGLE_TX_RING_MAP: all CPUs mapped to a single ring to avoid out-of-order transmission
402  * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
403  */
404 enum dp_cpu_ring_map_types {
405 	DP_NSS_DEFAULT_MAP,
406 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
407 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
408 	DP_NSS_DBDC_OFFLOADED_MAP,
409 	DP_NSS_DBTC_OFFLOADED_MAP,
410 #ifdef WLAN_TX_PKT_CAPTURE_ENH
411 	DP_SINGLE_TX_RING_MAP,
412 #endif
413 	DP_NSS_CPU_RING_MAP_MAX
414 };
415 
416 /**
417  * struct dp_rx_nbuf_frag_info - Hold vaddr and paddr for a buffer
418  *
419  * @paddr: Physical address of the buffer allocated.
420  * @virt_addr.nbuf: Allocated nbuf, in case of the nbuf approach.
421  * @virt_addr.vaddr: Virtual address of the frag allocated, in case of the frag approach.
422  */
423 struct dp_rx_nbuf_frag_info {
424 	qdf_dma_addr_t paddr;
425 	union {
426 		qdf_nbuf_t nbuf;
427 		qdf_frag_t vaddr;
428 	} virt_addr;
429 };
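/*
 * Illustrative usage (editor's sketch; nbuf/frag_vaddr/paddr values are
 * hypothetical): only one member of virt_addr is meaningful for a given
 * buffer, depending on which RX buffer scheme is in use:
 *
 *   struct dp_rx_nbuf_frag_info info = { 0 };
 *
 *   info.virt_addr.nbuf = nbuf;          // nbuf approach
 *   // or
 *   info.virt_addr.vaddr = frag_vaddr;   // frag approach (e.g. mon dest)
 *
 *   info.paddr = paddr;                  // DMA address of the buffer
 */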
430 
431 /**
432  * enum dp_ctxt - context type
433  * enum dp_ctxt_type - context type
434  * @DP_RX_RING_HIST_TYPE: Datapath rx ring history
435  * @DP_RX_ERR_RING_HIST_TYPE: Datapath rx error ring history
436  * @DP_RX_REINJECT_RING_HIST_TYPE: Datapath reinject ring history
437  * @DP_RX_REFILL_RING_HIST_TYPE: Datapath rx refill ring history
438  * @DP_TX_HW_DESC_HIST_TYPE: Datapath TX HW descriptor history
439  * @DP_MON_SOC_TYPE: Datapath monitor soc context
440  * @DP_MON_PDEV_TYPE: Datapath monitor pdev context
441  * @DP_MON_STATUS_BUF_HIST_TYPE: DP monitor status buffer history
442  */
443 enum dp_ctxt_type {
444 	DP_PDEV_TYPE,
445 	DP_RX_RING_HIST_TYPE,
446 	DP_RX_ERR_RING_HIST_TYPE,
447 	DP_RX_REINJECT_RING_HIST_TYPE,
448 	DP_TX_TCL_HIST_TYPE,
449 	DP_TX_COMP_HIST_TYPE,
450 	DP_FISA_RX_FT_TYPE,
451 	DP_RX_REFILL_RING_HIST_TYPE,
452 	DP_TX_HW_DESC_HIST_TYPE,
453 	DP_MON_SOC_TYPE,
454 	DP_MON_PDEV_TYPE,
455 	DP_MON_STATUS_BUF_HIST_TYPE,
456 };
457 
458 /**
459  * enum dp_desc_type - source type for multiple pages allocation
460  * @DP_TX_DESC_TYPE: DP SW TX descriptor
461  * @DP_TX_EXT_DESC_TYPE: DP TX msdu extension descriptor
462  * @DP_TX_EXT_DESC_LINK_TYPE: DP link descriptor for msdu ext_desc
463  * @DP_TX_TSO_DESC_TYPE: DP TX TSO descriptor
464  * @DP_TX_TSO_NUM_SEG_TYPE: DP TX number of segments
465  * @DP_RX_DESC_BUF_TYPE: DP RX SW descriptor
466  * @DP_RX_DESC_STATUS_TYPE: DP RX SW descriptor for monitor status
467  * @DP_HW_LINK_DESC_TYPE: DP HW link descriptor
468  * @DP_HW_CC_SPT_PAGE_TYPE: DP pages for HW CC secondary page table
469  */
470 enum dp_desc_type {
471 	DP_TX_DESC_TYPE,
472 	DP_TX_EXT_DESC_TYPE,
473 	DP_TX_EXT_DESC_LINK_TYPE,
474 	DP_TX_TSO_DESC_TYPE,
475 	DP_TX_TSO_NUM_SEG_TYPE,
476 	DP_RX_DESC_BUF_TYPE,
477 	DP_RX_DESC_STATUS_TYPE,
478 	DP_HW_LINK_DESC_TYPE,
479 	DP_HW_CC_SPT_PAGE_TYPE,
480 };
481 
482 /**
483  * struct rx_desc_pool
484  * @pool_size: number of RX descriptor in the pool
485  * @elem_size: Element size
486  * @desc_pages: Multi page descriptors
487  * @array: pointer to array of RX descriptor
488  * @freelist: pointer to free RX descriptor link list
489  * @lock: Protection for the RX descriptor pool
490  * @owner: owner for nbuf
491  * @buf_size: Buffer size
492  * @buf_alignment: Buffer alignment
493  * @rx_mon_dest_frag_enable: Enable frag processing for mon dest buffer
494  * @desc_type: type of desc this pool serves
495  */
496 struct rx_desc_pool {
497 	uint32_t pool_size;
498 #ifdef RX_DESC_MULTI_PAGE_ALLOC
499 	uint16_t elem_size;
500 	struct qdf_mem_multi_page_t desc_pages;
501 #else
502 	union dp_rx_desc_list_elem_t *array;
503 #endif
504 	union dp_rx_desc_list_elem_t *freelist;
505 	qdf_spinlock_t lock;
506 	uint8_t owner;
507 	uint16_t buf_size;
508 	uint8_t buf_alignment;
509 	bool rx_mon_dest_frag_enable;
510 	enum dp_desc_type desc_type;
511 };
512 
513 /**
514  * struct dp_tx_ext_desc_elem_s
515  * @next: next extension descriptor pointer
516  * @vaddr: hlos virtual address pointer
517  * @paddr: physical address pointer for descriptor
518  * @flags: mark features for extension descriptor
519  * @me_buffer: Pointer to ME buffer - store this so that it can be freed on
520  *		Tx completion of ME packet
521  * @tso_desc: Pointer to Tso desc
522  * @tso_num_desc: Pointer to tso_num_desc
523  */
524 struct dp_tx_ext_desc_elem_s {
525 	struct dp_tx_ext_desc_elem_s *next;
526 	void *vaddr;
527 	qdf_dma_addr_t paddr;
528 	uint16_t flags;
529 	struct dp_tx_me_buf_t *me_buffer;
530 	struct qdf_tso_seg_elem_t *tso_desc;
531 	struct qdf_tso_num_seg_elem_t *tso_num_desc;
532 };
533 
534 /**
535  * struct dp_tx_ext_desc_pool_s - Tx Extension Descriptor Pool
536  * @elem_count: Number of descriptors in the pool
537  * @elem_size: Size of each descriptor
538  * @num_free: Number of free descriptors
539  * @freelist: list of free extension descriptor elements
540  * @desc_pages: multiple page allocation information for actual descriptors
541  * @link_elem_size: size of the link descriptor in cacheable memory used for
542  * 		    chaining the extension descriptors
543  * @desc_link_pages: multiple page allocation information for link descriptors
544  */
545 struct dp_tx_ext_desc_pool_s {
546 	uint16_t elem_count;
547 	int elem_size;
548 	uint16_t num_free;
549 	struct qdf_mem_multi_page_t desc_pages;
550 	int link_elem_size;
551 	struct qdf_mem_multi_page_t desc_link_pages;
552 	struct dp_tx_ext_desc_elem_s *freelist;
553 	qdf_spinlock_t lock;
554 	qdf_dma_mem_context(memctx);
555 };
556 
557 /**
558  * struct dp_tx_desc_s - Tx Descriptor
559  * @next: Next in the chain of descriptors in freelist or in the completion list
560  * @nbuf: Buffer Address
561  * @msdu_ext_desc: MSDU extension descriptor
562  * @id: Descriptor ID
563  * @vdev_id: vdev_id of vdev over which the packet was transmitted
564  * @pdev: Handle to pdev
565  * @pool_id: Pool ID - used when releasing the descriptor
566  * @flags: Flags to track the state of descriptor and special frame handling
567  * @comp: HAL Tx completion descriptor contents for this descriptor
568  * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet).
569  * 		   This is maintained in descriptor to allow more efficient
570  * 		   processing in completion event processing code.
571  * 		   This field is filled in with the htt_pkt_type enum.
572  * @buffer_src: buffer source TQM, REO, FW etc.
573  * @frm_type: Frame Type - ToDo check if this is redundant
574  * @pkt_offset: Offset from which the actual packet data starts
575  * @pool: handle to flow_pool this descriptor belongs to.
576  */
577 struct dp_tx_desc_s {
578 	struct dp_tx_desc_s *next;
579 	qdf_nbuf_t nbuf;
580 	uint16_t length;
581 #ifdef DP_TX_TRACKING
582 	uint32_t magic;
583 	uint64_t timestamp_tick;
584 #endif
585 	uint16_t flags;
586 	uint32_t id;
587 	qdf_dma_addr_t dma_addr;
588 	uint8_t vdev_id;
589 	uint8_t tx_status;
590 	uint16_t peer_id;
591 	struct dp_pdev *pdev;
592 	uint8_t tx_encap_type:2,
593 		buffer_src:3,
594 		reserved:3;
595 	uint8_t frm_type;
596 	uint8_t pkt_offset;
597 	uint8_t  pool_id;
598 	unsigned char *shinfo_addr;
599 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
600 	qdf_ktime_t timestamp;
601 	struct hal_tx_desc_comp_s comp;
602 };
603 
604 #ifdef QCA_AC_BASED_FLOW_CONTROL
605 /**
606  * enum flow_pool_status - flow pool status
607  * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
608  *				and network queues are unpaused
609  * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
610  *			   and network queues are paused
611  * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
612  * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
613  * @FLOW_POOL_ACTIVE_UNPAUSED_REATTACH: pool is reattached but network
614  *					queues are not paused
615  */
616 enum flow_pool_status {
617 	FLOW_POOL_ACTIVE_UNPAUSED = 0,
618 	FLOW_POOL_ACTIVE_PAUSED = 1,
619 	FLOW_POOL_BE_BK_PAUSED = 2,
620 	FLOW_POOL_VI_PAUSED = 3,
621 	FLOW_POOL_VO_PAUSED = 4,
622 	FLOW_POOL_INVALID = 5,
623 	FLOW_POOL_INACTIVE = 6,
624 	FLOW_POOL_ACTIVE_UNPAUSED_REATTACH = 7,
625 };
626 
627 #else
628 /**
629  * enum flow_pool_status - flow pool status
630  * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
631  *				and network queues are unpaused
632  * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
633  *			   and network queues are paused
634  * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
635  * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
636  */
637 enum flow_pool_status {
638 	FLOW_POOL_ACTIVE_UNPAUSED = 0,
639 	FLOW_POOL_ACTIVE_PAUSED = 1,
640 	FLOW_POOL_BE_BK_PAUSED = 2,
641 	FLOW_POOL_VI_PAUSED = 3,
642 	FLOW_POOL_VO_PAUSED = 4,
643 	FLOW_POOL_INVALID = 5,
644 	FLOW_POOL_INACTIVE = 6,
645 };
646 
647 #endif
648 
649 /**
650  * struct dp_tx_tso_seg_pool_s
651  * @pool_size: total number of pool elements
652  * @num_free: free element count
653  * @freelist: first free element pointer
654  * @desc_pages: multiple page allocation information for actual descriptors
655  * @lock: lock for accessing the pool
656  */
657 struct dp_tx_tso_seg_pool_s {
658 	uint16_t pool_size;
659 	uint16_t num_free;
660 	struct qdf_tso_seg_elem_t *freelist;
661 	struct qdf_mem_multi_page_t desc_pages;
662 	qdf_spinlock_t lock;
663 };
664 
665 /**
666  * struct dp_tx_tso_num_seg_pool_s - TSO num-of-segments element pool
667  * @num_seg_pool_size: total number of pool elements
668  * @num_free: free element count
669  * @freelist: first free element pointer
670  * @desc_pages: multiple page allocation information for actual descriptors
671  * @lock: lock for accessing the pool
672  */
673 
674 struct dp_tx_tso_num_seg_pool_s {
675 	uint16_t num_seg_pool_size;
676 	uint16_t num_free;
677 	struct qdf_tso_num_seg_elem_t *freelist;
678 	struct qdf_mem_multi_page_t desc_pages;
679 	/*tso mutex */
680 	qdf_spinlock_t lock;
681 };
682 
683 /**
684  * struct dp_tx_desc_pool_s - Tx Descriptor pool information
685  * @elem_size: Size of each descriptor in the pool
686  * @pool_size: Total number of descriptors in the pool
687  * @num_free: Number of free descriptors
688  * @num_allocated: Number of used descriptors
689  * @freelist: Chain of free descriptors
690  * @desc_pages: multiple page allocation information for actual descriptors
691  * @num_invalid_bin: Deleted pool with pending Tx completions.
692  * @flow_pool_array_lock: Lock when operating on flow_pool_array.
693  * @flow_pool_array: List of allocated flow pools
694  * @lock: Lock for descriptor allocation/free from/to the pool
695  */
696 struct dp_tx_desc_pool_s {
697 	uint16_t elem_size;
698 	uint32_t num_allocated;
699 	struct dp_tx_desc_s *freelist;
700 	struct qdf_mem_multi_page_t desc_pages;
701 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
702 	uint16_t pool_size;
703 	uint8_t flow_pool_id;
704 	uint8_t num_invalid_bin;
705 	uint16_t avail_desc;
706 	enum flow_pool_status status;
707 	enum htt_flow_type flow_type;
708 #ifdef QCA_AC_BASED_FLOW_CONTROL
709 	uint16_t stop_th[FL_TH_MAX];
710 	uint16_t start_th[FL_TH_MAX];
711 	qdf_time_t max_pause_time[FL_TH_MAX];
712 	qdf_time_t latest_pause_time[FL_TH_MAX];
713 #else
714 	uint16_t stop_th;
715 	uint16_t start_th;
716 #endif
717 	uint16_t pkt_drop_no_desc;
718 	qdf_spinlock_t flow_pool_lock;
719 	uint8_t pool_create_cnt;
720 	void *pool_owner_ctx;
721 #else
722 	uint16_t elem_count;
723 	uint32_t num_free;
724 	qdf_spinlock_t lock;
725 #endif
726 };
727 
728 /**
729  * struct dp_txrx_pool_stats - flow pool related statistics
730  * @pool_map_count: flow pool map received
731  * @pool_unmap_count: flow pool unmap received
732  * @pkt_drop_no_pool: packets dropped due to unavailability of pool
733  */
734 struct dp_txrx_pool_stats {
735 	uint16_t pool_map_count;
736 	uint16_t pool_unmap_count;
737 	uint16_t pkt_drop_no_pool;
738 };
739 
740 /**
741  * struct dp_srng - DP srng structure
742  * @hal_srng: hal_srng handle
743  * @base_vaddr_unaligned: un-aligned virtual base address of the srng ring
744  * @base_vaddr_aligned: aligned virtual base address of the srng ring
745  * @base_paddr_unaligned: un-aligned physical base address of the srng ring
746  * @base_paddr_aligned: aligned physical base address of the srng ring
747  * @alloc_size: size of the srng ring
748  * @cached: is the srng ring memory cached or un-cached memory
749  * @irq: irq number of the srng ring
750  * @num_entries: number of entries in the srng ring
751  * @is_mem_prealloc: Is this srng memory pre-allocated
752  * @crit_thresh: Critical threshold for near-full processing of this srng
753  * @safe_thresh: Safe threshold for near-full processing of this srng
754  * @near_full: Flag to indicate srng is near-full
755  */
756 struct dp_srng {
757 	hal_ring_handle_t hal_srng;
758 	void *base_vaddr_unaligned;
759 	void *base_vaddr_aligned;
760 	qdf_dma_addr_t base_paddr_unaligned;
761 	qdf_dma_addr_t base_paddr_aligned;
762 	uint32_t alloc_size;
763 	uint8_t cached;
764 	int irq;
765 	uint32_t num_entries;
766 #ifdef DP_MEM_PRE_ALLOC
767 	uint8_t is_mem_prealloc;
768 #endif
769 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
770 	uint16_t crit_thresh;
771 	uint16_t safe_thresh;
772 	qdf_atomic_t near_full;
773 #endif
774 };
775 
776 struct dp_rx_reorder_array_elem {
777 	qdf_nbuf_t head;
778 	qdf_nbuf_t tail;
779 };
780 
781 #define DP_RX_BA_INACTIVE 0
782 #define DP_RX_BA_ACTIVE 1
783 #define DP_RX_BA_IN_PROGRESS 2
784 struct dp_reo_cmd_info {
785 	uint16_t cmd;
786 	enum hal_reo_cmd_type cmd_type;
787 	void *data;
788 	void (*handler)(struct dp_soc *, void *, union hal_reo_status *);
789 	TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
790 };
791 
792 struct dp_peer_delay_stats {
793 	struct cdp_delay_tid_stats delay_tid_stats[CDP_MAX_DATA_TIDS]
794 						  [CDP_MAX_TXRX_CTX];
795 };
796 
797 /* Rx TID defrag*/
798 struct dp_rx_tid_defrag {
799 	/* TID */
800 	int tid;
801 
802 	/* only used for defrag right now */
803 	TAILQ_ENTRY(dp_rx_tid_defrag) defrag_waitlist_elem;
804 
805 	/* Store dst desc for reinjection */
806 	hal_ring_desc_t dst_ring_desc;
807 	struct dp_rx_desc *head_frag_desc;
808 
809 	/* Sequence and fragments that are being processed currently */
810 	uint32_t curr_seq_num;
811 	uint32_t curr_frag_num;
812 
813 	/* TODO: Check the following while adding defragmentation support */
814 	struct dp_rx_reorder_array_elem *array;
815 	/* base - single rx reorder element used for non-aggr cases */
816 	struct dp_rx_reorder_array_elem base;
817 	/* rx_tid lock */
818 	qdf_spinlock_t defrag_tid_lock;
819 
820 	/* head PN number */
821 	uint64_t pn128[2];
822 
823 	uint32_t defrag_timeout_ms;
824 
825 	/* defrag usage only, dp_peer pointer related with this tid */
826 	struct dp_txrx_peer *defrag_peer;
827 };
828 
829 /* Rx TID */
830 struct dp_rx_tid {
831 	/* TID */
832 	int tid;
833 
834 	/* Num of addba requests */
835 	uint32_t num_of_addba_req;
836 
837 	/* Num of addba responses */
838 	uint32_t num_of_addba_resp;
839 
840 	/* Num of delba requests */
841 	uint32_t num_of_delba_req;
842 
843 	/* Num of addba responses successful */
844 	uint32_t num_addba_rsp_success;
845 
846 	/* Num of addba responses failed */
847 	uint32_t num_addba_rsp_failed;
848 
849 	/* pn size */
850 	uint8_t pn_size;
851 	/* REO TID queue descriptors */
852 	void *hw_qdesc_vaddr_unaligned;
853 	void *hw_qdesc_vaddr_aligned;
854 	qdf_dma_addr_t hw_qdesc_paddr_unaligned;
855 	qdf_dma_addr_t hw_qdesc_paddr;
856 	uint32_t hw_qdesc_alloc_size;
857 
858 	/* RX ADDBA session state */
859 	int ba_status;
860 
861 	/* RX BA window size */
862 	uint16_t ba_win_size;
863 
864 	/* Starting sequence number in Addba request */
865 	uint16_t startseqnum;
866 	uint16_t dialogtoken;
867 	uint16_t statuscode;
868 	/* user defined ADDBA response status code */
869 	uint16_t userstatuscode;
870 
871 	/* rx_tid lock */
872 	qdf_spinlock_t tid_lock;
873 
874 	/* Store ppdu_id when 2k exception is received */
875 	uint32_t ppdu_id_2k;
876 
877 	/* Delba Tx completion status */
878 	uint8_t delba_tx_status;
879 
880 	/* Delba Tx retry count */
881 	uint8_t delba_tx_retry;
882 
883 	/* Delba stats */
884 	uint32_t delba_tx_success_cnt;
885 	uint32_t delba_tx_fail_cnt;
886 
887 	/* Delba reason code for retries */
888 	uint8_t delba_rcode;
889 
890 	/* Coex Override preserved windows size 1 based */
891 	uint16_t rx_ba_win_size_override;
892 #ifdef IPA_OFFLOAD
893 	/* rx msdu count per tid */
894 	struct cdp_pkt_info rx_msdu_cnt;
895 #endif
896 
897 };
898 
899 /**
900  * struct dp_intr_stats - DP Interrupt Stats for an interrupt context
901  * @num_tx_ring_masks: interrupts with tx_ring_mask set
902  * @num_rx_ring_masks: interrupts with rx_ring_mask set
903  * @num_rx_mon_ring_masks: interrupts with rx_mon_ring_mask set
904  * @num_rx_err_ring_masks: interrupts with rx_err_ring_mask set
905  * @num_rx_wbm_rel_ring_masks: interrupts with rx_wbm_rel_ring_mask set
906  * @num_reo_status_ring_masks: interrupts with reo_status_ring_mask set
907  * @num_rxdma2host_ring_masks: interrupts with rxdma2host_ring_mask set
908  * @num_host2rxdma_ring_masks: interrupts with host2rxdma_ring_mask set
909  * @num_host2rxdma_mon_ring_masks: interrupts with host2rxdma_mon_ring_mask set
910  * @num_rx_ring_near_full_masks: Near-full interrupts for REO DST ring
911  * @num_tx_comp_ring_near_full_masks: Near-full interrupts for TX completion
912  * @num_rx_wbm_rel_ring_near_full_masks: total number of times the wbm rel ring
913  *                                       near full interrupt was received
914  * @num_reo_status_ring_near_full_masks: total number of times the reo status
915  *                                       near full interrupt was received
916  * @num_near_full_masks: total number of times the near full interrupt
917  *                       was received
918  * @num_masks: total number of times the interrupt was received
919  * @num_host2txmon_ring_masks: interrupts with host2txmon_ring_mask set
922  * @num_tx_mon_ring_masks: interrupts with tx_mon_ring_mask set
923  *
924  * Counter for individual masks are incremented only if there are any packets
925  * on that ring.
926  */
927 struct dp_intr_stats {
928 	uint32_t num_tx_ring_masks[MAX_TCL_DATA_RINGS];
929 	uint32_t num_rx_ring_masks[MAX_REO_DEST_RINGS];
930 	uint32_t num_rx_mon_ring_masks;
931 	uint32_t num_rx_err_ring_masks;
932 	uint32_t num_rx_wbm_rel_ring_masks;
933 	uint32_t num_reo_status_ring_masks;
934 	uint32_t num_rxdma2host_ring_masks;
935 	uint32_t num_host2rxdma_ring_masks;
936 	uint32_t num_host2rxdma_mon_ring_masks;
937 	uint32_t num_rx_ring_near_full_masks[MAX_REO_DEST_RINGS];
938 	uint32_t num_tx_comp_ring_near_full_masks[MAX_TCL_DATA_RINGS];
939 	uint32_t num_rx_wbm_rel_ring_near_full_masks;
940 	uint32_t num_reo_status_ring_near_full_masks;
941 	uint32_t num_host2txmon_ring__masks;
942 	uint32_t num_near_full_masks;
943 	uint32_t num_masks;
944 	uint32_t num_tx_mon_ring_masks;
945 };
946 
947 #ifdef DP_UMAC_HW_RESET_SUPPORT
948 /**
949  * struct dp_intr_bkp - DP per interrupt context ring masks old state
950  * @tx_ring_mask: WBM Tx completion rings (0-2) associated with this napi ctxt
951  * @rx_ring_mask: Rx REO rings (0-3) associated with this interrupt context
952  * @rx_mon_ring_mask: Rx monitor ring mask (0-2)
953  * @rx_err_ring_mask: REO Exception Ring
954  * @rx_wbm_rel_ring_mask: WBM2SW Rx Release Ring
955  * @reo_status_ring_mask: REO command response ring
956  * @rxdma2host_ring_mask: RXDMA to host destination ring
957  * @host2rxdma_ring_mask: Host to RXDMA buffer ring
958  * @host2rxdma_mon_ring_mask: Host to RXDMA monitor  buffer ring
959  * @host2txmon_ring_mask: Tx monitor buffer ring
960  * @tx_mon_ring_mask: Tx monitor ring mask (0-2)
961  *
962  */
963 struct dp_intr_bkp {
964 	uint8_t tx_ring_mask;
965 	uint8_t rx_ring_mask;
966 	uint8_t rx_mon_ring_mask;
967 	uint8_t rx_err_ring_mask;
968 	uint8_t rx_wbm_rel_ring_mask;
969 	uint8_t reo_status_ring_mask;
970 	uint8_t rxdma2host_ring_mask;
971 	uint8_t host2rxdma_ring_mask;
972 	uint8_t host2rxdma_mon_ring_mask;
973 	uint8_t host2txmon_ring_mask;
974 	uint8_t tx_mon_ring_mask;
975 };
976 #endif
977 
978 /* per interrupt context  */
979 struct dp_intr {
980 	uint8_t tx_ring_mask;   /* WBM Tx completion rings (0-2)
981 				associated with this napi context */
982 	uint8_t rx_ring_mask;   /* Rx REO rings (0-3) associated
983 				with this interrupt context */
984 	uint8_t rx_mon_ring_mask;  /* Rx monitor ring mask (0-2) */
985 	uint8_t rx_err_ring_mask; /* REO Exception Ring */
986 	uint8_t rx_wbm_rel_ring_mask; /* WBM2SW Rx Release Ring */
987 	uint8_t reo_status_ring_mask; /* REO command response ring */
988 	uint8_t rxdma2host_ring_mask; /* RXDMA to host destination ring */
989 	uint8_t host2rxdma_ring_mask; /* Host to RXDMA buffer ring */
990 	/* Host to RXDMA monitor  buffer ring */
991 	uint8_t host2rxdma_mon_ring_mask;
992 	/* RX REO rings near full interrupt mask */
993 	uint8_t rx_near_full_grp_1_mask;
994 	/* RX REO rings near full interrupt mask */
995 	uint8_t rx_near_full_grp_2_mask;
996 	/* WBM TX completion rings near full interrupt mask */
997 	uint8_t tx_ring_near_full_mask;
998 	uint8_t host2txmon_ring_mask; /* Tx monitor buffer ring */
999 	uint8_t tx_mon_ring_mask;  /* Tx monitor ring mask (0-2) */
1000 	struct dp_soc *soc;    /* Reference to SoC structure ,
1001 				to get DMA ring handles */
1002 	qdf_lro_ctx_t lro_ctx;
1003 	uint8_t dp_intr_id;
1004 
1005 	/* Interrupt Stats for individual masks */
1006 	struct dp_intr_stats intr_stats;
1007 	uint8_t umac_reset_intr_mask;  /* UMAC reset interrupt mask */
1008 };
1009 
1010 #define REO_DESC_FREELIST_SIZE 64
1011 #define REO_DESC_FREE_DEFER_MS 1000
1012 struct reo_desc_list_node {
1013 	qdf_list_node_t node;
1014 	unsigned long free_ts;
1015 	struct dp_rx_tid rx_tid;
1016 	bool resend_update_reo_cmd;
1017 	uint32_t pending_ext_desc_size;
1018 #ifdef REO_QDESC_HISTORY
1019 	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
1020 #endif
1021 };
1022 
1023 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
1024 #define REO_DESC_DEFERRED_FREELIST_SIZE 256
1025 #define REO_DESC_DEFERRED_FREE_MS 30000
1026 
1027 struct reo_desc_deferred_freelist_node {
1028 	qdf_list_node_t node;
1029 	unsigned long free_ts;
1030 	void *hw_qdesc_vaddr_unaligned;
1031 	qdf_dma_addr_t hw_qdesc_paddr;
1032 	uint32_t hw_qdesc_alloc_size;
1033 #ifdef REO_QDESC_HISTORY
1034 	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
1035 #endif /* REO_QDESC_HISTORY */
1036 };
1037 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
1038 
1039 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1040 /**
1041  * struct reo_cmd_event_record: Elements to record for each reo command
1042  * @cmd_type: reo command type
1043  * @cmd_return_status: reo command post status
1044  * @timestamp: record timestamp for the reo command
1045  */
1046 struct reo_cmd_event_record {
1047 	enum hal_reo_cmd_type cmd_type;
1048 	uint8_t cmd_return_status;
1049 	uint64_t timestamp;
1050 };
1051 
1052 /**
1053  * struct reo_cmd_event_history: Account for reo cmd events
1054  * @index: record number
1055  * @cmd_record: list of records
1056  */
1057 struct reo_cmd_event_history {
1058 	qdf_atomic_t index;
1059 	struct reo_cmd_event_record cmd_record[REO_CMD_EVENT_HIST_MAX];
1060 };
1061 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1062 
1063 /* SoC level data path statistics */
1064 struct dp_soc_stats {
1065 	struct {
1066 		uint32_t added;
1067 		uint32_t deleted;
1068 		uint32_t aged_out;
1069 		uint32_t map_err;
1070 		uint32_t ast_mismatch;
1071 	} ast;
1072 
1073 	struct {
1074 		uint32_t added;
1075 		uint32_t deleted;
1076 	} mec;
1077 
1078 	/* SOC level TX stats */
1079 	struct {
1080 		/* Total packets transmitted */
1081 		struct cdp_pkt_info egress[MAX_TCL_DATA_RINGS];
1082 		/* Enqueues per tcl ring */
1083 		uint32_t tcl_enq[MAX_TCL_DATA_RINGS];
1084 		/* packets dropped on tx because of no peer */
1085 		struct cdp_pkt_info tx_invalid_peer;
1086 		/* descriptors in each tcl ring */
1087 		uint32_t tcl_ring_full[MAX_TCL_DATA_RINGS];
1088 		/* Descriptors in use at soc */
1089 		uint32_t desc_in_use;
1090 		/* tqm_release_reason == FW removed */
1091 		uint32_t dropped_fw_removed;
1092 		/* tx completion release_src != TQM or FW */
1093 		uint32_t invalid_release_source;
1094 		/* tx completion wbm_internal_error */
1095 		uint32_t wbm_internal_error[MAX_WBM_INT_ERROR_REASONS];
1096 		/* tx completion non_wbm_internal_error */
1097 		uint32_t non_wbm_internal_err;
1098 		/* TX Comp loop packet limit hit */
1099 		uint32_t tx_comp_loop_pkt_limit_hit;
1100 		/* Head pointer Out of sync at the end of dp_tx_comp_handler */
1101 		uint32_t hp_oos2;
1102 		/* tx desc freed as part of vdev detach */
1103 		uint32_t tx_comp_exception;
1104 		/* TQM drops after/during peer delete */
1105 		uint64_t tqm_drop_no_peer;
1106 		/* Number of tx completions reaped per WBM2SW release ring */
1107 		uint32_t tx_comp[MAX_TCL_DATA_RINGS];
1108 		/* Number of tx completions force freed */
1109 		uint32_t tx_comp_force_freed;
1110 	} tx;
1111 
1112 	/* SOC level RX stats */
1113 	struct {
1114 		/* Total rx packets count */
1115 		struct cdp_pkt_info ingress;
1116 		/* Rx errors */
1117 		/* Total Packets in Rx Error ring */
1118 		uint32_t err_ring_pkts;
1119 		/* No of Fragments */
1120 		uint32_t rx_frags;
1121 		/* No of incomplete fragments in waitlist */
1122 		uint32_t rx_frag_wait;
1123 		/* Fragments dropped due to errors */
1124 		uint32_t rx_frag_err;
1125 		/* Fragments received OOR causing sequence num mismatch */
1126 		uint32_t rx_frag_oor;
1127 		/* Fragments dropped due to len errors in skb */
1128 		uint32_t rx_frag_err_len_error;
1129 		/* Fragments dropped due to no peer found */
1130 		uint32_t rx_frag_err_no_peer;
1131 		/* No of reinjected packets */
1132 		uint32_t reo_reinject;
1133 		/* Reap loop packet limit hit */
1134 		uint32_t reap_loop_pkt_limit_hit;
1135 		/* Head pointer Out of sync at the end of dp_rx_process */
1136 		uint32_t hp_oos2;
1137 		/* Rx ring near full */
1138 		uint32_t near_full;
1139 		/* Break ring reaping as not all scattered msdu received */
1140 		uint32_t msdu_scatter_wait_break;
1141 		/* Number of bar frames received */
1142 		uint32_t bar_frame;
1143 		/* Number of frames routed from rxdma */
1144 		uint32_t rxdma2rel_route_drop;
1145 		/* Number of frames routed from reo*/
1146 		uint32_t reo2rel_route_drop;
1147 
1148 		struct {
1149 			/* Invalid RBM error count */
1150 			uint32_t invalid_rbm;
1151 			/* Invalid VDEV Error count */
1152 			uint32_t invalid_vdev;
1153 			/* Invalid PDEV error count */
1154 			uint32_t invalid_pdev;
1155 
1156 			/* Packets delivered to stack with no related peer */
1157 			uint32_t pkt_delivered_no_peer;
1158 			/* Defrag peer uninit error count */
1159 			uint32_t defrag_peer_uninit;
1160 			/* Invalid sa_idx or da_idx*/
1161 			uint32_t invalid_sa_da_idx;
1162 			/* MSDU DONE failures */
1163 			uint32_t msdu_done_fail;
1164 			/* Invalid PEER Error count */
1165 			struct cdp_pkt_info rx_invalid_peer;
1166 			/* Invalid PEER ID count */
1167 			struct cdp_pkt_info rx_invalid_peer_id;
1168 			/* Invalid packet length */
1169 			struct cdp_pkt_info rx_invalid_pkt_len;
1170 			/* HAL ring access Fail error count */
1171 			uint32_t hal_ring_access_fail;
1172 			/* HAL ring access full Fail error count */
1173 			uint32_t hal_ring_access_full_fail;
1174 			/* RX DMA error count */
1175 			uint32_t rxdma_error[HAL_RXDMA_ERR_MAX];
1176 			/* RX REO DEST Desc Invalid Magic count */
1177 			uint32_t rx_desc_invalid_magic;
1178 			/* REO Error count */
1179 			uint32_t reo_error[HAL_REO_ERR_MAX];
1180 			/* HAL REO ERR Count */
1181 			uint32_t hal_reo_error[MAX_REO_DEST_RINGS];
1182 			/* HAL REO DEST Duplicate count */
1183 			uint32_t hal_reo_dest_dup;
1184 			/* HAL WBM RELEASE Duplicate count */
1185 			uint32_t hal_wbm_rel_dup;
1186 			/* HAL RXDMA error Duplicate count */
1187 			uint32_t hal_rxdma_err_dup;
1188 			/* ipa smmu map duplicate count */
1189 			uint32_t ipa_smmu_map_dup;
1190 			/* ipa smmu unmap duplicate count */
1191 			uint32_t ipa_smmu_unmap_dup;
1192 			/* ipa smmu unmap while ipa pipes is disabled */
1193 			/* ipa smmu unmap while ipa pipes are disabled */
1194 			/* REO cmd send fail/requeue count */
1195 			uint32_t reo_cmd_send_fail;
1196 			/* REO cmd send drain count */
1197 			uint32_t reo_cmd_send_drain;
1198 			/* RX msdu drop count due to scatter */
1199 			uint32_t scatter_msdu;
1200 			/* RX msdu drop count due to invalid cookie */
1201 			uint32_t invalid_cookie;
1202 			/* Count of stale cookie read in RX path */
1203 			uint32_t stale_cookie;
1204 			/* Delba sent count due to RX 2k jump */
1205 			uint32_t rx_2k_jump_delba_sent;
1206 			/* RX 2k jump msdu indicated to stack count */
1207 			uint32_t rx_2k_jump_to_stack;
1208 			/* RX 2k jump msdu dropped count */
1209 			uint32_t rx_2k_jump_drop;
1210 			/* REO ERR msdu buffer received */
1211 			uint32_t reo_err_msdu_buf_rcved;
1212 			/* REO ERR msdu buffer with invalid cookie received */
1213 			uint32_t reo_err_msdu_buf_invalid_cookie;
1214 			/* REO OOR msdu drop count */
1215 			uint32_t reo_err_oor_drop;
1216 			/* REO OOR msdu indicated to stack count */
1217 			uint32_t reo_err_oor_to_stack;
1218 			/* REO OOR scattered msdu count */
1219 			uint32_t reo_err_oor_sg_count;
1220 			/* RX msdu rejected count on delivery to vdev stack_fn*/
1221 			uint32_t rejected;
1222 			/* Incorrect msdu count in MPDU desc info */
1223 			uint32_t msdu_count_mismatch;
1224 			/* RX raw frame dropped count */
1225 			uint32_t raw_frm_drop;
1226 			/* Stale link desc cookie count*/
1227 			uint32_t invalid_link_cookie;
1228 			/* Nbuf sanity failure */
1229 			uint32_t nbuf_sanity_fail;
1230 			/* Duplicate link desc refilled */
1231 			uint32_t dup_refill_link_desc;
1232 			/* Incorrect msdu continuation bit in MSDU desc */
1233 			uint32_t msdu_continuation_err;
1234 			/* count of start sequence (ssn) updates */
1235 			uint32_t ssn_update_count;
1236 			/* count of bar handling fail */
1237 			uint32_t bar_handle_fail_count;
1238 			/* EAPOL drop count in intrabss scenario */
1239 			uint32_t intrabss_eapol_drop;
1240 			/* PN check failed for 2K-jump or OOR error */
1241 			uint32_t pn_in_dest_check_fail;
1242 			/* MSDU len err count */
1243 			uint32_t msdu_len_err;
1244 			/* Rx flush count */
1245 			uint32_t rx_flush_count;
1246 			/* Rx invalid tid count */
1247 			uint32_t rx_invalid_tid_err;
1248 		} err;
1249 
1250 		/* packet count per core - per ring */
1251 		uint64_t ring_packets[NR_CPUS][MAX_REO_DEST_RINGS];
1252 	} rx;
1253 
1254 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1255 	struct reo_cmd_event_history cmd_event_history;
1256 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1257 };
1258 
1259 union dp_align_mac_addr {
1260 	uint8_t raw[QDF_MAC_ADDR_SIZE];
1261 	struct {
1262 		uint16_t bytes_ab;
1263 		uint16_t bytes_cd;
1264 		uint16_t bytes_ef;
1265 	} align2;
1266 	struct {
1267 		uint32_t bytes_abcd;
1268 		uint16_t bytes_ef;
1269 	} align4;
1270 	struct __attribute__((__packed__)) {
1271 		uint16_t bytes_ab;
1272 		uint32_t bytes_cdef;
1273 	} align4_2;
1274 };
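/*
 * Editor's sketch (dp_mac_addr_is_equal is a hypothetical helper): the
 * aligned views allow a MAC address compare using two word operations
 * instead of six byte compares:
 *
 *   static inline bool dp_mac_addr_is_equal(union dp_align_mac_addr *a,
 *                                           union dp_align_mac_addr *b)
 *   {
 *           return (a->align4.bytes_abcd == b->align4.bytes_abcd) &&
 *                  (a->align4.bytes_ef == b->align4.bytes_ef);
 *   }
 */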
1275 
1276 /**
1277  * struct dp_ast_free_cb_params - HMWDS free callback cookie
1278  * @mac_addr: ast mac address
1279  * @peer_mac_addr: mac address of peer
1280  * @type: ast entry type
1281  * @vdev_id: vdev_id
1282  * @flags: ast flags
1283  */
1284 struct dp_ast_free_cb_params {
1285 	union dp_align_mac_addr mac_addr;
1286 	union dp_align_mac_addr peer_mac_addr;
1287 	enum cdp_txrx_ast_entry_type type;
1288 	uint8_t vdev_id;
1289 	uint32_t flags;
1290 };
1291 
1292 /*
1293  * dp_ast_entry
1294  *
1295  * @ast_idx: Hardware AST Index
1296  * @peer_id: Next Hop peer_id (for non-WDS nodes, this will point to the
1297  *           peer associated with this MAC address)
1298  * @mac_addr:  MAC Address for this AST entry
1299  * @next_hop: Set to 1 if this is for a WDS node
1300  * @is_active: flag to indicate active data traffic on this node
1301  *             (used for aging out/expiry)
1302  * @ase_list_elem: node in peer AST list
1303  * @is_bss: flag to indicate if entry corresponds to bss peer
1304  * @is_mapped: flag to indicate that we have mapped the AST entry
1305  *             in ast_table
1306  * @pdev_id: pdev ID
1307  * @vdev_id: vdev ID
1308  * @ast_hash_value: hash value in HW
1309  * @ref_cnt: reference count
1310  * @type: flag to indicate type of the entry(static/WDS/MEC)
1311  * @delete_in_progress: Flag to indicate that delete commands send to FW
1312  *                      and host is waiting for response from FW
1313  * @callback: ast free/unmap callback
1314  * @cookie: argument to callback
1315  * @hash_list_elem: node in soc AST hash list (mac address used as hash)
1316  */
1317 struct dp_ast_entry {
1318 	uint16_t ast_idx;
1319 	uint16_t peer_id;
1320 	union dp_align_mac_addr mac_addr;
1321 	bool next_hop;
1322 	bool is_active;
1323 	bool is_mapped;
1324 	uint8_t pdev_id;
1325 	uint8_t vdev_id;
1326 	uint16_t ast_hash_value;
1327 	qdf_atomic_t ref_cnt;
1328 	enum cdp_txrx_ast_entry_type type;
1329 	bool delete_in_progress;
1330 	txrx_ast_free_cb callback;
1331 	void *cookie;
1332 	TAILQ_ENTRY(dp_ast_entry) ase_list_elem;
1333 	TAILQ_ENTRY(dp_ast_entry) hash_list_elem;
1334 };
1335 
1336 /*
1337  * dp_mec_entry
1338  *
1339  * @mac_addr:  MAC Address for this MEC entry
1340  * @is_active: flag to indicate active data traffic on this node
1341  *             (used for aging out/expiry)
1342  * @pdev_id: pdev ID
1343  * @vdev_id: vdev ID
1344  * @hash_list_elem: node in soc MEC hash list (mac address used as hash)
1345  */
1346 struct dp_mec_entry {
1347 	union dp_align_mac_addr mac_addr;
1348 	bool is_active;
1349 	uint8_t pdev_id;
1350 	uint8_t vdev_id;
1351 
1352 	TAILQ_ENTRY(dp_mec_entry) hash_list_elem;
1353 };
1354 
1355 /* SOC level htt stats */
1356 struct htt_t2h_stats {
1357 	/* lock to protect htt_stats_msg update */
1358 	qdf_spinlock_t lock;
1359 
1360 	/* work queue to process htt stats */
1361 	qdf_work_t work;
1362 
1363 	/* T2H Ext stats message queue */
1364 	qdf_nbuf_queue_t msg;
1365 
1366 	/* number of completed stats in htt_stats_msg */
1367 	uint32_t num_stats;
1368 };
1369 
1370 struct link_desc_bank {
1371 	void *base_vaddr_unaligned;
1372 	void *base_vaddr;
1373 	qdf_dma_addr_t base_paddr_unaligned;
1374 	qdf_dma_addr_t base_paddr;
1375 	uint32_t size;
1376 };
1377 
1378 struct rx_buff_pool {
1379 	qdf_nbuf_queue_head_t emerg_nbuf_q;
1380 	uint32_t nbuf_fail_cnt;
1381 	bool is_initialized;
1382 };
1383 
1384 struct rx_refill_buff_pool {
1385 	bool is_initialized;
1386 	uint16_t head;
1387 	uint16_t tail;
1388 	struct dp_pdev *dp_pdev;
1389 	uint16_t max_bufq_len;
1390 	qdf_nbuf_t buf_elem[2048];
1391 };
1392 
1393 #ifdef DP_TX_HW_DESC_HISTORY
1394 #define DP_TX_HW_DESC_HIST_MAX 6144
1395 #define DP_TX_HW_DESC_HIST_PER_SLOT_MAX 2048
1396 #define DP_TX_HW_DESC_HIST_MAX_SLOTS 3
1397 #define DP_TX_HW_DESC_HIST_SLOT_SHIFT 11
1398 
1399 struct dp_tx_hw_desc_evt {
1400 	uint8_t tcl_desc[HAL_TX_DESC_LEN_BYTES];
1401 	uint64_t posted;
1402 	uint32_t hp;
1403 	uint32_t tp;
1404 };
1405 
1406 /* struct dp_tx_hw_desc_history - TX HW desc history
1407  * @index: Index where the last entry is written
1408  * @entry: history entries
1409  */
1410 struct dp_tx_hw_desc_history {
1411 	qdf_atomic_t index;
1412 	uint16_t num_entries_per_slot;
1413 	uint16_t allocated;
1414 	struct dp_tx_hw_desc_evt *entry[DP_TX_HW_DESC_HIST_MAX_SLOTS];
1415 };
1416 #endif
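/*
 * Editor's sketch (assumption about how the slot constants above are used):
 * a running history index is split into a slot and an offset within that
 * slot, e.g. for a hypothetical index "idx":
 *
 *   idx  = idx % DP_TX_HW_DESC_HIST_MAX;                 // 3 slots * 2048
 *   slot = idx >> DP_TX_HW_DESC_HIST_SLOT_SHIFT;         // idx / 2048
 *   offs = idx & (DP_TX_HW_DESC_HIST_PER_SLOT_MAX - 1);  // idx % 2048
 *   evt  = &hist->entry[slot][offs];
 */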
1417 
1418 /*
1419  * enum dp_mon_status_process_event - Events for monitor status buffer record
1420  * @DP_MON_STATUS_BUF_REAP: Monitor status buffer is reaped from ring
1421  * @DP_MON_STATUS_BUF_ENQUEUE: Status buffer is enqueued to local queue
1422  * @DP_MON_STATUS_BUF_DEQUEUE: Status buffer is dequeued from local queue
1423  */
1424 enum dp_mon_status_process_event {
1425 	DP_MON_STATUS_BUF_REAP,
1426 	DP_MON_STATUS_BUF_ENQUEUE,
1427 	DP_MON_STATUS_BUF_DEQUEUE,
1428 };
1429 
1430 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
1431 #define DP_MON_STATUS_HIST_MAX	2048
1432 
1433 /**
1434  * struct dp_mon_stat_info_record - monitor status ring buffer info
1435  * @hbi: HW ring buffer info
1436  * @timestamp: timestamp when this entry was recorded
1437  * @event: event
1438  * @rx_desc: RX descriptor corresponding to the received buffer
1439  * @nbuf: buffer attached to rx_desc, if event is REAP, else the buffer
1440  *	  which was enqueued or dequeued.
1441  * @rx_desc_nbuf_data: nbuf data pointer.
1442  */
1443 struct dp_mon_stat_info_record {
1444 	struct hal_buf_info hbi;
1445 	uint64_t timestamp;
1446 	enum dp_mon_status_process_event event;
1447 	void *rx_desc;
1448 	qdf_nbuf_t nbuf;
1449 	uint8_t *rx_desc_nbuf_data;
1450 };
1451 
1452 /* struct dp_mon_status_ring_history - monitor status ring history
1453  * @index: Index where the last entry is written
1454  * @entry: history entries
1455  */
1456 struct dp_mon_status_ring_history {
1457 	qdf_atomic_t index;
1458 	struct dp_mon_stat_info_record entry[DP_MON_STATUS_HIST_MAX];
1459 };
1460 #endif
1461 
1462 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1463 /*
1464  * The logic for getting the current index of these histories depends on
1465  * the size being a power of 2.
1466  */
1467 #define DP_RX_HIST_MAX 2048
1468 #define DP_RX_ERR_HIST_MAX 2048
1469 #define DP_RX_REINJECT_HIST_MAX 1024
1470 #define DP_RX_REFILL_HIST_MAX 2048
1471 
1472 QDF_COMPILE_TIME_ASSERT(rx_history_size,
1473 			(DP_RX_HIST_MAX &
1474 			 (DP_RX_HIST_MAX - 1)) == 0);
1475 QDF_COMPILE_TIME_ASSERT(rx_err_history_size,
1476 			(DP_RX_ERR_HIST_MAX &
1477 			 (DP_RX_ERR_HIST_MAX - 1)) == 0);
1478 QDF_COMPILE_TIME_ASSERT(rx_reinject_history_size,
1479 			(DP_RX_REINJECT_HIST_MAX &
1480 			 (DP_RX_REINJECT_HIST_MAX - 1)) == 0);
1481 QDF_COMPILE_TIME_ASSERT(rx_refill_history_size,
1482 			(DP_RX_REFILL_HIST_MAX &
1483 			(DP_RX_REFILL_HIST_MAX - 1)) == 0);
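/*
 * Editor's note: with power-of-2 sizes the next record slot can be picked
 * with a bitwise AND instead of a modulo, e.g. for the rx ring history:
 *
 *   uint32_t idx = qdf_atomic_inc_return(&hist->index) & (DP_RX_HIST_MAX - 1);
 *   hist->entry[idx].timestamp = qdf_get_log_timestamp();
 */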
1484 
1485 
1486 /**
1487  * struct dp_buf_info_record - ring buffer info
1488  * @hbi: HW ring buffer info
1489  * @timestamp: timestamp when this entry was recorded
1490  */
1491 struct dp_buf_info_record {
1492 	struct hal_buf_info hbi;
1493 	uint64_t timestamp;
1494 };
1495 
1496 /**
1497  * struct dp_refill_info_record - ring refill buffer info
1498  * @hp: HP value after refill
1499  * @tp: cached tail value during refill
1500  * @num_req: number of buffers requested to refill
1501  * @num_refill: number of buffers refilled to ring
1502  * @timestamp: timestamp when this entry was recorded
1503  */
1504 struct dp_refill_info_record {
1505 	uint32_t hp;
1506 	uint32_t tp;
1507 	uint32_t num_req;
1508 	uint32_t num_refill;
1509 	uint64_t timestamp;
1510 };
1511 
1512 /* struct dp_rx_history - rx ring history
1513  * @index: Index where the last entry is written
1514  * @entry: history entries
1515  */
1516 struct dp_rx_history {
1517 	qdf_atomic_t index;
1518 	struct dp_buf_info_record entry[DP_RX_HIST_MAX];
1519 };
1520 
1521 /* struct dp_rx_err_history - rx err ring history
1522  * @index: Index where the last entry is written
1523  * @entry: history entries
1524  */
1525 struct dp_rx_err_history {
1526 	qdf_atomic_t index;
1527 	struct dp_buf_info_record entry[DP_RX_ERR_HIST_MAX];
1528 };
1529 
1530 /* struct dp_rx_reinject_history - rx reinject ring history
1531  * @index: Index where the last entry is written
1532  * @entry: history entries
1533  */
1534 struct dp_rx_reinject_history {
1535 	qdf_atomic_t index;
1536 	struct dp_buf_info_record entry[DP_RX_REINJECT_HIST_MAX];
1537 };
1538 
1539 /* struct dp_rx_refill_history - rx buf refill history
1540  * @index: Index where the last entry is written
1541  * @entry: history entries
1542  */
1543 struct dp_rx_refill_history {
1544 	qdf_atomic_t index;
1545 	struct dp_refill_info_record entry[DP_RX_REFILL_HIST_MAX];
1546 };
1547 
1548 #endif
1549 
1550 enum dp_tx_event_type {
1551 	DP_TX_DESC_INVAL_EVT = 0,
1552 	DP_TX_DESC_MAP,
1553 	DP_TX_DESC_COOKIE,
1554 	DP_TX_DESC_FLUSH,
1555 	DP_TX_DESC_UNMAP,
1556 	DP_TX_COMP_UNMAP,
1557 	DP_TX_COMP_UNMAP_ERR,
1558 	DP_TX_COMP_MSDU_EXT,
1559 };
1560 
1561 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
1562 /* Size must be a power of 2, for bitwise index rotation */
1563 #define DP_TX_TCL_HISTORY_SIZE 0x4000
1564 #define DP_TX_TCL_HIST_PER_SLOT_MAX 2048
1565 #define DP_TX_TCL_HIST_MAX_SLOTS 8
1566 #define DP_TX_TCL_HIST_SLOT_SHIFT 11
1567 
1568 /* Size must be a power of 2, for bitwise index rotation */
1569 #define DP_TX_COMP_HISTORY_SIZE 0x4000
1570 #define DP_TX_COMP_HIST_PER_SLOT_MAX 2048
1571 #define DP_TX_COMP_HIST_MAX_SLOTS 8
1572 #define DP_TX_COMP_HIST_SLOT_SHIFT 11
1573 
1574 struct dp_tx_desc_event {
1575 	qdf_nbuf_t skb;
1576 	dma_addr_t paddr;
1577 	uint32_t sw_cookie;
1578 	enum dp_tx_event_type type;
1579 	uint64_t ts;
1580 };
1581 
1582 struct dp_tx_tcl_history {
1583 	qdf_atomic_t index;
1584 	uint16_t num_entries_per_slot;
1585 	uint16_t allocated;
1586 	struct dp_tx_desc_event *entry[DP_TX_TCL_HIST_MAX_SLOTS];
1587 };
1588 
1589 struct dp_tx_comp_history {
1590 	qdf_atomic_t index;
1591 	uint16_t num_entries_per_slot;
1592 	uint16_t allocated;
1593 	struct dp_tx_desc_event *entry[DP_TX_COMP_HIST_MAX_SLOTS];
1594 };
1595 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
1596 
1597 /* structure to record recent operation related variable */
1598 struct dp_last_op_info {
1599 	/* last link desc buf info through WBM release ring */
1600 	struct hal_buf_info wbm_rel_link_desc;
1601 	/* last link desc buf info through REO reinject ring */
1602 	struct hal_buf_info reo_reinject_link_desc;
1603 };
1604 
1605 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1606 
1607 /**
1608  * struct dp_swlm_tcl_data - params for tcl register write coalescing
1609  *			     decision making
1610  * @nbuf: TX packet
1611  * @tid: tid for transmitting the current packet
1612  * @num_ll_connections: Number of low latency connections on this vdev
1613  * @ring_id: TCL ring id
1614  * @pkt_len: Packet length
1615  *
1616  * This structure contains the information required by the software
1617  * latency manager to decide on whether to coalesce the current TCL
1618  * register write or not.
1619  */
1620 struct dp_swlm_tcl_data {
1621 	qdf_nbuf_t nbuf;
1622 	uint8_t tid;
1623 	uint8_t num_ll_connections;
1624 	uint8_t ring_id;
1625 	uint32_t pkt_len;
1626 };
1627 
1628 /**
1629  * union swlm_data - SWLM query data
1630  * @tcl_data: data for TCL query in SWLM
1631  */
1632 union swlm_data {
1633 	struct dp_swlm_tcl_data *tcl_data;
1634 };
1635 
1636 /**
1637  * struct dp_swlm_ops - SWLM ops
1638  * @tcl_wr_coalesce_check: handler to check if the current TCL register
1639  *			   write can be coalesced or not
1640  */
1641 struct dp_swlm_ops {
1642 	int (*tcl_wr_coalesce_check)(struct dp_soc *soc,
1643 				     struct dp_swlm_tcl_data *tcl_data);
1644 };
1645 
1646 /**
1647  * struct dp_swlm_stats - Stats for Software Latency manager.
1648  * @tcl.timer_flush_success: Num TCL HP writes success from timer context
1649  * @tcl.timer_flush_fail: Num TCL HP writes failure from timer context
1650  * @tcl.tid_fail: Num TCL register write coalescing skips, since the pkt
1651  *		 was being transmitted on a TID above coalescing threshold
1652  * @tcl.sp_frames: Num TCL register write coalescing skips, since the pkt
1653  *		  being transmitted was a special frame
1654  * @tcl.ll_connection: Num TCL register write coalescing skips, since the
1655  *		       vdev has low latency connections
1656  * @tcl.bytes_thresh_reached: Num TCL HP writes flush after the coalescing
1657  *			     bytes threshold was reached
1658  * @tcl.time_thresh_reached: Num TCL HP writes flush after the coalescing
1659  *			    session time expired
1660  * @tcl.tput_criteria_fail: Num TCL HP writes coalescing fails, since the
1661  *			   throughput did not meet session threshold
1662  * @tcl.coalesce_success: Num of TCL HP writes coalesced successfully.
1663  * @tcl.coalesce_fail: Num of TCL HP writes coalesces failed
1664  */
1665 struct dp_swlm_stats {
1666 	struct {
1667 		uint32_t timer_flush_success;
1668 		uint32_t timer_flush_fail;
1669 		uint32_t tid_fail;
1670 		uint32_t sp_frames;
1671 		uint32_t ll_connection;
1672 		uint32_t bytes_thresh_reached;
1673 		uint32_t time_thresh_reached;
1674 		uint32_t tput_criteria_fail;
1675 		uint32_t coalesce_success;
1676 		uint32_t coalesce_fail;
1677 	} tcl[MAX_TCL_DATA_RINGS];
1678 };
1679 
1680 /**
1681  * struct dp_swlm_tcl_params - Parameters based on TCL for different modules
1682  *			      in the Software latency manager.
1683  * @soc: DP soc reference
1684  * @ring_id: TCL ring id
1685  * @flush_timer: Timer for flushing the coalesced TCL HP writes
1686  * @sampling_session_tx_bytes: Num bytes transmitted in the sampling time
1687  * @bytes_flush_thresh: Bytes threshold to flush the TCL HP register write
1688  * @coalesce_end_time: End timestamp for current coalescing session
1689  * @bytes_coalesced: Num bytes coalesced in the current session
1690  * @prev_tx_packets: Previous TX packets accounted
1691  * @prev_tx_bytes: Previous TX bytes accounted
1692  * @prev_rx_bytes: Previous RX bytes accounted
1693  * @expire_time: expiry time for sample
1694  * @tput_pass_cnt: threshold throughput pass counter
1695  */
1696 struct dp_swlm_tcl_params {
1697 	struct dp_soc *soc;
1698 	uint32_t ring_id;
1699 	qdf_timer_t flush_timer;
1700 	uint32_t sampling_session_tx_bytes;
1701 	uint32_t bytes_flush_thresh;
1702 	uint64_t coalesce_end_time;
1703 	uint32_t bytes_coalesced;
1704 	uint32_t prev_tx_packets;
1705 	uint32_t prev_tx_bytes;
1706 	uint32_t prev_rx_bytes;
1707 	uint64_t expire_time;
1708 	uint32_t tput_pass_cnt;
1709 };
1710 
1711 /**
1712  * struct dp_swlm_params - Parameters for different modules in the
1713  *			  Software latency manager.
1714  * @rx_traffic_thresh: Threshold for RX traffic, to begin TCL register
1715  *			   write coalescing
1716  * @tx_traffic_thresh: Threshold for TX traffic, to begin TCL register
1717  *			   write coalescing
1718  * @sampling_time: Sampling time to test the throughput threshold
1719  * @time_flush_thresh: Time threshold to flush the TCL HP register write
1720  * @tx_thresh_multiplier: Multiplier to deduce the bytes threshold after
1721  *			      which the TCL HP register is written, thereby
1722  *			      ending the coalescing.
1723  * @tx_pkt_thresh: Threshold for TX packet count, to begin TCL register
1724  *		       write coalescing
1725  * @tcl: TCL ring specific params
1726  */
1728 struct dp_swlm_params {
1729 	uint32_t rx_traffic_thresh;
1730 	uint32_t tx_traffic_thresh;
1731 	uint32_t sampling_time;
1732 	uint32_t time_flush_thresh;
1733 	uint32_t tx_thresh_multiplier;
1734 	uint32_t tx_pkt_thresh;
1735 	struct dp_swlm_tcl_params tcl[MAX_TCL_DATA_RINGS];
1736 };
1737 
1738 /**
1739  * struct dp_swlm - Software latency manager context
1740  * @ops: SWLM ops pointers
1741  * @is_enabled: SWLM enabled/disabled
1742  * @is_init: SWLM module initialized
1743  * @stats: SWLM stats
1744  * @params: SWLM params
1746  */
1747 struct dp_swlm {
1748 	struct dp_swlm_ops *ops;
1749 	uint8_t is_enabled:1,
1750 		is_init:1;
1751 	struct dp_swlm_stats stats;
1752 	struct dp_swlm_params params;
1753 };
1754 #endif
1755 
1756 #ifdef IPA_OFFLOAD
1757 /* IPA uC datapath offload Wlan Tx resources */
1758 struct ipa_dp_tx_rsc {
1759 	/* Resource info to be passed to IPA */
1760 	qdf_dma_addr_t ipa_tcl_ring_base_paddr;
1761 	void *ipa_tcl_ring_base_vaddr;
1762 	uint32_t ipa_tcl_ring_size;
1763 	qdf_dma_addr_t ipa_tcl_hp_paddr;
1764 	uint32_t alloc_tx_buf_cnt;
1765 
1766 	qdf_dma_addr_t ipa_wbm_ring_base_paddr;
1767 	void *ipa_wbm_ring_base_vaddr;
1768 	uint32_t ipa_wbm_ring_size;
1769 	qdf_dma_addr_t ipa_wbm_tp_paddr;
1770 	/* WBM2SW HP shadow paddr */
1771 	qdf_dma_addr_t ipa_wbm_hp_shadow_paddr;
1772 
1773 	/* TX buffers populated into the WBM ring */
1774 	void **tx_buf_pool_vaddr_unaligned;
1775 	qdf_dma_addr_t *tx_buf_pool_paddr_unaligned;
1776 };
1777 
1778 /* IPA uC datapath offload Wlan Rx resources */
1779 struct ipa_dp_rx_rsc {
1780 	/* Resource info to be passed to IPA */
1781 	qdf_dma_addr_t ipa_reo_ring_base_paddr;
1782 	void *ipa_reo_ring_base_vaddr;
1783 	uint32_t ipa_reo_ring_size;
1784 	qdf_dma_addr_t ipa_reo_tp_paddr;
1785 
1786 	/* Resource info to be passed to firmware and IPA */
1787 	qdf_dma_addr_t ipa_rx_refill_buf_ring_base_paddr;
1788 	void *ipa_rx_refill_buf_ring_base_vaddr;
1789 	uint32_t ipa_rx_refill_buf_ring_size;
1790 	qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr;
1791 };
1792 #endif
1793 
1794 struct dp_tx_msdu_info_s;
1795 /*
1796  * enum dp_context_type - DP Context Type
1797  * @DP_CONTEXT_TYPE_SOC: Context type DP SOC
1798  * @DP_CONTEXT_TYPE_PDEV: Context type DP PDEV
1799  * @DP_CONTEXT_TYPE_VDEV: Context type DP VDEV
1800  * @DP_CONTEXT_TYPE_PEER: Context type DP PEER
1801  * @DP_CONTEXT_TYPE_MON_SOC: Context type DP MON SOC
1802  * @DP_CONTEXT_TYPE_MON_PDEV: Context type DP MON PDEV
1803  *
1804  * Helper enums to be used to retrieve the size of the corresponding
1805  * data structure by passing the type.
1806  */
1807 enum dp_context_type {
1808 	DP_CONTEXT_TYPE_SOC,
1809 	DP_CONTEXT_TYPE_PDEV,
1810 	DP_CONTEXT_TYPE_VDEV,
1811 	DP_CONTEXT_TYPE_PEER,
1812 	DP_CONTEXT_TYPE_MON_SOC,
1813 	DP_CONTEXT_TYPE_MON_PDEV
1814 };
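
/*
 * Example (illustrative sketch): a target-independent allocator can query the
 * arch layer for the size of a context instead of hard-coding sizeof() of a
 * target-specific type. The snippet assumes soc->arch_ops.txrx_get_context_size
 * has been populated (see struct dp_arch_ops below); qdf_mem_malloc() is the
 * standard QDF allocator.
 *
 *	qdf_size_t pdev_ctx_size =
 *		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_PDEV);
 *	struct dp_pdev *pdev = qdf_mem_malloc(pdev_ctx_size);
 */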
1815 
1816 /*
1817  * struct dp_arch_ops - DP target specific arch ops
1820  * @tx_hw_enqueue: enqueue TX data to HW
1821  * @tx_comp_get_params_from_hal_desc: get software tx descriptor and release
1822  * 				      source from HAL desc for wbm release ring
1823  * @dp_service_near_full_srngs: Handler for servicing the near full IRQ
1824  * @txrx_set_vdev_param: target specific ops while setting vdev params
1825  * @dp_srng_test_and_update_nf_params: Check if the srng is in near full state
1826  *				and set the near-full params.
1827  */
1828 struct dp_arch_ops {
1829 	/* INIT/DEINIT Arch Ops */
1830 	QDF_STATUS (*txrx_soc_attach)(struct dp_soc *soc,
1831 				      struct cdp_soc_attach_params *params);
1832 	QDF_STATUS (*txrx_soc_detach)(struct dp_soc *soc);
1833 	QDF_STATUS (*txrx_soc_init)(struct dp_soc *soc);
1834 	QDF_STATUS (*txrx_soc_deinit)(struct dp_soc *soc);
1835 	QDF_STATUS (*txrx_soc_srng_alloc)(struct dp_soc *soc);
1836 	QDF_STATUS (*txrx_soc_srng_init)(struct dp_soc *soc);
1837 	void (*txrx_soc_srng_deinit)(struct dp_soc *soc);
1838 	void (*txrx_soc_srng_free)(struct dp_soc *soc);
1839 	QDF_STATUS (*txrx_pdev_attach)(struct dp_pdev *pdev,
1840 				       struct cdp_pdev_attach_params *params);
1841 	QDF_STATUS (*txrx_pdev_detach)(struct dp_pdev *pdev);
1842 	QDF_STATUS (*txrx_vdev_attach)(struct dp_soc *soc,
1843 				       struct dp_vdev *vdev);
1844 	QDF_STATUS (*txrx_vdev_detach)(struct dp_soc *soc,
1845 				       struct dp_vdev *vdev);
1846 	QDF_STATUS (*txrx_peer_map_attach)(struct dp_soc *soc);
1847 	void (*txrx_peer_map_detach)(struct dp_soc *soc);
1848 	QDF_STATUS (*dp_rxdma_ring_sel_cfg)(struct dp_soc *soc);
1849 	void (*soc_cfg_attach)(struct dp_soc *soc);
1850 	void (*peer_get_reo_hash)(struct dp_vdev *vdev,
1851 				  struct cdp_peer_setup_info *setup_info,
1852 				  enum cdp_host_reo_dest_ring *reo_dest,
1853 				  bool *hash_based,
1854 				  uint8_t *lmac_peer_id_msb);
1855 	 bool (*reo_remap_config)(struct dp_soc *soc, uint32_t *remap0,
1856 				  uint32_t *remap1, uint32_t *remap2);
1857 
1858 	/* TX RX Arch Ops */
1859 	QDF_STATUS (*tx_hw_enqueue)(struct dp_soc *soc, struct dp_vdev *vdev,
1860 				    struct dp_tx_desc_s *tx_desc,
1861 				    uint16_t fw_metadata,
1862 				    struct cdp_tx_exception_metadata *metadata,
1863 				    struct dp_tx_msdu_info_s *msdu_info);
1864 
1865 	 void (*tx_comp_get_params_from_hal_desc)(struct dp_soc *soc,
1866 						  void *tx_comp_hal_desc,
1867 						  struct dp_tx_desc_s **desc);
1868 	void (*dp_tx_process_htt_completion)(struct dp_soc *soc,
1869 					     struct dp_tx_desc_s *tx_desc,
1870 					     uint8_t *status,
1871 					     uint8_t ring_id);
1872 
1873 	uint32_t (*dp_rx_process)(struct dp_intr *int_ctx,
1874 				  hal_ring_handle_t hal_ring_hdl,
1875 				  uint8_t reo_ring_num, uint32_t quota);
1876 
1877 	qdf_nbuf_t (*dp_tx_send_fast)(struct cdp_soc_t *soc_hdl,
1878 				      uint8_t vdev_id,
1879 				      qdf_nbuf_t nbuf);
1880 
1881 	QDF_STATUS (*dp_tx_desc_pool_init)(struct dp_soc *soc,
1882 					   uint32_t num_elem,
1883 					   uint8_t pool_id);
1884 	void (*dp_tx_desc_pool_deinit)(
1885 				struct dp_soc *soc,
1886 				struct dp_tx_desc_pool_s *tx_desc_pool,
1887 				uint8_t pool_id);
1888 
1889 	QDF_STATUS (*dp_rx_desc_pool_init)(struct dp_soc *soc,
1890 					   struct rx_desc_pool *rx_desc_pool,
1891 					   uint32_t pool_id);
1892 	void (*dp_rx_desc_pool_deinit)(struct dp_soc *soc,
1893 				       struct rx_desc_pool *rx_desc_pool,
1894 				       uint32_t pool_id);
1895 
1896 	QDF_STATUS (*dp_wbm_get_rx_desc_from_hal_desc)(
1897 						struct dp_soc *soc,
1898 						void *ring_desc,
1899 						struct dp_rx_desc **r_rx_desc);
1900 
1901 	bool
1902 	(*dp_rx_intrabss_handle_nawds)(struct dp_soc *soc,
1903 				       struct dp_txrx_peer *ta_txrx_peer,
1904 				       qdf_nbuf_t nbuf_copy,
1905 				       struct cdp_tid_rx_stats *tid_stats);
1906 
1907 	struct dp_rx_desc *(*dp_rx_desc_cookie_2_va)(struct dp_soc *soc,
1908 						     uint32_t cookie);
1909 	uint32_t (*dp_service_near_full_srngs)(struct dp_soc *soc,
1910 					       struct dp_intr *int_ctx,
1911 					       uint32_t dp_budget);
1912 	void (*tx_implicit_rbm_set)(struct dp_soc *soc, uint8_t tx_ring_id,
1913 				    uint8_t bm_id);
1914 	uint16_t (*dp_rx_peer_metadata_peer_id_get)(struct dp_soc *soc,
1915 						    uint32_t peer_metadata);
1916 	/* Control Arch Ops */
1917 	QDF_STATUS (*txrx_set_vdev_param)(struct dp_soc *soc,
1918 					  struct dp_vdev *vdev,
1919 					  enum cdp_vdev_param_type param,
1920 					  cdp_config_param_type val);
1921 
1922 	/* Misc Arch Ops */
1923 	qdf_size_t (*txrx_get_context_size)(enum dp_context_type);
1924 #ifdef WIFI_MONITOR_SUPPORT
1925 	qdf_size_t (*txrx_get_mon_context_size)(enum dp_context_type);
1926 #endif
1927 	int (*dp_srng_test_and_update_nf_params)(struct dp_soc *soc,
1928 						 struct dp_srng *dp_srng,
1929 						 int *max_reap_limit);
1930 
1931 	/* MLO ops */
1932 #ifdef WLAN_FEATURE_11BE_MLO
1933 #ifdef WLAN_MCAST_MLO
1934 	void (*dp_tx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
1935 				    qdf_nbuf_t nbuf);
1936 	bool (*dp_rx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
1937 				    struct dp_txrx_peer *peer, qdf_nbuf_t nbuf);
1938 #endif
1939 	void (*mlo_peer_find_hash_detach)(struct dp_soc *soc);
1940 	QDF_STATUS (*mlo_peer_find_hash_attach)(struct dp_soc *soc);
1941 	void (*mlo_peer_find_hash_add)(struct dp_soc *soc,
1942 				       struct dp_peer *peer);
1943 	void (*mlo_peer_find_hash_remove)(struct dp_soc *soc,
1944 					  struct dp_peer *peer);
1945 	struct dp_peer *(*mlo_peer_find_hash_find)(struct dp_soc *soc,
1946 						   uint8_t *peer_mac_addr,
1947 						   int mac_addr_is_aligned,
1948 						   enum dp_mod_id mod_id,
1949 						   uint8_t vdev_id);
1950 #endif
1951 	void (*get_rx_hash_key)(struct dp_soc *soc,
1952 				struct cdp_lro_hash_config *lro_hash);
1953 	void (*txrx_print_peer_stats)(struct cdp_peer_stats *peer_stats,
1954 				      enum peer_stats_type stats_type);
1955 	/* Dp peer reorder queue setup */
1956 	QDF_STATUS (*dp_peer_rx_reorder_queue_setup)(struct dp_soc *soc,
1957 						     struct dp_peer *peer,
1958 						     int tid,
1959 						     uint32_t ba_window_size);
1960 	struct dp_peer *(*dp_find_peer_by_destmac)(struct dp_soc *soc,
1961 						   uint8_t *dest_mac_addr,
1962 						   uint8_t vdev_id);
1963 	void (*dp_bank_reconfig)(struct dp_soc *soc, struct dp_vdev *vdev);
1964 
1965 	void (*dp_reconfig_tx_vdev_mcast_ctrl)(struct dp_soc *soc,
1966 					       struct dp_vdev *vdev);
1967 
1968 	void (*dp_cc_reg_cfg_init)(struct dp_soc *soc, bool is_4k_align);
1969 
1970 	QDF_STATUS
1971 	(*dp_tx_compute_hw_delay)(struct dp_soc *soc,
1972 				  struct dp_vdev *vdev,
1973 				  struct hal_tx_completion_status *ts,
1974 				  uint32_t *delay_us);
1975 	void (*print_mlo_ast_stats)(struct dp_soc *soc);
1976 	void (*dp_partner_chips_map)(struct dp_soc *soc,
1977 				     struct dp_peer *peer,
1978 				     uint16_t peer_id);
1979 	void (*dp_partner_chips_unmap)(struct dp_soc *soc,
1980 				       uint16_t peer_id);
1981 };
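
/*
 * Example (illustrative sketch): common DP code is expected to reach
 * target-specific behaviour only through soc->arch_ops. The fragment below
 * shows the near-full handling pattern suggested by
 * dp_srng_test_and_update_nf_params(); the surrounding reap loop and the
 * variable names are hypothetical.
 *
 *	int max_reap_limit = 0;
 *
 *	if (soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
 *							    &max_reap_limit)) {
 *		// ring is near full; the arch hook has updated max_reap_limit,
 *		// so the caller may reap up to that many entries this pass
 *	}
 */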
1982 
1983 /**
1984  * struct dp_soc_features: Data structure holding the SOC level feature flags.
1985  * @pn_in_reo_dest: PN provided by hardware in the REO destination ring.
1986  * @dmac_cmn_src_rxbuf_ring_enabled: Flag to indicate DMAC mode common Rx
1987  *				     buffer source rings
1988  * @rssi_dbm_conv_support: RSSI dBm conversion support param.
1989  * @umac_hw_reset_support: UMAC HW reset support
1990  */
1991 struct dp_soc_features {
1992 	uint8_t pn_in_reo_dest:1,
1993 		dmac_cmn_src_rxbuf_ring_enabled:1;
1994 	bool rssi_dbm_conv_support;
1995 	bool umac_hw_reset_support;
1996 };
1997 
1998 enum sysfs_printing_mode {
1999 	PRINTING_MODE_DISABLED = 0,
2000 	PRINTING_MODE_ENABLED
2001 };
2002 
2003 /**
2004  * typedef notify_pre_reset_fw_callback - callback registered with the
2005  *	data path, invoked as part of firmware (UMAC) pre-reset handling
2006  */
2008 typedef void (*notify_pre_reset_fw_callback)(struct dp_soc *soc);
2009 
2010 #ifdef WLAN_SYSFS_DP_STATS
2011 /**
2012  * struct sysfs_stats_config: Data structure holding stats sysfs config.
2013  * @rw_stats_lock: Lock to read and write to stat_type and pdev_id.
2014  * @sysfs_read_lock: Lock held while another stat req is being executed.
2015  * @sysfs_write_user_buffer: Lock to change buff len, max buf len
2016  * and *buf.
2017  * @sysfs_txrx_fw_request_done: Event to wait for firmware response.
2018  * @stat_type_requested: stat type requested.
2019  * @mac_id: mac id for which stat type are requested.
2020  * @printing_mode: Should a print go through.
2021  * @process_id: Process allowed to write to buffer.
2022  * @curr_buffer_length: Curr length of buffer written
2023  * @max_buffer_length: Max buffer length.
2024  * @buf: Sysfs buffer.
2025  */
2026 struct sysfs_stats_config {
2027 	/* lock held to read stats */
2028 	qdf_spinlock_t rw_stats_lock;
2029 	qdf_mutex_t sysfs_read_lock;
2030 	qdf_spinlock_t sysfs_write_user_buffer;
2031 	qdf_event_t sysfs_txrx_fw_request_done;
2032 	uint32_t stat_type_requested;
2033 	uint32_t mac_id;
2034 	enum sysfs_printing_mode printing_mode;
2035 	int process_id;
2036 	uint16_t curr_buffer_length;
2037 	uint16_t max_buffer_length;
2038 	char *buf;
2039 };
2040 #endif
2041 
2042 /* SOC level structure for data path */
2043 struct dp_soc {
2044 	/**
2045 	 * re-use memory section starts
2046 	 */
2047 
2048 	/* Common base structure - Should be the first member */
2049 	struct cdp_soc_t cdp_soc;
2050 
2051 	/* SoC Obj */
2052 	struct cdp_ctrl_objmgr_psoc *ctrl_psoc;
2053 
2054 	/* OS device abstraction */
2055 	qdf_device_t osdev;
2056 
2057 	/*cce disable*/
2058 	bool cce_disable;
2059 
2060 	/* WLAN config context */
2061 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx;
2062 
2063 	/* HTT handle for host-fw interaction */
2064 	struct htt_soc *htt_handle;
2065 
2066 	/* Common init done */
2067 	qdf_atomic_t cmn_init_done;
2068 
2069 	/* Opaque hif handle */
2070 	struct hif_opaque_softc *hif_handle;
2071 
2072 	/* PDEVs on this SOC */
2073 	struct dp_pdev *pdev_list[MAX_PDEV_CNT];
2074 
2075 	/* Ring used to replenish rx buffers (maybe to the firmware or MAC) */
2076 	struct dp_srng rx_refill_buf_ring[MAX_PDEV_CNT];
2077 
2078 	struct dp_srng rxdma_mon_desc_ring[MAX_NUM_LMAC_HW];
2079 
2080 	/* RXDMA error destination ring */
2081 	struct dp_srng rxdma_err_dst_ring[MAX_NUM_LMAC_HW];
2082 
2083 	/* RXDMA monitor buffer replenish ring */
2084 	struct dp_srng rxdma_mon_buf_ring[MAX_NUM_LMAC_HW];
2085 
2086 	/* RXDMA monitor destination ring */
2087 	struct dp_srng rxdma_mon_dst_ring[MAX_NUM_LMAC_HW];
2088 
2089 	/* RXDMA monitor status ring. TBD: Check format of this ring */
2090 	struct dp_srng rxdma_mon_status_ring[MAX_NUM_LMAC_HW];
2091 
2092 	/* Number of PDEVs */
2093 	uint8_t pdev_count;
2094 
2095 	/*ast override support in HW*/
2096 	bool ast_override_support;
2097 
2098 	/*number of hw dscp tid map*/
2099 	uint8_t num_hw_dscp_tid_map;
2100 
2101 	/* HAL SOC handle */
2102 	hal_soc_handle_t hal_soc;
2103 
2104 	/* rx monitor pkt tlv size */
2105 	uint16_t rx_mon_pkt_tlv_size;
2106 	/* rx pkt tlv size */
2107 	uint16_t rx_pkt_tlv_size;
2108 
2109 	struct dp_arch_ops arch_ops;
2110 
2111 	/* Device ID coming from Bus sub-system */
2112 	uint32_t device_id;
2113 
2114 	/* Link descriptor pages */
2115 	struct qdf_mem_multi_page_t link_desc_pages;
2116 
2117 	/* total link descriptors for regular RX and TX */
2118 	uint32_t total_link_descs;
2119 
2120 	/* Link descriptor Idle list for HW internal use (SRNG mode) */
2121 	struct dp_srng wbm_idle_link_ring;
2122 
2123 	/* Link descriptor Idle list for HW internal use (scatter buffer mode)
2124 	 */
2125 	qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS];
2126 	void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS];
2127 	uint32_t num_scatter_bufs;
2128 
2129 	/* Tx SW descriptor pool */
2130 	struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS];
2131 
2132 	/* Tx MSDU Extension descriptor pool */
2133 	struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS];
2134 
2135 	/* Tx TSO descriptor pool */
2136 	struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS];
2137 
2138 	/* Tx TSO Num of segments pool */
2139 	struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS];
2140 
2141 	/* REO destination rings */
2142 	struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS];
2143 
2144 	/* REO exception ring - See if we should combine this with reo_dest_ring */
2145 	struct dp_srng reo_exception_ring;
2146 
2147 	/* REO reinjection ring */
2148 	struct dp_srng reo_reinject_ring;
2149 
2150 	/* REO command ring */
2151 	struct dp_srng reo_cmd_ring;
2152 
2153 	/* REO command status ring */
2154 	struct dp_srng reo_status_ring;
2155 
2156 	/* WBM Rx release ring */
2157 	struct dp_srng rx_rel_ring;
2158 
2159 	/* TCL data ring */
2160 	struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS];
2161 
2162 	/* Number of Tx comp rings */
2163 	uint8_t num_tx_comp_rings;
2164 
2165 	/* Number of TCL data rings */
2166 	uint8_t num_tcl_data_rings;
2167 
2168 	/* TCL CMD_CREDIT ring */
2169 	bool init_tcl_cmd_cred_ring;
2170 
2171 	/* Used as a credit based ring on QCN9000, else as a command ring */
2172 	struct dp_srng tcl_cmd_credit_ring;
2173 
2174 	/* TCL command status ring */
2175 	struct dp_srng tcl_status_ring;
2176 
2177 	/* WBM Tx completion rings */
2178 	struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS];
2179 
2180 	/* Common WBM link descriptor release ring (SW to WBM) */
2181 	struct dp_srng wbm_desc_rel_ring;
2182 
2183 	/* DP Interrupts */
2184 	struct dp_intr intr_ctx[WLAN_CFG_INT_NUM_CONTEXTS];
2185 
2186 	/* Monitor mode mac id to dp_intr_id map */
2187 	int mon_intr_id_lmac_map[MAX_NUM_LMAC_HW];
2188 	/* Rx SW descriptor pool for RXDMA monitor buffer */
2189 	struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS];
2190 
2191 	/* Rx SW descriptor pool for RXDMA status buffer */
2192 	struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS];
2193 
2194 	/* Rx SW descriptor pool for RXDMA buffer */
2195 	struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS];
2196 
2197 	/* Number of REO destination rings */
2198 	uint8_t num_reo_dest_rings;
2199 
2200 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2201 	/* lock to control access to soc TX descriptors */
2202 	qdf_spinlock_t flow_pool_array_lock;
2203 
2204 	/* pause callback to pause TX queues as per flow control */
2205 	tx_pause_callback pause_cb;
2206 
2207 	/* flow pool related statistics */
2208 	struct dp_txrx_pool_stats pool_stats;
2209 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
2210 
2211 	notify_pre_reset_fw_callback notify_fw_callback;
2212 
2213 	unsigned long service_rings_running;
2214 
2215 	uint32_t wbm_idle_scatter_buf_size;
2216 
2217 	/* VDEVs on this SOC */
2218 	struct dp_vdev *vdev_id_map[MAX_VDEV_CNT];
2219 
2220 	/* Tx H/W queues lock */
2221 	qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];
2222 
2223 	/* Tx ring map for interrupt processing */
2224 	uint8_t tx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
2225 
2226 	/* Rx ring map for interrupt processing */
2227 	uint8_t rx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
2228 
2229 	/* peer ID to peer object map (array of pointers to peer objects) */
2230 	struct dp_peer **peer_id_to_obj_map;
2231 
2232 	struct {
2233 		unsigned mask;
2234 		unsigned idx_bits;
2235 		TAILQ_HEAD(, dp_peer) * bins;
2236 	} peer_hash;
2237 
2238 	/* rx defrag state - TBD: do we need this per radio? */
2239 	struct {
2240 		struct {
2241 			TAILQ_HEAD(, dp_rx_tid_defrag) waitlist;
2242 			uint32_t timeout_ms;
2243 			uint32_t next_flush_ms;
2244 			qdf_spinlock_t defrag_lock;
2245 		} defrag;
2246 		struct {
2247 			int defrag_timeout_check;
2248 			int dup_check;
2249 		} flags;
2250 		TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list;
2251 		qdf_spinlock_t reo_cmd_lock;
2252 	} rx;
2253 
2254 	/* optional rx processing function */
2255 	void (*rx_opt_proc)(
2256 		struct dp_vdev *vdev,
2257 		struct dp_peer *peer,
2258 		unsigned tid,
2259 		qdf_nbuf_t msdu_list);
2260 
2261 	/* pool addr for mcast enhance buff */
2262 	struct {
2263 		int size;
2264 		uint32_t paddr;
2265 		uint32_t *vaddr;
2266 		struct dp_tx_me_buf_t *freelist;
2267 		int buf_in_use;
2268 		qdf_dma_mem_context(memctx);
2269 	} me_buf;
2270 
2271 	/* Protect peer hash table */
2272 	DP_MUTEX_TYPE peer_hash_lock;
2273 	/* Protect peer_id_to_objmap */
2274 	DP_MUTEX_TYPE peer_map_lock;
2275 
2276 	/* maximum number of supported peers */
2277 	uint32_t max_peers;
2278 	/* maximum value for peer_id */
2279 	uint32_t max_peer_id;
2280 
2281 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
2282 	uint32_t peer_id_shift;
2283 	uint32_t peer_id_mask;
2284 #endif
2285 
2286 	/* SoC level data path statistics */
2287 	struct dp_soc_stats stats;
2288 #ifdef WLAN_SYSFS_DP_STATS
2289 	/* sysfs config for DP stats */
2290 	struct sysfs_stats_config *sysfs_config;
2291 #endif
2292 	/* timestamp to keep track of msdu buffers received on reo err ring */
2293 	uint64_t rx_route_err_start_pkt_ts;
2294 
2295 	/* Num RX Route err in a given window to keep track of rate of errors */
2296 	uint32_t rx_route_err_in_window;
2297 
2298 	/* Enable processing of Tx completion status words */
2299 	bool process_tx_status;
2300 	bool process_rx_status;
2301 	struct dp_ast_entry **ast_table;
2302 	struct {
2303 		unsigned mask;
2304 		unsigned idx_bits;
2305 		TAILQ_HEAD(, dp_ast_entry) * bins;
2306 	} ast_hash;
2307 
2308 #ifdef DP_TX_HW_DESC_HISTORY
2309 	struct dp_tx_hw_desc_history tx_hw_desc_history;
2310 #endif
2311 
2312 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
2313 	struct dp_rx_history *rx_ring_history[MAX_REO_DEST_RINGS];
2314 	struct dp_rx_refill_history *rx_refill_ring_history[MAX_PDEV_CNT];
2315 	struct dp_rx_err_history *rx_err_ring_history;
2316 	struct dp_rx_reinject_history *rx_reinject_ring_history;
2317 #endif
2318 
2319 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
2320 	struct dp_mon_status_ring_history *mon_status_ring_history;
2321 #endif
2322 
2323 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
2324 	struct dp_tx_tcl_history tx_tcl_history;
2325 	struct dp_tx_comp_history tx_comp_history;
2326 #endif
2327 
2328 	qdf_spinlock_t ast_lock;
2329 	/* Timer for AST entry ageout maintenance */
2330 	qdf_timer_t ast_aging_timer;
2331 
2332 	/*Timer counter for WDS AST entry ageout*/
2333 	uint8_t wds_ast_aging_timer_cnt;
2334 	bool pending_ageout;
2335 	bool ast_offload_support;
2336 	bool host_ast_db_enable;
2337 	uint32_t max_ast_ageout_count;
2338 	uint8_t eapol_over_control_port;
2339 
2340 	uint8_t sta_mode_search_policy;
2341 	qdf_timer_t lmac_reap_timer;
2342 	uint8_t lmac_timer_init;
2343 	qdf_timer_t int_timer;
2344 	uint8_t intr_mode;
2345 	uint8_t lmac_polled_mode;
2346 
2347 	qdf_list_t reo_desc_freelist;
2348 	qdf_spinlock_t reo_desc_freelist_lock;
2349 
2350 	/* htt stats */
2351 	struct htt_t2h_stats htt_stats;
2352 
2353 	void *external_txrx_handle; /* External data path handle */
2354 #ifdef IPA_OFFLOAD
2355 	struct ipa_dp_tx_rsc ipa_uc_tx_rsc;
2356 #ifdef IPA_WDI3_TX_TWO_PIPES
2357 	/* Resources for the alternative IPA TX pipe */
2358 	struct ipa_dp_tx_rsc ipa_uc_tx_rsc_alt;
2359 #endif
2360 
2361 	struct ipa_dp_rx_rsc ipa_uc_rx_rsc;
2362 #ifdef IPA_WDI3_VLAN_SUPPORT
2363 	struct ipa_dp_rx_rsc ipa_uc_rx_rsc_alt;
2364 #endif
2365 	qdf_atomic_t ipa_pipes_enabled;
2366 	bool ipa_first_tx_db_access;
2367 	qdf_spinlock_t ipa_rx_buf_map_lock;
2368 	bool ipa_rx_buf_map_lock_initialized;
2369 	uint8_t ipa_reo_ctx_lock_required[MAX_REO_DEST_RINGS];
2370 #endif
2371 
2372 #ifdef WLAN_FEATURE_STATS_EXT
2373 	struct {
2374 		uint32_t rx_mpdu_received;
2375 		uint32_t rx_mpdu_missed;
2376 	} ext_stats;
2377 	qdf_event_t rx_hw_stats_event;
2378 	qdf_spinlock_t rx_hw_stats_lock;
2379 	bool is_last_stats_ctx_init;
2380 #endif /* WLAN_FEATURE_STATS_EXT */
2381 
2382 	/* Indicates HTT map/unmap versions*/
2383 	uint8_t peer_map_unmap_versions;
2384 	/* Per peer per Tid ba window size support */
2385 	uint8_t per_tid_basize_max_tid;
2386 	/* Soc level flag to enable da_war */
2387 	uint8_t da_war_enabled;
2388 	/* number of active ast entries */
2389 	uint32_t num_ast_entries;
2390 	/* peer extended rate statistics context at soc level*/
2391 	struct cdp_soc_rate_stats_ctx *rate_stats_ctx;
2392 	/* peer extended rate statistics control flag */
2393 	bool peerstats_enabled;
2394 
2395 	/* 8021p PCP-TID map values */
2396 	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
2397 	/* TID map priority value */
2398 	uint8_t tidmap_prty;
2399 	/* Pointer to global per ring type specific configuration table */
2400 	struct wlan_srng_cfg *wlan_srng_cfg;
2401 	/* Num Tx outstanding on device */
2402 	qdf_atomic_t num_tx_outstanding;
2403 	/* Num Tx exception on device */
2404 	qdf_atomic_t num_tx_exception;
2405 	/* Num Tx allowed */
2406 	uint32_t num_tx_allowed;
2407 	/* Preferred HW mode */
2408 	uint8_t preferred_hw_mode;
2409 
2410 	/**
2411 	 * Flag to indicate whether WAR to address single cache entry
2412 	 * invalidation bug is enabled or not
2413 	 */
2414 	bool is_rx_fse_full_cache_invalidate_war_enabled;
2415 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
2416 	/**
2417 	 * Pointer to DP RX Flow FST at SOC level if
2418 	 * is_rx_flow_search_table_per_pdev is false
2419 	 * TBD: rx_fst[num_macs] if we decide to have per mac FST
2420 	 */
2421 	struct dp_rx_fst *rx_fst;
2422 #ifdef WLAN_SUPPORT_RX_FISA
2423 	uint8_t fisa_enable;
2424 	uint8_t fisa_lru_del_enable;
2425 	/**
2426 	 * Params used for controlling the fisa aggregation dynamically
2427 	 */
2428 	struct {
2429 		qdf_atomic_t skip_fisa;
2430 		uint8_t fisa_force_flush[MAX_REO_DEST_RINGS];
2431 	} skip_fisa_param;
2432 
2433 	/**
2434 	 * CMEM address and size for FST in CMEM. This is the address
2435 	 * shared during init time.
2436 	 */
2437 	uint64_t fst_cmem_base;
2438 	uint64_t fst_cmem_size;
2439 #endif
2440 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
2441 	/* SG supported for msdu continued packets from wbm release ring */
2442 	bool wbm_release_desc_rx_sg_support;
2443 	bool peer_map_attach_success;
2444 	/* Flag to disable mac1 ring interrupts */
2445 	bool disable_mac1_intr;
2446 	/* Flag to disable mac2 ring interrupts */
2447 	bool disable_mac2_intr;
2448 
2449 	struct {
2450 		/* 1st msdu in sg for msdu continued packets in wbm rel ring */
2451 		bool wbm_is_first_msdu_in_sg;
2452 		/* Wbm sg list head */
2453 		qdf_nbuf_t wbm_sg_nbuf_head;
2454 		/* Wbm sg list tail */
2455 		qdf_nbuf_t wbm_sg_nbuf_tail;
2456 		uint32_t wbm_sg_desc_msdu_len;
2457 	} wbm_sg_param;
2458 	/* Number of msdu exception descriptors */
2459 	uint32_t num_msdu_exception_desc;
2460 
2461 	/* RX buffer params */
2462 	struct rx_buff_pool rx_buff_pool[MAX_PDEV_CNT];
2463 	struct rx_refill_buff_pool rx_refill_buff_pool;
2464 	/* Save recent operation related variable */
2465 	struct dp_last_op_info last_op_info;
2466 	TAILQ_HEAD(, dp_peer) inactive_peer_list;
2467 	qdf_spinlock_t inactive_peer_list_lock;
2468 	TAILQ_HEAD(, dp_vdev) inactive_vdev_list;
2469 	qdf_spinlock_t inactive_vdev_list_lock;
2470 	/* lock to protect vdev_id_map table*/
2471 	qdf_spinlock_t vdev_map_lock;
2472 
2473 	/* Flow Search Table is in CMEM */
2474 	bool fst_in_cmem;
2475 
2476 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
2477 	struct dp_swlm swlm;
2478 #endif
2479 
2480 #ifdef FEATURE_RUNTIME_PM
2481 	/* DP Rx timestamp */
2482 	qdf_time_t rx_last_busy;
2483 	/* Dp runtime refcount */
2484 	qdf_atomic_t dp_runtime_refcount;
2485 	/* Dp tx pending count in RTPM */
2486 	qdf_atomic_t tx_pending_rtpm;
2487 #endif
2488 	/* Invalid buffer that allocated for RX buffer */
2489 	qdf_nbuf_queue_t invalid_buf_queue;
2490 
2491 #ifdef FEATURE_MEC
2492 	/** @mec_lock: spinlock for MEC table */
2493 	qdf_spinlock_t mec_lock;
2494 	/** @mec_cnt: number of active mec entries */
2495 	qdf_atomic_t mec_cnt;
2496 	struct {
2497 		/** @mask: mask bits */
2498 		uint32_t mask;
2499 		/** @idx_bits: index to shift bits */
2500 		uint32_t idx_bits;
2501 		/** @bins: MEC table */
2502 		TAILQ_HEAD(, dp_mec_entry) * bins;
2503 	} mec_hash;
2504 #endif
2505 
2506 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
2507 	qdf_list_t reo_desc_deferred_freelist;
2508 	qdf_spinlock_t reo_desc_deferred_freelist_lock;
2509 	bool reo_desc_deferred_freelist_init;
2510 #endif
2511 	/* BM id for first WBM2SW  ring */
2512 	uint32_t wbm_sw0_bm_id;
2513 
2514 	/* Store arch_id from device_id */
2515 	uint16_t arch_id;
2516 
2517 	/* link desc ID start per device type */
2518 	uint32_t link_desc_id_start;
2519 
2520 	/* CMEM buffer target reserved for host usage */
2521 	uint64_t cmem_base;
2522 	/* CMEM size in bytes */
2523 	uint64_t cmem_total_size;
2524 	/* CMEM free size in bytes */
2525 	uint64_t cmem_avail_size;
2526 
2527 	/* SOC level feature flags */
2528 	struct dp_soc_features features;
2529 
2530 #ifdef WIFI_MONITOR_SUPPORT
2531 	struct dp_mon_soc *monitor_soc;
2532 #endif
2533 	uint8_t rxdma2sw_rings_not_supported:1,
2534 		wbm_sg_last_msdu_war:1,
2535 		mec_fw_offload:1,
2536 		multi_peer_grp_cmd_supported:1;
2537 
2538 	/* Number of Rx refill rings */
2539 	uint8_t num_rx_refill_buf_rings;
2540 #ifdef FEATURE_RUNTIME_PM
2541 	/* flag to indicate vote for runtime_pm for high tput case */
2542 	qdf_atomic_t rtpm_high_tput_flag;
2543 #endif
2544 	/* Buffer manager ID for idle link descs */
2545 	uint8_t idle_link_bm_id;
2546 	qdf_atomic_t ref_count;
2547 
2548 	unsigned long vdev_stats_id_map;
2549 	bool txmon_hw_support;
2550 
2551 #ifdef DP_UMAC_HW_RESET_SUPPORT
2552 	struct dp_soc_umac_reset_ctx umac_reset_ctx;
2553 #endif
2554 	/* PPDU to link_id mapping parameters */
2555 	uint8_t link_id_offset;
2556 	uint8_t link_id_bits;
2557 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
2558 	/* A flag used to decide the switch of rx link speed */
2559 	bool high_throughput;
2560 #endif
2561 };
2562 
2563 #ifdef IPA_OFFLOAD
2564 /**
2565  * struct dp_ipa_resources - Resources needed for IPA
2566  */
2567 struct dp_ipa_resources {
2568 	qdf_shared_mem_t tx_ring;
2569 	uint32_t tx_num_alloc_buffer;
2570 
2571 	qdf_shared_mem_t tx_comp_ring;
2572 	qdf_shared_mem_t rx_rdy_ring;
2573 	qdf_shared_mem_t rx_refill_ring;
2574 
2575 	/* IPA UC doorbell registers paddr */
2576 	qdf_dma_addr_t tx_comp_doorbell_paddr;
2577 	uint32_t *tx_comp_doorbell_vaddr;
2578 	qdf_dma_addr_t rx_ready_doorbell_paddr;
2579 
2580 	bool is_db_ddr_mapped;
2581 
2582 #ifdef IPA_WDI3_TX_TWO_PIPES
2583 	qdf_shared_mem_t tx_alt_ring;
2584 	uint32_t tx_alt_ring_num_alloc_buffer;
2585 	qdf_shared_mem_t tx_alt_comp_ring;
2586 
2587 	/* IPA UC doorbell registers paddr */
2588 	qdf_dma_addr_t tx_alt_comp_doorbell_paddr;
2589 	uint32_t *tx_alt_comp_doorbell_vaddr;
2590 #endif
2591 #ifdef IPA_WDI3_VLAN_SUPPORT
2592 	qdf_shared_mem_t rx_alt_rdy_ring;
2593 	qdf_shared_mem_t rx_alt_refill_ring;
2594 	qdf_dma_addr_t rx_alt_ready_doorbell_paddr;
2595 #endif
2596 };
2597 #endif
2598 
2599 #define MAX_RX_MAC_RINGS 2
2600 /* Same as NAC_MAX_CLIENT */
2601 #define DP_NAC_MAX_CLIENT  24
2602 
2603 /*
2604  * 24 bits cookie size
2605  * 10 bits page id 0 ~ 1023 for MCL
2606  * 3 bits page id 0 ~ 7 for WIN
2607  * WBM Idle List Desc size = 128,
2608  * Num descs per page = 4096/128 = 32 for MCL
2609  * Num descs per page = 2MB/128 = 16384 for WIN
2610  */
2611 /*
2612  * Macros to setup link descriptor cookies - for link descriptors, we just
2613  * need the first 3 bits to store the bank/page ID for WIN. The
2614  * remaining bits will be used to set a unique ID, which will
2615  * be useful in debugging
2616  */
2617 #ifdef MAX_ALLOC_PAGE_SIZE
2618 #if PAGE_SIZE == 4096
2619 #define LINK_DESC_PAGE_ID_MASK  0x007FE0
2620 #define LINK_DESC_ID_SHIFT      5
2621 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x8000
2622 #elif PAGE_SIZE == 65536
2623 #define LINK_DESC_PAGE_ID_MASK  0x007E00
2624 #define LINK_DESC_ID_SHIFT      9
2625 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x800
2626 #else
2627 #error "Unsupported kernel PAGE_SIZE"
2628 #endif
2629 #define LINK_DESC_COOKIE(_desc_id, _page_id, _desc_id_start) \
2630 	((((_page_id) + (_desc_id_start)) << LINK_DESC_ID_SHIFT) | (_desc_id))
2631 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
2632 	(((_cookie) & LINK_DESC_PAGE_ID_MASK) >> LINK_DESC_ID_SHIFT)
2633 #else
2634 #define LINK_DESC_PAGE_ID_MASK  0x7
2635 #define LINK_DESC_ID_SHIFT      3
2636 #define LINK_DESC_COOKIE(_desc_id, _page_id, _desc_id_start) \
2637 	((((_desc_id) + (_desc_id_start)) << LINK_DESC_ID_SHIFT) | (_page_id))
2638 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
2639 	((_cookie) & LINK_DESC_PAGE_ID_MASK)
2640 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x8000
2641 #endif
2642 #define LINK_DESC_ID_START_20_BITS_COOKIE 0x4000
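
/*
 * Example (worked illustration of the encoding above for the build where the
 * page ID occupies the low 3 bits, i.e. LINK_DESC_ID_SHIFT == 3 and
 * LINK_DESC_PAGE_ID_MASK == 0x7): with _desc_id = 10, _page_id = 5 and
 * _desc_id_start = LINK_DESC_ID_START_21_BITS_COOKIE (0x8000),
 *
 *	cookie = ((10 + 0x8000) << 3) | 5 = 0x40055
 *	LINK_DESC_COOKIE_PAGE_ID(0x40055) = 0x40055 & 0x7 = 5
 *
 * i.e. the page ID round-trips through the cookie while the higher bits carry
 * a unique descriptor ID that is useful for debugging.
 */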
2643 
2644 /* same as ieee80211_nac_param */
2645 enum dp_nac_param_cmd {
2646 	/* IEEE80211_NAC_PARAM_ADD */
2647 	DP_NAC_PARAM_ADD = 1,
2648 	/* IEEE80211_NAC_PARAM_DEL */
2649 	DP_NAC_PARAM_DEL,
2650 	/* IEEE80211_NAC_PARAM_LIST */
2651 	DP_NAC_PARAM_LIST,
2652 };
2653 
2654 /**
2655  * struct dp_neighbour_peer - neighbour peer list type for smart mesh
2656  * @neighbour_peers_macaddr: neighbour peer's mac address
2657  * @neighbour_peer_list_elem: neighbour peer list TAILQ element
2658  * @ast_entry: ast_entry for neighbour peer
2659  * @rssi: rssi value
2660  */
2661 struct dp_neighbour_peer {
2662 	/* MAC address of neighbour's peer */
2663 	union dp_align_mac_addr neighbour_peers_macaddr;
2664 	struct dp_vdev *vdev;
2665 	struct dp_ast_entry *ast_entry;
2666 	uint8_t rssi;
2667 	/* node in the list of neighbour's peer */
2668 	TAILQ_ENTRY(dp_neighbour_peer) neighbour_peer_list_elem;
2669 };
2670 
2671 #ifdef WLAN_TX_PKT_CAPTURE_ENH
2672 #define WLAN_TX_PKT_CAPTURE_ENH 1
2673 #define DP_TX_PPDU_PROC_THRESHOLD 8
2674 #define DP_TX_PPDU_PROC_TIMEOUT 10
2675 #endif
2676 
2677 /**
2678  * struct ppdu_info - PPDU Status info descriptor
2679  * @ppdu_id: Unique ppduid assigned by firmware for every tx packet
2680  * @sched_cmdid: schedule command id, which will be same in a burst
2681  * @max_ppdu_id: wrap around for ppdu id
2682  * @last_tlv_cnt: Keep track for missing ppdu tlvs
2683  * @last_user: last ppdu processed for user
2684  * @is_ampdu: set if Ampdu aggregate
2685  * @nbuf: ppdu descriptor payload
2686  * @ppdu_desc: ppdu descriptor
2687  * @ppdu_info_list_elem: linked list of ppdu tlvs
2688  * @ppdu_info_queue_elem: Singly linked list (queue) of ppdu tlvs
2689  * @compltn_common_tlv: Successful tlv counter from COMPLTN COMMON tlv
2690  * @ack_ba_tlv: Successful tlv counter from ACK BA tlv
2691  */
2692 struct ppdu_info {
2693 	uint32_t ppdu_id;
2694 	uint32_t sched_cmdid;
2695 	uint32_t max_ppdu_id;
2696 	uint32_t tsf_l32;
2697 	uint16_t tlv_bitmap;
2698 	uint16_t last_tlv_cnt;
2699 	uint16_t last_user:8,
2700 		 is_ampdu:1;
2701 	qdf_nbuf_t nbuf;
2702 	struct cdp_tx_completion_ppdu *ppdu_desc;
2703 #ifdef WLAN_TX_PKT_CAPTURE_ENH
2704 	union {
2705 		TAILQ_ENTRY(ppdu_info) ppdu_info_dlist_elem;
2706 		STAILQ_ENTRY(ppdu_info) ppdu_info_slist_elem;
2707 	} ulist;
2708 #define ppdu_info_list_elem ulist.ppdu_info_dlist_elem
2709 #define ppdu_info_queue_elem ulist.ppdu_info_slist_elem
2710 #else
2711 	TAILQ_ENTRY(ppdu_info) ppdu_info_list_elem;
2712 #endif
2713 	uint8_t compltn_common_tlv;
2714 	uint8_t ack_ba_tlv;
2715 	bool done;
2716 };
2717 
2718 /**
2719  * struct msdu_completion_info - wbm msdu completion info
2720  * @ppdu_id            - Unique ppduid assigned by firmware for every tx packet
2721  * @peer_id            - peer_id
2722  * @tid                - tid which used during transmit
2723  * @first_msdu         - first msdu indication
2724  * @last_msdu          - last msdu indication
2725  * @msdu_part_of_amsdu - msdu part of amsdu
2726  * @transmit_cnt       - retried count
2727  * @status             - transmit status
2728  * @tsf                - timestamp which it transmitted
2729  */
2730 struct msdu_completion_info {
2731 	uint32_t ppdu_id;
2732 	uint16_t peer_id;
2733 	uint8_t tid;
2734 	uint8_t first_msdu:1,
2735 		last_msdu:1,
2736 		msdu_part_of_amsdu:1;
2737 	uint8_t transmit_cnt;
2738 	uint8_t status;
2739 	uint32_t tsf;
2740 };
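
/*
 * Example (illustrative sketch): in a Tx-capture style path the fields above
 * can be filled from the parsed WBM completion (struct hal_tx_completion_status
 * from hal_tx.h) before the descriptor's nbuf is handed to the monitor layer.
 * The helper name is hypothetical and the field mapping is assumed to be 1:1.
 *
 *	static inline void
 *	dp_fill_msdu_completion_info(struct msdu_completion_info *info,
 *				     struct hal_tx_completion_status *ts)
 *	{
 *		info->ppdu_id            = ts->ppdu_id;
 *		info->peer_id            = ts->peer_id;
 *		info->tid                = ts->tid;
 *		info->first_msdu         = ts->first_msdu;
 *		info->last_msdu          = ts->last_msdu;
 *		info->msdu_part_of_amsdu = ts->msdu_part_of_amsdu;
 *		info->transmit_cnt       = ts->transmit_cnt;
 *		info->status             = ts->status;
 *		info->tsf                = ts->tsf;
 *	}
 */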
2741 
2742 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
2743 struct rx_protocol_tag_map {
2744 	/* This is the user configured tag for the said protocol type */
2745 	uint16_t tag;
2746 };
2747 
2748 /**
2749  * struct rx_protocol_tag_stats - protocol statistics
2750  * @tag_ctr: number of rx msdus matching this tag
2752  */
2753 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
2754 struct rx_protocol_tag_stats {
2755 	uint32_t tag_ctr;
2756 };
2757 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
2758 
2759 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
2760 
2761 #ifdef WLAN_RX_PKT_CAPTURE_ENH
2762 /* Template data to be set for Enhanced RX Monitor packets */
2763 #define RX_MON_CAP_ENH_TRAILER 0xdeadc0dedeadda7a
2764 
2765 /**
2766  * struct dp_rx_mon_enh_trailer_data - Data structure to set a known pattern
2767  * at end of each MSDU in monitor-lite mode
2768  * @reserved1: reserved for future use
2769  * @reserved2: reserved for future use
2770  * @flow_tag: flow tag value read from skb->cb
2771  * @protocol_tag: protocol tag value read from skb->cb
2772  */
2773 struct dp_rx_mon_enh_trailer_data {
2774 	uint16_t reserved1;
2775 	uint16_t reserved2;
2776 	uint16_t flow_tag;
2777 	uint16_t protocol_tag;
2778 };
2779 #endif /* WLAN_RX_PKT_CAPTURE_ENH */
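
/*
 * Example (illustrative sketch): the 8-byte trailer above can be appended at
 * the tail of each captured MSDU, seeded with the known pattern and then
 * updated with the tag values carried in skb->cb. flow_tag and protocol_tag
 * are assumed locals here, and the exact placement of the trailer is up to
 * the monitor implementation.
 *
 *	uint64_t pattern = RX_MON_CAP_ENH_TRAILER;
 *	struct dp_rx_mon_enh_trailer_data trailer;
 *
 *	qdf_mem_copy(&trailer, &pattern, sizeof(trailer));
 *	trailer.flow_tag = flow_tag;
 *	trailer.protocol_tag = protocol_tag;
 *	qdf_mem_copy(qdf_nbuf_put_tail(nbuf, sizeof(trailer)),
 *		     &trailer, sizeof(trailer));
 */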
2780 
2781 #ifdef HTT_STATS_DEBUGFS_SUPPORT
2782 /* Number of debugfs entries created for HTT stats */
2783 #define PDEV_HTT_STATS_DBGFS_SIZE HTT_DBG_NUM_EXT_STATS
2784 
2785 /* struct pdev_htt_stats_dbgfs_priv - Structure to maintain debugfs information
2786  * of HTT stats
2787  * @pdev: dp pdev of debugfs entry
2788  * @stats_id: stats id of debugfs entry
2789  */
2790 struct pdev_htt_stats_dbgfs_priv {
2791 	struct dp_pdev *pdev;
2792 	uint16_t stats_id;
2793 };
2794 
2795 /* struct pdev_htt_stats_dbgfs_cfg - PDEV level data structure for debugfs
2796  * support for HTT stats
2797  * @debugfs_entry: qdf_debugfs directory entry
2798  * @m: qdf debugfs file handler
2799  * @pdev_htt_stats_dbgfs_ops: File operations of entry created
2800  * @priv: HTT stats debugfs private object
2801  * @htt_stats_dbgfs_event: HTT stats event for debugfs support
2802  * @lock: HTT stats debugfs lock
2803  * @htt_stats_dbgfs_msg_process: Function callback to print HTT stats
2804  */
2805 struct pdev_htt_stats_dbgfs_cfg {
2806 	qdf_dentry_t debugfs_entry[PDEV_HTT_STATS_DBGFS_SIZE];
2807 	qdf_debugfs_file_t m;
2808 	struct qdf_debugfs_fops
2809 			pdev_htt_stats_dbgfs_ops[PDEV_HTT_STATS_DBGFS_SIZE - 1];
2810 	struct pdev_htt_stats_dbgfs_priv priv[PDEV_HTT_STATS_DBGFS_SIZE - 1];
2811 	qdf_event_t htt_stats_dbgfs_event;
2812 	qdf_mutex_t lock;
2813 	void (*htt_stats_dbgfs_msg_process)(void *data, A_INT32 len);
2814 };
2815 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
2816 
2817 struct dp_srng_ring_state {
2818 	enum hal_ring_type ring_type;
2819 	uint32_t sw_head;
2820 	uint32_t sw_tail;
2821 	uint32_t hw_head;
2822 	uint32_t hw_tail;
2823 
2824 };
2825 
2826 struct dp_soc_srngs_state {
2827 	uint32_t seq_num;
2828 	uint32_t max_ring_id;
2829 	struct dp_srng_ring_state ring_state[DP_MAX_SRNGS];
2830 	TAILQ_ENTRY(dp_soc_srngs_state) list_elem;
2831 };
2832 
2833 #ifdef WLAN_FEATURE_11BE_MLO
2834 /* struct dp_mlo_sync_timestamp - PDEV level data structure for storing
2835  * MLO timestamp received via HTT msg.
2836  * msg_type: This would be set to HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND
2837  * pdev_id: pdev_id
2838  * chip_id: chip_id
2839  * mac_clk_freq: mac clock frequency of the mac HW block in MHz
2840  * sync_tstmp_lo_us: lower 32 bits of the WLAN global time stamp (in us) at
2841  *                   which last sync interrupt was received
2842  * sync_tstmp_hi_us: upper 32 bits of the WLAN global time stamp (in us) at
2843  *                   which last sync interrupt was received
2844  * mlo_offset_lo_us: lower 32 bits of the MLO time stamp offset in us
2845  * mlo_offset_hi_us: upper 32 bits of the MLO time stamp offset in us
2846  * mlo_offset_clks:  MLO time stamp offset in clock ticks for sub us
2847  * mlo_comp_us:      MLO time stamp compensation applied in us
2848  * mlo_comp_clks:    MLO time stamp compensation applied in clock ticks
2849  *                   for sub us resolution
2850  * mlo_comp_timer:   period of MLO compensation timer at which compensation
2851  *                   is applied, in us
2852  */
2853 struct dp_mlo_sync_timestamp {
2854 	uint32_t msg_type:8,
2855 		 pdev_id:2,
2856 		 chip_id:2,
2857 		 rsvd1:4,
2858 		 mac_clk_freq:16;
2859 	uint32_t sync_tstmp_lo_us;
2860 	uint32_t sync_tstmp_hi_us;
2861 	uint32_t mlo_offset_lo_us;
2862 	uint32_t mlo_offset_hi_us;
2863 	uint32_t mlo_offset_clks;
2864 	uint32_t mlo_comp_us:16,
2865 		 mlo_comp_clks:10,
2866 		 rsvd2:6;
2867 	uint32_t mlo_comp_timer:22,
2868 		 rsvd3:10;
2869 };
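
/*
 * Example (illustrative arithmetic): the 32-bit lo/hi halves above combine
 * into 64-bit microsecond values in the usual way; the variable names below
 * are hypothetical.
 *
 *	uint64_t sync_tstamp_us = ((uint64_t)ts->sync_tstmp_hi_us << 32) |
 *				  ts->sync_tstmp_lo_us;
 *	uint64_t mlo_offset_us  = ((uint64_t)ts->mlo_offset_hi_us << 32) |
 *				  ts->mlo_offset_lo_us;
 */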
2870 #endif
2871 
2872 /* PDEV level structure for data path */
2873 struct dp_pdev {
2874 	/**
2875 	 * Re-use Memory Section Starts
2876 	 */
2877 
2878 	/* PDEV Id */
2879 	uint8_t pdev_id;
2880 
2881 	/* LMAC Id */
2882 	uint8_t lmac_id;
2883 
2884 	/* Target pdev  Id */
2885 	uint8_t target_pdev_id;
2886 
2887 	bool pdev_deinit;
2888 
2889 	/* TXRX SOC handle */
2890 	struct dp_soc *soc;
2891 
2892 	/* pdev status down or up required to handle dynamic hw
2893 	 * mode switch between DBS and DBS_SBS.
2894 	 * 1 = down
2895 	 * 0 = up
2896 	 */
2897 	bool is_pdev_down;
2898 
2899 	/* Enhanced Stats is enabled */
2900 	bool enhanced_stats_en;
2901 
2902 	/* Flag to indicate fast RX */
2903 	bool rx_fast_flag;
2904 
2905 	/* Second ring used to replenish rx buffers */
2906 	struct dp_srng rx_refill_buf_ring2;
2907 #ifdef IPA_WDI3_VLAN_SUPPORT
2908 	/* Third ring used to replenish rx buffers */
2909 	struct dp_srng rx_refill_buf_ring3;
2910 #endif
2911 
2912 	/* Empty ring used by firmware to post rx buffers to the MAC */
2913 	struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];
2914 
2915 	int ch_band_lmac_id_mapping[REG_BAND_UNKNOWN];
2916 
2917 	/* wlan_cfg pdev ctxt*/
2918 	 struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx;
2919 
2920 	/**
2921 	 * TODO: See if we need a ring map here for LMAC rings.
2922 	 * 1. Monitor rings are currently planned to be processed on receiving
2923 	 * PPDU end interrupts and hence won't need ring based interrupts.
2924 	 * 2. Rx buffer rings will be replenished during REO destination
2925 	 * processing and don't require regular interrupt handling - we will
2926 	 * only handle low water mark interrupts which are not expected
2927 	 * frequently
2928 	 */
2929 
2930 	/* VDEV list */
2931 	TAILQ_HEAD(, dp_vdev) vdev_list;
2932 
2933 	/* vdev list lock */
2934 	qdf_spinlock_t vdev_list_lock;
2935 
2936 	/* Number of vdevs this device has */
2937 	uint16_t vdev_count;
2938 
2939 	/* PDEV transmit lock */
2940 	qdf_spinlock_t tx_lock;
2941 
2942 	/*tx_mutex for me*/
2943 	DP_MUTEX_TYPE tx_mutex;
2944 
2945 	/* msdu chain head & tail */
2946 	qdf_nbuf_t invalid_peer_head_msdu;
2947 	qdf_nbuf_t invalid_peer_tail_msdu;
2948 
2949 	/* Band steering  */
2950 	/* TBD */
2951 
2952 	/* PDEV level data path statistics */
2953 	struct cdp_pdev_stats stats;
2954 
2955 	/* Global RX decap mode for the device */
2956 	enum htt_pkt_type rx_decap_mode;
2957 
2958 	qdf_atomic_t num_tx_outstanding;
2959 	int32_t tx_descs_max;
2960 
2961 	qdf_atomic_t num_tx_exception;
2962 
2963 	/* MCL specific local peer handle */
2964 	struct {
2965 		uint8_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
2966 		uint8_t freelist;
2967 		qdf_spinlock_t lock;
2968 		struct dp_peer *map[OL_TXRX_NUM_LOCAL_PEER_IDS];
2969 	} local_peer_ids;
2970 
2971 	/* dscp_tid_map_*/
2972 	uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX];
2973 
2974 	/* operating channel */
2975 	struct {
2976 		uint8_t num;
2977 		uint8_t band;
2978 		uint16_t freq;
2979 	} operating_channel;
2980 
2981 	/* pool addr for mcast enhance buff */
2982 	struct {
2983 		int size;
2984 		uint32_t paddr;
2985 		char *vaddr;
2986 		struct dp_tx_me_buf_t *freelist;
2987 		int buf_in_use;
2988 		qdf_dma_mem_context(memctx);
2989 	} me_buf;
2990 
2991 	bool hmmc_tid_override_en;
2992 	uint8_t hmmc_tid;
2993 
2994 	/* Number of VAPs with mcast enhancement enabled */
2995 	qdf_atomic_t mc_num_vap_attached;
2996 
2997 	qdf_atomic_t stats_cmd_complete;
2998 
2999 #ifdef IPA_OFFLOAD
3000 	ipa_uc_op_cb_type ipa_uc_op_cb;
3001 	void *usr_ctxt;
3002 	struct dp_ipa_resources ipa_resource;
3003 #endif
3004 
3005 	/* TBD */
3006 
3007 	/* map this pdev to a particular Reo Destination ring */
3008 	enum cdp_host_reo_dest_ring reo_dest;
3009 
3010 	/* WDI event handlers */
3011 	struct wdi_event_subscribe_t **wdi_event_list;
3012 
3013 	bool cfr_rcc_mode;
3014 
3015 	/* enable time latency check for tx completion */
3016 	bool latency_capture_enable;
3017 
3018 	/* enable calculation of delay stats*/
3019 	bool delay_stats_flag;
3020 	void *dp_txrx_handle; /* Advanced data path handle */
3021 	uint32_t ppdu_id;
3022 	bool first_nbuf;
3023 	/* Current noise-floor reading for the pdev channel */
3024 	int16_t chan_noise_floor;
3025 
3026 	/*
3027 	 * For multiradio device, this flag indicates if
3028 	 * this radio is primary or secondary.
3029 	 *
3030 	 * For HK 1.0, this is used for WAR for the AST issue.
3031 	 * HK 1.x mandates creation of only 1 AST entry with same MAC address
3032 	 * across 2 radios. is_primary indicates the radio on which DP should
3033 	 * install HW AST entry if there is a request to add 2 AST entries
3034 	 * with same MAC address across 2 radios
3035 	 */
3036 	uint8_t is_primary;
3037 	struct cdp_tx_sojourn_stats sojourn_stats;
3038 	qdf_nbuf_t sojourn_buf;
3039 
3040 	union dp_rx_desc_list_elem_t *free_list_head;
3041 	union dp_rx_desc_list_elem_t *free_list_tail;
3042 	/* Cached peer_id from htt_peer_details_tlv */
3043 	uint16_t fw_stats_peer_id;
3044 
3045 	/* qdf_event for fw_peer_stats */
3046 	qdf_event_t fw_peer_stats_event;
3047 
3048 	/* qdf_event for fw_stats */
3049 	qdf_event_t fw_stats_event;
3050 
3051 	/* User configured max number of tx buffers */
3052 	uint32_t num_tx_allowed;
3053 
3054 	/* unique cookie required for peer session */
3055 	uint32_t next_peer_cookie;
3056 
3057 	/*
3058 	 * Run time enabled when the first protocol tag is added,
3059 	 * run time disabled when the last protocol tag is deleted
3060 	 */
3061 	bool  is_rx_protocol_tagging_enabled;
3062 
3063 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
3064 	/*
3065 	 * The protocol type is used as array index to save
3066 	 * user provided tag info
3067 	 */
3068 	struct rx_protocol_tag_map rx_proto_tag_map[RX_PROTOCOL_TAG_MAX];
3069 
3070 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
3071 	/*
3072 	 * Track msdus received from each reo ring separately to avoid
3073 	 * simultaneous writes from different cores
3074 	 */
3075 	struct rx_protocol_tag_stats
3076 		reo_proto_tag_stats[MAX_REO_DEST_RINGS][RX_PROTOCOL_TAG_MAX];
3077 	/* Track msdus received from exception ring separately */
3078 	struct rx_protocol_tag_stats
3079 		rx_err_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
3080 	struct rx_protocol_tag_stats
3081 		mon_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
3082 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
3083 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
3084 
3085 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
3086 	/**
3087 	 * Pointer to DP Flow FST at SOC level if
3088 	 * is_rx_flow_search_table_per_pdev is true
3089 	 */
3090 	struct dp_rx_fst *rx_fst;
3091 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
3092 
3093 #ifdef FEATURE_TSO_STATS
3094 	/* TSO Id to index into TSO packet information */
3095 	qdf_atomic_t tso_idx;
3096 #endif /* FEATURE_TSO_STATS */
3097 
3098 #ifdef WLAN_SUPPORT_DATA_STALL
3099 	data_stall_detect_cb data_stall_detect_callback;
3100 #endif /* WLAN_SUPPORT_DATA_STALL */
3101 
3102 	/* flag to indicate whether LRO hash command has been sent to FW */
3103 	uint8_t is_lro_hash_configured;
3104 
3105 #ifdef HTT_STATS_DEBUGFS_SUPPORT
3106 	/* HTT stats debugfs params */
3107 	struct pdev_htt_stats_dbgfs_cfg *dbgfs_cfg;
3108 #endif
3109 	struct {
3110 		qdf_work_t work;
3111 		qdf_workqueue_t *work_queue;
3112 		uint32_t seq_num;
3113 		uint8_t queue_depth;
3114 		qdf_spinlock_t list_lock;
3115 
3116 		TAILQ_HEAD(, dp_soc_srngs_state) list;
3117 	} bkp_stats;
3118 #ifdef WIFI_MONITOR_SUPPORT
3119 	struct dp_mon_pdev *monitor_pdev;
3120 #endif
3121 #ifdef WLAN_FEATURE_11BE_MLO
3122 	struct dp_mlo_sync_timestamp timestamp;
3123 #endif
3124 	/* Is isolation mode enabled */
3125 	bool  isolation;
3126 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
3127 	uint8_t is_first_wakeup_packet;
3128 #endif
3129 #ifdef CONNECTIVITY_PKTLOG
3130 	/* packetdump callback functions */
3131 	ol_txrx_pktdump_cb dp_tx_packetdump_cb;
3132 	ol_txrx_pktdump_cb dp_rx_packetdump_cb;
3133 #endif
3134 
3135 	/* Firmware Stats for TLV received from Firmware */
3136 	uint64_t fw_stats_tlv_bitmap_rcvd;
3137 
3138 	/* For Checking Pending Firmware Response */
3139 	bool pending_fw_stats_response;
3140 };
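
/*
 * Example (illustrative sketch): a DSCP-to-TID lookup consistent with the
 * dscp_tid_map table above. The tos, vdev and pdev variables are assumed to
 * be in scope; DP_IP_DSCP_SHIFT/DP_IP_DSCP_MASK and the per-vdev
 * dscp_tid_map_id are defined elsewhere in this header.
 *
 *	uint8_t dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
 *	uint8_t tid  = pdev->dscp_tid_map[vdev->dscp_tid_map_id][dscp];
 */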
3141 
3142 struct dp_peer;
3143 
3144 #ifdef DP_RX_UDP_OVER_PEER_ROAM
3145 #define WLAN_ROAM_PEER_AUTH_STATUS_NONE 0x0
3146 /**
3147  * This macro is equivalent to macro ROAM_AUTH_STATUS_AUTHENTICATED used
3148  * in connection mgr
3149  */
3150 #define WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED 0x2
3151 #endif
3152 
3153 /* VDEV structure for data path state */
3154 struct dp_vdev {
3155 	/* OS device abstraction */
3156 	qdf_device_t osdev;
3157 
3158 	/* physical device that is the parent of this virtual device */
3159 	struct dp_pdev *pdev;
3160 
3161 	/* VDEV operating mode */
3162 	enum wlan_op_mode opmode;
3163 
3164 	/* VDEV subtype */
3165 	enum wlan_op_subtype subtype;
3166 
3167 	/* Tx encapsulation type for this VAP */
3168 	enum htt_cmn_pkt_type tx_encap_type;
3169 
3170 	/* Rx Decapsulation type for this VAP */
3171 	enum htt_cmn_pkt_type rx_decap_type;
3172 
3173 	/* WDS enabled */
3174 	bool wds_enabled;
3175 
3176 	/* MEC enabled */
3177 	bool mec_enabled;
3178 
3179 #ifdef QCA_SUPPORT_WDS_EXTENDED
3180 	bool wds_ext_enabled;
3181 #endif /* QCA_SUPPORT_WDS_EXTENDED */
3182 	bool drop_3addr_mcast;
3183 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
3184 	bool skip_bar_update;
3185 	unsigned long skip_bar_update_last_ts;
3186 #endif
3187 	/* WDS Aging timer period */
3188 	uint32_t wds_aging_timer_val;
3189 
3190 	/* NAWDS enabled */
3191 	bool nawds_enabled;
3192 
3193 	/* Multicast enhancement enabled */
3194 	uint8_t mcast_enhancement_en;
3195 
3196 	/* IGMP multicast enhancement enabled */
3197 	uint8_t igmp_mcast_enhanc_en;
3198 
3199 	/* vdev_id - ID used to specify a particular vdev to the target */
3200 	uint8_t vdev_id;
3201 
3202 	/* Default HTT meta data for this VDEV */
3203 	/* TBD: check alignment constraints */
3204 	uint16_t htt_tcl_metadata;
3205 
3206 	/* vdev lmac_id */
3207 	uint8_t lmac_id;
3208 
3209 	/* vdev bank_id */
3210 	uint8_t bank_id;
3211 
3212 	/* Mesh mode vdev */
3213 	uint32_t mesh_vdev;
3214 
3215 	/* Mesh mode rx filter setting */
3216 	uint32_t mesh_rx_filter;
3217 
3218 	/* DSCP-TID mapping table ID */
3219 	uint8_t dscp_tid_map_id;
3220 
3221 	/* Address search type to be set in TX descriptor */
3222 	uint8_t search_type;
3223 
3224 	/*
3225 	 * Flag to indicate if s/w tid classification should be
3226 	 * skipped
3227 	 */
3228 	uint8_t skip_sw_tid_classification;
3229 
3230 	/* Flag to enable peer authorization */
3231 	uint8_t peer_authorize;
3232 
3233 	/* AST hash value for BSS peer in HW valid for STA VAP*/
3234 	uint16_t bss_ast_hash;
3235 
3236 	/* AST hash index for BSS peer in HW valid for STA VAP*/
3237 	uint16_t bss_ast_idx;
3238 
3239 	bool multipass_en;
3240 
3241 	/* Address search flags to be configured in HAL descriptor */
3242 	uint8_t hal_desc_addr_search_flags;
3243 
3244 	/* Handle to the OS shim SW's virtual device */
3245 	ol_osif_vdev_handle osif_vdev;
3246 
3247 	/* MAC address */
3248 	union dp_align_mac_addr mac_addr;
3249 
3250 #ifdef WLAN_FEATURE_11BE_MLO
3251 	/* MLO MAC address corresponding to vdev */
3252 	union dp_align_mac_addr mld_mac_addr;
3253 #if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
3254 	bool mlo_vdev;
3255 #endif
3256 #endif
3257 
3258 	/* node in the pdev's list of vdevs */
3259 	TAILQ_ENTRY(dp_vdev) vdev_list_elem;
3260 
3261 	/* dp_peer list */
3262 	TAILQ_HEAD(, dp_peer) peer_list;
3263 	/* to protect peer_list */
3264 	DP_MUTEX_TYPE peer_list_lock;
3265 
3266 	/* RX call back function to flush GRO packets*/
3267 	ol_txrx_rx_gro_flush_ind_fp osif_gro_flush;
3268 	/* default RX call back function called by dp */
3269 	ol_txrx_rx_fp osif_rx;
3270 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
3271 	/* callback to receive eapol frames */
3272 	ol_txrx_rx_fp osif_rx_eapol;
3273 #endif
3274 	/* callback to deliver rx frames to the OS */
3275 	ol_txrx_rx_fp osif_rx_stack;
3276 	/* Callback to handle rx fisa frames */
3277 	ol_txrx_fisa_rx_fp osif_fisa_rx;
3278 	ol_txrx_fisa_flush_fp osif_fisa_flush;
3279 
3280 	/* call back function to flush out queued rx packets*/
3281 	ol_txrx_rx_flush_fp osif_rx_flush;
3282 	ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap;
3283 	ol_txrx_get_key_fp osif_get_key;
3284 	ol_txrx_tx_free_ext_fp osif_tx_free_ext;
3285 
3286 #ifdef notyet
3287 	/* callback to check if the msdu is an WAI (WAPI) frame */
3288 	ol_rx_check_wai_fp osif_check_wai;
3289 #endif
3290 
3291 	/* proxy arp function */
3292 	ol_txrx_proxy_arp_fp osif_proxy_arp;
3293 
3294 	ol_txrx_mcast_me_fp me_convert;
3295 
3296 	/* completion function used by this vdev*/
3297 	ol_txrx_completion_fp tx_comp;
3298 
3299 	ol_txrx_get_tsf_time get_tsf_time;
3300 
3301 	/* callback to classify critical packets */
3302 	ol_txrx_classify_critical_pkt_fp tx_classify_critical_pkt_cb;
3303 
3304 	/* deferred vdev deletion state */
3305 	struct {
3306 		/* VDEV delete pending */
3307 		int pending;
3308 		/*
3309 		* callback and a context argument to provide a
3310 		* notification for when the vdev is deleted.
3311 		*/
3312 		ol_txrx_vdev_delete_cb callback;
3313 		void *context;
3314 	} delete;
3315 
3316 	/* tx data delivery notification callback function */
3317 	struct {
3318 		ol_txrx_data_tx_cb func;
3319 		void *ctxt;
3320 	} tx_non_std_data_callback;
3321 
3322 
3323 	/* safe mode control to bypass the encrypt and decipher process*/
3324 	uint32_t safemode;
3325 
3326 	/* rx filter related */
3327 	uint32_t drop_unenc;
3328 #ifdef notyet
3329 	privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
3330 	uint32_t filters_num;
3331 #endif
3332 	/* TDLS Link status */
3333 	bool tdls_link_connected;
3334 	bool is_tdls_frame;
3335 
3336 	/* per vdev rx nbuf queue */
3337 	qdf_nbuf_queue_t rxq;
3338 
3339 	uint8_t tx_ring_id;
3340 	struct dp_tx_desc_pool_s *tx_desc;
3341 	struct dp_tx_ext_desc_pool_s *tx_ext_desc;
3342 
3343 	/* Capture timestamp of previous tx packet enqueued */
3344 	uint64_t prev_tx_enq_tstamp;
3345 
3346 	/* Capture timestamp of previous rx packet delivered */
3347 	uint64_t prev_rx_deliver_tstamp;
3348 
3349 	/* VDEV Stats */
3350 	struct cdp_vdev_stats stats;
3351 
3352 	/* Is this a proxySTA VAP */
3353 	uint8_t proxysta_vdev : 1, /* Is this a proxySTA VAP */
3354 		wrap_vdev : 1, /* Is this a QWRAP AP VAP */
3355 		isolation_vdev : 1, /* Is this an isolation VAP */
3356 		reserved : 5; /* Reserved */
3357 
3358 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3359 	struct dp_tx_desc_pool_s *pool;
3360 #endif
3361 	/* AP BRIDGE enabled */
3362 	bool ap_bridge_enabled;
3363 
3364 	enum cdp_sec_type  sec_type;
3365 
3366 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
3367 	bool raw_mode_war;
3368 
3369 
3370 	/* 8021p PCP-TID mapping table ID */
3371 	uint8_t tidmap_tbl_id;
3372 
3373 	/* 8021p PCP-TID map values */
3374 	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
3375 
3376 	/* TIDmap priority */
3377 	uint8_t tidmap_prty;
3378 
3379 #ifdef QCA_MULTIPASS_SUPPORT
3380 	uint16_t *iv_vlan_map;
3381 
3382 	/* dp_peer special list */
3383 	TAILQ_HEAD(, dp_txrx_peer) mpass_peer_list;
3384 	DP_MUTEX_TYPE mpass_peer_mutex;
3385 #endif
3386 	/* Extended data path handle */
3387 	struct cdp_ext_vdev *vdev_dp_ext_handle;
3388 #ifdef VDEV_PEER_PROTOCOL_COUNT
3389 	/*
3390 	 * Rx-Ingress and Tx-Egress are handled in the lower level DP layer,
3391 	 * while Rx-Egress and Tx-Ingress are handled in the OSIF layer.
3392 	 * Hence the Rx-Egress and Tx-Ingress mask definitions live in the
3393 	 * OSIF layer, and the Rx-Ingress and Tx-Egress definitions are below.
3395 	 */
3396 #define VDEV_PEER_PROTOCOL_RX_INGRESS_MASK 1
3397 #define VDEV_PEER_PROTOCOL_TX_INGRESS_MASK 2
3398 #define VDEV_PEER_PROTOCOL_RX_EGRESS_MASK 4
3399 #define VDEV_PEER_PROTOCOL_TX_EGRESS_MASK 8
3400 	bool peer_protocol_count_track;
3401 	int peer_protocol_count_dropmask;
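
	/*
	 * Example (illustrative only): a drop mask covering the directions
	 * handled in the DP layer could be composed from the masks above,
	 * e.g.
	 *
	 *	vdev->peer_protocol_count_dropmask =
	 *		VDEV_PEER_PROTOCOL_RX_INGRESS_MASK |
	 *		VDEV_PEER_PROTOCOL_TX_EGRESS_MASK;
	 */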
3402 #endif
3403 	/* callback to collect connectivity stats */
3404 	ol_txrx_stats_rx_fp stats_cb;
3405 	uint32_t num_peers;
3406 	/* entry to inactive_list*/
3407 	TAILQ_ENTRY(dp_vdev) inactive_list_elem;
3408 
3409 #ifdef WLAN_SUPPORT_RX_FISA
3410 	/**
3411 	 * Params used for controlling the fisa aggregation dynamically
3412 	 */
3413 	uint8_t fisa_disallowed[MAX_REO_DEST_RINGS];
3414 	uint8_t fisa_force_flushed[MAX_REO_DEST_RINGS];
3415 #endif
3416 	/*
3417 	 * Refcount for VDEV currently incremented when
3418 	 * peer is created for VDEV
3419 	 */
3420 	qdf_atomic_t ref_cnt;
3421 	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
3422 	uint8_t num_latency_critical_conn;
3423 #ifdef WLAN_SUPPORT_MESH_LATENCY
3424 	uint8_t peer_tid_latency_enabled;
3425 	/* tid latency configuration parameters */
3426 	struct {
3427 		uint32_t service_interval;
3428 		uint32_t burst_size;
3429 		uint8_t latency_tid;
3430 	} mesh_tid_latency_config;
3431 #endif
3432 #ifdef WIFI_MONITOR_SUPPORT
3433 	struct dp_mon_vdev *monitor_vdev;
3434 #endif
3435 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
3436 	/* Delta between TQM clock and TSF clock */
3437 	uint32_t delta_tsf;
3438 #endif
3439 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
3440 	/* Indicate if uplink delay report is enabled or not */
3441 	qdf_atomic_t ul_delay_report;
3442 	/* accumulative delay for every TX completion */
3443 	qdf_atomic_t ul_delay_accum;
3444 	/* accumulative number of packets delay has accumulated */
3445 	qdf_atomic_t ul_pkts_accum;
3446 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
3447 
3448 	/* vdev_stats_id - ID used for stats collection by FW from HW*/
3449 	uint8_t vdev_stats_id;
3450 #ifdef HW_TX_DELAY_STATS_ENABLE
3451 	/* hw tx delay stats enable */
3452 	uint8_t hw_tx_delay_stats_enabled;
3453 #endif
3454 #ifdef DP_RX_UDP_OVER_PEER_ROAM
3455 	uint32_t roaming_peer_status;
3456 	union dp_align_mac_addr roaming_peer_mac;
3457 #endif
3458 #ifdef DP_TRAFFIC_END_INDICATION
3459 	/* per vdev feature enable/disable status */
3460 	bool traffic_end_ind_en;
3461 	/* per vdev nbuf queue for traffic end indication packets */
3462 	qdf_nbuf_queue_t end_ind_pkt_q;
3463 #endif
3464 };
3465 
3466 enum {
3467 	dp_sec_mcast = 0,
3468 	dp_sec_ucast
3469 };
3470 
3471 #ifdef WDS_VENDOR_EXTENSION
3472 typedef struct {
3473 	uint8_t	wds_tx_mcast_4addr:1,
3474 		wds_tx_ucast_4addr:1,
3475 		wds_rx_filter:1,      /* enforce rx filter */
3476 		wds_rx_ucast_4addr:1, /* when set, accept 4addr unicast frames    */
3477 		wds_rx_mcast_4addr:1;  /* when set, accept 4addr multicast frames  */
3478 
3479 } dp_ecm_policy;
3480 #endif
3481 
3482 /*
3483  * struct dp_peer_cached_bufq - cached_bufq to enqueue rx packets
 * @cached_bufq: nbuf list to enqueue rx packets
 * @bufq_lock: spinlock for nbuf list access
 * @thresh: maximum number of rx buffers to enqueue
3487  * @entries: number of entries
3488  * @dropped: number of packets dropped
3489  */
3490 struct dp_peer_cached_bufq {
3491 	qdf_list_t cached_bufq;
3492 	qdf_spinlock_t bufq_lock;
3493 	uint32_t thresh;
3494 	uint32_t entries;
3495 	uint32_t dropped;
3496 };
3497 
3498 /**
3499  * enum dp_peer_ast_flowq
3500  * @DP_PEER_AST_FLOWQ_HI_PRIO: Hi Priority flow queue
3501  * @DP_PEER_AST_FLOWQ_LOW_PRIO: Low priority flow queue
3502  * @DP_PEER_AST_FLOWQ_UDP: flow queue type is UDP
3503  * @DP_PEER_AST_FLOWQ_NON_UDP: flow queue type is Non UDP
3504  */
3505 enum dp_peer_ast_flowq {
3506 	DP_PEER_AST_FLOWQ_HI_PRIO,
3507 	DP_PEER_AST_FLOWQ_LOW_PRIO,
3508 	DP_PEER_AST_FLOWQ_UDP,
3509 	DP_PEER_AST_FLOWQ_NON_UDP,
3510 	DP_PEER_AST_FLOWQ_MAX,
3511 };
3512 
3513 /*
3514  * struct dp_ast_flow_override_info - ast override info
 * @ast_idx - ast indexes in peer map message
3516  * @ast_valid_mask - ast valid mask for each ast index
3517  * @ast_flow_mask - ast flow mask for each ast index
3518  * @tid_valid_low_pri_mask - per tid mask for low priority flow
3519  * @tid_valid_hi_pri_mask - per tid mask for hi priority flow
3520  */
3521 struct dp_ast_flow_override_info {
3522 	uint16_t ast_idx[DP_PEER_AST_FLOWQ_MAX];
3523 	uint8_t ast_valid_mask;
3524 	uint8_t ast_flow_mask[DP_PEER_AST_FLOWQ_MAX];
3525 	uint8_t tid_valid_low_pri_mask;
3526 	uint8_t tid_valid_hi_pri_mask;
3527 };
3528 
3529 /*
3530  * struct dp_peer_ast_params - ast parameters for a msdu flow-queue
 * @ast_idx - ast index populated by FW
3532  * @is_valid - ast flow valid mask
3533  * @valid_tid_mask - per tid mask for this ast index
3534  * @flowQ - flow queue id associated with this ast index
3535  */
3536 struct dp_peer_ast_params {
3537 	uint16_t ast_idx;
3538 	uint8_t is_valid;
3539 	uint8_t valid_tid_mask;
3540 	uint8_t flowQ;
3541 };
3542 
3543 #define DP_MLO_FLOW_INFO_MAX	3
3544 
3545 /**
3546  * struct dp_mlo_flow_override_info - Flow override info
3547  * @ast_idx: Primary TCL AST Index
3548  * @ast_idx_valid: Is AST index valid
3549  * @chip_id: CHIP ID
3550  * @tidmask: tidmask
3551  * @cache_set_num: Cache set number
3552  */
3553 struct dp_mlo_flow_override_info {
3554 	uint16_t ast_idx;
3555 	uint8_t ast_idx_valid;
3556 	uint8_t chip_id;
3557 	uint8_t tidmask;
3558 	uint8_t cache_set_num;
3559 };
3560 
3561 /**
3562  * struct dp_mlo_link_info - Link info
3563  * @peer_chip_id: Peer Chip ID
3564  * @vdev_id: Vdev ID
3565  */
3566 struct dp_mlo_link_info {
3567 	uint8_t peer_chip_id;
3568 	uint8_t vdev_id;
3569 };
3570 
3571 #ifdef WLAN_SUPPORT_MSCS
/* MSCS procedure based macros */
3573 #define IEEE80211_MSCS_MAX_ELEM_SIZE    5
3574 #define IEEE80211_TCLAS_MASK_CLA_TYPE_4  4
3575 /*
3576  * struct dp_peer_mscs_parameter - MSCS database obtained from
3577  * MSCS Request and Response in the control path. This data is used
3578  * by the AP to find out what priority to set based on the tuple
3579  * classification during packet processing.
3580  * @user_priority_bitmap - User priority bitmap obtained during
3581  * handshake
3582  * @user_priority_limit - User priority limit obtained during
3583  * handshake
3584  * @classifier_mask - params to be compared during processing
3585  */
3586 struct dp_peer_mscs_parameter {
3587 	uint8_t user_priority_bitmap;
3588 	uint8_t user_priority_limit;
3589 	uint8_t classifier_mask;
3590 };
3591 #endif
3592 
3593 #ifdef QCA_SUPPORT_WDS_EXTENDED
3594 #define WDS_EXT_PEER_INIT_BIT 0
3595 
3596 /**
 * struct dp_wds_ext_peer - wds ext peer structure
 * This is used when the wds extended feature is enabled
 * both at compile time and at run time. It is created
 * when the first 4-address frame is received from the
 * wds backhaul.
 * @osif_peer: Handle to the OS shim SW's peer
 * @init: wds ext netdev state
3604  */
3605 struct dp_wds_ext_peer {
3606 	ol_osif_peer_handle osif_peer;
3607 	unsigned long init;
3608 };
3609 #endif /* QCA_SUPPORT_WDS_EXTENDED */
3610 
3611 #ifdef WLAN_SUPPORT_MESH_LATENCY
/* Advanced Mesh latency feature based macros */
/*
 * struct dp_peer_mesh_latency_parameter - Mesh latency related
3615  * parameters. This data is updated per peer per TID based on
3616  * the flow tuple classification in external rule database
3617  * during packet processing.
3618  * @service_interval_dl - Service interval associated with TID in DL
3619  * @burst_size_dl - Burst size additive over multiple flows in DL
3620  * @service_interval_ul - Service interval associated with TID in UL
3621  * @burst_size_ul - Burst size additive over multiple flows in UL
3622  * @ac - custom ac derived from service interval
3623  * @msduq - MSDU queue number within TID
3624  */
3625 struct dp_peer_mesh_latency_parameter {
3626 	uint32_t service_interval_dl;
3627 	uint32_t burst_size_dl;
3628 	uint32_t service_interval_ul;
3629 	uint32_t burst_size_ul;
3630 	uint8_t ac;
3631 	uint8_t msduq;
3632 };
3633 #endif
3634 
3635 #ifdef WLAN_FEATURE_11BE_MLO
3636 /* Max number of links for MLO connection */
3637 #define DP_MAX_MLO_LINKS 3
3638 
3639 /**
3640  * struct dp_peer_link_info - link peer information for MLO
 * @mac_addr: MAC address
3642  * @vdev_id: Vdev ID for current link peer
3643  * @is_valid: flag for link peer info valid or not
3644  * @chip_id: chip id
3645  */
3646 struct dp_peer_link_info {
3647 	union dp_align_mac_addr mac_addr;
3648 	uint8_t vdev_id;
3649 	uint8_t is_valid;
3650 	uint8_t chip_id;
3651 };
3652 
3653 /**
 * struct dp_mld_link_peers - this structure is used to get link peer
 *			      pointers from an mld peer
3656  * @link_peers: link peers pointer array
3657  * @num_links: number of link peers fetched
3658  */
3659 struct dp_mld_link_peers {
3660 	struct dp_peer *link_peers[DP_MAX_MLO_LINKS];
3661 	uint8_t num_links;
3662 };
3663 #endif
3664 
3665 typedef void *dp_txrx_ref_handle;
3666 
3667 /**
 * struct dp_peer_per_pkt_tx_stats - Peer Tx stats updated in per pkt
 *				Tx completion path
 * @ucast: Unicast Packet Count
 * @mcast: Multicast Packet Count
 * @bcast: Broadcast Packet Count
 * @nawds_mcast: NAWDS Multicast Packet Count
 * @tx_success: Successful Tx Packets
3675  * @nawds_mcast_drop: NAWDS Multicast Drop Count
3676  * @ofdma: Total Packets as ofdma
3677  * @non_amsdu_cnt: Number of MSDUs with no MSDU level aggregation
3678  * @amsdu_cnt: Number of MSDUs part of AMSDU
 * @fw_rem: Discarded by firmware
3680  * @fw_rem_notx: firmware_discard_untransmitted
3681  * @fw_rem_tx: firmware_discard_transmitted
3682  * @age_out: aged out in mpdu/msdu queues
3683  * @fw_reason1: discarded by firmware reason 1
3684  * @fw_reason2: discarded by firmware reason 2
 * @fw_reason3: discarded by firmware reason 3
 * @fw_rem_queue_disable: discarded by firmware due to tx queue being disabled
 * @fw_rem_no_match: dropped due to fw no match command
3687  * @drop_threshold: dropped due to HW threshold
 * @drop_link_desc_na: dropped due to link descriptors not being available in HW
3689  * @invalid_drop: Invalid msdu drop
3690  * @mcast_vdev_drop: MCAST drop configured for VDEV in HW
3691  * @invalid_rr: Invalid TQM release reason
3692  * @failed_retry_count: packets failed due to retry above 802.11 retry limit
 * @retry_count: packets successfully sent after one or more retries
3694  * @multiple_retry_count: packets successfully sent after more than one retry
3695  * @no_ack_count: no ack pkt count for different protocols
3696  * @tx_success_twt: Successful Tx Packets in TWT session
3697  * @last_tx_ts: last timestamp in jiffies when tx comp occurred
3698  * @avg_sojourn_msdu[CDP_DATA_TID_MAX]: Avg sojourn msdu stat
3699  * @protocol_trace_cnt: per-peer protocol counter
3700  * @release_src_not_tqm: Counter to keep track of release source is not TQM
3701  *			 in TX completion status processing
3702  */
3703 struct dp_peer_per_pkt_tx_stats {
3704 	struct cdp_pkt_info ucast;
3705 	struct cdp_pkt_info mcast;
3706 	struct cdp_pkt_info bcast;
3707 	struct cdp_pkt_info nawds_mcast;
3708 	struct cdp_pkt_info tx_success;
3709 	uint32_t nawds_mcast_drop;
3710 	uint32_t ofdma;
3711 	uint32_t non_amsdu_cnt;
3712 	uint32_t amsdu_cnt;
3713 	struct {
3714 		struct cdp_pkt_info fw_rem;
3715 		uint32_t fw_rem_notx;
3716 		uint32_t fw_rem_tx;
3717 		uint32_t age_out;
3718 		uint32_t fw_reason1;
3719 		uint32_t fw_reason2;
3720 		uint32_t fw_reason3;
3721 		uint32_t fw_rem_queue_disable;
3722 		uint32_t fw_rem_no_match;
3723 		uint32_t drop_threshold;
3724 		uint32_t drop_link_desc_na;
3725 		uint32_t invalid_drop;
3726 		uint32_t mcast_vdev_drop;
3727 		uint32_t invalid_rr;
3728 	} dropped;
3729 	uint32_t failed_retry_count;
3730 	uint32_t retry_count;
3731 	uint32_t multiple_retry_count;
3732 	uint32_t no_ack_count[QDF_PROTO_SUBTYPE_MAX];
3733 	struct cdp_pkt_info tx_success_twt;
3734 	unsigned long last_tx_ts;
3735 	qdf_ewma_tx_lag avg_sojourn_msdu[CDP_DATA_TID_MAX];
3736 #ifdef VDEV_PEER_PROTOCOL_COUNT
3737 	struct protocol_trace_count protocol_trace_cnt[CDP_TRACE_MAX];
3738 #endif
3739 	uint32_t release_src_not_tqm;
3740 };
3741 
3742 /**
3743  * struct dp_peer_extd_tx_stats - Peer Tx stats updated in either
3744  *	per pkt Tx completion path when macro QCA_ENHANCED_STATS_SUPPORT is
3745  *	disabled or in HTT Tx PPDU completion path when macro is enabled
3746  * @stbc: Packets in STBC
3747  * @ldpc: Packets in LDPC
3748  * @retries: Packet retries
3749  * @pkt_type[DOT11_MAX]: pkt count for different .11 modes
3750  * @wme_ac_type[WME_AC_MAX]: Wireless Multimedia type Count
 * @excess_retries_per_ac[WME_AC_MAX]: Excess retry count per WMM access category
3752  * @ampdu_cnt: completion of aggregation
3753  * @non_ampdu_cnt: tx completion not aggregated
3754  * @num_ppdu_cookie_valid: no. of valid ppdu cookies rcvd from FW
3755  * @tx_ppdus: ppdus in tx
3756  * @tx_mpdus_success: mpdus successful in tx
3757  * @tx_mpdus_tried: mpdus tried in tx
3758  * @tx_rate: Tx Rate in kbps
3759  * @last_tx_rate: Last tx rate for unicast packets
3760  * @last_tx_rate_mcs: Tx rate mcs for unicast packets
3761  * @mcast_last_tx_rate: Last tx rate for multicast packets
3762  * @mcast_last_tx_rate_mcs: Last tx rate mcs for multicast
3763  * @rnd_avg_tx_rate: Rounded average tx rate
3764  * @avg_tx_rate: Average TX rate
3765  * @tx_ratecode: Tx rate code of last frame
3766  * @pream_punct_cnt: Preamble Punctured count
3767  * @sgi_count[MAX_GI]: SGI count
3768  * @nss[SS_COUNT]: Packet count for different num_spatial_stream values
3769  * @bw[MAX_BW]: Packet Count for different bandwidths
3770  * @ru_start: RU start index
3771  * @ru_tones: RU tones size
3772  * @ru_loc: pkt info for RU location 26/ 52/ 106/ 242/ 484 counter
3773  * @transmit_type: pkt info for tx transmit type
3774  * @mu_group_id: mumimo mu group id
3775  * @last_ack_rssi: RSSI of last acked packet
3776  * @nss_info: NSS 1,2, ...8
3777  * @mcs_info: MCS index
3778  * @bw_info: Bandwidth
3779  *       <enum 0 bw_20_MHz>
3780  *       <enum 1 bw_40_MHz>
3781  *       <enum 2 bw_80_MHz>
3782  *       <enum 3 bw_160_MHz>
3783  * @gi_info: <enum 0     0_8_us_sgi > Legacy normal GI
3784  *       <enum 1     0_4_us_sgi > Legacy short GI
3785  *       <enum 2     1_6_us_sgi > HE related GI
3786  *       <enum 3     3_2_us_sgi > HE
3787  * @preamble_info: preamble
3788  * @tx_ucast_total: total ucast count
3789  * @tx_ucast_success: total ucast success count
 * @retries_mpdu: number of mpdus successfully transmitted after retries
3791  * @mpdu_success_with_retries: mpdu retry count in case of successful tx
3792  * @su_be_ppdu_cnt: SU Tx packet count for 11BE
3793  * @mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX]: MU Tx packet count for 11BE
3794  * @punc_bw[MAX_PUNCTURED_MODE]: MSDU count for punctured bw
3795  */
3796 struct dp_peer_extd_tx_stats {
3797 	uint32_t stbc;
3798 	uint32_t ldpc;
3799 	uint32_t retries;
3800 	struct cdp_pkt_type pkt_type[DOT11_MAX];
3801 	uint32_t wme_ac_type[WME_AC_MAX];
3802 	uint32_t excess_retries_per_ac[WME_AC_MAX];
3803 	uint32_t ampdu_cnt;
3804 	uint32_t non_ampdu_cnt;
3805 	uint32_t num_ppdu_cookie_valid;
3806 	uint32_t tx_ppdus;
3807 	uint32_t tx_mpdus_success;
3808 	uint32_t tx_mpdus_tried;
3809 
3810 	uint32_t tx_rate;
3811 	uint32_t last_tx_rate;
3812 	uint32_t last_tx_rate_mcs;
3813 	uint32_t mcast_last_tx_rate;
3814 	uint32_t mcast_last_tx_rate_mcs;
3815 	uint64_t rnd_avg_tx_rate;
3816 	uint64_t avg_tx_rate;
3817 	uint16_t tx_ratecode;
3818 
3819 	uint32_t sgi_count[MAX_GI];
3820 	uint32_t pream_punct_cnt;
3821 	uint32_t nss[SS_COUNT];
3822 	uint32_t bw[MAX_BW];
3823 	uint32_t ru_start;
3824 	uint32_t ru_tones;
3825 	struct cdp_tx_pkt_info ru_loc[MAX_RU_LOCATIONS];
3826 
3827 	struct cdp_tx_pkt_info transmit_type[MAX_TRANSMIT_TYPES];
3828 	uint32_t mu_group_id[MAX_MU_GROUP_ID];
3829 
3830 	uint32_t last_ack_rssi;
3831 
3832 	uint32_t nss_info:4,
3833 		 mcs_info:4,
3834 		 bw_info:4,
3835 		 gi_info:4,
3836 		 preamble_info:4;
3837 
3838 	uint32_t retries_mpdu;
3839 	uint32_t mpdu_success_with_retries;
3840 	struct cdp_pkt_info tx_ucast_total;
3841 	struct cdp_pkt_info tx_ucast_success;
3842 #ifdef WLAN_FEATURE_11BE
3843 	struct cdp_pkt_type su_be_ppdu_cnt;
3844 	struct cdp_pkt_type mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX];
3845 	uint32_t punc_bw[MAX_PUNCTURED_MODE];
3846 #endif
3847 };
3848 
3849 /**
3850  * struct dp_peer_per_pkt_rx_stats - Peer Rx stats updated in per pkt Rx path
3851  * @rcvd_reo[CDP_MAX_RX_RINGS]: Packets received on the reo ring
3852  * @rx_lmac[CDP_MAX_LMACS]: Packets received on each lmac
3853  * @unicast: Total unicast packets
3854  * @multicast: Total multicast packets
 * @bcast: Broadcast Packet Count
 * @raw: Raw Packets received
3857  * @nawds_mcast_drop: Total NAWDS multicast packets dropped
3858  * @mec_drop: Total MEC packets dropped
3859  * @last_rx_ts: last timestamp in jiffies when RX happened
3860  * @intra_bss.pkts: Intra BSS packets received
3861  * @intra_bss.fail: Intra BSS packets failed
 * @intra_bss.mdns_no_fwd: Intra BSS MDNS packets not forwarded
 * @mic_err: Rx MIC errors (CCMP)
 * @decrypt_err: Rx decryption errors
 * @fcserr: Rx FCS check failures
3866  * @pn_err: pn check failed
3867  * @oor_err: Rx OOR errors
3868  * @jump_2k_err: 2k jump errors
3869  * @rxdma_wifi_parse_err: rxdma wifi parse errors
3870  * @non_amsdu_cnt: Number of MSDUs with no MSDU level aggregation
3871  * @amsdu_cnt: Number of MSDUs part of AMSDU
3872  * @rx_retries: retries of packet in rx
3873  * @multipass_rx_pkt_drop: Dropped multipass rx pkt
3874  * @peer_unauth_rx_pkt_drop: Unauth rx packet drops
3875  * @policy_check_drop: policy check drops
3876  * @to_stack_twt: Total packets sent up the stack in TWT session
 * @protocol_trace_cnt: per-peer protocol counters
 * @mcast_3addr_drop: multicast packets dropped due to 3-address format
3878  */
3879 struct dp_peer_per_pkt_rx_stats {
3880 	struct cdp_pkt_info rcvd_reo[CDP_MAX_RX_RINGS];
3881 	struct cdp_pkt_info rx_lmac[CDP_MAX_LMACS];
3882 	struct cdp_pkt_info unicast;
3883 	struct cdp_pkt_info multicast;
3884 	struct cdp_pkt_info bcast;
3885 	struct cdp_pkt_info raw;
3886 	uint32_t nawds_mcast_drop;
3887 	struct cdp_pkt_info mec_drop;
3888 	unsigned long last_rx_ts;
3889 	struct {
3890 		struct cdp_pkt_info pkts;
3891 		struct cdp_pkt_info fail;
3892 		uint32_t mdns_no_fwd;
3893 	} intra_bss;
3894 	struct {
3895 		uint32_t mic_err;
3896 		uint32_t decrypt_err;
3897 		uint32_t fcserr;
3898 		uint32_t pn_err;
3899 		uint32_t oor_err;
3900 		uint32_t jump_2k_err;
3901 		uint32_t rxdma_wifi_parse_err;
3902 	} err;
3903 	uint32_t non_amsdu_cnt;
3904 	uint32_t amsdu_cnt;
3905 	uint32_t rx_retries;
3906 	uint32_t multipass_rx_pkt_drop;
3907 	uint32_t peer_unauth_rx_pkt_drop;
3908 	uint32_t policy_check_drop;
3909 	struct cdp_pkt_info to_stack_twt;
3910 #ifdef VDEV_PEER_PROTOCOL_COUNT
3911 	struct protocol_trace_count protocol_trace_cnt[CDP_TRACE_MAX];
3912 #endif
3913 	uint32_t mcast_3addr_drop;
3914 };
3915 
3916 /**
3917  * struct dp_peer_extd_rx_stats - Peer Rx stats updated in either
3918  *	per pkt Rx path when macro QCA_ENHANCED_STATS_SUPPORT is disabled or in
3919  *	Rx monitor patch when macro is enabled
3920  * @pkt_type[DOT11_MAX]: pkt counter for different .11 modes
3921  * @wme_ac_type[WME_AC_MAX]: Wireless Multimedia type Count
3922  * @mpdu_cnt_fcs_ok: SU Rx success mpdu count
3923  * @mpdu_cnt_fcs_err: SU Rx fail mpdu count
3924  * @non_ampdu_cnt: Number of MSDUs with no MPDU level aggregation
 * @ampdu_cnt: Number of MSDUs part of AMPDU
3926  * @rx_mpdus: mpdu in rx
3927  * @rx_ppdus: ppdu in rx
3928  * @su_ax_ppdu_cnt: SU Rx packet count for .11ax
3929  * @rx_mu[TXRX_TYPE_MU_MAX]: Rx MU stats
3930  * @reception_type[MAX_RECEPTION_TYPES]: Reception type of packets
3931  * @ppdu_cnt[MAX_RECEPTION_TYPES]: PPDU packet count in reception type
3932  * @sgi_count[MAX_GI]: sgi count
 * @nss[SS_COUNT]: packet count in spatial streams
3934  * @ppdu_nss[SS_COUNT]: PPDU packet count in spatial streams
3935  * @bw[MAX_BW]: Packet Count in different bandwidths
3936  * @rx_mpdu_cnt[MAX_MCS]: rx mpdu count per MCS rate
3937  * @rx_rate: Rx rate
3938  * @last_rx_rate: Previous rx rate
3939  * @rnd_avg_rx_rate: Rounded average rx rate
3940  * @avg_rx_rate: Average Rx rate
3941  * @rx_ratecode: Rx rate code of last frame
3942  * @avg_snr: Average snr
3943  * @rx_snr_measured_time: Time at which snr is measured
3944  * @snr: SNR of received signal
3945  * @last_snr: Previous snr
3946  * @nss_info: NSS 1,2, ...8
3947  * @mcs_info: MCS index
3948  * @bw_info: Bandwidth
3949  *       <enum 0 bw_20_MHz>
3950  *       <enum 1 bw_40_MHz>
3951  *       <enum 2 bw_80_MHz>
3952  *       <enum 3 bw_160_MHz>
3953  * @gi_info: <enum 0     0_8_us_sgi > Legacy normal GI
3954  *       <enum 1     0_4_us_sgi > Legacy short GI
3955  *       <enum 2     1_6_us_sgi > HE related GI
3956  *       <enum 3     3_2_us_sgi > HE
3957  * @preamble_info: preamble
3958  * @mpdu_retry_cnt: retries of mpdu in rx
3959  * @su_be_ppdu_cnt: SU Rx packet count for BE
3960  * @mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX]: MU rx packet count for BE
3961  * @punc_bw[MAX_PUNCTURED_MODE]: MSDU count for punctured bw
3962  */
3963 struct dp_peer_extd_rx_stats {
3964 	struct cdp_pkt_type pkt_type[DOT11_MAX];
3965 	uint32_t wme_ac_type[WME_AC_MAX];
3966 	uint32_t mpdu_cnt_fcs_ok;
3967 	uint32_t mpdu_cnt_fcs_err;
3968 	uint32_t non_ampdu_cnt;
3969 	uint32_t ampdu_cnt;
3970 	uint32_t rx_mpdus;
3971 	uint32_t rx_ppdus;
3972 
3973 	struct cdp_pkt_type su_ax_ppdu_cnt;
3974 	struct cdp_rx_mu rx_mu[TXRX_TYPE_MU_MAX];
3975 	uint32_t reception_type[MAX_RECEPTION_TYPES];
3976 	uint32_t ppdu_cnt[MAX_RECEPTION_TYPES];
3977 
3978 	uint32_t sgi_count[MAX_GI];
3979 	uint32_t nss[SS_COUNT];
3980 	uint32_t ppdu_nss[SS_COUNT];
3981 	uint32_t bw[MAX_BW];
3982 	uint32_t rx_mpdu_cnt[MAX_MCS];
3983 
3984 	uint32_t rx_rate;
3985 	uint32_t last_rx_rate;
3986 	uint32_t rnd_avg_rx_rate;
3987 	uint32_t avg_rx_rate;
3988 	uint32_t rx_ratecode;
3989 
3990 	uint32_t avg_snr;
3991 	uint32_t rx_snr_measured_time;
3992 	uint8_t snr;
3993 	uint8_t last_snr;
3994 
3995 	uint32_t nss_info:4,
3996 		 mcs_info:4,
3997 		 bw_info:4,
3998 		 gi_info:4,
3999 		 preamble_info:4;
4000 
4001 	uint32_t mpdu_retry_cnt;
4002 #ifdef WLAN_FEATURE_11BE
4003 	struct cdp_pkt_type su_be_ppdu_cnt;
4004 	struct cdp_pkt_type mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX];
4005 	uint32_t punc_bw[MAX_PUNCTURED_MODE];
4006 #endif
4007 };
4008 
4009 /**
4010  * struct dp_peer_per_pkt_stats - Per pkt stats for peer
4011  * @tx: Per pkt Tx stats
4012  * @rx: Per pkt Rx stats
4013  */
4014 struct dp_peer_per_pkt_stats {
4015 	struct dp_peer_per_pkt_tx_stats tx;
4016 	struct dp_peer_per_pkt_rx_stats rx;
4017 };
4018 
4019 /**
4020  * struct dp_peer_extd_stats - Stats from extended path for peer
4021  * @tx: Extended path tx stats
4022  * @rx: Extended path rx stats
4023  */
4024 struct dp_peer_extd_stats {
4025 	struct dp_peer_extd_tx_stats tx;
4026 	struct dp_peer_extd_rx_stats rx;
4027 };
4028 
4029 /**
4030  * struct dp_peer_stats - Peer stats
4031  * @per_pkt_stats: Per packet path stats
4032  * @extd_stats: Extended path stats
4033  */
4034 struct dp_peer_stats {
4035 	struct dp_peer_per_pkt_stats per_pkt_stats;
4036 #ifndef QCA_ENHANCED_STATS_SUPPORT
4037 	struct dp_peer_extd_stats extd_stats;
4038 #endif
4039 };
4040 
4041 /**
 * struct dp_txrx_peer - DP txrx_peer structure used in per pkt path
 * @tx_failed: Total Tx failure
 * @comp_pkt: Pkt Info for which completions were received
4045  * @to_stack: Total packets sent up the stack
4046  * @stats: Peer stats
4047  * @delay_stats: Peer delay stats
4048  * @jitter_stats: Peer jitter stats
4049  * @bw: bandwidth of peer connection
4050  * @mpdu_retry_threshold: MPDU retry threshold to increment tx bad count
4051  */
4052 struct dp_txrx_peer {
4053 	/* Core TxRx Peer */
4054 
4055 	/* VDEV to which this peer is associated */
4056 	struct dp_vdev *vdev;
4057 
4058 	/* peer ID for this peer */
4059 	uint16_t peer_id;
4060 
4061 	uint8_t authorize:1, /* Set when authorized */
4062 		in_twt:1, /* in TWT session */
		hw_txrx_stats_en:1, /* Indicate HW offload vdev stats */
4064 		mld_peer:1; /* MLD peer*/
4065 
4066 	uint32_t tx_failed;
4067 	struct cdp_pkt_info comp_pkt;
4068 	struct cdp_pkt_info to_stack;
4069 
4070 	struct dp_peer_stats stats;
4071 
4072 	struct dp_peer_delay_stats *delay_stats;
4073 
4074 	struct cdp_peer_tid_stats *jitter_stats;
4075 
4076 	struct {
4077 		enum cdp_sec_type sec_type;
4078 		u_int32_t michael_key[2]; /* relevant for TKIP */
4079 	} security[2]; /* 0 -> multicast, 1 -> unicast */
4080 
4081 	uint16_t nawds_enabled:1, /* NAWDS flag */
4082 		bss_peer:1, /* set for bss peer */
4083 		isolation:1, /* enable peer isolation for this peer */
4084 		wds_enabled:1; /* WDS peer */
4085 #ifdef WDS_VENDOR_EXTENSION
4086 	dp_ecm_policy wds_ecm;
4087 #endif
4088 #ifdef PEER_CACHE_RX_PKTS
4089 	qdf_atomic_t flush_in_progress;
4090 	struct dp_peer_cached_bufq bufq_info;
4091 #endif
4092 #ifdef QCA_MULTIPASS_SUPPORT
4093 	/* node in the special peer list element */
4094 	TAILQ_ENTRY(dp_txrx_peer) mpass_peer_list_elem;
4095 	/* vlan id for key */
4096 	uint16_t vlan_id;
4097 #endif
4098 #ifdef QCA_SUPPORT_WDS_EXTENDED
4099 	struct dp_wds_ext_peer wds_ext;
4100 	ol_txrx_rx_fp osif_rx;
4101 #endif
4102 	struct dp_rx_tid_defrag rx_tid[DP_MAX_TIDS];
4103 #ifdef CONFIG_SAWF
4104 	struct dp_peer_sawf_stats *sawf_stats;
4105 #endif
4106 #ifdef DP_PEER_EXTENDED_API
4107 	enum cdp_peer_bw bw;
4108 	uint8_t mpdu_retry_threshold;
4109 #endif
4110 };
4111 
4112 /* Peer structure for data path state */
4113 struct dp_peer {
4114 	struct dp_txrx_peer *txrx_peer;
4115 #ifdef WIFI_MONITOR_SUPPORT
4116 	struct dp_mon_peer *monitor_peer;
4117 #endif
4118 	/* peer ID for this peer */
4119 	uint16_t peer_id;
4120 
4121 	/* VDEV to which this peer is associated */
4122 	struct dp_vdev *vdev;
4123 
4124 	struct dp_ast_entry *self_ast_entry;
4125 
4126 	qdf_atomic_t ref_cnt;
4127 
4128 	union dp_align_mac_addr mac_addr;
4129 
4130 	/* node in the vdev's list of peers */
4131 	TAILQ_ENTRY(dp_peer) peer_list_elem;
4132 	/* node in the hash table bin's list of peers */
4133 	TAILQ_ENTRY(dp_peer) hash_list_elem;
4134 
4135 	/* TID structures pointer */
4136 	struct dp_rx_tid *rx_tid;
4137 
4138 	/* TBD: No transmit TID state required? */
4139 
4140 	struct {
4141 		enum cdp_sec_type sec_type;
4142 		u_int32_t michael_key[2]; /* relevant for TKIP */
4143 	} security[2]; /* 0 -> multicast, 1 -> unicast */
4144 
4145 	/* NAWDS Flag and Bss Peer bit */
4146 	uint16_t bss_peer:1, /* set for bss peer */
4147 		authorize:1, /* Set when authorized */
4148 		valid:1, /* valid bit */
4149 		delete_in_progress:1, /* Indicate kickout sent */
4150 		sta_self_peer:1, /* Indicate STA self peer */
4151 		is_tdls_peer:1; /* Indicate TDLS peer */
4152 
4153 #ifdef WLAN_FEATURE_11BE_MLO
4154 	uint8_t first_link:1, /* first link peer for MLO */
4155 		primary_link:1; /* primary link for MLO */
4156 #endif
4157 
4158 	/* MCL specific peer local id */
4159 	uint16_t local_id;
4160 	enum ol_txrx_peer_state state;
4161 	qdf_spinlock_t peer_info_lock;
4162 
4163 	/* Peer calibrated stats */
4164 	struct cdp_calibr_stats stats;
4165 
4166 	TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
4167 	/* TBD */
4168 
4169 	/* Active Block ack sessions */
4170 	uint16_t active_ba_session_cnt;
4171 
4172 	/* Current HW buffersize setting */
4173 	uint16_t hw_buffer_size;
4174 
4175 	/*
4176 	 * Flag to check if sessions with 256 buffersize
4177 	 * should be terminated.
4178 	 */
4179 	uint8_t kill_256_sessions;
4180 	qdf_atomic_t is_default_route_set;
4181 
4182 #ifdef QCA_PEER_MULTIQ_SUPPORT
4183 	struct dp_peer_ast_params peer_ast_flowq_idx[DP_PEER_AST_FLOWQ_MAX];
4184 #endif
4185 	/* entry to inactive_list*/
4186 	TAILQ_ENTRY(dp_peer) inactive_list_elem;
4187 
4188 	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
4189 
4190 	uint8_t peer_state;
4191 	qdf_spinlock_t peer_state_lock;
4192 #ifdef WLAN_SUPPORT_MSCS
4193 	struct dp_peer_mscs_parameter mscs_ipv4_parameter, mscs_ipv6_parameter;
4194 	bool mscs_active;
4195 #endif
4196 #ifdef WLAN_SUPPORT_MESH_LATENCY
4197 	struct dp_peer_mesh_latency_parameter mesh_latency_params[DP_MAX_TIDS];
4198 #endif
4199 #ifdef WLAN_FEATURE_11BE_MLO
4200 	/* peer type */
4201 	enum cdp_peer_type peer_type;
4202 	/*---------for link peer---------*/
4203 	struct dp_peer *mld_peer;
4204 	/*---------for mld peer----------*/
4205 	struct dp_peer_link_info link_peers[DP_MAX_MLO_LINKS];
4206 	uint8_t num_links;
4207 	DP_MUTEX_TYPE link_peers_info_lock;
4208 #endif
4209 #ifdef CONFIG_SAWF_DEF_QUEUES
4210 	struct dp_peer_sawf *sawf;
4211 #endif
4212 };
4213 
4214 /*
4215  * dp_invalid_peer_msg
4216  * @nbuf: data buffer
4217  * @wh: 802.11 header
4218  * @vdev_id: id of vdev
4219  */
4220 struct dp_invalid_peer_msg {
4221 	qdf_nbuf_t nbuf;
4222 	struct ieee80211_frame *wh;
4223 	uint8_t vdev_id;
4224 };
4225 
4226 /*
 * struct dp_tx_me_buf_t - ME buffer
 * @next: pointer to next buffer
 * @data: Destination MAC address
 * @paddr_macbuf: physical address for dest_mac
4231  */
4232 struct dp_tx_me_buf_t {
	/* Note: ME buf pool initialization logic expects the next pointer to
	 * be the first element. Don't add anything before next. */
4235 	struct dp_tx_me_buf_t *next;
4236 	uint8_t data[QDF_MAC_ADDR_SIZE];
4237 	qdf_dma_addr_t paddr_macbuf;
4238 };
4239 
4240 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
4241 struct hal_rx_fst;
4242 
4243 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
4244 struct dp_rx_fse {
4245 	/* HAL Rx Flow Search Entry which matches HW definition */
4246 	void *hal_rx_fse;
4247 	/* Toeplitz hash value */
4248 	uint32_t flow_hash;
4249 	/* Flow index, equivalent to hash value truncated to FST size */
4250 	uint32_t flow_id;
4251 	/* Stats tracking for this flow */
4252 	struct cdp_flow_stats stats;
4253 	/* Flag indicating whether flow is IPv4 address tuple */
4254 	uint8_t is_ipv4_addr_entry;
4255 	/* Flag indicating whether flow is valid */
4256 	uint8_t is_valid;
4257 };
4258 
4259 struct dp_rx_fst {
4260 	/* Software (DP) FST */
4261 	uint8_t *base;
4262 	/* Pointer to HAL FST */
4263 	struct hal_rx_fst *hal_rx_fst;
4264 	/* Base physical address of HAL RX HW FST */
4265 	uint64_t hal_rx_fst_base_paddr;
4266 	/* Maximum number of flows FSE supports */
4267 	uint16_t max_entries;
4268 	/* Num entries in flow table */
4269 	uint16_t num_entries;
4270 	/* SKID Length */
4271 	uint16_t max_skid_length;
4272 	/* Hash mask to obtain legitimate hash entry */
4273 	uint32_t hash_mask;
4274 	/* Timer for bundling of flows */
4275 	qdf_timer_t cache_invalidate_timer;
4276 	/**
4277 	 * Flag which tracks whether cache update
4278 	 * is needed on timer expiry
4279 	 */
4280 	qdf_atomic_t is_cache_update_pending;
4281 	/* Flag to indicate completion of FSE setup in HW/FW */
4282 	bool fse_setup_done;
4283 };
4284 
4285 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_rx_fse)
4286 #elif WLAN_SUPPORT_RX_FISA
4287 
4288 /**
4289  * struct dp_fisa_reo_mismatch_stats - reo mismatch sub-case stats for FISA
4290  * @allow_cce_match: packet allowed due to cce mismatch
4291  * @allow_fse_metdata_mismatch: packet allowed since it belongs to same flow,
4292  *			only fse_metadata is not same.
4293  * @allow_non_aggr: packet allowed due to any other reason.
4294  */
4295 struct dp_fisa_reo_mismatch_stats {
4296 	uint32_t allow_cce_match;
4297 	uint32_t allow_fse_metdata_mismatch;
4298 	uint32_t allow_non_aggr;
4299 };
4300 
4301 struct dp_fisa_stats {
4302 	/* flow index invalid from RX HW TLV */
4303 	uint32_t invalid_flow_index;
4304 	struct dp_fisa_reo_mismatch_stats reo_mismatch;
4305 };
4306 
4307 enum fisa_aggr_ret {
4308 	FISA_AGGR_DONE,
4309 	FISA_AGGR_NOT_ELIGIBLE,
4310 	FISA_FLUSH_FLOW
4311 };
4312 
4313 /**
4314  * struct fisa_pkt_hist - FISA Packet history structure
4315  * @tlv_hist: array of TLV history
 * @ts_hist: array of timestamps of fisa packets
4317  * @idx: index indicating the next location to be used in the array.
4318  */
4319 struct fisa_pkt_hist {
4320 	uint8_t *tlv_hist;
4321 	qdf_time_t ts_hist[FISA_FLOW_MAX_AGGR_COUNT];
4322 	uint32_t idx;
4323 };
4324 
4325 struct dp_fisa_rx_sw_ft {
4326 	/* HAL Rx Flow Search Entry which matches HW definition */
4327 	void *hw_fse;
4328 	/* hash value */
4329 	uint32_t flow_hash;
4330 	/* toeplitz hash value*/
4331 	uint32_t flow_id_toeplitz;
4332 	/* Flow index, equivalent to hash value truncated to FST size */
4333 	uint32_t flow_id;
4334 	/* Stats tracking for this flow */
4335 	struct cdp_flow_stats stats;
4336 	/* Flag indicating whether flow is IPv4 address tuple */
4337 	uint8_t is_ipv4_addr_entry;
4338 	/* Flag indicating whether flow is valid */
4339 	uint8_t is_valid;
4340 	uint8_t is_populated;
4341 	uint8_t is_flow_udp;
4342 	uint8_t is_flow_tcp;
4343 	qdf_nbuf_t head_skb;
4344 	uint16_t cumulative_l4_checksum;
4345 	uint16_t adjusted_cumulative_ip_length;
4346 	uint16_t cur_aggr;
4347 	uint16_t napi_flush_cumulative_l4_checksum;
4348 	uint16_t napi_flush_cumulative_ip_length;
4349 	qdf_nbuf_t last_skb;
4350 	uint32_t head_skb_ip_hdr_offset;
4351 	uint32_t head_skb_l4_hdr_offset;
4352 	struct cdp_rx_flow_tuple_info rx_flow_tuple_info;
4353 	uint8_t napi_id;
4354 	struct dp_vdev *vdev;
4355 	uint64_t bytes_aggregated;
4356 	uint32_t flush_count;
4357 	uint32_t aggr_count;
4358 	uint8_t do_not_aggregate;
4359 	uint16_t hal_cumultive_ip_len;
4360 	struct dp_soc *soc_hdl;
4361 	/* last aggregate count fetched from RX PKT TLV */
4362 	uint32_t last_hal_aggr_count;
4363 	uint32_t cur_aggr_gso_size;
4364 	struct udphdr *head_skb_udp_hdr;
4365 	uint16_t frags_cumulative_len;
4366 	/* CMEM parameters */
4367 	uint32_t cmem_offset;
4368 	uint32_t metadata;
4369 	uint32_t reo_dest_indication;
4370 	qdf_time_t flow_init_ts;
4371 	qdf_time_t last_accessed_ts;
4372 #ifdef WLAN_SUPPORT_RX_FISA_HIST
4373 	struct fisa_pkt_hist pkt_hist;
4374 #endif
4375 };
4376 
4377 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_fisa_rx_sw_ft)
4378 #define MAX_FSE_CACHE_FL_HST 10
4379 /**
4380  * struct fse_cache_flush_history - Debug history cache flush
4381  * @timestamp: Entry update timestamp
4382  * @flows_added: Number of flows added for this flush
4383  * @flows_deleted: Number of flows deleted for this flush
4384  */
4385 struct fse_cache_flush_history {
4386 	uint64_t timestamp;
4387 	uint32_t flows_added;
4388 	uint32_t flows_deleted;
4389 };
4390 
4391 struct dp_rx_fst {
4392 	/* Software (DP) FST */
4393 	uint8_t *base;
4394 	/* Pointer to HAL FST */
4395 	struct hal_rx_fst *hal_rx_fst;
4396 	/* Base physical address of HAL RX HW FST */
4397 	uint64_t hal_rx_fst_base_paddr;
4398 	/* Maximum number of flows FSE supports */
4399 	uint16_t max_entries;
4400 	/* Num entries in flow table */
4401 	uint16_t num_entries;
4402 	/* SKID Length */
4403 	uint16_t max_skid_length;
4404 	/* Hash mask to obtain legitimate hash entry */
4405 	uint32_t hash_mask;
4406 	/* Lock for adding/deleting entries of FST */
4407 	qdf_spinlock_t dp_rx_fst_lock;
4408 	uint32_t add_flow_count;
4409 	uint32_t del_flow_count;
4410 	uint32_t hash_collision_cnt;
4411 	struct dp_soc *soc_hdl;
4412 	qdf_atomic_t fse_cache_flush_posted;
4413 	qdf_timer_t fse_cache_flush_timer;
4414 	/* Allow FSE cache flush cmd to FW */
4415 	bool fse_cache_flush_allow;
4416 	struct fse_cache_flush_history cache_fl_rec[MAX_FSE_CACHE_FL_HST];
4417 	/* FISA DP stats */
4418 	struct dp_fisa_stats stats;
4419 
4420 	/* CMEM params */
4421 	qdf_work_t fst_update_work;
4422 	qdf_workqueue_t *fst_update_wq;
4423 	qdf_list_t fst_update_list;
4424 	uint32_t meta_counter;
4425 	uint32_t cmem_ba;
4426 	qdf_spinlock_t dp_rx_sw_ft_lock[MAX_REO_DEST_RINGS];
4427 	qdf_event_t cmem_resp_event;
4428 	bool flow_deletion_supported;
4429 	bool fst_in_cmem;
4430 	bool pm_suspended;
4431 };
4432 
4433 #endif /* WLAN_SUPPORT_RX_FISA */
4434 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
4435 
4436 #ifdef WLAN_FEATURE_STATS_EXT
4437 /*
 * struct dp_req_rx_hw_stats_t - RX peer HW stats query structure
 * @pending_tid_stats_cnt: pending tid stats count which waits for REO status
 * @is_query_timeout: flag to indicate whether the stats query timed out
4441  */
4442 struct dp_req_rx_hw_stats_t {
4443 	qdf_atomic_t pending_tid_stats_cnt;
4444 	bool is_query_timeout;
4445 };
4446 #endif
4447 /* soc level structure to declare arch specific ops for DP */
4448 
4449 
4450 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id);
4451 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc,
4452 					    uint32_t mac_id);
4453 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id);
4454 
4455 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
4456 void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc);
4457 #else
4458 static inline void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc) {}
4459 #endif
4460 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
4461 			 int ring_type, uint32_t num_entries,
4462 			 bool cached);
4463 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng);
4464 QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
4465 			int ring_type, int ring_num, int mac_id);
4466 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
4467 		    int ring_type, int ring_num);
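
/*
 * Typical usage (illustrative only, error handling omitted): a ring is
 * allocated first, then initialized against a ring type/number, and torn
 * down in the reverse order:
 *
 *	dp_srng_alloc(soc, &srng, ring_type, num_entries, false);
 *	dp_srng_init(soc, &srng, ring_type, ring_num, mac_id);
 *	...
 *	dp_srng_deinit(soc, &srng, ring_type, ring_num);
 *	dp_srng_free(soc, &srng);
 */
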
4468 void dp_print_peer_txrx_stats_be(struct cdp_peer_stats *peer_stats,
4469 				 enum peer_stats_type stats_type);
4470 void dp_print_peer_txrx_stats_li(struct cdp_peer_stats *peer_stats,
4471 				 enum peer_stats_type stats_type);
4472 
4473 enum timer_yield_status
4474 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
4475 			  uint64_t start_time);
4476 
4477 /*
4478  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
4479  * @vdev: Datapath VDEV handle
4480  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4481  * @hash_based: pointer to hash value (enabled/disabled) to be populated
4482  *
4483  * Return: None
4484  */
4485 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
4486 				  enum cdp_host_reo_dest_ring *reo_dest,
4487 				  bool *hash_based);
4488 
4489 /**
4490  * dp_reo_remap_config() - configure reo remap register value based
4491  *                         nss configuration.
4492  *		based on offload_radio value below remap configuration
4493  *		get applied.
4494  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
4495  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
4496  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
4497  *		3 - both Radios handled by NSS (remap not required)
4498  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
4499  *
 * @soc: DP soc handle
 * @remap0: output parameter indicates reo remap 0 register value
 * @remap1: output parameter indicates reo remap 1 register value
 * @remap2: output parameter indicates reo remap 2 register value
4503  * Return: bool type, true if remap is configured else false.
4504  */
4505 
4506 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
4507 			 uint32_t *remap1, uint32_t *remap2);
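
/*
 * Example (illustrative only): callers typically fetch the three remap
 * values and program the REO destination remap registers only when the
 * function reports that remapping is required:
 *
 *	uint32_t remap0, remap1, remap2;
 *
 *	if (dp_reo_remap_config(soc, &remap0, &remap1, &remap2))
 *		...write remap0/remap1/remap2 through the HAL...
 */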
4508 
4509 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
4510 /**
4511  * dp_tx_comp_get_prefetched_params_from_hal_desc() - Get prefetched TX desc
4512  * @soc: DP soc handle
4513  * @tx_comp_hal_desc: HAL TX Comp Descriptor
4514  * @r_tx_desc: SW Tx Descriptor retrieved from HAL desc.
4515  *
4516  * Return: None
4517  */
4518 void dp_tx_comp_get_prefetched_params_from_hal_desc(
4519 					struct dp_soc *soc,
4520 					void *tx_comp_hal_desc,
4521 					struct dp_tx_desc_s **r_tx_desc);
4522 #endif
4523 #endif /* _DP_TYPES_H_ */
4524