xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_types.h (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _DP_TYPES_H_
20 #define _DP_TYPES_H_
21 
22 #include <qdf_types.h>
23 #include <qdf_nbuf.h>
24 #include <qdf_lock.h>
25 #include <qdf_atomic.h>
26 #include <qdf_util.h>
27 #include <qdf_list.h>
28 #include <qdf_lro.h>
29 #include <queue.h>
30 #include <htt_common.h>
31 #include <htt.h>
32 #include <htt_stats.h>
33 #include <cdp_txrx_cmn.h>
34 #ifdef DP_MOB_DEFS
35 #include <cds_ieee80211_common.h>
36 #endif
37 #include <wdi_event_api.h>    /* WDI subscriber event list */
38 
39 #include "hal_hw_headers.h"
40 #include <hal_tx.h>
41 #include <hal_reo.h>
42 #include "wlan_cfg.h"
43 #include "hal_rx.h"
44 #include <hal_api.h>
45 #include <hal_api_mon.h>
46 #include "hal_rx.h"
47 //#include "hal_rx_flow.h"
48 
49 #define MAX_BW 7
50 #define MAX_RETRIES 4
51 #define MAX_RECEPTION_TYPES 4
52 
53 #define MINIDUMP_STR_SIZE 25
54 #ifndef REMOVE_PKT_LOG
55 #include <pktlog.h>
56 #endif
57 
58 //#include "dp_tx.h"
59 
60 #define REPT_MU_MIMO 1
61 #define REPT_MU_OFDMA_MIMO 3
62 #define DP_VO_TID 6
63  /** MAX TID MAPS AVAILABLE PER PDEV */
64 #define DP_MAX_TID_MAPS 16
65 /** pad DSCP_TID_MAP_MAX with 6 to fix oob issue */
66 #define DSCP_TID_MAP_MAX (64 + 6)
67 #define DP_IP_DSCP_SHIFT 2
68 #define DP_IP_DSCP_MASK 0x3f
69 #define DP_FC0_SUBTYPE_QOS 0x80
70 #define DP_QOS_TID 0x0f
71 #define DP_IPV6_PRIORITY_SHIFT 20
72 #define MAX_MON_LINK_DESC_BANKS 2
73 #define DP_VDEV_ALL 0xff
74 
75 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
76 #define MAX_PDEV_CNT 1
77 #define WLAN_DP_RESET_MON_BUF_RING_FILTER
78 #else
79 #define MAX_PDEV_CNT 3
80 #endif
81 
82 /* Max no. of VDEV per PSOC */
83 #ifdef WLAN_PSOC_MAX_VDEVS
84 #define MAX_VDEV_CNT WLAN_PSOC_MAX_VDEVS
85 #else
86 #define MAX_VDEV_CNT 51
87 #endif
88 
89 /* Max no. of VDEVs, a PDEV can support */
90 #ifdef WLAN_PDEV_MAX_VDEVS
91 #define DP_PDEV_MAX_VDEVS WLAN_PDEV_MAX_VDEVS
92 #else
93 #define DP_PDEV_MAX_VDEVS 17
94 #endif
95 
96 #define MAX_TXDESC_POOLS 4
97 #define MAX_RXDESC_POOLS 4
98 
99 #define EXCEPTION_DEST_RING_ID 0
100 #define MAX_IDLE_SCATTER_BUFS 16
101 #define DP_MAX_IRQ_PER_CONTEXT 12
102 #define DEFAULT_HW_PEER_ID 0xffff
103 
104 #define MAX_AST_AGEOUT_COUNT 128
105 
106 #define WBM_INT_ERROR_ALL 0
107 #define WBM_INT_ERROR_REO_NULL_BUFFER 1
108 #define WBM_INT_ERROR_REO_NULL_LINK_DESC 2
109 #define WBM_INT_ERROR_REO_NULL_MSDU_BUFF 3
110 #define WBM_INT_ERROR_REO_BUFF_REAPED 4
111 #define MAX_WBM_INT_ERROR_REASONS 5
112 
113 #define MAX_TX_HW_QUEUES MAX_TCL_DATA_RINGS
114 /* Maximum retries for Delba per tid per peer */
115 #define DP_MAX_DELBA_RETRY 3
116 
117 #define PCP_TID_MAP_MAX 8
118 #define MAX_MU_USERS 37
119 
120 #define REO_CMD_EVENT_HIST_MAX 64
121 
122 #define DP_MAX_SRNGS 64
123 
124 /* 2G PHYB */
125 #define PHYB_2G_LMAC_ID 2
126 #define PHYB_2G_TARGET_PDEV_ID 2
127 
/* Flags for skipping s/w tid classification */
129 #define DP_TX_HW_DSCP_TID_MAP_VALID 0x1
130 #define DP_TXRX_HLOS_TID_OVERRIDE_ENABLED 0x2
131 #define DP_TX_MESH_ENABLED 0x4
132 
133 #ifdef WLAN_SUPPORT_RX_FISA
134 #define FISA_FLOW_MAX_AGGR_COUNT        16 /* max flow aggregate count */
135 #endif
136 
137 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
138 #define DP_RX_REFILL_BUFF_POOL_SIZE  2048
139 #define DP_RX_REFILL_BUFF_POOL_BURST 64
140 #define DP_RX_REFILL_THRD_THRESHOLD  512
141 #endif
142 
143 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
144 #define DP_SKIP_BAR_UPDATE_TIMEOUT 5000
145 #endif
146 
/**
 * enum rx_pktlog_mode - Rx packet log capture mode
 * @DP_RX_PKTLOG_DISABLED: Rx packet logging is disabled
 * @DP_RX_PKTLOG_FULL: full Rx packet log mode
 * @DP_RX_PKTLOG_LITE: lite Rx packet log mode
 */
enum rx_pktlog_mode {
	DP_RX_PKTLOG_DISABLED = 0,
	DP_RX_PKTLOG_FULL,
	DP_RX_PKTLOG_LITE,
};
152 
/**
 * enum m_copy_mode - Available mcopy modes
 * @M_COPY_DISABLED: mcopy is disabled
 * @M_COPY: mcopy mode
 * @M_COPY_EXTENDED: extended mcopy mode
 */
enum m_copy_mode {
	M_COPY_DISABLED = 0,
	M_COPY = 2,
	M_COPY_EXTENDED = 4,
};
161 
/**
 * struct msdu_list - list of MSDU nbufs
 * @head: first nbuf in the list
 * @tail: last nbuf in the list
 * @sum_len: cumulative length of the listed nbufs
 */
struct msdu_list {
	qdf_nbuf_t head;
	qdf_nbuf_t tail;
	uint32_t sum_len;
};
167 
168 struct dp_soc_cmn;
169 struct dp_pdev;
170 struct dp_vdev;
171 struct dp_tx_desc_s;
172 struct dp_soc;
173 union dp_rx_desc_list_elem_t;
174 struct cdp_peer_rate_stats_ctx;
175 struct cdp_soc_rate_stats_ctx;
176 struct dp_rx_fst;
177 struct dp_mon_filter;
178 struct dp_mon_mpdu;
179 
/**
 * enum dp_peer_state - DP peer states
 * @DP_PEER_STATE_NONE: no state set
 * @DP_PEER_STATE_INIT: peer initialized
 * @DP_PEER_STATE_ACTIVE: peer active
 * @DP_PEER_STATE_LOGICAL_DELETE: peer logically deleted
 * @DP_PEER_STATE_INACTIVE: peer inactive
 * @DP_PEER_STATE_FREED: peer freed
 * @DP_PEER_STATE_INVALID: invalid/sentinel state
 */
enum dp_peer_state {
	DP_PEER_STATE_NONE,
	DP_PEER_STATE_INIT,
	DP_PEER_STATE_ACTIVE,
	DP_PEER_STATE_LOGICAL_DELETE,
	DP_PEER_STATE_INACTIVE,
	DP_PEER_STATE_FREED,
	DP_PEER_STATE_INVALID,
};
192 
/**
 * enum dp_mod_id - IDs identifying DP modules / calling contexts
 * (original comment was truncated; NOTE(review): these IDs appear to tag
 * the DP sub-component performing an operation — confirm against callers)
 */
enum dp_mod_id {
	DP_MOD_ID_TX_COMP = 0,
	DP_MOD_ID_RX = 1,
	DP_MOD_ID_HTT_COMP = 2,
	DP_MOD_ID_RX_ERR = 3,
	DP_MOD_ID_TX_PPDU_STATS = 4,
	DP_MOD_ID_RX_PPDU_STATS = 5,
	DP_MOD_ID_CDP = 6,
	DP_MOD_ID_GENERIC_STATS = 7,
	DP_MOD_ID_TX_MULTIPASS = 8,
	DP_MOD_ID_TX_CAPTURE = 9,
	DP_MOD_ID_NSS_OFFLOAD = 10,
	DP_MOD_ID_CONFIG = 11,
	DP_MOD_ID_HTT = 12,
	DP_MOD_ID_IPA = 13,
	DP_MOD_ID_AST = 14,
	DP_MOD_ID_MCAST2UCAST = 15,
	DP_MOD_ID_CHILD = 16,
	DP_MOD_ID_MESH = 17,
	DP_MOD_ID_TX_EXCEPTION = 18,
	DP_MOD_ID_TDLS = 19,
	DP_MOD_ID_MISC = 20,
	DP_MOD_ID_MSCS = 21,
	DP_MOD_ID_TX = 22,
	DP_MOD_ID_MAX = 23,
};
222 
223 #define DP_PDEV_ITERATE_VDEV_LIST(_pdev, _vdev) \
224 	TAILQ_FOREACH((_vdev), &(_pdev)->vdev_list, vdev_list_elem)
225 
226 #define DP_VDEV_ITERATE_PEER_LIST(_vdev, _peer) \
227 	TAILQ_FOREACH((_peer), &(_vdev)->peer_list, peer_list_elem)
228 
/*
 * Safely iterate (removal-tolerant) over the AST entry list of a peer.
 * Fix: the expansion previously referenced the caller-scope identifier
 * `peer` instead of the macro argument, so it only compiled when the
 * caller's variable happened to be named `peer`; use (_peer) instead.
 */
#define DP_PEER_ITERATE_ASE_LIST(_peer, _ase, _temp_ase) \
	TAILQ_FOREACH_SAFE((_ase), &(_peer)->ast_entry_list, ase_list_elem, (_temp_ase))
231 
232 #define DP_MUTEX_TYPE qdf_spinlock_t
233 
234 #define DP_FRAME_IS_MULTICAST(_a)  (*(_a) & 0x01)
235 #define DP_FRAME_IS_IPV4_MULTICAST(_a)  (*(_a) == 0x01)
236 
237 #define DP_FRAME_IS_IPV6_MULTICAST(_a)         \
238     ((_a)[0] == 0x33 &&                         \
239      (_a)[1] == 0x33)
240 
241 #define DP_FRAME_IS_BROADCAST(_a)              \
242     ((_a)[0] == 0xff &&                         \
243      (_a)[1] == 0xff &&                         \
244      (_a)[2] == 0xff &&                         \
245      (_a)[3] == 0xff &&                         \
246      (_a)[4] == 0xff &&                         \
247      (_a)[5] == 0xff)
248 #define DP_FRAME_IS_SNAP(_llc) ((_llc)->llc_dsap == 0xaa && \
249 		(_llc)->llc_ssap == 0xaa && \
250 		(_llc)->llc_un.type_snap.control == 0x3)
251 #define DP_FRAME_IS_LLC(typeorlen) ((typeorlen) >= 0x600)
252 #define DP_FRAME_FC0_TYPE_MASK 0x0c
253 #define DP_FRAME_FC0_TYPE_DATA 0x08
254 #define DP_FRAME_IS_DATA(_frame) \
255 	(((_frame)->i_fc[0] & DP_FRAME_FC0_TYPE_MASK) == DP_FRAME_FC0_TYPE_DATA)
256 
257 /**
258  * macros to convert hw mac id to sw mac id:
259  * mac ids used by hardware start from a value of 1 while
260  * those in host software start from a value of 0. Use the
261  * macros below to convert between mac ids used by software and
262  * hardware
263  */
264 #define DP_SW2HW_MACID(id) ((id) + 1)
265 #define DP_HW2SW_MACID(id) ((id) > 0 ? ((id) - 1) : 0)
266 
267 /**
268  * Number of Tx Queues
269  * enum and macro to define how many threshold levels is used
270  * for the AC based flow control
271  */
272 #ifdef QCA_AC_BASED_FLOW_CONTROL
enum dp_fl_ctrl_threshold {
	DP_TH_BE_BK = 0,	/* best-effort / background threshold level */
	DP_TH_VI,		/* video threshold level */
	DP_TH_VO,		/* voice threshold level */
	DP_TH_HI,		/* high-priority threshold level */
};
279 
280 #define FL_TH_MAX (4)
281 #define FL_TH_VI_PERCENTAGE (80)
282 #define FL_TH_VO_PERCENTAGE (60)
283 #define FL_TH_HI_PERCENTAGE (40)
284 #endif
285 
/**
 * enum dp_intr_mode - interrupt delivery mode used by the DP layer
 * @DP_INTR_INTEGRATED: Line interrupts
 * @DP_INTR_MSI: MSI interrupts
 * @DP_INTR_POLL: Polling (timer based)
 */
enum dp_intr_mode {
	DP_INTR_INTEGRATED = 0,
	DP_INTR_MSI,
	DP_INTR_POLL,
};
297 
/**
 * enum dp_tx_frm_type - type of frame being transmitted
 * @dp_tx_frm_std: Regular frame, no added header fragments
 * @dp_tx_frm_tso: TSO segment, with a modified IP header added
 * @dp_tx_frm_sg: SG segment
 * @dp_tx_frm_audio: Audio frames, a custom LLC/SNAP header added
 * @dp_tx_frm_me: Multicast to Unicast Converted frame
 * @dp_tx_frm_raw: Raw Frame
 */
enum dp_tx_frm_type {
	dp_tx_frm_std = 0,
	dp_tx_frm_tso,
	dp_tx_frm_sg,
	dp_tx_frm_audio,
	dp_tx_frm_me,
	dp_tx_frm_raw,
};
315 
/**
 * enum dp_ast_type - type of an AST entry
 * @dp_ast_type_wds: WDS peer AST type
 * @dp_ast_type_static: static ast entry type
 * @dp_ast_type_mec: Multicast echo ast entry type
 */
enum dp_ast_type {
	dp_ast_type_wds = 0,
	dp_ast_type_static,
	dp_ast_type_mec,
};
327 
/**
 * enum dp_nss_cfg - NSS offload configuration (bitmap of offloaded radios)
 * @dp_nss_cfg_default: No radios are offloaded
 * @dp_nss_cfg_first_radio: First radio offloaded
 * @dp_nss_cfg_second_radio: Second radio offloaded
 * @dp_nss_cfg_dbdc: Dual radios offloaded
 * @dp_nss_cfg_dbtc: Three radios offloaded
 * @dp_nss_cfg_max: upper-bound marker (one past dp_nss_cfg_dbtc)
 */
enum dp_nss_cfg {
	dp_nss_cfg_default = 0x0,
	dp_nss_cfg_first_radio = 0x1,
	dp_nss_cfg_second_radio = 0x2,
	dp_nss_cfg_dbdc = 0x3,
	dp_nss_cfg_dbtc = 0x7,
	dp_nss_cfg_max
};
344 
345 #ifdef WLAN_TX_PKT_CAPTURE_ENH
346 #define DP_CPU_RING_MAP_1 1
347 #endif
348 
/**
 * enum dp_cpu_ring_map_types - dp tx cpu ring map
 * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
 * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded
 * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
 * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
 * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
 * @DP_SINGLE_TX_RING_MAP: to avoid out of order all cpu mapped to single ring
 *			   (only with WLAN_TX_PKT_CAPTURE_ENH)
 * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
 */
enum dp_cpu_ring_map_types {
	DP_NSS_DEFAULT_MAP,
	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
	DP_NSS_DBDC_OFFLOADED_MAP,
	DP_NSS_DBTC_OFFLOADED_MAP,
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	DP_SINGLE_TX_RING_MAP,
#endif
	DP_NSS_CPU_RING_MAP_MAX
};
370 
/**
 * struct dp_rx_nbuf_frag_info - Holds vaddr and paddr for a buffer
 * @paddr: Physical address of buffer allocated.
 * @virt_addr: union holding the virtual handle:
 *	nbuf  - Allocated nbuf in case of nbuf approach.
 *	vaddr - Virtual address of frag allocated in case of frag approach.
 */
struct dp_rx_nbuf_frag_info {
	qdf_dma_addr_t paddr;
	union {
		qdf_nbuf_t nbuf;
		qdf_frag_t vaddr;
	} virt_addr;
};
385 
/**
 * enum dp_ctxt_type - context type
 * @DP_PDEV_TYPE: PDEV context
 * @DP_RX_RING_HIST_TYPE: Datapath rx ring history
 * @DP_RX_ERR_RING_HIST_TYPE: Datapath rx error ring history
 * @DP_RX_REINJECT_RING_HIST_TYPE: Datapath reinject ring history
 * @DP_TX_TCL_HIST_TYPE: Datapath TX TCL descriptor history
 * @DP_TX_COMP_HIST_TYPE: Datapath TX completion history
 * @DP_FISA_RX_FT_TYPE: Datapath FISA RX flow-table context
 * @DP_RX_REFILL_RING_HIST_TYPE: Datapath rx refill ring history
 * @DP_TX_HW_DESC_HIST_TYPE: Datapath TX HW descriptor history
 */
enum dp_ctxt_type {
	DP_PDEV_TYPE,
	DP_RX_RING_HIST_TYPE,
	DP_RX_ERR_RING_HIST_TYPE,
	DP_RX_REINJECT_RING_HIST_TYPE,
	DP_TX_TCL_HIST_TYPE,
	DP_TX_COMP_HIST_TYPE,
	DP_FISA_RX_FT_TYPE,
	DP_RX_REFILL_RING_HIST_TYPE,
	DP_TX_HW_DESC_HIST_TYPE,
};
406 
/**
 * enum dp_desc_type - source type for multiple pages allocation
 * (identifies which descriptor pool a multi-page allocation serves)
 * @DP_TX_DESC_TYPE: DP SW TX descriptor
 * @DP_TX_EXT_DESC_TYPE: DP TX msdu extension descriptor
 * @DP_TX_EXT_DESC_LINK_TYPE: DP link descriptor for msdu ext_desc
 * @DP_TX_TSO_DESC_TYPE: DP TX TSO descriptor
 * @DP_TX_TSO_NUM_SEG_TYPE: DP TX number of segments
 * @DP_RX_DESC_BUF_TYPE: DP RX SW descriptor
 * @DP_RX_DESC_STATUS_TYPE: DP RX SW descriptor for monitor status
 * @DP_HW_LINK_DESC_TYPE: DP HW link descriptor
 * @DP_HW_CC_SPT_PAGE_TYPE: DP pages for HW CC secondary page table
 */
enum dp_desc_type {
	DP_TX_DESC_TYPE,
	DP_TX_EXT_DESC_TYPE,
	DP_TX_EXT_DESC_LINK_TYPE,
	DP_TX_TSO_DESC_TYPE,
	DP_TX_TSO_NUM_SEG_TYPE,
	DP_RX_DESC_BUF_TYPE,
	DP_RX_DESC_STATUS_TYPE,
	DP_HW_LINK_DESC_TYPE,
	DP_HW_CC_SPT_PAGE_TYPE,
};
430 
/**
 * struct rx_desc_pool - RX software descriptor pool
 * @pool_size: number of RX descriptors in the pool
 * @elem_size: Element size (RX_DESC_MULTI_PAGE_ALLOC builds only)
 * @desc_pages: Multi page descriptors (RX_DESC_MULTI_PAGE_ALLOC builds only)
 * @array: pointer to array of RX descriptors (single-allocation builds only)
 * @freelist: pointer to free RX descriptor link list
 * @lock: Protection for the RX descriptor pool
 * @owner: owner for nbuf
 * @buf_size: Buffer size
 * @buf_alignment: Buffer alignment
 * @rx_mon_dest_frag_enable: Enable frag processing for mon dest buffer
 * @desc_type: type of desc this pool serves
 */
struct rx_desc_pool {
	uint32_t pool_size;
#ifdef RX_DESC_MULTI_PAGE_ALLOC
	uint16_t elem_size;
	struct qdf_mem_multi_page_t desc_pages;
#else
	union dp_rx_desc_list_elem_t *array;
#endif
	union dp_rx_desc_list_elem_t *freelist;
	qdf_spinlock_t lock;
	uint8_t owner;
	uint16_t buf_size;
	uint8_t buf_alignment;
	bool rx_mon_dest_frag_enable;
	enum dp_desc_type desc_type;
};
461 
/**
 * struct dp_tx_ext_desc_elem_s - Tx extension descriptor element
 * @next: next extension descriptor pointer
 * @vaddr: hlos virtual address pointer
 * @paddr: physical address pointer for descriptor
 * @flags: mark features for extension descriptor
 */
struct dp_tx_ext_desc_elem_s {
	struct dp_tx_ext_desc_elem_s *next;
	void *vaddr;
	qdf_dma_addr_t paddr;
	uint16_t flags;
};
475 
/**
 * struct dp_tx_ext_desc_pool_s - Tx Extension Descriptor Pool
 * @elem_count: Number of descriptors in the pool
 * @elem_size: Size of each descriptor
 * @num_free: Number of free descriptors
 * @desc_pages: multiple page allocation information for actual descriptors
 * @link_elem_size: size of the link descriptor in cacheable memory used for
 * 		    chaining the extension descriptors
 * @desc_link_pages: multiple page allocation information for link descriptors
 * @freelist: head of the free extension descriptor list
 * @lock: lock protecting allocation/free from/to the pool
 * @memctx: DMA memory context for the pool
 */
struct dp_tx_ext_desc_pool_s {
	uint16_t elem_count;
	int elem_size;
	uint16_t num_free;
	struct qdf_mem_multi_page_t desc_pages;
	int link_elem_size;
	struct qdf_mem_multi_page_t desc_link_pages;
	struct dp_tx_ext_desc_elem_s *freelist;
	qdf_spinlock_t lock;
	qdf_dma_mem_context(memctx);
};
498 
/**
 * struct dp_tx_desc_s - Tx Descriptor
 * @next: Next in the chain of descriptors in freelist or in the completion list
 * @nbuf: Buffer Address
 * @length: Frame length
 * @flags: Flags to track the state of descriptor and special frame handling
 * @id: Descriptor ID
 * @dma_addr: DMA address of the frame buffer
 * @vdev_id: vdev_id of vdev over which the packet was transmitted
 * @tx_status: Tx completion status
 * @peer_id: ID of the peer the frame is sent to
 * @pdev: Handle to pdev
 * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet).
 * 		   This is maintained in descriptor to allow more efficient
 * 		   processing in completion event processing code.
 * 		   This field is filled in with the htt_pkt_type enum.
 * @buffer_src: buffer source TQM, REO, FW etc.
 * @reserved: unused bits
 * @frm_type: Frame Type - ToDo check if this is redundant
 * @pkt_offset: Offset from which the actual packet data starts
 * @pool_id: Pool ID - used when releasing the descriptor
 * @msdu_ext_desc: MSDU extension descriptor
 * @me_buffer: Pointer to ME buffer - store this so that it can be freed on
 *		Tx completion of ME packet
 * @tso_desc: TSO segment descriptor for this frame
 * @tso_num_desc: TSO number-of-segments descriptor for this frame
 * @timestamp: timestamp for this Tx descriptor (units per callers — confirm)
 * @comp: copy of the HAL Tx completion descriptor contents
 */
struct dp_tx_desc_s {
	struct dp_tx_desc_s *next;
	qdf_nbuf_t nbuf;
	uint16_t length;
	uint16_t flags;
	uint32_t id;
	qdf_dma_addr_t dma_addr;
	uint8_t vdev_id;
	uint8_t tx_status;
	uint16_t peer_id;
	struct dp_pdev *pdev;
	uint8_t tx_encap_type:2,
		buffer_src:3,
		reserved:3;
	uint8_t frm_type;
	uint8_t pkt_offset;
	uint8_t  pool_id;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	void *me_buffer;
	void *tso_desc;
	void *tso_num_desc;
	uint64_t timestamp;
	struct hal_tx_desc_comp_s comp;
};
545 
/**
 * enum flow_pool_status - flow pool status
 * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
 *				and network queues are unpaused
 * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
 *			   and network queues are paused
 * @FLOW_POOL_BE_BK_PAUSED: best-effort/background queues are paused
 * @FLOW_POOL_VI_PAUSED: video queue is paused
 * @FLOW_POOL_VO_PAUSED: voice queue is paused
 * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
 * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
 */
enum flow_pool_status {
	FLOW_POOL_ACTIVE_UNPAUSED = 0,
	FLOW_POOL_ACTIVE_PAUSED = 1,
	FLOW_POOL_BE_BK_PAUSED = 2,
	FLOW_POOL_VI_PAUSED = 3,
	FLOW_POOL_VO_PAUSED = 4,
	FLOW_POOL_INVALID = 5,
	FLOW_POOL_INACTIVE = 6,
};
564 
/**
 * struct dp_tx_tso_seg_pool_s - TSO segment element pool
 * @pool_size: total number of pool elements
 * @num_free: free element count
 * @freelist: first free element pointer
 * @desc_pages: multiple page allocation information for actual descriptors
 * @lock: lock for accessing the pool
 */
struct dp_tx_tso_seg_pool_s {
	uint16_t pool_size;
	uint16_t num_free;
	struct qdf_tso_seg_elem_t *freelist;
	struct qdf_mem_multi_page_t desc_pages;
	qdf_spinlock_t lock;
};
580 
/**
 * struct dp_tx_tso_num_seg_pool_s - TSO number-of-segments element pool
 * @num_seg_pool_size: total number of pool elements
 * @num_free: free element count
 * @freelist: first free element pointer
 * @desc_pages: multiple page allocation information for actual descriptors
 * @lock: lock for accessing the pool
 */
struct dp_tx_tso_num_seg_pool_s {
	uint16_t num_seg_pool_size;
	uint16_t num_free;
	struct qdf_tso_num_seg_elem_t *freelist;
	struct qdf_mem_multi_page_t desc_pages;
	/* tso mutex */
	qdf_spinlock_t lock;
};
598 
/**
 * struct dp_tx_desc_pool_s - Tx Descriptor pool information
 * @elem_size: Size of each descriptor in the pool
 * @num_allocated: Number of used descriptors
 * @freelist: Chain of free descriptors
 * @desc_pages: multiple page allocation information for actual descriptors
 *
 * Fields present only with QCA_LL_TX_FLOW_CONTROL_V2 (per-flow pools):
 * @pool_size: Total number of descriptors in the pool
 * @flow_pool_id: ID of this flow pool
 * @num_invalid_bin: Deleted pool with pending Tx completions.
 * @avail_desc: number of available descriptors
 * @status: flow pool status (see enum flow_pool_status)
 * @flow_type: HTT flow type served by this pool
 * @stop_th: threshold to stop network queues (per-AC array when
 *	     QCA_AC_BASED_FLOW_CONTROL is set)
 * @start_th: threshold to restart network queues (per-AC array when
 *	      QCA_AC_BASED_FLOW_CONTROL is set)
 * @max_pause_time: maximum observed pause time per threshold level
 * @latest_pause_time: most recent pause time per threshold level
 * @pkt_drop_no_desc: packets dropped due to lack of descriptors
 * @flow_pool_lock: Lock when operating on the flow pool
 * @pool_create_cnt: pool create count (semantics per callers — confirm)
 * @pool_owner_ctx: owner context of this pool
 *
 * Fields present only without flow control V2:
 * @elem_count: Total number of descriptors in the pool
 * @num_free: Number of free descriptors
 * @lock: Lock for descriptor allocation/free from/to the pool
 */
struct dp_tx_desc_pool_s {
	uint16_t elem_size;
	uint32_t num_allocated;
	struct dp_tx_desc_s *freelist;
	struct qdf_mem_multi_page_t desc_pages;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	uint16_t pool_size;
	uint8_t flow_pool_id;
	uint8_t num_invalid_bin;
	uint16_t avail_desc;
	enum flow_pool_status status;
	enum htt_flow_type flow_type;
#ifdef QCA_AC_BASED_FLOW_CONTROL
	uint16_t stop_th[FL_TH_MAX];
	uint16_t start_th[FL_TH_MAX];
	qdf_time_t max_pause_time[FL_TH_MAX];
	qdf_time_t latest_pause_time[FL_TH_MAX];
#else
	uint16_t stop_th;
	uint16_t start_th;
#endif
	uint16_t pkt_drop_no_desc;
	qdf_spinlock_t flow_pool_lock;
	uint8_t pool_create_cnt;
	void *pool_owner_ctx;
#else
	uint16_t elem_count;
	uint32_t num_free;
	qdf_spinlock_t lock;
#endif
};
643 
/**
 * struct dp_txrx_pool_stats - flow pool related statistics
 * @pool_map_count: flow pool map received
 * @pool_unmap_count: flow pool unmap received
 * @pkt_drop_no_pool: packets dropped due to unavailability of pool
 */
struct dp_txrx_pool_stats {
	uint16_t pool_map_count;
	uint16_t pool_unmap_count;
	uint16_t pkt_drop_no_pool;
};
655 
/**
 * struct dp_srng - DP srng structure
 * @hal_srng: hal_srng handle
 * @base_vaddr_unaligned: un-aligned virtual base address of the srng ring
 * @base_vaddr_aligned: aligned virtual base address of the srng ring
 * @base_paddr_unaligned: un-aligned physical base address of the srng ring
 * @base_paddr_aligned: aligned physical base address of the srng ring
 * @alloc_size: size of the srng ring
 * @cached: is the srng ring memory cached or un-cached memory
 * @irq: irq number of the srng ring
 * @num_entries: number of entries in the srng ring
 * @is_mem_prealloc: Is this srng memory pre-allocated (DP_MEM_PRE_ALLOC only)
 * @crit_thresh: Critical threshold for near-full processing of this srng
 * @safe_thresh: Safe threshold for near-full processing of this srng
 * @near_full: Flag to indicate srng is near-full
 *	(last three exist only with WLAN_FEATURE_NEAR_FULL_IRQ)
 */
struct dp_srng {
	hal_ring_handle_t hal_srng;
	void *base_vaddr_unaligned;
	void *base_vaddr_aligned;
	qdf_dma_addr_t base_paddr_unaligned;
	qdf_dma_addr_t base_paddr_aligned;
	uint32_t alloc_size;
	uint8_t cached;
	int irq;
	uint32_t num_entries;
#ifdef DP_MEM_PRE_ALLOC
	uint8_t is_mem_prealloc;
#endif
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
	uint16_t crit_thresh;
	uint16_t safe_thresh;
	qdf_atomic_t near_full;
#endif
};
691 
/**
 * struct dp_rx_reorder_array_elem - Rx reorder array element
 * @head: first nbuf chained at this slot
 * @tail: last nbuf chained at this slot
 */
struct dp_rx_reorder_array_elem {
	qdf_nbuf_t head;
	qdf_nbuf_t tail;
};
696 
697 #define DP_RX_BA_INACTIVE 0
698 #define DP_RX_BA_ACTIVE 1
699 #define DP_RX_BA_IN_PROGRESS 2
/**
 * struct dp_reo_cmd_info - tracking node for a posted REO command
 * @cmd: command identifier (presumably returned at post time — confirm)
 * @cmd_type: REO command type (enum hal_reo_cmd_type)
 * @data: opaque context passed back to @handler
 * @handler: completion callback invoked with the REO status
 * @reo_cmd_list_elem: list linkage
 */
struct dp_reo_cmd_info {
	uint16_t cmd;
	enum hal_reo_cmd_type cmd_type;
	void *data;
	void (*handler)(struct dp_soc *, void *, union hal_reo_status *);
	TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
};
707 
/**
 * struct dp_rx_tid - per-TID Rx state: block-ack session bookkeeping,
 * reorder/defragmentation state and REO HW queue descriptor info
 */
struct dp_rx_tid {
	/* TID */
	int tid;

	/* Num of addba requests */
	uint32_t num_of_addba_req;

	/* Num of addba responses */
	uint32_t num_of_addba_resp;

	/* Num of delba requests */
	uint32_t num_of_delba_req;

	/* Num of addba responses successful */
	uint32_t num_addba_rsp_success;

	/* Num of addba responses failed */
	uint32_t num_addba_rsp_failed;

	/* pn size */
	uint8_t pn_size;
	/* REO TID queue descriptors */
	void *hw_qdesc_vaddr_unaligned;
	void *hw_qdesc_vaddr_aligned;
	qdf_dma_addr_t hw_qdesc_paddr_unaligned;
	qdf_dma_addr_t hw_qdesc_paddr;
	uint32_t hw_qdesc_alloc_size;

	/* RX ADDBA session state (DP_RX_BA_INACTIVE/ACTIVE/IN_PROGRESS) */
	int ba_status;

	/* RX BA window size */
	uint16_t ba_win_size;

	/* Starting sequence number in Addba request */
	uint16_t startseqnum;

	/* TODO: Check the following while adding defragmentation support */
	struct dp_rx_reorder_array_elem *array;
	/* base - single rx reorder element used for non-aggr cases */
	struct dp_rx_reorder_array_elem base;

	/* only used for defrag right now */
	TAILQ_ENTRY(dp_rx_tid) defrag_waitlist_elem;

	/* Store dst desc for reinjection */
	hal_ring_desc_t dst_ring_desc;
	struct dp_rx_desc *head_frag_desc;

	/* rx_tid lock */
	qdf_spinlock_t tid_lock;

	/* Sequence and fragments that are being processed currently */
	uint32_t curr_seq_num;
	uint32_t curr_frag_num;

	/* head PN number */
	uint64_t pn128[2];

	/* defrag timeout for this TID, in milliseconds */
	uint32_t defrag_timeout_ms;
	/* ADDBA dialog token */
	uint16_t dialogtoken;
	/* ADDBA response status code */
	uint16_t statuscode;
	/* user defined ADDBA response status code */
	uint16_t userstatuscode;

	/* Store ppdu_id when 2k exception is received */
	uint32_t ppdu_id_2k;

	/* Delba Tx completion status */
	uint8_t delba_tx_status;

	/* Delba Tx retry count */
	uint8_t delba_tx_retry;

	/* Delba stats */
	uint32_t delba_tx_success_cnt;
	uint32_t delba_tx_fail_cnt;

	/* Delba reason code for retries */
	uint8_t delba_rcode;

	/* Coex Override preserved windows size 1 based */
	uint16_t rx_ba_win_size_override;

	/* Peer TID statistics */
	struct cdp_peer_tid_stats stats;
};
796 
/**
 * struct dp_intr_stats - DP Interrupt Stats for an interrupt context
 * @num_tx_ring_masks: interrupts with tx_ring_mask set
 * @num_rx_ring_masks: interrupts with rx_ring_mask set
 * @num_rx_mon_ring_masks: interrupts with rx_mon_ring_mask set
 * @num_rx_err_ring_masks: interrupts with rx_err_ring_mask set
 * @num_rx_wbm_rel_ring_masks: interrupts with rx_wbm_rel_ring_mask set
 * @num_reo_status_ring_masks: interrupts with reo_status_ring_mask set
 * @num_rxdma2host_ring_masks: interrupts with rxdma2host_ring_mask set
 * @num_host2rxdma_ring_masks: interrupts with host2rxdma_ring_mask set
 * @num_rx_ring_near_full_masks: Near-full interrupts for REO DST ring
 * @num_tx_comp_ring_near_full_masks: Near-full interrupts for TX completion
 * @num_rx_wbm_rel_ring_near_full_masks: Near-full interrupts for RX WBM
 *					 release ring
 * @num_reo_status_ring_near_full_masks: Near-full interrupts for REO status
 *					 ring
 * @num_near_full_masks: total number of times the near full interrupt was
 *			 received
 * @num_masks: total number of times the interrupt was received
 *
 * Counter for individual masks are incremented only if there are any packets
 * on that ring.
 */
struct dp_intr_stats {
	uint32_t num_tx_ring_masks[MAX_TCL_DATA_RINGS];
	uint32_t num_rx_ring_masks[MAX_REO_DEST_RINGS];
	uint32_t num_rx_mon_ring_masks;
	uint32_t num_rx_err_ring_masks;
	uint32_t num_rx_wbm_rel_ring_masks;
	uint32_t num_reo_status_ring_masks;
	uint32_t num_rxdma2host_ring_masks;
	uint32_t num_host2rxdma_ring_masks;
	uint32_t num_rx_ring_near_full_masks[MAX_REO_DEST_RINGS];
	uint32_t num_tx_comp_ring_near_full_masks[MAX_TCL_DATA_RINGS];
	uint32_t num_rx_wbm_rel_ring_near_full_masks;
	uint32_t num_reo_status_ring_near_full_masks;
	uint32_t num_near_full_masks;
	uint32_t num_masks;
};
831 
/* per interrupt context: ring masks selecting which rings this
 * context services, plus per-context state
 */
struct dp_intr {
	uint8_t tx_ring_mask;   /* WBM Tx completion rings (0-2)
				associated with this napi context */
	uint8_t rx_ring_mask;   /* Rx REO rings (0-3) associated
				with this interrupt context */
	uint8_t rx_mon_ring_mask;  /* Rx monitor ring mask (0-2) */
	uint8_t rx_err_ring_mask; /* REO Exception Ring */
	uint8_t rx_wbm_rel_ring_mask; /* WBM2SW Rx Release Ring */
	uint8_t reo_status_ring_mask; /* REO command response ring */
	uint8_t rxdma2host_ring_mask; /* RXDMA to host destination ring */
	uint8_t host2rxdma_ring_mask; /* Host to RXDMA buffer ring */
	/* Host to RXDMA monitor  buffer ring */
	uint8_t host2rxdma_mon_ring_mask;
	/* RX REO rings near full interrupt mask */
	uint8_t rx_near_full_grp_1_mask;
	/* RX REO rings near full interrupt mask */
	uint8_t rx_near_full_grp_2_mask;
	/* WBM TX completion rings near full interrupt mask */
	uint8_t tx_ring_near_full_mask;
	struct dp_soc *soc;    /* Reference to SoC structure ,
				to get DMA ring handles */
	qdf_lro_ctx_t lro_ctx; /* LRO context for this interrupt context */
	uint8_t dp_intr_id;    /* index of this context in soc->intr_ctx */

	/* Interrupt Stats for individual masks */
	struct dp_intr_stats intr_stats;
};
860 
861 #define REO_DESC_FREELIST_SIZE 64
862 #define REO_DESC_FREE_DEFER_MS 1000
/**
 * struct reo_desc_list_node - freelist node holding an rx_tid whose REO
 * queue descriptor free is pending
 * @node: list linkage
 * @free_ts: timestamp recorded when the node was queued for free
 * @rx_tid: rx tid state (holds the HW qdesc to be freed)
 * @resend_update_reo_cmd: flag to resend the REO queue update command
 * @pending_ext_desc_size: pending extension descriptor size
 * @peer_mac: peer MAC address (REO_QDESC_HISTORY builds only)
 */
struct reo_desc_list_node {
	qdf_list_node_t node;
	unsigned long free_ts;
	struct dp_rx_tid rx_tid;
	bool resend_update_reo_cmd;
	uint32_t pending_ext_desc_size;
#ifdef REO_QDESC_HISTORY
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
#endif
};
873 
874 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
875 #define REO_DESC_DEFERRED_FREELIST_SIZE 256
876 #define REO_DESC_DEFERRED_FREE_MS 30000
877 
/**
 * struct reo_desc_deferred_freelist_node - node tracking a REO queue
 * descriptor whose free is deferred
 * @node: list linkage
 * @free_ts: timestamp recorded when the descriptor was queued for free
 * @hw_qdesc_vaddr_unaligned: un-aligned virtual address of the HW qdesc
 * @hw_qdesc_paddr: physical address of the HW qdesc
 * @hw_qdesc_alloc_size: allocated size of the HW qdesc
 * @peer_mac: peer MAC address (REO_QDESC_HISTORY builds only)
 */
struct reo_desc_deferred_freelist_node {
	qdf_list_node_t node;
	unsigned long free_ts;
	void *hw_qdesc_vaddr_unaligned;
	qdf_dma_addr_t hw_qdesc_paddr;
	uint32_t hw_qdesc_alloc_size;
#ifdef REO_QDESC_HISTORY
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
#endif /* REO_QDESC_HISTORY */
};
888 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
889 
890 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/**
 * struct reo_cmd_event_record - Elements to record for each reo command
 * @cmd_type: reo command type
 * @cmd_return_status: reo command post status
 * @timestamp: record timestamp for the reo command
 */
struct reo_cmd_event_record {
	enum hal_reo_cmd_type cmd_type;
	uint8_t cmd_return_status;
	uint32_t timestamp;
};
902 
/**
 * struct reo_cmd_event_history - Account for reo cmd events
 * @index: record number (atomic; updated by writers)
 * @cmd_record: last REO_CMD_EVENT_HIST_MAX records
 */
struct reo_cmd_event_history {
	qdf_atomic_t index;
	struct reo_cmd_event_record cmd_record[REO_CMD_EVENT_HIST_MAX];
};
912 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
913 
914 /* SoC level data path statistics */
915 struct dp_soc_stats {
916 	struct {
917 		uint32_t added;
918 		uint32_t deleted;
919 		uint32_t aged_out;
920 		uint32_t map_err;
921 		uint32_t ast_mismatch;
922 	} ast;
923 
924 	struct {
925 		uint32_t added;
926 		uint32_t deleted;
927 	} mec;
928 
929 	/* SOC level TX stats */
930 	struct {
931 		/* Total packets transmitted */
932 		struct cdp_pkt_info egress;
933 		/* packets dropped on tx because of no peer */
934 		struct cdp_pkt_info tx_invalid_peer;
935 		/* descriptors in each tcl ring */
936 		uint32_t tcl_ring_full[MAX_TCL_DATA_RINGS];
937 		/* Descriptors in use at soc */
938 		uint32_t desc_in_use;
939 		/* tqm_release_reason == FW removed */
940 		uint32_t dropped_fw_removed;
941 		/* tx completion release_src != TQM or FW */
942 		uint32_t invalid_release_source;
943 		/* tx completion wbm_internal_error */
944 		uint32_t wbm_internal_error[MAX_WBM_INT_ERROR_REASONS];
945 		/* tx completion non_wbm_internal_error */
946 		uint32_t non_wbm_internal_err;
947 		/* TX Comp loop packet limit hit */
948 		uint32_t tx_comp_loop_pkt_limit_hit;
949 		/* Head pointer Out of sync at the end of dp_tx_comp_handler */
950 		uint32_t hp_oos2;
951 		/* tx desc freed as part of vdev detach */
952 		uint32_t tx_comp_exception;
953 	} tx;
954 
955 	/* SOC level RX stats */
956 	struct {
957 		/* Total rx packets count */
958 		struct cdp_pkt_info ingress;
959 		/* Rx errors */
960 		/* Total Packets in Rx Error ring */
961 		uint32_t err_ring_pkts;
962 		/* No of Fragments */
963 		uint32_t rx_frags;
964 		/* No of incomplete fragments in waitlist */
965 		uint32_t rx_frag_wait;
966 		/* Fragments dropped due to errors */
967 		uint32_t rx_frag_err;
968 		/* Fragments received OOR causing sequence num mismatch */
969 		uint32_t rx_frag_oor;
970 		/* Fragments dropped due to len errors in skb */
971 		uint32_t rx_frag_err_len_error;
972 		/* Fragments dropped due to no peer found */
973 		uint32_t rx_frag_err_no_peer;
974 		/* No of reinjected packets */
975 		uint32_t reo_reinject;
976 		/* Reap loop packet limit hit */
977 		uint32_t reap_loop_pkt_limit_hit;
978 		/* Head pointer Out of sync at the end of dp_rx_process */
979 		uint32_t hp_oos2;
980 		/* Rx ring near full */
981 		uint32_t near_full;
982 		/* Break ring reaping as not all scattered msdu received */
983 		uint32_t msdu_scatter_wait_break;
984 		/* Number of bar frames received */
985 		uint32_t bar_frame;
986 		/* Number of frames routed from rxdma */
987 		uint32_t rxdma2rel_route_drop;
988 		/* Number of frames routed from reo*/
989 		uint32_t reo2rel_route_drop;
990 
991 		struct {
992 			/* Invalid RBM error count */
993 			uint32_t invalid_rbm;
994 			/* Invalid VDEV Error count */
995 			uint32_t invalid_vdev;
996 			/* Invalid PDEV error count */
997 			uint32_t invalid_pdev;
998 
999 			/* Packets delivered to stack that no related peer */
1000 			uint32_t pkt_delivered_no_peer;
1001 			/* Defrag peer uninit error count */
1002 			uint32_t defrag_peer_uninit;
1003 			/* Invalid sa_idx or da_idx*/
1004 			uint32_t invalid_sa_da_idx;
1005 			/* MSDU DONE failures */
1006 			uint32_t msdu_done_fail;
1007 			/* Invalid PEER Error count */
1008 			struct cdp_pkt_info rx_invalid_peer;
1009 			/* Invalid PEER ID count */
1010 			struct cdp_pkt_info rx_invalid_peer_id;
1011 			/* Invalid packet length */
1012 			struct cdp_pkt_info rx_invalid_pkt_len;
1013 			/* HAL ring access Fail error count */
1014 			uint32_t hal_ring_access_fail;
1015 			/* HAL ring access full Fail error count */
1016 			uint32_t hal_ring_access_full_fail;
1017 			/* RX DMA error count */
1018 			uint32_t rxdma_error[HAL_RXDMA_ERR_MAX];
1019 			/* RX REO DEST Desc Invalid Magic count */
1020 			uint32_t rx_desc_invalid_magic;
1021 			/* REO Error count */
1022 			uint32_t reo_error[HAL_REO_ERR_MAX];
1023 			/* HAL REO ERR Count */
1024 			uint32_t hal_reo_error[MAX_REO_DEST_RINGS];
1025 			/* HAL REO DEST Duplicate count */
1026 			uint32_t hal_reo_dest_dup;
1027 			/* HAL WBM RELEASE Duplicate count */
1028 			uint32_t hal_wbm_rel_dup;
1029 			/* HAL RXDMA error Duplicate count */
1030 			uint32_t hal_rxdma_err_dup;
1031 			/* ipa smmu map duplicate count */
1032 			uint32_t ipa_smmu_map_dup;
1033 			/* ipa smmu unmap duplicate count */
1034 			uint32_t ipa_smmu_unmap_dup;
1035 			/* ipa smmu unmap while ipa pipes is disabled */
1036 			uint32_t ipa_unmap_no_pipe;
1037 			/* REO cmd send fail/requeue count */
1038 			uint32_t reo_cmd_send_fail;
1039 			/* REO cmd send drain count */
1040 			uint32_t reo_cmd_send_drain;
1041 			/* RX msdu drop count due to scatter */
1042 			uint32_t scatter_msdu;
1043 			/* RX msdu drop count due to invalid cookie */
1044 			uint32_t invalid_cookie;
1045 			/* Count of stale cookie read in RX path */
1046 			uint32_t stale_cookie;
1047 			/* Delba sent count due to RX 2k jump */
1048 			uint32_t rx_2k_jump_delba_sent;
1049 			/* RX 2k jump msdu indicated to stack count */
1050 			uint32_t rx_2k_jump_to_stack;
1051 			/* RX 2k jump msdu dropped count */
1052 			uint32_t rx_2k_jump_drop;
1053 			/* REO ERR msdu buffer received */
1054 			uint32_t reo_err_msdu_buf_rcved;
1055 			/* REO ERR msdu buffer with invalid coookie received */
1056 			uint32_t reo_err_msdu_buf_invalid_cookie;
1057 			/* REO OOR msdu drop count */
1058 			uint32_t reo_err_oor_drop;
1059 			/* REO OOR msdu indicated to stack count */
1060 			uint32_t reo_err_oor_to_stack;
1061 			/* REO OOR scattered msdu count */
1062 			uint32_t reo_err_oor_sg_count;
1063 			/* RX msdu rejected count on delivery to vdev stack_fn*/
1064 			uint32_t rejected;
1065 			/* Incorrect msdu count in MPDU desc info */
1066 			uint32_t msdu_count_mismatch;
1067 			/* RX raw frame dropped count */
1068 			uint32_t raw_frm_drop;
1069 			/* Stale link desc cookie count*/
1070 			uint32_t invalid_link_cookie;
1071 			/* Nbuf sanity failure */
1072 			uint32_t nbuf_sanity_fail;
1073 			/* Duplicate link desc refilled */
1074 			uint32_t dup_refill_link_desc;
1075 			/* Incorrect msdu continuation bit in MSDU desc */
1076 			uint32_t msdu_continuation_err;
1077 			/* Non Eapol packet drop count due to peer not authorized  */
1078 			uint32_t peer_unauth_rx_pkt_drop;
1079 			/* count of start sequence (ssn) updates */
1080 			uint32_t ssn_update_count;
1081 			/* count of bar handling fail */
1082 			uint32_t bar_handle_fail_count;
1083 			/* EAPOL drop count in intrabss scenario */
1084 			uint32_t intrabss_eapol_drop;
1085 			/* PN check failed for 2K-jump or OOR error */
1086 			uint32_t pn_in_dest_check_fail;
1087 			/* MSDU len err count */
1088 			uint32_t msdu_len_err;
1089 			/* Rx flush count */
1090 			uint32_t rx_flush_count;
1091 		} err;
1092 
1093 		/* packet count per core - per ring */
1094 		uint64_t ring_packets[NR_CPUS][MAX_REO_DEST_RINGS];
1095 	} rx;
1096 
1097 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1098 	struct reo_cmd_event_history cmd_event_history;
1099 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1100 };
1101 
/**
 * union dp_align_mac_addr - MAC address overlaid with 2/4-byte wide views
 * @raw: MAC address as a plain byte array
 * @align2: MAC address viewed as three 16-bit halfwords
 * @align4: MAC address viewed as one 32-bit word followed by a 16-bit word
 * @align4_2: MAC address viewed as a 16-bit word followed by a 32-bit word
 *            (packed, since the 32-bit member sits at byte offset 2)
 *
 * Allows MAC address compare/copy with wider-than-byte accesses.
 */
union dp_align_mac_addr {
	uint8_t raw[QDF_MAC_ADDR_SIZE];
	struct {
		uint16_t bytes_ab;
		uint16_t bytes_cd;
		uint16_t bytes_ef;
	} align2;
	struct {
		uint32_t bytes_abcd;
		uint16_t bytes_ef;
	} align4;
	struct __attribute__((__packed__)) {
		uint16_t bytes_ab;
		uint32_t bytes_cdef;
	} align4_2;
};
1118 
1119 /**
1120  * struct dp_ast_free_cb_params - HMWDS free callback cookie
1121  * @mac_addr: ast mac address
1122  * @peer_mac_addr: mac address of peer
1123  * @type: ast entry type
1124  * @vdev_id: vdev_id
1125  * @flags: ast flags
1126  */
1127 struct dp_ast_free_cb_params {
1128 	union dp_align_mac_addr mac_addr;
1129 	union dp_align_mac_addr peer_mac_addr;
1130 	enum cdp_txrx_ast_entry_type type;
1131 	uint8_t vdev_id;
1132 	uint32_t flags;
1133 };
1134 
1135 /*
1136  * dp_ast_entry
1137  *
1138  * @ast_idx: Hardware AST Index
1139  * @peer_id: Next Hop peer_id (for non-WDS nodes, this will be point to
1140  *           associated peer with this MAC address)
1141  * @mac_addr:  MAC Address for this AST entry
1142  * @next_hop: Set to 1 if this is for a WDS node
1143  * @is_active: flag to indicate active data traffic on this node
1144  *             (used for aging out/expiry)
1145  * @ase_list_elem: node in peer AST list
1146  * @is_bss: flag to indicate if entry corresponds to bss peer
1147  * @is_mapped: flag to indicate that we have mapped the AST entry
1148  *             in ast_table
1149  * @pdev_id: pdev ID
1150  * @vdev_id: vdev ID
1151  * @ast_hash_value: hast value in HW
1152  * @ref_cnt: reference count
1153  * @type: flag to indicate type of the entry(static/WDS/MEC)
1154  * @delete_in_progress: Flag to indicate that delete commands send to FW
1155  *                      and host is waiting for response from FW
1156  * @callback: ast free/unmap callback
1157  * @cookie: argument to callback
1158  * @hash_list_elem: node in soc AST hash list (mac address used as hash)
1159  */
1160 struct dp_ast_entry {
1161 	uint16_t ast_idx;
1162 	uint16_t peer_id;
1163 	union dp_align_mac_addr mac_addr;
1164 	bool next_hop;
1165 	bool is_active;
1166 	bool is_mapped;
1167 	uint8_t pdev_id;
1168 	uint8_t vdev_id;
1169 	uint16_t ast_hash_value;
1170 	qdf_atomic_t ref_cnt;
1171 	enum cdp_txrx_ast_entry_type type;
1172 	bool delete_in_progress;
1173 	txrx_ast_free_cb callback;
1174 	void *cookie;
1175 	TAILQ_ENTRY(dp_ast_entry) ase_list_elem;
1176 	TAILQ_ENTRY(dp_ast_entry) hash_list_elem;
1177 };
1178 
1179 /*
1180  * dp_mec_entry
1181  *
1182  * @mac_addr:  MAC Address for this MEC entry
1183  * @is_active: flag to indicate active data traffic on this node
1184  *             (used for aging out/expiry)
1185  * @pdev_id: pdev ID
1186  * @vdev_id: vdev ID
1187  * @hash_list_elem: node in soc MEC hash list (mac address used as hash)
1188  */
1189 struct dp_mec_entry {
1190 	union dp_align_mac_addr mac_addr;
1191 	bool is_active;
1192 	uint8_t pdev_id;
1193 	uint8_t vdev_id;
1194 
1195 	TAILQ_ENTRY(dp_mec_entry) hash_list_elem;
1196 };
1197 
/* SOC level HTT (target-to-host) stats processing context */
struct htt_t2h_stats {
	/* lock to protect htt_stats_msg update */
	qdf_spinlock_t lock;

	/* work queue to process htt stats */
	qdf_work_t work;

	/* T2H Ext stats message queue */
	qdf_nbuf_queue_t msg;

	/* number of completed stats in htt_stats_msg */
	uint32_t num_stats;
};
1212 
/* One contiguous memory bank backing link descriptors */
struct link_desc_bank {
	/* virtual base address as returned by the allocator (unaligned) */
	void *base_vaddr_unaligned;
	/* aligned virtual base address */
	void *base_vaddr;
	/* physical base address as returned by the allocator (unaligned) */
	qdf_dma_addr_t base_paddr_unaligned;
	/* aligned physical base address */
	qdf_dma_addr_t base_paddr;
	/* bank size in bytes */
	uint32_t size;
};
1220 
/* Emergency RX nbuf pool used when regular nbuf allocation fails */
struct rx_buff_pool {
	/* queue of emergency nbufs */
	qdf_nbuf_queue_head_t emerg_nbuf_q;
	/* count of nbuf allocation failures */
	uint32_t nbuf_fail_cnt;
	/* set once the pool has been initialized */
	bool is_initialized;
};
1226 
1227 struct rx_refill_buff_pool {
1228 	bool is_initialized;
1229 	uint16_t head;
1230 	uint16_t tail;
1231 	struct dp_pdev *dp_pdev;
1232 	uint16_t max_bufq_len;
1233 	qdf_nbuf_t buf_elem[2048];
1234 };
1235 
#ifdef DP_TX_HW_DESC_HISTORY
/* Number of entries kept in the TX HW descriptor history */
#define DP_TX_HW_DESC_HIST_MAX 6144

/* One recorded TCL descriptor posting event */
struct dp_tx_hw_desc_evt {
	/* raw TCL descriptor bytes as posted to HW */
	uint8_t tcl_desc[HAL_TX_DESC_LEN_BYTES];
	/* timestamp when the descriptor was posted */
	uint64_t posted;
	/* ring head pointer at posting time */
	uint32_t hp;
	/* ring tail pointer at posting time */
	uint32_t tp;
};
1245 
/* struct dp_tx_hw_desc_history - TX HW desc history
 * @index: Index where the last entry is written
 * @entry: history entries
 */
struct dp_tx_hw_desc_history {
	uint64_t index;
	struct dp_tx_hw_desc_evt entry[DP_TX_HW_DESC_HIST_MAX];
};
#endif
1255 
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/*
 * The logic for getting the current index of these histories depends on
 * the sizes being powers of 2 (index is masked, not taken modulo).
 */
#define DP_RX_HIST_MAX 2048
#define DP_RX_ERR_HIST_MAX 2048
#define DP_RX_REINJECT_HIST_MAX 1024
#define DP_RX_REFILL_HIST_MAX 2048

/* Compile-time enforcement of the power-of-2 requirement above */
QDF_COMPILE_TIME_ASSERT(rx_history_size,
			(DP_RX_HIST_MAX &
			 (DP_RX_HIST_MAX - 1)) == 0);
QDF_COMPILE_TIME_ASSERT(rx_err_history_size,
			(DP_RX_ERR_HIST_MAX &
			 (DP_RX_ERR_HIST_MAX - 1)) == 0);
QDF_COMPILE_TIME_ASSERT(rx_reinject_history_size,
			(DP_RX_REINJECT_HIST_MAX &
			 (DP_RX_REINJECT_HIST_MAX - 1)) == 0);
QDF_COMPILE_TIME_ASSERT(rx_refill_history_size,
			(DP_RX_REFILL_HIST_MAX &
			(DP_RX_REFILL_HIST_MAX - 1)) == 0);
1279 
1280 /**
1281  * struct dp_buf_info_record - ring buffer info
1282  * @hbi: HW ring buffer info
1283  * @timestamp: timestamp when this entry was recorded
1284  */
1285 struct dp_buf_info_record {
1286 	struct hal_buf_info hbi;
1287 	uint64_t timestamp;
1288 };
1289 
1290 /**
1291  * struct dp_refill_info_record - ring refill buffer info
1292  * @hp: HP value after refill
1293  * @tp: cached tail value during refill
1294  * @num_req: number of buffers requested to refill
1295  * @num_refill: number of buffers refilled to ring
1296  * @timestamp: timestamp when this entry was recorded
1297  */
1298 struct dp_refill_info_record {
1299 	uint32_t hp;
1300 	uint32_t tp;
1301 	uint32_t num_req;
1302 	uint32_t num_refill;
1303 	uint64_t timestamp;
1304 };
1305 
/* struct dp_rx_history - rx ring history
 * @index: Index where the last entry is written
 * @entry: history entries
 */
struct dp_rx_history {
	qdf_atomic_t index;
	struct dp_buf_info_record entry[DP_RX_HIST_MAX];
};
1314 
/* struct dp_rx_err_history - rx err ring history
 * @index: Index where the last entry is written
 * @entry: history entries
 */
struct dp_rx_err_history {
	qdf_atomic_t index;
	struct dp_buf_info_record entry[DP_RX_ERR_HIST_MAX];
};
1323 
/* struct dp_rx_reinject_history - rx reinject ring history
 * @index: Index where the last entry is written
 * @entry: history entries
 */
struct dp_rx_reinject_history {
	qdf_atomic_t index;
	struct dp_buf_info_record entry[DP_RX_REINJECT_HIST_MAX];
};
1332 
/* struct dp_rx_refill_history - rx buf refill history
 * @index: Index where the last entry is written
 * @entry: history entries
 */
struct dp_rx_refill_history {
	qdf_atomic_t index;
	struct dp_refill_info_record entry[DP_RX_REFILL_HIST_MAX];
};

#endif
1343 
/*
 * enum dp_tx_event_type - event types recorded in the TX descriptor
 * history (see struct dp_tx_desc_event)
 * @DP_TX_DESC_INVAL_EVT: invalid event
 * @DP_TX_DESC_MAP: descriptor map event
 * @DP_TX_DESC_COOKIE: descriptor cookie event
 * @DP_TX_DESC_FLUSH: descriptor flush event
 * @DP_TX_DESC_UNMAP: descriptor unmap event
 * @DP_TX_COMP_UNMAP: completion-path unmap event
 * @DP_TX_COMP_UNMAP_ERR: completion-path unmap error event
 * @DP_TX_COMP_MSDU_EXT: completion-path MSDU extension event
 */
enum dp_tx_event_type {
	DP_TX_DESC_INVAL_EVT = 0,
	DP_TX_DESC_MAP,
	DP_TX_DESC_COOKIE,
	DP_TX_DESC_FLUSH,
	DP_TX_DESC_UNMAP,
	DP_TX_COMP_UNMAP,
	DP_TX_COMP_UNMAP_ERR,
	DP_TX_COMP_MSDU_EXT,
};
1354 
#ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
/* Size must be in 2 power, for bitwise index rotation */
#define DP_TX_TCL_HISTORY_SIZE 0x4000
#define DP_TX_COMP_HISTORY_SIZE 0x4000

/* One recorded TX descriptor lifecycle event */
struct dp_tx_desc_event {
	/* skb associated with the descriptor */
	qdf_nbuf_t skb;
	/* NOTE(review): dma_addr_t is Linux-specific; the rest of this file
	 * uses qdf_dma_addr_t — confirm this is intentional
	 */
	dma_addr_t paddr;
	/* SW cookie of the descriptor */
	uint32_t sw_cookie;
	/* which lifecycle event this entry records */
	enum dp_tx_event_type type;
	/* timestamp when this entry was recorded */
	uint64_t ts;
};
1367 
/* History of descriptors posted to the TCL ring */
struct dp_tx_tcl_history {
	/* index where the last entry was written */
	qdf_atomic_t index;
	struct dp_tx_desc_event entry[DP_TX_TCL_HISTORY_SIZE];
};
1372 
/* History of descriptors seen on the TX completion path */
struct dp_tx_comp_history {
	/* index where the last entry was written */
	qdf_atomic_t index;
	struct dp_tx_desc_event entry[DP_TX_COMP_HISTORY_SIZE];
};
#endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
1378 
/* structure to record recent operation related variables, for debug */
struct dp_last_op_info {
	/* last link desc buf info through WBM release ring */
	struct hal_buf_info wbm_rel_link_desc;
	/* last link desc buf info through REO reinject ring */
	struct hal_buf_info reo_reinject_link_desc;
};
1386 
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR

/**
 * struct dp_swlm_tcl_data - params for tcl register write coalescing
 *			     decision making
 * @nbuf: TX packet
 * @tid: tid for transmitting the current packet
 * @num_ll_connections: Number of low latency connections on this vdev
 *
 * This structure contains the information required by the software
 * latency manager to decide on whether to coalesce the current TCL
 * register write or not.
 */
struct dp_swlm_tcl_data {
	qdf_nbuf_t nbuf;
	uint8_t tid;
	uint8_t num_ll_connections;
};
1405 
1406 /**
1407  * union swlm_data - SWLM query data
1408  * @tcl_data: data for TCL query in SWLM
1409  */
1410 union swlm_data {
1411 	struct dp_swlm_tcl_data *tcl_data;
1412 };
1413 
1414 /**
1415  * struct dp_swlm_ops - SWLM ops
1416  * @tcl_wr_coalesce_check: handler to check if the current TCL register
1417  *			   write can be coalesced or not
1418  */
1419 struct dp_swlm_ops {
1420 	int (*tcl_wr_coalesce_check)(struct dp_soc *soc,
1421 				     struct dp_swlm_tcl_data *tcl_data);
1422 };
1423 
1424 /**
1425  * struct dp_swlm_stats - Stats for Software Latency manager.
1426  * @tcl.timer_flush_success: Num TCL HP writes success from timer context
1427  * @tcl.timer_flush_fail: Num TCL HP writes failure from timer context
1428  * @tcl.tid_fail: Num TCL register write coalescing skips, since the pkt
1429  *		 was being transmitted on a TID above coalescing threshold
1430  * @tcl.sp_frames: Num TCL register write coalescing skips, since the pkt
1431  *		  being transmitted was a special frame
1432  * @tcl.ll_connection: Num TCL register write coalescing skips, since the
1433  *		       vdev has low latency connections
1434  * @tcl.bytes_thresh_reached: Num TCL HP writes flush after the coalescing
1435  *			     bytes threshold was reached
1436  * @tcl.time_thresh_reached: Num TCL HP writes flush after the coalescing
1437  *			    session time expired
1438  * @tcl.tput_criteria_fail: Num TCL HP writes coalescing fails, since the
1439  *			   throughput did not meet session threshold
1440  * @tcl.coalesce_success: Num of TCL HP writes coalesced successfully.
1441  * @tcl.coalesce_fail: Num of TCL HP writes coalesces failed
1442  */
1443 struct dp_swlm_stats {
1444 	struct {
1445 		uint32_t timer_flush_success;
1446 		uint32_t timer_flush_fail;
1447 		uint32_t tid_fail;
1448 		uint32_t sp_frames;
1449 		uint32_t ll_connection;
1450 		uint32_t bytes_thresh_reached;
1451 		uint32_t time_thresh_reached;
1452 		uint32_t tput_criteria_fail;
1453 		uint32_t coalesce_success;
1454 		uint32_t coalesce_fail;
1455 	} tcl;
1456 };
1457 
1458 /**
1459  * struct dp_swlm_params: Parameters for different modules in the
1460  *			  Software latency manager.
1461  * @tcl.flush_timer: Timer for flushing the coalesced TCL HP writes
1462  * @tcl.rx_traffic_thresh: Threshold for RX traffic, to begin TCL register
1463  *			   write coalescing
1464  * @tcl.tx_traffic_thresh: Threshold for TX traffic, to begin TCL register
1465  *			   write coalescing
1466  * @tcl.sampling_time: Sampling time to test the throughput threshold
1467  * @tcl.sampling_session_tx_bytes: Num bytes transmitted in the sampling time
1468  * @tcl.bytes_flush_thresh: Bytes threshold to flush the TCL HP register write
1469  * @tcl.time_flush_thresh: Time threshold to flush the TCL HP register write
1470  * @tcl.tx_thresh_multiplier: Multiplier to deduce the bytes threshold after
1471  *			      which the TCL HP register is written, thereby
1472  *			      ending the coalescing.
1473  * @tcl.coalesce_end_time: End timestamp for current coalescing session
1474  * @tcl.bytes_coalesced: Num bytes coalesced in the current session
1475  * @tcl.tx_pkt_thresh: Threshold for TX packet count, to begin TCL register
1476  *		       write coalescing
1477  */
1478 struct dp_swlm_params {
1479 	struct {
1480 		qdf_timer_t flush_timer;
1481 		uint32_t rx_traffic_thresh;
1482 		uint32_t tx_traffic_thresh;
1483 		uint32_t sampling_time;
1484 		uint32_t sampling_session_tx_bytes;
1485 		uint32_t bytes_flush_thresh;
1486 		uint32_t time_flush_thresh;
1487 		uint32_t tx_thresh_multiplier;
1488 		uint64_t coalesce_end_time;
1489 		uint32_t bytes_coalesced;
1490 		uint32_t tx_pkt_thresh;
1491 	} tcl;
1492 };
1493 
1494 /**
1495  * struct dp_swlm - Software latency manager context
1496  * @ops: SWLM ops pointers
1497  * @is_enabled: SWLM enabled/disabled
1498  * @is_init: SWLM module initialized
1499  * @stats: SWLM stats
1500  * @params: SWLM SRNG params
1501  * @tcl_flush_timer: flush timer for TCL register writes
1502  */
1503 struct dp_swlm {
1504 	struct dp_swlm_ops *ops;
1505 	uint8_t is_enabled:1,
1506 		is_init:1;
1507 	struct dp_swlm_stats stats;
1508 	struct dp_swlm_params params;
1509 };
1510 #endif
1511 
#ifdef IPA_OFFLOAD
/* IPA uC datapath offload Wlan Tx resources */
struct ipa_dp_tx_rsc {
	/* Resource info to be passed to IPA */
	qdf_dma_addr_t ipa_tcl_ring_base_paddr;
	void *ipa_tcl_ring_base_vaddr;
	uint32_t ipa_tcl_ring_size;
	qdf_dma_addr_t ipa_tcl_hp_paddr;
	/* number of TX buffers allocated for this resource set */
	uint32_t alloc_tx_buf_cnt;

	qdf_dma_addr_t ipa_wbm_ring_base_paddr;
	void *ipa_wbm_ring_base_vaddr;
	uint32_t ipa_wbm_ring_size;
	qdf_dma_addr_t ipa_wbm_tp_paddr;
	/* WBM2SW HP shadow paddr */
	qdf_dma_addr_t ipa_wbm_hp_shadow_paddr;

	/* TX buffers populated into the WBM ring */
	void **tx_buf_pool_vaddr_unaligned;
	qdf_dma_addr_t *tx_buf_pool_paddr_unaligned;
};
#endif
1534 
/* forward declaration; full definition lives in the TX path headers */
struct dp_tx_msdu_info_s;
/*
 * enum dp_context_type - DP Context Type
 * @DP_CONTEXT_TYPE_SOC: Context type DP SOC
 * @DP_CONTEXT_TYPE_PDEV: Context type DP PDEV
 * @DP_CONTEXT_TYPE_VDEV: Context type DP VDEV
 * @DP_CONTEXT_TYPE_PEER: Context type DP PEER
 *
 * Helper enums to be used to retrieve the size of the corresponding
 * data structure by passing the type.
 */
enum dp_context_type {
	DP_CONTEXT_TYPE_SOC,
	DP_CONTEXT_TYPE_PDEV,
	DP_CONTEXT_TYPE_VDEV,
	DP_CONTEXT_TYPE_PEER
};
1552 
1553 /*
1554  * struct dp_arch_ops- DP target specific arch ops
1555  * @DP_CONTEXT_TYPE_SOC: Context type DP SOC
1556  * @DP_CONTEXT_TYPE_PDEV: Context type DP PDEV
1557  * @tx_hw_enqueue: enqueue TX data to HW
1558  * @tx_comp_get_params_from_hal_desc: get software tx descriptor and release
1559  * 				      source from HAL desc for wbm release ring
1560  * @dp_service_near_full_srngs: Handler for servicing the near full IRQ
1561  * @txrx_set_vdev_param: target specific ops while setting vdev params
1562  * @dp_srng_test_and_update_nf_params: Check if the srng is in near full state
1563  *				and set the near-full params.
1564  */
1565 struct dp_arch_ops {
1566 	/* INIT/DEINIT Arch Ops */
1567 	QDF_STATUS (*txrx_soc_attach)(struct dp_soc *soc);
1568 	QDF_STATUS (*txrx_soc_detach)(struct dp_soc *soc);
1569 	QDF_STATUS (*txrx_soc_init)(struct dp_soc *soc);
1570 	QDF_STATUS (*txrx_soc_deinit)(struct dp_soc *soc);
1571 	QDF_STATUS (*txrx_soc_srng_alloc)(struct dp_soc *soc);
1572 	QDF_STATUS (*txrx_soc_srng_init)(struct dp_soc *soc);
1573 	void (*txrx_soc_srng_deinit)(struct dp_soc *soc);
1574 	void (*txrx_soc_srng_free)(struct dp_soc *soc);
1575 	QDF_STATUS (*txrx_pdev_attach)(struct dp_pdev *pdev);
1576 	QDF_STATUS (*txrx_pdev_detach)(struct dp_pdev *pdev);
1577 	QDF_STATUS (*txrx_vdev_attach)(struct dp_soc *soc,
1578 				       struct dp_vdev *vdev);
1579 	QDF_STATUS (*txrx_vdev_detach)(struct dp_soc *soc,
1580 				       struct dp_vdev *vdev);
1581 	QDF_STATUS (*dp_rxdma_ring_sel_cfg)(struct dp_soc *soc);
1582 	void (*soc_cfg_attach)(struct dp_soc *soc);
1583 
1584 	/* TX RX Arch Ops */
1585 	QDF_STATUS (*tx_hw_enqueue)(struct dp_soc *soc, struct dp_vdev *vdev,
1586 				    struct dp_tx_desc_s *tx_desc,
1587 				    uint16_t fw_metadata,
1588 				    struct cdp_tx_exception_metadata *metadata,
1589 				    struct dp_tx_msdu_info_s *msdu_info);
1590 
1591 	 void (*tx_comp_get_params_from_hal_desc)(struct dp_soc *soc,
1592 						  void *tx_comp_hal_desc,
1593 						  struct dp_tx_desc_s **desc);
1594 	uint32_t (*dp_rx_process)(struct dp_intr *int_ctx,
1595 				  hal_ring_handle_t hal_ring_hdl,
1596 				  uint8_t reo_ring_num, uint32_t quota);
1597 
1598 	QDF_STATUS (*dp_tx_desc_pool_init)(struct dp_soc *soc,
1599 					   uint16_t num_elem,
1600 					   uint8_t pool_id);
1601 	void (*dp_tx_desc_pool_deinit)(
1602 				struct dp_soc *soc,
1603 				struct dp_tx_desc_pool_s *tx_desc_pool,
1604 				uint8_t pool_id);
1605 
1606 	QDF_STATUS (*dp_rx_desc_pool_init)(struct dp_soc *soc,
1607 					   struct rx_desc_pool *rx_desc_pool,
1608 					   uint32_t pool_id);
1609 	void (*dp_rx_desc_pool_deinit)(struct dp_soc *soc,
1610 				       struct rx_desc_pool *rx_desc_pool,
1611 				       uint32_t pool_id);
1612 
1613 	QDF_STATUS (*dp_wbm_get_rx_desc_from_hal_desc)(
1614 						struct dp_soc *soc,
1615 						void *ring_desc,
1616 						struct dp_rx_desc **r_rx_desc);
1617 
1618 	struct dp_rx_desc *(*dp_rx_desc_cookie_2_va)(struct dp_soc *soc,
1619 						     uint32_t cookie);
1620 	uint32_t (*dp_service_near_full_srngs)(struct dp_soc *soc,
1621 					       struct dp_intr *int_ctx,
1622 					       uint32_t dp_budget);
1623 
1624 	/* Control Arch Ops */
1625 	QDF_STATUS (*txrx_set_vdev_param)(struct dp_soc *soc,
1626 					  struct dp_vdev *vdev,
1627 					  enum cdp_vdev_param_type param,
1628 					  cdp_config_param_type val);
1629 
1630 	/* Misc Arch Ops */
1631 	qdf_size_t (*txrx_get_context_size)(enum dp_context_type);
1632 	int (*dp_srng_test_and_update_nf_params)(struct dp_soc *soc,
1633 						 struct dp_srng *dp_srng,
1634 						 int *max_reap_limit);
1635 	void (*tx_implicit_rbm_set)(struct dp_soc *soc, uint8_t tx_ring_id,
1636 				    uint8_t bm_id);
1637 };
1638 
1639 /**
1640  * struct dp_soc_features: Data structure holding the SOC level feature flags.
1641  * @pn_in_reo_dest: PN provided by hardware in the REO destination ring.
1642  */
1643 struct dp_soc_features {
1644 	uint8_t pn_in_reo_dest;
1645 };
1646 
1647 /* SOC level structure for data path */
1648 struct dp_soc {
1649 	/**
1650 	 * re-use memory section starts
1651 	 */
1652 
1653 	/* Common base structure - Should be the first member */
1654 	struct cdp_soc_t cdp_soc;
1655 
1656 	/* SoC Obj */
1657 	struct cdp_ctrl_objmgr_psoc *ctrl_psoc;
1658 
1659 	/* OS device abstraction */
1660 	qdf_device_t osdev;
1661 
1662 	/*cce disable*/
1663 	bool cce_disable;
1664 
1665 	/* WLAN config context */
1666 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx;
1667 
1668 	/* HTT handle for host-fw interaction */
1669 	struct htt_soc *htt_handle;
1670 
1671 	/* Commint init done */
1672 	qdf_atomic_t cmn_init_done;
1673 
1674 	/* Opaque hif handle */
1675 	struct hif_opaque_softc *hif_handle;
1676 
1677 	/* PDEVs on this SOC */
1678 	struct dp_pdev *pdev_list[MAX_PDEV_CNT];
1679 
1680 	/* Ring used to replenish rx buffers (maybe to the firmware of MAC) */
1681 	struct dp_srng rx_refill_buf_ring[MAX_PDEV_CNT];
1682 
1683 	struct dp_srng rxdma_mon_desc_ring[MAX_NUM_LMAC_HW];
1684 
1685 	/* RXDMA error destination ring */
1686 	struct dp_srng rxdma_err_dst_ring[MAX_NUM_LMAC_HW];
1687 
1688 	/* RXDMA monitor buffer replenish ring */
1689 	struct dp_srng rxdma_mon_buf_ring[MAX_NUM_LMAC_HW];
1690 
1691 	/* RXDMA monitor destination ring */
1692 	struct dp_srng rxdma_mon_dst_ring[MAX_NUM_LMAC_HW];
1693 
1694 	/* RXDMA monitor status ring. TBD: Check format of this ring */
1695 	struct dp_srng rxdma_mon_status_ring[MAX_NUM_LMAC_HW];
1696 
1697 	/* Number of PDEVs */
1698 	uint8_t pdev_count;
1699 
1700 	/*ast override support in HW*/
1701 	bool ast_override_support;
1702 
1703 	/*number of hw dscp tid map*/
1704 	uint8_t num_hw_dscp_tid_map;
1705 
1706 	/* HAL SOC handle */
1707 	hal_soc_handle_t hal_soc;
1708 
1709 	/* rx monitor pkt tlv size */
1710 	uint16_t rx_mon_pkt_tlv_size;
1711 	/* rx pkt tlv size */
1712 	uint16_t rx_pkt_tlv_size;
1713 
1714 	struct dp_arch_ops arch_ops;
1715 
1716 	/* Device ID coming from Bus sub-system */
1717 	uint32_t device_id;
1718 
1719 	/* Link descriptor pages */
1720 	struct qdf_mem_multi_page_t link_desc_pages;
1721 
1722 	/* total link descriptors for regular RX and TX */
1723 	uint32_t total_link_descs;
1724 
1725 	/* Link descriptor Idle list for HW internal use (SRNG mode) */
1726 	struct dp_srng wbm_idle_link_ring;
1727 
1728 	/* Link descriptor Idle list for HW internal use (scatter buffer mode)
1729 	 */
1730 	qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS];
1731 	void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS];
1732 	uint32_t num_scatter_bufs;
1733 
1734 	/* Tx SW descriptor pool */
1735 	struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS];
1736 
1737 	/* Tx MSDU Extension descriptor pool */
1738 	struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS];
1739 
1740 	/* Tx TSO descriptor pool */
1741 	struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS];
1742 
1743 	/* Tx TSO Num of segments pool */
1744 	struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS];
1745 
1746 	/* REO destination rings */
1747 	struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS];
1748 
1749 	/* REO exception ring - See if should combine this with reo_dest_ring */
1750 	struct dp_srng reo_exception_ring;
1751 
1752 	/* REO reinjection ring */
1753 	struct dp_srng reo_reinject_ring;
1754 
1755 	/* REO command ring */
1756 	struct dp_srng reo_cmd_ring;
1757 
1758 	/* REO command status ring */
1759 	struct dp_srng reo_status_ring;
1760 
1761 	/* WBM Rx release ring */
1762 	struct dp_srng rx_rel_ring;
1763 
1764 	/* TCL data ring */
1765 	struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS];
1766 
1767 	/* Number of TCL data rings */
1768 	uint8_t num_tcl_data_rings;
1769 
1770 	/* TCL CMD_CREDIT ring */
1771 	bool init_tcl_cmd_cred_ring;
1772 
1773 	/* It is used as credit based ring on QCN9000 else command ring */
1774 	struct dp_srng tcl_cmd_credit_ring;
1775 
1776 	/* TCL command status ring */
1777 	struct dp_srng tcl_status_ring;
1778 
1779 	/* WBM Tx completion rings */
1780 	struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS];
1781 
1782 	/* Common WBM link descriptor release ring (SW to WBM) */
1783 	struct dp_srng wbm_desc_rel_ring;
1784 
1785 	/* DP Interrupts */
1786 	struct dp_intr intr_ctx[WLAN_CFG_INT_NUM_CONTEXTS];
1787 
1788 	/* Monitor mode mac id to dp_intr_id map */
1789 	int mon_intr_id_lmac_map[MAX_NUM_LMAC_HW];
1790 	/* Rx SW descriptor pool for RXDMA monitor buffer */
1791 	struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS];
1792 
1793 	/* Rx SW descriptor pool for RXDMA status buffer */
1794 	struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS];
1795 
1796 	/* Rx SW descriptor pool for RXDMA buffer */
1797 	struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS];
1798 
1799 	/* Number of REO destination rings */
1800 	uint8_t num_reo_dest_rings;
1801 
1802 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
1803 	/* lock to control access to soc TX descriptors */
1804 	qdf_spinlock_t flow_pool_array_lock;
1805 
1806 	/* pause callback to pause TX queues as per flow control */
1807 	tx_pause_callback pause_cb;
1808 
1809 	/* flow pool related statistics */
1810 	struct dp_txrx_pool_stats pool_stats;
1811 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
1812 
1813 	uint32_t wbm_idle_scatter_buf_size;
1814 
1815 	/* VDEVs on this SOC */
1816 	struct dp_vdev *vdev_id_map[MAX_VDEV_CNT];
1817 
1818 	/* Tx H/W queues lock */
1819 	qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];
1820 
1821 	/* Tx ring map for interrupt processing */
1822 	uint8_t tx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
1823 
1824 	/* Rx ring map for interrupt processing */
1825 	uint8_t rx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
1826 
1827 	/* peer ID to peer object map (array of pointers to peer objects) */
1828 	struct dp_peer **peer_id_to_obj_map;
1829 
1830 	struct {
1831 		unsigned mask;
1832 		unsigned idx_bits;
1833 		TAILQ_HEAD(, dp_peer) * bins;
1834 	} peer_hash;
1835 
1836 	/* rx defrag state – TBD: do we need this per radio? */
1837 	struct {
1838 		struct {
1839 			TAILQ_HEAD(, dp_rx_tid) waitlist;
1840 			uint32_t timeout_ms;
1841 			uint32_t next_flush_ms;
1842 			qdf_spinlock_t defrag_lock;
1843 		} defrag;
1844 		struct {
1845 			int defrag_timeout_check;
1846 			int dup_check;
1847 		} flags;
1848 		TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list;
1849 		qdf_spinlock_t reo_cmd_lock;
1850 	} rx;
1851 
1852 	/* optional rx processing function */
1853 	void (*rx_opt_proc)(
1854 		struct dp_vdev *vdev,
1855 		struct dp_peer *peer,
1856 		unsigned tid,
1857 		qdf_nbuf_t msdu_list);
1858 
1859 	/* pool addr for mcast enhance buff */
1860 	struct {
1861 		int size;
1862 		uint32_t paddr;
1863 		uint32_t *vaddr;
1864 		struct dp_tx_me_buf_t *freelist;
1865 		int buf_in_use;
1866 		qdf_dma_mem_context(memctx);
1867 	} me_buf;
1868 
1869 	/* Protect peer hash table */
1870 	DP_MUTEX_TYPE peer_hash_lock;
1871 	/* Protect peer_id_to_objmap */
1872 	DP_MUTEX_TYPE peer_map_lock;
1873 
1874 	/* maximum value for peer_id */
1875 	uint32_t max_peers;
1876 
1877 	/* SoC level data path statistics */
1878 	struct dp_soc_stats stats;
1879 
1880 	/* timestamp to keep track of msdu buffers received on reo err ring */
1881 	uint64_t rx_route_err_start_pkt_ts;
1882 
1883 	/* Num RX Route err in a given window to keep track of rate of errors */
1884 	uint32_t rx_route_err_in_window;
1885 
1886 	/* Enable processing of Tx completion status words */
1887 	bool process_tx_status;
1888 	bool process_rx_status;
1889 	struct dp_ast_entry **ast_table;
1890 	struct {
1891 		unsigned mask;
1892 		unsigned idx_bits;
1893 		TAILQ_HEAD(, dp_ast_entry) * bins;
1894 	} ast_hash;
1895 
1896 #ifdef DP_TX_HW_DESC_HISTORY
1897 	struct dp_tx_hw_desc_history *tx_hw_desc_history;
1898 #endif
1899 
1900 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1901 	struct dp_rx_history *rx_ring_history[MAX_REO_DEST_RINGS];
1902 	struct dp_rx_refill_history *rx_refill_ring_history[MAX_PDEV_CNT];
1903 	struct dp_rx_err_history *rx_err_ring_history;
1904 	struct dp_rx_reinject_history *rx_reinject_ring_history;
1905 #endif
1906 
1907 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
1908 	struct dp_tx_tcl_history *tx_tcl_history;
1909 	struct dp_tx_comp_history *tx_comp_history;
1910 #endif
1911 
1912 	qdf_spinlock_t ast_lock;
1913 	/*Timer for AST entry ageout maintainance */
1914 	qdf_timer_t ast_aging_timer;
1915 
1916 	/*Timer counter for WDS AST entry ageout*/
1917 	uint8_t wds_ast_aging_timer_cnt;
1918 	bool pending_ageout;
1919 	uint32_t max_ast_ageout_count;
1920 	uint8_t eapol_over_control_port;
1921 
1922 	qdf_timer_t lmac_reap_timer;
1923 	uint8_t lmac_timer_init;
1924 	qdf_timer_t int_timer;
1925 	uint8_t intr_mode;
1926 	uint8_t lmac_polled_mode;
1927 
1928 	qdf_list_t reo_desc_freelist;
1929 	qdf_spinlock_t reo_desc_freelist_lock;
1930 
1931 	/* htt stats */
1932 	struct htt_t2h_stats htt_stats;
1933 
1934 	void *external_txrx_handle; /* External data path handle */
1935 #ifdef IPA_OFFLOAD
1936 	struct ipa_dp_tx_rsc ipa_uc_tx_rsc;
1937 #ifdef IPA_WDI3_TX_TWO_PIPES
1938 	/* Resources for the alternative IPA TX pipe */
1939 	struct ipa_dp_tx_rsc ipa_uc_tx_rsc_alt;
1940 #endif
1941 
1942 	/* IPA uC datapath offload Wlan Rx resources */
1943 	struct {
1944 		/* Resource info to be passed to IPA */
1945 		qdf_dma_addr_t ipa_reo_ring_base_paddr;
1946 		void *ipa_reo_ring_base_vaddr;
1947 		uint32_t ipa_reo_ring_size;
1948 		qdf_dma_addr_t ipa_reo_tp_paddr;
1949 
1950 		/* Resource info to be passed to firmware and IPA */
1951 		qdf_dma_addr_t ipa_rx_refill_buf_ring_base_paddr;
1952 		void *ipa_rx_refill_buf_ring_base_vaddr;
1953 		uint32_t ipa_rx_refill_buf_ring_size;
1954 		qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr;
1955 	} ipa_uc_rx_rsc;
1956 
1957 	qdf_atomic_t ipa_pipes_enabled;
1958 	bool ipa_first_tx_db_access;
1959 	qdf_spinlock_t ipa_rx_buf_map_lock;
1960 	bool ipa_rx_buf_map_lock_initialized;
1961 	uint8_t ipa_reo_ctx_lock_required[MAX_REO_DEST_RINGS];
1962 #endif
1963 
1964 #ifdef WLAN_FEATURE_STATS_EXT
1965 	struct {
1966 		uint32_t rx_mpdu_received;
1967 		uint32_t rx_mpdu_missed;
1968 	} ext_stats;
1969 	qdf_event_t rx_hw_stats_event;
1970 	qdf_spinlock_t rx_hw_stats_lock;
1971 	bool is_last_stats_ctx_init;
1972 #endif /* WLAN_FEATURE_STATS_EXT */
1973 
1974 	/* Flag to indicate if HTT v2 is enabled*/
1975 	bool is_peer_map_unmap_v2;
1976 	/* Per peer per Tid ba window size support */
1977 	uint8_t per_tid_basize_max_tid;
1978 	/* Soc level flag to enable da_war */
1979 	uint8_t da_war_enabled;
1980 	/* number of active ast entries */
1981 	uint32_t num_ast_entries;
1982 	/* rdk rate statistics context at soc level*/
1983 	struct cdp_soc_rate_stats_ctx *rate_stats_ctx;
1984 	/* rdk rate statistics control flag */
1985 	bool rdkstats_enabled;
1986 
1987 	/* 8021p PCP-TID map values */
1988 	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
1989 	/* TID map priority value */
1990 	uint8_t tidmap_prty;
1991 	/* Pointer to global per ring type specific configuration table */
1992 	struct wlan_srng_cfg *wlan_srng_cfg;
1993 	/* Num Tx outstanding on device */
1994 	qdf_atomic_t num_tx_outstanding;
1995 	/* Num Tx exception on device */
1996 	qdf_atomic_t num_tx_exception;
1997 	/* Num Tx allowed */
1998 	uint32_t num_tx_allowed;
1999 	/* Preferred HW mode */
2000 	uint8_t preferred_hw_mode;
2001 
2002 	/**
2003 	 * Flag to indicate whether WAR to address single cache entry
2004 	 * invalidation bug is enabled or not
2005 	 */
2006 	bool is_rx_fse_full_cache_invalidate_war_enabled;
2007 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
2008 	/**
2009 	 * Pointer to DP RX Flow FST at SOC level if
2010 	 * is_rx_flow_search_table_per_pdev is false
2011 	 * TBD: rx_fst[num_macs] if we decide to have per mac FST
2012 	 */
2013 	struct dp_rx_fst *rx_fst;
2014 #ifdef WLAN_SUPPORT_RX_FISA
2015 	uint8_t fisa_enable;
2016 
2017 	/**
2018 	 * Params used for controlling the fisa aggregation dynamically
2019 	 */
2020 	struct {
2021 		qdf_atomic_t skip_fisa;
2022 		uint8_t fisa_force_flush[MAX_REO_DEST_RINGS];
2023 	} skip_fisa_param;
2024 #endif
2025 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
2026 	/* SG supported for msdu continued packets from wbm release ring */
2027 	bool wbm_release_desc_rx_sg_support;
2028 	bool peer_map_attach_success;
2029 	/* Flag to disable mac1 ring interrupts */
2030 	bool disable_mac1_intr;
2031 	/* Flag to disable mac2 ring interrupts */
2032 	bool disable_mac2_intr;
2033 
2034 	struct {
2035 		/* 1st msdu in sg for msdu continued packets in wbm rel ring */
2036 		bool wbm_is_first_msdu_in_sg;
2037 		/* Wbm sg list head */
2038 		qdf_nbuf_t wbm_sg_nbuf_head;
2039 		/* Wbm sg list tail */
2040 		qdf_nbuf_t wbm_sg_nbuf_tail;
2041 		uint32_t wbm_sg_desc_msdu_len;
2042 	} wbm_sg_param;
2043 	/* Number of msdu exception descriptors */
2044 	uint32_t num_msdu_exception_desc;
2045 
2046 	/* RX buffer params */
2047 	struct rx_buff_pool rx_buff_pool[MAX_PDEV_CNT];
2048 	struct rx_refill_buff_pool rx_refill_buff_pool;
2049 	/* Save recent operation related variable */
2050 	struct dp_last_op_info last_op_info;
2051 	TAILQ_HEAD(, dp_peer) inactive_peer_list;
2052 	qdf_spinlock_t inactive_peer_list_lock;
2053 	TAILQ_HEAD(, dp_vdev) inactive_vdev_list;
2054 	qdf_spinlock_t inactive_vdev_list_lock;
2055 	/* lock to protect vdev_id_map table*/
2056 	qdf_spinlock_t vdev_map_lock;
2057 
2058 	/* Flow Search Table is in CMEM */
2059 	bool fst_in_cmem;
2060 
2061 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
2062 	struct dp_swlm swlm;
2063 #endif
2064 #ifdef FEATURE_RUNTIME_PM
2065 	/* Dp runtime refcount */
2066 	qdf_atomic_t dp_runtime_refcount;
2067 #endif
2068 	/* Invalid buffer that allocated for RX buffer */
2069 	qdf_nbuf_queue_t invalid_buf_queue;
2070 
2071 #ifdef FEATURE_MEC
2072 	/** @mec_lock: spinlock for MEC table */
2073 	qdf_spinlock_t mec_lock;
2074 	/** @mec_cnt: number of active mec entries */
2075 	qdf_atomic_t mec_cnt;
2076 	struct {
2077 		/** @mask: mask bits */
2078 		uint32_t mask;
2079 		/** @idx_bits: index to shift bits */
2080 		uint32_t idx_bits;
2081 		/** @bins: MEC table */
2082 		TAILQ_HEAD(, dp_mec_entry) * bins;
2083 	} mec_hash;
2084 #endif
2085 
2086 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
2087 	qdf_list_t reo_desc_deferred_freelist;
2088 	qdf_spinlock_t reo_desc_deferred_freelist_lock;
2089 	bool reo_desc_deferred_freelist_init;
2090 #endif
2091 	/* BM id for first WBM2SW  ring */
2092 	uint32_t wbm_sw0_bm_id;
2093 
2094 	/* Store arch_id from device_id */
2095 	uint16_t arch_id;
2096 
2097 	/* link desc ID start per device type */
2098 	uint32_t link_desc_id_start;
2099 
2100 	/* CMEM buffer target reserved for host usage */
2101 	uint64_t cmem_base;
2102 	/* CMEM size in bytes */
2103 	uint64_t cmem_size;
2104 
2105 	/* SOC level feature flags */
2106 	struct dp_soc_features features;
2107 
2108 #ifdef WIFI_MONITOR_SUPPORT
2109 	struct dp_mon_soc *monitor_soc;
2110 #endif
2111 	bool rxdma2sw_rings_not_supported;
2112 };
2113 
#ifdef IPA_OFFLOAD
/**
 * struct dp_ipa_resources - Resources shared between the DP layer and IPA
 * @tx_ring: shared memory backing the TX transfer ring
 * @tx_num_alloc_buffer: number of TX buffers allocated for the ring
 * @tx_comp_ring: shared memory backing the TX completion ring
 * @rx_rdy_ring: shared memory backing the RX ready ring
 * @rx_refill_ring: shared memory backing the RX refill buffer ring
 * @tx_comp_doorbell_paddr: physical address of the IPA uC TX completion
 *	doorbell register
 * @tx_comp_doorbell_vaddr: virtual address of the TX completion doorbell
 * @rx_ready_doorbell_paddr: physical address of the IPA uC RX ready
 *	doorbell register
 * @is_db_ddr_mapped: set when the doorbell registers are mapped through
 *	DDR rather than device I/O space
 * @tx_alt_ring: shared memory for the alternative (second) TX pipe ring
 * @tx_alt_ring_num_alloc_buffer: number of buffers for the alternative ring
 * @tx_alt_comp_ring: shared memory for the alternative TX completion ring
 * @tx_alt_comp_doorbell_paddr: physical address of the alternative TX
 *	completion doorbell register
 * @tx_alt_comp_doorbell_vaddr: virtual address of the same register
 */
struct dp_ipa_resources {
	qdf_shared_mem_t tx_ring;
	uint32_t tx_num_alloc_buffer;

	qdf_shared_mem_t tx_comp_ring;
	qdf_shared_mem_t rx_rdy_ring;
	qdf_shared_mem_t rx_refill_ring;

	/* IPA UC doorbell registers paddr */
	qdf_dma_addr_t tx_comp_doorbell_paddr;
	uint32_t *tx_comp_doorbell_vaddr;
	qdf_dma_addr_t rx_ready_doorbell_paddr;

	bool is_db_ddr_mapped;

#ifdef IPA_WDI3_TX_TWO_PIPES
	qdf_shared_mem_t tx_alt_ring;
	uint32_t tx_alt_ring_num_alloc_buffer;
	qdf_shared_mem_t tx_alt_comp_ring;

	/* IPA UC doorbell registers paddr */
	qdf_dma_addr_t tx_alt_comp_doorbell_paddr;
	uint32_t *tx_alt_comp_doorbell_vaddr;
#endif
};
#endif
2144 
#define MAX_RX_MAC_RINGS 2
/* Same as NAC_MAX_CLENT */
#define DP_NAC_MAX_CLIENT  24

/*
 * Link descriptor cookie layout:
 * 24 bits cookie size
 * 10 bits page id 0 ~ 1023 for MCL
 * 3 bits page id 0 ~ 7 for WIN
 * WBM Idle List Desc size = 128,
 * Num descs per page = 4096/128 = 32 for MCL
 * Num descs per page = 2MB/128 = 16384 for WIN
 */
/*
 * Macros to setup link descriptor cookies - for link descriptors, we just
 * need first 3 bits to store bank/page ID for WIN. The
 * remaining bytes will be used to set a unique ID, which will
 * be useful in debugging
 */
#ifdef MAX_ALLOC_PAGE_SIZE
/* MCL layout: page id in bits [5..10] of the cookie, desc id in the rest */
#define LINK_DESC_PAGE_ID_MASK  0x007FE0
#define LINK_DESC_ID_SHIFT      5
#define LINK_DESC_COOKIE(_desc_id, _page_id, _desc_id_start) \
	((((_page_id) + (_desc_id_start)) << LINK_DESC_ID_SHIFT) | (_desc_id))
#define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
	(((_cookie) & LINK_DESC_PAGE_ID_MASK) >> LINK_DESC_ID_SHIFT)
#else
/* WIN layout: page id in the low 3 bits, unique desc id above them */
#define LINK_DESC_PAGE_ID_MASK  0x7
#define LINK_DESC_ID_SHIFT      3
#define LINK_DESC_COOKIE(_desc_id, _page_id, _desc_id_start) \
	((((_desc_id) + (_desc_id_start)) << LINK_DESC_ID_SHIFT) | (_page_id))
#define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
	((_cookie) & LINK_DESC_PAGE_ID_MASK)
#endif
/* Starting desc-id value passed as _desc_id_start, selected per target's
 * cookie ID width (21-bit vs 20-bit) — see link_desc_id_start in dp_soc
 */
#define LINK_DESC_ID_START_21_BITS_COOKIE 0x8000
#define LINK_DESC_ID_START_20_BITS_COOKIE 0x4000
2180 
/**
 * enum dp_nac_param_cmd - neighbour AP client (NAC) list commands;
 * values mirror enum ieee80211_nac_param one-to-one
 * @DP_NAC_PARAM_ADD: add a NAC entry (IEEE80211_NAC_PARAM_ADD)
 * @DP_NAC_PARAM_DEL: delete a NAC entry (IEEE80211_NAC_PARAM_DEL)
 * @DP_NAC_PARAM_LIST: list NAC entries (IEEE80211_NAC_PARAM_LIST)
 */
enum dp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	DP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	DP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	DP_NAC_PARAM_LIST,
};
2190 
2191 /**
2192  * struct dp_neighbour_peer - neighbour peer list type for smart mesh
2193  * @neighbour_peers_macaddr: neighbour peer's mac address
2194  * @neighbour_peer_list_elem: neighbour peer list TAILQ element
2195  * @ast_entry: ast_entry for neighbour peer
2196  * @rssi: rssi value
2197  */
2198 struct dp_neighbour_peer {
2199 	/* MAC address of neighbour's peer */
2200 	union dp_align_mac_addr neighbour_peers_macaddr;
2201 	struct dp_vdev *vdev;
2202 	struct dp_ast_entry *ast_entry;
2203 	uint8_t rssi;
2204 	/* node in the list of neighbour's peer */
2205 	TAILQ_ENTRY(dp_neighbour_peer) neighbour_peer_list_elem;
2206 };
2207 
#ifdef WLAN_TX_PKT_CAPTURE_ENH
/* NOTE(review): redefines the guard macro to 1 inside its own #ifdef;
 * presumably the build system defines it as 1 already (benign redefinition)
 * — confirm no -Wmacro-redefined warning in builds that define it empty.
 */
#define WLAN_TX_PKT_CAPTURE_ENH 1
/* Queued-PPDU count above which TX PPDU processing is kicked */
#define DP_TX_PPDU_PROC_THRESHOLD 8
/* TX PPDU processing timeout; units defined by the caller (TODO confirm) */
#define DP_TX_PPDU_PROC_TIMEOUT 10
#endif
2213 
2214 /**
2215  * struct ppdu_info - PPDU Status info descriptor
2216  * @ppdu_id: Unique ppduid assigned by firmware for every tx packet
2217  * @sched_cmdid: schedule command id, which will be same in a burst
2218  * @max_ppdu_id: wrap around for ppdu id
2219  * @last_tlv_cnt: Keep track for missing ppdu tlvs
2220  * @last_user: last ppdu processed for user
2221  * @is_ampdu: set if Ampdu aggregate
2222  * @nbuf: ppdu descriptor payload
2223  * @ppdu_desc: ppdu descriptor
2224  * @ppdu_info_list_elem: linked list of ppdu tlvs
2225  * @ppdu_info_queue_elem: Singly linked list (queue) of ppdu tlvs
2226  * @mpdu_compltn_common_tlv: Successful tlv counter from COMPLTN COMMON tlv
2227  * @mpdu_ack_ba_tlv: Successful tlv counter from ACK BA tlv
2228  */
2229 struct ppdu_info {
2230 	uint32_t ppdu_id;
2231 	uint32_t sched_cmdid;
2232 	uint32_t max_ppdu_id;
2233 	uint32_t tsf_l32;
2234 	uint16_t tlv_bitmap;
2235 	uint16_t last_tlv_cnt;
2236 	uint16_t last_user:8,
2237 		 is_ampdu:1;
2238 	qdf_nbuf_t nbuf;
2239 	struct cdp_tx_completion_ppdu *ppdu_desc;
2240 #ifdef WLAN_TX_PKT_CAPTURE_ENH
2241 	union {
2242 		TAILQ_ENTRY(ppdu_info) ppdu_info_dlist_elem;
2243 		STAILQ_ENTRY(ppdu_info) ppdu_info_slist_elem;
2244 	} ulist;
2245 #define ppdu_info_list_elem ulist.ppdu_info_dlist_elem
2246 #define ppdu_info_queue_elem ulist.ppdu_info_slist_elem
2247 #else
2248 	TAILQ_ENTRY(ppdu_info) ppdu_info_list_elem;
2249 #endif
2250 	uint8_t compltn_common_tlv;
2251 	uint8_t ack_ba_tlv;
2252 	bool done;
2253 };
2254 
2255 /**
2256  * struct msdu_completion_info - wbm msdu completion info
2257  * @ppdu_id            - Unique ppduid assigned by firmware for every tx packet
2258  * @peer_id            - peer_id
2259  * @tid                - tid which used during transmit
2260  * @first_msdu         - first msdu indication
2261  * @last_msdu          - last msdu indication
2262  * @msdu_part_of_amsdu - msdu part of amsdu
2263  * @transmit_cnt       - retried count
2264  * @status             - transmit status
2265  * @tsf                - timestamp which it transmitted
2266  */
2267 struct msdu_completion_info {
2268 	uint32_t ppdu_id;
2269 	uint16_t peer_id;
2270 	uint8_t tid;
2271 	uint8_t first_msdu:1,
2272 		last_msdu:1,
2273 		msdu_part_of_amsdu:1;
2274 	uint8_t transmit_cnt;
2275 	uint8_t status;
2276 	uint32_t tsf;
2277 };
2278 
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * struct rx_protocol_tag_map - user-configured tag for one protocol type;
 * indexed by protocol type in the per-pdev rx_proto_tag_map array
 * @tag: the user configured tag for the said protocol type
 */
struct rx_protocol_tag_map {
	/* This is the user configured tag for the said protocol type */
	uint16_t tag;
};

#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
/**
 * struct rx_protocol_tag_stats - per-protocol tagged-MSDU counter
 * @tag_ctr: number of MSDUs tagged with this protocol tag
 */
struct rx_protocol_tag_stats {
	uint32_t tag_ctr;
};
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */

#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
2292 
#ifdef WLAN_RX_PKT_CAPTURE_ENH
/* Template data (magic pattern) to be set for Enhanced RX Monitor packets */
#define RX_MON_CAP_ENH_TRAILER 0xdeadc0dedeadda7a

/**
 * struct dp_rx_mon_enh_trailer_data - Data structure to set a known pattern
 * at end of each MSDU in monitor-lite mode
 * @reserved1: reserved for future use
 * @reserved2: reserved for future use
 * @flow_tag: flow tag value read from skb->cb
 * @protocol_tag: protocol tag value read from skb->cb
 */
struct dp_rx_mon_enh_trailer_data {
	uint16_t reserved1;
	uint16_t reserved2;
	uint16_t flow_tag;
	uint16_t protocol_tag;
};
#endif /* WLAN_RX_PKT_CAPTURE_ENH */
2312 
#ifdef HTT_STATS_DEBUGFS_SUPPORT
/* Number of debugfs entries created for HTT stats */
#define PDEV_HTT_STATS_DBGFS_SIZE HTT_DBG_NUM_EXT_STATS

/* struct pdev_htt_stats_dbgfs_priv - Structure to maintain debugfs information
 * of HTT stats
 * @pdev: dp pdev of debugfs entry
 * @stats_id: stats id of debugfs entry
 */
struct pdev_htt_stats_dbgfs_priv {
	struct dp_pdev *pdev;
	uint16_t stats_id;
};

/* struct pdev_htt_stats_dbgfs_cfg - PDEV level data structure for debugfs
 * support for HTT stats
 * @debugfs_entry: qdf_debugfs directory entry
 * @m: qdf debugfs file handler
 * @pdev_htt_stats_dbgfs_ops: File operations of entry created
 * @priv: HTT stats debugfs private object
 * @htt_stats_dbgfs_event: HTT stats event for debugfs support
 * @lock: HTT stats debugfs lock
 * @htt_stats_dbgfs_msg_process: Function callback to print HTT stats
 *
 * NOTE(review): debugfs_entry is sized PDEV_HTT_STATS_DBGFS_SIZE while the
 * ops/priv arrays are sized PDEV_HTT_STATS_DBGFS_SIZE - 1 — presumably one
 * entry is the directory itself; confirm against the creation code.
 */
struct pdev_htt_stats_dbgfs_cfg {
	qdf_dentry_t debugfs_entry[PDEV_HTT_STATS_DBGFS_SIZE];
	qdf_debugfs_file_t m;
	struct qdf_debugfs_fops
			pdev_htt_stats_dbgfs_ops[PDEV_HTT_STATS_DBGFS_SIZE - 1];
	struct pdev_htt_stats_dbgfs_priv priv[PDEV_HTT_STATS_DBGFS_SIZE - 1];
	qdf_event_t htt_stats_dbgfs_event;
	qdf_mutex_t lock;
	void (*htt_stats_dbgfs_msg_process)(void *data, A_INT32 len);
};
#endif /* HTT_STATS_DEBUGFS_SUPPORT */
2348 
/**
 * struct dp_srng_ring_state - snapshot of one SRNG's pointers
 * @ring_type: HAL ring type this snapshot belongs to
 * @sw_head: software head pointer index
 * @sw_tail: software tail pointer index
 * @hw_head: hardware head pointer index
 * @hw_tail: hardware tail pointer index
 */
struct dp_srng_ring_state {
	enum hal_ring_type ring_type;
	uint32_t sw_head;
	uint32_t sw_tail;
	uint32_t hw_head;
	uint32_t hw_tail;

};
2357 
/**
 * struct dp_soc_srngs_state - snapshot of all SRNG ring states for a soc
 * @seq_num: sequence number of this snapshot
 * @max_ring_id: number of valid entries in @ring_state
 * @ring_state: per-ring pointer snapshots
 * @list_elem: list node (queued on dp_pdev bkp_stats list)
 */
struct dp_soc_srngs_state {
	uint32_t seq_num;
	uint32_t max_ring_id;
	struct dp_srng_ring_state ring_state[DP_MAX_SRNGS];
	TAILQ_ENTRY(dp_soc_srngs_state) list_elem;
};
2364 
/* PDEV level structure for data path */
struct dp_pdev {
	/**
	 * Re-use Memory Section Starts
	 */

	/* PDEV Id */
	int pdev_id;

	/* LMAC Id */
	int lmac_id;

	/* Target pdev Id */
	int target_pdev_id;

	/* TXRX SOC handle */
	struct dp_soc *soc;

	/* pdev deinit state; presumably set once deinit has run — confirm */
	bool pdev_deinit;

	/* pdev status down or up required to handle dynamic hw
	 * mode switch between DBS and DBS_SBS.
	 * 1 = down
	 * 0 = up
	 */
	bool is_pdev_down;

	/* Second ring used to replenish rx buffers */
	struct dp_srng rx_refill_buf_ring2;

	/* Empty ring used by firmware to post rx buffers to the MAC */
	struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];

	/* Per regulatory band to lmac id mapping */
	int ch_band_lmac_id_mapping[REG_BAND_UNKNOWN];

	/* wlan_cfg pdev ctxt*/
	 struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx;

	/**
	 * TODO: See if we need a ring map here for LMAC rings.
	 * 1. Monitor rings are currently planning to be processed on receiving
	 * PPDU end interrupts and hence wont need ring based interrupts.
	 * 2. Rx buffer rings will be replenished during REO destination
	 * processing and doesn't require regular interrupt handling - we will
	 * only handle low water mark interrupts which is not expected
	 * frequently
	 */

	/* VDEV list */
	TAILQ_HEAD(, dp_vdev) vdev_list;

	/* vdev list lock */
	qdf_spinlock_t vdev_list_lock;

	/* Number of vdevs this device have */
	uint16_t vdev_count;

	/* PDEV transmit lock */
	qdf_spinlock_t tx_lock;

	/* tx_mutex for me (multicast enhancement) */
	DP_MUTEX_TYPE tx_mutex;

	/* msdu chain head & tail */
	qdf_nbuf_t invalid_peer_head_msdu;
	qdf_nbuf_t invalid_peer_tail_msdu;

	/* Band steering  */
	/* TBD */

	/* PDEV level data path statistics */
	struct cdp_pdev_stats stats;

	/* Global RX decap mode for the device */
	enum htt_pkt_type rx_decap_mode;

	/* Enhanced Stats is enabled */
	bool enhanced_stats_en;

	/* Number of TX descriptors currently outstanding on this pdev */
	qdf_atomic_t num_tx_outstanding;
	/* Max TX descriptors seen outstanding — presumably a high watermark,
	 * confirm against the update site
	 */
	int32_t tx_descs_max;

	/* Number of exception-path TX frames outstanding */
	qdf_atomic_t num_tx_exception;

	/* MCL specific local peer handle */
	struct {
		uint8_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
		uint8_t freelist;
		qdf_spinlock_t lock;
		struct dp_peer *map[OL_TXRX_NUM_LOCAL_PEER_IDS];
	} local_peer_ids;

	/* dscp_tid_map_*/
	uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX];

	/* operating channel */
	struct {
		uint8_t num;
		uint8_t band;
		uint16_t freq;
	} operating_channel;

	/* pool addr for mcast enhance buff */
	struct {
		int size;
		uint32_t paddr;
		char *vaddr;
		struct dp_tx_me_buf_t *freelist;
		int buf_in_use;
		qdf_dma_mem_context(memctx);
	} me_buf;

	/* TID override for HMMC frames — TODO confirm HMMC expansion */
	bool hmmc_tid_override_en;
	uint8_t hmmc_tid;

	/* Number of VAPs with mcast enhancement enabled */
	qdf_atomic_t mc_num_vap_attached;

	/* Signals completion of a firmware stats request — confirm */
	qdf_atomic_t stats_cmd_complete;

#ifdef IPA_OFFLOAD
	/* Callback plus caller context invoked on IPA uC op messages */
	ipa_uc_op_cb_type ipa_uc_op_cb;
	void *usr_ctxt;
	struct dp_ipa_resources ipa_resource;
#endif

	/* TBD */

	/* map this pdev to a particular Reo Destination ring */
	enum cdp_host_reo_dest_ring reo_dest;

	/* WDI event handlers */
	struct wdi_event_subscribe_t **wdi_event_list;

	/* CFR RCC mode enabled — TODO confirm semantics */
	bool cfr_rcc_mode;

	/* enable time latency check for tx completion */
	bool latency_capture_enable;

	/* enable calculation of delay stats*/
	bool delay_stats_flag;
	void *dp_txrx_handle; /* Advanced data path handle */
	/* Most recent PPDU id seen on this pdev — confirm */
	uint32_t ppdu_id;
	bool first_nbuf;
	/* Current noise-floor reading for the pdev channel */
	int16_t chan_noise_floor;

	/*
	 * For multiradio device, this flag indicates if
	 * this radio is primary or secondary.
	 *
	 * For HK 1.0, this is used for WAR for the AST issue.
	 * HK 1.x mandates creation of only 1 AST entry with same MAC address
	 * across 2 radios. is_primary indicates the radio on which DP should
	 * install HW AST entry if there is a request to add 2 AST entries
	 * with same MAC address across 2 radios
	 */
	uint8_t is_primary;
	/* Sojourn (TX queueing delay) stats and the nbuf used to post them */
	struct cdp_tx_sojourn_stats sojourn_stats;
	qdf_nbuf_t sojourn_buf;

	/* peer pointer for collecting invalid peer stats */
	struct dp_peer *invalid_peer;

	/* Free RX descriptor list head/tail for this pdev */
	union dp_rx_desc_list_elem_t *free_list_head;
	union dp_rx_desc_list_elem_t *free_list_tail;
	/* Cached peer_id from htt_peer_details_tlv */
	uint16_t fw_stats_peer_id;

	/* qdf_event for fw_peer_stats */
	qdf_event_t fw_peer_stats_event;

	/* User configured max number of tx buffers */
	uint32_t num_tx_allowed;

	/* unique cookie required for peer session */
	uint32_t next_peer_cookie;

	/*
	 * Run time enabled when the first protocol tag is added,
	 * run time disabled when the last protocol tag is deleted
	 */
	bool  is_rx_protocol_tagging_enabled;

#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	/*
	 * The protocol type is used as array index to save
	 * user provided tag info
	 */
	struct rx_protocol_tag_map rx_proto_tag_map[RX_PROTOCOL_TAG_MAX];

#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	/*
	 * Track msdus received from each reo ring separately to avoid
	 * simultaneous writes from different core
	 */
	struct rx_protocol_tag_stats
		reo_proto_tag_stats[MAX_REO_DEST_RINGS][RX_PROTOCOL_TAG_MAX];
	/* Track msdus received from exception ring separately */
	struct rx_protocol_tag_stats
		rx_err_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	/**
	 * Pointer to DP Flow FST at SOC level if
	 * is_rx_flow_search_table_per_pdev is true
	 */
	struct dp_rx_fst *rx_fst;
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

#ifdef FEATURE_TSO_STATS
	/* TSO Id to index into TSO packet information */
	qdf_atomic_t tso_idx;
#endif /* FEATURE_TSO_STATS */

#ifdef WLAN_SUPPORT_DATA_STALL
	/* Callback invoked when a data stall is detected */
	data_stall_detect_cb data_stall_detect_callback;
#endif /* WLAN_SUPPORT_DATA_STALL */

	/* flag to indicate whether LRO hash command has been sent to FW */
	uint8_t is_lro_hash_configured;

#ifdef HTT_STATS_DEBUGFS_SUPPORT
	/* HTT stats debugfs params */
	struct pdev_htt_stats_dbgfs_cfg *dbgfs_cfg;
#endif
	/* Deferred collection of per-SRNG ring-state snapshots
	 * (presumably for backpressure debugging — confirm)
	 */
	struct {
		qdf_work_t work;
		qdf_workqueue_t *work_queue;
		uint32_t seq_num;
		qdf_spinlock_t list_lock;

		TAILQ_HEAD(, dp_soc_srngs_state) list;
	} bkp_stats;
#ifdef WIFI_MONITOR_SUPPORT
	struct dp_mon_pdev *monitor_pdev;
#endif
};
2605 
2606 struct dp_peer;
2607 
/* VDEV structure for data path state */
struct dp_vdev {
	/* OS device abstraction */
	qdf_device_t osdev;

	/* physical device that is the parent of this virtual device */
	struct dp_pdev *pdev;

	/* VDEV operating mode */
	enum wlan_op_mode opmode;

	/* VDEV subtype */
	enum wlan_op_subtype subtype;

	/* Tx encapsulation type for this VAP */
	enum htt_cmn_pkt_type tx_encap_type;

	/* Rx Decapsulation type for this VAP */
	enum htt_cmn_pkt_type rx_decap_type;

	/* WDS enabled */
	bool wds_enabled;

	/* MEC enabled */
	bool mec_enabled;

#ifdef QCA_SUPPORT_WDS_EXTENDED
	bool wds_ext_enabled;
#endif /* QCA_SUPPORT_WDS_EXTENDED */

#ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
	/* Skip BAR update plus timestamp of the last skip */
	bool skip_bar_update;
	unsigned long skip_bar_update_last_ts;
#endif
	/* WDS Aging timer period */
	uint32_t wds_aging_timer_val;

	/* NAWDS enabled */
	bool nawds_enabled;

	/* Multicast enhancement enabled */
	uint8_t mcast_enhancement_en;

	/* IGMP multicast enhancement enabled */
	uint8_t igmp_mcast_enhanc_en;

	/* HW TX Checksum Enabled Flag */
	uint8_t csum_enabled;

	/* vdev_id - ID used to specify a particular vdev to the target */
	uint8_t vdev_id;

	/* Default HTT meta data for this VDEV */
	/* TBD: check alignment constraints */
	uint16_t htt_tcl_metadata;

	/* Mesh mode vdev */
	uint32_t mesh_vdev;

	/* Mesh mode rx filter setting */
	uint32_t mesh_rx_filter;

	/* DSCP-TID mapping table ID */
	uint8_t dscp_tid_map_id;

	/* Address search type to be set in TX descriptor */
	uint8_t search_type;

	/*
	 * Flag to indicate if s/w tid classification should be
	 * skipped
	 */
	uint8_t skip_sw_tid_classification;

	/* Flag to enable peer authorization */
	uint8_t peer_authorize;

	/* AST hash value for BSS peer in HW valid for STA VAP*/
	uint16_t bss_ast_hash;

	/* vdev lmac_id */
	int lmac_id;

	/* Multipass (per-VLAN) TX classification enabled */
	bool multipass_en;

	/* Address search flags to be configured in HAL descriptor */
	uint8_t hal_desc_addr_search_flags;

	/* Handle to the OS shim SW's virtual device */
	ol_osif_vdev_handle osif_vdev;

	/* MAC address */
	union dp_align_mac_addr mac_addr;

	/* node in the pdev's list of vdevs */
	TAILQ_ENTRY(dp_vdev) vdev_list_elem;

	/* dp_peer list */
	TAILQ_HEAD(, dp_peer) peer_list;
	/* to protect peer_list */
	DP_MUTEX_TYPE peer_list_lock;

	/* RX call back function to flush GRO packets*/
	ol_txrx_rx_gro_flush_ind_fp osif_gro_flush;
	/* default RX call back function called by dp */
	ol_txrx_rx_fp osif_rx;
#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
	/* callback to receive eapol frames */
	ol_txrx_rx_fp osif_rx_eapol;
#endif
	/* callback to deliver rx frames to the OS */
	ol_txrx_rx_fp osif_rx_stack;
	/* Callback to handle rx fisa frames */
	ol_txrx_fisa_rx_fp osif_fisa_rx;
	ol_txrx_fisa_flush_fp osif_fisa_flush;

	/* call back function to flush out queued rx packets*/
	ol_txrx_rx_flush_fp osif_rx_flush;
	ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap;
	ol_txrx_get_key_fp osif_get_key;
	ol_txrx_tx_free_ext_fp osif_tx_free_ext;

#ifdef notyet
	/* callback to check if the msdu is an WAI (WAPI) frame */
	ol_rx_check_wai_fp osif_check_wai;
#endif

	/* proxy arp function */
	ol_txrx_proxy_arp_fp osif_proxy_arp;

	/* multicast enhancement conversion function */
	ol_txrx_mcast_me_fp me_convert;

	/* completion function used by this vdev*/
	ol_txrx_completion_fp tx_comp;

	/* deferred vdev deletion state */
	struct {
		/* VDEV delete pending */
		int pending;
		/*
		 * callback and a context argument to provide a
		 * notification for when the vdev is deleted.
		 */
		ol_txrx_vdev_delete_cb callback;
		void *context;
	} delete;

	/* tx data delivery notification callback function */
	struct {
		ol_txrx_data_tx_cb func;
		void *ctxt;
	} tx_non_std_data_callback;


	/* safe mode control to bypass the encrypt and decipher process*/
	uint32_t safemode;

	/* rx filter related */
	uint32_t drop_unenc;
#ifdef notyet
	privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
	uint32_t filters_num;
#endif
	/* TDLS Link status */
	bool tdls_link_connected;
	bool is_tdls_frame;

	/* per vdev rx nbuf queue */
	qdf_nbuf_queue_t rxq;

	/* TCL ring id and descriptor pools used for TX on this vdev */
	uint8_t tx_ring_id;
	struct dp_tx_desc_pool_s *tx_desc;
	struct dp_tx_ext_desc_pool_s *tx_ext_desc;

	/* VDEV Stats */
	struct cdp_vdev_stats stats;

	/* Is this a proxySTA VAP */
	bool proxysta_vdev;
	/* Is isolation mode enabled */
	bool isolation_vdev;

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	struct dp_tx_desc_pool_s *pool;
#endif
	/* AP BRIDGE enabled */
	bool ap_bridge_enabled;

	enum cdp_sec_type  sec_type;

	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
	bool raw_mode_war;


	/* AST hash index for BSS peer in HW valid for STA VAP*/
	uint16_t bss_ast_idx;

	/* Capture timestamp of previous tx packet enqueued */
	uint64_t prev_tx_enq_tstamp;

	/* Capture timestamp of previous rx packet delivered */
	uint64_t prev_rx_deliver_tstamp;

	/* 8021p PCP-TID mapping table ID */
	uint8_t tidmap_tbl_id;

	/* 8021p PCP-TID map values */
	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];

	/* TIDmap priority */
	uint8_t tidmap_prty;

#ifdef QCA_MULTIPASS_SUPPORT
	/* Per-peer IV-to-VLAN map for multipass — confirm key semantics */
	uint16_t *iv_vlan_map;

	/* dp_peer special list */
	TAILQ_HEAD(, dp_peer) mpass_peer_list;
	DP_MUTEX_TYPE mpass_peer_mutex;
#endif
	/* Extended data path handle */
	struct cdp_ext_vdev *vdev_dp_ext_handle;
#ifdef VDEV_PEER_PROTOCOL_COUNT
	/*
	 * Rx-Ingress and Tx-Egress are in the lower level DP layer
	 * Rx-Egress and Tx-ingress are handled in osif layer for DP
	 * So
	 * Rx-Egress and Tx-ingress mask definitions are in OSIF layer
	 * Rx-Ingress and Tx-Egress definitions are here below
	 */
#define VDEV_PEER_PROTOCOL_RX_INGRESS_MASK 1
#define VDEV_PEER_PROTOCOL_TX_INGRESS_MASK 2
#define VDEV_PEER_PROTOCOL_RX_EGRESS_MASK 4
#define VDEV_PEER_PROTOCOL_TX_EGRESS_MASK 8
	bool peer_protocol_count_track;
	int peer_protocol_count_dropmask;
#endif
	/* callback to collect connectivity stats */
	ol_txrx_stats_rx_fp stats_cb;
	uint32_t num_peers;
	/* entry to inactive_list*/
	TAILQ_ENTRY(dp_vdev) inactive_list_elem;

#ifdef WLAN_SUPPORT_RX_FISA
	/**
	 * Params used for controlling the fisa aggregation dynamically
	 */
	uint8_t fisa_disallowed[MAX_REO_DEST_RINGS];
	uint8_t fisa_force_flushed[MAX_REO_DEST_RINGS];
#endif
	/*
	 * Refcount for VDEV currently incremented when
	 * peer is created for VDEV
	 */
	qdf_atomic_t ref_cnt;
	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
	uint8_t num_latency_critical_conn;
#ifdef WLAN_SUPPORT_MESH_LATENCY
	uint8_t peer_tid_latency_enabled;
	/* tid latency configuration parameters */
	struct {
		uint32_t service_interval;
		uint32_t burst_size;
		uint8_t latency_tid;
	} mesh_tid_latency_config;
#endif
#ifdef WIFI_MONITOR_SUPPORT
	struct dp_mon_vdev *monitor_vdev;
#endif

#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
	/* Indicate if uplink delay report is enabled or not */
	qdf_atomic_t ul_delay_report;
	/* Delta between TQM clock and TSF clock */
	uint32_t delta_tsf;
	/* accumulative delay for every TX completion */
	qdf_atomic_t ul_delay_accum;
	/* accumulative number of packets delay has accumulated */
	qdf_atomic_t ul_pkts_accum;
#endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
};
2888 
/* Security key type index: multicast vs unicast key */
enum {
	dp_sec_mcast = 0,
	dp_sec_ucast
};
2893 
#ifdef WDS_VENDOR_EXTENSION
/* Per-peer WDS vendor-extension policy bits controlling 4-address
 * frame handling on TX and RX
 */
typedef struct {
	uint8_t	wds_tx_mcast_4addr:1,
		wds_tx_ucast_4addr:1,
		wds_rx_filter:1,      /* enforce rx filter */
		wds_rx_ucast_4addr:1, /* when set, accept 4addr unicast frames    */
		wds_rx_mcast_4addr:1;  /* when set, accept 4addr multicast frames  */

} dp_ecm_policy;
#endif
2904 
2905 /*
2906  * struct dp_peer_cached_bufq - cached_bufq to enqueue rx packets
2907  * @cached_bufq: nbuff list to enqueue rx packets
2908  * @bufq_lock: spinlock for nbuff list access
2909  * @thres: maximum threshold for number of rx buff to enqueue
2910  * @entries: number of entries
2911  * @dropped: number of packets dropped
2912  */
2913 struct dp_peer_cached_bufq {
2914 	qdf_list_t cached_bufq;
2915 	qdf_spinlock_t bufq_lock;
2916 	uint32_t thresh;
2917 	uint32_t entries;
2918 	uint32_t dropped;
2919 };
2920 
2921 /**
2922  * enum dp_peer_ast_flowq
2923  * @DP_PEER_AST_FLOWQ_HI_PRIO: Hi Priority flow queue
2924  * @DP_PEER_AST_FLOWQ_LOW_PRIO: Low priority flow queue
2925  * @DP_PEER_AST_FLOWQ_UDP: flow queue type is UDP
2926  * @DP_PEER_AST_FLOWQ_NON_UDP: flow queue type is Non UDP
2927  */
2928 enum dp_peer_ast_flowq {
2929 	DP_PEER_AST_FLOWQ_HI_PRIO,
2930 	DP_PEER_AST_FLOWQ_LOW_PRIO,
2931 	DP_PEER_AST_FLOWQ_UDP,
2932 	DP_PEER_AST_FLOWQ_NON_UDP,
2933 	DP_PEER_AST_FLOWQ_MAX,
2934 };
2935 
2936 /*
2937  * struct dp_ast_flow_override_info - ast override info
2938  * @ast_index - ast indexes in peer map message
2939  * @ast_valid_mask - ast valid mask for each ast index
2940  * @ast_flow_mask - ast flow mask for each ast index
2941  * @tid_valid_low_pri_mask - per tid mask for low priority flow
2942  * @tid_valid_hi_pri_mask - per tid mask for hi priority flow
2943  */
2944 struct dp_ast_flow_override_info {
2945 	uint16_t ast_idx[DP_PEER_AST_FLOWQ_MAX];
2946 	uint8_t ast_valid_mask;
2947 	uint8_t ast_flow_mask[DP_PEER_AST_FLOWQ_MAX];
2948 	uint8_t tid_valid_low_pri_mask;
2949 	uint8_t tid_valid_hi_pri_mask;
2950 };
2951 
2952 /*
2953  * struct dp_peer_ast_params - ast parameters for a msdu flow-queue
2954  * @ast_index - ast index populated by FW
2955  * @is_valid - ast flow valid mask
2956  * @valid_tid_mask - per tid mask for this ast index
2957  * @flowQ - flow queue id associated with this ast index
2958  */
2959 struct dp_peer_ast_params {
2960 	uint16_t ast_idx;
2961 	uint8_t is_valid;
2962 	uint8_t valid_tid_mask;
2963 	uint8_t flowQ;
2964 };
2965 
#ifdef WLAN_SUPPORT_SCS
/* SCS procedures macros */
/* SCS Procedures - SCS parameters
 * obtained from SCS request are stored
 * in a peer based database for traffic
 * classification.
 */
/* Max number of SCS sessions tracked per peer (sizes dp_peer.scs[]) */
#define IEEE80211_SCS_MAX_NO_OF_ELEM 10
#endif
2975 
#ifdef WLAN_SUPPORT_MSCS
/*MSCS Procedure based macros */
#define IEEE80211_MSCS_MAX_ELEM_SIZE    5
#define IEEE80211_TCLAS_MASK_CLA_TYPE_4  4
/*
 * struct dp_peer_mscs_parameter - MSCS database obtained from
 * MSCS Request and Response in the control path. This data is used
 * by the AP to find out what priority to set based on the tuple
 * classification during packet processing.
 * @user_priority_bitmap - User priority bitmap obtained during
 * handshake
 * @user_priority_limit - User priority limit obtained during
 * handshake
 * @classifier_mask - params to be compared during processing
 */
struct dp_peer_mscs_parameter {
	uint8_t user_priority_bitmap;
	uint8_t user_priority_limit;
	uint8_t classifier_mask;
};
#endif
2997 
#ifdef QCA_SUPPORT_WDS_EXTENDED
/* Bit position in dp_wds_ext_peer.init used to track netdev registration */
#define WDS_EXT_PEER_INIT_BIT 0

/**
 * struct dp_wds_ext_peer - wds ext peer structure
 * This is used when wds extended feature is enabled
 * both compile time and run time. It is created
 * when 1st 4 address frame is received from
 * wds backhaul.
 * @osif_peer: Handle to the OS shim SW's peer object
 * @init: wds ext netdev state bitmap (see WDS_EXT_PEER_INIT_BIT)
 */
struct dp_wds_ext_peer {
	ol_osif_peer_handle osif_peer;
	unsigned long init;
};
#endif /* QCA_SUPPORT_WDS_EXTENDED */
3015 
#ifdef WLAN_SUPPORT_MESH_LATENCY
/*Advanced Mesh latency feature based macros */
/*
 * struct dp_peer_mesh_latency_parameter - Mesh latency related
 * parameters. This data is updated per peer per TID based on
 * the flow tuple classification in external rule database
 * during packet processing.
 * @service_interval_dl - Service interval associated with TID in DL
 * @burst_size_dl - Burst size additive over multiple flows in DL
 * @service_interval_ul - Service interval associated with TID in UL
 * @burst_size_ul - Burst size additive over multiple flows in UL
 * @ac - custom ac derived from service interval
 * @msduq - MSDU queue number within TID
 */
struct dp_peer_mesh_latency_parameter {
	uint32_t service_interval_dl;
	uint32_t burst_size_dl;
	uint32_t service_interval_ul;
	uint32_t burst_size_ul;
	uint8_t ac;
	uint8_t msduq;
};
#endif
3039 
/* Peer structure for data path state */
struct dp_peer {
	/* VDEV to which this peer is associated */
	struct dp_vdev *vdev;

	/* AST entry created for this peer's own MAC address */
	struct dp_ast_entry *self_ast_entry;

	/* peer reference count */
	qdf_atomic_t ref_cnt;

	/* peer ID for this peer */
	uint16_t peer_id;

	/* MAC address of this peer */
	union dp_align_mac_addr mac_addr;

	/* node in the vdev's list of peers */
	TAILQ_ENTRY(dp_peer) peer_list_elem;
	/* node in the hash table bin's list of peers */
	TAILQ_ENTRY(dp_peer) hash_list_elem;

	/* TID structures */
	struct dp_rx_tid rx_tid[DP_MAX_TIDS];

	/* TBD: No transmit TID state required? */

	/* per-direction security state, indexed by dp_sec_mcast/dp_sec_ucast */
	struct {
		enum cdp_sec_type sec_type;
		u_int32_t michael_key[2]; /* relevant for TKIP */
	} security[2]; /* 0 -> multicast, 1 -> unicast */

	/* NAWDS Flag and Bss Peer bit */
	uint16_t nawds_enabled:1, /* NAWDS flag */
		bss_peer:1, /* set for bss peer */
		wds_enabled:1, /* WDS peer */
		authorize:1, /* Set when authorized */
		nac:1, /* NAC Peer*/
		tx_cap_enabled:1, /* Peer's tx-capture is enabled */
		rx_cap_enabled:1, /* Peer's rx-capture is enabled */
		valid:1, /* valid bit */
		in_twt:1, /* in TWT session */
		delete_in_progress:1, /* Indicate kickout sent */
		sta_self_peer:1; /* Indicate STA self peer */

#ifdef QCA_SUPPORT_PEER_ISOLATION
	bool isolation; /* enable peer isolation for this peer */
#endif

	/* MCL specific peer local id */
	uint16_t local_id;
	enum ol_txrx_peer_state state;
	/* NOTE(review): presumably protects local_id/state above - confirm */
	qdf_spinlock_t peer_info_lock;

	/* Peer Stats */
	struct cdp_peer_stats stats;

	/* Peer extended stats */
	struct cdp_peer_ext_stats *pext_stats;

	/* list of AST entries associated with this peer */
	TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
	/* TBD */

#ifdef WDS_VENDOR_EXTENSION
	dp_ecm_policy wds_ecm;
#endif

	/* Active Block ack sessions */
	uint16_t active_ba_session_cnt;

	/* Current HW buffersize setting */
	uint16_t hw_buffer_size;

	/*
	 * Flag to check if sessions with 256 buffersize
	 * should be terminated.
	 */
	uint8_t kill_256_sessions;
	qdf_atomic_t is_default_route_set;
	/* Peer level flag to check peer based pktlog enabled or
	 * disabled
	 */
	uint8_t peer_based_pktlog_filter;

	/* rdk statistics context */
	struct cdp_peer_rate_stats_ctx *rdkstats_ctx;
	/* average sojourn time */
	qdf_ewma_tx_lag avg_sojourn_msdu[CDP_DATA_TID_MAX];

#ifdef QCA_MULTIPASS_SUPPORT
	/* node in the special peer list element */
	TAILQ_ENTRY(dp_peer) mpass_peer_list_elem;
	/* vlan id for key */
	uint16_t vlan_id;
#endif

#ifdef PEER_CACHE_RX_PKTS
	/* set while bufq_info flush is in progress */
	qdf_atomic_t flush_in_progress;
	struct dp_peer_cached_bufq bufq_info;
#endif
#ifdef QCA_PEER_MULTIQ_SUPPORT
	/* per flow-queue-type AST params (see enum dp_peer_ast_flowq) */
	struct dp_peer_ast_params peer_ast_flowq_idx[DP_PEER_AST_FLOWQ_MAX];
#endif
	/* entry to inactive_list*/
	TAILQ_ENTRY(dp_peer) inactive_list_elem;

	/* per-module reference counts, indexed by DP module id */
	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];

	uint8_t peer_state;
	qdf_spinlock_t peer_state_lock;
#ifdef WLAN_SUPPORT_SCS
	struct cdp_scs_params scs[IEEE80211_SCS_MAX_NO_OF_ELEM];
	bool scs_is_active;
	uint8_t no_of_scs_sessions;
#endif
#ifdef WLAN_SUPPORT_MSCS
	struct dp_peer_mscs_parameter mscs_ipv4_parameter, mscs_ipv6_parameter;
	bool mscs_active;
#endif
#ifdef QCA_SUPPORT_WDS_EXTENDED
	struct dp_wds_ext_peer wds_ext;
	ol_txrx_rx_fp osif_rx;
#endif
#ifdef WLAN_SUPPORT_MESH_LATENCY
	struct dp_peer_mesh_latency_parameter mesh_latency_params[DP_MAX_TIDS];
#endif
#ifdef WIFI_MONITOR_SUPPORT
	struct dp_mon_peer *monitor_peer;
#endif
};
3167 
3168 /*
3169  * dp_invalid_peer_msg
3170  * @nbuf: data buffer
3171  * @wh: 802.11 header
3172  * @vdev_id: id of vdev
3173  */
3174 struct dp_invalid_peer_msg {
3175 	qdf_nbuf_t nbuf;
3176 	struct ieee80211_frame *wh;
3177 	uint8_t vdev_id;
3178 };
3179 
3180 /*
3181  * dp_tx_me_buf_t: ME buffer
3182  * next: pointer to next buffer
3183  * data: Destination Mac address
3184  * paddr_macbuf: physical address for dest_mac
3185  */
3186 struct dp_tx_me_buf_t {
3187 	/* Note: ME buf pool initialization logic expects next pointer to
3188 	 * be the first element. Dont add anything before next */
3189 	struct dp_tx_me_buf_t *next;
3190 	uint8_t data[QDF_MAC_ADDR_SIZE];
3191 	qdf_dma_addr_t paddr_macbuf;
3192 };
3193 
3194 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
3195 struct hal_rx_fst;
3196 
3197 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
/* struct dp_rx_fse - software view of one RX flow search entry */
struct dp_rx_fse {
	/* HAL Rx Flow Search Entry which matches HW definition */
	void *hal_rx_fse;
	/* Toeplitz hash value */
	uint32_t flow_hash;
	/* Flow index, equivalent to hash value truncated to FST size */
	uint32_t flow_id;
	/* Stats tracking for this flow */
	struct cdp_flow_stats stats;
	/* Flag indicating whether flow is IPv4 address tuple */
	uint8_t is_ipv4_addr_entry;
	/* Flag indicating whether flow is valid */
	uint8_t is_valid;
};
3212 
/* struct dp_rx_fst - RX Flow Search Table (flow-tag variant) */
struct dp_rx_fst {
	/* Software (DP) FST */
	uint8_t *base;
	/* Pointer to HAL FST */
	struct hal_rx_fst *hal_rx_fst;
	/* Base physical address of HAL RX HW FST */
	uint64_t hal_rx_fst_base_paddr;
	/* Maximum number of flows FSE supports */
	uint16_t max_entries;
	/* Num entries in flow table */
	uint16_t num_entries;
	/* SKID Length */
	uint16_t max_skid_length;
	/* Hash mask to obtain legitimate hash entry */
	uint32_t hash_mask;
	/* Timer for bundling of flows */
	qdf_timer_t cache_invalidate_timer;
	/**
	 * Flag which tracks whether cache update
	 * is needed on timer expiry
	 */
	qdf_atomic_t is_cache_update_pending;
	/* Flag to indicate completion of FSE setup in HW/FW */
	bool fse_setup_done;
};
3238 
3239 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_rx_fse)
3240 #elif WLAN_SUPPORT_RX_FISA
3241 
/* struct dp_fisa_stats - FISA error/debug counters */
struct dp_fisa_stats {
	/* flow index invalid from RX HW TLV */
	uint32_t invalid_flow_index;
	/* count of REO ring mismatches seen for flows */
	uint32_t reo_mismatch;
};
3247 
/**
 * enum fisa_aggr_ret - result of attempting to aggregate an rx frame
 * @FISA_AGGR_DONE: frame was aggregated into the flow
 * @FISA_AGGR_NOT_ELIGIBLE: frame cannot be aggregated
 * @FISA_FLUSH_FLOW: flow must be flushed
 */
enum fisa_aggr_ret {
	FISA_AGGR_DONE,
	FISA_AGGR_NOT_ELIGIBLE,
	FISA_FLUSH_FLOW
};
3253 
3254 /**
3255  * struct fisa_pkt_hist - FISA Packet history structure
3256  * @tlv_hist: array of TLV history
3257  * @ts: array of timestamps of fisa packets
3258  * @idx: index indicating the next location to be used in the array.
3259  */
3260 struct fisa_pkt_hist {
3261 	uint8_t *tlv_hist;
3262 	qdf_time_t ts_hist[FISA_FLOW_MAX_AGGR_COUNT];
3263 	uint32_t idx;
3264 };
3265 
/* struct dp_fisa_rx_sw_ft - software flow table entry for FISA rx
 * aggregation state of one flow
 */
struct dp_fisa_rx_sw_ft {
	/* HAL Rx Flow Search Entry which matches HW definition */
	void *hw_fse;
	/* hash value */
	uint32_t flow_hash;
	/* toeplitz hash value*/
	uint32_t flow_id_toeplitz;
	/* Flow index, equivalent to hash value truncated to FST size */
	uint32_t flow_id;
	/* Stats tracking for this flow */
	struct cdp_flow_stats stats;
	/* Flag indicating whether flow is IPv4 address tuple */
	uint8_t is_ipv4_addr_entry;
	/* Flag indicating whether flow is valid */
	uint8_t is_valid;
	uint8_t is_populated;
	uint8_t is_flow_udp;
	uint8_t is_flow_tcp;
	/* head nbuf of the aggregate being built for this flow */
	qdf_nbuf_t head_skb;
	uint16_t cumulative_l4_checksum;
	uint16_t adjusted_cumulative_ip_length;
	/* frames aggregated so far in the current aggregate */
	uint16_t cur_aggr;
	uint16_t napi_flush_cumulative_l4_checksum;
	uint16_t napi_flush_cumulative_ip_length;
	/* most recently appended nbuf of the aggregate */
	qdf_nbuf_t last_skb;
	uint32_t head_skb_ip_hdr_offset;
	uint32_t head_skb_l4_hdr_offset;
	/* 5-tuple identifying this flow */
	struct cdp_rx_flow_tuple_info rx_flow_tuple_info;
	uint8_t napi_id;
	struct dp_vdev *vdev;
	uint64_t bytes_aggregated;
	uint32_t flush_count;
	uint32_t aggr_count;
	/* when set, bypass aggregation for this flow */
	uint8_t do_not_aggregate;
	uint16_t hal_cumultive_ip_len;
	struct dp_soc *soc_hdl;
	/* last aggregate count fetched from RX PKT TLV */
	uint32_t last_hal_aggr_count;
	uint32_t cur_aggr_gso_size;
	struct udphdr *head_skb_udp_hdr;
	uint16_t frags_cumulative_len;
	/* CMEM parameters */
	uint32_t cmem_offset;
	uint32_t metadata;
	uint32_t reo_dest_indication;
	/* timestamps of flow creation and last use */
	qdf_time_t flow_init_ts;
	qdf_time_t last_accessed_ts;
#ifdef WLAN_SUPPORT_RX_FISA_HIST
	struct fisa_pkt_hist pkt_hist;
#endif
};
3317 
#define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_fisa_rx_sw_ft)
/* Depth of the FSE cache-flush debug history ring */
#define MAX_FSE_CACHE_FL_HST 10
/**
 * struct fse_cache_flush_history - Debug history cache flush
 * @timestamp: Entry update timestamp
 * @flows_added: Number of flows added for this flush
 * @flows_deleted: Number of flows deleted for this flush
 */
struct fse_cache_flush_history {
	uint64_t timestamp;
	uint32_t flows_added;
	uint32_t flows_deleted;
};
3331 
/* struct dp_rx_fst - RX Flow Search Table (FISA variant) */
struct dp_rx_fst {
	/* Software (DP) FST */
	uint8_t *base;
	/* Pointer to HAL FST */
	struct hal_rx_fst *hal_rx_fst;
	/* Base physical address of HAL RX HW FST */
	uint64_t hal_rx_fst_base_paddr;
	/* Maximum number of flows FSE supports */
	uint16_t max_entries;
	/* Num entries in flow table */
	uint16_t num_entries;
	/* SKID Length */
	uint16_t max_skid_length;
	/* Hash mask to obtain legitimate hash entry */
	uint32_t hash_mask;
	/* Lock for adding/deleting entries of FST */
	qdf_spinlock_t dp_rx_fst_lock;
	uint32_t add_flow_count;
	uint32_t del_flow_count;
	uint32_t hash_collision_cnt;
	struct dp_soc *soc_hdl;
	/* set while an FSE cache flush command is outstanding to FW */
	qdf_atomic_t fse_cache_flush_posted;
	qdf_timer_t fse_cache_flush_timer;
	/* Allow FSE cache flush cmd to FW */
	bool fse_cache_flush_allow;
	/* debug history ring of recent cache flushes */
	struct fse_cache_flush_history cache_fl_rec[MAX_FSE_CACHE_FL_HST];
	/* FISA DP stats */
	struct dp_fisa_stats stats;

	/* CMEM params */
	qdf_work_t fst_update_work;
	qdf_workqueue_t *fst_update_wq;
	qdf_list_t fst_update_list;
	uint32_t meta_counter;
	uint32_t cmem_ba;
	qdf_spinlock_t dp_rx_sw_ft_lock[MAX_REO_DEST_RINGS];
	qdf_event_t cmem_resp_event;
	bool flow_deletion_supported;
	bool fst_in_cmem;
	/* set while runtime PM suspend is active */
	bool pm_suspended;
};
3373 
3374 #endif /* WLAN_SUPPORT_RX_FISA */
3375 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
3376 
#ifdef WLAN_FEATURE_STATS_EXT
/*
 * dp_req_rx_hw_stats_t: RX peer HW stats query structure
 * @pending_tid_stats_cnt: pending tid stats count which waits for REO status
 * @is_query_timeout: flag to show is stats query timeout
 */
struct dp_req_rx_hw_stats_t {
	qdf_atomic_t pending_tid_stats_cnt;
	bool is_query_timeout;
};
#endif
3388 /* soc level structure to declare arch specific ops for DP */
3389 
3390 
/**
 * dp_hw_link_desc_pool_banks_free() - free memory banks backing the HW
 * link descriptor pool
 * @soc: DP SOC handle
 * @mac_id: mac id
 */
void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id);

/**
 * dp_hw_link_desc_pool_banks_alloc() - allocate memory banks for the HW
 * link descriptor pool
 * @soc: DP SOC handle
 * @mac_id: mac id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc,
					    uint32_t mac_id);

/**
 * dp_link_desc_ring_replenish() - replenish the link descriptor ring
 * @soc: DP SOC handle
 * @mac_id: mac id
 */
void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id);

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
/**
 * dp_rx_refill_buff_pool_enqueue() - enqueue buffers to the RX refill pool
 * @soc: DP SOC handle
 */
void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc);
#else
/* no-op stub when the RX prealloc buffer pool feature is compiled out */
static inline void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc) {}
#endif

/**
 * dp_srng_alloc() - allocate memory for an SRNG
 * @soc: DP SOC handle
 * @srng: SRNG to allocate
 * @ring_type: ring type
 * @num_entries: number of ring entries
 * @cached: whether the ring memory is cacheable
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
			 int ring_type, uint32_t num_entries,
			 bool cached);

/**
 * dp_srng_free() - free SRNG memory allocated by dp_srng_alloc()
 * @soc: DP SOC handle
 * @srng: SRNG to free
 */
void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng);

/**
 * dp_srng_init() - initialize an allocated SRNG
 * @soc: DP SOC handle
 * @srng: SRNG to initialize
 * @ring_type: ring type
 * @ring_num: ring number within the type
 * @mac_id: mac id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
			int ring_type, int ring_num, int mac_id);

/**
 * dp_srng_deinit() - deinitialize an SRNG (counterpart of dp_srng_init())
 * @soc: DP SOC handle
 * @srng: SRNG to deinitialize
 * @ring_type: ring type
 * @ring_num: ring number within the type
 */
void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
		    int ring_type, int ring_num);

/**
 * dp_should_timer_irq_yield() - decide whether timer-context processing
 * should yield
 * @soc: DP SOC handle
 * @work_done: amount of work completed so far
 * @start_time: time at which processing started
 *
 * Return: timer_yield_status indicating whether to continue or yield
 */
enum timer_yield_status
dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
			  uint64_t start_time);
3413 #endif /* _DP_TYPES_H_ */
3414