xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_types.h (revision d12f727152cc50131e0f9ad64c27a22a00dfab06)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _DP_TYPES_H_
20 #define _DP_TYPES_H_
21 
22 #include <qdf_types.h>
23 #include <qdf_nbuf.h>
24 #include <qdf_lock.h>
25 #include <qdf_atomic.h>
26 #include <qdf_util.h>
27 #include <qdf_list.h>
28 #include <qdf_lro.h>
29 #include <queue.h>
30 #include <htt_common.h>
31 #include <htt_stats.h>
32 #include <cdp_txrx_cmn.h>
33 #ifdef DP_MOB_DEFS
34 #include <cds_ieee80211_common.h>
35 #endif
36 #include <wdi_event_api.h>    /* WDI subscriber event list */
37 
38 #include "hal_hw_headers.h"
39 #include <hal_tx.h>
40 #include <hal_reo.h>
41 #include "wlan_cfg.h"
42 #include "hal_rx.h"
43 #include <hal_api.h>
44 #include <hal_api_mon.h>
45 #include "hal_rx.h"
46 //#include "hal_rx_flow.h"
47 
48 #define MAX_BW 7
49 #define MAX_RETRIES 4
50 #define MAX_RECEPTION_TYPES 4
51 
52 #define MINIDUMP_STR_SIZE 25
53 #ifndef REMOVE_PKT_LOG
54 #include <pktlog.h>
55 #endif
56 
57 #ifdef WLAN_TX_PKT_CAPTURE_ENH
58 #include "dp_tx_capture.h"
59 #endif
60 
61 #define REPT_MU_MIMO 1
62 #define REPT_MU_OFDMA_MIMO 3
63 #define DP_VO_TID 6
64  /** MAX TID MAPS AVAILABLE PER PDEV */
65 #define DP_MAX_TID_MAPS 16
66 /** pad DSCP_TID_MAP_MAX with 6 to fix oob issue */
67 #define DSCP_TID_MAP_MAX (64 + 6)
68 #define DP_IP_DSCP_SHIFT 2
69 #define DP_IP_DSCP_MASK 0x3f
70 #define DP_FC0_SUBTYPE_QOS 0x80
71 #define DP_QOS_TID 0x0f
72 #define DP_IPV6_PRIORITY_SHIFT 20
73 #define MAX_MON_LINK_DESC_BANKS 2
74 #define DP_VDEV_ALL 0xff
75 
76 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
77 #define MAX_PDEV_CNT 1
78 #define WLAN_DP_RESET_MON_BUF_RING_FILTER
79 #else
80 #define MAX_PDEV_CNT 3
81 #endif
82 
83 /* Max no. of VDEV per PSOC */
84 #ifdef WLAN_PSOC_MAX_VDEVS
85 #define MAX_VDEV_CNT WLAN_PSOC_MAX_VDEVS
86 #else
87 #define MAX_VDEV_CNT 51
88 #endif
89 
90 /* Max no. of VDEVs, a PDEV can support */
91 #ifdef WLAN_PDEV_MAX_VDEVS
92 #define DP_PDEV_MAX_VDEVS WLAN_PDEV_MAX_VDEVS
93 #else
94 #define DP_PDEV_MAX_VDEVS 17
95 #endif
96 
97 #define MAX_TXDESC_POOLS 4
98 #define MAX_RXDESC_POOLS 4
99 #define MAX_REO_DEST_RINGS 4
100 #define EXCEPTION_DEST_RING_ID 0
101 #define MAX_TCL_DATA_RINGS 4
102 #define MAX_IDLE_SCATTER_BUFS 16
103 #define DP_MAX_IRQ_PER_CONTEXT 12
104 #define DEFAULT_HW_PEER_ID 0xffff
105 
106 #define WBM_INT_ERROR_ALL 0
107 #define WBM_INT_ERROR_REO_NULL_BUFFER 1
108 #define WBM_INT_ERROR_REO_NULL_LINK_DESC 2
109 #define WBM_INT_ERROR_REO_NULL_MSDU_BUFF 3
110 #define WBM_INT_ERROR_REO_BUFF_REAPED 4
111 #define MAX_WBM_INT_ERROR_REASONS 5
112 
113 #define MAX_TX_HW_QUEUES MAX_TCL_DATA_RINGS
114 /* Maximum retries for Delba per tid per peer */
115 #define DP_MAX_DELBA_RETRY 3
116 
117 #define PCP_TID_MAP_MAX 8
118 #define MAX_MU_USERS 37
119 
120 #define REO_CMD_EVENT_HIST_MAX 64
121 
122 /* 2G PHYB */
123 #define PHYB_2G_LMAC_ID 2
124 #define PHYB_2G_TARGET_PDEV_ID 2
125 
/* Flags for skipping s/w tid classification */
127 #define DP_TX_HW_DSCP_TID_MAP_VALID 0x1
128 #define DP_TXRX_HLOS_TID_OVERRIDE_ENABLED 0x2
129 #define DP_TX_MESH_ENABLED 0x4
130 
/**
 * enum rx_pktlog_mode - rx packet log capture mode
 * @DP_RX_PKTLOG_DISABLED: rx packet log is disabled
 * @DP_RX_PKTLOG_FULL: full rx packet log mode
 * @DP_RX_PKTLOG_LITE: lite rx packet log mode
 */
enum rx_pktlog_mode {
	DP_RX_PKTLOG_DISABLED = 0,
	DP_RX_PKTLOG_FULL,
	DP_RX_PKTLOG_LITE,
};
136 
/**
 * enum m_copy_mode - available mcopy modes
 * @M_COPY_DISABLED: mcopy disabled
 * @M_COPY: mcopy enabled
 * @M_COPY_EXTENDED: extended mcopy mode
 *
 * NOTE(review): the non-contiguous values (2, 4) appear to mirror an
 * external configuration encoding - confirm against the cdp/ini layer.
 */
enum m_copy_mode {
	M_COPY_DISABLED = 0,
	M_COPY = 2,
	M_COPY_EXTENDED = 4,
};
145 
146 struct msdu_list {
147 	qdf_nbuf_t head;
148 	qdf_nbuf_t tail;
149 	uint32 sum_len;
150 };
151 
152 struct dp_soc_cmn;
153 struct dp_pdev;
154 struct dp_vdev;
155 struct dp_tx_desc_s;
156 struct dp_soc;
157 union dp_rx_desc_list_elem_t;
158 struct cdp_peer_rate_stats_ctx;
159 struct cdp_soc_rate_stats_ctx;
160 struct dp_rx_fst;
161 struct dp_mon_filter;
162 struct dp_mon_mpdu;
163 
/**
 * enum dp_peer_state - DP peer lifecycle states
 * @DP_PEER_STATE_NONE: state not yet set
 * @DP_PEER_STATE_INIT: peer initialized
 * @DP_PEER_STATE_ACTIVE: peer is active
 * @DP_PEER_STATE_LOGICAL_DELETE: peer logically deleted (delete requested,
 *				  references may still be held)
 * @DP_PEER_STATE_INACTIVE: peer inactive
 * @DP_PEER_STATE_FREED: peer memory freed
 * @DP_PEER_STATE_INVALID: invalid state marker
 */
enum dp_peer_state {
	DP_PEER_STATE_NONE,
	DP_PEER_STATE_INIT,
	DP_PEER_STATE_ACTIVE,
	DP_PEER_STATE_LOGICAL_DELETE,
	DP_PEER_STATE_INACTIVE,
	DP_PEER_STATE_FREED,
	DP_PEER_STATE_INVALID,
};
176 
/**
 * enum dp_mod_id - DP module ids
 *
 * Identifies the DP module/caller in APIs that take a module id
 * (e.g. for tracking which module holds a reference on a DP object).
 * NOTE(review): the original comment was truncated ("enum for modules
 * ids of"); purpose above inferred from the member names - confirm
 * against the reference-tracking users of these ids.
 */
enum dp_mod_id {
	DP_MOD_ID_TX_COMP = 0,
	DP_MOD_ID_RX = 1,
	DP_MOD_ID_HTT_COMP = 2,
	DP_MOD_ID_RX_ERR = 3,
	DP_MOD_ID_TX_PPDU_STATS = 4,
	DP_MOD_ID_RX_PPDU_STATS = 5,
	DP_MOD_ID_CDP = 6,
	DP_MOD_ID_GENERIC_STATS = 7,
	DP_MOD_ID_TX_MULTIPASS = 8,
	DP_MOD_ID_TX_CAPTURE = 9,
	DP_MOD_ID_NSS_OFFLOAD = 10,
	DP_MOD_ID_CONFIG = 11,
	DP_MOD_ID_HTT = 12,
	DP_MOD_ID_IPA = 13,
	DP_MOD_ID_AST = 14,
	DP_MOD_ID_MCAST2UCAST = 15,
	DP_MOD_ID_CHILD = 16,
	DP_MOD_ID_MESH = 17,
	DP_MOD_ID_TX_EXCEPTION = 18,
	DP_MOD_ID_TDLS = 19,
	DP_MOD_ID_MISC = 20,
	DP_MOD_ID_MSCS = 21,
	DP_MOD_ID_TX = 22,
	DP_MOD_ID_MAX = 23,
};
206 
/* Iterate over the vdev list attached to a pdev */
#define DP_PDEV_ITERATE_VDEV_LIST(_pdev, _vdev) \
	TAILQ_FOREACH((_vdev), &(_pdev)->vdev_list, vdev_list_elem)

/* Iterate over the peer list attached to a vdev */
#define DP_VDEV_ITERATE_PEER_LIST(_vdev, _peer) \
	TAILQ_FOREACH((_peer), &(_vdev)->peer_list, peer_list_elem)

/*
 * Deletion-safe iteration over a peer's AST entry list.
 * Fix: the body previously referenced a variable literally named 'peer'
 * instead of the macro parameter '_peer', so the macro only compiled at
 * call sites that happened to declare such a variable.
 */
#define DP_PEER_ITERATE_ASE_LIST(_peer, _ase, _temp_ase) \
	TAILQ_FOREACH_SAFE((_ase), &(_peer)->ast_entry_list, ase_list_elem, \
			   (_temp_ase))
215 
/* Lock primitive used for DP mutual exclusion */
#define DP_MUTEX_TYPE qdf_spinlock_t

/* True if the group bit of the first MAC address byte is set */
#define DP_FRAME_IS_MULTICAST(_a)  (*(_a) & 0x01)
/* NOTE(review): this checks only that the first byte equals 0x01 (the
 * leading byte of the 01:00:5e IPv4 multicast MAC prefix) - confirm
 * callers are fine with this single-byte shortcut.
 */
#define DP_FRAME_IS_IPV4_MULTICAST(_a)  (*(_a) == 0x01)

/* IPv6 multicast MAC addresses begin with 33:33 */
#define DP_FRAME_IS_IPV6_MULTICAST(_a)         \
    ((_a)[0] == 0x33 &&                         \
     (_a)[1] == 0x33)

/* Broadcast MAC address ff:ff:ff:ff:ff:ff */
#define DP_FRAME_IS_BROADCAST(_a)              \
    ((_a)[0] == 0xff &&                         \
     (_a)[1] == 0xff &&                         \
     (_a)[2] == 0xff &&                         \
     (_a)[3] == 0xff &&                         \
     (_a)[4] == 0xff &&                         \
     (_a)[5] == 0xff)
/* LLC/SNAP header: DSAP == SSAP == 0xaa and control == 0x03 (UI) */
#define DP_FRAME_IS_SNAP(_llc) ((_llc)->llc_dsap == 0xaa && \
		(_llc)->llc_ssap == 0xaa && \
		(_llc)->llc_un.type_snap.control == 0x3)
/* Values >= 0x600 in the type/length field are Ethertypes, not lengths */
#define DP_FRAME_IS_LLC(typeorlen) ((typeorlen) >= 0x600)
#define DP_FRAME_FC0_TYPE_MASK 0x0c
#define DP_FRAME_FC0_TYPE_DATA 0x08
/* True if the 802.11 frame-control type subfield indicates a Data frame */
#define DP_FRAME_IS_DATA(_frame) \
	(((_frame)->i_fc[0] & DP_FRAME_FC0_TYPE_MASK) == DP_FRAME_FC0_TYPE_DATA)
240 
241 /**
242  * macros to convert hw mac id to sw mac id:
243  * mac ids used by hardware start from a value of 1 while
244  * those in host software start from a value of 0. Use the
245  * macros below to convert between mac ids used by software and
246  * hardware
247  */
248 #define DP_SW2HW_MACID(id) ((id) + 1)
249 #define DP_HW2SW_MACID(id) ((id) > 0 ? ((id) - 1) : 0)
250 
/**
 * enum dp_fl_ctrl_threshold - Tx queue threshold levels used for the
 *			       AC based flow control
 * @DP_TH_BE_BK: threshold level for BE/BK access categories
 * @DP_TH_VI: threshold level for the VI access category
 * @DP_TH_VO: threshold level for the VO access category
 * @DP_TH_HI: threshold level for high priority traffic
 */
#ifdef QCA_AC_BASED_FLOW_CONTROL
enum dp_fl_ctrl_threshold {
	DP_TH_BE_BK = 0,
	DP_TH_VI,
	DP_TH_VO,
	DP_TH_HI,
};

/* Number of threshold levels */
#define FL_TH_MAX (4)
/* Per-level thresholds, expressed as a percentage of the base threshold */
#define FL_TH_VI_PERCENTAGE (80)
#define FL_TH_VO_PERCENTAGE (60)
#define FL_TH_HI_PERCENTAGE (40)
#endif
269 
/**
 * enum dp_intr_mode - interrupt delivery mode for DP rings
 * @DP_INTR_INTEGRATED: Line interrupts
 * @DP_INTR_MSI: MSI interrupts
 * @DP_INTR_POLL: Polling mode (no interrupts)
 */
enum dp_intr_mode {
	DP_INTR_INTEGRATED = 0,
	DP_INTR_MSI,
	DP_INTR_POLL,
};
281 
/**
 * enum dp_tx_frm_type - tx frame types handled by the DP tx path
 * @dp_tx_frm_std: Regular frame, no added header fragments
 * @dp_tx_frm_tso: TSO segment, with a modified IP header added
 * @dp_tx_frm_sg: SG (scatter-gather) segment
 * @dp_tx_frm_audio: Audio frames, a custom LLC/SNAP header added
 * @dp_tx_frm_me: Multicast to Unicast Converted frame
 * @dp_tx_frm_raw: Raw Frame
 */
enum dp_tx_frm_type {
	dp_tx_frm_std = 0,
	dp_tx_frm_tso,
	dp_tx_frm_sg,
	dp_tx_frm_audio,
	dp_tx_frm_me,
	dp_tx_frm_raw,
};
299 
/**
 * enum dp_ast_type - AST (address search table) entry types
 * @dp_ast_type_wds: WDS peer AST entry type
 * @dp_ast_type_static: static AST entry type
 * @dp_ast_type_mec: Multicast echo check AST entry type
 */
enum dp_ast_type {
	dp_ast_type_wds = 0,
	dp_ast_type_static,
	dp_ast_type_mec,
};
311 
/**
 * enum dp_nss_cfg - NSS offload configuration (bitmap of offloaded radios)
 * @dp_nss_cfg_default: No radios are offloaded
 * @dp_nss_cfg_first_radio: First radio offloaded
 * @dp_nss_cfg_second_radio: Second radio offloaded
 * @dp_nss_cfg_dbdc: Dual radios offloaded
 * @dp_nss_cfg_dbtc: Three radios offloaded
 * @dp_nss_cfg_max: max configuration value (sentinel)
 */
enum dp_nss_cfg {
	dp_nss_cfg_default = 0x0,
	dp_nss_cfg_first_radio = 0x1,
	dp_nss_cfg_second_radio = 0x2,
	dp_nss_cfg_dbdc = 0x3,
	dp_nss_cfg_dbtc = 0x7,
	dp_nss_cfg_max
};
328 
329 #ifdef WLAN_TX_PKT_CAPTURE_ENH
330 #define DP_CPU_RING_MAP_1 1
331 #endif
332 
/**
 * enum dp_cpu_ring_map_types - dp tx cpu ring map
 * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
 * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded
 * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
 * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
 * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
 * @DP_SINGLE_TX_RING_MAP: to avoid out of order all cpu mapped to single
 *			   ring (only when WLAN_TX_PKT_CAPTURE_ENH is set)
 * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
 */
enum dp_cpu_ring_map_types {
	DP_NSS_DEFAULT_MAP,
	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
	DP_NSS_DBDC_OFFLOADED_MAP,
	DP_NSS_DBTC_OFFLOADED_MAP,
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	DP_SINGLE_TX_RING_MAP,
#endif
	DP_NSS_CPU_RING_MAP_MAX
};
354 
/**
 * struct dp_rx_nbuf_frag_info - Holds vaddr and paddr for a buffer
 * @paddr: Physical address of the buffer allocated
 * @virt_addr: virtual-address view of the buffer:
 *	       nbuf  - allocated nbuf in case of the nbuf approach
 *	       vaddr - virtual address of the frag in case of the frag
 *		       approach
 */
struct dp_rx_nbuf_frag_info {
	qdf_dma_addr_t paddr;
	union {
		qdf_nbuf_t nbuf;
		qdf_frag_t vaddr;
	} virt_addr;
};
369 
/**
 * enum dp_ctxt_type - context type
 * @DP_PDEV_TYPE: PDEV context
 * @DP_RX_RING_HIST_TYPE: Datapath rx ring history
 * @DP_RX_ERR_RING_HIST_TYPE: Datapath rx error ring history
 * @DP_RX_REINJECT_RING_HIST_TYPE: Datapath reinject ring history
 */
enum dp_ctxt_type {
	DP_PDEV_TYPE,
	DP_RX_RING_HIST_TYPE,
	DP_RX_ERR_RING_HIST_TYPE,
	DP_RX_REINJECT_RING_HIST_TYPE,
};
383 
/**
 * enum dp_desc_type - source type for multiple pages allocation
 * @DP_TX_DESC_TYPE: DP SW TX descriptor
 * @DP_TX_EXT_DESC_TYPE: DP TX msdu extension descriptor
 * @DP_TX_EXT_DESC_LINK_TYPE: DP link descriptor for msdu ext_desc
 * @DP_TX_TSO_DESC_TYPE: DP TX TSO descriptor
 * @DP_TX_TSO_NUM_SEG_TYPE: DP TX number of segments
 * @DP_RX_DESC_BUF_TYPE: DP RX SW descriptor
 * @DP_RX_DESC_STATUS_TYPE: DP RX SW descriptor for monitor status
 * @DP_HW_LINK_DESC_TYPE: DP HW link descriptor
 *
 * Identifies the owner/purpose of a multi-page descriptor allocation.
 */
enum dp_desc_type {
	DP_TX_DESC_TYPE,
	DP_TX_EXT_DESC_TYPE,
	DP_TX_EXT_DESC_LINK_TYPE,
	DP_TX_TSO_DESC_TYPE,
	DP_TX_TSO_NUM_SEG_TYPE,
	DP_RX_DESC_BUF_TYPE,
	DP_RX_DESC_STATUS_TYPE,
	DP_HW_LINK_DESC_TYPE,
};
405 
/**
 * struct rx_desc_pool - RX software descriptor pool
 * @pool_size: number of RX descriptors in the pool
 * @elem_size: element size (multi-page allocation build only)
 * @desc_pages: multi page descriptors (multi-page allocation build only)
 * @array: pointer to array of RX descriptors (single-allocation build only)
 * @freelist: pointer to free RX descriptor link list
 * @lock: Protection for the RX descriptor pool
 * @owner: owner for nbuf
 * @buf_size: Buffer size
 * @buf_alignment: Buffer alignment
 * @rx_mon_dest_frag_enable: Enable frag processing for mon dest buffer
 * @desc_type: type of desc this pool serves
 */
struct rx_desc_pool {
	uint32_t pool_size;
#ifdef RX_DESC_MULTI_PAGE_ALLOC
	uint16_t elem_size;
	struct qdf_mem_multi_page_t desc_pages;
#else
	union dp_rx_desc_list_elem_t *array;
#endif
	union dp_rx_desc_list_elem_t *freelist;
	qdf_spinlock_t lock;
	uint8_t owner;
	uint16_t buf_size;
	uint8_t buf_alignment;
	bool rx_mon_dest_frag_enable;
	enum dp_desc_type desc_type;
};
436 
/**
 * struct dp_tx_ext_desc_elem_s - single tx extension descriptor element
 * @next: next extension descriptor pointer
 * @vaddr: hlos virtual address pointer
 * @paddr: physical address pointer for descriptor
 * @flags: mark features for extension descriptor
 */
struct dp_tx_ext_desc_elem_s {
	struct dp_tx_ext_desc_elem_s *next;
	void *vaddr;
	qdf_dma_addr_t paddr;
	uint16_t flags;
};
450 
/**
 * struct dp_tx_ext_desc_pool_s - Tx Extension Descriptor Pool
 * @elem_count: Number of descriptors in the pool
 * @elem_size: Size of each descriptor
 * @num_free: Number of free descriptors
 * @desc_pages: multiple page allocation information for actual descriptors
 * @link_elem_size: size of the link descriptor in cacheable memory used for
 * 		    chaining the extension descriptors
 * @desc_link_pages: multiple page allocation information for link descriptors
 * @freelist: head of the free extension descriptor list
 * @lock: lock protecting the free list
 * @memctx: DMA memory context for the pool
 */
struct dp_tx_ext_desc_pool_s {
	uint16_t elem_count;
	int elem_size;
	uint16_t num_free;
	struct qdf_mem_multi_page_t desc_pages;
	int link_elem_size;
	struct qdf_mem_multi_page_t desc_link_pages;
	struct dp_tx_ext_desc_elem_s *freelist;
	qdf_spinlock_t lock;
	qdf_dma_mem_context(memctx);
};
473 
/**
 * struct dp_tx_desc_s - Tx Descriptor
 * @next: Next in the chain of descriptors in freelist or in the completion list
 * @nbuf: Buffer Address
 * @length: Length of the packet
 * @flags: Flags to track the state of descriptor and special frame handling
 * @id: Descriptor ID
 * @dma_addr: DMA address of the packet buffer
 * @vdev_id: vdev_id of vdev over which the packet was transmitted
 * @tx_status: tx completion status
 * @peer_id: peer id of the destination peer
 * @pdev: Handle to pdev
 * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet).
 * 		   This is maintained in descriptor to allow more efficient
 * 		   processing in completion event processing code.
 * 		    This field is filled in with the htt_pkt_type enum.
 * @frm_type: Frame Type - ToDo check if this is redundant
 * @pkt_offset: Offset from which the actual packet data starts
 * @pool_id: Pool ID - used when releasing the descriptor
 * @msdu_ext_desc: MSDU extension descriptor
 * @me_buffer: Pointer to ME buffer - store this so that it can be freed on
 *		Tx completion of ME packet
 * @tso_desc: TSO segment descriptor
 * @tso_num_desc: TSO number-of-segments descriptor
 * @timestamp: descriptor timestamp
 * @comp: HW tx completion descriptor contents (hal_tx_desc_comp_s)
 */
struct dp_tx_desc_s {
	struct dp_tx_desc_s *next;
	qdf_nbuf_t nbuf;
	uint16_t length;
	uint16_t flags;
	uint32_t id;
	qdf_dma_addr_t dma_addr;
	uint8_t vdev_id;
	uint8_t tx_status;
	uint16_t peer_id;
	struct dp_pdev *pdev;
	uint8_t tx_encap_type;
	uint8_t frm_type;
	uint8_t pkt_offset;
	uint8_t  pool_id;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	void *me_buffer;
	void *tso_desc;
	void *tso_num_desc;
	uint64_t timestamp;
	struct hal_tx_desc_comp_s comp;
};
517 
/**
 * enum flow_pool_status - flow pool status
 * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
 *				and network queues are unpaused
 * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
 *			   and network queues are paused
 * @FLOW_POOL_BE_BK_PAUSED: BE/BK access category queues are paused
 * @FLOW_POOL_VI_PAUSED: VI access category queues are paused
 * @FLOW_POOL_VO_PAUSED: VO access category queues are paused
 * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
 * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
 */
enum flow_pool_status {
	FLOW_POOL_ACTIVE_UNPAUSED = 0,
	FLOW_POOL_ACTIVE_PAUSED = 1,
	FLOW_POOL_BE_BK_PAUSED = 2,
	FLOW_POOL_VI_PAUSED = 3,
	FLOW_POOL_VO_PAUSED = 4,
	FLOW_POOL_INVALID = 5,
	FLOW_POOL_INACTIVE = 6,
};
536 
/**
 * struct dp_tx_tso_seg_pool_s - pool of TSO segment elements
 * @pool_size: total number of pool elements
 * @num_free: free element count
 * @freelist: first free element pointer
 * @desc_pages: multiple page allocation information for actual descriptors
 * @lock: lock for accessing the pool
 */
struct dp_tx_tso_seg_pool_s {
	uint16_t pool_size;
	uint16_t num_free;
	struct qdf_tso_seg_elem_t *freelist;
	struct qdf_mem_multi_page_t desc_pages;
	qdf_spinlock_t lock;
};
552 
/**
 * struct dp_tx_tso_num_seg_pool_s - pool of TSO num-seg elements
 * @num_seg_pool_size: total number of pool elements
 * @num_free: free element count
 * @freelist: first free element pointer
 * @desc_pages: multiple page allocation information for actual descriptors
 * @lock: lock for accessing the pool
 */

struct dp_tx_tso_num_seg_pool_s {
	uint16_t num_seg_pool_size;
	uint16_t num_free;
	struct qdf_tso_num_seg_elem_t *freelist;
	struct qdf_mem_multi_page_t desc_pages;
	/* tso mutex */
	qdf_spinlock_t lock;
};
570 
/**
 * struct dp_tx_desc_pool_s - Tx Descriptor pool information
 * @elem_size: Size of each descriptor in the pool
 * @num_allocated: Number of used descriptors
 * @freelist: Chain of free descriptors
 * @desc_pages: multiple page allocation information for actual descriptors
 *
 * With QCA_LL_TX_FLOW_CONTROL_V2 (flow-pool build):
 * @pool_size: Total number of descriptors in the pool
 * @flow_pool_id: flow pool id
 * @num_invalid_bin: Deleted pool with pending Tx completions.
 * @avail_desc: Number of available descriptors
 * @status: flow pool status
 * @flow_type: htt flow type of this pool
 * @stop_th: stop threshold(s) - per-AC array with QCA_AC_BASED_FLOW_CONTROL
 * @start_th: start threshold(s) - per-AC array with QCA_AC_BASED_FLOW_CONTROL
 * @max_pause_time: max observed pause time per threshold level
 * @latest_pause_time: time of the latest pause per threshold level
 * @pkt_drop_no_desc: packets dropped due to descriptor unavailability
 * @flow_pool_lock: Lock for descriptor allocation/free from/to the pool
 * @pool_create_cnt: pool create count (reference count)
 * @pool_owner_ctx: opaque owner context of the pool
 *
 * Without flow control:
 * @elem_count: Total number of descriptors in the pool
 * @num_free: Number of free descriptors
 * @lock: Lock for descriptor allocation/free from/to the pool
 */
struct dp_tx_desc_pool_s {
	uint16_t elem_size;
	uint32_t num_allocated;
	struct dp_tx_desc_s *freelist;
	struct qdf_mem_multi_page_t desc_pages;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	uint16_t pool_size;
	uint8_t flow_pool_id;
	uint8_t num_invalid_bin;
	uint16_t avail_desc;
	enum flow_pool_status status;
	enum htt_flow_type flow_type;
#ifdef QCA_AC_BASED_FLOW_CONTROL
	uint16_t stop_th[FL_TH_MAX];
	uint16_t start_th[FL_TH_MAX];
	qdf_time_t max_pause_time[FL_TH_MAX];
	qdf_time_t latest_pause_time[FL_TH_MAX];
#else
	uint16_t stop_th;
	uint16_t start_th;
#endif
	uint16_t pkt_drop_no_desc;
	qdf_spinlock_t flow_pool_lock;
	uint8_t pool_create_cnt;
	void *pool_owner_ctx;
#else
	uint16_t elem_count;
	uint32_t num_free;
	qdf_spinlock_t lock;
#endif
};
615 
/**
 * struct dp_txrx_pool_stats - flow pool related statistics
 * @pool_map_count: flow pool map received
 * @pool_unmap_count: flow pool unmap received
 * @pkt_drop_no_pool: packets dropped due to unavailability of pool
 */
struct dp_txrx_pool_stats {
	uint16_t pool_map_count;
	uint16_t pool_unmap_count;
	uint16_t pkt_drop_no_pool;
};
627 
/**
 * struct dp_srng - DP srng structure
 * @hal_srng: hal_srng handle
 * @base_vaddr_unaligned: un-aligned virtual base address of the srng ring
 * @base_vaddr_aligned: aligned virtual base address of the srng ring
 * @base_paddr_unaligned: un-aligned physical base address of the srng ring
 * @base_paddr_aligned: aligned physical base address of the srng ring
 * @alloc_size: size of the srng ring
 * @cached: is the srng ring memory cached or un-cached memory
 * @irq: irq number of the srng ring
 * @num_entries: number of entries in the srng ring
 * @is_mem_prealloc: set when the ring memory came from the pre-allocation
 *		     pool (DP_MEM_PRE_ALLOC build only)
 */
struct dp_srng {
	hal_ring_handle_t hal_srng;
	void *base_vaddr_unaligned;
	void *base_vaddr_aligned;
	qdf_dma_addr_t base_paddr_unaligned;
	qdf_dma_addr_t base_paddr_aligned;
	uint32_t alloc_size;
	uint8_t cached;
	int irq;
	uint32_t num_entries;
#ifdef DP_MEM_PRE_ALLOC
	uint8_t is_mem_prealloc;
#endif
};
654 
/* One rx reorder slot: head/tail of the buffered frame chain */
struct dp_rx_reorder_array_elem {
	qdf_nbuf_t head;
	qdf_nbuf_t tail;
};
659 
660 #define DP_RX_BA_INACTIVE 0
661 #define DP_RX_BA_ACTIVE 1
662 #define DP_RX_BA_IN_PROGRESS 2
/**
 * struct dp_reo_cmd_info - tracking entry for a posted REO command
 * @cmd: command identifier
 * @cmd_type: REO command type
 * @data: opaque context passed back to @handler
 * @handler: callback invoked with the REO status for this command
 * @reo_cmd_list_elem: linkage on the pending REO command list
 */
struct dp_reo_cmd_info {
	uint16_t cmd;
	enum hal_reo_cmd_type cmd_type;
	void *data;
	void (*handler)(struct dp_soc *, void *, union hal_reo_status *);
	TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
};
670 
/* Rx TID - per-TID rx reorder/ADDBA/defrag state for a peer */
struct dp_rx_tid {
	/* TID */
	int tid;

	/* Num of addba requests */
	uint32_t num_of_addba_req;

	/* Num of addba responses */
	uint32_t num_of_addba_resp;

	/* Num of delba requests */
	uint32_t num_of_delba_req;

	/* Num of addba responses successful */
	uint32_t num_addba_rsp_success;

	/* Num of addba responses failed */
	uint32_t num_addba_rsp_failed;

	/* pn size */
	uint8_t pn_size;
	/* REO TID queue descriptors */
	void *hw_qdesc_vaddr_unaligned;
	/* un-aligned physical address of the REO queue descriptor */
	qdf_dma_addr_t hw_qdesc_paddr_unaligned;
	/* aligned physical address programmed to HW */
	qdf_dma_addr_t hw_qdesc_paddr;
	/* allocation size of the REO queue descriptor */
	uint32_t hw_qdesc_alloc_size;

	/* RX ADDBA session state (DP_RX_BA_* values) */
	int ba_status;

	/* RX BA window size */
	uint16_t ba_win_size;

	/* Starting sequence number in Addba request */
	uint16_t startseqnum;

	/* TODO: Check the following while adding defragmentation support */
	struct dp_rx_reorder_array_elem *array;
	/* base - single rx reorder element used for non-aggr cases */
	struct dp_rx_reorder_array_elem base;

	/* only used for defrag right now */
	TAILQ_ENTRY(dp_rx_tid) defrag_waitlist_elem;

	/* Store dst desc for reinjection */
	hal_ring_desc_t dst_ring_desc;
	struct dp_rx_desc *head_frag_desc;

	/* rx_tid lock */
	qdf_spinlock_t tid_lock;

	/* Sequence and fragments that are being processed currently */
	uint32_t curr_seq_num;
	uint32_t curr_frag_num;

	/* head PN number */
	uint64_t pn128[2];

	/* defrag timeout for this TID, in milliseconds */
	uint32_t defrag_timeout_ms;
	/* dialog token from the ADDBA exchange */
	uint16_t dialogtoken;
	/* ADDBA response status code */
	uint16_t statuscode;
	/* user defined ADDBA response status code */
	uint16_t userstatuscode;

	/* Store ppdu_id when 2k exception is received */
	uint32_t ppdu_id_2k;

	/* Delba Tx completion status */
	uint8_t delba_tx_status;

	/* Delba Tx retry count */
	uint8_t delba_tx_retry;

	/* Delba stats */
	uint32_t delba_tx_success_cnt;
	uint32_t delba_tx_fail_cnt;

	/* Delba reason code for retries */
	uint8_t delba_rcode;

	/* Coex Override preserved windows size 1 based */
	uint16_t rx_ba_win_size_override;

	/* Peer TID statistics */
	struct cdp_peer_tid_stats stats;
};
758 
/**
 * struct dp_intr_stats - DP Interrupt Stats for an interrupt context
 * @num_tx_ring_masks: interrupts with tx_ring_mask set
 * @num_rx_ring_masks: interrupts with rx_ring_mask set
 * @num_rx_mon_ring_masks: interrupts with rx_mon_ring_mask set
 * @num_rx_err_ring_masks: interrupts with rx_err_ring_mask set
 * @num_rx_wbm_rel_ring_masks: interrupts with rx_wbm_rel_ring_mask set
 * @num_reo_status_ring_masks: interrupts with reo_status_ring_mask set
 * @num_rxdma2host_ring_masks: interrupts with rxdma2host_ring_mask set
 * @num_host2rxdma_ring_masks: interrupts with host2rxdma_ring_mask set
 * @num_masks: total number of times the interrupt was received
 *
 * Counter for individual masks are incremented only if there are any packets
 * on that ring.
 */
struct dp_intr_stats {
	uint32_t num_tx_ring_masks[MAX_TCL_DATA_RINGS];
	uint32_t num_rx_ring_masks[MAX_REO_DEST_RINGS];
	uint32_t num_rx_mon_ring_masks;
	uint32_t num_rx_err_ring_masks;
	uint32_t num_rx_wbm_rel_ring_masks;
	uint32_t num_reo_status_ring_masks;
	uint32_t num_rxdma2host_ring_masks;
	uint32_t num_host2rxdma_ring_masks;
	uint32_t num_masks;
};
786 
/* per interrupt context */
struct dp_intr {
	uint8_t tx_ring_mask;   /* WBM Tx completion rings (0-2)
				associated with this napi context */
	uint8_t rx_ring_mask;   /* Rx REO rings (0-3) associated
				with this interrupt context */
	uint8_t rx_mon_ring_mask;  /* Rx monitor ring mask (0-2) */
	uint8_t rx_err_ring_mask; /* REO Exception Ring */
	uint8_t rx_wbm_rel_ring_mask; /* WBM2SW Rx Release Ring */
	uint8_t reo_status_ring_mask; /* REO command response ring */
	uint8_t rxdma2host_ring_mask; /* RXDMA to host destination ring */
	uint8_t host2rxdma_ring_mask; /* Host to RXDMA buffer ring */
	/* Host to RXDMA monitor  buffer ring */
	uint8_t host2rxdma_mon_ring_mask;
	struct dp_soc *soc;    /* Reference to SoC structure ,
				to get DMA ring handles */
	qdf_lro_ctx_t lro_ctx; /* LRO context for this interrupt context */
	uint8_t dp_intr_id;    /* id of this interrupt context */

	/* Interrupt Stats for individual masks */
	struct dp_intr_stats intr_stats;
};
809 
810 #define REO_DESC_FREELIST_SIZE 64
811 #define REO_DESC_FREE_DEFER_MS 1000
/**
 * struct reo_desc_list_node - deferred-free entry for a REO queue descriptor
 * @node: qdf list node
 * @free_ts: timestamp recorded when freeing was deferred (compared against
 *	     REO_DESC_FREE_DEFER_MS)
 * @rx_tid: rx TID whose REO queue descriptor is pending free
 * @resend_update_reo_cmd: resend the REO update command for this entry
 * @pending_ext_desc_size: NOTE(review): pending extension descriptor size -
 *			   confirm exact semantics against the users
 */
struct reo_desc_list_node {
	qdf_list_node_t node;
	unsigned long free_ts;
	struct dp_rx_tid rx_tid;
	bool resend_update_reo_cmd;
	uint32_t pending_ext_desc_size;
};
819 
820 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/**
 * struct reo_cmd_event_record - Elements to record for each reo command
 * @cmd_type: reo command type
 * @cmd_return_status: reo command post status
 * @timestamp: record timestamp for the reo command
 */
struct reo_cmd_event_record {
	enum hal_reo_cmd_type cmd_type;
	uint8_t cmd_return_status;
	uint32_t timestamp;
};

/**
 * struct reo_cmd_event_history - Account for reo cmd events
 * @index: next record index (atomic)
 * @cmd_record: circular list of records
 */
struct reo_cmd_event_history {
	qdf_atomic_t index;
	struct reo_cmd_event_record cmd_record[REO_CMD_EVENT_HIST_MAX];
};
842 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
843 
/* SoC level data path statistics */
struct dp_soc_stats {
	/* AST entry statistics */
	struct {
		/* AST entries added */
		uint32_t added;
		/* AST entries deleted */
		uint32_t deleted;
		/* AST entries aged out */
		uint32_t aged_out;
		/* AST map errors */
		uint32_t map_err;
		/* AST entry mismatch count */
		uint32_t ast_mismatch;
	} ast;

	/* SOC level TX stats */
	struct {
		/* Total packets transmitted */
		struct cdp_pkt_info egress;
		/* packets dropped on tx because of no peer */
		struct cdp_pkt_info tx_invalid_peer;
		/* descriptors in each tcl ring */
		uint32_t tcl_ring_full[MAX_TCL_DATA_RINGS];
		/* Descriptors in use at soc */
		uint32_t desc_in_use;
		/* tqm_release_reason == FW removed */
		uint32_t dropped_fw_removed;
		/* tx completion release_src != TQM or FW */
		uint32_t invalid_release_source;
		/* tx completion wbm_internal_error */
		uint32_t wbm_internal_error[MAX_WBM_INT_ERROR_REASONS];
		/* tx completion non_wbm_internal_error */
		uint32_t non_wbm_internal_err;
		/* TX Comp loop packet limit hit */
		uint32_t tx_comp_loop_pkt_limit_hit;
		/* Head pointer Out of sync at the end of dp_tx_comp_handler */
		uint32_t hp_oos2;
	} tx;

	/* SOC level RX stats */
	struct {
		/* Total rx packets count */
		struct cdp_pkt_info ingress;
		/* Rx errors */
		/* Total Packets in Rx Error ring */
		uint32_t err_ring_pkts;
		/* No of Fragments */
		uint32_t rx_frags;
		/* No of incomplete fragments in waitlist */
		uint32_t rx_frag_wait;
		/* Fragments dropped due to errors */
		uint32_t rx_frag_err;
		/* Fragments received OOR causing sequence num mismatch */
		uint32_t rx_frag_oor;
		/* Fragments dropped due to len errors in skb */
		uint32_t rx_frag_err_len_error;
		/* Fragments dropped due to no peer found */
		uint32_t rx_frag_err_no_peer;
		/* No of reinjected packets */
		uint32_t reo_reinject;
		/* Reap loop packet limit hit */
		uint32_t reap_loop_pkt_limit_hit;
		/* Head pointer Out of sync at the end of dp_rx_process */
		uint32_t hp_oos2;
		/* Rx ring near full */
		uint32_t near_full;
		/* Break ring reaping as not all scattered msdu received */
		uint32_t msdu_scatter_wait_break;
		/* Number of bar frames received */
		uint32_t bar_frame;
		/* Number of frames routed from rxdma that were dropped */
		uint32_t rxdma2rel_route_drop;
		/* Number of frames routed from reo that were dropped */
		uint32_t reo2rel_route_drop;

		struct {
			/* Invalid RBM error count */
			uint32_t invalid_rbm;
			/* Invalid VDEV Error count */
			uint32_t invalid_vdev;
			/* Invalid PDEV error count */
			uint32_t invalid_pdev;

			/* Packets delivered to stack for which no related
			 * peer was found
			 */
			uint32_t pkt_delivered_no_peer;
			/* Defrag peer uninit error count */
			uint32_t defrag_peer_uninit;
			/* Invalid sa_idx or da_idx*/
			uint32_t invalid_sa_da_idx;
			/* MSDU DONE failures */
			uint32_t msdu_done_fail;
			/* Invalid PEER Error count */
			struct cdp_pkt_info rx_invalid_peer;
			/* Invalid PEER ID count */
			struct cdp_pkt_info rx_invalid_peer_id;
			/* Invalid packet length */
			struct cdp_pkt_info rx_invalid_pkt_len;
			/* HAL ring access Fail error count */
			uint32_t hal_ring_access_fail;
			/* HAL ring access full Fail error count */
			uint32_t hal_ring_access_full_fail;
			/* RX DMA error count */
			uint32_t rxdma_error[HAL_RXDMA_ERR_MAX];
			/* RX REO DEST Desc Invalid Magic count */
			uint32_t rx_desc_invalid_magic;
			/* REO Error count */
			uint32_t reo_error[HAL_REO_ERR_MAX];
			/* HAL REO ERR Count */
			uint32_t hal_reo_error[MAX_REO_DEST_RINGS];
			/* HAL REO DEST Duplicate count */
			uint32_t hal_reo_dest_dup;
			/* HAL WBM RELEASE Duplicate count */
			uint32_t hal_wbm_rel_dup;
			/* HAL RXDMA error Duplicate count */
			uint32_t hal_rxdma_err_dup;
			/* ipa smmu map duplicate count */
			uint32_t ipa_smmu_map_dup;
			/* ipa smmu unmap duplicate count */
			uint32_t ipa_smmu_unmap_dup;
			/* ipa smmu unmap while ipa pipes is disabled */
			uint32_t ipa_unmap_no_pipe;
			/* REO cmd send fail/requeue count */
			uint32_t reo_cmd_send_fail;
			/* REO cmd send drain count */
			uint32_t reo_cmd_send_drain;
			/* RX msdu drop count due to scatter */
			uint32_t scatter_msdu;
			/* RX msdu drop count due to invalid cookie */
			uint32_t invalid_cookie;
			/* Count of stale cookie read in RX path */
			uint32_t stale_cookie;
			/* Delba sent count due to RX 2k jump */
			uint32_t rx_2k_jump_delba_sent;
			/* RX 2k jump msdu indicated to stack count */
			uint32_t rx_2k_jump_to_stack;
			/* RX 2k jump msdu dropped count */
			uint32_t rx_2k_jump_drop;
			/* REO OOR msdu drop count */
			uint32_t reo_err_oor_drop;
			/* REO OOR msdu indicated to stack count */
			uint32_t reo_err_oor_to_stack;
			/* REO OOR scattered msdu count */
			uint32_t reo_err_oor_sg_count;
			/* RX msdu rejected count on delivery to vdev stack_fn*/
			uint32_t rejected;
			/* Incorrect msdu count in MPDU desc info */
			uint32_t msdu_count_mismatch;
			/* RX raw frame dropped count */
			uint32_t raw_frm_drop;
			/* Stale link desc cookie count*/
			uint32_t invalid_link_cookie;
			/* Nbuf sanity failure */
			uint32_t nbuf_sanity_fail;
			/* Duplicate link desc refilled */
			uint32_t dup_refill_link_desc;
			/* Incorrect msdu continuation bit in MSDU desc */
			uint32_t msdu_continuation_err;
			/* REO OOR eapol drop count */
			uint32_t reo_err_oor_eapol_drop;
			/* Non Eapol packet drop count due to peer not authorized  */
			uint32_t peer_unauth_rx_pkt_drop;
		} err;

		/* packet count per core - per ring */
		uint64_t ring_packets[NR_CPUS][MAX_REO_DEST_RINGS];
	} rx;

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
	struct reo_cmd_event_history cmd_event_history;
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
};
1010 
/**
 * union dp_align_mac_addr - MAC address with alignment-friendly access views
 * @raw: plain byte-array view of the MAC address
 * @align2: three 16-bit words (2-byte aligned access)
 * @align4: 32-bit word followed by 16-bit word (4-byte aligned access)
 * @align4_2: 16-bit word followed by packed 32-bit word
 */
union dp_align_mac_addr {
	uint8_t raw[QDF_MAC_ADDR_SIZE];
	struct {
		uint16_t bytes_ab;
		uint16_t bytes_cd;
		uint16_t bytes_ef;
	} align2;
	struct {
		uint32_t bytes_abcd;
		uint16_t bytes_ef;
	} align4;
	struct __attribute__((__packed__)) {
		uint16_t bytes_ab;
		uint32_t bytes_cdef;
	} align4_2;
};
1027 
1028 /**
1029  * struct dp_ast_free_cb_params - HMWDS free callback cookie
1030  * @mac_addr: ast mac address
1031  * @peer_mac_addr: mac address of peer
1032  * @type: ast entry type
1033  * @vdev_id: vdev_id
1034  * @flags: ast flags
1035  */
1036 struct dp_ast_free_cb_params {
1037 	union dp_align_mac_addr mac_addr;
1038 	union dp_align_mac_addr peer_mac_addr;
1039 	enum cdp_txrx_ast_entry_type type;
1040 	uint8_t vdev_id;
1041 	uint32_t flags;
1042 };
1043 
1044 /*
1045  * dp_ast_entry
1046  *
1047  * @ast_idx: Hardware AST Index
1048  * @peer_id: Next Hop peer_id (for non-WDS nodes, this will be point to
1049  *           associated peer with this MAC address)
1050  * @mac_addr:  MAC Address for this AST entry
1051  * @next_hop: Set to 1 if this is for a WDS node
1052  * @is_active: flag to indicate active data traffic on this node
1053  *             (used for aging out/expiry)
1054  * @ase_list_elem: node in peer AST list
1055  * @is_bss: flag to indicate if entry corresponds to bss peer
1056  * @is_mapped: flag to indicate that we have mapped the AST entry
1057  *             in ast_table
1058  * @pdev_id: pdev ID
1059  * @vdev_id: vdev ID
1060  * @ast_hash_value: hast value in HW
1061  * @ref_cnt: reference count
1062  * @type: flag to indicate type of the entry(static/WDS/MEC)
1063  * @delete_in_progress: Flag to indicate that delete commands send to FW
1064  *                      and host is waiting for response from FW
1065  * @callback: ast free/unmap callback
1066  * @cookie: argument to callback
1067  * @hash_list_elem: node in soc AST hash list (mac address used as hash)
1068  */
1069 struct dp_ast_entry {
1070 	uint16_t ast_idx;
1071 	uint16_t peer_id;
1072 	union dp_align_mac_addr mac_addr;
1073 	bool next_hop;
1074 	bool is_active;
1075 	bool is_mapped;
1076 	uint8_t pdev_id;
1077 	uint8_t vdev_id;
1078 	uint16_t ast_hash_value;
1079 	qdf_atomic_t ref_cnt;
1080 	enum cdp_txrx_ast_entry_type type;
1081 	bool delete_in_progress;
1082 	txrx_ast_free_cb callback;
1083 	void *cookie;
1084 	TAILQ_ENTRY(dp_ast_entry) ase_list_elem;
1085 	TAILQ_ENTRY(dp_ast_entry) hash_list_elem;
1086 };
1087 
/* SOC level HTT stats - context for processing T2H stats messages */
struct htt_t2h_stats {
	/* lock to protect htt_stats_msg update */
	qdf_spinlock_t lock;

	/* work queue to process htt stats */
	qdf_work_t work;

	/* T2H Ext stats message queue */
	qdf_nbuf_queue_t msg;

	/* number of completed stats in htt_stats_msg */
	uint32_t num_stats;
};
1102 
/**
 * struct link_desc_bank - link descriptor memory bank
 * @base_vaddr_unaligned: base virtual address before alignment
 * @base_vaddr: aligned base virtual address
 * @base_paddr_unaligned: base physical address before alignment
 * @base_paddr: aligned base physical address
 * @size: size of the bank
 */
struct link_desc_bank {
	void *base_vaddr_unaligned;
	void *base_vaddr;
	qdf_dma_addr_t base_paddr_unaligned;
	qdf_dma_addr_t base_paddr;
	uint32_t size;
};
1110 
/**
 * struct rx_buff_pool - emergency RX nbuf pool
 * @emerg_nbuf_q: queue of emergency nbufs
 * @nbuf_fail_cnt: nbuf failure counter
 * @is_initialized: set once the pool has been initialized
 */
struct rx_buff_pool {
	qdf_nbuf_queue_head_t emerg_nbuf_q;
	uint32_t nbuf_fail_cnt;
	bool is_initialized;
};
1116 
1117 /*
1118  * The logic for get current index of these history is dependent on this
1119  * value being power of 2.
1120  */
1121 #define DP_RX_HIST_MAX 2048
1122 #define DP_RX_ERR_HIST_MAX 2048
1123 #define DP_RX_REINJECT_HIST_MAX 1024
1124 
1125 QDF_COMPILE_TIME_ASSERT(rx_history_size,
1126 			(DP_RX_HIST_MAX &
1127 			 (DP_RX_HIST_MAX - 1)) == 0);
1128 QDF_COMPILE_TIME_ASSERT(rx_err_history_size,
1129 			(DP_RX_ERR_HIST_MAX &
1130 			 (DP_RX_ERR_HIST_MAX - 1)) == 0);
1131 QDF_COMPILE_TIME_ASSERT(rx_reinject_history_size,
1132 			(DP_RX_REINJECT_HIST_MAX &
1133 			 (DP_RX_REINJECT_HIST_MAX - 1)) == 0);
1134 
1135 /**
1136  * struct dp_buf_info_record - ring buffer info
1137  * @hbi: HW ring buffer info
1138  * @timestamp: timestamp when this entry was recorded
1139  */
1140 struct dp_buf_info_record {
1141 	struct hal_buf_info hbi;
1142 	uint64_t timestamp;
1143 };
1144 
/* struct dp_rx_history - rx ring history
 * @index: Index where the last entry is written
 * @entry: history entries
 */
struct dp_rx_history {
	qdf_atomic_t index;
	struct dp_buf_info_record entry[DP_RX_HIST_MAX];
};
1153 
/* struct dp_rx_err_history - rx err ring history
 * @index: Index where the last entry is written
 * @entry: history entries
 */
struct dp_rx_err_history {
	qdf_atomic_t index;
	struct dp_buf_info_record entry[DP_RX_ERR_HIST_MAX];
};
1162 
/* struct dp_rx_reinject_history - rx reinject ring history
 * @index: Index where the last entry is written
 * @entry: history entries
 */
struct dp_rx_reinject_history {
	qdf_atomic_t index;
	struct dp_buf_info_record entry[DP_RX_REINJECT_HIST_MAX];
};
1171 
/**
 * struct dp_last_op_info - record of recent operation related variables
 * @wbm_rel_link_desc: last link desc buf info through WBM release ring
 * @reo_reinject_link_desc: last link desc buf info through REO reinject ring
 */
struct dp_last_op_info {
	/* last link desc buf info through WBM release ring */
	struct hal_buf_info wbm_rel_link_desc;
	/* last link desc buf info through REO reinject ring */
	struct hal_buf_info reo_reinject_link_desc;
};
1179 
1180 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1181 
1182 /**
1183  * struct dp_swlm_tcl_data - params for tcl register write coalescing
1184  *			     descision making
1185  * @nbuf: TX packet
1186  * @tid: tid for transmitting the current packet
1187  * @num_ll_connections: Number of low latency connections on this vdev
1188  *
1189  * This structure contains the information required by the software
1190  * latency manager to decide on whether to coalesce the current TCL
1191  * register write or not.
1192  */
1193 struct dp_swlm_tcl_data {
1194 	qdf_nbuf_t nbuf;
1195 	uint8_t tid;
1196 	uint8_t num_ll_connections;
1197 };
1198 
1199 /**
1200  * union swlm_data - SWLM query data
1201  * @tcl_data: data for TCL query in SWLM
1202  */
1203 union swlm_data {
1204 	struct dp_swlm_tcl_data *tcl_data;
1205 };
1206 
1207 /**
1208  * struct dp_swlm_ops - SWLM ops
1209  * @tcl_wr_coalesce_check: handler to check if the current TCL register
1210  *			   write can be coalesced or not
1211  */
1212 struct dp_swlm_ops {
1213 	int (*tcl_wr_coalesce_check)(struct dp_soc *soc,
1214 				     struct dp_swlm_tcl_data *tcl_data);
1215 };
1216 
1217 /**
1218  * struct dp_swlm_stats - Stats for Software Latency manager.
1219  * @tcl.timer_flush_success: Num TCL HP writes success from timer context
1220  * @tcl.timer_flush_fail: Num TCL HP writes failure from timer context
1221  * @tcl.tid_fail: Num TCL register write coalescing skips, since the pkt
1222  *		 was being transmitted on a TID above coalescing threshold
1223  * @tcl.sp_frames: Num TCL register write coalescing skips, since the pkt
1224  *		  being transmitted was a special frame
1225  * @tcl.ll_connection: Num TCL register write coalescing skips, since the
1226  *		       vdev has low latency connections
1227  * @tcl.bytes_thresh_reached: Num TCL HP writes flush after the coalescing
1228  *			     bytes threshold was reached
1229  * @tcl.time_thresh_reached: Num TCL HP writes flush after the coalescing
1230  *			    session time expired
1231  * @tcl.tput_criteria_fail: Num TCL HP writes coalescing fails, since the
1232  *			   throughput did not meet session threshold
1233  * @tcl.coalesce_success: Num of TCL HP writes coalesced successfully.
1234  * @tcl.coalesce_fail: Num of TCL HP writes coalesces failed
1235  */
1236 struct dp_swlm_stats {
1237 	struct {
1238 		uint32_t timer_flush_success;
1239 		uint32_t timer_flush_fail;
1240 		uint32_t tid_fail;
1241 		uint32_t sp_frames;
1242 		uint32_t ll_connection;
1243 		uint32_t bytes_thresh_reached;
1244 		uint32_t time_thresh_reached;
1245 		uint32_t tput_criteria_fail;
1246 		uint32_t coalesce_success;
1247 		uint32_t coalesce_fail;
1248 	} tcl;
1249 };
1250 
1251 /**
1252  * struct dp_swlm_params: Parameters for different modules in the
1253  *			  Software latency manager.
1254  * @tcl.flush_timer: Timer for flushing the coalesced TCL HP writes
1255  * @tcl.rx_traffic_thresh: Threshold for RX traffic, to begin TCL register
1256  *			   write coalescing
1257  * @tcl.tx_traffic_thresh: Threshold for TX traffic, to begin TCL register
1258  *			   write coalescing
1259  * @tcl.sampling_time: Sampling time to test the throughput threshold
1260  * @tcl.sampling_session_tx_bytes: Num bytes transmitted in the sampling time
1261  * @tcl.bytes_flush_thresh: Bytes threshold to flush the TCL HP register write
1262  * @tcl.time_flush_thresh: Time threshold to flush the TCL HP register write
1263  * @tcl.tx_thresh_multiplier: Multiplier to deduce the bytes threshold after
1264  *			      which the TCL HP register is written, thereby
1265  *			      ending the coalescing.
1266  * @tcl.coalesce_end_time: End timestamp for current coalescing session
1267  * @tcl.bytes_coalesced: Num bytes coalesced in the current session
1268  */
1269 struct dp_swlm_params {
1270 	struct {
1271 		qdf_timer_t flush_timer;
1272 		uint32_t rx_traffic_thresh;
1273 		uint32_t tx_traffic_thresh;
1274 		uint32_t sampling_time;
1275 		uint32_t sampling_session_tx_bytes;
1276 		uint32_t bytes_flush_thresh;
1277 		uint32_t time_flush_thresh;
1278 		uint32_t tx_thresh_multiplier;
1279 		uint64_t coalesce_end_time;
1280 		uint32_t bytes_coalesced;
1281 	} tcl;
1282 };
1283 
1284 /**
1285  * struct dp_swlm - Software latency manager context
1286  * @ops: SWLM ops pointers
1287  * @is_enabled: SWLM enabled/disabled
1288  * @is_init: SWLM module initialized
1289  * @stats: SWLM stats
1290  * @params: SWLM SRNG params
1291  * @tcl_flush_timer: flush timer for TCL register writes
1292  */
1293 struct dp_swlm {
1294 	struct dp_swlm_ops *ops;
1295 	uint8_t is_enabled:1,
1296 		is_init:1;
1297 	struct dp_swlm_stats stats;
1298 	struct dp_swlm_params params;
1299 };
1300 #endif
1301 
/* SOC level structure for data path */
struct dp_soc {
	/**
	 * re-use memory section starts
	 */

	/* Common base structure - Should be the first member */
	struct cdp_soc_t cdp_soc;

	/* SoC Obj */
	struct cdp_ctrl_objmgr_psoc *ctrl_psoc;

	/* OS device abstraction */
	qdf_device_t osdev;

	/*cce disable*/
	bool cce_disable;

	/* WLAN config context */
	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx;

	/* HTT handle for host-fw interaction */
	struct htt_soc *htt_handle;

	/* Common init done */
	qdf_atomic_t cmn_init_done;

	/* Opaque hif handle */
	struct hif_opaque_softc *hif_handle;

	/* PDEVs on this SOC */
	struct dp_pdev *pdev_list[MAX_PDEV_CNT];

	/* Ring used to replenish rx buffers (maybe to the firmware or MAC) */
	struct dp_srng rx_refill_buf_ring[MAX_PDEV_CNT];

	/* RXDMA monitor descriptor rings, one per LMAC HW */
	struct dp_srng rxdma_mon_desc_ring[MAX_NUM_LMAC_HW];

	/* RXDMA error destination ring */
	struct dp_srng rxdma_err_dst_ring[MAX_NUM_LMAC_HW];

	/* RXDMA monitor buffer replenish ring */
	struct dp_srng rxdma_mon_buf_ring[MAX_NUM_LMAC_HW];

	/* RXDMA monitor destination ring */
	struct dp_srng rxdma_mon_dst_ring[MAX_NUM_LMAC_HW];

	/* RXDMA monitor status ring. TBD: Check format of this ring */
	struct dp_srng rxdma_mon_status_ring[MAX_NUM_LMAC_HW];

	/* Number of PDEVs */
	uint8_t pdev_count;

	/*ast override support in HW*/
	bool ast_override_support;

	/*number of hw dscp tid map*/
	uint8_t num_hw_dscp_tid_map;

	/* HAL SOC handle */
	hal_soc_handle_t hal_soc;

	/* Device ID coming from Bus sub-system */
	uint32_t device_id;

	/* Link descriptor pages */
	struct qdf_mem_multi_page_t link_desc_pages;

	/* total link descriptors for regular RX and TX */
	uint32_t total_link_descs;

	/* monitor link descriptor pages */
	struct qdf_mem_multi_page_t mon_link_desc_pages[MAX_NUM_LMAC_HW];

	/* total link descriptors for monitor mode for each radio */
	uint32_t total_mon_link_descs[MAX_NUM_LMAC_HW];

	/* Monitor Link descriptor memory banks */
	struct link_desc_bank
		mon_link_desc_banks[MAX_NUM_LMAC_HW][MAX_MON_LINK_DESC_BANKS];
	/* number of monitor link descriptor banks in use, per LMAC HW */
	uint32_t num_mon_link_desc_banks[MAX_NUM_LMAC_HW];

	/* Link descriptor Idle list for HW internal use (SRNG mode) */
	struct dp_srng wbm_idle_link_ring;

	/* Link descriptor Idle list for HW internal use (scatter buffer mode)
	 */
	qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS];
	void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS];
	uint32_t num_scatter_bufs;

	/* Tx SW descriptor pool */
	struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS];

	/* Tx MSDU Extension descriptor pool */
	struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS];

	/* Tx TSO descriptor pool */
	struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS];

	/* Tx TSO Num of segments pool */
	struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS];

	/* REO destination rings */
	struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS];

	/* REO exception ring - See if should combine this with reo_dest_ring */
	struct dp_srng reo_exception_ring;

	/* REO reinjection ring */
	struct dp_srng reo_reinject_ring;

	/* REO command ring */
	struct dp_srng reo_cmd_ring;

	/* REO command status ring */
	struct dp_srng reo_status_ring;

	/* WBM Rx release ring */
	struct dp_srng rx_rel_ring;

	/* TCL data ring */
	struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS];

	/* Number of TCL data rings */
	uint8_t num_tcl_data_rings;

	/* TCL CMD_CREDIT ring */
	bool init_tcl_cmd_cred_ring;

	/* It is used as credit based ring on QCN9000 else command ring */
	struct dp_srng tcl_cmd_credit_ring;

	/* TCL command status ring */
	struct dp_srng tcl_status_ring;

	/* WBM Tx completion rings */
	struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS];

	/* Common WBM link descriptor release ring (SW to WBM) */
	struct dp_srng wbm_desc_rel_ring;

	/* DP Interrupts */
	struct dp_intr intr_ctx[WLAN_CFG_INT_NUM_CONTEXTS];

	/* Monitor mode mac id to dp_intr_id map */
	int mon_intr_id_lmac_map[MAX_NUM_LMAC_HW];
	/* Rx SW descriptor pool for RXDMA monitor buffer */
	struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS];

	/* Rx SW descriptor pool for RXDMA status buffer */
	struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS];

	/* Rx SW descriptor pool for RXDMA buffer */
	struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS];

	/* Number of REO destination rings */
	uint8_t num_reo_dest_rings;

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	/* lock to control access to soc TX descriptors */
	qdf_spinlock_t flow_pool_array_lock;

	/* pause callback to pause TX queues as per flow control */
	tx_pause_callback pause_cb;

	/* flow pool related statistics */
	struct dp_txrx_pool_stats pool_stats;
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

	/* WBM idle list scatter buffer size */
	uint32_t wbm_idle_scatter_buf_size;

	/* VDEVs on this SOC */
	struct dp_vdev *vdev_id_map[MAX_VDEV_CNT];

	/* Tx H/W queues lock */
	qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];

	/* Tx ring map for interrupt processing */
	uint8_t tx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];

	/* Rx ring map for interrupt processing */
	uint8_t rx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];

	/* peer ID to peer object map (array of pointers to peer objects) */
	struct dp_peer **peer_id_to_obj_map;

	/* peer hash table (hashed by MAC address) */
	struct {
		unsigned mask;
		unsigned idx_bits;
		TAILQ_HEAD(, dp_peer) * bins;
	} peer_hash;

	/* rx defrag state - TBD: do we need this per radio? */
	struct {
		struct {
			TAILQ_HEAD(, dp_rx_tid) waitlist;
			uint32_t timeout_ms;
			uint32_t next_flush_ms;
			qdf_spinlock_t defrag_lock;
		} defrag;
		struct {
			int defrag_timeout_check;
			int dup_check;
		} flags;
		TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list;
		qdf_spinlock_t reo_cmd_lock;
	} rx;

	/* optional rx processing function */
	void (*rx_opt_proc)(
		struct dp_vdev *vdev,
		struct dp_peer *peer,
		unsigned tid,
		qdf_nbuf_t msdu_list);

	/* pool addr for mcast enhance buff */
	struct {
		int size;
		uint32_t paddr;
		uint32_t *vaddr;
		struct dp_tx_me_buf_t *freelist;
		int buf_in_use;
		qdf_dma_mem_context(memctx);
	} me_buf;

	/* Protect peer hash table */
	DP_MUTEX_TYPE peer_hash_lock;
	/* Protect peer_id_to_objmap */
	DP_MUTEX_TYPE peer_map_lock;

	/* maximum value for peer_id */
	uint32_t max_peers;

	/* SoC level data path statistics */
	struct dp_soc_stats stats;

	/* Enable processing of Tx completion status words */
	bool process_tx_status;
	bool process_rx_status;
	/* array of AST entry pointers */
	struct dp_ast_entry **ast_table;
	/* AST hash table (hashed by MAC address) */
	struct {
		unsigned mask;
		unsigned idx_bits;
		TAILQ_HEAD(, dp_ast_entry) * bins;
	} ast_hash;

	/* per-REO-destination-ring rx buffer history */
	struct dp_rx_history *rx_ring_history[MAX_REO_DEST_RINGS];
	/* rx error ring buffer history */
	struct dp_rx_err_history *rx_err_ring_history;
	/* rx reinject ring buffer history */
	struct dp_rx_reinject_history *rx_reinject_ring_history;

	/* AST lock */
	qdf_spinlock_t ast_lock;
	/* Timer for AST entry ageout maintenance */
	qdf_timer_t ast_aging_timer;

	/*Timer counter for WDS AST entry ageout*/
	uint8_t wds_ast_aging_timer_cnt;

	/*interrupt timer*/
	qdf_timer_t mon_reap_timer;
	uint8_t reap_timer_init;
	qdf_timer_t lmac_reap_timer;
	uint8_t lmac_timer_init;
	qdf_timer_t int_timer;
	uint8_t intr_mode;
	uint8_t lmac_polled_mode;
	qdf_timer_t mon_vdev_timer;
	uint8_t mon_vdev_timer_state;

	/* freelist of REO descriptors, protected by the lock below */
	qdf_list_t reo_desc_freelist;
	qdf_spinlock_t reo_desc_freelist_lock;

	/* htt stats */
	struct htt_t2h_stats htt_stats;

	void *external_txrx_handle; /* External data path handle */
#ifdef IPA_OFFLOAD
	/* IPA uC datapath offload Wlan Tx resources */
	struct {
		/* Resource info to be passed to IPA */
		qdf_dma_addr_t ipa_tcl_ring_base_paddr;
		void *ipa_tcl_ring_base_vaddr;
		uint32_t ipa_tcl_ring_size;
		qdf_dma_addr_t ipa_tcl_hp_paddr;
		uint32_t alloc_tx_buf_cnt;

		qdf_dma_addr_t ipa_wbm_ring_base_paddr;
		void *ipa_wbm_ring_base_vaddr;
		uint32_t ipa_wbm_ring_size;
		qdf_dma_addr_t ipa_wbm_tp_paddr;
		/* WBM2SW HP shadow paddr */
		qdf_dma_addr_t ipa_wbm_hp_shadow_paddr;

		/* TX buffers populated into the WBM ring */
		void **tx_buf_pool_vaddr_unaligned;
		qdf_dma_addr_t *tx_buf_pool_paddr_unaligned;
	} ipa_uc_tx_rsc;

	/* IPA uC datapath offload Wlan Rx resources */
	struct {
		/* Resource info to be passed to IPA */
		qdf_dma_addr_t ipa_reo_ring_base_paddr;
		void *ipa_reo_ring_base_vaddr;
		uint32_t ipa_reo_ring_size;
		qdf_dma_addr_t ipa_reo_tp_paddr;

		/* Resource info to be passed to firmware and IPA */
		qdf_dma_addr_t ipa_rx_refill_buf_ring_base_paddr;
		void *ipa_rx_refill_buf_ring_base_vaddr;
		uint32_t ipa_rx_refill_buf_ring_size;
		qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr;
	} ipa_uc_rx_rsc;

	qdf_atomic_t ipa_pipes_enabled;
	bool ipa_first_tx_db_access;
#endif

#ifdef WLAN_FEATURE_STATS_EXT
	struct {
		uint32_t rx_mpdu_received;
		uint32_t rx_mpdu_missed;
	} ext_stats;
	qdf_event_t rx_hw_stats_event;
	qdf_spinlock_t rx_hw_stats_lock;
	bool is_last_stats_ctx_init;
#endif /* WLAN_FEATURE_STATS_EXT */

	/* Smart monitor capability for HKv2 */
	uint8_t hw_nac_monitor_support;
	/* Flag to indicate if HTT v2 is enabled*/
	bool is_peer_map_unmap_v2;
	/* Per peer per Tid ba window size support */
	uint8_t per_tid_basize_max_tid;
	/* Soc level flag to enable da_war */
	uint8_t da_war_enabled;
	/* number of active ast entries */
	uint32_t num_ast_entries;
	/* rdk rate statistics context at soc level*/
	struct cdp_soc_rate_stats_ctx *rate_stats_ctx;
	/* rdk rate statistics control flag */
	bool rdkstats_enabled;

	/* 8021p PCP-TID map values */
	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
	/* TID map priority value */
	uint8_t tidmap_prty;
	/* Pointer to global per ring type specific configuration table */
	struct wlan_srng_cfg *wlan_srng_cfg;
	/* Num Tx outstanding on device */
	qdf_atomic_t num_tx_outstanding;
	/* Num Tx exception on device */
	qdf_atomic_t num_tx_exception;
	/* Num Tx allowed */
	uint32_t num_tx_allowed;
	/* Preferred HW mode */
	uint8_t preferred_hw_mode;

	/**
	 * Flag to indicate whether WAR to address single cache entry
	 * invalidation bug is enabled or not
	 */
	bool is_rx_fse_full_cache_invalidate_war_enabled;
#if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
	/**
	 * Pointer to DP RX Flow FST at SOC level if
	 * is_rx_flow_search_table_per_pdev is false
	 * TBD: rx_fst[num_macs] if we decide to have per mac FST
	 */
	struct dp_rx_fst *rx_fst;
#ifdef WLAN_SUPPORT_RX_FISA
	uint8_t fisa_enable;

	/**
	 * Params used for controlling the fisa aggregation dynamically
	 */
	struct {
		qdf_atomic_t skip_fisa;
		uint8_t fisa_force_flush[MAX_REO_DEST_RINGS];
	} skip_fisa_param;
#endif
#endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
	/* Full monitor mode support */
	bool full_mon_mode;
	/* SG supported for msdu continued packets from wbm release ring */
	bool wbm_release_desc_rx_sg_support;
	bool peer_map_attach_success;
	/* Flag to disable mac1 ring interrupts */
	bool disable_mac1_intr;
	/* Flag to disable mac2 ring interrupts */
	bool disable_mac2_intr;

	struct {
		/* 1st msdu in sg for msdu continued packets in wbm rel ring */
		bool wbm_is_first_msdu_in_sg;
		/* Wbm sg list head */
		qdf_nbuf_t wbm_sg_nbuf_head;
		/* Wbm sg list tail */
		qdf_nbuf_t wbm_sg_nbuf_tail;
		uint32_t wbm_sg_desc_msdu_len;
	} wbm_sg_param;
	/* Number of msdu exception descriptors */
	uint32_t num_msdu_exception_desc;

	/* RX buffer params */
	struct rx_buff_pool rx_buff_pool[MAX_PDEV_CNT];
	/* Save recent operation related variable */
	struct dp_last_op_info last_op_info;
	/* lists of peers/vdevs pending deletion, with their locks */
	TAILQ_HEAD(, dp_peer) inactive_peer_list;
	qdf_spinlock_t inactive_peer_list_lock;
	TAILQ_HEAD(, dp_vdev) inactive_vdev_list;
	qdf_spinlock_t inactive_vdev_list_lock;
	/* lock to protect vdev_id_map table*/
	qdf_spinlock_t vdev_map_lock;

	/* Flow Search Table is in CMEM */
	bool fst_in_cmem;

#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	/* Software latency manager context */
	struct dp_swlm swlm;
#endif
#ifdef FEATURE_RUNTIME_PM
	/* Dp runtime refcount */
	qdf_atomic_t dp_runtime_refcount;
#endif
};
1727 
#ifdef IPA_OFFLOAD
/**
 * struct dp_ipa_resources - Resources needed for IPA
 * @tx_ring: TX (TCL) ring shared memory
 * @tx_num_alloc_buffer: number of allocated TX buffers
 * @tx_comp_ring: TX completion ring shared memory
 * @rx_rdy_ring: RX ready ring shared memory
 * @rx_refill_ring: RX refill ring shared memory
 * @tx_comp_doorbell_paddr: IPA UC TX completion doorbell register paddr
 * @tx_comp_doorbell_vaddr: IPA UC TX completion doorbell register vaddr
 * @rx_ready_doorbell_paddr: IPA UC RX ready doorbell register paddr
 * @is_db_ddr_mapped: whether the doorbell registers are DDR mapped
 */
struct dp_ipa_resources {
	qdf_shared_mem_t tx_ring;
	uint32_t tx_num_alloc_buffer;

	qdf_shared_mem_t tx_comp_ring;
	qdf_shared_mem_t rx_rdy_ring;
	qdf_shared_mem_t rx_refill_ring;

	/* IPA UC doorbell registers paddr */
	qdf_dma_addr_t tx_comp_doorbell_paddr;
	uint32_t *tx_comp_doorbell_vaddr;
	qdf_dma_addr_t rx_ready_doorbell_paddr;

	bool is_db_ddr_mapped;
};
#endif
1748 
#define MAX_RX_MAC_RINGS 2
/* Same as NAC_MAX_CLIENT */
#define DP_NAC_MAX_CLIENT  24
1752 
1753 /*
1754  * 24 bits cookie size
1755  * 10 bits page id 0 ~ 1023 for MCL
1756  * 3 bits page id 0 ~ 7 for WIN
1757  * WBM Idle List Desc size = 128,
1758  * Num descs per page = 4096/128 = 32 for MCL
1759  * Num descs per page = 2MB/128 = 16384 for WIN
1760  */
1761 /*
1762  * Macros to setup link descriptor cookies - for link descriptors, we just
1763  * need first 3 bits to store bank/page ID for WIN. The
1764  * remaining bytes will be used to set a unique ID, which will
1765  * be useful in debugging
1766  */
1767 #ifdef MAX_ALLOC_PAGE_SIZE
1768 #define LINK_DESC_PAGE_ID_MASK  0x007FE0
1769 #define LINK_DESC_ID_SHIFT      5
1770 #define LINK_DESC_COOKIE(_desc_id, _page_id) \
1771 	((((_page_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_desc_id))
1772 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
1773 	(((_cookie) & LINK_DESC_PAGE_ID_MASK) >> LINK_DESC_ID_SHIFT)
1774 #else
1775 #define LINK_DESC_PAGE_ID_MASK  0x7
1776 #define LINK_DESC_ID_SHIFT      3
1777 #define LINK_DESC_COOKIE(_desc_id, _page_id) \
1778 	((((_desc_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_page_id))
1779 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
1780 	((_cookie) & LINK_DESC_PAGE_ID_MASK)
1781 #endif
1782 #define LINK_DESC_ID_START 0x8000
1783 
/* NAC (neighbour AP client) commands - same values as ieee80211_nac_param */
enum dp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	DP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	DP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	DP_NAC_PARAM_LIST,
};
1793 
1794 /**
1795  * struct dp_neighbour_peer - neighbour peer list type for smart mesh
1796  * @neighbour_peers_macaddr: neighbour peer's mac address
1797  * @neighbour_peer_list_elem: neighbour peer list TAILQ element
1798  * @ast_entry: ast_entry for neighbour peer
1799  * @rssi: rssi value
1800  */
1801 struct dp_neighbour_peer {
1802 	/* MAC address of neighbour's peer */
1803 	union dp_align_mac_addr neighbour_peers_macaddr;
1804 	struct dp_vdev *vdev;
1805 	struct dp_ast_entry *ast_entry;
1806 	uint8_t rssi;
1807 	/* node in the list of neighbour's peer */
1808 	TAILQ_ENTRY(dp_neighbour_peer) neighbour_peer_list_elem;
1809 };
1810 
#ifdef WLAN_TX_PKT_CAPTURE_ENH
/* NOTE(review): WLAN_TX_PKT_CAPTURE_ENH is redefined to 1 inside its own
 * #ifdef; if the build system defines it with a different value this is a
 * macro redefinition - confirm intentional.
 */
#define WLAN_TX_PKT_CAPTURE_ENH 1
#define DP_TX_PPDU_PROC_THRESHOLD 8
#define DP_TX_PPDU_PROC_TIMEOUT 10
#endif
1816 
1817 /**
1818  * struct ppdu_info - PPDU Status info descriptor
1819  * @ppdu_id: Unique ppduid assigned by firmware for every tx packet
1820  * @sched_cmdid: schedule command id, which will be same in a burst
1821  * @max_ppdu_id: wrap around for ppdu id
1822  * @last_tlv_cnt: Keep track for missing ppdu tlvs
1823  * @last_user: last ppdu processed for user
1824  * @is_ampdu: set if Ampdu aggregate
1825  * @nbuf: ppdu descriptor payload
1826  * @ppdu_desc: ppdu descriptor
1827  * @ppdu_info_list_elem: linked list of ppdu tlvs
1828  * @ppdu_info_queue_elem: Singly linked list (queue) of ppdu tlvs
1829  * @mpdu_compltn_common_tlv: Successful tlv counter from COMPLTN COMMON tlv
1830  * @mpdu_ack_ba_tlv: Successful tlv counter from ACK BA tlv
1831  */
1832 struct ppdu_info {
1833 	uint32_t ppdu_id;
1834 	uint32_t sched_cmdid;
1835 	uint32_t max_ppdu_id;
1836 	uint32_t tsf_l32;
1837 	uint16_t tlv_bitmap;
1838 	uint16_t last_tlv_cnt;
1839 	uint16_t last_user:8,
1840 		 is_ampdu:1;
1841 	qdf_nbuf_t nbuf;
1842 	struct cdp_tx_completion_ppdu *ppdu_desc;
1843 #ifdef WLAN_TX_PKT_CAPTURE_ENH
1844 	union {
1845 		TAILQ_ENTRY(ppdu_info) ppdu_info_dlist_elem;
1846 		STAILQ_ENTRY(ppdu_info) ppdu_info_slist_elem;
1847 	} ulist;
1848 #define ppdu_info_list_elem ulist.ppdu_info_dlist_elem
1849 #define ppdu_info_queue_elem ulist.ppdu_info_slist_elem
1850 #else
1851 	TAILQ_ENTRY(ppdu_info) ppdu_info_list_elem;
1852 #endif
1853 	uint8_t compltn_common_tlv;
1854 	uint8_t ack_ba_tlv;
1855 	bool done;
1856 };
1857 
1858 /**
1859  * struct msdu_completion_info - wbm msdu completion info
1860  * @ppdu_id            - Unique ppduid assigned by firmware for every tx packet
1861  * @peer_id            - peer_id
1862  * @tid                - tid which used during transmit
1863  * @first_msdu         - first msdu indication
1864  * @last_msdu          - last msdu indication
1865  * @msdu_part_of_amsdu - msdu part of amsdu
1866  * @transmit_cnt       - retried count
1867  * @status             - transmit status
1868  * @tsf                - timestamp which it transmitted
1869  */
1870 struct msdu_completion_info {
1871 	uint32_t ppdu_id;
1872 	uint16_t peer_id;
1873 	uint8_t tid;
1874 	uint8_t first_msdu:1,
1875 		last_msdu:1,
1876 		msdu_part_of_amsdu:1;
1877 	uint8_t transmit_cnt;
1878 	uint8_t status;
1879 	uint32_t tsf;
1880 };
1881 
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * struct rx_protocol_tag_map - protocol-type to tag mapping entry
 * @tag: the user configured tag for the said protocol type
 */
struct rx_protocol_tag_map {
	/* This is the user configured tag for the said protocol type */
	uint16_t tag;
};

#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
/**
 * struct rx_protocol_tag_stats - statistics for one protocol tag
 * @tag_ctr: counter for this tag
 */
struct rx_protocol_tag_stats {
	uint32_t tag_ctr;
};
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */

#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
1895 
#ifndef WLAN_TX_PKT_CAPTURE_ENH
/* Empty stubs so pdev/peer structures can embed tx-capture members
 * unconditionally when the feature is compiled out.
 */
struct dp_pdev_tx_capture {
};

struct dp_peer_tx_capture {
};
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
/* 64-bit template (magic) data to be set for Enhanced RX Monitor packets */
#define RX_MON_CAP_ENH_TRAILER 0xdeadc0dedeadda7a

/**
 * struct dp_rx_mon_enh_trailer_data - Data structure to set a known pattern
 * at end of each MSDU in monitor-lite mode
 * @reserved1: reserved for future use
 * @reserved2: reserved for future use
 * @flow_tag: flow tag value read from skb->cb
 * @protocol_tag: protocol tag value read from skb->cb
 */
struct dp_rx_mon_enh_trailer_data {
	uint16_t reserved1;
	uint16_t reserved2;
	uint16_t flow_tag;
	uint16_t protocol_tag;
};
#endif /* WLAN_RX_PKT_CAPTURE_ENH */
1922 
1923 #ifdef HTT_STATS_DEBUGFS_SUPPORT
1924 /* Number of debugfs entries created for HTT stats */
1925 #define PDEV_HTT_STATS_DBGFS_SIZE HTT_DBG_NUM_EXT_STATS
1926 
1927 /* struct pdev_htt_stats_dbgfs_priv - Structure to maintain debugfs information
1928  * of HTT stats
1929  * @pdev: dp pdev of debugfs entry
1930  * @stats_id: stats id of debugfs entry
1931  */
1932 struct pdev_htt_stats_dbgfs_priv {
1933 	struct dp_pdev *pdev;
1934 	uint16_t stats_id;
1935 };
1936 
1937 /* struct pdev_htt_stats_dbgfs_cfg - PDEV level data structure for debugfs
1938  * support for HTT stats
1939  * @debugfs_entry: qdf_debugfs directory entry
1940  * @m: qdf debugfs file handler
1941  * @pdev_htt_stats_dbgfs_ops: File operations of entry created
1942  * @priv: HTT stats debugfs private object
1943  * @htt_stats_dbgfs_event: HTT stats event for debugfs support
1944  * @lock: HTT stats debugfs lock
1945  * @htt_stats_dbgfs_msg_process: Function callback to print HTT stats
1946  */
1947 struct pdev_htt_stats_dbgfs_cfg {
1948 	qdf_dentry_t debugfs_entry[PDEV_HTT_STATS_DBGFS_SIZE];
1949 	qdf_debugfs_file_t m;
1950 	struct qdf_debugfs_fops
1951 			pdev_htt_stats_dbgfs_ops[PDEV_HTT_STATS_DBGFS_SIZE - 1];
1952 	struct pdev_htt_stats_dbgfs_priv priv[PDEV_HTT_STATS_DBGFS_SIZE - 1];
1953 	qdf_event_t htt_stats_dbgfs_event;
1954 	qdf_mutex_t lock;
1955 	void (*htt_stats_dbgfs_msg_process)(void *data, A_INT32 len);
1956 };
1957 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
1958 
/* PDEV level structure for data path */
struct dp_pdev {
	/**
	 * Re-use Memory Section Starts
	 */

	/* PDEV Id */
	int pdev_id;

	/* LMAC Id */
	int lmac_id;

	/* Target pdev  Id */
	int target_pdev_id;

	/* TXRX SOC handle */
	struct dp_soc *soc;

	/* Stuck count on monitor destination ring MPDU process */
	uint32_t mon_dest_ring_stuck_cnt;

	/* pdev deinit state flag */
	bool pdev_deinit;

	/* pdev status down or up required to handle dynamic hw
	 * mode switch between DBS and DBS_SBS.
	 * 1 = down
	 * 0 = up
	 */
	bool is_pdev_down;

	/* Second ring used to replenish rx buffers */
	struct dp_srng rx_refill_buf_ring2;

	/* Empty ring used by firmware to post rx buffers to the MAC */
	struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];

	/* regulatory-band to lmac id mapping */
	int ch_band_lmac_id_mapping[REG_BAND_UNKNOWN];

	/* wlan_cfg pdev ctxt*/
	 struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx;

	/**
	 * TODO: See if we need a ring map here for LMAC rings.
	 * 1. Monitor rings are currently planning to be processed on receiving
	 * PPDU end interrupts and hence won't need ring based interrupts.
	 * 2. Rx buffer rings will be replenished during REO destination
	 * processing and doesn't require regular interrupt handling - we will
	 * only handle low water mark interrupts which is not expected
	 * frequently
	 */

	/* VDEV list */
	TAILQ_HEAD(, dp_vdev) vdev_list;

	/* vdev list lock */
	qdf_spinlock_t vdev_list_lock;

	/* Number of vdevs this device have */
	uint16_t vdev_count;

	/* PDEV transmit lock */
	qdf_spinlock_t tx_lock;

#ifndef REMOVE_PKT_LOG
	bool pkt_log_init;
	/* Pktlog pdev */
	struct pktlog_dev_t *pl_dev;
#endif /* #ifndef REMOVE_PKT_LOG */

	/* Monitor mode interface and status storage */
	struct dp_vdev *monitor_vdev;

	/* Monitor mode operation channel */
	int mon_chan_num;

	/* Monitor mode operation frequency */
	qdf_freq_t mon_chan_freq;

	/* Monitor mode band */
	enum reg_wifi_band mon_chan_band;

	/* monitor mode lock */
	qdf_spinlock_t mon_lock;

	/*tx_mutex for me*/
	DP_MUTEX_TYPE tx_mutex;

	/* monitor */
	bool monitor_configured;

	/* Smart Mesh */
	bool filter_neighbour_peers;

	/*flag to indicate neighbour_peers_list not empty */
	bool neighbour_peers_added;
	/* smart mesh mutex */
	qdf_spinlock_t neighbour_peer_mutex;
	/* Neighbour peer list */
	TAILQ_HEAD(, dp_neighbour_peer) neighbour_peers_list;
	/* msdu chain head & tail */
	qdf_nbuf_t invalid_peer_head_msdu;
	qdf_nbuf_t invalid_peer_tail_msdu;

	/* Band steering  */
	/* TBD */

	/* PDEV level data path statistics */
	struct cdp_pdev_stats stats;

	/* Global RX decap mode for the device */
	enum htt_pkt_type rx_decap_mode;

	/* Enhanced Stats is enabled */
	bool enhanced_stats_en;

	/* advance filter mode and type*/
	uint8_t mon_filter_mode;
	uint16_t fp_mgmt_filter;
	uint16_t fp_ctrl_filter;
	uint16_t fp_data_filter;
	uint16_t mo_mgmt_filter;
	uint16_t mo_ctrl_filter;
	uint16_t mo_data_filter;
	uint16_t md_data_filter;

	/* count of TX descriptors currently outstanding to target */
	qdf_atomic_t num_tx_outstanding;
	int32_t tx_descs_max;

	qdf_atomic_t num_tx_exception;

	/* MCL specific local peer handle */
	struct {
		uint8_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
		uint8_t freelist;
		qdf_spinlock_t lock;
		struct dp_peer *map[OL_TXRX_NUM_LOCAL_PEER_IDS];
	} local_peer_ids;

	/* dscp_tid_map_*/
	uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX];

	struct hal_rx_ppdu_info ppdu_info;

	/* operating channel */
	struct {
		uint8_t num;
		uint8_t band;
		uint16_t freq;
	} operating_channel;

	qdf_nbuf_queue_t rx_status_q;
	uint32_t mon_ppdu_status;
	struct cdp_mon_status rx_mon_recv_status;
	/* monitor mode status/destination ring PPDU and MPDU count */
	struct cdp_pdev_mon_stats rx_mon_stats;
	/* to track duplicate link descriptor indications by HW for a WAR */
	uint64_t mon_last_linkdesc_paddr;
	/* to track duplicate buffer indications by HW for a WAR */
	uint32_t mon_last_buf_cookie;
	/* 128 bytes mpdu header queue per user for ppdu */
	qdf_nbuf_queue_t mpdu_q[MAX_MU_USERS];
	/* is this a mpdu header TLV and not msdu header TLV */
	bool is_mpdu_hdr[MAX_MU_USERS];
	/* per user 128 bytes msdu header list for MPDU */
	struct msdu_list msdu_list[MAX_MU_USERS];
	/* RX enhanced capture mode */
	uint8_t rx_enh_capture_mode;
	/* Rx per peer enhanced capture mode */
	bool rx_enh_capture_peer;
	struct dp_vdev *rx_enh_monitor_vdev;
	/* RX enhanced capture trailer enable/disable flag */
	bool is_rx_enh_capture_trailer_enabled;
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	/* RX per MPDU/PPDU information */
	struct cdp_rx_indication_mpdu mpdu_ind;
#endif
	/* pool addr for mcast enhance buff */
	struct {
		int size;
		uint32_t paddr;
		char *vaddr;
		struct dp_tx_me_buf_t *freelist;
		int buf_in_use;
		qdf_dma_mem_context(memctx);
	} me_buf;

	/* HMMC (hybrid multicast) TID override enable and value */
	bool hmmc_tid_override_en;
	uint8_t hmmc_tid;

	/* Number of VAPs with mcast enhancement enabled */
	qdf_atomic_t mc_num_vap_attached;

	qdf_atomic_t stats_cmd_complete;

#ifdef IPA_OFFLOAD
	ipa_uc_op_cb_type ipa_uc_op_cb;
	void *usr_ctxt;
	struct dp_ipa_resources ipa_resource;
#endif

	/* TBD */

	/* map this pdev to a particular Reo Destination ring */
	enum cdp_host_reo_dest_ring reo_dest;

	/* Packet log mode */
	uint8_t rx_pktlog_mode;

	/* WDI event handlers */
	struct wdi_event_subscribe_t **wdi_event_list;

	/* ppdu_id of last received HTT TX stats */
	uint32_t last_ppdu_id;
	struct {
		uint8_t last_user;
		qdf_nbuf_t buf;
	} tx_ppdu_info;

	bool tx_sniffer_enable;
	/* mirror copy mode */
	enum m_copy_mode mcopy_mode;
	bool cfr_rcc_mode;
	bool enable_reap_timer_non_pkt;
	bool bpr_enable;

	/* enable time latency check for tx completion */
	bool latency_capture_enable;

	/* enable calculation of delay stats*/
	bool delay_stats_flag;
	struct {
		uint32_t tx_ppdu_id;
		uint16_t tx_peer_id;
		uint32_t rx_ppdu_id;
	} m_copy_id;

	/* To check if PPDU Tx stats are enabled for Pktlog */
	bool pktlog_ppdu_stats;

	void *dp_txrx_handle; /* Advanced data path handle */

#ifdef ATH_SUPPORT_NAC_RSSI
	bool nac_rssi_filtering;
#endif
	/* list of ppdu tlvs */
	TAILQ_HEAD(, ppdu_info) ppdu_info_list;
	TAILQ_HEAD(, ppdu_info) sched_comp_ppdu_list;

	uint32_t sched_comp_list_depth;
	uint16_t delivered_sched_cmdid;
	uint16_t last_sched_cmdid;
	uint32_t tlv_count;
	uint32_t list_depth;
	uint32_t ppdu_id;
	bool first_nbuf;
	struct {
		qdf_nbuf_t last_nbuf; /*Ptr to mgmt last buf */
		uint8_t *mgmt_buf; /* Ptr to mgmt. payload in HTT ppdu stats */
		uint32_t mgmt_buf_len; /* Len of mgmt. payload in ppdu stats */
		uint32_t ppdu_id;
	} mgmtctrl_frm_info;

	/* Current noise-floor reading for the pdev channel */
	int16_t chan_noise_floor;

	/*
	 * For multiradio device, this flag indicates if
	 * this radio is primary or secondary.
	 *
	 * For HK 1.0, this is used for WAR for the AST issue.
	 * HK 1.x mandates creation of only 1 AST entry with same MAC address
	 * across 2 radios. is_primary indicates the radio on which DP should
	 * install HW AST entry if there is a request to add 2 AST entries
	 * with same MAC address across 2 radios
	 */
	uint8_t is_primary;
	/* Context of cal client timer */
	struct cdp_cal_client *cal_client_ctx;
	struct cdp_tx_sojourn_stats sojourn_stats;
	qdf_nbuf_t sojourn_buf;

	/* peer pointer for collecting invalid peer stats */
	struct dp_peer *invalid_peer;

	union dp_rx_desc_list_elem_t *free_list_head;
	union dp_rx_desc_list_elem_t *free_list_tail;
	/* Pdev level flag to check peer based pktlog enabled or
	 * disabled
	 */
	uint8_t dp_peer_based_pktlog;

	/* Cached peer_id from htt_peer_details_tlv */
	uint16_t fw_stats_peer_id;

	/* qdf_event for fw_peer_stats */
	qdf_event_t fw_peer_stats_event;

	/* User configured max number of tx buffers */
	uint32_t num_tx_allowed;

	/* unique cookie required for peer session */
	uint32_t next_peer_cookie;

	/*
	 * Run time enabled when the first protocol tag is added,
	 * run time disabled when the last protocol tag is deleted
	 */
	bool  is_rx_protocol_tagging_enabled;

#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	/*
	 * The protocol type is used as array index to save
	 * user provided tag info
	 */
	struct rx_protocol_tag_map rx_proto_tag_map[RX_PROTOCOL_TAG_MAX];

#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	/*
	 * Track msdus received from each reo ring separately to avoid
	 * simultaneous writes from different core
	 */
	struct rx_protocol_tag_stats
		reo_proto_tag_stats[MAX_REO_DEST_RINGS][RX_PROTOCOL_TAG_MAX];
	/* Track msdus received from exception ring separately */
	struct rx_protocol_tag_stats
		rx_err_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

	/* tx packet capture enhancement */
	enum cdp_tx_enh_capture_mode tx_capture_enabled;
	struct dp_pdev_tx_capture tx_capture;

	uint32_t *ppdu_tlv_buf; /* Buffer to hold HTT ppdu stats TLVs*/

#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	/**
	 * Pointer to DP Flow FST at SOC level if
	 * is_rx_flow_search_table_per_pdev is true
	 */
	struct dp_rx_fst *rx_fst;
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

#ifdef FEATURE_TSO_STATS
	/* TSO Id to index into TSO packet information */
	qdf_atomic_t tso_idx;
#endif /* FEATURE_TSO_STATS */

#ifdef WLAN_SUPPORT_DATA_STALL
	data_stall_detect_cb data_stall_detect_callback;
#endif /* WLAN_SUPPORT_DATA_STALL */

	struct dp_mon_filter **filter;	/* Monitor Filter pointer */

#ifdef QCA_SUPPORT_FULL_MON
	/* List to maintain all MPDUs for a PPDU in monitor mode */
	TAILQ_HEAD(, dp_mon_mpdu) mon_mpdu_q;

	/* TODO: define per-user mpdu list
	 * struct dp_mon_mpdu_list mpdu_list[MAX_MU_USERS];
	 */
	struct hal_rx_mon_desc_info *mon_desc;
#endif
	qdf_nbuf_t mcopy_status_nbuf;

	/* Flag to hold on to monitor destination ring */
	bool hold_mon_dest_ring;

#ifdef WLAN_ATF_ENABLE
	/* ATF stats enable */
	bool dp_atf_stats_enable;
#endif

	/* Maintains first status buffer's paddr of a PPDU */
	uint64_t status_buf_addr;
#ifdef HTT_STATS_DEBUGFS_SUPPORT
	/* HTT stats debugfs params */
	struct pdev_htt_stats_dbgfs_cfg *dbgfs_cfg;
#endif
	/* Flag to indicate monitor rings are initialized */
	uint8_t pdev_mon_init;
};
2341 
2342 struct dp_peer;
2343 
/* VDEV structure for data path state */
struct dp_vdev {
	/* OS device abstraction */
	qdf_device_t osdev;

	/* physical device that is the parent of this virtual device */
	struct dp_pdev *pdev;

	/* VDEV operating mode */
	enum wlan_op_mode opmode;

	/* VDEV subtype */
	enum wlan_op_subtype subtype;

	/* Tx encapsulation type for this VAP */
	enum htt_cmn_pkt_type tx_encap_type;

	/* Rx Decapsulation type for this VAP */
	enum htt_cmn_pkt_type rx_decap_type;

	/* WDS enabled */
	bool wds_enabled;

	/* MEC enabled */
	bool mec_enabled;

#ifdef QCA_SUPPORT_WDS_EXTENDED
	/* WDS extended enabled */
	bool wds_ext_enabled;
#endif /* QCA_SUPPORT_WDS_EXTENDED */

	/* WDS Aging timer period */
	uint32_t wds_aging_timer_val;

	/* NAWDS enabled */
	bool nawds_enabled;

	/* Multicast enhancement enabled */
	uint8_t mcast_enhancement_en;

	/* IGMP multicast enhancement enabled */
	uint8_t igmp_mcast_enhanc_en;

	/* HW TX Checksum Enabled Flag */
	uint8_t csum_enabled;

	/* vdev_id - ID used to specify a particular vdev to the target */
	uint8_t vdev_id;

	/* Default HTT meta data for this VDEV */
	/* TBD: check alignment constraints */
	uint16_t htt_tcl_metadata;

	/* Mesh mode vdev */
	uint32_t mesh_vdev;

	/* Mesh mode rx filter setting */
	uint32_t mesh_rx_filter;

	/* DSCP-TID mapping table ID */
	uint8_t dscp_tid_map_id;

	/* Address search type to be set in TX descriptor */
	uint8_t search_type;

	/*
	 * Flag to indicate if s/w tid classification should be
	 * skipped
	 */
	uint8_t skip_sw_tid_classification;

	/* Flag to enable peer authorization */
	uint8_t peer_authorize;

	/* AST hash value for BSS peer in HW valid for STA VAP*/
	uint16_t bss_ast_hash;

	/* vdev lmac_id */
	int lmac_id;

	/* multipass (VLAN) support enabled */
	bool multipass_en;

	/* Address search flags to be configured in HAL descriptor */
	uint8_t hal_desc_addr_search_flags;

	/* Handle to the OS shim SW's virtual device */
	ol_osif_vdev_handle osif_vdev;

	/* MAC address */
	union dp_align_mac_addr mac_addr;

	/* node in the pdev's list of vdevs */
	TAILQ_ENTRY(dp_vdev) vdev_list_elem;

	/* dp_peer list */
	TAILQ_HEAD(, dp_peer) peer_list;
	/* to protect peer_list */
	DP_MUTEX_TYPE peer_list_lock;

	/* RX call back function to flush GRO packets*/
	ol_txrx_rx_gro_flush_ind_fp osif_gro_flush;
	/* default RX call back function called by dp */
	ol_txrx_rx_fp osif_rx;
	/* callback to deliver rx frames to the OS */
	ol_txrx_rx_fp osif_rx_stack;
	/* Callback to handle rx fisa frames */
	ol_txrx_fisa_rx_fp osif_fisa_rx;
	ol_txrx_fisa_flush_fp osif_fisa_flush;

	/* call back function to flush out queued rx packets*/
	ol_txrx_rx_flush_fp osif_rx_flush;
	ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap;
	ol_txrx_get_key_fp osif_get_key;
	ol_txrx_tx_free_ext_fp osif_tx_free_ext;

#ifdef notyet
	/* callback to check if the msdu is an WAI (WAPI) frame */
	ol_rx_check_wai_fp osif_check_wai;
#endif

	/* proxy arp function */
	ol_txrx_proxy_arp_fp osif_proxy_arp;

	/* callback to hand rx monitor 802.11 MPDU to the OS shim */
	ol_txrx_rx_mon_fp osif_rx_mon;

	/* multicast-to-unicast conversion callback */
	ol_txrx_mcast_me_fp me_convert;

	/* completion function used by this vdev*/
	ol_txrx_completion_fp tx_comp;

	/* deferred vdev deletion state */
	struct {
		/* VDEV delete pending */
		int pending;
		/*
		* callback and a context argument to provide a
		* notification for when the vdev is deleted.
		*/
		ol_txrx_vdev_delete_cb callback;
		void *context;
	} delete;

	/* tx data delivery notification callback function */
	struct {
		ol_txrx_data_tx_cb func;
		void *ctxt;
	} tx_non_std_data_callback;


	/* safe mode control to bypass the encrypt and decipher process*/
	uint32_t safemode;

	/* rx filter related */
	uint32_t drop_unenc;
#ifdef notyet
	privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
	uint32_t filters_num;
#endif
	/* TDLS Link status */
	bool tdls_link_connected;
	bool is_tdls_frame;

	/* per vdev rx nbuf queue */
	qdf_nbuf_queue_t rxq;

	uint8_t tx_ring_id;
	struct dp_tx_desc_pool_s *tx_desc;
	struct dp_tx_ext_desc_pool_s *tx_ext_desc;

	/* VDEV Stats */
	struct cdp_vdev_stats stats;

	/* Is this a proxySTA VAP */
	bool proxysta_vdev;
	/* Is isolation mode enabled */
	bool isolation_vdev;

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	struct dp_tx_desc_pool_s *pool;
#endif
	/* AP BRIDGE enabled */
	bool ap_bridge_enabled;

	enum cdp_sec_type  sec_type;

	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
	bool raw_mode_war;


	/* AST hash index for BSS peer in HW valid for STA VAP*/
	uint16_t bss_ast_idx;

	/* Capture timestamp of previous tx packet enqueued */
	uint64_t prev_tx_enq_tstamp;

	/* Capture timestamp of previous rx packet delivered */
	uint64_t prev_rx_deliver_tstamp;

	/* 8021p PCP-TID mapping table ID */
	uint8_t tidmap_tbl_id;

	/* 8021p PCP-TID map values */
	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];

	/* TIDmap priority */
	uint8_t tidmap_prty;

#ifdef QCA_MULTIPASS_SUPPORT
	/* per-vlan key IV map for multipass */
	uint16_t *iv_vlan_map;

	/* dp_peer special list */
	TAILQ_HEAD(, dp_peer) mpass_peer_list;
	DP_MUTEX_TYPE mpass_peer_mutex;
#endif
	/* Extended data path handle */
	struct cdp_ext_vdev *vdev_dp_ext_handle;
#ifdef VDEV_PEER_PROTOCOL_COUNT
	/*
	 * Rx-Ingress and Tx-Egress are in the lower level DP layer
	 * Rx-Egress and Tx-ingress are handled in osif layer for DP
	 * So
	 * Rx-Egress and Tx-ingress mask definitions are in OSIF layer
	 * Rx-Ingress and Tx-Egress definitions are here below
	 */
#define VDEV_PEER_PROTOCOL_RX_INGRESS_MASK 1
#define VDEV_PEER_PROTOCOL_TX_INGRESS_MASK 2
#define VDEV_PEER_PROTOCOL_RX_EGRESS_MASK 4
#define VDEV_PEER_PROTOCOL_TX_EGRESS_MASK 8
	bool peer_protocol_count_track;
	int peer_protocol_count_dropmask;
#endif
	/* callback to collect connectivity stats */
	ol_txrx_stats_rx_fp stats_cb;
	uint32_t num_peers;
	/* entry to inactive_list*/
	TAILQ_ENTRY(dp_vdev) inactive_list_elem;

#ifdef WLAN_SUPPORT_RX_FISA
	/**
	 * Params used for controlling the fisa aggregation dynamically
	 */
	uint8_t fisa_disallowed[MAX_REO_DEST_RINGS];
	uint8_t fisa_force_flushed[MAX_REO_DEST_RINGS];
#endif
	/*
	 * Refcount for VDEV currently incremented when
	 * peer is created for VDEV
	 */
	qdf_atomic_t ref_cnt;
	/* per-module reference counts (see DP_MOD_ID_*) */
	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
	uint8_t num_latency_critical_conn;
};
2596 
2597 
/* Index into dp_peer security[]: 0 -> multicast key, 1 -> unicast key */
enum {
	dp_sec_mcast = 0,
	dp_sec_ucast
};
2602 
2603 #ifdef WDS_VENDOR_EXTENSION
/* WDS vendor-extension policy bits controlling 4-address frame handling */
typedef struct {
	uint8_t	wds_tx_mcast_4addr:1,
		wds_tx_ucast_4addr:1,
		wds_rx_filter:1,      /* enforce rx filter */
		wds_rx_ucast_4addr:1, /* when set, accept 4addr unicast frames    */
		wds_rx_mcast_4addr:1;  /* when set, accept 4addr multicast frames  */

} dp_ecm_policy;
2612 #endif
2613 
2614 /*
2615  * struct dp_peer_cached_bufq - cached_bufq to enqueue rx packets
2616  * @cached_bufq: nbuff list to enqueue rx packets
2617  * @bufq_lock: spinlock for nbuff list access
2618  * @thres: maximum threshold for number of rx buff to enqueue
2619  * @entries: number of entries
2620  * @dropped: number of packets dropped
2621  */
2622 struct dp_peer_cached_bufq {
2623 	qdf_list_t cached_bufq;
2624 	qdf_spinlock_t bufq_lock;
2625 	uint32_t thresh;
2626 	uint32_t entries;
2627 	uint32_t dropped;
2628 };
2629 
2630 /**
2631  * enum dp_peer_ast_flowq
2632  * @DP_PEER_AST_FLOWQ_HI_PRIO: Hi Priority flow queue
2633  * @DP_PEER_AST_FLOWQ_LOW_PRIO: Low priority flow queue
2634  * @DP_PEER_AST_FLOWQ_UDP: flow queue type is UDP
2635  * @DP_PEER_AST_FLOWQ_NON_UDP: flow queue type is Non UDP
2636  */
2637 enum dp_peer_ast_flowq {
2638 	DP_PEER_AST_FLOWQ_HI_PRIO,
2639 	DP_PEER_AST_FLOWQ_LOW_PRIO,
2640 	DP_PEER_AST_FLOWQ_UDP,
2641 	DP_PEER_AST_FLOWQ_NON_UDP,
2642 	DP_PEER_AST_FLOWQ_MAX,
2643 };
2644 
2645 /*
2646  * struct dp_ast_flow_override_info - ast override info
2647  * @ast_index - ast indexes in peer map message
2648  * @ast_valid_mask - ast valid mask for each ast index
2649  * @ast_flow_mask - ast flow mask for each ast index
2650  * @tid_valid_low_pri_mask - per tid mask for low priority flow
2651  * @tid_valid_hi_pri_mask - per tid mask for hi priority flow
2652  */
2653 struct dp_ast_flow_override_info {
2654 	uint16_t ast_idx[DP_PEER_AST_FLOWQ_MAX];
2655 	uint8_t ast_valid_mask;
2656 	uint8_t ast_flow_mask[DP_PEER_AST_FLOWQ_MAX];
2657 	uint8_t tid_valid_low_pri_mask;
2658 	uint8_t tid_valid_hi_pri_mask;
2659 };
2660 
2661 /*
2662  * struct dp_peer_ast_params - ast parameters for a msdu flow-queue
2663  * @ast_index - ast index populated by FW
2664  * @is_valid - ast flow valid mask
2665  * @valid_tid_mask - per tid mask for this ast index
2666  * @flowQ - flow queue id associated with this ast index
2667  */
2668 struct dp_peer_ast_params {
2669 	uint16_t ast_idx;
2670 	uint8_t is_valid;
2671 	uint8_t valid_tid_mask;
2672 	uint8_t flowQ;
2673 };
2674 
#ifdef WLAN_SUPPORT_MSCS
/*MSCS Procedure based macros */
#define IEEE80211_MSCS_MAX_ELEM_SIZE    5
#define IEEE80211_TCLAS_MASK_CLA_TYPE_4  4
/*
 * struct dp_peer_mscs_parameter - MSCS database obtained from
 * MSCS Request and Response in the control path. This data is used
 * by the AP to find out what priority to set based on the tuple
 * classification during packet processing.
 * @user_priority_bitmap - User priority bitmap obtained during
 * handshake
 * @user_priority_limit - User priority limit obtained during
 * handshake
 * @classifier_mask - params to be compared during processing
 */
struct dp_peer_mscs_parameter {
	uint8_t user_priority_bitmap;
	uint8_t user_priority_limit;
	uint8_t classifier_mask;
};
#endif
2695 #endif
2696 
2697 #ifdef QCA_SUPPORT_WDS_EXTENDED
2698 #define WDS_EXT_PEER_INIT_BIT 0
2699 
2700 /**
2701  * struct dp_wds_ext_peer - wds ext peer structure
2702  * This is used when wds extended feature is enabled
2703  * both compile time and run time. It is created
2704  * when 1st 4 address frame is received from
2705  * wds backhaul.
2706  * @osif_vdev: Handle to the OS shim SW's virtual device
2707  * @init: wds ext netdev state
2708  */
2709 struct dp_wds_ext_peer {
2710 	ol_osif_peer_handle osif_peer;
2711 	unsigned long init;
2712 };
2713 #endif /* QCA_SUPPORT_WDS_EXTENDED */
2714 
/* Peer structure for data path state */
struct dp_peer {
	/* VDEV to which this peer is associated */
	struct dp_vdev *vdev;

	/* self AST entry of this peer */
	struct dp_ast_entry *self_ast_entry;

	/* overall reference count */
	qdf_atomic_t ref_cnt;

	/* peer ID for this peer */
	uint16_t peer_id;

	/* peer MAC address */
	union dp_align_mac_addr mac_addr;

	/* node in the vdev's list of peers */
	TAILQ_ENTRY(dp_peer) peer_list_elem;
	/* node in the hash table bin's list of peers */
	TAILQ_ENTRY(dp_peer) hash_list_elem;

	/* TID structures */
	struct dp_rx_tid rx_tid[DP_MAX_TIDS];
	struct dp_peer_tx_capture tx_capture;


	/* TBD: No transmit TID state required? */

	struct {
		enum cdp_sec_type sec_type;
		u_int32_t michael_key[2]; /* relevant for TKIP */
	} security[2]; /* 0 -> multicast, 1 -> unicast */

	/* NAWDS Flag and Bss Peer bit */
	uint16_t nawds_enabled:1, /* NAWDS flag */
		bss_peer:1, /* set for bss peer */
		wds_enabled:1, /* WDS peer */
		authorize:1, /* Set when authorized */
		nac:1, /* NAC Peer*/
		tx_cap_enabled:1, /* Peer's tx-capture is enabled */
		rx_cap_enabled:1, /* Peer's rx-capture is enabled */
		valid:1, /* valid bit */
		in_twt:1, /* in TWT session */
		delete_in_progress:1, /* Indicate kickout sent */
		sta_self_peer:1; /* Indicate STA self peer */

#ifdef QCA_SUPPORT_PEER_ISOLATION
	bool isolation; /* enable peer isolation for this peer */
#endif

	/* MCL specific peer local id */
	uint16_t local_id;
	enum ol_txrx_peer_state state;
	qdf_spinlock_t peer_info_lock;

	/* Peer Stats */
	struct cdp_peer_stats stats;

	/* Peer extended stats */
	struct cdp_peer_ext_stats *pext_stats;

	/* list of AST entries owned by this peer */
	TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
	/* TBD */

#ifdef WDS_VENDOR_EXTENSION
	dp_ecm_policy wds_ecm;
#endif

	/* Active Block ack sessions */
	uint16_t active_ba_session_cnt;

	/* Current HW buffersize setting */
	uint16_t hw_buffer_size;

	/*
	 * Flag to check if sessions with 256 buffersize
	 * should be terminated.
	 */
	uint8_t kill_256_sessions;
	qdf_atomic_t is_default_route_set;
	/* Peer level flag to check peer based pktlog enabled or
	 * disabled
	 */
	uint8_t peer_based_pktlog_filter;

	/* rdk statistics context */
	struct cdp_peer_rate_stats_ctx *rdkstats_ctx;
	/* average sojourn time */
	qdf_ewma_tx_lag avg_sojourn_msdu[CDP_DATA_TID_MAX];

#ifdef QCA_MULTIPASS_SUPPORT
	/* node in the special peer list element */
	TAILQ_ENTRY(dp_peer) mpass_peer_list_elem;
	/* vlan id for key */
	uint16_t vlan_id;
#endif

#ifdef PEER_CACHE_RX_PKTS
	qdf_atomic_t flush_in_progress;
	struct dp_peer_cached_bufq bufq_info;
#endif
#ifdef FEATURE_PERPKT_INFO
	/* delayed ba ppdu stats handling */
	struct cdp_delayed_tx_completion_ppdu_user delayed_ba_ppdu_stats;
	/* delayed ba flag */
	bool last_delayed_ba;
	/* delayed ba ppdu id */
	uint32_t last_delayed_ba_ppduid;
#endif
#ifdef QCA_PEER_MULTIQ_SUPPORT
	struct dp_peer_ast_params peer_ast_flowq_idx[DP_PEER_AST_FLOWQ_MAX];
#endif
	/* entry to inactive_list*/
	TAILQ_ENTRY(dp_peer) inactive_list_elem;

	/* per-module reference counts (see DP_MOD_ID_*) */
	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];

	uint8_t peer_state;
	qdf_spinlock_t peer_state_lock;
#ifdef WLAN_SUPPORT_MSCS
	struct dp_peer_mscs_parameter mscs_ipv4_parameter, mscs_ipv6_parameter;
	bool mscs_active;
#endif
#ifdef QCA_SUPPORT_WDS_EXTENDED
	struct dp_wds_ext_peer wds_ext;
	ol_txrx_rx_fp osif_rx;
#endif
};
2841 
2842 /*
2843  * dp_invalid_peer_msg
2844  * @nbuf: data buffer
2845  * @wh: 802.11 header
2846  * @vdev_id: id of vdev
2847  */
2848 struct dp_invalid_peer_msg {
2849 	qdf_nbuf_t nbuf;
2850 	struct ieee80211_frame *wh;
2851 	uint8_t vdev_id;
2852 };
2853 
2854 /*
2855  * dp_tx_me_buf_t: ME buffer
2856  * next: pointer to next buffer
2857  * data: Destination Mac address
2858  * paddr_macbuf: physical address for dest_mac
2859  */
2860 struct dp_tx_me_buf_t {
2861 	/* Note: ME buf pool initialization logic expects next pointer to
2862 	 * be the first element. Dont add anything before next */
2863 	struct dp_tx_me_buf_t *next;
2864 	uint8_t data[QDF_MAC_ADDR_SIZE];
2865 	qdf_dma_addr_t paddr_macbuf;
2866 };
2867 
2868 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
2869 struct hal_rx_fst;
2870 
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
/* struct dp_rx_fse - software flow search entry mirroring one HW FSE */
struct dp_rx_fse {
	/* HAL Rx Flow Search Entry which matches HW definition */
	void *hal_rx_fse;
	/* Toeplitz hash value */
	uint32_t flow_hash;
	/* Flow index, equivalent to hash value truncated to FST size */
	uint32_t flow_id;
	/* Stats tracking for this flow */
	struct cdp_flow_stats stats;
	/* Flag indicating whether flow is IPv4 address tuple */
	uint8_t is_ipv4_addr_entry;
	/* Flag indicating whether flow is valid */
	uint8_t is_valid;
};
2886 
/* struct dp_rx_fst - flow search table state (RX flow tag variant) */
struct dp_rx_fst {
	/* Software (DP) FST */
	uint8_t *base;
	/* Pointer to HAL FST */
	struct hal_rx_fst *hal_rx_fst;
	/* Base physical address of HAL RX HW FST */
	uint64_t hal_rx_fst_base_paddr;
	/* Maximum number of flows FSE supports */
	uint16_t max_entries;
	/* Num entries in flow table */
	uint16_t num_entries;
	/* SKID Length */
	uint16_t max_skid_length;
	/* Hash mask to obtain legitimate hash entry */
	uint32_t hash_mask;
	/* Timer for bundling of flows */
	qdf_timer_t cache_invalidate_timer;
	/**
	 * Flag which tracks whether cache update
	 * is needed on timer expiry
	 */
	qdf_atomic_t is_cache_update_pending;
	/* Flag to indicate completion of FSE setup in HW/FW */
	bool fse_setup_done;
};
2912 
2913 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_rx_fse)
2914 #elif WLAN_SUPPORT_RX_FISA
2915 
/* struct dp_fisa_stats - FISA error/debug counters */
struct dp_fisa_stats {
	/* flow index invalid from RX HW TLV */
	uint32_t invalid_flow_index;
	/* count of flows whose REO destination did not match */
	uint32_t reo_mismatch;
};
2921 
/* enum fisa_aggr_ret - result of attempting FISA aggregation on an MSDU */
enum fisa_aggr_ret {
	FISA_AGGR_DONE,
	FISA_AGGR_NOT_ELIGIBLE,
	FISA_FLUSH_FLOW
};
2927 
/* struct dp_fisa_rx_sw_ft - per-flow software state for FISA RX aggregation */
struct dp_fisa_rx_sw_ft {
	/* HAL Rx Flow Search Entry which matches HW definition */
	void *hw_fse;
	/* Toeplitz hash value */
	uint32_t flow_hash;
	/* Flow index, equivalent to hash value truncated to FST size */
	uint32_t flow_id;
	/* Stats tracking for this flow */
	struct cdp_flow_stats stats;
	/* Flag indicating whether flow is IPv4 address tuple */
	uint8_t is_ipv4_addr_entry;
	/* Flag indicating whether flow is valid */
	uint8_t is_valid;
	uint8_t is_populated;
	uint8_t is_flow_udp;
	uint8_t is_flow_tcp;
	/* head of the skb aggregate being built for this flow */
	qdf_nbuf_t head_skb;
	uint16_t cumulative_l4_checksum;
	uint16_t adjusted_cumulative_ip_length;
	uint16_t cur_aggr;
	uint16_t napi_flush_cumulative_l4_checksum;
	uint16_t napi_flush_cumulative_ip_length;
	qdf_nbuf_t last_skb;
	uint32_t head_skb_ip_hdr_offset;
	uint32_t head_skb_l4_hdr_offset;
	struct cdp_rx_flow_tuple_info rx_flow_tuple_info;
	uint8_t napi_id;
	struct dp_vdev *vdev;
	uint64_t bytes_aggregated;
	uint32_t flush_count;
	uint32_t aggr_count;
	uint8_t do_not_aggregate;
	/* [sic] cumulative IP length reported by HAL — name has legacy typo */
	uint16_t hal_cumultive_ip_len;
	struct dp_soc *soc_hdl;
	/* last aggregate count fetched from RX PKT TLV */
	uint32_t last_hal_aggr_count;
	uint32_t cur_aggr_gso_size;
	struct udphdr *head_skb_udp_hdr;
	uint16_t frags_cumulative_len;
	/* CMEM parameters */
	uint32_t cmem_offset;
	uint32_t metadata;
	uint32_t reo_dest_indication;
};
2972 
2973 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_fisa_rx_sw_ft)
2974 #define MAX_FSE_CACHE_FL_HST 10
2975 /**
2976  * struct fse_cache_flush_history - Debug history cache flush
2977  * @timestamp: Entry update timestamp
2978  * @flows_added: Number of flows added for this flush
2979  * @flows_deleted: Number of flows deleted for this flush
2980  */
2981 struct fse_cache_flush_history {
2982 	uint64_t timestamp;
2983 	uint32_t flows_added;
2984 	uint32_t flows_deleted;
2985 };
2986 
/**
 * struct dp_rx_fst - RX Flow Search Table context (FISA variant).
 * Holds the SW shadow table, HAL/HW FST handles, FSE cache-flush
 * batching state and the deferred CMEM update machinery.
 *
 * NOTE(review): comments added to previously undocumented fields below
 * are inferred from field names; verify against dp_fisa_rx.c.
 */
struct dp_rx_fst {
	/* Software (DP) FST */
	uint8_t *base;
	/* Pointer to HAL FST */
	struct hal_rx_fst *hal_rx_fst;
	/* Base physical address of HAL RX HW FST */
	uint64_t hal_rx_fst_base_paddr;
	/* Maximum number of flows FSE supports */
	uint16_t max_entries;
	/* Num entries in flow table */
	uint16_t num_entries;
	/* SKID Length */
	uint16_t max_skid_length;
	/* Hash mask to obtain legitimate hash entry */
	uint32_t hash_mask;
	/* Lock for adding/deleting entries of FST */
	qdf_spinlock_t dp_rx_fst_lock;
	/* Number of flows added since init */
	uint32_t add_flow_count;
	/* Number of flows deleted since init */
	uint32_t del_flow_count;
	/* Number of hash collisions seen while adding flows */
	uint32_t hash_collision_cnt;
	/* Back-pointer to the DP SoC */
	struct dp_soc *soc_hdl;
	/* Set while an FSE cache flush command is outstanding to FW */
	qdf_atomic_t fse_cache_flush_posted;
	/* Timer used to batch FSE cache flush commands to FW */
	qdf_timer_t fse_cache_flush_timer;
	/* Allow FSE cache flush cmd to FW */
	bool fse_cache_flush_allow;
	/* Debug history of recent FSE cache flushes */
	struct fse_cache_flush_history cache_fl_rec[MAX_FSE_CACHE_FL_HST];
	/* FISA DP stats */
	struct dp_fisa_stats stats;

	/* CMEM params */
	/* Deferred work that applies FST updates to CMEM */
	qdf_work_t fst_update_work;
	/* Workqueue on which fst_update_work runs */
	qdf_workqueue_t *fst_update_wq;
	/* Pending FST update requests consumed by fst_update_work */
	qdf_list_t fst_update_list;
	/* Counter used to stamp flow metadata */
	uint32_t meta_counter;
	/* Base address of the FST in CMEM */
	uint32_t cmem_ba;
	/* Per-REO-ring locks protecting SW flow-table entries */
	qdf_spinlock_t dp_rx_sw_ft_lock[MAX_REO_DEST_RINGS];
	/* Signalled when FW/HW acks a CMEM FST operation */
	qdf_event_t cmem_resp_event;
	/* True if HW/FW supports deleting individual flows */
	bool flow_deletion_supported;
	/* True when the FST resides in CMEM (rather than DDR) */
	bool fst_in_cmem;
	/* True while the driver is PM-suspended */
	bool pm_suspended;
};
3028 
3029 #endif /* WLAN_SUPPORT_RX_FISA */
3030 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
3031 
3032 #ifdef WLAN_FEATURE_STATS_EXT
3033 /*
3034  * dp_req_rx_hw_stats_t: RX peer HW stats query structure
3035  * @pending_tid_query_cnt: pending tid stats count which waits for REO status
3036  * @is_query_timeout: flag to show is stats query timeout
3037  */
3038 struct dp_req_rx_hw_stats_t {
3039 	qdf_atomic_t pending_tid_stats_cnt;
3040 	bool is_query_timeout;
3041 };
3042 #endif
3043 
/**
 * dp_hw_link_desc_pool_banks_free() - free the memory banks backing the
 *	HW link descriptor pool
 * @soc: DP SoC context
 * @mac_id: MAC id whose banks are released (NOTE(review): exact id
 *	    semantics are defined at the implementation — confirm)
 */
void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id);

/**
 * dp_hw_link_desc_pool_banks_alloc() - allocate memory banks for the
 *	HW link descriptor pool
 * @soc: DP SoC context
 * @mac_id: MAC id to allocate banks for
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */
QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc,
					    uint32_t mac_id);

/**
 * dp_link_desc_ring_replenish() - replenish the link descriptor ring
 *	with the allocated link descriptors
 * @soc: DP SoC context
 * @mac_id: MAC id whose ring is replenished
 */
void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id);
3048 
3049 #endif /* _DP_TYPES_H_ */
3050