xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_types.h (revision 6d768494e5ce14eb1603a695c86739d12ecc6ec2)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _DP_TYPES_H_
20 #define _DP_TYPES_H_
21 
22 #include <qdf_types.h>
23 #include <qdf_nbuf.h>
24 #include <qdf_lock.h>
25 #include <qdf_atomic.h>
26 #include <qdf_util.h>
27 #include <qdf_list.h>
28 #include <qdf_lro.h>
29 #include <queue.h>
30 #include <htt_common.h>
31 
32 #include <cdp_txrx_cmn.h>
33 #ifdef DP_MOB_DEFS
34 #include <cds_ieee80211_common.h>
35 #endif
36 #include <wdi_event_api.h>    /* WDI subscriber event list */
37 
38 #include "hal_hw_headers.h"
39 #include <hal_tx.h>
40 #include <hal_reo.h>
41 #include "wlan_cfg.h"
42 #include "hal_rx.h"
43 #include <hal_api.h>
44 #include <hal_api_mon.h>
45 #include "hal_rx.h"
46 //#include "hal_rx_flow.h"
47 
48 #define MAX_BW 7
49 #define MAX_RETRIES 4
50 #define MAX_RECEPTION_TYPES 4
51 
52 #define MINIDUMP_STR_SIZE 25
53 #ifndef REMOVE_PKT_LOG
54 #include <pktlog.h>
55 #endif
56 
57 #ifdef WLAN_TX_PKT_CAPTURE_ENH
58 #include "dp_tx_capture.h"
59 #endif
60 
61 #define REPT_MU_MIMO 1
62 #define REPT_MU_OFDMA_MIMO 3
63 #define DP_VO_TID 6
64  /** MAX TID MAPS AVAILABLE PER PDEV */
65 #define DP_MAX_TID_MAPS 16
66 /** pad DSCP_TID_MAP_MAX with 6 to fix oob issue */
67 #define DSCP_TID_MAP_MAX (64 + 6)
68 #define DP_IP_DSCP_SHIFT 2
69 #define DP_IP_DSCP_MASK 0x3f
70 #define DP_FC0_SUBTYPE_QOS 0x80
71 #define DP_QOS_TID 0x0f
72 #define DP_IPV6_PRIORITY_SHIFT 20
73 #define MAX_MON_LINK_DESC_BANKS 2
74 #define DP_VDEV_ALL 0xff
75 
76 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
77 #define MAX_PDEV_CNT 1
78 #else
79 #define MAX_PDEV_CNT 3
80 #endif
81 
82 /* Max no. of VDEV per PSOC */
83 #ifdef WLAN_PSOC_MAX_VDEVS
84 #define MAX_VDEV_CNT WLAN_PSOC_MAX_VDEVS
85 #else
86 #define MAX_VDEV_CNT 51
87 #endif
88 
89 #define MAX_TXDESC_POOLS 4
90 #define MAX_RXDESC_POOLS 4
91 #define MAX_REO_DEST_RINGS 4
92 #define EXCEPTION_DEST_RING_ID 0
93 #define MAX_TCL_DATA_RINGS 4
94 #define MAX_IDLE_SCATTER_BUFS 16
95 #define DP_MAX_IRQ_PER_CONTEXT 12
96 #define DEFAULT_HW_PEER_ID 0xffff
97 
98 #define WBM_INT_ERROR_ALL 0
99 #define WBM_INT_ERROR_REO_NULL_BUFFER 1
100 #define WBM_INT_ERROR_REO_NULL_LINK_DESC 2
101 #define WBM_INT_ERROR_REO_NULL_MSDU_BUFF 3
102 #define WBM_INT_ERROR_REO_BUFF_REAPED 4
103 #define MAX_WBM_INT_ERROR_REASONS 5
104 
105 #define MAX_TX_HW_QUEUES MAX_TCL_DATA_RINGS
106 /* Maximum retries for Delba per tid per peer */
107 #define DP_MAX_DELBA_RETRY 3
108 
109 #define PCP_TID_MAP_MAX 8
110 #define MAX_MU_USERS 37
111 
112 #define REO_CMD_EVENT_HIST_MAX 64
113 
114 /* 2G PHYB */
115 #define PHYB_2G_LMAC_ID 2
116 #define PHYB_2G_TARGET_PDEV_ID 2
117 
#ifndef REMOVE_PKT_LOG
/**
 * enum rx_pktlog_mode - Rx packet-log capture mode for a pdev
 * @DP_RX_PKTLOG_DISABLED: Rx packet logging is disabled
 * @DP_RX_PKTLOG_FULL: full Rx packet-log mode
 * @DP_RX_PKTLOG_LITE: lite Rx packet-log mode
 */
enum rx_pktlog_mode {
	DP_RX_PKTLOG_DISABLED = 0,
	DP_RX_PKTLOG_FULL,
	DP_RX_PKTLOG_LITE,
};
#endif
125 
/**
 * enum m_copy_mode - Available mcopy modes
 * @M_COPY_DISABLED: mcopy disabled
 * @M_COPY: basic mcopy mode
 * @M_COPY_EXTENDED: extended mcopy mode
 *
 * NOTE(review): values are non-contiguous (0/2/4) — presumably they must
 * match an encoding expected elsewhere (target/htt); do not renumber.
 */
enum m_copy_mode {
	M_COPY_DISABLED = 0,
	M_COPY = 2,
	M_COPY_EXTENDED = 4,
};
134 
/**
 * struct msdu_list - singly-chained list of MSDU nbufs with O(1) append
 * @head: first nbuf in the list
 * @tail: last nbuf in the list
 * @sum_len: accumulated length of the queued nbufs
 *
 * NOTE(review): 'uint32' (not 'uint32_t') is non-standard — presumably
 * provided by a_types/qdf headers; confirm it is the intended type.
 */
struct msdu_list {
	qdf_nbuf_t head;
	qdf_nbuf_t tail;
	uint32 sum_len;
};
140 
141 struct dp_soc_cmn;
142 struct dp_pdev;
143 struct dp_vdev;
144 struct dp_tx_desc_s;
145 struct dp_soc;
146 union dp_rx_desc_list_elem_t;
147 struct cdp_peer_rate_stats_ctx;
148 struct cdp_soc_rate_stats_ctx;
149 struct dp_rx_fst;
150 struct dp_mon_filter;
151 struct dp_mon_mpdu;
152 
153 #define DP_PDEV_ITERATE_VDEV_LIST(_pdev, _vdev) \
154 	TAILQ_FOREACH((_vdev), &(_pdev)->vdev_list, vdev_list_elem)
155 
156 #define DP_VDEV_ITERATE_PEER_LIST(_vdev, _peer) \
157 	TAILQ_FOREACH((_peer), &(_vdev)->peer_list, peer_list_elem)
158 
159 #define DP_PEER_ITERATE_ASE_LIST(_peer, _ase, _temp_ase) \
160 	TAILQ_FOREACH_SAFE((_ase), &peer->ast_entry_list, ase_list_elem, (_temp_ase))
161 
162 #define DP_MUTEX_TYPE qdf_spinlock_t
163 
164 #define DP_FRAME_IS_MULTICAST(_a)  (*(_a) & 0x01)
165 #define DP_FRAME_IS_IPV4_MULTICAST(_a)  (*(_a) == 0x01)
166 
167 #define DP_FRAME_IS_IPV6_MULTICAST(_a)         \
168     ((_a)[0] == 0x33 &&                         \
169      (_a)[1] == 0x33)
170 
171 #define DP_FRAME_IS_BROADCAST(_a)              \
172     ((_a)[0] == 0xff &&                         \
173      (_a)[1] == 0xff &&                         \
174      (_a)[2] == 0xff &&                         \
175      (_a)[3] == 0xff &&                         \
176      (_a)[4] == 0xff &&                         \
177      (_a)[5] == 0xff)
178 #define DP_FRAME_IS_SNAP(_llc) ((_llc)->llc_dsap == 0xaa && \
179 		(_llc)->llc_ssap == 0xaa && \
180 		(_llc)->llc_un.type_snap.control == 0x3)
181 #define DP_FRAME_IS_LLC(typeorlen) ((typeorlen) >= 0x600)
182 #define DP_FRAME_FC0_TYPE_MASK 0x0c
183 #define DP_FRAME_FC0_TYPE_DATA 0x08
184 #define DP_FRAME_IS_DATA(_frame) \
185 	(((_frame)->i_fc[0] & DP_FRAME_FC0_TYPE_MASK) == DP_FRAME_FC0_TYPE_DATA)
186 
187 /**
188  * macros to convert hw mac id to sw mac id:
189  * mac ids used by hardware start from a value of 1 while
190  * those in host software start from a value of 0. Use the
191  * macros below to convert between mac ids used by software and
192  * hardware
193  */
194 #define DP_SW2HW_MACID(id) ((id) + 1)
195 #define DP_HW2SW_MACID(id) ((id) > 0 ? ((id) - 1) : 0)
196 
/**
 * Number of Tx Queues
 * enum and macros to define how many threshold levels are used
 * for the AC based flow control
 */
#ifdef QCA_AC_BASED_FLOW_CONTROL
/**
 * enum dp_fl_ctrl_threshold - per-AC flow-control threshold level index
 * @DP_TH_BE_BK: threshold level for Best Effort / Background ACs
 * @DP_TH_VI: threshold level for Video AC
 * @DP_TH_VO: threshold level for Voice AC
 * @DP_TH_HI: threshold level for high-priority traffic
 */
enum dp_fl_ctrl_threshold {
	DP_TH_BE_BK = 0,
	DP_TH_VI,
	DP_TH_VO,
	DP_TH_HI,
};

/* number of threshold levels (array size for per-level thresholds) */
#define FL_TH_MAX (4)
/* per-level thresholds expressed as a percentage of the pool */
#define FL_TH_VI_PERCENTAGE (80)
#define FL_TH_VO_PERCENTAGE (60)
#define FL_TH_HI_PERCENTAGE (40)
#endif
215 
/**
 * enum dp_intr_mode - interrupt delivery mode used by the data path
 * @DP_INTR_INTEGRATED: Line interrupts
 * @DP_INTR_MSI: MSI interrupts
 * @DP_INTR_POLL: Polling (timer-driven, no hardware interrupts)
 */
enum dp_intr_mode {
	DP_INTR_INTEGRATED = 0,
	DP_INTR_MSI,
	DP_INTR_POLL,
};
227 
/**
 * enum dp_tx_frm_type - type of frame being transmitted
 * @dp_tx_frm_std: Regular frame, no added header fragments
 * @dp_tx_frm_tso: TSO segment, with a modified IP header added
 * @dp_tx_frm_sg: SG segment
 * @dp_tx_frm_audio: Audio frames, a custom LLC/SNAP header added
 * @dp_tx_frm_me: Multicast to Unicast Converted frame
 * @dp_tx_frm_raw: Raw Frame
 */
enum dp_tx_frm_type {
	dp_tx_frm_std = 0,
	dp_tx_frm_tso,
	dp_tx_frm_sg,
	dp_tx_frm_audio,
	dp_tx_frm_me,
	dp_tx_frm_raw,
};
245 
/**
 * enum dp_ast_type - type of an AST (address search table) entry
 * @dp_ast_type_wds: WDS peer AST type
 * @dp_ast_type_static: static ast entry type
 * @dp_ast_type_mec: Multicast echo ast entry type
 */
enum dp_ast_type {
	dp_ast_type_wds = 0,
	dp_ast_type_static,
	dp_ast_type_mec,
};
257 
/**
 * enum dp_nss_cfg - NSS offload configuration (per-radio bitmask)
 * @dp_nss_cfg_default: No radios are offloaded
 * @dp_nss_cfg_first_radio: First radio offloaded
 * @dp_nss_cfg_second_radio: Second radio offloaded
 * @dp_nss_cfg_dbdc: Dual radios offloaded
 * @dp_nss_cfg_dbtc: Three radios offloaded
 * @dp_nss_cfg_max: one past the highest valid configuration value
 *
 * Values are bit flags: one bit per radio (dbdc = 0x3, dbtc = 0x7).
 */
enum dp_nss_cfg {
	dp_nss_cfg_default = 0x0,
	dp_nss_cfg_first_radio = 0x1,
	dp_nss_cfg_second_radio = 0x2,
	dp_nss_cfg_dbdc = 0x3,
	dp_nss_cfg_dbtc = 0x7,
	dp_nss_cfg_max
};
274 
275 #ifdef WLAN_TX_PKT_CAPTURE_ENH
276 #define DP_CPU_RING_MAP_1 1
277 #endif
278 
/**
 * enum dp_cpu_ring_map_types - dp tx cpu ring map
 * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
 * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded
 * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
 * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
 * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
 * @DP_SINGLE_TX_RING_MAP: to avoid out of order all cpu mapped to single ring
 *			   (only when WLAN_TX_PKT_CAPTURE_ENH is defined)
 * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
 */
enum dp_cpu_ring_map_types {
	DP_NSS_DEFAULT_MAP,
	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
	DP_NSS_DBDC_OFFLOADED_MAP,
	DP_NSS_DBTC_OFFLOADED_MAP,
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	DP_SINGLE_TX_RING_MAP,
#endif
	DP_NSS_CPU_RING_MAP_MAX
};
300 
/**
 * struct rx_desc_pool - software pool of RX descriptors
 * @pool_size: number of RX descriptors in the pool
 * @elem_size: Element size (only with RX_DESC_MULTI_PAGE_ALLOC)
 * @desc_pages: Multi page descriptors (only with RX_DESC_MULTI_PAGE_ALLOC)
 * @array: pointer to array of RX descriptors (single-allocation variant)
 * @freelist: pointer to free RX descriptor link list
 * @lock: Protection for the RX descriptor pool
 * @owner: owner for nbuf
 * @buf_size: Buffer size
 * @buf_alignment: Buffer alignment
 */
struct rx_desc_pool {
	uint32_t pool_size;
#ifdef RX_DESC_MULTI_PAGE_ALLOC
	uint16_t elem_size;
	struct qdf_mem_multi_page_t desc_pages;
#else
	union dp_rx_desc_list_elem_t *array;
#endif
	union dp_rx_desc_list_elem_t *freelist;
	qdf_spinlock_t lock;
	uint8_t owner;
	uint16_t buf_size;
	uint8_t buf_alignment;
};
327 
/**
 * struct dp_tx_ext_desc_elem_s - single Tx extension descriptor element
 * @next: next extension descriptor pointer
 * @vaddr: hlos virtual address pointer
 * @paddr: physical address pointer for descriptor
 */
struct dp_tx_ext_desc_elem_s {
	struct dp_tx_ext_desc_elem_s *next;
	void *vaddr;
	qdf_dma_addr_t paddr;
};
339 
/**
 * struct dp_tx_ext_desc_pool_s - Tx Extension Descriptor Pool
 * @elem_count: Number of descriptors in the pool
 * @elem_size: Size of each descriptor
 * @num_free: Number of free descriptors
 * @desc_pages: multiple page allocation information for actual descriptors
 * @link_elem_size: size of the link descriptor in cacheable memory used for
 * 		    chaining the extension descriptors
 * @desc_link_pages: multiple page allocation information for link descriptors
 * @freelist: chain of free extension descriptor elements
 * @lock: lock protecting allocation/free from/to this pool
 * @memctx: DMA memory context for the pool
 */
struct dp_tx_ext_desc_pool_s {
	uint16_t elem_count;
	int elem_size;
	uint16_t num_free;
	struct qdf_mem_multi_page_t desc_pages;
	int link_elem_size;
	struct qdf_mem_multi_page_t desc_link_pages;
	struct dp_tx_ext_desc_elem_s *freelist;
	qdf_spinlock_t lock;
	qdf_dma_mem_context(memctx);
};
362 
/**
 * struct dp_tx_desc_s - Tx Descriptor
 * @next: Next in the chain of descriptors in freelist or in the completion list
 * @nbuf: Buffer Address
 * @length: frame length (NOTE(review): presumably bytes of @nbuf — confirm)
 * @flags: Flags to track the state of descriptor and special frame handling
 * @dma_addr: DMA address of the frame buffer
 * @id: Descriptor ID
 * @vdev: vdev over which the packet was transmitted
 * @pdev: Handle to pdev
 * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet).
 * 		   This is maintained in descriptor to allow more efficient
 * 		   processing in completion event processing code.
 * 		   This field is filled in with the htt_pkt_type enum.
 * @frm_type: Frame Type - ToDo check if this is redundant
 * @pkt_offset: Offset from which the actual packet data starts
 * @pool_id: Pool ID - used when releasing the descriptor
 * @peer_id: id of the destination peer
 * @tx_status: Tx completion status
 * @msdu_ext_desc: MSDU extension descriptor
 * @me_buffer: Pointer to ME buffer - store this so that it can be freed on
 *		Tx completion of ME packet
 * @tso_desc: TSO segment descriptor for this frame
 * @tso_num_desc: TSO number-of-segments descriptor for this frame
 * @timestamp: enqueue timestamp (units per qdf timer API — confirm)
 * @comp: snapshot of the HAL Tx completion descriptor
 */
struct dp_tx_desc_s {
	struct dp_tx_desc_s *next;
	qdf_nbuf_t nbuf;
	uint16_t length;
	uint16_t flags;
	qdf_dma_addr_t dma_addr;
	uint32_t id;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	uint8_t tx_encap_type;
	uint8_t frm_type;
	uint8_t pkt_offset;
	uint8_t  pool_id;
	uint16_t peer_id;
	uint16_t tx_status;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	void *me_buffer;
	void *tso_desc;
	void *tso_num_desc;
	uint64_t timestamp;
	struct hal_tx_desc_comp_s comp;
};
406 
/**
 * enum flow_pool_status - flow pool status
 * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
 *				and network queues are unpaused
 * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
 *			   and network queues are paused
 * @FLOW_POOL_BE_BK_PAUSED: pool is active, BE/BK AC queues are paused
 * @FLOW_POOL_VI_PAUSED: pool is active, VI AC queues are paused
 * @FLOW_POOL_VO_PAUSED: pool is active, VO AC queues are paused
 * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
 * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
 */
enum flow_pool_status {
	FLOW_POOL_ACTIVE_UNPAUSED = 0,
	FLOW_POOL_ACTIVE_PAUSED = 1,
	FLOW_POOL_BE_BK_PAUSED = 2,
	FLOW_POOL_VI_PAUSED = 3,
	FLOW_POOL_VO_PAUSED = 4,
	FLOW_POOL_INVALID = 5,
	FLOW_POOL_INACTIVE = 6,
};
425 
/**
 * struct dp_tx_tso_seg_pool_s - pool of TSO segment elements
 * @pool_size: total number of pool elements
 * @num_free: free element count
 * @freelist: first free element pointer
 * @desc_pages: multiple page allocation information for actual descriptors
 * @lock: lock for accessing the pool
 */
struct dp_tx_tso_seg_pool_s {
	uint16_t pool_size;
	uint16_t num_free;
	struct qdf_tso_seg_elem_t *freelist;
	struct qdf_mem_multi_page_t desc_pages;
	qdf_spinlock_t lock;
};
441 
/**
 * struct dp_tx_tso_num_seg_pool_s - pool of TSO num-segment elements
 * @num_seg_pool_size: total number of pool elements
 * @num_free: free element count
 * @freelist: first free element pointer
 * @desc_pages: multiple page allocation information for actual descriptors
 * @lock: lock for accessing the pool
 */

struct dp_tx_tso_num_seg_pool_s {
	uint16_t num_seg_pool_size;
	uint16_t num_free;
	struct qdf_tso_num_seg_elem_t *freelist;
	/*tso mutex */
	qdf_spinlock_t lock;
	struct qdf_mem_multi_page_t desc_pages;
};
459 
/**
 * struct dp_tx_desc_pool_s - Tx Descriptor pool information
 * @elem_size: Size of each descriptor in the pool
 * @num_allocated: Number of used descriptors
 * @freelist: Chain of free descriptors
 * @desc_pages: multiple page allocation information for actual descriptors
 * @pool_size: Total number of descriptors in the pool (flow-control build)
 * @flow_pool_id: id of this flow pool (flow-control build)
 * @num_invalid_bin: Deleted pool with pending Tx completions.
 * @avail_desc: available descriptor count (flow-control build)
 * @status: current flow pool status (enum flow_pool_status)
 * @flow_type: htt flow type this pool serves
 * @stop_th: pause threshold(s); per-AC array with QCA_AC_BASED_FLOW_CONTROL
 * @start_th: resume threshold(s); per-AC array with QCA_AC_BASED_FLOW_CONTROL
 * @max_pause_time: per-level longest observed pause time
 * @latest_pause_time: per-level timestamp of the latest pause
 * @pkt_drop_no_desc: packets dropped for lack of a descriptor
 * @flow_pool_lock: Lock when operating on flow_pool_array.
 * @pool_create_cnt: number of outstanding pool-create requests
 * @pool_owner_ctx: opaque context of the pool owner
 * @elem_count: Total number of descriptors (non-flow-control build)
 * @num_free: Number of free descriptors (non-flow-control build)
 * @lock: Lock for descriptor allocation/free from/to the pool
 */
struct dp_tx_desc_pool_s {
	uint16_t elem_size;
	uint32_t num_allocated;
	struct dp_tx_desc_s *freelist;
	struct qdf_mem_multi_page_t desc_pages;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	uint16_t pool_size;
	uint8_t flow_pool_id;
	uint8_t num_invalid_bin;
	uint16_t avail_desc;
	enum flow_pool_status status;
	enum htt_flow_type flow_type;
#ifdef QCA_AC_BASED_FLOW_CONTROL
	uint16_t stop_th[FL_TH_MAX];
	uint16_t start_th[FL_TH_MAX];
	qdf_time_t max_pause_time[FL_TH_MAX];
	qdf_time_t latest_pause_time[FL_TH_MAX];
#else
	uint16_t stop_th;
	uint16_t start_th;
#endif
	uint16_t pkt_drop_no_desc;
	qdf_spinlock_t flow_pool_lock;
	uint8_t pool_create_cnt;
	void *pool_owner_ctx;
#else
	uint16_t elem_count;
	uint32_t num_free;
	qdf_spinlock_t lock;
#endif
};
504 
/**
 * struct dp_txrx_pool_stats - flow pool related statistics
 * @pool_map_count: flow pool map received
 * @pool_unmap_count: flow pool unmap received
 * @pkt_drop_no_pool: packets dropped due to unavailability of pool
 */
struct dp_txrx_pool_stats {
	uint16_t pool_map_count;
	uint16_t pool_unmap_count;
	uint16_t pkt_drop_no_pool;
};
516 
/**
 * struct dp_srng - DP srng structure
 * @hal_srng: hal_srng handle
 * @base_vaddr_unaligned: un-aligned virtual base address of the srng ring
 * @base_vaddr_aligned: aligned virtual base address of the srng ring
 * @base_paddr_unaligned: un-aligned physical base address of the srng ring
 * @base_paddr_aligned: aligned physical base address of the srng ring
 * @alloc_size: size of the srng ring
 * @cached: is the srng ring memory cached or un-cached memory
 * @irq: irq number of the srng ring
 * @num_entries: number of entries in the srng ring
 */
struct dp_srng {
	hal_ring_handle_t hal_srng;
	void *base_vaddr_unaligned;
	void *base_vaddr_aligned;
	qdf_dma_addr_t base_paddr_unaligned;
	qdf_dma_addr_t base_paddr_aligned;
	uint32_t alloc_size;
	uint8_t cached;
	int irq;
	uint32_t num_entries;
};
540 
/**
 * struct dp_rx_reorder_array_elem - one slot of buffered Rx nbufs
 * @head: first buffered nbuf in this slot
 * @tail: last buffered nbuf in this slot
 */
struct dp_rx_reorder_array_elem {
	qdf_nbuf_t head;
	qdf_nbuf_t tail;
};
545 
/* Rx block-ack session states, stored in dp_rx_tid::ba_status */
#define DP_RX_BA_INACTIVE 0
#define DP_RX_BA_ACTIVE 1
#define DP_RX_BA_IN_PROGRESS 2

/**
 * struct dp_reo_cmd_info - pending REO command tracking node
 * @cmd: command number assigned when the command was posted
 * @cmd_type: REO command type
 * @data: caller context passed back to @handler
 * @handler: completion callback invoked with the REO status
 * @reo_cmd_list_elem: list linkage in the soc REO command list
 */
struct dp_reo_cmd_info {
	uint16_t cmd;
	enum hal_reo_cmd_type cmd_type;
	void *data;
	void (*handler)(struct dp_soc *, void *, union hal_reo_status *);
	TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
};
556 
/* Rx TID - per-TID Rx reorder, defragmentation and block-ack session state */
struct dp_rx_tid {
	/* TID */
	int tid;

	/* Num of addba requests */
	uint32_t num_of_addba_req;

	/* Num of addba responses */
	uint32_t num_of_addba_resp;

	/* Num of delba requests */
	uint32_t num_of_delba_req;

	/* Num of addba responses successful */
	uint32_t num_addba_rsp_success;

	/* Num of addba responses failed */
	uint32_t num_addba_rsp_failed;

	/* pn size */
	uint8_t pn_size;
	/* REO TID queue descriptors */
	void *hw_qdesc_vaddr_unaligned;
	qdf_dma_addr_t hw_qdesc_paddr_unaligned;
	qdf_dma_addr_t hw_qdesc_paddr;
	uint32_t hw_qdesc_alloc_size;

	/* RX ADDBA session state (DP_RX_BA_* values) */
	int ba_status;

	/* RX BA window size */
	uint16_t ba_win_size;

	/* Starting sequence number in Addba request */
	uint16_t startseqnum;

	/* TODO: Check the following while adding defragmentation support */
	struct dp_rx_reorder_array_elem *array;
	/* base - single rx reorder element used for non-aggr cases */
	struct dp_rx_reorder_array_elem base;

	/* only used for defrag right now */
	TAILQ_ENTRY(dp_rx_tid) defrag_waitlist_elem;

	/* Store dst desc for reinjection */
	hal_ring_desc_t dst_ring_desc;
	struct dp_rx_desc *head_frag_desc;

	/* rx_tid lock */
	qdf_spinlock_t tid_lock;

	/* Sequence and fragments that are being processed currently */
	uint32_t curr_seq_num;
	uint32_t curr_frag_num;

	/* defrag timeout for this TID, in milliseconds */
	uint32_t defrag_timeout_ms;
	/* dialog token from the latest ADDBA exchange */
	uint16_t dialogtoken;
	/* ADDBA response status code */
	uint16_t statuscode;
	/* user defined ADDBA response status code */
	uint16_t userstatuscode;

	/* Store ppdu_id when 2k exception is received */
	uint32_t ppdu_id_2k;

	/* Delba Tx completion status */
	uint8_t delba_tx_status;

	/* Delba Tx retry count */
	uint8_t delba_tx_retry;

	/* Delba stats */
	uint32_t delba_tx_success_cnt;
	uint32_t delba_tx_fail_cnt;

	/* Delba reason code for retries */
	uint8_t delba_rcode;

	/* Coex Override preserved windows size 1 based */
	uint16_t rx_ba_win_size_override;

#ifdef WLAN_PEER_JITTER
	/* Tx Jitter stats */
	uint32_t tx_avg_jitter;
	uint32_t tx_avg_delay;
	uint64_t tx_avg_err;
	uint64_t tx_total_success;
	uint64_t tx_drop;
#endif /* WLAN_PEER_JITTER */
};
647 
/**
 * struct dp_intr_stats - DP Interrupt Stats for an interrupt context
 * @num_tx_ring_masks: interrupts with tx_ring_mask set
 * @num_rx_ring_masks: interrupts with rx_ring_mask set
 * @num_rx_mon_ring_masks: interrupts with rx_mon_ring_mask set
 * @num_rx_err_ring_masks: interrupts with rx_err_ring_mask set
 * @num_rx_wbm_rel_ring_masks: interrupts with rx_wbm_rel_ring_mask set
 * @num_reo_status_ring_masks: interrupts with reo_status_ring_mask set
 * @num_rxdma2host_ring_masks: interrupts with rxdma2host_ring_mask set
 * @num_host2rxdma_ring_masks: interrupts with host2rxdma_ring_mask set
 * @num_masks: total number of times the interrupt was received
 *
 * Counter for individual masks are incremented only if there are any packets
 * on that ring.
 */
struct dp_intr_stats {
	uint32_t num_tx_ring_masks[MAX_TCL_DATA_RINGS];
	uint32_t num_rx_ring_masks[MAX_REO_DEST_RINGS];
	uint32_t num_rx_mon_ring_masks;
	uint32_t num_rx_err_ring_masks;
	uint32_t num_rx_wbm_rel_ring_masks;
	uint32_t num_reo_status_ring_masks;
	uint32_t num_rxdma2host_ring_masks;
	uint32_t num_host2rxdma_ring_masks;
	uint32_t num_masks;
};
675 
/* per interrupt context - ring masks serviced plus stats for this context */
struct dp_intr {
	uint8_t tx_ring_mask;   /* WBM Tx completion rings (0-2)
				associated with this napi context */
	uint8_t rx_ring_mask;   /* Rx REO rings (0-3) associated
				with this interrupt context */
	uint8_t rx_mon_ring_mask;  /* Rx monitor ring mask (0-2) */
	uint8_t rx_err_ring_mask; /* REO Exception Ring */
	uint8_t rx_wbm_rel_ring_mask; /* WBM2SW Rx Release Ring */
	uint8_t reo_status_ring_mask; /* REO command response ring */
	uint8_t rxdma2host_ring_mask; /* RXDMA to host destination ring */
	uint8_t host2rxdma_ring_mask; /* Host to RXDMA buffer ring */
	/* Host to RXDMA monitor  buffer ring */
	uint8_t host2rxdma_mon_ring_mask;
	struct dp_soc *soc;    /* Reference to SoC structure ,
				to get DMA ring handles */
	qdf_lro_ctx_t lro_ctx; /* LRO context for this interrupt context */
	uint8_t dp_intr_id;    /* index of this context in soc->intr_ctx */

	/* Interrupt Stats for individual masks */
	struct dp_intr_stats intr_stats;
};
698 
/* capacity of the deferred REO queue-descriptor free list */
#define REO_DESC_FREELIST_SIZE 64
/* minimum age (ms) before a queued REO descriptor is actually freed */
#define REO_DESC_FREE_DEFER_MS 1000
/**
 * struct reo_desc_list_node - deferred-free node for a REO queue descriptor
 * @node: qdf list linkage
 * @free_ts: timestamp when the node was queued for freeing
 * @rx_tid: Rx TID state owning the hardware queue descriptor
 * @resend_update_reo_cmd: retry flag for a failed REO update command
 */
struct reo_desc_list_node {
	qdf_list_node_t node;
	unsigned long free_ts;
	struct dp_rx_tid rx_tid;
	bool resend_update_reo_cmd;
};
707 
#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/**
 * struct reo_cmd_event_record - Elements to record for each reo command
 * @cmd_type: reo command type
 * @cmd_return_status: reo command post status
 * @timestamp: record timestamp for the reo command
 */
struct reo_cmd_event_record {
	enum hal_reo_cmd_type cmd_type;
	uint8_t cmd_return_status;
	uint32_t timestamp;
};
720 
/**
 * struct reo_cmd_event_history - circular history of reo cmd events
 * @index: atomic write index into @cmd_record
 * @cmd_record: list of records
 */
struct reo_cmd_event_history {
	qdf_atomic_t index;
	struct reo_cmd_event_record cmd_record[REO_CMD_EVENT_HIST_MAX];
};
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
731 
/* SoC level data path statistics */
struct dp_soc_stats {
	/* AST (address search table) entry statistics */
	struct {
		/* AST entries added */
		uint32_t added;
		/* AST entries deleted */
		uint32_t deleted;
		/* AST entries aged out */
		uint32_t aged_out;
		/* AST map errors */
		uint32_t map_err;
		/* AST entry mismatches */
		uint32_t ast_mismatch;
	} ast;

	/* SOC level TX stats */
	struct {
		/* packets dropped on tx because of no peer */
		struct cdp_pkt_info tx_invalid_peer;
		/* descriptors in each tcl ring */
		uint32_t tcl_ring_full[MAX_TCL_DATA_RINGS];
		/* Descriptors in use at soc */
		uint32_t desc_in_use;
		/* tqm_release_reason == FW removed */
		uint32_t dropped_fw_removed;
		/* tx completion release_src != TQM or FW */
		uint32_t invalid_release_source;
		/* tx completion wbm_internal_error */
		uint32_t wbm_internal_error[MAX_WBM_INT_ERROR_REASONS];
		/* tx completion non_wbm_internal_error */
		uint32_t non_wbm_internal_err;
		/* TX Comp loop packet limit hit */
		uint32_t tx_comp_loop_pkt_limit_hit;
		/* Head pointer Out of sync at the end of dp_tx_comp_handler */
		uint32_t hp_oos2;
	} tx;

	/* SOC level RX stats */
	struct {
		/* Rx errors */
		/* Total Packets in Rx Error ring */
		uint32_t err_ring_pkts;
		/* No of Fragments */
		uint32_t rx_frags;
		/* No of incomplete fragments in waitlist */
		uint32_t rx_frag_wait;
		/* Fragments dropped due to errors */
		uint32_t rx_frag_err;
		/* Fragments dropped due to len errors in skb */
		uint32_t rx_frag_err_len_error;
		/* Fragments dropped due to no peer found */
		uint32_t rx_frag_err_no_peer;
		/* No of reinjected packets */
		uint32_t reo_reinject;
		/* Reap loop packet limit hit */
		uint32_t reap_loop_pkt_limit_hit;
		/* Head pointer Out of sync at the end of dp_rx_process */
		uint32_t hp_oos2;
		/* Rx ring near full */
		uint32_t near_full;
		/* Break ring reaping as not all scattered msdu received */
		uint32_t msdu_scatter_wait_break;

		struct {
			/* Invalid RBM error count */
			uint32_t invalid_rbm;
			/* Invalid VDEV Error count */
			uint32_t invalid_vdev;
			/* Invalid PDEV error count */
			uint32_t invalid_pdev;

			/* Packets delivered to stack with no related peer */
			uint32_t pkt_delivered_no_peer;
			/* Defrag peer uninit error count */
			uint32_t defrag_peer_uninit;
			/* Invalid sa_idx or da_idx*/
			uint32_t invalid_sa_da_idx;
			/* MSDU DONE failures */
			uint32_t msdu_done_fail;
			/* Invalid PEER Error count */
			struct cdp_pkt_info rx_invalid_peer;
			/* Invalid PEER ID count */
			struct cdp_pkt_info rx_invalid_peer_id;
			/* Invalid packet length */
			struct cdp_pkt_info rx_invalid_pkt_len;
			/* HAL ring access Fail error count */
			uint32_t hal_ring_access_fail;
			/* HAL ring access full Fail error count */
			uint32_t hal_ring_access_full_fail;
			/* RX DMA error count */
			uint32_t rxdma_error[HAL_RXDMA_ERR_MAX];
			/* RX REO DEST Desc Invalid Magic count */
			uint32_t rx_desc_invalid_magic;
			/* REO Error count */
			uint32_t reo_error[HAL_REO_ERR_MAX];
			/* HAL REO ERR Count */
			uint32_t hal_reo_error[MAX_REO_DEST_RINGS];
			/* HAL REO DEST Duplicate count */
			uint32_t hal_reo_dest_dup;
			/* HAL WBM RELEASE Duplicate count */
			uint32_t hal_wbm_rel_dup;
			/* HAL RXDMA error Duplicate count */
			uint32_t hal_rxdma_err_dup;
			/* REO cmd send fail/requeue count */
			uint32_t reo_cmd_send_fail;
			/* REO cmd send drain count */
			uint32_t reo_cmd_send_drain;
			/* RX msdu drop count due to scatter */
			uint32_t scatter_msdu;
			/* RX msdu drop count due to invalid cookie */
			uint32_t invalid_cookie;
			/* Delba sent count due to RX 2k jump */
			uint32_t rx_2k_jump_delba_sent;
			/* RX 2k jump msdu indicated to stack count */
			uint32_t rx_2k_jump_to_stack;
			/* RX 2k jump msdu dropped count */
			uint32_t rx_2k_jump_drop;
			/* REO OOR msdu drop count */
			uint32_t reo_err_oor_drop;
			/* REO OOR msdu indicated to stack count */
			uint32_t reo_err_oor_to_stack;
			/* REO OOR scattered msdu count */
			uint32_t reo_err_oor_sg_count;
			/* RX msdu rejected count on delivery to vdev stack_fn*/
			uint32_t rejected;
		} err;

		/* packet count per core - per ring */
		uint64_t ring_packets[NR_CPUS][MAX_REO_DEST_RINGS];
	} rx;

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
	struct reo_cmd_event_history cmd_event_history;
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
};
862 
/**
 * union dp_align_mac_addr - MAC address overlaid with 2- and 4-byte views
 * @raw: the 6 MAC address bytes
 * @align2: three 16-bit words covering the address
 * @align4: 32-bit word followed by a 16-bit word
 * @align4_2: 16-bit word followed by a packed 32-bit word
 *
 * The alternate views allow word-sized compares/copies of the address.
 */
union dp_align_mac_addr {
	uint8_t raw[QDF_MAC_ADDR_SIZE];
	struct {
		uint16_t bytes_ab;
		uint16_t bytes_cd;
		uint16_t bytes_ef;
	} align2;
	struct {
		uint32_t bytes_abcd;
		uint16_t bytes_ef;
	} align4;
	struct __attribute__((__packed__)) {
		uint16_t bytes_ab;
		uint32_t bytes_cdef;
	} align4_2;
};
879 
/**
 * struct dp_ast_free_cb_params - HMWDS free callback cookie
 * @mac_addr: ast mac address
 * @peer_mac_addr: mac address of peer
 * @type: ast entry type
 * @vdev_id: vdev_id
 * @flags: ast flags
 */
struct dp_ast_free_cb_params {
	union dp_align_mac_addr mac_addr;
	union dp_align_mac_addr peer_mac_addr;
	enum cdp_txrx_ast_entry_type type;
	uint8_t vdev_id;
	uint32_t flags;
};
895 
/**
 * struct dp_ast_entry - AST (address search table) entry
 * @ast_idx: Hardware AST Index
 * @mac_addr:  MAC Address for this AST entry
 * @peer: Next Hop peer (for non-WDS nodes, this will be point to
 *        associated peer with this MAC address)
 * @next_hop: Set to 1 if this is for a WDS node
 * @is_active: flag to indicate active data traffic on this node
 *             (used for aging out/expiry)
 * @is_mapped: flag to indicate that we have mapped the AST entry
 *             in ast_table
 * @pdev_id: pdev ID
 * @ast_hash_value: hash value in HW
 * @ref_cnt: reference count
 * @type: flag to indicate type of the entry(static/WDS/MEC)
 * @delete_in_progress: Flag to indicate that delete commands send to FW
 *                      and host is waiting for response from FW
 * @callback: ast free/unmap callback
 * @cookie: argument to callback
 * @ase_list_elem: node in peer AST list
 * @hash_list_elem: node in soc AST hash list (mac address used as hash)
 */
struct dp_ast_entry {
	uint16_t ast_idx;
	union dp_align_mac_addr mac_addr;
	struct dp_peer *peer;
	bool next_hop;
	bool is_active;
	bool is_mapped;
	uint8_t pdev_id;
	uint16_t ast_hash_value;
	qdf_atomic_t ref_cnt;
	enum cdp_txrx_ast_entry_type type;
	bool delete_in_progress;
	txrx_ast_free_cb callback;
	void *cookie;
	TAILQ_ENTRY(dp_ast_entry) ase_list_elem;
	TAILQ_ENTRY(dp_ast_entry) hash_list_elem;
};
938 
/* SOC level htt stats - deferred processing of target-to-host stats msgs */
struct htt_t2h_stats {
	/* lock to protect htt_stats_msg update */
	qdf_spinlock_t lock;

	/* work queue to process htt stats */
	qdf_work_t work;

	/* T2H Ext stats message queue */
	qdf_nbuf_queue_t msg;

	/* number of completed stats in htt_stats_msg */
	uint32_t num_stats;
};
953 
/**
 * struct link_desc_bank - one contiguous memory bank of link descriptors
 * @base_vaddr_unaligned: virtual base address as allocated
 * @base_vaddr: aligned virtual base address
 * @base_paddr_unaligned: physical base address as allocated
 * @base_paddr: aligned physical base address
 * @size: size of the bank in bytes
 */
struct link_desc_bank {
	void *base_vaddr_unaligned;
	void *base_vaddr;
	qdf_dma_addr_t base_paddr_unaligned;
	qdf_dma_addr_t base_paddr;
	uint32_t size;
};
961 
/* SOC level structure for data path */
struct dp_soc {
	/**
	 * re-use memory section starts
	 */

	/* Common base structure - Should be the first member */
	struct cdp_soc_t cdp_soc;

	/* SoC Obj */
	struct cdp_ctrl_objmgr_psoc *ctrl_psoc;

	/* OS device abstraction */
	qdf_device_t osdev;

	/* CCE disable flag */
	bool cce_disable;

	/* WLAN config context */
	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx;

	/* HTT handle for host-fw interaction */
	struct htt_soc *htt_handle;

	/* Common init done */
	qdf_atomic_t cmn_init_done;

	/* Opaque hif handle */
	struct hif_opaque_softc *hif_handle;

	/* PDEVs on this SOC */
	struct dp_pdev *pdev_list[MAX_PDEV_CNT];

	/* Ring used to replenish rx buffers (maybe to the firmware or MAC) */
	struct dp_srng rx_refill_buf_ring[MAX_PDEV_CNT];

	/* RXDMA monitor descriptor ring, one per LMAC */
	struct dp_srng rxdma_mon_desc_ring[MAX_NUM_LMAC_HW];

	/* RXDMA error destination ring */
	struct dp_srng rxdma_err_dst_ring[MAX_NUM_LMAC_HW];

	/* RXDMA monitor buffer replenish ring */
	struct dp_srng rxdma_mon_buf_ring[MAX_NUM_LMAC_HW];

	/* RXDMA monitor destination ring */
	struct dp_srng rxdma_mon_dst_ring[MAX_NUM_LMAC_HW];

	/* RXDMA monitor status ring. TBD: Check format of this ring */
	struct dp_srng rxdma_mon_status_ring[MAX_NUM_LMAC_HW];

	/* Number of PDEVs */
	uint8_t pdev_count;

	/*ast override support in HW*/
	bool ast_override_support;

	/*number of hw dscp tid map*/
	uint8_t num_hw_dscp_tid_map;

	/* HAL SOC handle */
	hal_soc_handle_t hal_soc;

	/* Device ID coming from Bus sub-system */
	uint32_t device_id;

	/* Link descriptor pages */
	struct qdf_mem_multi_page_t link_desc_pages;

	/* total link descriptors for regular RX and TX */
	uint32_t total_link_descs;

	/* monitor link descriptor pages */
	struct qdf_mem_multi_page_t mon_link_desc_pages[MAX_NUM_LMAC_HW];

	/* total link descriptors for monitor mode for each radio */
	uint32_t total_mon_link_descs[MAX_NUM_LMAC_HW];

	/* Monitor Link descriptor memory banks */
	struct link_desc_bank
		mon_link_desc_banks[MAX_NUM_LMAC_HW][MAX_MON_LINK_DESC_BANKS];
	/* number of monitor link descriptor banks in use, per LMAC */
	uint32_t num_mon_link_desc_banks[MAX_NUM_LMAC_HW];

	/* Link descriptor Idle list for HW internal use (SRNG mode) */
	struct dp_srng wbm_idle_link_ring;

	/* Link descriptor Idle list for HW internal use (scatter buffer mode)
	 */
	qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS];
	void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS];
	uint32_t num_scatter_bufs;

	/* Tx SW descriptor pool */
	struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS];

	/* Tx MSDU Extension descriptor pool */
	struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS];

	/* Tx TSO descriptor pool */
	struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS];

	/* Tx TSO Num of segments pool */
	struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS];

	/* REO destination rings */
	struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS];

	/* REO exception ring - See if should combine this with reo_dest_ring */
	struct dp_srng reo_exception_ring;

	/* REO reinjection ring */
	struct dp_srng reo_reinject_ring;

	/* REO command ring */
	struct dp_srng reo_cmd_ring;

	/* REO command status ring */
	struct dp_srng reo_status_ring;

	/* WBM Rx release ring */
	struct dp_srng rx_rel_ring;

	/* TCL data ring */
	struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS];

	/* Number of TCL data rings */
	uint8_t num_tcl_data_rings;

	/* TCL CMD_CREDIT ring */
	/* It is used as credit based ring on QCN9000 else command ring */
	struct dp_srng tcl_cmd_credit_ring;

	/* TCL command status ring */
	struct dp_srng tcl_status_ring;

	/* WBM Tx completion rings */
	struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS];

	/* Common WBM link descriptor release ring (SW to WBM) */
	struct dp_srng wbm_desc_rel_ring;

	/* DP Interrupts */
	struct dp_intr intr_ctx[WLAN_CFG_INT_NUM_CONTEXTS];

	/* Rx SW descriptor pool for RXDMA monitor buffer */
	struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS];

	/* Rx SW descriptor pool for RXDMA status buffer */
	struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS];

	/* Rx SW descriptor pool for RXDMA buffer */
	struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS];

	/* Number of REO destination rings */
	uint8_t num_reo_dest_rings;

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	/* lock to control access to soc TX descriptors */
	qdf_spinlock_t flow_pool_array_lock;

	/* pause callback to pause TX queues as per flow control */
	tx_pause_callback pause_cb;

	/* flow pool related statistics */
	struct dp_txrx_pool_stats pool_stats;
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

	/* size of each WBM idle-list scatter buffer */
	uint32_t wbm_idle_scatter_buf_size;

	/* VDEVs on this SOC */
	struct dp_vdev *vdev_id_map[MAX_VDEV_CNT];

	/* Tx H/W queues lock */
	qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];

	/* Tx ring map for interrupt processing */
	uint8_t tx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];

	/* Rx ring map for interrupt processing */
	uint8_t rx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];

	/* peer ID to peer object map (array of pointers to peer objects) */
	struct dp_peer **peer_id_to_obj_map;

	/* peer hash table */
	struct {
		unsigned mask;
		unsigned idx_bits;
		TAILQ_HEAD(, dp_peer) * bins;
	} peer_hash;

	/* rx defrag state - TBD: do we need this per radio? */
	struct {
		struct {
			TAILQ_HEAD(, dp_rx_tid) waitlist;
			uint32_t timeout_ms;
			uint32_t next_flush_ms;
			qdf_spinlock_t defrag_lock;
		} defrag;
		struct {
			int defrag_timeout_check;
			int dup_check;
		} flags;
		TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list;
		qdf_spinlock_t reo_cmd_lock;
	} rx;

	/* optional rx processing function */
	void (*rx_opt_proc)(
		struct dp_vdev *vdev,
		struct dp_peer *peer,
		unsigned tid,
		qdf_nbuf_t msdu_list);

	/* pool addr for mcast enhance buff */
	struct {
		int size;
		uint32_t paddr;
		uint32_t *vaddr;
		struct dp_tx_me_buf_t *freelist;
		int buf_in_use;
		qdf_dma_mem_context(memctx);
	} me_buf;

	/**
	 * peer ref mutex:
	 * 1. Protect peer object lookups until the returned peer object's
	 *	reference count is incremented.
	 * 2. Provide mutex when accessing peer object lookup structures.
	 */
	DP_MUTEX_TYPE peer_ref_mutex;

	/* maximum value for peer_id */
	uint32_t max_peers;

	/* SoC level data path statistics */
	struct dp_soc_stats stats;

	/* Enable processing of Tx completion status words */
	bool process_tx_status;
	bool process_rx_status;
	/* AST entry map (array of pointers to dp_ast_entry) */
	struct dp_ast_entry **ast_table;
	/* AST hash table (mac address used as hash) */
	struct {
		unsigned mask;
		unsigned idx_bits;
		TAILQ_HEAD(, dp_ast_entry) * bins;
	} ast_hash;

	/* lock for AST table/hash access */
	qdf_spinlock_t ast_lock;
	/*Timer for AST entry ageout maintenance */
	qdf_timer_t ast_aging_timer;

	/*Timer counter for WDS AST entry ageout*/
	uint8_t wds_ast_aging_timer_cnt;

	/*interrupt timer*/
	qdf_timer_t mon_reap_timer;
	uint8_t reap_timer_init;
	qdf_timer_t lmac_reap_timer;
	uint8_t lmac_timer_init;
	qdf_timer_t int_timer;
	uint8_t intr_mode;
	uint8_t lmac_polled_mode;

	qdf_list_t reo_desc_freelist;
	qdf_spinlock_t reo_desc_freelist_lock;

	/* htt stats */
	struct htt_t2h_stats htt_stats;

	void *external_txrx_handle; /* External data path handle */
#ifdef IPA_OFFLOAD
	/* IPA uC datapath offload Wlan Tx resources */
	struct {
		/* Resource info to be passed to IPA */
		qdf_dma_addr_t ipa_tcl_ring_base_paddr;
		void *ipa_tcl_ring_base_vaddr;
		uint32_t ipa_tcl_ring_size;
		qdf_dma_addr_t ipa_tcl_hp_paddr;
		uint32_t alloc_tx_buf_cnt;

		qdf_dma_addr_t ipa_wbm_ring_base_paddr;
		void *ipa_wbm_ring_base_vaddr;
		uint32_t ipa_wbm_ring_size;
		qdf_dma_addr_t ipa_wbm_tp_paddr;

		/* TX buffers populated into the WBM ring */
		void **tx_buf_pool_vaddr_unaligned;
		qdf_dma_addr_t *tx_buf_pool_paddr_unaligned;
	} ipa_uc_tx_rsc;

	/* IPA uC datapath offload Wlan Rx resources */
	struct {
		/* Resource info to be passed to IPA */
		qdf_dma_addr_t ipa_reo_ring_base_paddr;
		void *ipa_reo_ring_base_vaddr;
		uint32_t ipa_reo_ring_size;
		qdf_dma_addr_t ipa_reo_tp_paddr;

		/* Resource info to be passed to firmware and IPA */
		qdf_dma_addr_t ipa_rx_refill_buf_ring_base_paddr;
		void *ipa_rx_refill_buf_ring_base_vaddr;
		uint32_t ipa_rx_refill_buf_ring_size;
		qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr;
	} ipa_uc_rx_rsc;

	qdf_atomic_t ipa_pipes_enabled;
	bool ipa_first_tx_db_access;
#endif

#ifdef WLAN_FEATURE_STATS_EXT
	struct {
		uint32_t rx_mpdu_received;
		uint32_t rx_mpdu_missed;
	} ext_stats;
	qdf_event_t rx_hw_stats_event;
	qdf_spinlock_t rx_hw_stats_lock;
	bool is_last_stats_ctx_init;
#endif /* WLAN_FEATURE_STATS_EXT */

	/* Smart monitor capability for HKv2 */
	uint8_t hw_nac_monitor_support;
	/* Flag to indicate if HTT v2 is enabled*/
	bool is_peer_map_unmap_v2;
	/* Per peer per Tid ba window size support */
	uint8_t per_tid_basize_max_tid;
	/* Soc level flag to enable da_war */
	uint8_t da_war_enabled;
	/* number of active ast entries */
	uint32_t num_ast_entries;
	/* rdk rate statistics context at soc level*/
	struct cdp_soc_rate_stats_ctx *rate_stats_ctx;
	/* rdk rate statistics control flag */
	bool wlanstats_enabled;

	/* 8021p PCP-TID map values */
	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
	/* TID map priority value */
	uint8_t tidmap_prty;
	/* Pointer to global per ring type specific configuration table */
	struct wlan_srng_cfg *wlan_srng_cfg;
	/* Num Tx outstanding on device */
	qdf_atomic_t num_tx_outstanding;
	/* Num Tx exception on device */
	qdf_atomic_t num_tx_exception;
	/* Num Tx allowed */
	uint32_t num_tx_allowed;
	/* Preferred HW mode */
	uint8_t preferred_hw_mode;

	/**
	 * Flag to indicate whether WAR to address single cache entry
	 * invalidation bug is enabled or not
	 */
	bool is_rx_fse_full_cache_invalidate_war_enabled;
#if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
	/**
	 * Pointer to DP RX Flow FST at SOC level if
	 * is_rx_flow_search_table_per_pdev is false
	 * TBD: rx_fst[num_macs] if we decide to have per mac FST
	 */
	struct dp_rx_fst *rx_fst;
#ifdef WLAN_SUPPORT_RX_FISA
	uint8_t fisa_enable;

	/**
	 * Params used for controlling the fisa aggregation dynamically
	 */
	struct {
		qdf_atomic_t skip_fisa;
		uint8_t fisa_force_flush[MAX_REO_DEST_RINGS];
	} skip_fisa_param;
#endif
#endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
	/* Full monitor mode support */
	bool full_mon_mode;
	/* SG supported for msdu continued packets from wbm release ring */
	bool wbm_release_desc_rx_sg_support;
	bool peer_map_attach_success;
	/* Flag to disable mac1 ring interrupts */
	bool disable_mac1_intr;
	/* Flag to disable mac2 ring interrupts */
	bool disable_mac2_intr;

	struct {
		/* 1st msdu in sg for msdu continued packets in wbm rel ring */
		bool wbm_is_first_msdu_in_sg;
		/* Wbm sg list head */
		qdf_nbuf_t wbm_sg_nbuf_head;
		/* Wbm sg list tail */
		qdf_nbuf_t wbm_sg_nbuf_tail;
		uint32_t wbm_sg_desc_msdu_len;
	} wbm_sg_param;
	/* Number of msdu exception descriptors */
	uint32_t num_msdu_exception_desc;
};
1356 
#ifdef IPA_OFFLOAD
/**
 * dp_ipa_resources - Resources needed for IPA
 * @tx_ring: Tx (TCL) ring shared memory
 * @tx_num_alloc_buffer: number of Tx buffers allocated
 * @tx_comp_ring: Tx completion (WBM) ring shared memory
 * @rx_rdy_ring: Rx ready (REO) ring shared memory
 * @rx_refill_ring: Rx refill ring shared memory
 * @tx_comp_doorbell_paddr: IPA uC Tx completion doorbell register paddr
 * @tx_comp_doorbell_vaddr: IPA uC Tx completion doorbell register vaddr
 * @rx_ready_doorbell_paddr: IPA uC Rx ready doorbell register paddr
 */
struct dp_ipa_resources {
	qdf_shared_mem_t tx_ring;
	uint32_t tx_num_alloc_buffer;

	qdf_shared_mem_t tx_comp_ring;
	qdf_shared_mem_t rx_rdy_ring;
	qdf_shared_mem_t rx_refill_ring;

	/* IPA UC doorbell registers paddr */
	qdf_dma_addr_t tx_comp_doorbell_paddr;
	uint32_t *tx_comp_doorbell_vaddr;
	qdf_dma_addr_t rx_ready_doorbell_paddr;
};
#endif
1375 
#define MAX_RX_MAC_RINGS 2
/* Same as NAC_MAX_CLENT */
#define DP_NAC_MAX_CLIENT  24

/*
 * 24 bits cookie size
 * 10 bits page id 0 ~ 1023 for MCL
 * 3 bits page id 0 ~ 7 for WIN
 * WBM Idle List Desc size = 128,
 * Num descs per page = 4096/128 = 32 for MCL
 * Num descs per page = 2MB/128 = 16384 for WIN
 */
/*
 * Macros to setup link descriptor cookies - for link descriptors, we just
 * need first 3 bits to store bank/page ID for WIN. The
 * remaining bytes will be used to set a unique ID, which will
 * be useful in debugging
 */
#ifdef MAX_ALLOC_PAGE_SIZE
/* MCL layout: 10-bit page id in bits 5..14 (mask 0x007FE0), unique desc
 * id in the low bits. LINK_DESC_ID_START is defined further below; this
 * is legal since object-like macros are expanded at the use site.
 */
#define LINK_DESC_PAGE_ID_MASK  0x007FE0
#define LINK_DESC_ID_SHIFT      5
#define LINK_DESC_COOKIE(_desc_id, _page_id) \
	((((_page_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_desc_id))
#define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
	(((_cookie) & LINK_DESC_PAGE_ID_MASK) >> LINK_DESC_ID_SHIFT)
#else
/* WIN layout: 3-bit page id in the low bits, desc id (offset by
 * LINK_DESC_ID_START) in the upper bits.
 */
#define LINK_DESC_PAGE_ID_MASK  0x7
#define LINK_DESC_ID_SHIFT      3
#define LINK_DESC_COOKIE(_desc_id, _page_id) \
	((((_desc_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_page_id))
#define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
	((_cookie) & LINK_DESC_PAGE_ID_MASK)
#endif
#define LINK_DESC_ID_START 0x8000
1410 
/* NAC (neighbour peer) list commands; values mirror ieee80211_nac_param */
enum dp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	DP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	DP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	DP_NAC_PARAM_LIST,
};
1420 
1421 /**
1422  * struct dp_neighbour_peer - neighbour peer list type for smart mesh
1423  * @neighbour_peers_macaddr: neighbour peer's mac address
1424  * @neighbour_peer_list_elem: neighbour peer list TAILQ element
1425  * @ast_entry: ast_entry for neighbour peer
1426  * @rssi: rssi value
1427  */
1428 struct dp_neighbour_peer {
1429 	/* MAC address of neighbour's peer */
1430 	union dp_align_mac_addr neighbour_peers_macaddr;
1431 	struct dp_vdev *vdev;
1432 	struct dp_ast_entry *ast_entry;
1433 	uint8_t rssi;
1434 	/* node in the list of neighbour's peer */
1435 	TAILQ_ENTRY(dp_neighbour_peer) neighbour_peer_list_elem;
1436 };
1437 
#ifdef WLAN_TX_PKT_CAPTURE_ENH
/* NOTE(review): redefines WLAN_TX_PKT_CAPTURE_ENH to 1 inside its own
 * #ifdef — assumes the build defines it empty or as 1; a different prior
 * value would trigger a macro-redefinition warning. Confirm build flags.
 */
#define WLAN_TX_PKT_CAPTURE_ENH 1
/* Tuning knobs for the Tx packet-capture PPDU processing path */
#define DP_TX_PPDU_PROC_THRESHOLD 8
#define DP_TX_PPDU_PROC_TIMEOUT 10
#endif
1443 
1444 /**
1445  * struct ppdu_info - PPDU Status info descriptor
1446  * @ppdu_id: Unique ppduid assigned by firmware for every tx packet
1447  * @sched_cmdid: schedule command id, which will be same in a burst
1448  * @max_ppdu_id: wrap around for ppdu id
1449  * @last_tlv_cnt: Keep track for missing ppdu tlvs
1450  * @last_user: last ppdu processed for user
1451  * @is_ampdu: set if Ampdu aggregate
1452  * @nbuf: ppdu descriptor payload
1453  * @ppdu_desc: ppdu descriptor
1454  * @ppdu_info_list_elem: linked list of ppdu tlvs
1455  * @ppdu_info_queue_elem: Singly linked list (queue) of ppdu tlvs
1456  * @mpdu_compltn_common_tlv: Successful tlv counter from COMPLTN COMMON tlv
1457  * @mpdu_ack_ba_tlv: Successful tlv counter from ACK BA tlv
1458  */
1459 struct ppdu_info {
1460 	uint32_t ppdu_id;
1461 	uint32_t sched_cmdid;
1462 	uint32_t max_ppdu_id;
1463 	uint16_t tlv_bitmap;
1464 	uint16_t last_tlv_cnt;
1465 	uint16_t last_user:8,
1466 		 is_ampdu:1;
1467 	qdf_nbuf_t nbuf;
1468 	struct cdp_tx_completion_ppdu *ppdu_desc;
1469 #ifdef WLAN_TX_PKT_CAPTURE_ENH
1470 	union {
1471 		TAILQ_ENTRY(ppdu_info) ppdu_info_dlist_elem;
1472 		STAILQ_ENTRY(ppdu_info) ppdu_info_slist_elem;
1473 	} ulist;
1474 #define ppdu_info_list_elem ulist.ppdu_info_dlist_elem
1475 #define ppdu_info_queue_elem ulist.ppdu_info_slist_elem
1476 #else
1477 	TAILQ_ENTRY(ppdu_info) ppdu_info_list_elem;
1478 #endif
1479 	uint8_t compltn_common_tlv;
1480 	uint8_t ack_ba_tlv;
1481 };
1482 
1483 /**
1484  * struct msdu_completion_info - wbm msdu completion info
1485  * @ppdu_id            - Unique ppduid assigned by firmware for every tx packet
1486  * @peer_id            - peer_id
1487  * @tid                - tid which used during transmit
1488  * @first_msdu         - first msdu indication
1489  * @last_msdu          - last msdu indication
1490  * @msdu_part_of_amsdu - msdu part of amsdu
1491  * @transmit_cnt       - retried count
1492  * @status             - transmit status
1493  * @tsf                - timestamp which it transmitted
1494  */
1495 struct msdu_completion_info {
1496 	uint32_t ppdu_id;
1497 	uint16_t peer_id;
1498 	uint8_t tid;
1499 	uint8_t first_msdu:1,
1500 		last_msdu:1,
1501 		msdu_part_of_amsdu:1;
1502 	uint8_t transmit_cnt;
1503 	uint8_t status;
1504 	uint32_t tsf;
1505 };
1506 
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/* Per-protocol-type entry in the rx protocol tag map */
struct rx_protocol_tag_map {
	/* This is the user configured tag for the said protocol type */
	uint16_t tag;
};

#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
/* Per-protocol-type tag hit statistics */
struct rx_protocol_tag_stats {
	/* number of msdus tagged with this protocol tag */
	uint32_t tag_ctr;
};
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */

#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
1520 
#ifndef WLAN_TX_PKT_CAPTURE_ENH
/* Empty placeholder when Tx packet capture enhancement is compiled out;
 * the real definitions live in dp_tx_capture.h.
 */
struct dp_pdev_tx_capture {
};

struct dp_peer_tx_capture {
};
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
/* Known 64-bit template pattern for Enhanced RX Monitor packet trailers */
#define RX_MON_CAP_ENH_TRAILER 0xdeadc0dedeadda7a

/**
 * struct dp_rx_mon_enh_trailer_data - Data structure to set a known pattern
 * at end of each MSDU in monitor-lite mode
 * @reserved1: reserved for future use
 * @reserved2: reserved for future use
 * @flow_tag: flow tag value read from skb->cb
 * @protocol_tag: protocol tag value read from skb->cb
 */
struct dp_rx_mon_enh_trailer_data {
	uint16_t reserved1;
	uint16_t reserved2;
	uint16_t flow_tag;
	uint16_t protocol_tag;
};
#endif /* WLAN_RX_PKT_CAPTURE_ENH */
1547 
/* PDEV level structure for data path */
struct dp_pdev {
	/**
	 * Re-use Memory Section Starts
	 */

	/* PDEV Id */
	int pdev_id;

	/* LMAC Id */
	int lmac_id;

	/* Target pdev Id */
	int target_pdev_id;

	/* TXRX SOC handle */
	struct dp_soc *soc;

	/* Stuck count on monitor destination ring MPDU process */
	uint32_t mon_dest_ring_stuck_cnt;

	/* set when pdev deinit has been performed */
	bool pdev_deinit;

	/* pdev status down or up required to handle dynamic hw
	 * mode switch between DBS and DBS_SBS.
	 * 1 = down
	 * 0 = up
	 */
	bool is_pdev_down;

	/* Second ring used to replenish rx buffers */
	struct dp_srng rx_refill_buf_ring2;

	/* Empty ring used by firmware to post rx buffers to the MAC */
	struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];

	/* wlan_cfg pdev ctxt*/
	 struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx;

	/**
	 * TODO: See if we need a ring map here for LMAC rings.
	 * 1. Monitor rings are currently planning to be processed on receiving
	 * PPDU end interrupts and hence wont need ring based interrupts.
	 * 2. Rx buffer rings will be replenished during REO destination
	 * processing and doesn't require regular interrupt handling - we will
	 * only handle low water mark interrupts which is not expected
	 * frequently
	 */

	/* VDEV list */
	TAILQ_HEAD(, dp_vdev) vdev_list;

	/* vdev list lock */
	qdf_spinlock_t vdev_list_lock;

	/* Number of vdevs this device have */
	uint16_t vdev_count;

	/* PDEV transmit lock */
	qdf_spinlock_t tx_lock;

#ifndef REMOVE_PKT_LOG
	bool pkt_log_init;
	/* Pktlog pdev */
	struct pktlog_dev_t *pl_dev;
#endif /* #ifndef REMOVE_PKT_LOG */

	/* Monitor mode interface and status storage */
	struct dp_vdev *monitor_vdev;

	/* Monitor mode operation channel */
	int mon_chan_num;

	/* Monitor mode operation frequency */
	qdf_freq_t mon_chan_freq;

	/* monitor mode lock */
	qdf_spinlock_t mon_lock;

	/*tx_mutex for me*/
	DP_MUTEX_TYPE tx_mutex;

	/* monitor */
	bool monitor_configured;

	/* Smart Mesh */
	bool filter_neighbour_peers;

	/*flag to indicate neighbour_peers_list not empty */
	bool neighbour_peers_added;
	/* smart mesh mutex */
	qdf_spinlock_t neighbour_peer_mutex;
	/* Neighbour peer list */
	TAILQ_HEAD(, dp_neighbour_peer) neighbour_peers_list;
	/* msdu chain head & tail */
	qdf_nbuf_t invalid_peer_head_msdu;
	qdf_nbuf_t invalid_peer_tail_msdu;

	/* Band steering */
	/* TBD */

	/* PDEV level data path statistics */
	struct cdp_pdev_stats stats;

	/* Global RX decap mode for the device */
	enum htt_pkt_type rx_decap_mode;

	/* Enhanced Stats is enabled */
	bool enhanced_stats_en;

	/* advance filter mode and type*/
	uint8_t mon_filter_mode;
	uint16_t fp_mgmt_filter;
	uint16_t fp_ctrl_filter;
	uint16_t fp_data_filter;
	uint16_t mo_mgmt_filter;
	uint16_t mo_ctrl_filter;
	uint16_t mo_data_filter;
	uint16_t md_data_filter;

	qdf_atomic_t num_tx_outstanding;

	qdf_atomic_t num_tx_exception;

	/* MCL specific local peer handle */
	struct {
		uint8_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
		uint8_t freelist;
		qdf_spinlock_t lock;
		struct dp_peer *map[OL_TXRX_NUM_LOCAL_PEER_IDS];
	} local_peer_ids;

	/* dscp_tid_map_*/
	uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX];

	struct hal_rx_ppdu_info ppdu_info;

	/* operating channel */
	struct {
		uint8_t num;
		uint8_t band;
		uint16_t freq;
	} operating_channel;

	qdf_nbuf_queue_t rx_status_q;
	uint32_t mon_ppdu_status;
	struct cdp_mon_status rx_mon_recv_status;
	/* monitor mode status/destination ring PPDU and MPDU count */
	struct cdp_pdev_mon_stats rx_mon_stats;
	/* to track duplicate link descriptor indications by HW for a WAR */
	uint64_t mon_last_linkdesc_paddr;
	/* to track duplicate buffer indications by HW for a WAR */
	uint32_t mon_last_buf_cookie;
	/* 128 bytes mpdu header queue per user for ppdu */
	qdf_nbuf_queue_t mpdu_q[MAX_MU_USERS];
	/* is this a mpdu header TLV and not msdu header TLV */
	bool is_mpdu_hdr[MAX_MU_USERS];
	/* per user 128 bytes msdu header list for MPDU */
	struct msdu_list msdu_list[MAX_MU_USERS];
	/* RX enhanced capture mode */
	uint8_t rx_enh_capture_mode;
	/* Rx per peer enhanced capture mode */
	bool rx_enh_capture_peer;
	struct dp_vdev *rx_enh_monitor_vdev;
	/* RX enhanced capture trailer enable/disable flag */
	bool is_rx_enh_capture_trailer_enabled;
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	/* RX per MPDU/PPDU information */
	struct cdp_rx_indication_mpdu mpdu_ind;
#endif
	/* pool addr for mcast enhance buff */
	struct {
		int size;
		uint32_t paddr;
		char *vaddr;
		struct dp_tx_me_buf_t *freelist;
		int buf_in_use;
		qdf_dma_mem_context(memctx);
	} me_buf;

	bool hmmc_tid_override_en;
	uint8_t hmmc_tid;

	/* Number of VAPs with mcast enhancement enabled */
	qdf_atomic_t mc_num_vap_attached;

	qdf_atomic_t stats_cmd_complete;

#ifdef IPA_OFFLOAD
	ipa_uc_op_cb_type ipa_uc_op_cb;
	void *usr_ctxt;
	struct dp_ipa_resources ipa_resource;
#endif

	/* TBD */

	/* map this pdev to a particular Reo Destination ring */
	enum cdp_host_reo_dest_ring reo_dest;

#ifndef REMOVE_PKT_LOG
	/* Packet log mode */
	uint8_t rx_pktlog_mode;
#endif

	/* WDI event handlers */
	struct wdi_event_subscribe_t **wdi_event_list;

	/* ppdu_id of last received HTT TX stats */
	uint32_t last_ppdu_id;
	struct {
		uint8_t last_user;
		qdf_nbuf_t buf;
	} tx_ppdu_info;

	bool tx_sniffer_enable;
	/* mirror copy mode */
	enum m_copy_mode mcopy_mode;
	bool cfr_rcc_mode;
	bool enable_reap_timer_non_pkt;
	bool bpr_enable;

	/* enable time latency check for tx completion */
	bool latency_capture_enable;

	/* enable calculation of delay stats*/
	bool delay_stats_flag;
	struct {
		uint16_t tx_ppdu_id;
		uint16_t tx_peer_id;
		uint16_t rx_ppdu_id;
	} m_copy_id;

	/* To check if PPDU Tx stats are enabled for Pktlog */
	bool pktlog_ppdu_stats;

	void *dp_txrx_handle; /* Advanced data path handle */

#ifdef ATH_SUPPORT_NAC_RSSI
	bool nac_rssi_filtering;
#endif
	/* list of ppdu tlvs */
	TAILQ_HEAD(, ppdu_info) ppdu_info_list;
	uint32_t tlv_count;
	uint32_t list_depth;
	uint32_t ppdu_id;
	bool first_nbuf;
	struct {
		qdf_nbuf_t last_nbuf; /*Ptr to mgmt last buf */
		uint8_t *mgmt_buf; /* Ptr to mgmt. payload in HTT ppdu stats */
		uint32_t mgmt_buf_len; /* Len of mgmt. payload in ppdu stats */
		uint32_t ppdu_id;
	} mgmtctrl_frm_info;

	/* Current noise-floor reading for the pdev channel */
	int16_t chan_noise_floor;

	/*
	 * For multiradio device, this flag indicates if
	 * this radio is primary or secondary.
	 *
	 * For HK 1.0, this is used for WAR for the AST issue.
	 * HK 1.x mandates creation of only 1 AST entry with same MAC address
	 * across 2 radios. is_primary indicates the radio on which DP should
	 * install HW AST entry if there is a request to add 2 AST entries
	 * with same MAC address across 2 radios
	 */
	uint8_t is_primary;
	/* Context of cal client timer */
	struct cdp_cal_client *cal_client_ctx;
	struct cdp_tx_sojourn_stats sojourn_stats;
	qdf_nbuf_t sojourn_buf;

	/* peer pointer for collecting invalid peer stats */
	struct dp_peer *invalid_peer;

	union dp_rx_desc_list_elem_t *free_list_head;
	union dp_rx_desc_list_elem_t *free_list_tail;
	/* Pdev level flag to check peer based pktlog enabled or
	 * disabled
	 */
	uint8_t dp_peer_based_pktlog;

	/* Cached peer_id from htt_peer_details_tlv */
	uint16_t fw_stats_peer_id;

	/* qdf_event for fw_peer_stats */
	qdf_event_t fw_peer_stats_event;

	/* User configured max number of tx buffers */
	uint32_t num_tx_allowed;

	/* unique cookie required for peer session */
	uint32_t next_peer_cookie;

	/*
	 * Run time enabled when the first protocol tag is added,
	 * run time disabled when the last protocol tag is deleted
	 */
	bool  is_rx_protocol_tagging_enabled;

#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	/*
	 * The protocol type is used as array index to save
	 * user provided tag info
	 */
	struct rx_protocol_tag_map rx_proto_tag_map[RX_PROTOCOL_TAG_MAX];

#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	/*
	 * Track msdus received from each reo ring separately to avoid
	 * simultaneous writes from different core
	 */
	struct rx_protocol_tag_stats
		reo_proto_tag_stats[MAX_REO_DEST_RINGS][RX_PROTOCOL_TAG_MAX];
	/* Track msdus received from exception ring separately */
	struct rx_protocol_tag_stats
		rx_err_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

	/* tx packet capture enhancement */
	enum cdp_tx_enh_capture_mode tx_capture_enabled;
	struct dp_pdev_tx_capture tx_capture;

	uint32_t *ppdu_tlv_buf; /* Buffer to hold HTT ppdu stats TLVs*/

#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	/**
	 * Pointer to DP Flow FST at SOC level if
	 * is_rx_flow_search_table_per_pdev is true
	 */
	struct dp_rx_fst *rx_fst;
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

#ifdef FEATURE_TSO_STATS
	/* TSO Id to index into TSO packet information */
	qdf_atomic_t tso_idx;
#endif /* FEATURE_TSO_STATS */

#ifdef WLAN_SUPPORT_DATA_STALL
	data_stall_detect_cb data_stall_detect_callback;
#endif /* WLAN_SUPPORT_DATA_STALL */

	struct dp_mon_filter **filter;	/* Monitor Filter pointer */

#ifdef QCA_SUPPORT_FULL_MON
	/* List to maintain all MPDUs for a PPDU in monitor mode */
	TAILQ_HEAD(, dp_mon_mpdu) mon_mpdu_q;

	/* TODO: define per-user mpdu list
	 * struct dp_mon_mpdu_list mpdu_list[MAX_MU_USERS];
	 */
	struct hal_rx_mon_desc_info *mon_desc;
#endif
	qdf_nbuf_t mcopy_status_nbuf;

	/* Flag to hold on to monitor destination ring */
	bool hold_mon_dest_ring;

	/* Maintains first status buffer's paddr of a PPDU */
	uint64_t status_buf_addr;
};
1910 
1911 struct dp_peer;
1912 
1913 /* VDEV structure for data path state */
1914 struct dp_vdev {
1915 	/* OS device abstraction */
1916 	qdf_device_t osdev;
1917 
1918 	/* physical device that is the parent of this virtual device */
1919 	struct dp_pdev *pdev;
1920 
1921 	/* VDEV operating mode */
1922 	enum wlan_op_mode opmode;
1923 
1924 	/* VDEV subtype */
1925 	enum wlan_op_subtype subtype;
1926 
1927 	/* Tx encapsulation type for this VAP */
1928 	enum htt_cmn_pkt_type tx_encap_type;
1929 
1930 	/* Rx Decapsulation type for this VAP */
1931 	enum htt_cmn_pkt_type rx_decap_type;
1932 
1933 	/* BSS peer */
1934 	struct dp_peer *vap_bss_peer;
1935 
1936 	/* WDS enabled */
1937 	bool wds_enabled;
1938 
1939 	/* MEC enabled */
1940 	bool mec_enabled;
1941 
1942 	/* WDS Aging timer period */
1943 	uint32_t wds_aging_timer_val;
1944 
1945 	/* NAWDS enabled */
1946 	bool nawds_enabled;
1947 
1948 	/* Multicast enhancement enabled */
1949 	uint8_t mcast_enhancement_en;
1950 
1951 	/* HW TX Checksum Enabled Flag */
1952 	uint8_t csum_enabled;
1953 
1954 	/* vdev_id - ID used to specify a particular vdev to the target */
1955 	uint8_t vdev_id;
1956 
1957 	/* Default HTT meta data for this VDEV */
1958 	/* TBD: check alignment constraints */
1959 	uint16_t htt_tcl_metadata;
1960 
1961 	/* Mesh mode vdev */
1962 	uint32_t mesh_vdev;
1963 
1964 	/* Mesh mode rx filter setting */
1965 	uint32_t mesh_rx_filter;
1966 
1967 	/* DSCP-TID mapping table ID */
1968 	uint8_t dscp_tid_map_id;
1969 
1970 	/* Address search type to be set in TX descriptor */
1971 	uint8_t search_type;
1972 
1973 	/* AST hash value for BSS peer in HW valid for STA VAP*/
1974 	uint16_t bss_ast_hash;
1975 
1976 	/* vdev lmac_id */
1977 	int lmac_id;
1978 
1979 	bool multipass_en;
1980 
1981 	/* Address search flags to be configured in HAL descriptor */
1982 	uint8_t hal_desc_addr_search_flags;
1983 
1984 	/* Handle to the OS shim SW's virtual device */
1985 	ol_osif_vdev_handle osif_vdev;
1986 
1987 	/* Handle to the UMAC handle */
1988 	struct cdp_ctrl_objmgr_vdev *ctrl_vdev;
1989 
1990 	/* MAC address */
1991 	union dp_align_mac_addr mac_addr;
1992 
1993 	/* node in the pdev's list of vdevs */
1994 	TAILQ_ENTRY(dp_vdev) vdev_list_elem;
1995 
1996 	/* dp_peer list */
1997 	TAILQ_HEAD(, dp_peer) peer_list;
1998 
1999 	/* RX call back function to flush GRO packets*/
2000 	ol_txrx_rx_gro_flush_ind_fp osif_gro_flush;
2001 	/* default RX call back function called by dp */
2002 	ol_txrx_rx_fp osif_rx;
2003 	/* callback to deliver rx frames to the OS */
2004 	ol_txrx_rx_fp osif_rx_stack;
2005 	/* Callback to handle rx fisa frames */
2006 	ol_txrx_fisa_rx_fp osif_fisa_rx;
2007 	ol_txrx_fisa_flush_fp osif_fisa_flush;
2008 
2009 	/* call back function to flush out queued rx packets*/
2010 	ol_txrx_rx_flush_fp osif_rx_flush;
2011 	ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap;
2012 	ol_txrx_get_key_fp osif_get_key;
2013 	ol_txrx_tx_free_ext_fp osif_tx_free_ext;
2014 
2015 #ifdef notyet
2016 	/* callback to check if the msdu is an WAI (WAPI) frame */
2017 	ol_rx_check_wai_fp osif_check_wai;
2018 #endif
2019 
2020 	/* proxy arp function */
2021 	ol_txrx_proxy_arp_fp osif_proxy_arp;
2022 
2023 	/* callback to hand rx monitor 802.11 MPDU to the OS shim */
2024 	ol_txrx_rx_mon_fp osif_rx_mon;
2025 
2026 	ol_txrx_mcast_me_fp me_convert;
2027 
2028 	/* completion function used by this vdev*/
2029 	ol_txrx_completion_fp tx_comp;
2030 
2031 	/* deferred vdev deletion state */
2032 	struct {
2033 		/* VDEV delete pending */
2034 		int pending;
2035 		/*
2036 		* callback and a context argument to provide a
2037 		* notification for when the vdev is deleted.
2038 		*/
2039 		ol_txrx_vdev_delete_cb callback;
2040 		void *context;
2041 	} delete;
2042 
2043 	/* tx data delivery notification callback function */
2044 	struct {
2045 		ol_txrx_data_tx_cb func;
2046 		void *ctxt;
2047 	} tx_non_std_data_callback;
2048 
2049 
2050 	/* safe mode control to bypass the encrypt and decipher process*/
2051 	uint32_t safemode;
2052 
2053 	/* rx filter related */
2054 	uint32_t drop_unenc;
2055 #ifdef notyet
2056 	privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
2057 	uint32_t filters_num;
2058 #endif
2059 	/* TDLS Link status */
2060 	bool tdls_link_connected;
2061 	bool is_tdls_frame;
2062 
2063 	/* per vdev rx nbuf queue */
2064 	qdf_nbuf_queue_t rxq;
2065 
2066 	uint8_t tx_ring_id;
2067 	struct dp_tx_desc_pool_s *tx_desc;
2068 	struct dp_tx_ext_desc_pool_s *tx_ext_desc;
2069 
2070 	/* VDEV Stats */
2071 	struct cdp_vdev_stats stats;
2072 
2073 	/* Is this a proxySTA VAP */
2074 	bool proxysta_vdev;
2075 	/* Is isolation mode enabled */
2076 	bool isolation_vdev;
2077 
2078 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2079 	struct dp_tx_desc_pool_s *pool;
2080 #endif
2081 	/* AP BRIDGE enabled */
2082 	bool ap_bridge_enabled;
2083 
2084 	enum cdp_sec_type  sec_type;
2085 
2086 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
2087 	bool raw_mode_war;
2088 
2089 
2090 	/* AST hash index for BSS peer in HW valid for STA VAP*/
2091 	uint16_t bss_ast_idx;
2092 
2093 	/* Capture timestamp of previous tx packet enqueued */
2094 	uint64_t prev_tx_enq_tstamp;
2095 
2096 	/* Capture timestamp of previous rx packet delivered */
2097 	uint64_t prev_rx_deliver_tstamp;
2098 
2099 	/* 8021p PCP-TID mapping table ID */
2100 	uint8_t tidmap_tbl_id;
2101 
2102 	/* 8021p PCP-TID map values */
2103 	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
2104 
2105 	/* TIDmap priority */
2106 	uint8_t tidmap_prty;
2107 	/* Self Peer in STA mode */
2108 	struct dp_peer *vap_self_peer;
2109 
2110 #ifdef QCA_MULTIPASS_SUPPORT
2111 	uint16_t *iv_vlan_map;
2112 
2113 	/* dp_peer special list */
2114 	TAILQ_HEAD(, dp_peer) mpass_peer_list;
2115 	DP_MUTEX_TYPE mpass_peer_mutex;
2116 #endif
2117 	/* Extended data path handle */
2118 	struct cdp_ext_vdev *vdev_dp_ext_handle;
2119 #ifdef VDEV_PEER_PROTOCOL_COUNT
2120 	/*
2121 	 * Rx-Ingress and Tx-Egress are in the lower level DP layer
2122 	 * Rx-Egress and Tx-ingress are handled in osif layer for DP
2123 	 * So
2124 	 * Rx-Egress and Tx-ingress mask definitions are in OSIF layer
2125 	 * Rx-Ingress and Tx-Egress definitions are here below
2126 	 */
2127 #define VDEV_PEER_PROTOCOL_RX_INGRESS_MASK 1
2128 #define VDEV_PEER_PROTOCOL_TX_INGRESS_MASK 2
2129 #define VDEV_PEER_PROTOCOL_RX_EGRESS_MASK 4
2130 #define VDEV_PEER_PROTOCOL_TX_EGRESS_MASK 8
2131 	bool peer_protocol_count_track;
2132 	int peer_protocol_count_dropmask;
2133 #endif
2134 
2135 	/* vap bss peer mac addr */
2136 	uint8_t vap_bss_peer_mac_addr[QDF_MAC_ADDR_SIZE];
2137 
2138 	/* callback to collect connectivity stats */
2139 	ol_txrx_stats_rx_fp stats_cb;
2140 };
2141 
2142 
/*
 * Index values for the per-peer security[] array in struct dp_peer:
 * slot 0 holds the multicast security info, slot 1 the unicast info
 * (matches the "0 -> multicast, 1 -> unicast" layout of security[2]).
 */
enum {
	dp_sec_mcast = 0,
	dp_sec_ucast
};
2147 
#ifdef WDS_VENDOR_EXTENSION
/*
 * dp_ecm_policy - per-peer WDS vendor-extension policy bits selecting
 * 3-address vs 4-address frame handling per direction and frame type.
 */
typedef struct {
	uint8_t	wds_tx_mcast_4addr:1, /* when set, send multicast in 4addr format */
		wds_tx_ucast_4addr:1, /* when set, send unicast in 4addr format */
		wds_rx_filter:1,      /* enforce rx filter */
		wds_rx_ucast_4addr:1, /* when set, accept 4addr unicast frames    */
		wds_rx_mcast_4addr:1;  /* when set, accept 4addr multicast frames  */

} dp_ecm_policy;
#endif
2158 
/*
 * struct dp_peer_cached_bufq - cached_bufq to enqueue rx packets
 * @cached_bufq: nbuff list to enqueue rx packets
 * @bufq_lock: spinlock for nbuff list access
 * @thresh: maximum threshold for number of rx buff to enqueue
 * @entries: number of entries
 * @dropped: number of packets dropped
 */
struct dp_peer_cached_bufq {
	qdf_list_t cached_bufq;
	qdf_spinlock_t bufq_lock;
	uint32_t thresh;
	uint32_t entries;
	uint32_t dropped;
};
2174 
/**
 * enum dp_peer_ast_flowq
 * @DP_PEER_AST_FLOWQ_HI_PRIO: Hi Priority flow queue
 * @DP_PEER_AST_FLOWQ_LOW_PRIO: Low priority flow queue
 * @DP_PEER_AST_FLOWQ_UDP: flow queue type is UDP
 * @DP_PEER_AST_FLOWQ_NON_UDP: flow queue type is Non UDP
 * @DP_PEER_AST_FLOWQ_MAX: number of flow queue types; used for array sizing
 */
enum dp_peer_ast_flowq {
	DP_PEER_AST_FLOWQ_HI_PRIO,
	DP_PEER_AST_FLOWQ_LOW_PRIO,
	DP_PEER_AST_FLOWQ_UDP,
	DP_PEER_AST_FLOWQ_NON_UDP,
	DP_PEER_AST_FLOWQ_MAX,
};
2189 
/*
 * struct dp_ast_flow_override_info - ast override info carried in the
 * peer map message
 * @ast_idx: ast indexes in peer map message, one per flow queue type
 * @ast_valid_mask: ast valid mask for each ast index
 * @ast_flow_mask: ast flow mask for each ast index
 * @tid_valid_low_pri_mask: per tid mask for low priority flow
 * @tid_valid_hi_pri_mask: per tid mask for hi priority flow
 */
struct dp_ast_flow_override_info {
	uint16_t ast_idx[DP_PEER_AST_FLOWQ_MAX];
	uint8_t ast_valid_mask;
	uint8_t ast_flow_mask[DP_PEER_AST_FLOWQ_MAX];
	uint8_t tid_valid_low_pri_mask;
	uint8_t tid_valid_hi_pri_mask;
};
2205 
/*
 * struct dp_peer_ast_params - ast parameters for a msdu flow-queue
 * @ast_idx: ast index populated by FW
 * @is_valid: ast flow valid mask
 * @valid_tid_mask: per tid mask for this ast index
 * @flowQ: flow queue id associated with this ast index
 */
struct dp_peer_ast_params {
	uint16_t ast_idx;
	uint8_t is_valid;
	uint8_t valid_tid_mask;
	uint8_t flowQ;
};
2219 
/* Peer structure for data path state */
struct dp_peer {
	/* VDEV to which this peer is associated */
	struct dp_vdev *vdev;

	/* AST entry for this peer's own MAC address */
	struct dp_ast_entry *self_ast_entry;

	/* reference count for this peer object */
	qdf_atomic_t ref_cnt;

	/* peer ID for this peer */
	uint16_t peer_id;

	/* MAC address of this peer (aligned union for fast compare) */
	union dp_align_mac_addr mac_addr;

	/* node in the vdev's list of peers */
	TAILQ_ENTRY(dp_peer) peer_list_elem;
	/* node in the hash table bin's list of peers */
	TAILQ_ENTRY(dp_peer) hash_list_elem;

	/* TID structures */
	struct dp_rx_tid rx_tid[DP_MAX_TIDS];
	/* per-peer tx packet capture state */
	struct dp_peer_tx_capture tx_capture;


	/* TBD: No transmit TID state required? */

	/* per-key-type security info; index with dp_sec_mcast/dp_sec_ucast */
	struct {
		enum cdp_sec_type sec_type;
		u_int32_t michael_key[2]; /* relevant for TKIP */
	} security[2]; /* 0 -> multicast, 1 -> unicast */

	/* NAWDS Flag and Bss Peer bit */
	uint8_t nawds_enabled:1, /* NAWDS flag */
		bss_peer:1, /* set for bss peer */
		wds_enabled:1, /* WDS peer */
		authorize:1, /* Set when authorized */
		nac:1, /* NAC Peer*/
		tx_cap_enabled:1, /* Peer's tx-capture is enabled */
		rx_cap_enabled:1, /* Peer's rx-capture is enabled */
		valid:1; /* valid bit */

#ifdef QCA_SUPPORT_PEER_ISOLATION
	bool isolation; /* enable peer isolation for this peer */
#endif

	/* MCL specific peer local id */
	uint16_t local_id;
	/* current peer state (MCL) */
	enum ol_txrx_peer_state state;
	/* NOTE(review): presumably guards state/local peer info — confirm scope */
	qdf_spinlock_t peer_info_lock;

	/* Peer Stats */
	struct cdp_peer_stats stats;

	/* list of AST entries associated with this peer */
	TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
	/* TBD */

#ifdef WDS_VENDOR_EXTENSION
	/* WDS vendor-extension policy bits for this peer */
	dp_ecm_policy wds_ecm;
#endif
	/* set while peer deletion is pending */
	bool delete_in_progress;

	/* Active Block ack sessions */
	uint16_t active_ba_session_cnt;

	/* Current HW buffersize setting */
	uint16_t hw_buffer_size;

	/*
	 * Flag to check if sessions with 256 buffersize
	 * should be terminated.
	 */
	uint8_t kill_256_sessions;
	/* set once the default route is configured for this peer */
	qdf_atomic_t is_default_route_set;
	/* Peer level flag to check peer based pktlog enabled or
	 * disabled
	 */
	uint8_t peer_based_pktlog_filter;

	/* rdk statistics context */
	struct cdp_peer_rate_stats_ctx *wlanstats_ctx;
	/* average sojourn time */
	qdf_ewma_tx_lag avg_sojourn_msdu[CDP_DATA_TID_MAX];

#ifdef QCA_MULTIPASS_SUPPORT
	/* node in the special peer list element */
	TAILQ_ENTRY(dp_peer) mpass_peer_list_elem;
	/* vlan id for key */
	uint16_t vlan_id;
#endif

#ifdef PEER_CACHE_RX_PKTS
	/* non-zero while the cached rx bufq is being flushed */
	qdf_atomic_t flush_in_progress;
	/* queue of rx packets cached before peer is registered */
	struct dp_peer_cached_bufq bufq_info;
#endif
#ifdef FEATURE_PERPKT_INFO
	/* delayed ba ppdu stats handling */
	struct cdp_delayed_tx_completion_ppdu_user delayed_ba_ppdu_stats;
	/* delayed ba flag */
	bool last_delayed_ba;
	/* delayed ba ppdu id */
	uint32_t last_delayed_ba_ppduid;
#endif
#ifdef QCA_PEER_MULTIQ_SUPPORT
	/* per flow-queue ast parameters (see struct dp_peer_ast_params) */
	struct dp_peer_ast_params peer_ast_flowq_idx[DP_PEER_AST_FLOWQ_MAX];
#endif
};
2326 
/*
 * struct dp_invalid_peer_msg - rx frame info delivered when the sending
 * peer cannot be found in the data path
 * @nbuf: data buffer
 * @wh: 802.11 header
 * @vdev_id: id of vdev
 */
struct dp_invalid_peer_msg {
	qdf_nbuf_t nbuf;
	struct ieee80211_frame *wh;
	uint8_t vdev_id;
};
2338 
/*
 * struct dp_tx_me_buf_t - multicast enhancement (ME) buffer
 * @next: pointer to next buffer in the pool's free list
 * @data: destination MAC address
 */
struct dp_tx_me_buf_t {
	/* Note: ME buf pool initialization logic expects next pointer to
	 * be the first element. Dont add anything before next */
	struct dp_tx_me_buf_t *next;
	uint8_t data[QDF_MAC_ADDR_SIZE];
};
2350 
#if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
/* opaque HAL flow search table handle; DP only holds a pointer to it */
struct hal_rx_fst;

#ifdef WLAN_SUPPORT_RX_FLOW_TAG
/*
 * struct dp_rx_fse - software mirror of one rx Flow Search Entry (FSE)
 */
struct dp_rx_fse {
	/* HAL Rx Flow Search Entry which matches HW definition */
	void *hal_rx_fse;
	/* Toeplitz hash value */
	uint32_t flow_hash;
	/* Flow index, equivalent to hash value truncated to FST size */
	uint32_t flow_id;
	/* Stats tracking for this flow */
	struct cdp_flow_stats stats;
	/* Flag indicating whether flow is IPv4 address tuple */
	uint8_t is_ipv4_addr_entry;
	/* Flag indicating whether flow is valid */
	uint8_t is_valid;
};
2369 
/*
 * struct dp_rx_fst - DP rx Flow Search Table state (flow-tag variant)
 */
struct dp_rx_fst {
	/* Software (DP) FST */
	uint8_t *base;
	/* Pointer to HAL FST */
	struct hal_rx_fst *hal_rx_fst;
	/* Base physical address of HAL RX HW FST */
	uint64_t hal_rx_fst_base_paddr;
	/* Maximum number of flows FSE supports */
	uint16_t max_entries;
	/* Num entries in flow table */
	uint16_t num_entries;
	/* SKID Length */
	uint16_t max_skid_length;
	/* Hash mask to obtain legitimate hash entry */
	uint32_t hash_mask;
	/* Timer for bundling of flows */
	qdf_timer_t cache_invalidate_timer;
	/**
	 * Flag which tracks whether cache update
	 * is needed on timer expiry
	 */
	qdf_atomic_t is_cache_update_pending;
	/* Flag to indicate completion of FSE setup in HW/FW */
	bool fse_setup_done;
};
2395 
2396 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_rx_fse)
2397 #elif WLAN_SUPPORT_RX_FISA
2398 
/**
 * enum fisa_aggr_ret - result of a FISA rx aggregation attempt
 * @FISA_AGGR_DONE: frame was aggregated into the flow
 * @FISA_AGGR_NOT_ELIGIBLE: frame is not eligible for aggregation
 * @FISA_FLUSH_FLOW: flow needs to be flushed
 */
enum fisa_aggr_ret {
	FISA_AGGR_DONE,
	FISA_AGGR_NOT_ELIGIBLE,
	FISA_FLUSH_FLOW
};
2404 
/*
 * struct dp_fisa_rx_sw_ft - software flow-table entry for FISA rx aggregation.
 * NOTE(review): comments on previously undocumented fields below are inferred
 * from field names; confirm against the FISA rx implementation.
 */
struct dp_fisa_rx_sw_ft {
	/* HAL Rx Flow Search Entry which matches HW definition */
	void *hw_fse;
	/* Toeplitz hash value */
	uint32_t flow_hash;
	/* Flow index, equivalent to hash value truncated to FST size */
	uint32_t flow_id;
	/* Stats tracking for this flow */
	struct cdp_flow_stats stats;
	/* Flag indicating whether flow is IPv4 address tuple */
	uint8_t is_ipv4_addr_entry;
	/* Flag indicating whether flow is valid */
	uint8_t is_valid;
	/* set once the entry has been filled in */
	uint8_t is_populated;
	/* L4 protocol of the flow */
	uint8_t is_flow_udp;
	uint8_t is_flow_tcp;
	/* head nbuf of the aggregate being built */
	qdf_nbuf_t head_skb;
	/* running L4 checksum over the aggregate */
	uint16_t cumulative_l4_checksum;
	/* running adjusted IP length over the aggregate */
	uint16_t adjusted_cumulative_ip_length;
	/* number of frames in the current aggregate */
	uint16_t cur_aggr;
	/* checksum/length values carried across NAPI flushes */
	uint16_t napi_flush_cumulative_l4_checksum;
	uint16_t napi_flush_cumulative_ip_length;
	/* last nbuf appended to the aggregate */
	qdf_nbuf_t last_skb;
	/* cached IP/L4 header offsets within head_skb */
	uint32_t head_skb_ip_hdr_offset;
	uint32_t head_skb_l4_hdr_offset;
	/* flow tuple (addresses/ports/protocol) for this entry */
	struct cdp_rx_flow_tuple_info rx_flow_tuple_info;
	/* NAPI instance associated with this flow */
	uint8_t napi_id;
	/* vdev the flow belongs to */
	struct dp_vdev *vdev;
	/* lifetime byte/flush/aggregation counters for this flow */
	uint64_t bytes_aggregated;
	uint32_t flush_count;
	uint32_t aggr_count;
	/* when set, skip aggregation for this flow */
	uint8_t do_not_aggregate;
	/* cumulative IP length as reported by HAL (field name typo is legacy) */
	uint16_t hal_cumultive_ip_len;
	/* back-pointer to the DP SOC */
	struct dp_soc *soc_hdl;
};
2440 
2441 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_fisa_rx_sw_ft)
2442 
/*
 * struct dp_rx_fst - DP rx Flow Search Table state (FISA variant)
 */
struct dp_rx_fst {
	/* Software (DP) FST */
	uint8_t *base;
	/* Pointer to HAL FST */
	struct hal_rx_fst *hal_rx_fst;
	/* Base physical address of HAL RX HW FST */
	uint64_t hal_rx_fst_base_paddr;
	/* Maximum number of flows FSE supports */
	uint16_t max_entries;
	/* Num entries in flow table */
	uint16_t num_entries;
	/* SKID Length */
	uint16_t max_skid_length;
	/* Hash mask to obtain legitimate hash entry */
	uint32_t hash_mask;
	/* Lock for adding/deleting entries of FST */
	qdf_spinlock_t dp_rx_fst_lock;
	/* counters: flows added, flows deleted, hash collisions seen */
	uint32_t add_flow_count;
	uint32_t del_flow_count;
	uint32_t hash_collision_cnt;
	/* back-pointer to the DP SOC */
	struct dp_soc *soc_hdl;
};
2465 
2466 #endif /* WLAN_SUPPORT_RX_FISA */
2467 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
2468 
#ifdef WLAN_FEATURE_STATS_EXT
/*
 * dp_req_rx_hw_stats_t: RX peer HW stats query structure
 * @pending_tid_stats_cnt: pending tid stats count which waits for REO status
 * @is_query_timeout: flag to show is stats query timeout
 */
struct dp_req_rx_hw_stats_t {
	qdf_atomic_t pending_tid_stats_cnt;
	bool is_query_timeout;
};
#endif
2480 
/**
 * dp_hw_link_desc_pool_banks_free() - free the HW link descriptor pool banks
 * @soc: DP SOC handle
 * @mac_id: mac id
 */
void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id);

/**
 * dp_hw_link_desc_pool_banks_alloc() - allocate HW link descriptor pool banks
 * @soc: DP SOC handle
 * @mac_id: mac id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc,
					    uint32_t mac_id);

/**
 * dp_link_desc_ring_replenish() - replenish the link descriptor ring
 * @soc: DP SOC handle
 * @mac_id: mac id
 */
void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id);
2485 
2486 #endif /* _DP_TYPES_H_ */
2487