xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_types.h (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _DP_TYPES_H_
20 #define _DP_TYPES_H_
21 
22 #include <qdf_types.h>
23 #include <qdf_nbuf.h>
24 #include <qdf_lock.h>
25 #include <qdf_atomic.h>
26 #include <qdf_util.h>
27 #include <qdf_list.h>
28 #include <qdf_lro.h>
29 #include <queue.h>
30 #include <htt_common.h>
31 #include <htt_stats.h>
32 #include <cdp_txrx_cmn.h>
33 #ifdef DP_MOB_DEFS
34 #include <cds_ieee80211_common.h>
35 #endif
36 #include <wdi_event_api.h>    /* WDI subscriber event list */
37 
38 #include "hal_hw_headers.h"
39 #include <hal_tx.h>
40 #include <hal_reo.h>
41 #include "wlan_cfg.h"
42 #include "hal_rx.h"
43 #include <hal_api.h>
44 #include <hal_api_mon.h>
45 #include "hal_rx.h"
46 //#include "hal_rx_flow.h"
47 
48 #define MAX_BW 7
49 #define MAX_RETRIES 4
50 #define MAX_RECEPTION_TYPES 4
51 
52 #define MINIDUMP_STR_SIZE 25
53 #ifndef REMOVE_PKT_LOG
54 #include <pktlog.h>
55 #endif
56 
57 #ifdef WLAN_TX_PKT_CAPTURE_ENH
58 #include "dp_tx_capture.h"
59 #endif
60 
61 #define REPT_MU_MIMO 1
62 #define REPT_MU_OFDMA_MIMO 3
63 #define DP_VO_TID 6
64  /** MAX TID MAPS AVAILABLE PER PDEV */
65 #define DP_MAX_TID_MAPS 16
66 /** pad DSCP_TID_MAP_MAX with 6 to fix oob issue */
67 #define DSCP_TID_MAP_MAX (64 + 6)
68 #define DP_IP_DSCP_SHIFT 2
69 #define DP_IP_DSCP_MASK 0x3f
70 #define DP_FC0_SUBTYPE_QOS 0x80
71 #define DP_QOS_TID 0x0f
72 #define DP_IPV6_PRIORITY_SHIFT 20
73 #define MAX_MON_LINK_DESC_BANKS 2
74 #define DP_VDEV_ALL 0xff
75 
76 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
77 #define MAX_PDEV_CNT 1
78 #define WLAN_DP_RESET_MON_BUF_RING_FILTER
79 #else
80 #define MAX_PDEV_CNT 3
81 #endif
82 
83 /* Max no. of VDEV per PSOC */
84 #ifdef WLAN_PSOC_MAX_VDEVS
85 #define MAX_VDEV_CNT WLAN_PSOC_MAX_VDEVS
86 #else
87 #define MAX_VDEV_CNT 51
88 #endif
89 
90 /* Max no. of VDEVs, a PDEV can support */
91 #ifdef WLAN_PDEV_MAX_VDEVS
92 #define DP_PDEV_MAX_VDEVS WLAN_PDEV_MAX_VDEVS
93 #else
94 #define DP_PDEV_MAX_VDEVS 17
95 #endif
96 
97 #define MAX_TXDESC_POOLS 4
98 #define MAX_RXDESC_POOLS 4
99 #define MAX_REO_DEST_RINGS 4
100 #define EXCEPTION_DEST_RING_ID 0
101 #define MAX_TCL_DATA_RINGS 4
102 #define MAX_IDLE_SCATTER_BUFS 16
103 #define DP_MAX_IRQ_PER_CONTEXT 12
104 #define DEFAULT_HW_PEER_ID 0xffff
105 
106 #define WBM_INT_ERROR_ALL 0
107 #define WBM_INT_ERROR_REO_NULL_BUFFER 1
108 #define WBM_INT_ERROR_REO_NULL_LINK_DESC 2
109 #define WBM_INT_ERROR_REO_NULL_MSDU_BUFF 3
110 #define WBM_INT_ERROR_REO_BUFF_REAPED 4
111 #define MAX_WBM_INT_ERROR_REASONS 5
112 
113 #define MAX_TX_HW_QUEUES MAX_TCL_DATA_RINGS
114 /* Maximum retries for Delba per tid per peer */
115 #define DP_MAX_DELBA_RETRY 3
116 
117 #define PCP_TID_MAP_MAX 8
118 #define MAX_MU_USERS 37
119 
120 #define REO_CMD_EVENT_HIST_MAX 64
121 
122 /* 2G PHYB */
123 #define PHYB_2G_LMAC_ID 2
124 #define PHYB_2G_TARGET_PDEV_ID 2
125 
126 /* Flags for skippig s/w tid classification */
127 #define DP_TX_HW_DSCP_TID_MAP_VALID 0x1
128 #define DP_TXRX_HLOS_TID_OVERRIDE_ENABLED 0x2
129 #define DP_TX_MESH_ENABLED 0x4
130 
131 enum rx_pktlog_mode {
132 	DP_RX_PKTLOG_DISABLED = 0,
133 	DP_RX_PKTLOG_FULL,
134 	DP_RX_PKTLOG_LITE,
135 };
136 
137 /* enum m_copy_mode - Available mcopy mode
138  *
139  */
140 enum m_copy_mode {
141 	M_COPY_DISABLED = 0,
142 	M_COPY = 2,
143 	M_COPY_EXTENDED = 4,
144 };
145 
146 struct msdu_list {
147 	qdf_nbuf_t head;
148 	qdf_nbuf_t tail;
149 	uint32 sum_len;
150 };
151 
152 struct dp_soc_cmn;
153 struct dp_pdev;
154 struct dp_vdev;
155 struct dp_tx_desc_s;
156 struct dp_soc;
157 union dp_rx_desc_list_elem_t;
158 struct cdp_peer_rate_stats_ctx;
159 struct cdp_soc_rate_stats_ctx;
160 struct dp_rx_fst;
161 struct dp_mon_filter;
162 struct dp_mon_mpdu;
163 
164 /**
165  * enum for DP peer state
166  */
167 enum dp_peer_state {
168 	DP_PEER_STATE_NONE,
169 	DP_PEER_STATE_INIT,
170 	DP_PEER_STATE_ACTIVE,
171 	DP_PEER_STATE_LOGICAL_DELETE,
172 	DP_PEER_STATE_INACTIVE,
173 	DP_PEER_STATE_FREED,
174 	DP_PEER_STATE_INVALID,
175 };
176 
177 /**
178  * enum for modules ids of
179  */
180 enum dp_mod_id {
181 	DP_MOD_ID_TX_COMP = 0,
182 	DP_MOD_ID_RX = 1,
183 	DP_MOD_ID_HTT_COMP = 2,
184 	DP_MOD_ID_RX_ERR = 3,
185 	DP_MOD_ID_TX_PPDU_STATS = 4,
186 	DP_MOD_ID_RX_PPDU_STATS = 5,
187 	DP_MOD_ID_CDP = 6,
188 	DP_MOD_ID_GENERIC_STATS = 7,
189 	DP_MOD_ID_TX_MULTIPASS = 8,
190 	DP_MOD_ID_TX_CAPTURE = 9,
191 	DP_MOD_ID_NSS_OFFLOAD = 10,
192 	DP_MOD_ID_CONFIG = 11,
193 	DP_MOD_ID_HTT = 12,
194 	DP_MOD_ID_IPA = 13,
195 	DP_MOD_ID_AST = 14,
196 	DP_MOD_ID_MCAST2UCAST = 15,
197 	DP_MOD_ID_CHILD = 16,
198 	DP_MOD_ID_MESH = 17,
199 	DP_MOD_ID_TX_EXCEPTION = 18,
200 	DP_MOD_ID_TDLS = 19,
201 	DP_MOD_ID_MISC = 20,
202 	DP_MOD_ID_MSCS = 21,
203 	DP_MOD_ID_TX = 22,
204 	DP_MOD_ID_MAX = 23,
205 };
206 
207 #define DP_PDEV_ITERATE_VDEV_LIST(_pdev, _vdev) \
208 	TAILQ_FOREACH((_vdev), &(_pdev)->vdev_list, vdev_list_elem)
209 
210 #define DP_VDEV_ITERATE_PEER_LIST(_vdev, _peer) \
211 	TAILQ_FOREACH((_peer), &(_vdev)->peer_list, peer_list_elem)
212 
213 #define DP_PEER_ITERATE_ASE_LIST(_peer, _ase, _temp_ase) \
214 	TAILQ_FOREACH_SAFE((_ase), &peer->ast_entry_list, ase_list_elem, (_temp_ase))
215 
216 #define DP_MUTEX_TYPE qdf_spinlock_t
217 
218 #define DP_FRAME_IS_MULTICAST(_a)  (*(_a) & 0x01)
219 #define DP_FRAME_IS_IPV4_MULTICAST(_a)  (*(_a) == 0x01)
220 
221 #define DP_FRAME_IS_IPV6_MULTICAST(_a)         \
222     ((_a)[0] == 0x33 &&                         \
223      (_a)[1] == 0x33)
224 
225 #define DP_FRAME_IS_BROADCAST(_a)              \
226     ((_a)[0] == 0xff &&                         \
227      (_a)[1] == 0xff &&                         \
228      (_a)[2] == 0xff &&                         \
229      (_a)[3] == 0xff &&                         \
230      (_a)[4] == 0xff &&                         \
231      (_a)[5] == 0xff)
232 #define DP_FRAME_IS_SNAP(_llc) ((_llc)->llc_dsap == 0xaa && \
233 		(_llc)->llc_ssap == 0xaa && \
234 		(_llc)->llc_un.type_snap.control == 0x3)
235 #define DP_FRAME_IS_LLC(typeorlen) ((typeorlen) >= 0x600)
236 #define DP_FRAME_FC0_TYPE_MASK 0x0c
237 #define DP_FRAME_FC0_TYPE_DATA 0x08
238 #define DP_FRAME_IS_DATA(_frame) \
239 	(((_frame)->i_fc[0] & DP_FRAME_FC0_TYPE_MASK) == DP_FRAME_FC0_TYPE_DATA)
240 
241 /**
242  * macros to convert hw mac id to sw mac id:
243  * mac ids used by hardware start from a value of 1 while
244  * those in host software start from a value of 0. Use the
245  * macros below to convert between mac ids used by software and
246  * hardware
247  */
248 #define DP_SW2HW_MACID(id) ((id) + 1)
249 #define DP_HW2SW_MACID(id) ((id) > 0 ? ((id) - 1) : 0)
250 
251 /**
252  * Number of Tx Queues
253  * enum and macro to define how many threshold levels is used
254  * for the AC based flow control
255  */
256 #ifdef QCA_AC_BASED_FLOW_CONTROL
257 enum dp_fl_ctrl_threshold {
258 	DP_TH_BE_BK = 0,
259 	DP_TH_VI,
260 	DP_TH_VO,
261 	DP_TH_HI,
262 };
263 
264 #define FL_TH_MAX (4)
265 #define FL_TH_VI_PERCENTAGE (80)
266 #define FL_TH_VO_PERCENTAGE (60)
267 #define FL_TH_HI_PERCENTAGE (40)
268 #endif
269 
270 /**
271  * enum dp_intr_mode
272  * @DP_INTR_INTEGRATED: Line interrupts
273  * @DP_INTR_MSI: MSI interrupts
274  * @DP_INTR_POLL: Polling
275  */
276 enum dp_intr_mode {
277 	DP_INTR_INTEGRATED = 0,
278 	DP_INTR_MSI,
279 	DP_INTR_POLL,
280 };
281 
282 /**
283  * enum dp_tx_frm_type
284  * @dp_tx_frm_std: Regular frame, no added header fragments
285  * @dp_tx_frm_tso: TSO segment, with a modified IP header added
286  * @dp_tx_frm_sg: SG segment
287  * @dp_tx_frm_audio: Audio frames, a custom LLC/SNAP header added
288  * @dp_tx_frm_me: Multicast to Unicast Converted frame
289  * @dp_tx_frm_raw: Raw Frame
290  */
291 enum dp_tx_frm_type {
292 	dp_tx_frm_std = 0,
293 	dp_tx_frm_tso,
294 	dp_tx_frm_sg,
295 	dp_tx_frm_audio,
296 	dp_tx_frm_me,
297 	dp_tx_frm_raw,
298 };
299 
300 /**
301  * enum dp_ast_type
302  * @dp_ast_type_wds: WDS peer AST type
303  * @dp_ast_type_static: static ast entry type
304  * @dp_ast_type_mec: Multicast echo ast entry type
305  */
306 enum dp_ast_type {
307 	dp_ast_type_wds = 0,
308 	dp_ast_type_static,
309 	dp_ast_type_mec,
310 };
311 
312 /**
313  * enum dp_nss_cfg
314  * @dp_nss_cfg_default: No radios are offloaded
315  * @dp_nss_cfg_first_radio: First radio offloaded
316  * @dp_nss_cfg_second_radio: Second radio offloaded
317  * @dp_nss_cfg_dbdc: Dual radios offloaded
318  * @dp_nss_cfg_dbtc: Three radios offloaded
319  */
320 enum dp_nss_cfg {
321 	dp_nss_cfg_default = 0x0,
322 	dp_nss_cfg_first_radio = 0x1,
323 	dp_nss_cfg_second_radio = 0x2,
324 	dp_nss_cfg_dbdc = 0x3,
325 	dp_nss_cfg_dbtc = 0x7,
326 	dp_nss_cfg_max
327 };
328 
329 #ifdef WLAN_TX_PKT_CAPTURE_ENH
330 #define DP_CPU_RING_MAP_1 1
331 #endif
332 
333 /**
334  * dp_cpu_ring_map_type - dp tx cpu ring map
335  * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
336  * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded
337  * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
338  * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
339  * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
340  * @DP_SINGLE_TX_RING_MAP: to avoid out of order all cpu mapped to single ring
341  * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
342  */
343 enum dp_cpu_ring_map_types {
344 	DP_NSS_DEFAULT_MAP,
345 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
346 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
347 	DP_NSS_DBDC_OFFLOADED_MAP,
348 	DP_NSS_DBTC_OFFLOADED_MAP,
349 #ifdef WLAN_TX_PKT_CAPTURE_ENH
350 	DP_SINGLE_TX_RING_MAP,
351 #endif
352 	DP_NSS_CPU_RING_MAP_MAX
353 };
354 
355 /**
356  * dp_rx_nbuf_frag_info - Hold vaddr and paddr for a buffer
357  *
358  * paddr: Physical address of buffer allocated.
359  * nbuf: Allocated nbuf in case of nbuf approach.
360  * vaddr: Virtual address of frag allocated in case of frag approach.
361  */
362 struct dp_rx_nbuf_frag_info {
363 	qdf_dma_addr_t paddr;
364 	union {
365 		qdf_nbuf_t nbuf;
366 		qdf_frag_t vaddr;
367 	} virt_addr;
368 };
369 
370 /**
371  * enum dp_ctxt - context type
372  * @DP_PDEV_TYPE: PDEV context
373  */
374 enum dp_ctxt_type {
375 	DP_PDEV_TYPE
376 };
377 
378 /**
379  * enum dp_desc_type - source type for multiple pages allocation
380  * @DP_TX_DESC_TYPE: DP SW TX descriptor
381  * @DP_TX_EXT_DESC_TYPE: DP TX msdu extension descriptor
382  * @DP_TX_EXT_DESC_LINK_TYPE: DP link descriptor for msdu ext_desc
383  * @DP_TX_TSO_DESC_TYPE: DP TX TSO descriptor
384  * @DP_TX_TSO_NUM_SEG_TYPE: DP TX number of segments
385  * @DP_RX_DESC_BUF_TYPE: DP RX SW descriptor
386  * @DP_RX_DESC_STATUS_TYPE: DP RX SW descriptor for monitor status
387  * @DP_HW_LINK_DESC_TYPE: DP HW link descriptor
388  */
389 enum dp_desc_type {
390 	DP_TX_DESC_TYPE,
391 	DP_TX_EXT_DESC_TYPE,
392 	DP_TX_EXT_DESC_LINK_TYPE,
393 	DP_TX_TSO_DESC_TYPE,
394 	DP_TX_TSO_NUM_SEG_TYPE,
395 	DP_RX_DESC_BUF_TYPE,
396 	DP_RX_DESC_STATUS_TYPE,
397 	DP_HW_LINK_DESC_TYPE,
398 };
399 
400 /**
401  * struct rx_desc_pool
402  * @pool_size: number of RX descriptor in the pool
403  * @elem_size: Element size
404  * @desc_pages: Multi page descriptors
405  * @array: pointer to array of RX descriptor
406  * @freelist: pointer to free RX descriptor link list
407  * @lock: Protection for the RX descriptor pool
408  * @owner: owner for nbuf
409  * @buf_size: Buffer size
410  * @buf_alignment: Buffer alignment
411  * @rx_mon_dest_frag_enable: Enable frag processing for mon dest buffer
412  * @desc_type: type of desc this pool serves
413  */
414 struct rx_desc_pool {
415 	uint32_t pool_size;
416 #ifdef RX_DESC_MULTI_PAGE_ALLOC
417 	uint16_t elem_size;
418 	struct qdf_mem_multi_page_t desc_pages;
419 #else
420 	union dp_rx_desc_list_elem_t *array;
421 #endif
422 	union dp_rx_desc_list_elem_t *freelist;
423 	qdf_spinlock_t lock;
424 	uint8_t owner;
425 	uint16_t buf_size;
426 	uint8_t buf_alignment;
427 	bool rx_mon_dest_frag_enable;
428 	enum dp_desc_type desc_type;
429 };
430 
431 /**
432  * struct dp_tx_ext_desc_elem_s
433  * @next: next extension descriptor pointer
434  * @vaddr: hlos virtual address pointer
435  * @paddr: physical address pointer for descriptor
436  * @flags: mark features for extension descriptor
437  */
438 struct dp_tx_ext_desc_elem_s {
439 	struct dp_tx_ext_desc_elem_s *next;
440 	void *vaddr;
441 	qdf_dma_addr_t paddr;
442 	uint16_t flags;
443 };
444 
445 /**
446  * struct dp_tx_ext_desc_s - Tx Extension Descriptor Pool
447  * @elem_count: Number of descriptors in the pool
448  * @elem_size: Size of each descriptor
449  * @num_free: Number of free descriptors
450  * @msdu_ext_desc: MSDU extension descriptor
451  * @desc_pages: multiple page allocation information for actual descriptors
452  * @link_elem_size: size of the link descriptor in cacheable memory used for
453  * 		    chaining the extension descriptors
454  * @desc_link_pages: multiple page allocation information for link descriptors
455  */
456 struct dp_tx_ext_desc_pool_s {
457 	uint16_t elem_count;
458 	int elem_size;
459 	uint16_t num_free;
460 	struct qdf_mem_multi_page_t desc_pages;
461 	int link_elem_size;
462 	struct qdf_mem_multi_page_t desc_link_pages;
463 	struct dp_tx_ext_desc_elem_s *freelist;
464 	qdf_spinlock_t lock;
465 	qdf_dma_mem_context(memctx);
466 };
467 
468 /**
469  * struct dp_tx_desc_s - Tx Descriptor
470  * @next: Next in the chain of descriptors in freelist or in the completion list
471  * @nbuf: Buffer Address
472  * @msdu_ext_desc: MSDU extension descriptor
473  * @id: Descriptor ID
474  * @vdev_id: vdev_id of vdev over which the packet was transmitted
475  * @pdev: Handle to pdev
476  * @pool_id: Pool ID - used when releasing the descriptor
477  * @flags: Flags to track the state of descriptor and special frame handling
478  * @comp: Pool ID - used when releasing the descriptor
479  * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet).
480  * 		   This is maintained in descriptor to allow more efficient
481  * 		   processing in completion event processing code.
482  * 		    This field is filled in with the htt_pkt_type enum.
483  * @frm_type: Frame Type - ToDo check if this is redundant
484  * @pkt_offset: Offset from which the actual packet data starts
485  * @me_buffer: Pointer to ME buffer - store this so that it can be freed on
486  *		Tx completion of ME packet
487  * @pool: handle to flow_pool this descriptor belongs to.
488  */
489 struct dp_tx_desc_s {
490 	struct dp_tx_desc_s *next;
491 	qdf_nbuf_t nbuf;
492 	uint16_t length;
493 	uint16_t flags;
494 	uint32_t id;
495 	qdf_dma_addr_t dma_addr;
496 	uint8_t vdev_id;
497 	uint8_t tx_status;
498 	uint16_t peer_id;
499 	struct dp_pdev *pdev;
500 	uint8_t tx_encap_type;
501 	uint8_t frm_type;
502 	uint8_t pkt_offset;
503 	uint8_t  pool_id;
504 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
505 	void *me_buffer;
506 	void *tso_desc;
507 	void *tso_num_desc;
508 	uint64_t timestamp;
509 	struct hal_tx_desc_comp_s comp;
510 };
511 
512 /**
513  * enum flow_pool_status - flow pool status
514  * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
515  *				and network queues are unpaused
516  * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
517  *			   and network queues are paused
518  * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
519  * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
520  */
521 enum flow_pool_status {
522 	FLOW_POOL_ACTIVE_UNPAUSED = 0,
523 	FLOW_POOL_ACTIVE_PAUSED = 1,
524 	FLOW_POOL_BE_BK_PAUSED = 2,
525 	FLOW_POOL_VI_PAUSED = 3,
526 	FLOW_POOL_VO_PAUSED = 4,
527 	FLOW_POOL_INVALID = 5,
528 	FLOW_POOL_INACTIVE = 6,
529 };
530 
531 /**
532  * struct dp_tx_tso_seg_pool_s
533  * @pool_size: total number of pool elements
534  * @num_free: free element count
535  * @freelist: first free element pointer
536  * @desc_pages: multiple page allocation information for actual descriptors
537  * @lock: lock for accessing the pool
538  */
539 struct dp_tx_tso_seg_pool_s {
540 	uint16_t pool_size;
541 	uint16_t num_free;
542 	struct qdf_tso_seg_elem_t *freelist;
543 	struct qdf_mem_multi_page_t desc_pages;
544 	qdf_spinlock_t lock;
545 };
546 
547 /**
548  * struct dp_tx_tso_num_seg_pool_s {
549  * @num_seg_pool_size: total number of pool elements
550  * @num_free: free element count
551  * @freelist: first free element pointer
552  * @desc_pages: multiple page allocation information for actual descriptors
553  * @lock: lock for accessing the pool
554  */
555 
556 struct dp_tx_tso_num_seg_pool_s {
557 	uint16_t num_seg_pool_size;
558 	uint16_t num_free;
559 	struct qdf_tso_num_seg_elem_t *freelist;
560 	struct qdf_mem_multi_page_t desc_pages;
561 	/*tso mutex */
562 	qdf_spinlock_t lock;
563 };
564 
565 /**
566  * struct dp_tx_desc_pool_s - Tx Descriptor pool information
567  * @elem_size: Size of each descriptor in the pool
568  * @pool_size: Total number of descriptors in the pool
569  * @num_free: Number of free descriptors
570  * @num_allocated: Number of used descriptors
571  * @freelist: Chain of free descriptors
572  * @desc_pages: multiple page allocation information for actual descriptors
573  * @num_invalid_bin: Deleted pool with pending Tx completions.
574  * @flow_pool_array_lock: Lock when operating on flow_pool_array.
575  * @flow_pool_array: List of allocated flow pools
576  * @lock- Lock for descriptor allocation/free from/to the pool
577  */
578 struct dp_tx_desc_pool_s {
579 	uint16_t elem_size;
580 	uint32_t num_allocated;
581 	struct dp_tx_desc_s *freelist;
582 	struct qdf_mem_multi_page_t desc_pages;
583 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
584 	uint16_t pool_size;
585 	uint8_t flow_pool_id;
586 	uint8_t num_invalid_bin;
587 	uint16_t avail_desc;
588 	enum flow_pool_status status;
589 	enum htt_flow_type flow_type;
590 #ifdef QCA_AC_BASED_FLOW_CONTROL
591 	uint16_t stop_th[FL_TH_MAX];
592 	uint16_t start_th[FL_TH_MAX];
593 	qdf_time_t max_pause_time[FL_TH_MAX];
594 	qdf_time_t latest_pause_time[FL_TH_MAX];
595 #else
596 	uint16_t stop_th;
597 	uint16_t start_th;
598 #endif
599 	uint16_t pkt_drop_no_desc;
600 	qdf_spinlock_t flow_pool_lock;
601 	uint8_t pool_create_cnt;
602 	void *pool_owner_ctx;
603 #else
604 	uint16_t elem_count;
605 	uint32_t num_free;
606 	qdf_spinlock_t lock;
607 #endif
608 };
609 
610 /**
611  * struct dp_txrx_pool_stats - flow pool related statistics
612  * @pool_map_count: flow pool map received
613  * @pool_unmap_count: flow pool unmap received
614  * @pkt_drop_no_pool: packets dropped due to unavailablity of pool
615  */
616 struct dp_txrx_pool_stats {
617 	uint16_t pool_map_count;
618 	uint16_t pool_unmap_count;
619 	uint16_t pkt_drop_no_pool;
620 };
621 
622 /**
623  * struct dp_srng - DP srng structure
624  * @hal_srng: hal_srng handle
625  * @base_vaddr_unaligned: un-aligned virtual base address of the srng ring
626  * @base_vaddr_aligned: aligned virtual base address of the srng ring
627  * @base_paddr_unaligned: un-aligned physical base address of the srng ring
628  * @base_paddr_aligned: aligned physical base address of the srng ring
629  * @alloc_size: size of the srng ring
630  * @cached: is the srng ring memory cached or un-cached memory
631  * @irq: irq number of the srng ring
632  * @num_entries: number of entries in the srng ring
633  */
634 struct dp_srng {
635 	hal_ring_handle_t hal_srng;
636 	void *base_vaddr_unaligned;
637 	void *base_vaddr_aligned;
638 	qdf_dma_addr_t base_paddr_unaligned;
639 	qdf_dma_addr_t base_paddr_aligned;
640 	uint32_t alloc_size;
641 	uint8_t cached;
642 	int irq;
643 	uint32_t num_entries;
644 #ifdef DP_MEM_PRE_ALLOC
645 	uint8_t is_mem_prealloc;
646 #endif
647 };
648 
649 struct dp_rx_reorder_array_elem {
650 	qdf_nbuf_t head;
651 	qdf_nbuf_t tail;
652 };
653 
654 #define DP_RX_BA_INACTIVE 0
655 #define DP_RX_BA_ACTIVE 1
656 #define DP_RX_BA_IN_PROGRESS 2
657 struct dp_reo_cmd_info {
658 	uint16_t cmd;
659 	enum hal_reo_cmd_type cmd_type;
660 	void *data;
661 	void (*handler)(struct dp_soc *, void *, union hal_reo_status *);
662 	TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
663 };
664 
665 /* Rx TID */
666 struct dp_rx_tid {
667 	/* TID */
668 	int tid;
669 
670 	/* Num of addba requests */
671 	uint32_t num_of_addba_req;
672 
673 	/* Num of addba responses */
674 	uint32_t num_of_addba_resp;
675 
676 	/* Num of delba requests */
677 	uint32_t num_of_delba_req;
678 
679 	/* Num of addba responses successful */
680 	uint32_t num_addba_rsp_success;
681 
682 	/* Num of addba responses failed */
683 	uint32_t num_addba_rsp_failed;
684 
685 	/* pn size */
686 	uint8_t pn_size;
687 	/* REO TID queue descriptors */
688 	void *hw_qdesc_vaddr_unaligned;
689 	qdf_dma_addr_t hw_qdesc_paddr_unaligned;
690 	qdf_dma_addr_t hw_qdesc_paddr;
691 	uint32_t hw_qdesc_alloc_size;
692 
693 	/* RX ADDBA session state */
694 	int ba_status;
695 
696 	/* RX BA window size */
697 	uint16_t ba_win_size;
698 
699 	/* Starting sequence number in Addba request */
700 	uint16_t startseqnum;
701 
702 	/* TODO: Check the following while adding defragmentation support */
703 	struct dp_rx_reorder_array_elem *array;
704 	/* base - single rx reorder element used for non-aggr cases */
705 	struct dp_rx_reorder_array_elem base;
706 
707 	/* only used for defrag right now */
708 	TAILQ_ENTRY(dp_rx_tid) defrag_waitlist_elem;
709 
710 	/* Store dst desc for reinjection */
711 	hal_ring_desc_t dst_ring_desc;
712 	struct dp_rx_desc *head_frag_desc;
713 
714 	/* rx_tid lock */
715 	qdf_spinlock_t tid_lock;
716 
717 	/* Sequence and fragments that are being processed currently */
718 	uint32_t curr_seq_num;
719 	uint32_t curr_frag_num;
720 
721 	/* head PN number */
722 	uint64_t pn128[2];
723 
724 	uint32_t defrag_timeout_ms;
725 	uint16_t dialogtoken;
726 	uint16_t statuscode;
727 	/* user defined ADDBA response status code */
728 	uint16_t userstatuscode;
729 
730 	/* Store ppdu_id when 2k exception is received */
731 	uint32_t ppdu_id_2k;
732 
733 	/* Delba Tx completion status */
734 	uint8_t delba_tx_status;
735 
736 	/* Delba Tx retry count */
737 	uint8_t delba_tx_retry;
738 
739 	/* Delba stats */
740 	uint32_t delba_tx_success_cnt;
741 	uint32_t delba_tx_fail_cnt;
742 
743 	/* Delba reason code for retries */
744 	uint8_t delba_rcode;
745 
746 	/* Coex Override preserved windows size 1 based */
747 	uint16_t rx_ba_win_size_override;
748 
749 	/* Peer TID statistics */
750 	struct cdp_peer_tid_stats stats;
751 };
752 
753 /**
754  * struct dp_intr_stats - DP Interrupt Stats for an interrupt context
755  * @num_tx_ring_masks: interrupts with tx_ring_mask set
756  * @num_rx_ring_masks: interrupts with rx_ring_mask set
757  * @num_rx_mon_ring_masks: interrupts with rx_mon_ring_mask set
758  * @num_rx_err_ring_masks: interrupts with rx_err_ring_mask set
759  * @num_rx_wbm_rel_ring_masks: interrupts with rx_wbm_rel_ring_mask set
760  * @num_reo_status_ring_masks: interrupts with reo_status_ring_mask set
761  * @num_rxdma2host_ring_masks: interrupts with rxdma2host_ring_mask set
762  * @num_host2rxdma_ring_masks: interrupts with host2rxdma_ring_mask set
763  * @num_host2rxdma_ring_masks: interrupts with host2rxdma_ring_mask set
764  * @num_masks: total number of times the interrupt was received
765  *
766  * Counter for individual masks are incremented only if there are any packets
767  * on that ring.
768  */
769 struct dp_intr_stats {
770 	uint32_t num_tx_ring_masks[MAX_TCL_DATA_RINGS];
771 	uint32_t num_rx_ring_masks[MAX_REO_DEST_RINGS];
772 	uint32_t num_rx_mon_ring_masks;
773 	uint32_t num_rx_err_ring_masks;
774 	uint32_t num_rx_wbm_rel_ring_masks;
775 	uint32_t num_reo_status_ring_masks;
776 	uint32_t num_rxdma2host_ring_masks;
777 	uint32_t num_host2rxdma_ring_masks;
778 	uint32_t num_masks;
779 };
780 
781 /* per interrupt context  */
782 struct dp_intr {
783 	uint8_t tx_ring_mask;   /* WBM Tx completion rings (0-2)
784 				associated with this napi context */
785 	uint8_t rx_ring_mask;   /* Rx REO rings (0-3) associated
786 				with this interrupt context */
787 	uint8_t rx_mon_ring_mask;  /* Rx monitor ring mask (0-2) */
788 	uint8_t rx_err_ring_mask; /* REO Exception Ring */
789 	uint8_t rx_wbm_rel_ring_mask; /* WBM2SW Rx Release Ring */
790 	uint8_t reo_status_ring_mask; /* REO command response ring */
791 	uint8_t rxdma2host_ring_mask; /* RXDMA to host destination ring */
792 	uint8_t host2rxdma_ring_mask; /* Host to RXDMA buffer ring */
793 	/* Host to RXDMA monitor  buffer ring */
794 	uint8_t host2rxdma_mon_ring_mask;
795 	struct dp_soc *soc;    /* Reference to SoC structure ,
796 				to get DMA ring handles */
797 	qdf_lro_ctx_t lro_ctx;
798 	uint8_t dp_intr_id;
799 
800 	/* Interrupt Stats for individual masks */
801 	struct dp_intr_stats intr_stats;
802 };
803 
804 #define REO_DESC_FREELIST_SIZE 64
805 #define REO_DESC_FREE_DEFER_MS 1000
806 struct reo_desc_list_node {
807 	qdf_list_node_t node;
808 	unsigned long free_ts;
809 	struct dp_rx_tid rx_tid;
810 	bool resend_update_reo_cmd;
811 	uint32_t pending_ext_desc_size;
812 };
813 
814 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
815 /**
816  * struct reo_cmd_event_record: Elements to record for each reo command
817  * @cmd_type: reo command type
818  * @cmd_return_status: reo command post status
819  * @timestamp: record timestamp for the reo command
820  */
821 struct reo_cmd_event_record {
822 	enum hal_reo_cmd_type cmd_type;
823 	uint8_t cmd_return_status;
824 	uint32_t timestamp;
825 };
826 
827 /**
828  * struct reo_cmd_event_history: Account for reo cmd events
829  * @index: record number
830  * @cmd_record: list of records
831  */
832 struct reo_cmd_event_history {
833 	qdf_atomic_t index;
834 	struct reo_cmd_event_record cmd_record[REO_CMD_EVENT_HIST_MAX];
835 };
836 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
837 
838 /* SoC level data path statistics */
839 struct dp_soc_stats {
840 	struct {
841 		uint32_t added;
842 		uint32_t deleted;
843 		uint32_t aged_out;
844 		uint32_t map_err;
845 		uint32_t ast_mismatch;
846 	} ast;
847 
848 	/* SOC level TX stats */
849 	struct {
850 		/* Total packets transmitted */
851 		struct cdp_pkt_info egress;
852 		/* packets dropped on tx because of no peer */
853 		struct cdp_pkt_info tx_invalid_peer;
854 		/* descriptors in each tcl ring */
855 		uint32_t tcl_ring_full[MAX_TCL_DATA_RINGS];
856 		/* Descriptors in use at soc */
857 		uint32_t desc_in_use;
858 		/* tqm_release_reason == FW removed */
859 		uint32_t dropped_fw_removed;
860 		/* tx completion release_src != TQM or FW */
861 		uint32_t invalid_release_source;
862 		/* tx completion wbm_internal_error */
863 		uint32_t wbm_internal_error[MAX_WBM_INT_ERROR_REASONS];
864 		/* tx completion non_wbm_internal_error */
865 		uint32_t non_wbm_internal_err;
866 		/* TX Comp loop packet limit hit */
867 		uint32_t tx_comp_loop_pkt_limit_hit;
868 		/* Head pointer Out of sync at the end of dp_tx_comp_handler */
869 		uint32_t hp_oos2;
870 	} tx;
871 
872 	/* SOC level RX stats */
873 	struct {
874 		/* Total rx packets count */
875 		struct cdp_pkt_info ingress;
876 		/* Rx errors */
877 		/* Total Packets in Rx Error ring */
878 		uint32_t err_ring_pkts;
879 		/* No of Fragments */
880 		uint32_t rx_frags;
881 		/* No of incomplete fragments in waitlist */
882 		uint32_t rx_frag_wait;
883 		/* Fragments dropped due to errors */
884 		uint32_t rx_frag_err;
885 		/* Fragments received OOR causing sequence num mismatch */
886 		uint32_t rx_frag_oor;
887 		/* Fragments dropped due to len errors in skb */
888 		uint32_t rx_frag_err_len_error;
889 		/* Fragments dropped due to no peer found */
890 		uint32_t rx_frag_err_no_peer;
891 		/* No of reinjected packets */
892 		uint32_t reo_reinject;
893 		/* Reap loop packet limit hit */
894 		uint32_t reap_loop_pkt_limit_hit;
895 		/* Head pointer Out of sync at the end of dp_rx_process */
896 		uint32_t hp_oos2;
897 		/* Rx ring near full */
898 		uint32_t near_full;
899 		/* Break ring reaping as not all scattered msdu received */
900 		uint32_t msdu_scatter_wait_break;
901 		/* Number of bar frames received */
902 		uint32_t bar_frame;
903 		/* Number of frames routed from rxdma */
904 		uint32_t rxdma2rel_route_drop;
905 		/* Number of frames routed from reo*/
906 		uint32_t reo2rel_route_drop;
907 
908 		struct {
909 			/* Invalid RBM error count */
910 			uint32_t invalid_rbm;
911 			/* Invalid VDEV Error count */
912 			uint32_t invalid_vdev;
913 			/* Invalid PDEV error count */
914 			uint32_t invalid_pdev;
915 
916 			/* Packets delivered to stack that no related peer */
917 			uint32_t pkt_delivered_no_peer;
918 			/* Defrag peer uninit error count */
919 			uint32_t defrag_peer_uninit;
920 			/* Invalid sa_idx or da_idx*/
921 			uint32_t invalid_sa_da_idx;
922 			/* MSDU DONE failures */
923 			uint32_t msdu_done_fail;
924 			/* Invalid PEER Error count */
925 			struct cdp_pkt_info rx_invalid_peer;
926 			/* Invalid PEER ID count */
927 			struct cdp_pkt_info rx_invalid_peer_id;
928 			/* Invalid packet length */
929 			struct cdp_pkt_info rx_invalid_pkt_len;
930 			/* HAL ring access Fail error count */
931 			uint32_t hal_ring_access_fail;
932 			/* HAL ring access full Fail error count */
933 			uint32_t hal_ring_access_full_fail;
934 			/* RX DMA error count */
935 			uint32_t rxdma_error[HAL_RXDMA_ERR_MAX];
936 			/* RX REO DEST Desc Invalid Magic count */
937 			uint32_t rx_desc_invalid_magic;
938 			/* REO Error count */
939 			uint32_t reo_error[HAL_REO_ERR_MAX];
940 			/* HAL REO ERR Count */
941 			uint32_t hal_reo_error[MAX_REO_DEST_RINGS];
942 			/* HAL REO DEST Duplicate count */
943 			uint32_t hal_reo_dest_dup;
944 			/* HAL WBM RELEASE Duplicate count */
945 			uint32_t hal_wbm_rel_dup;
946 			/* HAL RXDMA error Duplicate count */
947 			uint32_t hal_rxdma_err_dup;
948 			/* ipa smmu map duplicate count */
949 			uint32_t ipa_smmu_map_dup;
950 			/* ipa smmu unmap duplicate count */
951 			uint32_t ipa_smmu_unmap_dup;
952 			/* ipa smmu unmap while ipa pipes is disabled */
953 			uint32_t ipa_unmap_no_pipe;
954 			/* REO cmd send fail/requeue count */
955 			uint32_t reo_cmd_send_fail;
956 			/* REO cmd send drain count */
957 			uint32_t reo_cmd_send_drain;
958 			/* RX msdu drop count due to scatter */
959 			uint32_t scatter_msdu;
960 			/* RX msdu drop count due to invalid cookie */
961 			uint32_t invalid_cookie;
962 			/* Count of stale cookie read in RX path */
963 			uint32_t stale_cookie;
964 			/* Delba sent count due to RX 2k jump */
965 			uint32_t rx_2k_jump_delba_sent;
966 			/* RX 2k jump msdu indicated to stack count */
967 			uint32_t rx_2k_jump_to_stack;
968 			/* RX 2k jump msdu dropped count */
969 			uint32_t rx_2k_jump_drop;
970 			/* REO OOR msdu drop count */
971 			uint32_t reo_err_oor_drop;
972 			/* REO OOR msdu indicated to stack count */
973 			uint32_t reo_err_oor_to_stack;
974 			/* REO OOR scattered msdu count */
975 			uint32_t reo_err_oor_sg_count;
976 			/* RX msdu rejected count on delivery to vdev stack_fn*/
977 			uint32_t rejected;
978 			/* Incorrect msdu count in MPDU desc info */
979 			uint32_t msdu_count_mismatch;
980 			/* RX raw frame dropped count */
981 			uint32_t raw_frm_drop;
982 			/* Stale link desc cookie count*/
983 			uint32_t invalid_link_cookie;
984 			/* Nbuf sanity failure */
985 			uint32_t nbuf_sanity_fail;
986 			/* Duplicate link desc refilled */
987 			uint32_t dup_refill_link_desc;
988 			/* Incorrect msdu continuation bit in MSDU desc */
989 			uint32_t msdu_continuation_err;
990 			/* REO OOR eapol drop count */
991 			uint32_t reo_err_oor_eapol_drop;
992 		} err;
993 
994 		/* packet count per core - per ring */
995 		uint64_t ring_packets[NR_CPUS][MAX_REO_DEST_RINGS];
996 	} rx;
997 
998 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
999 	struct reo_cmd_event_history cmd_event_history;
1000 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1001 };
1002 
1003 union dp_align_mac_addr {
1004 	uint8_t raw[QDF_MAC_ADDR_SIZE];
1005 	struct {
1006 		uint16_t bytes_ab;
1007 		uint16_t bytes_cd;
1008 		uint16_t bytes_ef;
1009 	} align2;
1010 	struct {
1011 		uint32_t bytes_abcd;
1012 		uint16_t bytes_ef;
1013 	} align4;
1014 	struct __attribute__((__packed__)) {
1015 		uint16_t bytes_ab;
1016 		uint32_t bytes_cdef;
1017 	} align4_2;
1018 };
1019 
1020 /**
1021  * struct dp_ast_free_cb_params - HMWDS free callback cookie
1022  * @mac_addr: ast mac address
1023  * @peer_mac_addr: mac address of peer
1024  * @type: ast entry type
1025  * @vdev_id: vdev_id
1026  * @flags: ast flags
1027  */
1028 struct dp_ast_free_cb_params {
1029 	union dp_align_mac_addr mac_addr;
1030 	union dp_align_mac_addr peer_mac_addr;
1031 	enum cdp_txrx_ast_entry_type type;
1032 	uint8_t vdev_id;
1033 	uint32_t flags;
1034 };
1035 
1036 /*
1037  * dp_ast_entry
1038  *
1039  * @ast_idx: Hardware AST Index
1040  * @peer_id: Next Hop peer_id (for non-WDS nodes, this will be point to
1041  *           associated peer with this MAC address)
1042  * @mac_addr:  MAC Address for this AST entry
1043  * @next_hop: Set to 1 if this is for a WDS node
1044  * @is_active: flag to indicate active data traffic on this node
1045  *             (used for aging out/expiry)
1046  * @ase_list_elem: node in peer AST list
1047  * @is_bss: flag to indicate if entry corresponds to bss peer
1048  * @is_mapped: flag to indicate that we have mapped the AST entry
1049  *             in ast_table
1050  * @pdev_id: pdev ID
1051  * @vdev_id: vdev ID
1052  * @ast_hash_value: hast value in HW
1053  * @ref_cnt: reference count
1054  * @type: flag to indicate type of the entry(static/WDS/MEC)
1055  * @delete_in_progress: Flag to indicate that delete commands send to FW
1056  *                      and host is waiting for response from FW
1057  * @callback: ast free/unmap callback
1058  * @cookie: argument to callback
1059  * @hash_list_elem: node in soc AST hash list (mac address used as hash)
1060  */
1061 struct dp_ast_entry {
1062 	uint16_t ast_idx;
1063 	uint16_t peer_id;
1064 	union dp_align_mac_addr mac_addr;
1065 	bool next_hop;
1066 	bool is_active;
1067 	bool is_mapped;
1068 	uint8_t pdev_id;
1069 	uint8_t vdev_id;
1070 	uint16_t ast_hash_value;
1071 	qdf_atomic_t ref_cnt;
1072 	enum cdp_txrx_ast_entry_type type;
1073 	bool delete_in_progress;
1074 	txrx_ast_free_cb callback;
1075 	void *cookie;
1076 	TAILQ_ENTRY(dp_ast_entry) ase_list_elem;
1077 	TAILQ_ENTRY(dp_ast_entry) hash_list_elem;
1078 };
1079 
1080 /* SOC level htt stats */
1081 struct htt_t2h_stats {
1082 	/* lock to protect htt_stats_msg update */
1083 	qdf_spinlock_t lock;
1084 
1085 	/* work queue to process htt stats */
1086 	qdf_work_t work;
1087 
1088 	/* T2H Ext stats message queue */
1089 	qdf_nbuf_queue_t msg;
1090 
1091 	/* number of completed stats in htt_stats_msg */
1092 	uint32_t num_stats;
1093 };
1094 
1095 struct link_desc_bank {
1096 	void *base_vaddr_unaligned;
1097 	void *base_vaddr;
1098 	qdf_dma_addr_t base_paddr_unaligned;
1099 	qdf_dma_addr_t base_paddr;
1100 	uint32_t size;
1101 };
1102 
1103 struct rx_buff_pool {
1104 	qdf_nbuf_queue_head_t emerg_nbuf_q;
1105 	uint32_t nbuf_fail_cnt;
1106 	bool is_initialized;
1107 };
1108 
1109 /*
1110  * The logic for get current index of these history is dependent on this
1111  * value being power of 2.
1112  */
1113 #define DP_RX_HIST_MAX 2048
1114 #define DP_RX_ERR_HIST_MAX 2048
1115 #define DP_RX_REINJECT_HIST_MAX 1024
1116 
1117 QDF_COMPILE_TIME_ASSERT(rx_history_size,
1118 			(DP_RX_HIST_MAX &
1119 			 (DP_RX_HIST_MAX - 1)) == 0);
1120 QDF_COMPILE_TIME_ASSERT(rx_err_history_size,
1121 			(DP_RX_ERR_HIST_MAX &
1122 			 (DP_RX_ERR_HIST_MAX - 1)) == 0);
1123 QDF_COMPILE_TIME_ASSERT(rx_reinject_history_size,
1124 			(DP_RX_REINJECT_HIST_MAX &
1125 			 (DP_RX_REINJECT_HIST_MAX - 1)) == 0);
1126 
1127 /**
1128  * struct dp_buf_info_record - ring buffer info
1129  * @hbi: HW ring buffer info
1130  * @timestamp: timestamp when this entry was recorded
1131  */
1132 struct dp_buf_info_record {
1133 	struct hal_buf_info hbi;
1134 	uint64_t timestamp;
1135 };
1136 
1137 /* struct dp_rx_history - rx ring hisotry
1138  * @index: Index where the last entry is written
1139  * @entry: history entries
1140  */
1141 struct dp_rx_history {
1142 	qdf_atomic_t index;
1143 	struct dp_buf_info_record entry[DP_RX_HIST_MAX];
1144 };
1145 
1146 /* struct dp_rx_err_history - rx err ring hisotry
1147  * @index: Index where the last entry is written
1148  * @entry: history entries
1149  */
1150 struct dp_rx_err_history {
1151 	qdf_atomic_t index;
1152 	struct dp_buf_info_record entry[DP_RX_ERR_HIST_MAX];
1153 };
1154 
1155 /* struct dp_rx_reinject_history - rx reinject ring hisotry
1156  * @index: Index where the last entry is written
1157  * @entry: history entries
1158  */
1159 struct dp_rx_reinject_history {
1160 	qdf_atomic_t index;
1161 	struct dp_buf_info_record entry[DP_RX_REINJECT_HIST_MAX];
1162 };
1163 
1164 /* structure to record recent operation related variable */
1165 struct dp_last_op_info {
1166 	/* last link desc buf info through WBM release ring */
1167 	struct hal_buf_info wbm_rel_link_desc;
1168 	/* last link desc buf info through REO reinject ring */
1169 	struct hal_buf_info reo_reinject_link_desc;
1170 };
1171 
1172 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1173 
1174 /**
1175  * struct dp_swlm_tcl_data - params for tcl register write coalescing
1176  *			     descision making
1177  * @nbuf: TX packet
1178  * @tid: tid for transmitting the current packet
1179  * @num_ll_connections: Number of low latency connections on this vdev
1180  *
1181  * This structure contains the information required by the software
1182  * latency manager to decide on whether to coalesce the current TCL
1183  * register write or not.
1184  */
1185 struct dp_swlm_tcl_data {
1186 	qdf_nbuf_t nbuf;
1187 	uint8_t tid;
1188 	uint8_t num_ll_connections;
1189 };
1190 
1191 /**
1192  * union swlm_data - SWLM query data
1193  * @tcl_data: data for TCL query in SWLM
1194  */
1195 union swlm_data {
1196 	struct dp_swlm_tcl_data *tcl_data;
1197 };
1198 
1199 /**
1200  * struct dp_swlm_ops - SWLM ops
1201  * @tcl_wr_coalesce_check: handler to check if the current TCL register
1202  *			   write can be coalesced or not
1203  */
1204 struct dp_swlm_ops {
1205 	int (*tcl_wr_coalesce_check)(struct dp_soc *soc,
1206 				     struct dp_swlm_tcl_data *tcl_data);
1207 };
1208 
1209 /**
1210  * struct dp_swlm_stats - Stats for Software Latency manager.
1211  * @tcl.timer_flush_success: Num TCL HP writes success from timer context
1212  * @tcl.timer_flush_fail: Num TCL HP writes failure from timer context
1213  * @tcl.tid_fail: Num TCL register write coalescing skips, since the pkt
1214  *		 was being transmitted on a TID above coalescing threshold
1215  * @tcl.sp_frames: Num TCL register write coalescing skips, since the pkt
1216  *		  being transmitted was a special frame
1217  * @tcl.ll_connection: Num TCL register write coalescing skips, since the
1218  *		       vdev has low latency connections
1219  * @tcl.bytes_thresh_reached: Num TCL HP writes flush after the coalescing
1220  *			     bytes threshold was reached
1221  * @tcl.time_thresh_reached: Num TCL HP writes flush after the coalescing
1222  *			    session time expired
1223  * @tcl.tput_criteria_fail: Num TCL HP writes coalescing fails, since the
1224  *			   throughput did not meet session threshold
1225  * @tcl.coalesce_success: Num of TCL HP writes coalesced successfully.
1226  * @tcl.coalesce_fail: Num of TCL HP writes coalesces failed
1227  */
1228 struct dp_swlm_stats {
1229 	struct {
1230 		uint32_t timer_flush_success;
1231 		uint32_t timer_flush_fail;
1232 		uint32_t tid_fail;
1233 		uint32_t sp_frames;
1234 		uint32_t ll_connection;
1235 		uint32_t bytes_thresh_reached;
1236 		uint32_t time_thresh_reached;
1237 		uint32_t tput_criteria_fail;
1238 		uint32_t coalesce_success;
1239 		uint32_t coalesce_fail;
1240 	} tcl;
1241 };
1242 
1243 /**
1244  * struct dp_swlm_params: Parameters for different modules in the
1245  *			  Software latency manager.
1246  * @tcl.flush_timer: Timer for flushing the coalesced TCL HP writes
1247  * @tcl.rx_traffic_thresh: Threshold for RX traffic, to begin TCL register
1248  *			   write coalescing
1249  * @tcl.tx_traffic_thresh: Threshold for TX traffic, to begin TCL register
1250  *			   write coalescing
1251  * @tcl.sampling_time: Sampling time to test the throughput threshold
1252  * @tcl.sampling_session_tx_bytes: Num bytes transmitted in the sampling time
1253  * @tcl.bytes_flush_thresh: Bytes threshold to flush the TCL HP register write
1254  * @tcl.time_flush_thresh: Time threshold to flush the TCL HP register write
1255  * @tcl.tx_thresh_multiplier: Multiplier to deduce the bytes threshold after
1256  *			      which the TCL HP register is written, thereby
1257  *			      ending the coalescing.
1258  * @tcl.coalesce_end_time: End timestamp for current coalescing session
1259  * @tcl.bytes_coalesced: Num bytes coalesced in the current session
1260  */
1261 struct dp_swlm_params {
1262 	struct {
1263 		qdf_timer_t flush_timer;
1264 		uint32_t rx_traffic_thresh;
1265 		uint32_t tx_traffic_thresh;
1266 		uint32_t sampling_time;
1267 		uint32_t sampling_session_tx_bytes;
1268 		uint32_t bytes_flush_thresh;
1269 		uint32_t time_flush_thresh;
1270 		uint32_t tx_thresh_multiplier;
1271 		uint64_t coalesce_end_time;
1272 		uint32_t bytes_coalesced;
1273 	} tcl;
1274 };
1275 
1276 /**
1277  * struct dp_swlm - Software latency manager context
1278  * @ops: SWLM ops pointers
1279  * @is_enabled: SWLM enabled/disabled
1280  * @is_init: SWLM module initialized
1281  * @stats: SWLM stats
1282  * @params: SWLM SRNG params
1283  * @tcl_flush_timer: flush timer for TCL register writes
1284  */
1285 struct dp_swlm {
1286 	struct dp_swlm_ops *ops;
1287 	uint8_t is_enabled:1,
1288 		is_init:1;
1289 	struct dp_swlm_stats stats;
1290 	struct dp_swlm_params params;
1291 };
1292 #endif
1293 
1294 /* SOC level structure for data path */
1295 struct dp_soc {
1296 	/**
1297 	 * re-use memory section starts
1298 	 */
1299 
1300 	/* Common base structure - Should be the first member */
1301 	struct cdp_soc_t cdp_soc;
1302 
1303 	/* SoC Obj */
1304 	struct cdp_ctrl_objmgr_psoc *ctrl_psoc;
1305 
1306 	/* OS device abstraction */
1307 	qdf_device_t osdev;
1308 
1309 	/*cce disable*/
1310 	bool cce_disable;
1311 
1312 	/* WLAN config context */
1313 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx;
1314 
1315 	/* HTT handle for host-fw interaction */
1316 	struct htt_soc *htt_handle;
1317 
1318 	/* Commint init done */
1319 	qdf_atomic_t cmn_init_done;
1320 
1321 	/* Opaque hif handle */
1322 	struct hif_opaque_softc *hif_handle;
1323 
1324 	/* PDEVs on this SOC */
1325 	struct dp_pdev *pdev_list[MAX_PDEV_CNT];
1326 
1327 	/* Ring used to replenish rx buffers (maybe to the firmware of MAC) */
1328 	struct dp_srng rx_refill_buf_ring[MAX_PDEV_CNT];
1329 
1330 	struct dp_srng rxdma_mon_desc_ring[MAX_NUM_LMAC_HW];
1331 
1332 	/* RXDMA error destination ring */
1333 	struct dp_srng rxdma_err_dst_ring[MAX_NUM_LMAC_HW];
1334 
1335 	/* RXDMA monitor buffer replenish ring */
1336 	struct dp_srng rxdma_mon_buf_ring[MAX_NUM_LMAC_HW];
1337 
1338 	/* RXDMA monitor destination ring */
1339 	struct dp_srng rxdma_mon_dst_ring[MAX_NUM_LMAC_HW];
1340 
1341 	/* RXDMA monitor status ring. TBD: Check format of this ring */
1342 	struct dp_srng rxdma_mon_status_ring[MAX_NUM_LMAC_HW];
1343 
1344 	/* Number of PDEVs */
1345 	uint8_t pdev_count;
1346 
1347 	/*ast override support in HW*/
1348 	bool ast_override_support;
1349 
1350 	/*number of hw dscp tid map*/
1351 	uint8_t num_hw_dscp_tid_map;
1352 
1353 	/* HAL SOC handle */
1354 	hal_soc_handle_t hal_soc;
1355 
1356 	/* Device ID coming from Bus sub-system */
1357 	uint32_t device_id;
1358 
1359 	/* Link descriptor pages */
1360 	struct qdf_mem_multi_page_t link_desc_pages;
1361 
1362 	/* total link descriptors for regular RX and TX */
1363 	uint32_t total_link_descs;
1364 
1365 	/* monitor link descriptor pages */
1366 	struct qdf_mem_multi_page_t mon_link_desc_pages[MAX_NUM_LMAC_HW];
1367 
1368 	/* total link descriptors for monitor mode for each radio */
1369 	uint32_t total_mon_link_descs[MAX_NUM_LMAC_HW];
1370 
1371 	/* Monitor Link descriptor memory banks */
1372 	struct link_desc_bank
1373 		mon_link_desc_banks[MAX_NUM_LMAC_HW][MAX_MON_LINK_DESC_BANKS];
1374 	uint32_t num_mon_link_desc_banks[MAX_NUM_LMAC_HW];
1375 
1376 	/* Link descriptor Idle list for HW internal use (SRNG mode) */
1377 	struct dp_srng wbm_idle_link_ring;
1378 
1379 	/* Link descriptor Idle list for HW internal use (scatter buffer mode)
1380 	 */
1381 	qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS];
1382 	void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS];
1383 	uint32_t num_scatter_bufs;
1384 
1385 	/* Tx SW descriptor pool */
1386 	struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS];
1387 
1388 	/* Tx MSDU Extension descriptor pool */
1389 	struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS];
1390 
1391 	/* Tx TSO descriptor pool */
1392 	struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS];
1393 
1394 	/* Tx TSO Num of segments pool */
1395 	struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS];
1396 
1397 	/* REO destination rings */
1398 	struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS];
1399 
1400 	/* REO exception ring - See if should combine this with reo_dest_ring */
1401 	struct dp_srng reo_exception_ring;
1402 
1403 	/* REO reinjection ring */
1404 	struct dp_srng reo_reinject_ring;
1405 
1406 	/* REO command ring */
1407 	struct dp_srng reo_cmd_ring;
1408 
1409 	/* REO command status ring */
1410 	struct dp_srng reo_status_ring;
1411 
1412 	/* WBM Rx release ring */
1413 	struct dp_srng rx_rel_ring;
1414 
1415 	/* TCL data ring */
1416 	struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS];
1417 
1418 	/* Number of TCL data rings */
1419 	uint8_t num_tcl_data_rings;
1420 
1421 	/* TCL CMD_CREDIT ring */
1422 	bool init_tcl_cmd_cred_ring;
1423 
1424 	/* It is used as credit based ring on QCN9000 else command ring */
1425 	struct dp_srng tcl_cmd_credit_ring;
1426 
1427 	/* TCL command status ring */
1428 	struct dp_srng tcl_status_ring;
1429 
1430 	/* WBM Tx completion rings */
1431 	struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS];
1432 
1433 	/* Common WBM link descriptor release ring (SW to WBM) */
1434 	struct dp_srng wbm_desc_rel_ring;
1435 
1436 	/* DP Interrupts */
1437 	struct dp_intr intr_ctx[WLAN_CFG_INT_NUM_CONTEXTS];
1438 
1439 	/* Monitor mode mac id to dp_intr_id map */
1440 	int mon_intr_id_lmac_map[MAX_NUM_LMAC_HW];
1441 	/* Rx SW descriptor pool for RXDMA monitor buffer */
1442 	struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS];
1443 
1444 	/* Rx SW descriptor pool for RXDMA status buffer */
1445 	struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS];
1446 
1447 	/* Rx SW descriptor pool for RXDMA buffer */
1448 	struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS];
1449 
1450 	/* Number of REO destination rings */
1451 	uint8_t num_reo_dest_rings;
1452 
1453 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
1454 	/* lock to control access to soc TX descriptors */
1455 	qdf_spinlock_t flow_pool_array_lock;
1456 
1457 	/* pause callback to pause TX queues as per flow control */
1458 	tx_pause_callback pause_cb;
1459 
1460 	/* flow pool related statistics */
1461 	struct dp_txrx_pool_stats pool_stats;
1462 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
1463 
1464 	uint32_t wbm_idle_scatter_buf_size;
1465 
1466 	/* VDEVs on this SOC */
1467 	struct dp_vdev *vdev_id_map[MAX_VDEV_CNT];
1468 
1469 	/* Tx H/W queues lock */
1470 	qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];
1471 
1472 	/* Tx ring map for interrupt processing */
1473 	uint8_t tx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
1474 
1475 	/* Rx ring map for interrupt processing */
1476 	uint8_t rx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
1477 
1478 	/* peer ID to peer object map (array of pointers to peer objects) */
1479 	struct dp_peer **peer_id_to_obj_map;
1480 
1481 	struct {
1482 		unsigned mask;
1483 		unsigned idx_bits;
1484 		TAILQ_HEAD(, dp_peer) * bins;
1485 	} peer_hash;
1486 
1487 	/* rx defrag state – TBD: do we need this per radio? */
1488 	struct {
1489 		struct {
1490 			TAILQ_HEAD(, dp_rx_tid) waitlist;
1491 			uint32_t timeout_ms;
1492 			uint32_t next_flush_ms;
1493 			qdf_spinlock_t defrag_lock;
1494 		} defrag;
1495 		struct {
1496 			int defrag_timeout_check;
1497 			int dup_check;
1498 		} flags;
1499 		TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list;
1500 		qdf_spinlock_t reo_cmd_lock;
1501 	} rx;
1502 
1503 	/* optional rx processing function */
1504 	void (*rx_opt_proc)(
1505 		struct dp_vdev *vdev,
1506 		struct dp_peer *peer,
1507 		unsigned tid,
1508 		qdf_nbuf_t msdu_list);
1509 
1510 	/* pool addr for mcast enhance buff */
1511 	struct {
1512 		int size;
1513 		uint32_t paddr;
1514 		uint32_t *vaddr;
1515 		struct dp_tx_me_buf_t *freelist;
1516 		int buf_in_use;
1517 		qdf_dma_mem_context(memctx);
1518 	} me_buf;
1519 
1520 	/* Protect peer hash table */
1521 	DP_MUTEX_TYPE peer_hash_lock;
1522 	/* Protect peer_id_to_objmap */
1523 	DP_MUTEX_TYPE peer_map_lock;
1524 
1525 	/* maximum value for peer_id */
1526 	uint32_t max_peers;
1527 
1528 	/* SoC level data path statistics */
1529 	struct dp_soc_stats stats;
1530 
1531 	/* Enable processing of Tx completion status words */
1532 	bool process_tx_status;
1533 	bool process_rx_status;
1534 	struct dp_ast_entry **ast_table;
1535 	struct {
1536 		unsigned mask;
1537 		unsigned idx_bits;
1538 		TAILQ_HEAD(, dp_ast_entry) * bins;
1539 	} ast_hash;
1540 
1541 	struct dp_rx_history *rx_ring_history[MAX_REO_DEST_RINGS];
1542 	struct dp_rx_err_history *rx_err_ring_history;
1543 	struct dp_rx_reinject_history *rx_reinject_ring_history;
1544 
1545 	qdf_spinlock_t ast_lock;
1546 	/*Timer for AST entry ageout maintainance */
1547 	qdf_timer_t ast_aging_timer;
1548 
1549 	/*Timer counter for WDS AST entry ageout*/
1550 	uint8_t wds_ast_aging_timer_cnt;
1551 
1552 	/*interrupt timer*/
1553 	qdf_timer_t mon_reap_timer;
1554 	uint8_t reap_timer_init;
1555 	qdf_timer_t lmac_reap_timer;
1556 	uint8_t lmac_timer_init;
1557 	qdf_timer_t int_timer;
1558 	uint8_t intr_mode;
1559 	uint8_t lmac_polled_mode;
1560 	qdf_timer_t mon_vdev_timer;
1561 	uint8_t mon_vdev_timer_state;
1562 
1563 	qdf_list_t reo_desc_freelist;
1564 	qdf_spinlock_t reo_desc_freelist_lock;
1565 
1566 	/* htt stats */
1567 	struct htt_t2h_stats htt_stats;
1568 
1569 	void *external_txrx_handle; /* External data path handle */
1570 #ifdef IPA_OFFLOAD
1571 	/* IPA uC datapath offload Wlan Tx resources */
1572 	struct {
1573 		/* Resource info to be passed to IPA */
1574 		qdf_dma_addr_t ipa_tcl_ring_base_paddr;
1575 		void *ipa_tcl_ring_base_vaddr;
1576 		uint32_t ipa_tcl_ring_size;
1577 		qdf_dma_addr_t ipa_tcl_hp_paddr;
1578 		uint32_t alloc_tx_buf_cnt;
1579 
1580 		qdf_dma_addr_t ipa_wbm_ring_base_paddr;
1581 		void *ipa_wbm_ring_base_vaddr;
1582 		uint32_t ipa_wbm_ring_size;
1583 		qdf_dma_addr_t ipa_wbm_tp_paddr;
1584 
1585 		/* TX buffers populated into the WBM ring */
1586 		void **tx_buf_pool_vaddr_unaligned;
1587 		qdf_dma_addr_t *tx_buf_pool_paddr_unaligned;
1588 	} ipa_uc_tx_rsc;
1589 
1590 	/* IPA uC datapath offload Wlan Rx resources */
1591 	struct {
1592 		/* Resource info to be passed to IPA */
1593 		qdf_dma_addr_t ipa_reo_ring_base_paddr;
1594 		void *ipa_reo_ring_base_vaddr;
1595 		uint32_t ipa_reo_ring_size;
1596 		qdf_dma_addr_t ipa_reo_tp_paddr;
1597 
1598 		/* Resource info to be passed to firmware and IPA */
1599 		qdf_dma_addr_t ipa_rx_refill_buf_ring_base_paddr;
1600 		void *ipa_rx_refill_buf_ring_base_vaddr;
1601 		uint32_t ipa_rx_refill_buf_ring_size;
1602 		qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr;
1603 	} ipa_uc_rx_rsc;
1604 
1605 	qdf_atomic_t ipa_pipes_enabled;
1606 	bool ipa_first_tx_db_access;
1607 #endif
1608 
1609 #ifdef WLAN_FEATURE_STATS_EXT
1610 	struct {
1611 		uint32_t rx_mpdu_received;
1612 		uint32_t rx_mpdu_missed;
1613 	} ext_stats;
1614 	qdf_event_t rx_hw_stats_event;
1615 	qdf_spinlock_t rx_hw_stats_lock;
1616 	bool is_last_stats_ctx_init;
1617 #endif /* WLAN_FEATURE_STATS_EXT */
1618 
1619 	/* Smart monitor capability for HKv2 */
1620 	uint8_t hw_nac_monitor_support;
1621 	/* Flag to indicate if HTT v2 is enabled*/
1622 	bool is_peer_map_unmap_v2;
1623 	/* Per peer per Tid ba window size support */
1624 	uint8_t per_tid_basize_max_tid;
1625 	/* Soc level flag to enable da_war */
1626 	uint8_t da_war_enabled;
1627 	/* number of active ast entries */
1628 	uint32_t num_ast_entries;
1629 	/* rdk rate statistics context at soc level*/
1630 	struct cdp_soc_rate_stats_ctx *rate_stats_ctx;
1631 	/* rdk rate statistics control flag */
1632 	bool rdkstats_enabled;
1633 
1634 	/* 8021p PCP-TID map values */
1635 	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
1636 	/* TID map priority value */
1637 	uint8_t tidmap_prty;
1638 	/* Pointer to global per ring type specific configuration table */
1639 	struct wlan_srng_cfg *wlan_srng_cfg;
1640 	/* Num Tx outstanding on device */
1641 	qdf_atomic_t num_tx_outstanding;
1642 	/* Num Tx exception on device */
1643 	qdf_atomic_t num_tx_exception;
1644 	/* Num Tx allowed */
1645 	uint32_t num_tx_allowed;
1646 	/* Preferred HW mode */
1647 	uint8_t preferred_hw_mode;
1648 
1649 	/**
1650 	 * Flag to indicate whether WAR to address single cache entry
1651 	 * invalidation bug is enabled or not
1652 	 */
1653 	bool is_rx_fse_full_cache_invalidate_war_enabled;
1654 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
1655 	/**
1656 	 * Pointer to DP RX Flow FST at SOC level if
1657 	 * is_rx_flow_search_table_per_pdev is false
1658 	 * TBD: rx_fst[num_macs] if we decide to have per mac FST
1659 	 */
1660 	struct dp_rx_fst *rx_fst;
1661 #ifdef WLAN_SUPPORT_RX_FISA
1662 	uint8_t fisa_enable;
1663 
1664 	/**
1665 	 * Params used for controlling the fisa aggregation dynamically
1666 	 */
1667 	struct {
1668 		qdf_atomic_t skip_fisa;
1669 		uint8_t fisa_force_flush[MAX_REO_DEST_RINGS];
1670 	} skip_fisa_param;
1671 #endif
1672 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
1673 	/* Full monitor mode support */
1674 	bool full_mon_mode;
1675 	/* SG supported for msdu continued packets from wbm release ring */
1676 	bool wbm_release_desc_rx_sg_support;
1677 	bool peer_map_attach_success;
1678 	/* Flag to disable mac1 ring interrupts */
1679 	bool disable_mac1_intr;
1680 	/* Flag to disable mac2 ring interrupts */
1681 	bool disable_mac2_intr;
1682 
1683 	struct {
1684 		/* 1st msdu in sg for msdu continued packets in wbm rel ring */
1685 		bool wbm_is_first_msdu_in_sg;
1686 		/* Wbm sg list head */
1687 		qdf_nbuf_t wbm_sg_nbuf_head;
1688 		/* Wbm sg list tail */
1689 		qdf_nbuf_t wbm_sg_nbuf_tail;
1690 		uint32_t wbm_sg_desc_msdu_len;
1691 	} wbm_sg_param;
1692 	/* Number of msdu exception descriptors */
1693 	uint32_t num_msdu_exception_desc;
1694 
1695 	/* RX buffer params */
1696 	struct rx_buff_pool rx_buff_pool[MAX_PDEV_CNT];
1697 	/* Save recent operation related variable */
1698 	struct dp_last_op_info last_op_info;
1699 	TAILQ_HEAD(, dp_peer) inactive_peer_list;
1700 	qdf_spinlock_t inactive_peer_list_lock;
1701 	TAILQ_HEAD(, dp_vdev) inactive_vdev_list;
1702 	qdf_spinlock_t inactive_vdev_list_lock;
1703 	/* lock to protect vdev_id_map table*/
1704 	qdf_spinlock_t vdev_map_lock;
1705 
1706 	/* Flow Search Table is in CMEM */
1707 	bool fst_in_cmem;
1708 
1709 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1710 	struct dp_swlm swlm;
1711 #endif
1712 };
1713 
1714 #ifdef IPA_OFFLOAD
1715 /**
1716  * dp_ipa_resources - Resources needed for IPA
1717  */
1718 struct dp_ipa_resources {
1719 	qdf_shared_mem_t tx_ring;
1720 	uint32_t tx_num_alloc_buffer;
1721 
1722 	qdf_shared_mem_t tx_comp_ring;
1723 	qdf_shared_mem_t rx_rdy_ring;
1724 	qdf_shared_mem_t rx_refill_ring;
1725 
1726 	/* IPA UC doorbell registers paddr */
1727 	qdf_dma_addr_t tx_comp_doorbell_paddr;
1728 	uint32_t *tx_comp_doorbell_vaddr;
1729 	qdf_dma_addr_t rx_ready_doorbell_paddr;
1730 
1731 	bool is_db_ddr_mapped;
1732 };
1733 #endif
1734 
1735 #define MAX_RX_MAC_RINGS 2
1736 /* Same as NAC_MAX_CLENT */
1737 #define DP_NAC_MAX_CLIENT  24
1738 
1739 /*
1740  * 24 bits cookie size
1741  * 10 bits page id 0 ~ 1023 for MCL
1742  * 3 bits page id 0 ~ 7 for WIN
1743  * WBM Idle List Desc size = 128,
1744  * Num descs per page = 4096/128 = 32 for MCL
1745  * Num descs per page = 2MB/128 = 16384 for WIN
1746  */
1747 /*
1748  * Macros to setup link descriptor cookies - for link descriptors, we just
1749  * need first 3 bits to store bank/page ID for WIN. The
1750  * remaining bytes will be used to set a unique ID, which will
1751  * be useful in debugging
1752  */
1753 #ifdef MAX_ALLOC_PAGE_SIZE
1754 #define LINK_DESC_PAGE_ID_MASK  0x007FE0
1755 #define LINK_DESC_ID_SHIFT      5
1756 #define LINK_DESC_COOKIE(_desc_id, _page_id) \
1757 	((((_page_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_desc_id))
1758 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
1759 	(((_cookie) & LINK_DESC_PAGE_ID_MASK) >> LINK_DESC_ID_SHIFT)
1760 #else
1761 #define LINK_DESC_PAGE_ID_MASK  0x7
1762 #define LINK_DESC_ID_SHIFT      3
1763 #define LINK_DESC_COOKIE(_desc_id, _page_id) \
1764 	((((_desc_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_page_id))
1765 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
1766 	((_cookie) & LINK_DESC_PAGE_ID_MASK)
1767 #endif
1768 #define LINK_DESC_ID_START 0x8000
1769 
1770 /* same as ieee80211_nac_param */
1771 enum dp_nac_param_cmd {
1772 	/* IEEE80211_NAC_PARAM_ADD */
1773 	DP_NAC_PARAM_ADD = 1,
1774 	/* IEEE80211_NAC_PARAM_DEL */
1775 	DP_NAC_PARAM_DEL,
1776 	/* IEEE80211_NAC_PARAM_LIST */
1777 	DP_NAC_PARAM_LIST,
1778 };
1779 
1780 /**
1781  * struct dp_neighbour_peer - neighbour peer list type for smart mesh
1782  * @neighbour_peers_macaddr: neighbour peer's mac address
1783  * @neighbour_peer_list_elem: neighbour peer list TAILQ element
1784  * @ast_entry: ast_entry for neighbour peer
1785  * @rssi: rssi value
1786  */
1787 struct dp_neighbour_peer {
1788 	/* MAC address of neighbour's peer */
1789 	union dp_align_mac_addr neighbour_peers_macaddr;
1790 	struct dp_vdev *vdev;
1791 	struct dp_ast_entry *ast_entry;
1792 	uint8_t rssi;
1793 	/* node in the list of neighbour's peer */
1794 	TAILQ_ENTRY(dp_neighbour_peer) neighbour_peer_list_elem;
1795 };
1796 
1797 #ifdef WLAN_TX_PKT_CAPTURE_ENH
1798 #define WLAN_TX_PKT_CAPTURE_ENH 1
1799 #define DP_TX_PPDU_PROC_THRESHOLD 8
1800 #define DP_TX_PPDU_PROC_TIMEOUT 10
1801 #endif
1802 
1803 /**
1804  * struct ppdu_info - PPDU Status info descriptor
1805  * @ppdu_id: Unique ppduid assigned by firmware for every tx packet
1806  * @sched_cmdid: schedule command id, which will be same in a burst
1807  * @max_ppdu_id: wrap around for ppdu id
1808  * @last_tlv_cnt: Keep track for missing ppdu tlvs
1809  * @last_user: last ppdu processed for user
1810  * @is_ampdu: set if Ampdu aggregate
1811  * @nbuf: ppdu descriptor payload
1812  * @ppdu_desc: ppdu descriptor
1813  * @ppdu_info_list_elem: linked list of ppdu tlvs
1814  * @ppdu_info_queue_elem: Singly linked list (queue) of ppdu tlvs
1815  * @mpdu_compltn_common_tlv: Successful tlv counter from COMPLTN COMMON tlv
1816  * @mpdu_ack_ba_tlv: Successful tlv counter from ACK BA tlv
1817  */
1818 struct ppdu_info {
1819 	uint32_t ppdu_id;
1820 	uint32_t sched_cmdid;
1821 	uint32_t max_ppdu_id;
1822 	uint32_t tsf_l32;
1823 	uint16_t tlv_bitmap;
1824 	uint16_t last_tlv_cnt;
1825 	uint16_t last_user:8,
1826 		 is_ampdu:1;
1827 	qdf_nbuf_t nbuf;
1828 	struct cdp_tx_completion_ppdu *ppdu_desc;
1829 #ifdef WLAN_TX_PKT_CAPTURE_ENH
1830 	union {
1831 		TAILQ_ENTRY(ppdu_info) ppdu_info_dlist_elem;
1832 		STAILQ_ENTRY(ppdu_info) ppdu_info_slist_elem;
1833 	} ulist;
1834 #define ppdu_info_list_elem ulist.ppdu_info_dlist_elem
1835 #define ppdu_info_queue_elem ulist.ppdu_info_slist_elem
1836 #else
1837 	TAILQ_ENTRY(ppdu_info) ppdu_info_list_elem;
1838 #endif
1839 	uint8_t compltn_common_tlv;
1840 	uint8_t ack_ba_tlv;
1841 	bool done;
1842 };
1843 
1844 /**
1845  * struct msdu_completion_info - wbm msdu completion info
1846  * @ppdu_id            - Unique ppduid assigned by firmware for every tx packet
1847  * @peer_id            - peer_id
1848  * @tid                - tid which used during transmit
1849  * @first_msdu         - first msdu indication
1850  * @last_msdu          - last msdu indication
1851  * @msdu_part_of_amsdu - msdu part of amsdu
1852  * @transmit_cnt       - retried count
1853  * @status             - transmit status
1854  * @tsf                - timestamp which it transmitted
1855  */
1856 struct msdu_completion_info {
1857 	uint32_t ppdu_id;
1858 	uint16_t peer_id;
1859 	uint8_t tid;
1860 	uint8_t first_msdu:1,
1861 		last_msdu:1,
1862 		msdu_part_of_amsdu:1;
1863 	uint8_t transmit_cnt;
1864 	uint8_t status;
1865 	uint32_t tsf;
1866 };
1867 
1868 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
1869 struct rx_protocol_tag_map {
1870 	/* This is the user configured tag for the said protocol type */
1871 	uint16_t tag;
1872 };
1873 
1874 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
1875 struct rx_protocol_tag_stats {
1876 	uint32_t tag_ctr;
1877 };
1878 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
1879 
1880 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
1881 
1882 #ifndef WLAN_TX_PKT_CAPTURE_ENH
1883 struct dp_pdev_tx_capture {
1884 };
1885 
1886 struct dp_peer_tx_capture {
1887 };
1888 #endif
1889 #ifdef WLAN_RX_PKT_CAPTURE_ENH
1890 /* Template data to be set for Enhanced RX Monitor packets */
1891 #define RX_MON_CAP_ENH_TRAILER 0xdeadc0dedeadda7a
1892 
1893 /**
1894  * struct dp_rx_mon_enh_trailer_data - Data structure to set a known pattern
1895  * at end of each MSDU in monitor-lite mode
1896  * @reserved1: reserved for future use
1897  * @reserved2: reserved for future use
1898  * @flow_tag: flow tag value read from skb->cb
1899  * @protocol_tag: protocol tag value read from skb->cb
1900  */
1901 struct dp_rx_mon_enh_trailer_data {
1902 	uint16_t reserved1;
1903 	uint16_t reserved2;
1904 	uint16_t flow_tag;
1905 	uint16_t protocol_tag;
1906 };
1907 #endif /* WLAN_RX_PKT_CAPTURE_ENH */
1908 
1909 #ifdef HTT_STATS_DEBUGFS_SUPPORT
1910 /* Number of debugfs entries created for HTT stats */
1911 #define PDEV_HTT_STATS_DBGFS_SIZE HTT_DBG_NUM_EXT_STATS
1912 
1913 /* struct pdev_htt_stats_dbgfs_priv - Structure to maintain debugfs information
1914  * of HTT stats
1915  * @pdev: dp pdev of debugfs entry
1916  * @stats_id: stats id of debugfs entry
1917  */
1918 struct pdev_htt_stats_dbgfs_priv {
1919 	struct dp_pdev *pdev;
1920 	uint16_t stats_id;
1921 };
1922 
1923 /* struct pdev_htt_stats_dbgfs_cfg - PDEV level data structure for debugfs
1924  * support for HTT stats
1925  * @debugfs_entry: qdf_debugfs directory entry
1926  * @m: qdf debugfs file handler
1927  * @pdev_htt_stats_dbgfs_ops: File operations of entry created
1928  * @priv: HTT stats debugfs private object
1929  * @htt_stats_dbgfs_event: HTT stats event for debugfs support
1930  * @lock: HTT stats debugfs lock
1931  * @htt_stats_dbgfs_msg_process: Function callback to print HTT stats
1932  */
1933 struct pdev_htt_stats_dbgfs_cfg {
1934 	qdf_dentry_t debugfs_entry[PDEV_HTT_STATS_DBGFS_SIZE];
1935 	qdf_debugfs_file_t m;
1936 	struct qdf_debugfs_fops
1937 			pdev_htt_stats_dbgfs_ops[PDEV_HTT_STATS_DBGFS_SIZE - 1];
1938 	struct pdev_htt_stats_dbgfs_priv priv[PDEV_HTT_STATS_DBGFS_SIZE - 1];
1939 	qdf_event_t htt_stats_dbgfs_event;
1940 	qdf_mutex_t lock;
1941 	void (*htt_stats_dbgfs_msg_process)(void *data, A_INT32 len);
1942 };
1943 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
1944 
1945 /* PDEV level structure for data path */
1946 struct dp_pdev {
1947 	/**
1948 	 * Re-use Memory Section Starts
1949 	 */
1950 
1951 	/* PDEV Id */
1952 	int pdev_id;
1953 
1954 	/* LMAC Id */
1955 	int lmac_id;
1956 
1957 	/* Target pdev  Id */
1958 	int target_pdev_id;
1959 
1960 	/* TXRX SOC handle */
1961 	struct dp_soc *soc;
1962 
1963 	/* Stuck count on monitor destination ring MPDU process */
1964 	uint32_t mon_dest_ring_stuck_cnt;
1965 
1966 	bool pdev_deinit;
1967 
1968 	/* pdev status down or up required to handle dynamic hw
1969 	 * mode switch between DBS and DBS_SBS.
1970 	 * 1 = down
1971 	 * 0 = up
1972 	 */
1973 	bool is_pdev_down;
1974 
1975 	/* Second ring used to replenish rx buffers */
1976 	struct dp_srng rx_refill_buf_ring2;
1977 
1978 	/* Empty ring used by firmware to post rx buffers to the MAC */
1979 	struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];
1980 
1981 	int ch_band_lmac_id_mapping[REG_BAND_UNKNOWN];
1982 
1983 	/* wlan_cfg pdev ctxt*/
1984 	 struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx;
1985 
1986 	/**
1987 	 * TODO: See if we need a ring map here for LMAC rings.
1988 	 * 1. Monitor rings are currently planning to be processed on receiving
1989 	 * PPDU end interrupts and hence wont need ring based interrupts.
1990 	 * 2. Rx buffer rings will be replenished during REO destination
1991 	 * processing and doesn't require regular interrupt handling - we will
1992 	 * only handle low water mark interrupts which is not expected
1993 	 * frequently
1994 	 */
1995 
1996 	/* VDEV list */
1997 	TAILQ_HEAD(, dp_vdev) vdev_list;
1998 
1999 	/* vdev list lock */
2000 	qdf_spinlock_t vdev_list_lock;
2001 
2002 	/* Number of vdevs this device have */
2003 	uint16_t vdev_count;
2004 
2005 	/* PDEV transmit lock */
2006 	qdf_spinlock_t tx_lock;
2007 
2008 #ifndef REMOVE_PKT_LOG
2009 	bool pkt_log_init;
2010 	/* Pktlog pdev */
2011 	struct pktlog_dev_t *pl_dev;
2012 #endif /* #ifndef REMOVE_PKT_LOG */
2013 
2014 	/* Monitor mode interface and status storage */
2015 	struct dp_vdev *monitor_vdev;
2016 
2017 	/* Monitor mode operation channel */
2018 	int mon_chan_num;
2019 
2020 	/* Monitor mode operation frequency */
2021 	qdf_freq_t mon_chan_freq;
2022 
2023 	/* Monitor mode band */
2024 	enum reg_wifi_band mon_chan_band;
2025 
2026 	/* monitor mode lock */
2027 	qdf_spinlock_t mon_lock;
2028 
2029 	/*tx_mutex for me*/
2030 	DP_MUTEX_TYPE tx_mutex;
2031 
2032 	/* monitor */
2033 	bool monitor_configured;
2034 
2035 	/* Smart Mesh */
2036 	bool filter_neighbour_peers;
2037 
2038 	/*flag to indicate neighbour_peers_list not empty */
2039 	bool neighbour_peers_added;
2040 	/* smart mesh mutex */
2041 	qdf_spinlock_t neighbour_peer_mutex;
2042 	/* Neighnour peer list */
2043 	TAILQ_HEAD(, dp_neighbour_peer) neighbour_peers_list;
2044 	/* msdu chain head & tail */
2045 	qdf_nbuf_t invalid_peer_head_msdu;
2046 	qdf_nbuf_t invalid_peer_tail_msdu;
2047 
2048 	/* Band steering  */
2049 	/* TBD */
2050 
2051 	/* PDEV level data path statistics */
2052 	struct cdp_pdev_stats stats;
2053 
2054 	/* Global RX decap mode for the device */
2055 	enum htt_pkt_type rx_decap_mode;
2056 
2057 	/* Enhanced Stats is enabled */
2058 	bool enhanced_stats_en;
2059 
2060 	/* advance filter mode and type*/
2061 	uint8_t mon_filter_mode;
2062 	uint16_t fp_mgmt_filter;
2063 	uint16_t fp_ctrl_filter;
2064 	uint16_t fp_data_filter;
2065 	uint16_t mo_mgmt_filter;
2066 	uint16_t mo_ctrl_filter;
2067 	uint16_t mo_data_filter;
2068 	uint16_t md_data_filter;
2069 
2070 	qdf_atomic_t num_tx_outstanding;
2071 	int32_t tx_descs_max;
2072 
2073 	qdf_atomic_t num_tx_exception;
2074 
2075 	/* MCL specific local peer handle */
2076 	struct {
2077 		uint8_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
2078 		uint8_t freelist;
2079 		qdf_spinlock_t lock;
2080 		struct dp_peer *map[OL_TXRX_NUM_LOCAL_PEER_IDS];
2081 	} local_peer_ids;
2082 
2083 	/* dscp_tid_map_*/
2084 	uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX];
2085 
2086 	struct hal_rx_ppdu_info ppdu_info;
2087 
2088 	/* operating channel */
2089 	struct {
2090 		uint8_t num;
2091 		uint8_t band;
2092 		uint16_t freq;
2093 	} operating_channel;
2094 
2095 	qdf_nbuf_queue_t rx_status_q;
2096 	uint32_t mon_ppdu_status;
2097 	struct cdp_mon_status rx_mon_recv_status;
2098 	/* monitor mode status/destination ring PPDU and MPDU count */
2099 	struct cdp_pdev_mon_stats rx_mon_stats;
2100 	/* to track duplicate link descriptor indications by HW for a WAR */
2101 	uint64_t mon_last_linkdesc_paddr;
2102 	/* to track duplicate buffer indications by HW for a WAR */
2103 	uint32_t mon_last_buf_cookie;
2104 	/* 128 bytes mpdu header queue per user for ppdu */
2105 	qdf_nbuf_queue_t mpdu_q[MAX_MU_USERS];
2106 	/* is this a mpdu header TLV and not msdu header TLV */
2107 	bool is_mpdu_hdr[MAX_MU_USERS];
2108 	/* per user 128 bytes msdu header list for MPDU */
2109 	struct msdu_list msdu_list[MAX_MU_USERS];
2110 	/* RX enhanced capture mode */
2111 	uint8_t rx_enh_capture_mode;
2112 	/* Rx per peer enhanced capture mode */
2113 	bool rx_enh_capture_peer;
2114 	struct dp_vdev *rx_enh_monitor_vdev;
2115 	/* RX enhanced capture trailer enable/disable flag */
2116 	bool is_rx_enh_capture_trailer_enabled;
2117 #ifdef WLAN_RX_PKT_CAPTURE_ENH
2118 	/* RX per MPDU/PPDU information */
2119 	struct cdp_rx_indication_mpdu mpdu_ind;
2120 #endif
2121 	/* pool addr for mcast enhance buff */
2122 	struct {
2123 		int size;
2124 		uint32_t paddr;
2125 		char *vaddr;
2126 		struct dp_tx_me_buf_t *freelist;
2127 		int buf_in_use;
2128 		qdf_dma_mem_context(memctx);
2129 	} me_buf;
2130 
2131 	bool hmmc_tid_override_en;
2132 	uint8_t hmmc_tid;
2133 
2134 	/* Number of VAPs with mcast enhancement enabled */
2135 	qdf_atomic_t mc_num_vap_attached;
2136 
2137 	qdf_atomic_t stats_cmd_complete;
2138 
2139 #ifdef IPA_OFFLOAD
2140 	ipa_uc_op_cb_type ipa_uc_op_cb;
2141 	void *usr_ctxt;
2142 	struct dp_ipa_resources ipa_resource;
2143 #endif
2144 
2145 	/* TBD */
2146 
2147 	/* map this pdev to a particular Reo Destination ring */
2148 	enum cdp_host_reo_dest_ring reo_dest;
2149 
2150 	/* Packet log mode */
2151 	uint8_t rx_pktlog_mode;
2152 
2153 	/* WDI event handlers */
2154 	struct wdi_event_subscribe_t **wdi_event_list;
2155 
2156 	/* ppdu_id of last received HTT TX stats */
2157 	uint32_t last_ppdu_id;
2158 	struct {
2159 		uint8_t last_user;
2160 		qdf_nbuf_t buf;
2161 	} tx_ppdu_info;
2162 
2163 	bool tx_sniffer_enable;
2164 	/* mirror copy mode */
2165 	enum m_copy_mode mcopy_mode;
2166 	bool cfr_rcc_mode;
2167 	bool enable_reap_timer_non_pkt;
2168 	bool bpr_enable;
2169 
2170 	/* enable time latency check for tx completion */
2171 	bool latency_capture_enable;
2172 
2173 	/* enable calculation of delay stats*/
2174 	bool delay_stats_flag;
2175 	struct {
2176 		uint32_t tx_ppdu_id;
2177 		uint16_t tx_peer_id;
2178 		uint32_t rx_ppdu_id;
2179 	} m_copy_id;
2180 
2181 	/* To check if PPDU Tx stats are enabled for Pktlog */
2182 	bool pktlog_ppdu_stats;
2183 
2184 	void *dp_txrx_handle; /* Advanced data path handle */
2185 
2186 #ifdef ATH_SUPPORT_NAC_RSSI
2187 	bool nac_rssi_filtering;
2188 #endif
2189 	/* list of ppdu tlvs */
2190 	TAILQ_HEAD(, ppdu_info) ppdu_info_list;
2191 	TAILQ_HEAD(, ppdu_info) sched_comp_ppdu_list;
2192 
2193 	uint32_t sched_comp_list_depth;
2194 	uint16_t delivered_sched_cmdid;
2195 	uint16_t last_sched_cmdid;
2196 	uint32_t tlv_count;
2197 	uint32_t list_depth;
2198 	uint32_t ppdu_id;
2199 	bool first_nbuf;
2200 	struct {
2201 		qdf_nbuf_t last_nbuf; /*Ptr to mgmt last buf */
2202 		uint8_t *mgmt_buf; /* Ptr to mgmt. payload in HTT ppdu stats */
2203 		uint32_t mgmt_buf_len; /* Len of mgmt. payload in ppdu stats */
2204 		uint32_t ppdu_id;
2205 	} mgmtctrl_frm_info;
2206 
2207 	/* Current noise-floor reading for the pdev channel */
2208 	int16_t chan_noise_floor;
2209 
2210 	/*
2211 	 * For multiradio device, this flag indicates if
2212 	 * this radio is primary or secondary.
2213 	 *
2214 	 * For HK 1.0, this is used for WAR for the AST issue.
2215 	 * HK 1.x mandates creation of only 1 AST entry with same MAC address
2216 	 * across 2 radios. is_primary indicates the radio on which DP should
2217 	 * install HW AST entry if there is a request to add 2 AST entries
2218 	 * with same MAC address across 2 radios
2219 	 */
2220 	uint8_t is_primary;
2221 	/* Context of cal client timer */
2222 	struct cdp_cal_client *cal_client_ctx;
2223 	struct cdp_tx_sojourn_stats sojourn_stats;
2224 	qdf_nbuf_t sojourn_buf;
2225 
2226 	/* peer pointer for collecting invalid peer stats */
2227 	struct dp_peer *invalid_peer;
2228 
2229 	union dp_rx_desc_list_elem_t *free_list_head;
2230 	union dp_rx_desc_list_elem_t *free_list_tail;
2231 	/* Pdev level flag to check peer based pktlog enabled or
2232 	 * disabled
2233 	 */
2234 	uint8_t dp_peer_based_pktlog;
2235 
2236 	/* Cached peer_id from htt_peer_details_tlv */
2237 	uint16_t fw_stats_peer_id;
2238 
2239 	/* qdf_event for fw_peer_stats */
2240 	qdf_event_t fw_peer_stats_event;
2241 
2242 	/* User configured max number of tx buffers */
2243 	uint32_t num_tx_allowed;
2244 
2245 	/* unique cookie required for peer session */
2246 	uint32_t next_peer_cookie;
2247 
2248 	/*
2249 	 * Run time enabled when the first protocol tag is added,
2250 	 * run time disabled when the last protocol tag is deleted
2251 	 */
2252 	bool  is_rx_protocol_tagging_enabled;
2253 
2254 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
2255 	/*
2256 	 * The protocol type is used as array index to save
2257 	 * user provided tag info
2258 	 */
2259 	struct rx_protocol_tag_map rx_proto_tag_map[RX_PROTOCOL_TAG_MAX];
2260 
2261 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
2262 	/*
2263 	 * Track msdus received from each reo ring separately to avoid
2264 	 * simultaneous writes from different core
2265 	 */
2266 	struct rx_protocol_tag_stats
2267 		reo_proto_tag_stats[MAX_REO_DEST_RINGS][RX_PROTOCOL_TAG_MAX];
2268 	/* Track msdus received from expection ring separately */
2269 	struct rx_protocol_tag_stats
2270 		rx_err_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
2271 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
2272 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
2273 
2274 	/* tx packet capture enhancement */
2275 	enum cdp_tx_enh_capture_mode tx_capture_enabled;
2276 	struct dp_pdev_tx_capture tx_capture;
2277 
2278 	uint32_t *ppdu_tlv_buf; /* Buffer to hold HTT ppdu stats TLVs*/
2279 
2280 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
2281 	/**
2282 	 * Pointer to DP Flow FST at SOC level if
2283 	 * is_rx_flow_search_table_per_pdev is true
2284 	 */
2285 	struct dp_rx_fst *rx_fst;
2286 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
2287 
2288 #ifdef FEATURE_TSO_STATS
2289 	/* TSO Id to index into TSO packet information */
2290 	qdf_atomic_t tso_idx;
2291 #endif /* FEATURE_TSO_STATS */
2292 
2293 #ifdef WLAN_SUPPORT_DATA_STALL
2294 	data_stall_detect_cb data_stall_detect_callback;
2295 #endif /* WLAN_SUPPORT_DATA_STALL */
2296 
2297 	struct dp_mon_filter **filter;	/* Monitor Filter pointer */
2298 
2299 #ifdef QCA_SUPPORT_FULL_MON
2300 	/* List to maintain all MPDUs for a PPDU in monitor mode */
2301 	TAILQ_HEAD(, dp_mon_mpdu) mon_mpdu_q;
2302 
2303 	/* TODO: define per-user mpdu list
2304 	 * struct dp_mon_mpdu_list mpdu_list[MAX_MU_USERS];
2305 	 */
2306 	struct hal_rx_mon_desc_info *mon_desc;
2307 #endif
2308 	qdf_nbuf_t mcopy_status_nbuf;
2309 
2310 	/* Flag to hold on to monitor destination ring */
2311 	bool hold_mon_dest_ring;
2312 
2313 #ifdef WLAN_ATF_ENABLE
2314 	/* ATF stats enable */
2315 	bool dp_atf_stats_enable;
2316 #endif
2317 
2318 	/* Maintains first status buffer's paddr of a PPDU */
2319 	uint64_t status_buf_addr;
2320 #ifdef HTT_STATS_DEBUGFS_SUPPORT
2321 	/* HTT stats debugfs params */
2322 	struct pdev_htt_stats_dbgfs_cfg *dbgfs_cfg;
2323 #endif
2324 };
2325 
2326 struct dp_peer;
2327 
2328 /* VDEV structure for data path state */
2329 struct dp_vdev {
2330 	/* OS device abstraction */
2331 	qdf_device_t osdev;
2332 
2333 	/* physical device that is the parent of this virtual device */
2334 	struct dp_pdev *pdev;
2335 
2336 	/* VDEV operating mode */
2337 	enum wlan_op_mode opmode;
2338 
2339 	/* VDEV subtype */
2340 	enum wlan_op_subtype subtype;
2341 
2342 	/* Tx encapsulation type for this VAP */
2343 	enum htt_cmn_pkt_type tx_encap_type;
2344 
2345 	/* Rx Decapsulation type for this VAP */
2346 	enum htt_cmn_pkt_type rx_decap_type;
2347 
2348 	/* WDS enabled */
2349 	bool wds_enabled;
2350 
2351 	/* MEC enabled */
2352 	bool mec_enabled;
2353 
2354 #ifdef QCA_SUPPORT_WDS_EXTENDED
2355 	bool wds_ext_enabled;
2356 #endif /* QCA_SUPPORT_WDS_EXTENDED */
2357 
2358 	/* WDS Aging timer period */
2359 	uint32_t wds_aging_timer_val;
2360 
2361 	/* NAWDS enabled */
2362 	bool nawds_enabled;
2363 
2364 	/* Multicast enhancement enabled */
2365 	uint8_t mcast_enhancement_en;
2366 
2367 	/* IGMP multicast enhancement enabled */
2368 	uint8_t igmp_mcast_enhanc_en;
2369 
2370 	/* HW TX Checksum Enabled Flag */
2371 	uint8_t csum_enabled;
2372 
2373 	/* vdev_id - ID used to specify a particular vdev to the target */
2374 	uint8_t vdev_id;
2375 
2376 	/* Default HTT meta data for this VDEV */
2377 	/* TBD: check alignment constraints */
2378 	uint16_t htt_tcl_metadata;
2379 
2380 	/* Mesh mode vdev */
2381 	uint32_t mesh_vdev;
2382 
2383 	/* Mesh mode rx filter setting */
2384 	uint32_t mesh_rx_filter;
2385 
2386 	/* DSCP-TID mapping table ID */
2387 	uint8_t dscp_tid_map_id;
2388 
2389 	/* Address search type to be set in TX descriptor */
2390 	uint8_t search_type;
2391 
2392 	/*
2393 	 * Flag to indicate if s/w tid classification should be
2394 	 * skipped
2395 	 */
2396 	uint8_t skip_sw_tid_classification;
2397 
2398 	/* AST hash value for BSS peer in HW valid for STA VAP*/
2399 	uint16_t bss_ast_hash;
2400 
2401 	/* vdev lmac_id */
2402 	int lmac_id;
2403 
2404 	bool multipass_en;
2405 
2406 	/* Address search flags to be configured in HAL descriptor */
2407 	uint8_t hal_desc_addr_search_flags;
2408 
2409 	/* Handle to the OS shim SW's virtual device */
2410 	ol_osif_vdev_handle osif_vdev;
2411 
2412 	/* MAC address */
2413 	union dp_align_mac_addr mac_addr;
2414 
2415 	/* node in the pdev's list of vdevs */
2416 	TAILQ_ENTRY(dp_vdev) vdev_list_elem;
2417 
2418 	/* dp_peer list */
2419 	TAILQ_HEAD(, dp_peer) peer_list;
2420 	/* to protect peer_list */
2421 	DP_MUTEX_TYPE peer_list_lock;
2422 
2423 	/* RX call back function to flush GRO packets*/
2424 	ol_txrx_rx_gro_flush_ind_fp osif_gro_flush;
2425 	/* default RX call back function called by dp */
2426 	ol_txrx_rx_fp osif_rx;
2427 	/* callback to deliver rx frames to the OS */
2428 	ol_txrx_rx_fp osif_rx_stack;
2429 	/* Callback to handle rx fisa frames */
2430 	ol_txrx_fisa_rx_fp osif_fisa_rx;
2431 	ol_txrx_fisa_flush_fp osif_fisa_flush;
2432 
2433 	/* call back function to flush out queued rx packets*/
2434 	ol_txrx_rx_flush_fp osif_rx_flush;
2435 	ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap;
2436 	ol_txrx_get_key_fp osif_get_key;
2437 	ol_txrx_tx_free_ext_fp osif_tx_free_ext;
2438 
2439 #ifdef notyet
2440 	/* callback to check if the msdu is an WAI (WAPI) frame */
2441 	ol_rx_check_wai_fp osif_check_wai;
2442 #endif
2443 
2444 	/* proxy arp function */
2445 	ol_txrx_proxy_arp_fp osif_proxy_arp;
2446 
2447 	/* callback to hand rx monitor 802.11 MPDU to the OS shim */
2448 	ol_txrx_rx_mon_fp osif_rx_mon;
2449 
2450 	ol_txrx_mcast_me_fp me_convert;
2451 
2452 	/* completion function used by this vdev*/
2453 	ol_txrx_completion_fp tx_comp;
2454 
2455 	/* deferred vdev deletion state */
2456 	struct {
2457 		/* VDEV delete pending */
2458 		int pending;
2459 		/*
2460 		* callback and a context argument to provide a
2461 		* notification for when the vdev is deleted.
2462 		*/
2463 		ol_txrx_vdev_delete_cb callback;
2464 		void *context;
2465 	} delete;
2466 
2467 	/* tx data delivery notification callback function */
2468 	struct {
2469 		ol_txrx_data_tx_cb func;
2470 		void *ctxt;
2471 	} tx_non_std_data_callback;
2472 
2473 
2474 	/* safe mode control to bypass the encrypt and decipher process*/
2475 	uint32_t safemode;
2476 
2477 	/* rx filter related */
2478 	uint32_t drop_unenc;
2479 #ifdef notyet
2480 	privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
2481 	uint32_t filters_num;
2482 #endif
2483 	/* TDLS Link status */
2484 	bool tdls_link_connected;
2485 	bool is_tdls_frame;
2486 
2487 	/* per vdev rx nbuf queue */
2488 	qdf_nbuf_queue_t rxq;
2489 
2490 	uint8_t tx_ring_id;
2491 	struct dp_tx_desc_pool_s *tx_desc;
2492 	struct dp_tx_ext_desc_pool_s *tx_ext_desc;
2493 
2494 	/* VDEV Stats */
2495 	struct cdp_vdev_stats stats;
2496 
2497 	/* Is this a proxySTA VAP */
2498 	bool proxysta_vdev;
2499 	/* Is isolation mode enabled */
2500 	bool isolation_vdev;
2501 
2502 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2503 	struct dp_tx_desc_pool_s *pool;
2504 #endif
2505 	/* AP BRIDGE enabled */
2506 	bool ap_bridge_enabled;
2507 
2508 	enum cdp_sec_type  sec_type;
2509 
2510 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
2511 	bool raw_mode_war;
2512 
2513 
2514 	/* AST hash index for BSS peer in HW valid for STA VAP*/
2515 	uint16_t bss_ast_idx;
2516 
2517 	/* Capture timestamp of previous tx packet enqueued */
2518 	uint64_t prev_tx_enq_tstamp;
2519 
2520 	/* Capture timestamp of previous rx packet delivered */
2521 	uint64_t prev_rx_deliver_tstamp;
2522 
2523 	/* 8021p PCP-TID mapping table ID */
2524 	uint8_t tidmap_tbl_id;
2525 
2526 	/* 8021p PCP-TID map values */
2527 	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
2528 
2529 	/* TIDmap priority */
2530 	uint8_t tidmap_prty;
2531 
2532 #ifdef QCA_MULTIPASS_SUPPORT
2533 	uint16_t *iv_vlan_map;
2534 
2535 	/* dp_peer special list */
2536 	TAILQ_HEAD(, dp_peer) mpass_peer_list;
2537 	DP_MUTEX_TYPE mpass_peer_mutex;
2538 #endif
2539 	/* Extended data path handle */
2540 	struct cdp_ext_vdev *vdev_dp_ext_handle;
2541 #ifdef VDEV_PEER_PROTOCOL_COUNT
2542 	/*
2543 	 * Rx-Ingress and Tx-Egress are in the lower level DP layer
2544 	 * Rx-Egress and Tx-ingress are handled in osif layer for DP
2545 	 * So
2546 	 * Rx-Egress and Tx-ingress mask definitions are in OSIF layer
2547 	 * Rx-Ingress and Tx-Egress definitions are here below
2548 	 */
2549 #define VDEV_PEER_PROTOCOL_RX_INGRESS_MASK 1
2550 #define VDEV_PEER_PROTOCOL_TX_INGRESS_MASK 2
2551 #define VDEV_PEER_PROTOCOL_RX_EGRESS_MASK 4
2552 #define VDEV_PEER_PROTOCOL_TX_EGRESS_MASK 8
2553 	bool peer_protocol_count_track;
2554 	int peer_protocol_count_dropmask;
2555 #endif
2556 	/* callback to collect connectivity stats */
2557 	ol_txrx_stats_rx_fp stats_cb;
2558 	uint32_t num_peers;
2559 	/* entry to inactive_list*/
2560 	TAILQ_ENTRY(dp_vdev) inactive_list_elem;
2561 
2562 #ifdef WLAN_SUPPORT_RX_FISA
2563 	/**
2564 	 * Params used for controlling the fisa aggregation dynamically
2565 	 */
2566 	uint8_t fisa_disallowed[MAX_REO_DEST_RINGS];
2567 	uint8_t fisa_force_flushed[MAX_REO_DEST_RINGS];
2568 #endif
2569 	/*
2570 	 * Refcount for VDEV currently incremented when
2571 	 * peer is created for VDEV
2572 	 */
2573 	qdf_atomic_t ref_cnt;
2574 	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
2575 	uint8_t num_latency_critical_conn;
2576 };
2577 
2578 
2579 enum {
2580 	dp_sec_mcast = 0,
2581 	dp_sec_ucast
2582 };
2583 
2584 #ifdef WDS_VENDOR_EXTENSION
2585 typedef struct {
2586 	uint8_t	wds_tx_mcast_4addr:1,
2587 		wds_tx_ucast_4addr:1,
2588 		wds_rx_filter:1,      /* enforce rx filter */
2589 		wds_rx_ucast_4addr:1, /* when set, accept 4addr unicast frames    */
2590 		wds_rx_mcast_4addr:1;  /* when set, accept 4addr multicast frames  */
2591 
2592 } dp_ecm_policy;
2593 #endif
2594 
2595 /*
2596  * struct dp_peer_cached_bufq - cached_bufq to enqueue rx packets
2597  * @cached_bufq: nbuff list to enqueue rx packets
2598  * @bufq_lock: spinlock for nbuff list access
2599  * @thres: maximum threshold for number of rx buff to enqueue
2600  * @entries: number of entries
2601  * @dropped: number of packets dropped
2602  */
2603 struct dp_peer_cached_bufq {
2604 	qdf_list_t cached_bufq;
2605 	qdf_spinlock_t bufq_lock;
2606 	uint32_t thresh;
2607 	uint32_t entries;
2608 	uint32_t dropped;
2609 };
2610 
2611 /**
2612  * enum dp_peer_ast_flowq
2613  * @DP_PEER_AST_FLOWQ_HI_PRIO: Hi Priority flow queue
2614  * @DP_PEER_AST_FLOWQ_LOW_PRIO: Low priority flow queue
2615  * @DP_PEER_AST_FLOWQ_UDP: flow queue type is UDP
2616  * @DP_PEER_AST_FLOWQ_NON_UDP: flow queue type is Non UDP
2617  */
2618 enum dp_peer_ast_flowq {
2619 	DP_PEER_AST_FLOWQ_HI_PRIO,
2620 	DP_PEER_AST_FLOWQ_LOW_PRIO,
2621 	DP_PEER_AST_FLOWQ_UDP,
2622 	DP_PEER_AST_FLOWQ_NON_UDP,
2623 	DP_PEER_AST_FLOWQ_MAX,
2624 };
2625 
2626 /*
2627  * struct dp_ast_flow_override_info - ast override info
2628  * @ast_index - ast indexes in peer map message
2629  * @ast_valid_mask - ast valid mask for each ast index
2630  * @ast_flow_mask - ast flow mask for each ast index
2631  * @tid_valid_low_pri_mask - per tid mask for low priority flow
2632  * @tid_valid_hi_pri_mask - per tid mask for hi priority flow
2633  */
2634 struct dp_ast_flow_override_info {
2635 	uint16_t ast_idx[DP_PEER_AST_FLOWQ_MAX];
2636 	uint8_t ast_valid_mask;
2637 	uint8_t ast_flow_mask[DP_PEER_AST_FLOWQ_MAX];
2638 	uint8_t tid_valid_low_pri_mask;
2639 	uint8_t tid_valid_hi_pri_mask;
2640 };
2641 
2642 /*
2643  * struct dp_peer_ast_params - ast parameters for a msdu flow-queue
2644  * @ast_index - ast index populated by FW
2645  * @is_valid - ast flow valid mask
2646  * @valid_tid_mask - per tid mask for this ast index
2647  * @flowQ - flow queue id associated with this ast index
2648  */
2649 struct dp_peer_ast_params {
2650 	uint16_t ast_idx;
2651 	uint8_t is_valid;
2652 	uint8_t valid_tid_mask;
2653 	uint8_t flowQ;
2654 };
2655 
2656 #ifdef WLAN_SUPPORT_MSCS
2657 /*MSCS Procedure based macros */
2658 #define IEEE80211_MSCS_MAX_ELEM_SIZE    5
2659 #define IEEE80211_TCLAS_MASK_CLA_TYPE_4  4
2660 /*
2661  * struct dp_peer_mscs_parameter - MSCS database obtained from
2662  * MSCS Request and Response in the control path. This data is used
2663  * by the AP to find out what priority to set based on the tuple
2664  * classification during packet processing.
2665  * @user_priority_bitmap - User priority bitmap obtained during
2666  * handshake
2667  * @user_priority_limit - User priority limit obtained during
2668  * handshake
2669  * @classifier_mask - params to be compared during processing
2670  */
2671 struct dp_peer_mscs_parameter {
2672 	uint8_t user_priority_bitmap;
2673 	uint8_t user_priority_limit;
2674 	uint8_t classifier_mask;
2675 };
2676 #endif
2677 
2678 #ifdef QCA_SUPPORT_WDS_EXTENDED
2679 #define WDS_EXT_PEER_INIT_BIT 0
2680 
2681 /**
2682  * struct dp_wds_ext_peer - wds ext peer structure
2683  * This is used when wds extended feature is enabled
2684  * both compile time and run time. It is created
2685  * when 1st 4 address frame is received from
2686  * wds backhaul.
2687  * @osif_vdev: Handle to the OS shim SW's virtual device
2688  * @init: wds ext netdev state
2689  */
2690 struct dp_wds_ext_peer {
2691 	ol_osif_peer_handle osif_peer;
2692 	unsigned long init;
2693 };
2694 #endif /* QCA_SUPPORT_WDS_EXTENDED */
2695 
2696 /* Peer structure for data path state */
2697 struct dp_peer {
2698 	/* VDEV to which this peer is associated */
2699 	struct dp_vdev *vdev;
2700 
2701 	struct dp_ast_entry *self_ast_entry;
2702 
2703 	qdf_atomic_t ref_cnt;
2704 
2705 	/* peer ID for this peer */
2706 	uint16_t peer_id;
2707 
2708 	union dp_align_mac_addr mac_addr;
2709 
2710 	/* node in the vdev's list of peers */
2711 	TAILQ_ENTRY(dp_peer) peer_list_elem;
2712 	/* node in the hash table bin's list of peers */
2713 	TAILQ_ENTRY(dp_peer) hash_list_elem;
2714 
2715 	/* TID structures */
2716 	struct dp_rx_tid rx_tid[DP_MAX_TIDS];
2717 	struct dp_peer_tx_capture tx_capture;
2718 
2719 
2720 	/* TBD: No transmit TID state required? */
2721 
2722 	struct {
2723 		enum cdp_sec_type sec_type;
2724 		u_int32_t michael_key[2]; /* relevant for TKIP */
2725 	} security[2]; /* 0 -> multicast, 1 -> unicast */
2726 
2727 	/* NAWDS Flag and Bss Peer bit */
2728 	uint16_t nawds_enabled:1, /* NAWDS flag */
2729 		bss_peer:1, /* set for bss peer */
2730 		wds_enabled:1, /* WDS peer */
2731 		authorize:1, /* Set when authorized */
2732 		nac:1, /* NAC Peer*/
2733 		tx_cap_enabled:1, /* Peer's tx-capture is enabled */
2734 		rx_cap_enabled:1, /* Peer's rx-capture is enabled */
2735 		valid:1, /* valid bit */
2736 		in_twt:1, /* in TWT session */
2737 		delete_in_progress:1, /* Indicate kickout sent */
2738 		sta_self_peer:1; /* Indicate STA self peer */
2739 
2740 #ifdef QCA_SUPPORT_PEER_ISOLATION
2741 	bool isolation; /* enable peer isolation for this peer */
2742 #endif
2743 
2744 	/* MCL specific peer local id */
2745 	uint16_t local_id;
2746 	enum ol_txrx_peer_state state;
2747 	qdf_spinlock_t peer_info_lock;
2748 
2749 	/* Peer Stats */
2750 	struct cdp_peer_stats stats;
2751 
2752 	/* Peer extended stats */
2753 	struct cdp_peer_ext_stats *pext_stats;
2754 
2755 	TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
2756 	/* TBD */
2757 
2758 #ifdef WDS_VENDOR_EXTENSION
2759 	dp_ecm_policy wds_ecm;
2760 #endif
2761 
2762 	/* Active Block ack sessions */
2763 	uint16_t active_ba_session_cnt;
2764 
2765 	/* Current HW buffersize setting */
2766 	uint16_t hw_buffer_size;
2767 
2768 	/*
2769 	 * Flag to check if sessions with 256 buffersize
2770 	 * should be terminated.
2771 	 */
2772 	uint8_t kill_256_sessions;
2773 	qdf_atomic_t is_default_route_set;
2774 	/* Peer level flag to check peer based pktlog enabled or
2775 	 * disabled
2776 	 */
2777 	uint8_t peer_based_pktlog_filter;
2778 
2779 	/* rdk statistics context */
2780 	struct cdp_peer_rate_stats_ctx *rdkstats_ctx;
2781 	/* average sojourn time */
2782 	qdf_ewma_tx_lag avg_sojourn_msdu[CDP_DATA_TID_MAX];
2783 
2784 #ifdef QCA_MULTIPASS_SUPPORT
2785 	/* node in the special peer list element */
2786 	TAILQ_ENTRY(dp_peer) mpass_peer_list_elem;
2787 	/* vlan id for key */
2788 	uint16_t vlan_id;
2789 #endif
2790 
2791 #ifdef PEER_CACHE_RX_PKTS
2792 	qdf_atomic_t flush_in_progress;
2793 	struct dp_peer_cached_bufq bufq_info;
2794 #endif
2795 #ifdef FEATURE_PERPKT_INFO
2796 	/* delayed ba ppdu stats handling */
2797 	struct cdp_delayed_tx_completion_ppdu_user delayed_ba_ppdu_stats;
2798 	/* delayed ba flag */
2799 	bool last_delayed_ba;
2800 	/* delayed ba ppdu id */
2801 	uint32_t last_delayed_ba_ppduid;
2802 #endif
2803 #ifdef QCA_PEER_MULTIQ_SUPPORT
2804 	struct dp_peer_ast_params peer_ast_flowq_idx[DP_PEER_AST_FLOWQ_MAX];
2805 #endif
2806 	/* entry to inactive_list*/
2807 	TAILQ_ENTRY(dp_peer) inactive_list_elem;
2808 
2809 	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
2810 
2811 	uint8_t peer_state;
2812 	qdf_spinlock_t peer_state_lock;
2813 #ifdef WLAN_SUPPORT_MSCS
2814 	struct dp_peer_mscs_parameter mscs_ipv4_parameter, mscs_ipv6_parameter;
2815 	bool mscs_active;
2816 #endif
2817 #ifdef QCA_SUPPORT_WDS_EXTENDED
2818 	struct dp_wds_ext_peer wds_ext;
2819 	ol_txrx_rx_fp osif_rx;
2820 #endif
2821 };
2822 
2823 /*
2824  * dp_invalid_peer_msg
2825  * @nbuf: data buffer
2826  * @wh: 802.11 header
2827  * @vdev_id: id of vdev
2828  */
2829 struct dp_invalid_peer_msg {
2830 	qdf_nbuf_t nbuf;
2831 	struct ieee80211_frame *wh;
2832 	uint8_t vdev_id;
2833 };
2834 
2835 /*
2836  * dp_tx_me_buf_t: ME buffer
2837  * next: pointer to next buffer
2838  * data: Destination Mac address
2839  * paddr_macbuf: physical address for dest_mac
2840  */
2841 struct dp_tx_me_buf_t {
2842 	/* Note: ME buf pool initialization logic expects next pointer to
2843 	 * be the first element. Dont add anything before next */
2844 	struct dp_tx_me_buf_t *next;
2845 	uint8_t data[QDF_MAC_ADDR_SIZE];
2846 	qdf_dma_addr_t paddr_macbuf;
2847 };
2848 
2849 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
2850 struct hal_rx_fst;
2851 
2852 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
2853 struct dp_rx_fse {
2854 	/* HAL Rx Flow Search Entry which matches HW definition */
2855 	void *hal_rx_fse;
2856 	/* Toeplitz hash value */
2857 	uint32_t flow_hash;
2858 	/* Flow index, equivalent to hash value truncated to FST size */
2859 	uint32_t flow_id;
2860 	/* Stats tracking for this flow */
2861 	struct cdp_flow_stats stats;
2862 	/* Flag indicating whether flow is IPv4 address tuple */
2863 	uint8_t is_ipv4_addr_entry;
2864 	/* Flag indicating whether flow is valid */
2865 	uint8_t is_valid;
2866 };
2867 
2868 struct dp_rx_fst {
2869 	/* Software (DP) FST */
2870 	uint8_t *base;
2871 	/* Pointer to HAL FST */
2872 	struct hal_rx_fst *hal_rx_fst;
2873 	/* Base physical address of HAL RX HW FST */
2874 	uint64_t hal_rx_fst_base_paddr;
2875 	/* Maximum number of flows FSE supports */
2876 	uint16_t max_entries;
2877 	/* Num entries in flow table */
2878 	uint16_t num_entries;
2879 	/* SKID Length */
2880 	uint16_t max_skid_length;
2881 	/* Hash mask to obtain legitimate hash entry */
2882 	uint32_t hash_mask;
2883 	/* Timer for bundling of flows */
2884 	qdf_timer_t cache_invalidate_timer;
2885 	/**
2886 	 * Flag which tracks whether cache update
2887 	 * is needed on timer expiry
2888 	 */
2889 	qdf_atomic_t is_cache_update_pending;
2890 	/* Flag to indicate completion of FSE setup in HW/FW */
2891 	bool fse_setup_done;
2892 };
2893 
2894 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_rx_fse)
2895 #elif WLAN_SUPPORT_RX_FISA
2896 
2897 struct dp_fisa_stats {
2898 	/* flow index invalid from RX HW TLV */
2899 	uint32_t invalid_flow_index;
2900 	uint32_t reo_mismatch;
2901 };
2902 
2903 enum fisa_aggr_ret {
2904 	FISA_AGGR_DONE,
2905 	FISA_AGGR_NOT_ELIGIBLE,
2906 	FISA_FLUSH_FLOW
2907 };
2908 
2909 struct dp_fisa_rx_sw_ft {
2910 	/* HAL Rx Flow Search Entry which matches HW definition */
2911 	void *hw_fse;
2912 	/* Toeplitz hash value */
2913 	uint32_t flow_hash;
2914 	/* Flow index, equivalent to hash value truncated to FST size */
2915 	uint32_t flow_id;
2916 	/* Stats tracking for this flow */
2917 	struct cdp_flow_stats stats;
2918 	/* Flag indicating whether flow is IPv4 address tuple */
2919 	uint8_t is_ipv4_addr_entry;
2920 	/* Flag indicating whether flow is valid */
2921 	uint8_t is_valid;
2922 	uint8_t is_populated;
2923 	uint8_t is_flow_udp;
2924 	uint8_t is_flow_tcp;
2925 	qdf_nbuf_t head_skb;
2926 	uint16_t cumulative_l4_checksum;
2927 	uint16_t adjusted_cumulative_ip_length;
2928 	uint16_t cur_aggr;
2929 	uint16_t napi_flush_cumulative_l4_checksum;
2930 	uint16_t napi_flush_cumulative_ip_length;
2931 	qdf_nbuf_t last_skb;
2932 	uint32_t head_skb_ip_hdr_offset;
2933 	uint32_t head_skb_l4_hdr_offset;
2934 	struct cdp_rx_flow_tuple_info rx_flow_tuple_info;
2935 	uint8_t napi_id;
2936 	struct dp_vdev *vdev;
2937 	uint64_t bytes_aggregated;
2938 	uint32_t flush_count;
2939 	uint32_t aggr_count;
2940 	uint8_t do_not_aggregate;
2941 	uint16_t hal_cumultive_ip_len;
2942 	struct dp_soc *soc_hdl;
2943 	/* last aggregate count fetched from RX PKT TLV */
2944 	uint32_t last_hal_aggr_count;
2945 	uint32_t cur_aggr_gso_size;
2946 	struct udphdr *head_skb_udp_hdr;
2947 	uint16_t frags_cumulative_len;
2948 	/* CMEM parameters */
2949 	uint32_t cmem_offset;
2950 	uint32_t metadata;
2951 	uint32_t reo_dest_indication;
2952 };
2953 
2954 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_fisa_rx_sw_ft)
2955 #define MAX_FSE_CACHE_FL_HST 10
2956 /**
2957  * struct fse_cache_flush_history - Debug history cache flush
2958  * @timestamp: Entry update timestamp
2959  * @flows_added: Number of flows added for this flush
2960  * @flows_deleted: Number of flows deleted for this flush
2961  */
2962 struct fse_cache_flush_history {
2963 	uint64_t timestamp;
2964 	uint32_t flows_added;
2965 	uint32_t flows_deleted;
2966 };
2967 
2968 struct dp_rx_fst {
2969 	/* Software (DP) FST */
2970 	uint8_t *base;
2971 	/* Pointer to HAL FST */
2972 	struct hal_rx_fst *hal_rx_fst;
2973 	/* Base physical address of HAL RX HW FST */
2974 	uint64_t hal_rx_fst_base_paddr;
2975 	/* Maximum number of flows FSE supports */
2976 	uint16_t max_entries;
2977 	/* Num entries in flow table */
2978 	uint16_t num_entries;
2979 	/* SKID Length */
2980 	uint16_t max_skid_length;
2981 	/* Hash mask to obtain legitimate hash entry */
2982 	uint32_t hash_mask;
2983 	/* Lock for adding/deleting entries of FST */
2984 	qdf_spinlock_t dp_rx_fst_lock;
2985 	uint32_t add_flow_count;
2986 	uint32_t del_flow_count;
2987 	uint32_t hash_collision_cnt;
2988 	struct dp_soc *soc_hdl;
2989 	qdf_atomic_t fse_cache_flush_posted;
2990 	qdf_timer_t fse_cache_flush_timer;
2991 	struct fse_cache_flush_history cache_fl_rec[MAX_FSE_CACHE_FL_HST];
2992 	/* FISA DP stats */
2993 	struct dp_fisa_stats stats;
2994 
2995 	/* CMEM params */
2996 	qdf_work_t fst_update_work;
2997 	qdf_workqueue_t *fst_update_wq;
2998 	qdf_list_t fst_update_list;
2999 	uint32_t meta_counter;
3000 	uint32_t cmem_ba;
3001 	qdf_spinlock_t dp_rx_sw_ft_lock[MAX_REO_DEST_RINGS];
3002 	qdf_event_t cmem_resp_event;
3003 	bool flow_deletion_supported;
3004 	bool fst_in_cmem;
3005 };
3006 
3007 #endif /* WLAN_SUPPORT_RX_FISA */
3008 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
3009 
3010 #ifdef WLAN_FEATURE_STATS_EXT
3011 /*
3012  * dp_req_rx_hw_stats_t: RX peer HW stats query structure
3013  * @pending_tid_query_cnt: pending tid stats count which waits for REO status
3014  * @is_query_timeout: flag to show is stats query timeout
3015  */
3016 struct dp_req_rx_hw_stats_t {
3017 	qdf_atomic_t pending_tid_stats_cnt;
3018 	bool is_query_timeout;
3019 };
3020 #endif
3021 
3022 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id);
3023 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc,
3024 					    uint32_t mac_id);
3025 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id);
3026 
3027 #endif /* _DP_TYPES_H_ */
3028