1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _DP_TYPES_H_
21 #define _DP_TYPES_H_
22 
23 #include <qdf_types.h>
24 #include <qdf_nbuf.h>
25 #include <qdf_lock.h>
26 #include <qdf_atomic.h>
27 #include <qdf_util.h>
28 #include <qdf_list.h>
29 #include <qdf_lro.h>
30 #include <queue.h>
31 #include <htt_common.h>
32 #include <htt.h>
33 #include <htt_stats.h>
34 #include <cdp_txrx_cmn.h>
35 #ifdef DP_MOB_DEFS
36 #include <cds_ieee80211_common.h>
37 #endif
38 #include <wdi_event_api.h>    /* WDI subscriber event list */
39 
40 #include "hal_hw_headers.h"
41 #include <hal_tx.h>
42 #include <hal_reo.h>
43 #include "wlan_cfg.h"
44 #include "hal_rx.h"
45 #include <hal_api.h>
46 #include <hal_api_mon.h>
47 #include "hal_rx.h"
48 //#include "hal_rx_flow.h"
49 
50 #define MAX_BW 8
51 #define MAX_RETRIES 4
52 #define MAX_RECEPTION_TYPES 4
53 
54 #define MINIDUMP_STR_SIZE 25
55 #ifndef REMOVE_PKT_LOG
56 #include <pktlog.h>
57 #endif
58 #include <dp_umac_reset.h>
59 
60 //#include "dp_tx.h"
61 
62 #define REPT_MU_MIMO 1
63 #define REPT_MU_OFDMA_MIMO 3
64 #define DP_VO_TID 6
65  /** MAX TID MAPS AVAILABLE PER PDEV */
66 #define DP_MAX_TID_MAPS 16
67 /** pad DSCP_TID_MAP_MAX with 6 to fix oob issue */
68 #define DSCP_TID_MAP_MAX (64 + 6)
69 #define DP_IP_DSCP_SHIFT 2
70 #define DP_IP_DSCP_MASK 0x3f
71 #define DP_FC0_SUBTYPE_QOS 0x80
72 #define DP_QOS_TID 0x0f
73 #define DP_IPV6_PRIORITY_SHIFT 20
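/*
 * Illustrative sketch, not part of the original header: one plausible use of
 * the DSCP shift/mask above to derive a DSCP index (bounded by
 * DSCP_TID_MAP_MAX) from an IPv4 TOS byte. The variables and the
 * DSCP-to-TID table lookup are assumptions for illustration only.
 *
 *	uint8_t tos = 0xb8;                                      // EF-marked packet
 *	uint8_t dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;  // 0x2e
 *	// dscp would then index a per-pdev DSCP-to-TID map to pick the TID
 */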
74 #define MAX_MON_LINK_DESC_BANKS 2
75 #define DP_VDEV_ALL 0xff
76 
77 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
78 #define WLAN_DP_RESET_MON_BUF_RING_FILTER
79 #define MAX_TXDESC_POOLS 6
80 #else
81 #define MAX_TXDESC_POOLS 4
82 #endif
83 
84 #define MAX_RXDESC_POOLS 4
85 
86 /* Max no. of VDEV per PSOC */
87 #ifdef WLAN_PSOC_MAX_VDEVS
88 #define MAX_VDEV_CNT WLAN_PSOC_MAX_VDEVS
89 #else
90 #define MAX_VDEV_CNT 51
91 #endif
92 
93 /* Max no. of VDEVs, a PDEV can support */
94 #ifdef WLAN_PDEV_MAX_VDEVS
95 #define DP_PDEV_MAX_VDEVS WLAN_PDEV_MAX_VDEVS
96 #else
97 #define DP_PDEV_MAX_VDEVS 17
98 #endif
99 
100 #define EXCEPTION_DEST_RING_ID 0
101 #define MAX_IDLE_SCATTER_BUFS 16
102 #define DP_MAX_IRQ_PER_CONTEXT 12
103 #define DEFAULT_HW_PEER_ID 0xffff
104 
105 #define MAX_AST_AGEOUT_COUNT 128
106 
107 #ifdef TX_ADDR_INDEX_SEARCH
108 #define DP_TX_ADDR_SEARCH_ADDR_POLICY HAL_TX_ADDR_INDEX_SEARCH
109 #else
110 #define DP_TX_ADDR_SEARCH_ADDR_POLICY HAL_TX_ADDR_SEARCH_DEFAULT
111 #endif
112 
113 #define WBM_INT_ERROR_ALL 0
114 #define WBM_INT_ERROR_REO_NULL_BUFFER 1
115 #define WBM_INT_ERROR_REO_NULL_LINK_DESC 2
116 #define WBM_INT_ERROR_REO_NULL_MSDU_BUFF 3
117 #define WBM_INT_ERROR_REO_BUFF_REAPED 4
118 #define MAX_WBM_INT_ERROR_REASONS 5
119 
120 #define MAX_TX_HW_QUEUES MAX_TCL_DATA_RINGS
121 /* Maximum retries for Delba per tid per peer */
122 #define DP_MAX_DELBA_RETRY 3
123 
124 #ifdef AST_OFFLOAD_ENABLE
125 #define AST_OFFLOAD_ENABLE_STATUS 1
126 #else
127 #define AST_OFFLOAD_ENABLE_STATUS 0
128 #endif
129 
130 #ifdef FEATURE_MEC_OFFLOAD
131 #define FW_MEC_FW_OFFLOAD_ENABLED 1
132 #else
133 #define FW_MEC_FW_OFFLOAD_ENABLED 0
134 #endif
135 
136 #define PCP_TID_MAP_MAX 8
137 #define MAX_MU_USERS 37
138 
139 #define REO_CMD_EVENT_HIST_MAX 64
140 
141 #define DP_MAX_SRNGS 64
142 
143 /* 2G PHYB */
144 #define PHYB_2G_LMAC_ID 2
145 #define PHYB_2G_TARGET_PDEV_ID 2
146 
147 /* Flags for skipping s/w tid classification */
148 #define DP_TX_HW_DSCP_TID_MAP_VALID 0x1
149 #define DP_TXRX_HLOS_TID_OVERRIDE_ENABLED 0x2
150 #define DP_TX_MESH_ENABLED 0x4
151 #define DP_TX_INVALID_QOS_TAG 0xf
152 
153 #ifdef WLAN_SUPPORT_RX_FISA
154 #define FISA_FLOW_MAX_AGGR_COUNT        16 /* max flow aggregate count */
155 #endif
156 
157 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
158 #define DP_RX_REFILL_BUFF_POOL_SIZE  2048
159 #define DP_RX_REFILL_BUFF_POOL_BURST 64
160 #define DP_RX_REFILL_THRD_THRESHOLD  512
161 #endif
162 
163 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
164 #define DP_SKIP_BAR_UPDATE_TIMEOUT 5000
165 #endif
166 
167 #define DP_TX_MAGIC_PATTERN_INUSE	0xABCD1234
168 #define DP_TX_MAGIC_PATTERN_FREE	0xDEADBEEF
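/*
 * Illustrative sketch, not part of the original header: when descriptor
 * tracking (DP_TX_TRACKING) is compiled in, the patterns above can be stamped
 * into the Tx descriptor's magic field at hand-out and at release, so stale
 * or double-freed descriptors can be detected. The exact hook points shown
 * here are an assumption.
 *
 *	tx_desc->magic = DP_TX_MAGIC_PATTERN_INUSE;   // when allocated to a frame
 *	...
 *	tx_desc->magic = DP_TX_MAGIC_PATTERN_FREE;    // when returned to the pool
 */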
169 
170 enum rx_pktlog_mode {
171 	DP_RX_PKTLOG_DISABLED = 0,
172 	DP_RX_PKTLOG_FULL,
173 	DP_RX_PKTLOG_LITE,
174 };
175 
176 /**
177  * enum m_copy_mode - Available mcopy modes
178  */
179 enum m_copy_mode {
180 	M_COPY_DISABLED = 0,
181 	M_COPY = 2,
182 	M_COPY_EXTENDED = 4,
183 };
184 
185 struct msdu_list {
186 	qdf_nbuf_t head;
187 	qdf_nbuf_t tail;
188 	uint32_t sum_len;
189 };
190 
191 struct dp_soc_cmn;
192 struct dp_pdev;
193 struct dp_vdev;
194 struct dp_tx_desc_s;
195 struct dp_soc;
196 union dp_rx_desc_list_elem_t;
197 struct cdp_peer_rate_stats_ctx;
198 struct cdp_soc_rate_stats_ctx;
199 struct dp_rx_fst;
200 struct dp_mon_filter;
201 struct dp_mon_mpdu;
202 #ifdef BE_PKTLOG_SUPPORT
203 struct dp_mon_filter_be;
204 #endif
205 struct dp_peer;
206 struct dp_txrx_peer;
207 
208 /**
209  * enum dp_peer_state - DP peer states
210  */
211 enum dp_peer_state {
212 	DP_PEER_STATE_NONE,
213 	DP_PEER_STATE_INIT,
214 	DP_PEER_STATE_ACTIVE,
215 	DP_PEER_STATE_LOGICAL_DELETE,
216 	DP_PEER_STATE_INACTIVE,
217 	DP_PEER_STATE_FREED,
218 	DP_PEER_STATE_INVALID,
219 };
220 
221 /**
222  * enum dp_mod_id - DP module IDs (used for module-wise reference tracking)
223  */
224 enum dp_mod_id {
225 	DP_MOD_ID_TX_RX,
226 	DP_MOD_ID_TX_COMP,
227 	DP_MOD_ID_RX,
228 	DP_MOD_ID_HTT_COMP,
229 	DP_MOD_ID_RX_ERR,
230 	DP_MOD_ID_TX_PPDU_STATS,
231 	DP_MOD_ID_RX_PPDU_STATS,
232 	DP_MOD_ID_CDP,
233 	DP_MOD_ID_GENERIC_STATS,
234 	DP_MOD_ID_TX_MULTIPASS,
235 	DP_MOD_ID_TX_CAPTURE,
236 	DP_MOD_ID_NSS_OFFLOAD,
237 	DP_MOD_ID_CONFIG,
238 	DP_MOD_ID_HTT,
239 	DP_MOD_ID_IPA,
240 	DP_MOD_ID_AST,
241 	DP_MOD_ID_MCAST2UCAST,
242 	DP_MOD_ID_CHILD,
243 	DP_MOD_ID_MESH,
244 	DP_MOD_ID_TX_EXCEPTION,
245 	DP_MOD_ID_TDLS,
246 	DP_MOD_ID_MISC,
247 	DP_MOD_ID_MSCS,
248 	DP_MOD_ID_TX,
249 	DP_MOD_ID_SAWF,
250 	DP_MOD_ID_REINJECT,
251 	DP_MOD_ID_MAX,
252 };
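/*
 * Illustrative sketch, not part of the original header: the module ids above
 * are intended to be passed when a module takes and releases a peer
 * reference, so a leaked reference can be attributed to its owner. The helper
 * names and signatures below are assumptions for illustration.
 *
 *	struct dp_peer *peer =
 *		dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX);
 *	if (peer) {
 *		// ... use the peer under the DP_MOD_ID_RX reference ...
 *		dp_peer_unref_delete(peer, DP_MOD_ID_RX);
 *	}
 */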
253 
254 #define DP_PDEV_ITERATE_VDEV_LIST(_pdev, _vdev) \
255 	TAILQ_FOREACH((_vdev), &(_pdev)->vdev_list, vdev_list_elem)
256 
257 #define DP_VDEV_ITERATE_PEER_LIST(_vdev, _peer) \
258 	TAILQ_FOREACH((_peer), &(_vdev)->peer_list, peer_list_elem)
259 
260 #define DP_PEER_ITERATE_ASE_LIST(_peer, _ase, _temp_ase) \
261 	TAILQ_FOREACH_SAFE((_ase), &(_peer)->ast_entry_list, ase_list_elem, (_temp_ase))
262 
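/*
 * Illustrative sketch, not part of the original header: typical use of the
 * iterator macros above; the caller is assumed to hold the corresponding
 * list lock while walking the list.
 *
 *	struct dp_vdev *vdev;
 *
 *	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
 *		// ... inspect or account each vdev on this pdev ...
 *	}
 */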
263 #define DP_MUTEX_TYPE qdf_spinlock_t
264 
265 #define DP_FRAME_IS_MULTICAST(_a)  (*(_a) & 0x01)
266 #define DP_FRAME_IS_IPV4_MULTICAST(_a)  (*(_a) == 0x01)
267 
268 #define DP_FRAME_IS_IPV6_MULTICAST(_a)         \
269     ((_a)[0] == 0x33 &&                         \
270      (_a)[1] == 0x33)
271 
272 #define DP_FRAME_IS_BROADCAST(_a)              \
273     ((_a)[0] == 0xff &&                         \
274      (_a)[1] == 0xff &&                         \
275      (_a)[2] == 0xff &&                         \
276      (_a)[3] == 0xff &&                         \
277      (_a)[4] == 0xff &&                         \
278      (_a)[5] == 0xff)
279 #define DP_FRAME_IS_SNAP(_llc) ((_llc)->llc_dsap == 0xaa && \
280 		(_llc)->llc_ssap == 0xaa && \
281 		(_llc)->llc_un.type_snap.control == 0x3)
282 #define DP_FRAME_IS_LLC(typeorlen) ((typeorlen) >= 0x600)
283 #define DP_FRAME_FC0_TYPE_MASK 0x0c
284 #define DP_FRAME_FC0_TYPE_DATA 0x08
285 #define DP_FRAME_IS_DATA(_frame) \
286 	(((_frame)->i_fc[0] & DP_FRAME_FC0_TYPE_MASK) == DP_FRAME_FC0_TYPE_DATA)
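/*
 * Illustrative sketch, not part of the original header: classifying a
 * destination MAC address with the helpers above. The buffer below is an
 * assumption for illustration.
 *
 *	uint8_t da[QDF_MAC_ADDR_SIZE] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 *
 *	if (DP_FRAME_IS_BROADCAST(da))
 *		;	// all-0xff destination
 *	else if (DP_FRAME_IS_IPV6_MULTICAST(da))
 *		;	// 33:33:xx:xx:xx:xx destination
 *	else if (DP_FRAME_IS_MULTICAST(da))
 *		;	// group bit set in the first octet
 */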
287 
288 /**
289  * macros to convert hw mac id to sw mac id:
290  * mac ids used by hardware start from a value of 1 while
291  * those in host software start from a value of 0. Use the
292  * macros below to convert between mac ids used by software and
293  * hardware
294  */
295 #define DP_SW2HW_MACID(id) ((id) + 1)
296 #define DP_HW2SW_MACID(id) ((id) > 0 ? ((id) - 1) : 0)
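/*
 * Illustrative sketch, not part of the original header: converting between
 * the 1-based mac ids reported by hardware and the 0-based ids used by host
 * software with the macros above.
 *
 *	int hw_lmac_id = 1;                              // as reported by HW
 *	int sw_lmac_id = DP_HW2SW_MACID(hw_lmac_id);     // 0, host-side index
 *
 *	qdf_assert(DP_SW2HW_MACID(sw_lmac_id) == hw_lmac_id);
 */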
297 
298 /**
299  * Number of Tx Queues
300  * enum and macro to define how many threshold levels are used
301  * for the AC based flow control
302  */
303 #ifdef QCA_AC_BASED_FLOW_CONTROL
304 enum dp_fl_ctrl_threshold {
305 	DP_TH_BE_BK = 0,
306 	DP_TH_VI,
307 	DP_TH_VO,
308 	DP_TH_HI,
309 };
310 
311 #define FL_TH_MAX (4)
312 #define FL_TH_VI_PERCENTAGE (80)
313 #define FL_TH_VO_PERCENTAGE (60)
314 #define FL_TH_HI_PERCENTAGE (40)
315 #endif
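/*
 * Illustrative sketch, not part of the original header: one plausible way the
 * per-AC percentages above translate into per-level stop thresholds, relative
 * to a base BE/BK stop threshold, when QCA_AC_BASED_FLOW_CONTROL is enabled.
 * The exact derivation and the pool fields used are assumptions here.
 *
 *	pool->stop_th[DP_TH_BE_BK] = stop_threshold;
 *	pool->stop_th[DP_TH_VI] =
 *		(pool->stop_th[DP_TH_BE_BK] * FL_TH_VI_PERCENTAGE) / 100;
 *	pool->stop_th[DP_TH_VO] =
 *		(pool->stop_th[DP_TH_BE_BK] * FL_TH_VO_PERCENTAGE) / 100;
 *	pool->stop_th[DP_TH_HI] =
 *		(pool->stop_th[DP_TH_BE_BK] * FL_TH_HI_PERCENTAGE) / 100;
 */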
316 
317 /**
318  * enum dp_intr_mode
319  * @DP_INTR_INTEGRATED: Line interrupts
320  * @DP_INTR_MSI: MSI interrupts
321  * @DP_INTR_POLL: Polling
322  */
323 enum dp_intr_mode {
324 	DP_INTR_INTEGRATED = 0,
325 	DP_INTR_MSI,
326 	DP_INTR_POLL,
327 	DP_INTR_LEGACY_VIRTUAL_IRQ,
328 };
329 
330 /**
331  * enum dp_tx_frm_type
332  * @dp_tx_frm_std: Regular frame, no added header fragments
333  * @dp_tx_frm_tso: TSO segment, with a modified IP header added
334  * @dp_tx_frm_sg: SG segment
335  * @dp_tx_frm_audio: Audio frames, a custom LLC/SNAP header added
336  * @dp_tx_frm_me: Multicast to Unicast Converted frame
337  * @dp_tx_frm_raw: Raw Frame
338  */
339 enum dp_tx_frm_type {
340 	dp_tx_frm_std = 0,
341 	dp_tx_frm_tso,
342 	dp_tx_frm_sg,
343 	dp_tx_frm_audio,
344 	dp_tx_frm_me,
345 	dp_tx_frm_raw,
346 };
347 
348 /**
349  * enum dp_ast_type
350  * @dp_ast_type_wds: WDS peer AST type
351  * @dp_ast_type_static: static ast entry type
352  * @dp_ast_type_mec: Multicast echo ast entry type
353  */
354 enum dp_ast_type {
355 	dp_ast_type_wds = 0,
356 	dp_ast_type_static,
357 	dp_ast_type_mec,
358 };
359 
360 /**
361  * enum dp_nss_cfg
362  * @dp_nss_cfg_default: No radios are offloaded
363  * @dp_nss_cfg_first_radio: First radio offloaded
364  * @dp_nss_cfg_second_radio: Second radio offloaded
365  * @dp_nss_cfg_dbdc: Dual radios offloaded
366  * @dp_nss_cfg_dbtc: Three radios offloaded
367  */
368 enum dp_nss_cfg {
369 	dp_nss_cfg_default = 0x0,
370 	dp_nss_cfg_first_radio = 0x1,
371 	dp_nss_cfg_second_radio = 0x2,
372 	dp_nss_cfg_dbdc = 0x3,
373 	dp_nss_cfg_dbtc = 0x7,
374 	dp_nss_cfg_max
375 };
376 
377 #ifdef WLAN_TX_PKT_CAPTURE_ENH
378 #define DP_CPU_RING_MAP_1 1
379 #endif
380 
381 /**
382  * enum dp_cpu_ring_map_types - dp tx cpu ring map
383  * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
384  * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded
385  * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
386  * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
387  * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
388  * @DP_SINGLE_TX_RING_MAP: all CPUs mapped to a single ring to avoid reordering
389  * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
390  */
391 enum dp_cpu_ring_map_types {
392 	DP_NSS_DEFAULT_MAP,
393 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
394 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
395 	DP_NSS_DBDC_OFFLOADED_MAP,
396 	DP_NSS_DBTC_OFFLOADED_MAP,
397 #ifdef WLAN_TX_PKT_CAPTURE_ENH
398 	DP_SINGLE_TX_RING_MAP,
399 #endif
400 	DP_NSS_CPU_RING_MAP_MAX
401 };
402 
403 /**
404  * struct dp_rx_nbuf_frag_info - Hold vaddr and paddr for a buffer
405  *
406  * @paddr: Physical address of the buffer allocated.
407  * @nbuf: Allocated nbuf, in case of the nbuf approach.
408  * @vaddr: Virtual address of the frag allocated, in case of the frag approach.
409  */
410 struct dp_rx_nbuf_frag_info {
411 	qdf_dma_addr_t paddr;
412 	union {
413 		qdf_nbuf_t nbuf;
414 		qdf_frag_t vaddr;
415 	} virt_addr;
416 };
417 
418 /**
419  * enum dp_ctxt_type - context type
420  * @DP_PDEV_TYPE: PDEV context
421  * @DP_RX_RING_HIST_TYPE: Datapath rx ring history
422  * @DP_RX_ERR_RING_HIST_TYPE: Datapath rx error ring history
423  * @DP_RX_REINJECT_RING_HIST_TYPE: Datapath reinject ring history
424  * @DP_RX_REFILL_RING_HIST_TYPE: Datapath rx refill ring history
425  * @DP_TX_HW_DESC_HIST_TYPE: Datapath TX HW descriptor history
426  * @DP_MON_SOC_TYPE: Datapath monitor soc context
427  * @DP_MON_PDEV_TYPE: Datapath monitor pdev context
428  * @DP_MON_STATUS_BUF_HIST_TYPE: DP monitor status buffer history
429  */
430 enum dp_ctxt_type {
431 	DP_PDEV_TYPE,
432 	DP_RX_RING_HIST_TYPE,
433 	DP_RX_ERR_RING_HIST_TYPE,
434 	DP_RX_REINJECT_RING_HIST_TYPE,
435 	DP_TX_TCL_HIST_TYPE,
436 	DP_TX_COMP_HIST_TYPE,
437 	DP_FISA_RX_FT_TYPE,
438 	DP_RX_REFILL_RING_HIST_TYPE,
439 	DP_TX_HW_DESC_HIST_TYPE,
440 	DP_MON_SOC_TYPE,
441 	DP_MON_PDEV_TYPE,
442 	DP_MON_STATUS_BUF_HIST_TYPE,
443 };
444 
445 /**
446  * enum dp_desc_type - source type for multiple pages allocation
447  * @DP_TX_DESC_TYPE: DP SW TX descriptor
448  * @DP_TX_EXT_DESC_TYPE: DP TX msdu extension descriptor
449  * @DP_TX_EXT_DESC_LINK_TYPE: DP link descriptor for msdu ext_desc
450  * @DP_TX_TSO_DESC_TYPE: DP TX TSO descriptor
451  * @DP_TX_TSO_NUM_SEG_TYPE: DP TX number of segments
452  * @DP_RX_DESC_BUF_TYPE: DP RX SW descriptor
453  * @DP_RX_DESC_STATUS_TYPE: DP RX SW descriptor for monitor status
454  * @DP_HW_LINK_DESC_TYPE: DP HW link descriptor
455  * @DP_HW_CC_SPT_PAGE_TYPE: DP pages for HW CC secondary page table
456  */
457 enum dp_desc_type {
458 	DP_TX_DESC_TYPE,
459 	DP_TX_EXT_DESC_TYPE,
460 	DP_TX_EXT_DESC_LINK_TYPE,
461 	DP_TX_TSO_DESC_TYPE,
462 	DP_TX_TSO_NUM_SEG_TYPE,
463 	DP_RX_DESC_BUF_TYPE,
464 	DP_RX_DESC_STATUS_TYPE,
465 	DP_HW_LINK_DESC_TYPE,
466 	DP_HW_CC_SPT_PAGE_TYPE,
467 };
468 
469 /**
470  * struct rx_desc_pool
471  * @pool_size: number of RX descriptors in the pool
472  * @elem_size: Element size
473  * @desc_pages: Multi page descriptors
474  * @array: pointer to array of RX descriptors
475  * @freelist: pointer to free RX descriptor linked list
476  * @lock: Protection for the RX descriptor pool
477  * @owner: owner for nbuf
478  * @buf_size: Buffer size
479  * @buf_alignment: Buffer alignment
480  * @rx_mon_dest_frag_enable: Enable frag processing for mon dest buffer
481  * @desc_type: type of desc this pool serves
482  */
483 struct rx_desc_pool {
484 	uint32_t pool_size;
485 #ifdef RX_DESC_MULTI_PAGE_ALLOC
486 	uint16_t elem_size;
487 	struct qdf_mem_multi_page_t desc_pages;
488 #else
489 	union dp_rx_desc_list_elem_t *array;
490 #endif
491 	union dp_rx_desc_list_elem_t *freelist;
492 	qdf_spinlock_t lock;
493 	uint8_t owner;
494 	uint16_t buf_size;
495 	uint8_t buf_alignment;
496 	bool rx_mon_dest_frag_enable;
497 	enum dp_desc_type desc_type;
498 };
499 
500 /**
501  * struct dp_tx_ext_desc_elem_s
502  * @next: next extension descriptor pointer
503  * @vaddr: hlos virtual address pointer
504  * @paddr: physical address pointer for descriptor
505  * @flags: mark features for extension descriptor
506  * @me_buffer: Pointer to ME buffer - store this so that it can be freed on
507  *		Tx completion of ME packet
508  * @tso_desc: Pointer to Tso desc
509  * @tso_num_desc: Pointer to tso_num_desc
510  */
511 struct dp_tx_ext_desc_elem_s {
512 	struct dp_tx_ext_desc_elem_s *next;
513 	void *vaddr;
514 	qdf_dma_addr_t paddr;
515 	uint16_t flags;
516 	struct dp_tx_me_buf_t *me_buffer;
517 	struct qdf_tso_seg_elem_t *tso_desc;
518 	struct qdf_tso_num_seg_elem_t *tso_num_desc;
519 };
520 
521 /**
522  * struct dp_tx_ext_desc_pool_s - Tx Extension Descriptor Pool
523  * @elem_count: Number of descriptors in the pool
524  * @elem_size: Size of each descriptor
525  * @num_free: Number of free descriptors
526  * @msdu_ext_desc: MSDU extension descriptor
527  * @desc_pages: multiple page allocation information for actual descriptors
528  * @link_elem_size: size of the link descriptor in cacheable memory used for
529  * 		    chaining the extension descriptors
530  * @desc_link_pages: multiple page allocation information for link descriptors
531  */
532 struct dp_tx_ext_desc_pool_s {
533 	uint16_t elem_count;
534 	int elem_size;
535 	uint16_t num_free;
536 	struct qdf_mem_multi_page_t desc_pages;
537 	int link_elem_size;
538 	struct qdf_mem_multi_page_t desc_link_pages;
539 	struct dp_tx_ext_desc_elem_s *freelist;
540 	qdf_spinlock_t lock;
541 	qdf_dma_mem_context(memctx);
542 };
543 
544 /**
545  * struct dp_tx_desc_s - Tx Descriptor
546  * @next: Next in the chain of descriptors in freelist or in the completion list
547  * @nbuf: Buffer Address
548  * @msdu_ext_desc: MSDU extension descriptor
549  * @id: Descriptor ID
550  * @vdev_id: vdev_id of vdev over which the packet was transmitted
551  * @pdev: Handle to pdev
552  * @pool_id: Pool ID - used when releasing the descriptor
553  * @flags: Flags to track the state of descriptor and special frame handling
554  * @comp: Cached completion info copied from the HAL Tx completion descriptor
555  * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet).
556  * 		   This is maintained in descriptor to allow more efficient
557  * 		   processing in completion event processing code.
558  * 		   This field is filled in with the htt_pkt_type enum.
559  * @buffer_src: buffer source TQM, REO, FW etc.
560  * @frm_type: Frame Type - ToDo check if this is redundant
561  * @pkt_offset: Offset from which the actual packet data starts
562  * @pool: handle to flow_pool this descriptor belongs to.
563  */
564 struct dp_tx_desc_s {
565 	struct dp_tx_desc_s *next;
566 	qdf_nbuf_t nbuf;
567 	uint16_t length;
568 #ifdef DP_TX_TRACKING
569 	uint32_t magic;
570 	uint64_t timestamp_tick;
571 #endif
572 	uint16_t flags;
573 	uint32_t id;
574 	qdf_dma_addr_t dma_addr;
575 	uint8_t vdev_id;
576 	uint8_t tx_status;
577 	uint16_t peer_id;
578 	struct dp_pdev *pdev;
579 	uint8_t tx_encap_type:2,
580 		buffer_src:3,
581 		reserved:3;
582 	uint8_t frm_type;
583 	uint8_t pkt_offset;
584 	uint8_t  pool_id;
585 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
586 	qdf_ktime_t timestamp;
587 	struct hal_tx_desc_comp_s comp;
588 };
589 
590 #ifdef QCA_AC_BASED_FLOW_CONTROL
591 /**
592  * enum flow_pool_status - flow pool status
593  * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
594  *				and network queues are unpaused
595  * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
596  *			   and network queues are paused
597  * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
598  * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
599  * @FLOW_POOL_ACTIVE_UNPAUSED_REATTACH: pool is reattached but network
600  *					queues are not paused
601  */
602 enum flow_pool_status {
603 	FLOW_POOL_ACTIVE_UNPAUSED = 0,
604 	FLOW_POOL_ACTIVE_PAUSED = 1,
605 	FLOW_POOL_BE_BK_PAUSED = 2,
606 	FLOW_POOL_VI_PAUSED = 3,
607 	FLOW_POOL_VO_PAUSED = 4,
608 	FLOW_POOL_INVALID = 5,
609 	FLOW_POOL_INACTIVE = 6,
610 	FLOW_POOL_ACTIVE_UNPAUSED_REATTACH = 7,
611 };
612 
613 #else
614 /**
615  * enum flow_pool_status - flow pool status
616  * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
617  *				and network queues are unpaused
618  * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
619  *			   and network queues are paused
620  * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
621  * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
622  */
623 enum flow_pool_status {
624 	FLOW_POOL_ACTIVE_UNPAUSED = 0,
625 	FLOW_POOL_ACTIVE_PAUSED = 1,
626 	FLOW_POOL_BE_BK_PAUSED = 2,
627 	FLOW_POOL_VI_PAUSED = 3,
628 	FLOW_POOL_VO_PAUSED = 4,
629 	FLOW_POOL_INVALID = 5,
630 	FLOW_POOL_INACTIVE = 6,
631 };
632 
633 #endif
634 
635 /**
636  * struct dp_tx_tso_seg_pool_s
637  * @pool_size: total number of pool elements
638  * @num_free: free element count
639  * @freelist: first free element pointer
640  * @desc_pages: multiple page allocation information for actual descriptors
641  * @lock: lock for accessing the pool
642  */
643 struct dp_tx_tso_seg_pool_s {
644 	uint16_t pool_size;
645 	uint16_t num_free;
646 	struct qdf_tso_seg_elem_t *freelist;
647 	struct qdf_mem_multi_page_t desc_pages;
648 	qdf_spinlock_t lock;
649 };
650 
651 /**
652  * struct dp_tx_tso_num_seg_pool_s - TSO num seg pool
653  * @num_seg_pool_size: total number of pool elements
654  * @num_free: free element count
655  * @freelist: first free element pointer
656  * @desc_pages: multiple page allocation information for actual descriptors
657  * @lock: lock for accessing the pool
658  */
659 
660 struct dp_tx_tso_num_seg_pool_s {
661 	uint16_t num_seg_pool_size;
662 	uint16_t num_free;
663 	struct qdf_tso_num_seg_elem_t *freelist;
664 	struct qdf_mem_multi_page_t desc_pages;
665 	/*tso mutex */
666 	qdf_spinlock_t lock;
667 };
668 
669 /**
670  * struct dp_tx_desc_pool_s - Tx Descriptor pool information
671  * @elem_size: Size of each descriptor in the pool
672  * @pool_size: Total number of descriptors in the pool
673  * @num_free: Number of free descriptors
674  * @num_allocated: Number of used descriptors
675  * @freelist: Chain of free descriptors
676  * @desc_pages: multiple page allocation information for actual descriptors
677  * @num_invalid_bin: Deleted pool with pending Tx completions.
678  * @flow_pool_array_lock: Lock when operating on flow_pool_array.
679  * @flow_pool_array: List of allocated flow pools
680  * @lock: Lock for descriptor allocation/free from/to the pool
681  */
682 struct dp_tx_desc_pool_s {
683 	uint16_t elem_size;
684 	uint32_t num_allocated;
685 	struct dp_tx_desc_s *freelist;
686 	struct qdf_mem_multi_page_t desc_pages;
687 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
688 	uint16_t pool_size;
689 	uint8_t flow_pool_id;
690 	uint8_t num_invalid_bin;
691 	uint16_t avail_desc;
692 	enum flow_pool_status status;
693 	enum htt_flow_type flow_type;
694 #ifdef QCA_AC_BASED_FLOW_CONTROL
695 	uint16_t stop_th[FL_TH_MAX];
696 	uint16_t start_th[FL_TH_MAX];
697 	qdf_time_t max_pause_time[FL_TH_MAX];
698 	qdf_time_t latest_pause_time[FL_TH_MAX];
699 #else
700 	uint16_t stop_th;
701 	uint16_t start_th;
702 #endif
703 	uint16_t pkt_drop_no_desc;
704 	qdf_spinlock_t flow_pool_lock;
705 	uint8_t pool_create_cnt;
706 	void *pool_owner_ctx;
707 #else
708 	uint16_t elem_count;
709 	uint32_t num_free;
710 	qdf_spinlock_t lock;
711 #endif
712 };
713 
714 /**
715  * struct dp_txrx_pool_stats - flow pool related statistics
716  * @pool_map_count: flow pool map received
717  * @pool_unmap_count: flow pool unmap received
718  * @pkt_drop_no_pool: packets dropped due to unavailability of pool
719  */
720 struct dp_txrx_pool_stats {
721 	uint16_t pool_map_count;
722 	uint16_t pool_unmap_count;
723 	uint16_t pkt_drop_no_pool;
724 };
725 
726 /**
727  * struct dp_srng - DP srng structure
728  * @hal_srng: hal_srng handle
729  * @base_vaddr_unaligned: un-aligned virtual base address of the srng ring
730  * @base_vaddr_aligned: aligned virtual base address of the srng ring
731  * @base_paddr_unaligned: un-aligned physical base address of the srng ring
732  * @base_paddr_aligned: aligned physical base address of the srng ring
733  * @alloc_size: size of the srng ring
734  * @cached: is the srng ring memory cached or un-cached memory
735  * @irq: irq number of the srng ring
736  * @num_entries: number of entries in the srng ring
737  * @is_mem_prealloc: Is this srng memory pre-allocated
738  * @crit_thresh: Critical threshold for near-full processing of this srng
739  * @safe_thresh: Safe threshold for near-full processing of this srng
740  * @near_full: Flag to indicate srng is near-full
741  */
742 struct dp_srng {
743 	hal_ring_handle_t hal_srng;
744 	void *base_vaddr_unaligned;
745 	void *base_vaddr_aligned;
746 	qdf_dma_addr_t base_paddr_unaligned;
747 	qdf_dma_addr_t base_paddr_aligned;
748 	uint32_t alloc_size;
749 	uint8_t cached;
750 	int irq;
751 	uint32_t num_entries;
752 #ifdef DP_MEM_PRE_ALLOC
753 	uint8_t is_mem_prealloc;
754 #endif
755 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
756 	uint16_t crit_thresh;
757 	uint16_t safe_thresh;
758 	qdf_atomic_t near_full;
759 #endif
760 };
761 
762 struct dp_rx_reorder_array_elem {
763 	qdf_nbuf_t head;
764 	qdf_nbuf_t tail;
765 };
766 
767 #define DP_RX_BA_INACTIVE 0
768 #define DP_RX_BA_ACTIVE 1
769 #define DP_RX_BA_IN_PROGRESS 2
770 struct dp_reo_cmd_info {
771 	uint16_t cmd;
772 	enum hal_reo_cmd_type cmd_type;
773 	void *data;
774 	void (*handler)(struct dp_soc *, void *, union hal_reo_status *);
775 	TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
776 };
777 
778 struct dp_peer_delay_stats {
779 	struct cdp_delay_tid_stats delay_tid_stats[CDP_MAX_DATA_TIDS]
780 						  [CDP_MAX_TXRX_CTX];
781 };
782 
783 /* Rx TID defrag*/
784 struct dp_rx_tid_defrag {
785 	/* TID */
786 	int tid;
787 
788 	/* only used for defrag right now */
789 	TAILQ_ENTRY(dp_rx_tid_defrag) defrag_waitlist_elem;
790 
791 	/* Store dst desc for reinjection */
792 	hal_ring_desc_t dst_ring_desc;
793 	struct dp_rx_desc *head_frag_desc;
794 
795 	/* Sequence and fragments that are being processed currently */
796 	uint32_t curr_seq_num;
797 	uint32_t curr_frag_num;
798 
799 	/* TODO: Check the following while adding defragmentation support */
800 	struct dp_rx_reorder_array_elem *array;
801 	/* base - single rx reorder element used for non-aggr cases */
802 	struct dp_rx_reorder_array_elem base;
803 	/* rx_tid lock */
804 	qdf_spinlock_t defrag_tid_lock;
805 
806 	/* head PN number */
807 	uint64_t pn128[2];
808 
809 	uint32_t defrag_timeout_ms;
810 
811 	/* defrag usage only, dp_peer pointer related with this tid */
812 	struct dp_txrx_peer *defrag_peer;
813 };
814 
815 /* Rx TID */
816 struct dp_rx_tid {
817 	/* TID */
818 	int tid;
819 
820 	/* Num of addba requests */
821 	uint32_t num_of_addba_req;
822 
823 	/* Num of addba responses */
824 	uint32_t num_of_addba_resp;
825 
826 	/* Num of delba requests */
827 	uint32_t num_of_delba_req;
828 
829 	/* Num of addba responses successful */
830 	uint32_t num_addba_rsp_success;
831 
832 	/* Num of addba responses failed */
833 	uint32_t num_addba_rsp_failed;
834 
835 	/* pn size */
836 	uint8_t pn_size;
837 	/* REO TID queue descriptors */
838 	void *hw_qdesc_vaddr_unaligned;
839 	void *hw_qdesc_vaddr_aligned;
840 	qdf_dma_addr_t hw_qdesc_paddr_unaligned;
841 	qdf_dma_addr_t hw_qdesc_paddr;
842 	uint32_t hw_qdesc_alloc_size;
843 
844 	/* RX ADDBA session state */
845 	int ba_status;
846 
847 	/* RX BA window size */
848 	uint16_t ba_win_size;
849 
850 	/* Starting sequence number in Addba request */
851 	uint16_t startseqnum;
852 	uint16_t dialogtoken;
853 	uint16_t statuscode;
854 	/* user defined ADDBA response status code */
855 	uint16_t userstatuscode;
856 
857 	/* rx_tid lock */
858 	qdf_spinlock_t tid_lock;
859 
860 	/* Store ppdu_id when 2k exception is received */
861 	uint32_t ppdu_id_2k;
862 
863 	/* Delba Tx completion status */
864 	uint8_t delba_tx_status;
865 
866 	/* Delba Tx retry count */
867 	uint8_t delba_tx_retry;
868 
869 	/* Delba stats */
870 	uint32_t delba_tx_success_cnt;
871 	uint32_t delba_tx_fail_cnt;
872 
873 	/* Delba reason code for retries */
874 	uint8_t delba_rcode;
875 
876 	/* Coex Override preserved windows size 1 based */
877 	uint16_t rx_ba_win_size_override;
878 };
879 
880 /**
881  * struct dp_intr_stats - DP Interrupt Stats for an interrupt context
882  * @num_tx_ring_masks: interrupts with tx_ring_mask set
883  * @num_rx_ring_masks: interrupts with rx_ring_mask set
884  * @num_rx_mon_ring_masks: interrupts with rx_mon_ring_mask set
885  * @num_rx_err_ring_masks: interrupts with rx_err_ring_mask set
886  * @num_rx_wbm_rel_ring_masks: interrupts with rx_wbm_rel_ring_mask set
887  * @num_reo_status_ring_masks: interrupts with reo_status_ring_mask set
888  * @num_rxdma2host_ring_masks: interrupts with rxdma2host_ring_mask set
889  * @num_host2rxdma_ring_masks: interrupts with host2rxdma_ring_mask set
890  * @num_host2rxdma_mon_ring_masks: interrupts with host2rxdma_ring_mask set
891  * @num_rx_ring_near_full_masks: Near-full interrupts for REO DST ring
892  * @num_tx_comp_ring_near_full_masks: Near-full interrupts for TX completion
893  * @num_rx_wbm_rel_ring_near_full_masks: total number of times the wbm rel ring
894  *                                       near full interrupt was received
895  * @num_reo_status_ring_near_full_masks: total number of times the reo status
896  *                                       near full interrupt was received
897  * @num_near_full_masks: total number of times the near full interrupt
898  *                       was received
899  * @num_masks: total number of times the interrupt was received
900  * @num_host2txmon_ring_masks: interrupts with host2txmon_ring_mask set
903  * @num_tx_mon_ring_masks: interrupts with tx_mon_ring_mask set
904  *
905  * Counter for individual masks are incremented only if there are any packets
906  * on that ring.
907  */
908 struct dp_intr_stats {
909 	uint32_t num_tx_ring_masks[MAX_TCL_DATA_RINGS];
910 	uint32_t num_rx_ring_masks[MAX_REO_DEST_RINGS];
911 	uint32_t num_rx_mon_ring_masks;
912 	uint32_t num_rx_err_ring_masks;
913 	uint32_t num_rx_wbm_rel_ring_masks;
914 	uint32_t num_reo_status_ring_masks;
915 	uint32_t num_rxdma2host_ring_masks;
916 	uint32_t num_host2rxdma_ring_masks;
917 	uint32_t num_host2rxdma_mon_ring_masks;
918 	uint32_t num_rx_ring_near_full_masks[MAX_REO_DEST_RINGS];
919 	uint32_t num_tx_comp_ring_near_full_masks[MAX_TCL_DATA_RINGS];
920 	uint32_t num_rx_wbm_rel_ring_near_full_masks;
921 	uint32_t num_reo_status_ring_near_full_masks;
922 	uint32_t num_host2txmon_ring__masks;
923 	uint32_t num_near_full_masks;
924 	uint32_t num_masks;
925 	uint32_t num_tx_mon_ring_masks;
926 };
927 
928 /* per interrupt context  */
929 struct dp_intr {
930 	uint8_t tx_ring_mask;   /* WBM Tx completion rings (0-2)
931 				associated with this napi context */
932 	uint8_t rx_ring_mask;   /* Rx REO rings (0-3) associated
933 				with this interrupt context */
934 	uint8_t rx_mon_ring_mask;  /* Rx monitor ring mask (0-2) */
935 	uint8_t rx_err_ring_mask; /* REO Exception Ring */
936 	uint8_t rx_wbm_rel_ring_mask; /* WBM2SW Rx Release Ring */
937 	uint8_t reo_status_ring_mask; /* REO command response ring */
938 	uint8_t rxdma2host_ring_mask; /* RXDMA to host destination ring */
939 	uint8_t host2rxdma_ring_mask; /* Host to RXDMA buffer ring */
940 	/* Host to RXDMA monitor  buffer ring */
941 	uint8_t host2rxdma_mon_ring_mask;
942 	/* RX REO rings near full interrupt mask */
943 	uint8_t rx_near_full_grp_1_mask;
944 	/* RX REO rings near full interrupt mask */
945 	uint8_t rx_near_full_grp_2_mask;
946 	/* WBM TX completion rings near full interrupt mask */
947 	uint8_t tx_ring_near_full_mask;
948 	uint8_t host2txmon_ring_mask; /* Tx monitor buffer ring */
949 	uint8_t tx_mon_ring_mask;  /* Tx monitor ring mask (0-2) */
950 	struct dp_soc *soc;    /* Reference to SoC structure ,
951 				to get DMA ring handles */
952 	qdf_lro_ctx_t lro_ctx;
953 	uint8_t dp_intr_id;
954 
955 	/* Interrupt Stats for individual masks */
956 	struct dp_intr_stats intr_stats;
957 	uint8_t umac_reset_intr_mask;  /* UMAC reset interrupt mask */
958 };
959 
960 #define REO_DESC_FREELIST_SIZE 64
961 #define REO_DESC_FREE_DEFER_MS 1000
962 struct reo_desc_list_node {
963 	qdf_list_node_t node;
964 	unsigned long free_ts;
965 	struct dp_rx_tid rx_tid;
966 	bool resend_update_reo_cmd;
967 	uint32_t pending_ext_desc_size;
968 #ifdef REO_QDESC_HISTORY
969 	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
970 #endif
971 };
972 
973 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
974 #define REO_DESC_DEFERRED_FREELIST_SIZE 256
975 #define REO_DESC_DEFERRED_FREE_MS 30000
976 
977 struct reo_desc_deferred_freelist_node {
978 	qdf_list_node_t node;
979 	unsigned long free_ts;
980 	void *hw_qdesc_vaddr_unaligned;
981 	qdf_dma_addr_t hw_qdesc_paddr;
982 	uint32_t hw_qdesc_alloc_size;
983 #ifdef REO_QDESC_HISTORY
984 	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
985 #endif /* REO_QDESC_HISTORY */
986 };
987 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
988 
989 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
990 /**
991  * struct reo_cmd_event_record: Elements to record for each reo command
992  * @cmd_type: reo command type
993  * @cmd_return_status: reo command post status
994  * @timestamp: record timestamp for the reo command
995  */
996 struct reo_cmd_event_record {
997 	enum hal_reo_cmd_type cmd_type;
998 	uint8_t cmd_return_status;
999 	uint64_t timestamp;
1000 };
1001 
1002 /**
1003  * struct reo_cmd_event_history: Account for reo cmd events
1004  * @index: record number
1005  * @cmd_record: list of records
1006  */
1007 struct reo_cmd_event_history {
1008 	qdf_atomic_t index;
1009 	struct reo_cmd_event_record cmd_record[REO_CMD_EVENT_HIST_MAX];
1010 };
1011 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1012 
1013 /* SoC level data path statistics */
1014 struct dp_soc_stats {
1015 	struct {
1016 		uint32_t added;
1017 		uint32_t deleted;
1018 		uint32_t aged_out;
1019 		uint32_t map_err;
1020 		uint32_t ast_mismatch;
1021 	} ast;
1022 
1023 	struct {
1024 		uint32_t added;
1025 		uint32_t deleted;
1026 	} mec;
1027 
1028 	/* SOC level TX stats */
1029 	struct {
1030 		/* Total packets transmitted */
1031 		struct cdp_pkt_info egress[MAX_TCL_DATA_RINGS];
1032 		/* Enqueues per tcl ring */
1033 		uint32_t tcl_enq[MAX_TCL_DATA_RINGS];
1034 		/* packets dropped on tx because of no peer */
1035 		struct cdp_pkt_info tx_invalid_peer;
1036 		/* descriptors in each tcl ring */
1037 		uint32_t tcl_ring_full[MAX_TCL_DATA_RINGS];
1038 		/* Descriptors in use at soc */
1039 		uint32_t desc_in_use;
1040 		/* tqm_release_reason == FW removed */
1041 		uint32_t dropped_fw_removed;
1042 		/* tx completion release_src != TQM or FW */
1043 		uint32_t invalid_release_source;
1044 		/* tx completion wbm_internal_error */
1045 		uint32_t wbm_internal_error[MAX_WBM_INT_ERROR_REASONS];
1046 		/* tx completion non_wbm_internal_error */
1047 		uint32_t non_wbm_internal_err;
1048 		/* TX Comp loop packet limit hit */
1049 		uint32_t tx_comp_loop_pkt_limit_hit;
1050 		/* Head pointer Out of sync at the end of dp_tx_comp_handler */
1051 		uint32_t hp_oos2;
1052 		/* tx desc freed as part of vdev detach */
1053 		uint32_t tx_comp_exception;
1054 		/* TQM drops after/during peer delete */
1055 		uint64_t tqm_drop_no_peer;
1056 		/* Number of tx completions reaped per WBM2SW release ring */
1057 		uint32_t tx_comp[MAX_TCL_DATA_RINGS];
1058 	} tx;
1059 
1060 	/* SOC level RX stats */
1061 	struct {
1062 		/* Total rx packets count */
1063 		struct cdp_pkt_info ingress;
1064 		/* Rx errors */
1065 		/* Total Packets in Rx Error ring */
1066 		uint32_t err_ring_pkts;
1067 		/* No of Fragments */
1068 		uint32_t rx_frags;
1069 		/* No of incomplete fragments in waitlist */
1070 		uint32_t rx_frag_wait;
1071 		/* Fragments dropped due to errors */
1072 		uint32_t rx_frag_err;
1073 		/* Fragments received OOR causing sequence num mismatch */
1074 		uint32_t rx_frag_oor;
1075 		/* Fragments dropped due to len errors in skb */
1076 		uint32_t rx_frag_err_len_error;
1077 		/* Fragments dropped due to no peer found */
1078 		uint32_t rx_frag_err_no_peer;
1079 		/* No of reinjected packets */
1080 		uint32_t reo_reinject;
1081 		/* Reap loop packet limit hit */
1082 		uint32_t reap_loop_pkt_limit_hit;
1083 		/* Head pointer Out of sync at the end of dp_rx_process */
1084 		uint32_t hp_oos2;
1085 		/* Rx ring near full */
1086 		uint32_t near_full;
1087 		/* Break ring reaping as not all scattered msdu received */
1088 		uint32_t msdu_scatter_wait_break;
1089 		/* Number of bar frames received */
1090 		uint32_t bar_frame;
1091 		/* Number of frames routed from rxdma */
1092 		uint32_t rxdma2rel_route_drop;
1093 		/* Number of frames routed from reo*/
1094 		uint32_t reo2rel_route_drop;
1095 
1096 		struct {
1097 			/* Invalid RBM error count */
1098 			uint32_t invalid_rbm;
1099 			/* Invalid VDEV Error count */
1100 			uint32_t invalid_vdev;
1101 			/* Invalid PDEV error count */
1102 			uint32_t invalid_pdev;
1103 
1104 			/* Packets delivered to stack that no related peer */
1105 			uint32_t pkt_delivered_no_peer;
1106 			/* Defrag peer uninit error count */
1107 			uint32_t defrag_peer_uninit;
1108 			/* Invalid sa_idx or da_idx*/
1109 			uint32_t invalid_sa_da_idx;
1110 			/* MSDU DONE failures */
1111 			uint32_t msdu_done_fail;
1112 			/* Invalid PEER Error count */
1113 			struct cdp_pkt_info rx_invalid_peer;
1114 			/* Invalid PEER ID count */
1115 			struct cdp_pkt_info rx_invalid_peer_id;
1116 			/* Invalid packet length */
1117 			struct cdp_pkt_info rx_invalid_pkt_len;
1118 			/* HAL ring access Fail error count */
1119 			uint32_t hal_ring_access_fail;
1120 			/* HAL ring access full Fail error count */
1121 			uint32_t hal_ring_access_full_fail;
1122 			/* RX DMA error count */
1123 			uint32_t rxdma_error[HAL_RXDMA_ERR_MAX];
1124 			/* RX REO DEST Desc Invalid Magic count */
1125 			uint32_t rx_desc_invalid_magic;
1126 			/* REO Error count */
1127 			uint32_t reo_error[HAL_REO_ERR_MAX];
1128 			/* HAL REO ERR Count */
1129 			uint32_t hal_reo_error[MAX_REO_DEST_RINGS];
1130 			/* HAL REO DEST Duplicate count */
1131 			uint32_t hal_reo_dest_dup;
1132 			/* HAL WBM RELEASE Duplicate count */
1133 			uint32_t hal_wbm_rel_dup;
1134 			/* HAL RXDMA error Duplicate count */
1135 			uint32_t hal_rxdma_err_dup;
1136 			/* ipa smmu map duplicate count */
1137 			uint32_t ipa_smmu_map_dup;
1138 			/* ipa smmu unmap duplicate count */
1139 			uint32_t ipa_smmu_unmap_dup;
1140 			/* ipa smmu unmap while ipa pipes is disabled */
1141 			uint32_t ipa_unmap_no_pipe;
1142 			/* REO cmd send fail/requeue count */
1143 			uint32_t reo_cmd_send_fail;
1144 			/* REO cmd send drain count */
1145 			uint32_t reo_cmd_send_drain;
1146 			/* RX msdu drop count due to scatter */
1147 			uint32_t scatter_msdu;
1148 			/* RX msdu drop count due to invalid cookie */
1149 			uint32_t invalid_cookie;
1150 			/* Count of stale cookie read in RX path */
1151 			uint32_t stale_cookie;
1152 			/* Delba sent count due to RX 2k jump */
1153 			uint32_t rx_2k_jump_delba_sent;
1154 			/* RX 2k jump msdu indicated to stack count */
1155 			uint32_t rx_2k_jump_to_stack;
1156 			/* RX 2k jump msdu dropped count */
1157 			uint32_t rx_2k_jump_drop;
1158 			/* REO ERR msdu buffer received */
1159 			uint32_t reo_err_msdu_buf_rcved;
1160 			/* REO ERR msdu buffer with invalid coookie received */
1161 			uint32_t reo_err_msdu_buf_invalid_cookie;
1162 			/* REO OOR msdu drop count */
1163 			uint32_t reo_err_oor_drop;
1164 			/* REO OOR msdu indicated to stack count */
1165 			uint32_t reo_err_oor_to_stack;
1166 			/* REO OOR scattered msdu count */
1167 			uint32_t reo_err_oor_sg_count;
1168 			/* RX msdu rejected count on delivery to vdev stack_fn*/
1169 			uint32_t rejected;
1170 			/* Incorrect msdu count in MPDU desc info */
1171 			uint32_t msdu_count_mismatch;
1172 			/* RX raw frame dropped count */
1173 			uint32_t raw_frm_drop;
1174 			/* Stale link desc cookie count*/
1175 			uint32_t invalid_link_cookie;
1176 			/* Nbuf sanity failure */
1177 			uint32_t nbuf_sanity_fail;
1178 			/* Duplicate link desc refilled */
1179 			uint32_t dup_refill_link_desc;
1180 			/* Incorrect msdu continuation bit in MSDU desc */
1181 			uint32_t msdu_continuation_err;
1182 			/* count of start sequence (ssn) updates */
1183 			uint32_t ssn_update_count;
1184 			/* count of bar handling fail */
1185 			uint32_t bar_handle_fail_count;
1186 			/* EAPOL drop count in intrabss scenario */
1187 			uint32_t intrabss_eapol_drop;
1188 			/* PN check failed for 2K-jump or OOR error */
1189 			uint32_t pn_in_dest_check_fail;
1190 			/* MSDU len err count */
1191 			uint32_t msdu_len_err;
1192 			/* Rx flush count */
1193 			uint32_t rx_flush_count;
1194 			/* Rx invalid tid count */
1195 			uint32_t rx_invalid_tid_err;
1196 		} err;
1197 
1198 		/* packet count per core - per ring */
1199 		uint64_t ring_packets[NR_CPUS][MAX_REO_DEST_RINGS];
1200 	} rx;
1201 
1202 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1203 	struct reo_cmd_event_history cmd_event_history;
1204 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1205 };
1206 
1207 union dp_align_mac_addr {
1208 	uint8_t raw[QDF_MAC_ADDR_SIZE];
1209 	struct {
1210 		uint16_t bytes_ab;
1211 		uint16_t bytes_cd;
1212 		uint16_t bytes_ef;
1213 	} align2;
1214 	struct {
1215 		uint32_t bytes_abcd;
1216 		uint16_t bytes_ef;
1217 	} align4;
1218 	struct __attribute__((__packed__)) {
1219 		uint16_t bytes_ab;
1220 		uint32_t bytes_cdef;
1221 	} align4_2;
1222 };
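/*
 * Illustrative sketch, not part of the original header: the aligned views
 * above allow a MAC address compare with two integer loads instead of a
 * byte-wise memcmp. The helper name below is hypothetical.
 *
 *	static inline bool dp_mac_addr_cmp(union dp_align_mac_addr *a,
 *					   union dp_align_mac_addr *b)
 *	{
 *		return (a->align4.bytes_abcd == b->align4.bytes_abcd) &&
 *		       (a->align4.bytes_ef == b->align4.bytes_ef);
 *	}
 */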
1223 
1224 /**
1225  * struct dp_ast_free_cb_params - HMWDS free callback cookie
1226  * @mac_addr: ast mac address
1227  * @peer_mac_addr: mac address of peer
1228  * @type: ast entry type
1229  * @vdev_id: vdev_id
1230  * @flags: ast flags
1231  */
1232 struct dp_ast_free_cb_params {
1233 	union dp_align_mac_addr mac_addr;
1234 	union dp_align_mac_addr peer_mac_addr;
1235 	enum cdp_txrx_ast_entry_type type;
1236 	uint8_t vdev_id;
1237 	uint32_t flags;
1238 };
1239 
1240 /*
1241  * dp_ast_entry
1242  *
1243  * @ast_idx: Hardware AST Index
1244  * @peer_id: Next Hop peer_id (for non-WDS nodes, this will point to the
1245  *           peer associated with this MAC address)
1246  * @mac_addr:  MAC Address for this AST entry
1247  * @next_hop: Set to 1 if this is for a WDS node
1248  * @is_active: flag to indicate active data traffic on this node
1249  *             (used for aging out/expiry)
1250  * @ase_list_elem: node in peer AST list
1251  * @is_bss: flag to indicate if entry corresponds to bss peer
1252  * @is_mapped: flag to indicate that we have mapped the AST entry
1253  *             in ast_table
1254  * @pdev_id: pdev ID
1255  * @vdev_id: vdev ID
1256  * @ast_hash_value: hash value in HW
1257  * @ref_cnt: reference count
1258  * @type: flag to indicate type of the entry(static/WDS/MEC)
1259  * @delete_in_progress: Flag to indicate that delete commands send to FW
1260  *                      and host is waiting for response from FW
1261  * @callback: ast free/unmap callback
1262  * @cookie: argument to callback
1263  * @hash_list_elem: node in soc AST hash list (mac address used as hash)
1264  */
1265 struct dp_ast_entry {
1266 	uint16_t ast_idx;
1267 	uint16_t peer_id;
1268 	union dp_align_mac_addr mac_addr;
1269 	bool next_hop;
1270 	bool is_active;
1271 	bool is_mapped;
1272 	uint8_t pdev_id;
1273 	uint8_t vdev_id;
1274 	uint16_t ast_hash_value;
1275 	qdf_atomic_t ref_cnt;
1276 	enum cdp_txrx_ast_entry_type type;
1277 	bool delete_in_progress;
1278 	txrx_ast_free_cb callback;
1279 	void *cookie;
1280 	TAILQ_ENTRY(dp_ast_entry) ase_list_elem;
1281 	TAILQ_ENTRY(dp_ast_entry) hash_list_elem;
1282 };
1283 
1284 /*
1285  * dp_mec_entry
1286  *
1287  * @mac_addr:  MAC Address for this MEC entry
1288  * @is_active: flag to indicate active data traffic on this node
1289  *             (used for aging out/expiry)
1290  * @pdev_id: pdev ID
1291  * @vdev_id: vdev ID
1292  * @hash_list_elem: node in soc MEC hash list (mac address used as hash)
1293  */
1294 struct dp_mec_entry {
1295 	union dp_align_mac_addr mac_addr;
1296 	bool is_active;
1297 	uint8_t pdev_id;
1298 	uint8_t vdev_id;
1299 
1300 	TAILQ_ENTRY(dp_mec_entry) hash_list_elem;
1301 };
1302 
1303 /* SOC level htt stats */
1304 struct htt_t2h_stats {
1305 	/* lock to protect htt_stats_msg update */
1306 	qdf_spinlock_t lock;
1307 
1308 	/* work queue to process htt stats */
1309 	qdf_work_t work;
1310 
1311 	/* T2H Ext stats message queue */
1312 	qdf_nbuf_queue_t msg;
1313 
1314 	/* number of completed stats in htt_stats_msg */
1315 	uint32_t num_stats;
1316 };
1317 
1318 struct link_desc_bank {
1319 	void *base_vaddr_unaligned;
1320 	void *base_vaddr;
1321 	qdf_dma_addr_t base_paddr_unaligned;
1322 	qdf_dma_addr_t base_paddr;
1323 	uint32_t size;
1324 };
1325 
1326 struct rx_buff_pool {
1327 	qdf_nbuf_queue_head_t emerg_nbuf_q;
1328 	uint32_t nbuf_fail_cnt;
1329 	bool is_initialized;
1330 };
1331 
1332 struct rx_refill_buff_pool {
1333 	bool is_initialized;
1334 	uint16_t head;
1335 	uint16_t tail;
1336 	struct dp_pdev *dp_pdev;
1337 	uint16_t max_bufq_len;
1338 	qdf_nbuf_t buf_elem[2048];
1339 };
1340 
1341 #ifdef DP_TX_HW_DESC_HISTORY
1342 #define DP_TX_HW_DESC_HIST_MAX 6144
1343 
1344 struct dp_tx_hw_desc_evt {
1345 	uint8_t tcl_desc[HAL_TX_DESC_LEN_BYTES];
1346 	uint64_t posted;
1347 	uint32_t hp;
1348 	uint32_t tp;
1349 };
1350 
1351 /* struct dp_tx_hw_desc_history - TX HW desc history
1352  * @index: Index where the last entry is written
1353  * @entry: history entries
1354  */
1355 struct dp_tx_hw_desc_history {
1356 	uint64_t index;
1357 	struct dp_tx_hw_desc_evt entry[DP_TX_HW_DESC_HIST_MAX];
1358 };
1359 #endif
1360 
1361 /*
1362  * enum dp_mon_status_process_event - Events for monitor status buffer record
1363  * @DP_MON_STATUS_BUF_REAP: Monitor status buffer is reaped from ring
1364  * @DP_MON_STATUS_BUF_ENQUEUE: Status buffer is enqueued to local queue
1365  * @DP_MON_STATUS_BUF_DEQUEUE: Status buffer is dequeued from local queue
1366  */
1367 enum dp_mon_status_process_event {
1368 	DP_MON_STATUS_BUF_REAP,
1369 	DP_MON_STATUS_BUF_ENQUEUE,
1370 	DP_MON_STATUS_BUF_DEQUEUE,
1371 };
1372 
1373 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
1374 #define DP_MON_STATUS_HIST_MAX	2048
1375 
1376 /**
1377  * struct dp_mon_stat_info_record - monitor status ring buffer info
1378  * @hbi: HW ring buffer info
1379  * @timestamp: timestamp when this entry was recorded
1380  * @event: event
1381  * @rx_desc: RX descriptor corresponding to the received buffer
1382  * @nbuf: buffer attached to rx_desc, if event is REAP, else the buffer
1383  *	  which was enqueued or dequeued.
1384  * @rx_desc_nbuf_data: nbuf data pointer.
1385  */
1386 struct dp_mon_stat_info_record {
1387 	struct hal_buf_info hbi;
1388 	uint64_t timestamp;
1389 	enum dp_mon_status_process_event event;
1390 	void *rx_desc;
1391 	qdf_nbuf_t nbuf;
1392 	uint8_t *rx_desc_nbuf_data;
1393 };
1394 
1395 /* struct dp_mon_status_ring_history - monitor status ring history
1396  * @index: Index where the last entry is written
1397  * @entry: history entries
1398  */
1399 struct dp_mon_status_ring_history {
1400 	qdf_atomic_t index;
1401 	struct dp_mon_stat_info_record entry[DP_MON_STATUS_HIST_MAX];
1402 };
1403 #endif
1404 
1405 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1406 /*
1407  * The logic to get the current index of these histories depends on the
1408  * depth being a power of 2.
1409  */
1410 #define DP_RX_HIST_MAX 2048
1411 #define DP_RX_ERR_HIST_MAX 2048
1412 #define DP_RX_REINJECT_HIST_MAX 1024
1413 #define DP_RX_REFILL_HIST_MAX 2048
1414 
1415 QDF_COMPILE_TIME_ASSERT(rx_history_size,
1416 			(DP_RX_HIST_MAX &
1417 			 (DP_RX_HIST_MAX - 1)) == 0);
1418 QDF_COMPILE_TIME_ASSERT(rx_err_history_size,
1419 			(DP_RX_ERR_HIST_MAX &
1420 			 (DP_RX_ERR_HIST_MAX - 1)) == 0);
1421 QDF_COMPILE_TIME_ASSERT(rx_reinject_history_size,
1422 			(DP_RX_REINJECT_HIST_MAX &
1423 			 (DP_RX_REINJECT_HIST_MAX - 1)) == 0);
1424 QDF_COMPILE_TIME_ASSERT(rx_refill_history_size,
1425 			(DP_RX_REFILL_HIST_MAX &
1426 			(DP_RX_REFILL_HIST_MAX - 1)) == 0);
1427 
1428 
1429 /**
1430  * struct dp_buf_info_record - ring buffer info
1431  * @hbi: HW ring buffer info
1432  * @timestamp: timestamp when this entry was recorded
1433  */
1434 struct dp_buf_info_record {
1435 	struct hal_buf_info hbi;
1436 	uint64_t timestamp;
1437 };
1438 
1439 /**
1440  * struct dp_refill_info_record - ring refill buffer info
1441  * @hp: HP value after refill
1442  * @tp: cached tail value during refill
1443  * @num_req: number of buffers requested to refill
1444  * @num_refill: number of buffers refilled to ring
1445  * @timestamp: timestamp when this entry was recorded
1446  */
1447 struct dp_refill_info_record {
1448 	uint32_t hp;
1449 	uint32_t tp;
1450 	uint32_t num_req;
1451 	uint32_t num_refill;
1452 	uint64_t timestamp;
1453 };
1454 
1455 /* struct dp_rx_history - rx ring history
1456  * @index: Index where the last entry is written
1457  * @entry: history entries
1458  */
1459 struct dp_rx_history {
1460 	qdf_atomic_t index;
1461 	struct dp_buf_info_record entry[DP_RX_HIST_MAX];
1462 };
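/*
 * Illustrative sketch, not part of the original header: because the history
 * depths above are powers of 2, the next record index can be derived with a
 * bitwise AND instead of a modulo. The hist pointer and the recorded fields
 * below are assumptions for illustration.
 *
 *	struct dp_rx_history *hist = ...;	// per-REO-ring history instance
 *	uint32_t idx = qdf_atomic_inc_return(&hist->index) & (DP_RX_HIST_MAX - 1);
 *
 *	hist->entry[idx].hbi = *hbi;		// HW buffer info being recorded
 *	hist->entry[idx].timestamp = qdf_get_log_timestamp();
 */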
1463 
1464 /* struct dp_rx_err_history - rx err ring history
1465  * @index: Index where the last entry is written
1466  * @entry: history entries
1467  */
1468 struct dp_rx_err_history {
1469 	qdf_atomic_t index;
1470 	struct dp_buf_info_record entry[DP_RX_ERR_HIST_MAX];
1471 };
1472 
1473 /* struct dp_rx_reinject_history - rx reinject ring history
1474  * @index: Index where the last entry is written
1475  * @entry: history entries
1476  */
1477 struct dp_rx_reinject_history {
1478 	qdf_atomic_t index;
1479 	struct dp_buf_info_record entry[DP_RX_REINJECT_HIST_MAX];
1480 };
1481 
1482 /* struct dp_rx_refill_history - rx buf refill history
1483  * @index: Index where the last entry is written
1484  * @entry: history entries
1485  */
1486 struct dp_rx_refill_history {
1487 	qdf_atomic_t index;
1488 	struct dp_refill_info_record entry[DP_RX_REFILL_HIST_MAX];
1489 };
1490 
1491 #endif
1492 
1493 enum dp_tx_event_type {
1494 	DP_TX_DESC_INVAL_EVT = 0,
1495 	DP_TX_DESC_MAP,
1496 	DP_TX_DESC_COOKIE,
1497 	DP_TX_DESC_FLUSH,
1498 	DP_TX_DESC_UNMAP,
1499 	DP_TX_COMP_UNMAP,
1500 	DP_TX_COMP_UNMAP_ERR,
1501 	DP_TX_COMP_MSDU_EXT,
1502 };
1503 
1504 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
1505 /* Size must be a power of 2, for bitwise index rotation */
1506 #define DP_TX_TCL_HISTORY_SIZE 0x4000
1507 #define DP_TX_COMP_HISTORY_SIZE 0x4000
1508 
1509 struct dp_tx_desc_event {
1510 	qdf_nbuf_t skb;
1511 	dma_addr_t paddr;
1512 	uint32_t sw_cookie;
1513 	enum dp_tx_event_type type;
1514 	uint64_t ts;
1515 };
1516 
1517 struct dp_tx_tcl_history {
1518 	qdf_atomic_t index;
1519 	struct dp_tx_desc_event entry[DP_TX_TCL_HISTORY_SIZE];
1520 };
1521 
1522 struct dp_tx_comp_history {
1523 	qdf_atomic_t index;
1524 	struct dp_tx_desc_event entry[DP_TX_COMP_HISTORY_SIZE];
1525 };
1526 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
1527 
1528 /* structure to record recent operation related variable */
1529 struct dp_last_op_info {
1530 	/* last link desc buf info through WBM release ring */
1531 	struct hal_buf_info wbm_rel_link_desc;
1532 	/* last link desc buf info through REO reinject ring */
1533 	struct hal_buf_info reo_reinject_link_desc;
1534 };
1535 
1536 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1537 
1538 /**
1539  * struct dp_swlm_tcl_data - params for tcl register write coalescing
1540  *			     decision making
1541  * @nbuf: TX packet
1542  * @tid: tid for transmitting the current packet
1543  * @num_ll_connections: Number of low latency connections on this vdev
1544  * @ring_id: TCL ring id
1545  * @pkt_len: Packet length
1546  *
1547  * This structure contains the information required by the software
1548  * latency manager to decide on whether to coalesce the current TCL
1549  * register write or not.
1550  */
1551 struct dp_swlm_tcl_data {
1552 	qdf_nbuf_t nbuf;
1553 	uint8_t tid;
1554 	uint8_t num_ll_connections;
1555 	uint8_t ring_id;
1556 	uint32_t pkt_len;
1557 };
1558 
1559 /**
1560  * union swlm_data - SWLM query data
1561  * @tcl_data: data for TCL query in SWLM
1562  */
1563 union swlm_data {
1564 	struct dp_swlm_tcl_data *tcl_data;
1565 };
1566 
1567 /**
1568  * struct dp_swlm_ops - SWLM ops
1569  * @tcl_wr_coalesce_check: handler to check if the current TCL register
1570  *			   write can be coalesced or not
1571  */
1572 struct dp_swlm_ops {
1573 	int (*tcl_wr_coalesce_check)(struct dp_soc *soc,
1574 				     struct dp_swlm_tcl_data *tcl_data);
1575 };
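/*
 * Illustrative sketch, not part of the original header: how the TX path might
 * consult the SWLM ops above before writing the TCL head pointer. The
 * surrounding variables, the soc->swlm member access and the meaning of the
 * return value are assumptions for illustration.
 *
 *	struct dp_swlm_tcl_data tcl_data = {
 *		.nbuf = nbuf,
 *		.tid = tid,
 *		.ring_id = ring_id,
 *		.pkt_len = qdf_nbuf_len(nbuf),
 *	};
 *
 *	if (soc->swlm.ops->tcl_wr_coalesce_check(soc, &tcl_data))
 *		;	// defer the TCL HP write; the flush timer will push it
 *	else
 *		;	// write the TCL HP register now
 */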
1576 
1577 /**
1578  * struct dp_swlm_stats - Stats for Software Latency manager.
1579  * @tcl.timer_flush_success: Num TCL HP writes success from timer context
1580  * @tcl.timer_flush_fail: Num TCL HP writes failure from timer context
1581  * @tcl.tid_fail: Num TCL register write coalescing skips, since the pkt
1582  *		 was being transmitted on a TID above coalescing threshold
1583  * @tcl.sp_frames: Num TCL register write coalescing skips, since the pkt
1584  *		  being transmitted was a special frame
1585  * @tcl.ll_connection: Num TCL register write coalescing skips, since the
1586  *		       vdev has low latency connections
1587  * @tcl.bytes_thresh_reached: Num TCL HP writes flush after the coalescing
1588  *			     bytes threshold was reached
1589  * @tcl.time_thresh_reached: Num TCL HP writes flush after the coalescing
1590  *			    session time expired
1591  * @tcl.tput_criteria_fail: Num TCL HP writes coalescing fails, since the
1592  *			   throughput did not meet session threshold
1593  * @tcl.coalesce_success: Num of TCL HP writes coalesced successfully.
1594  * @tcl.coalesce_fail: Num of TCL HP writes coalesces failed
1595  */
1596 struct dp_swlm_stats {
1597 	struct {
1598 		uint32_t timer_flush_success;
1599 		uint32_t timer_flush_fail;
1600 		uint32_t tid_fail;
1601 		uint32_t sp_frames;
1602 		uint32_t ll_connection;
1603 		uint32_t bytes_thresh_reached;
1604 		uint32_t time_thresh_reached;
1605 		uint32_t tput_criteria_fail;
1606 		uint32_t coalesce_success;
1607 		uint32_t coalesce_fail;
1608 	} tcl[MAX_TCL_DATA_RINGS];
1609 };
1610 
1611 /**
1612  * struct dp_swlm_tcl_params: Parameters based on TCL for different modules
1613  *			      in the Software latency manager.
1614  * @soc: DP soc reference
1615  * @ring_id: TCL ring id
1616  * @flush_timer: Timer for flushing the coalesced TCL HP writes
1617  * @sampling_session_tx_bytes: Num bytes transmitted in the sampling time
1618  * @bytes_flush_thresh: Bytes threshold to flush the TCL HP register write
1619  * @coalesce_end_time: End timestamp for current coalescing session
1620  * @bytes_coalesced: Num bytes coalesced in the current session
1621  * @prev_tx_packets: Previous TX packets accounted
1622  * @prev_tx_bytes: Previous TX bytes accounted
1623  * @prev_rx_bytes: Previous RX bytes accounted
1624  * @expire_time: expiry time for sample
1625  * @tput_pass_cnt: threshold throughput pass counter
1626  */
1627 struct dp_swlm_tcl_params {
1628 	struct dp_soc *soc;
1629 	uint32_t ring_id;
1630 	qdf_timer_t flush_timer;
1631 	uint32_t sampling_session_tx_bytes;
1632 	uint32_t bytes_flush_thresh;
1633 	uint64_t coalesce_end_time;
1634 	uint32_t bytes_coalesced;
1635 	uint32_t prev_tx_packets;
1636 	uint32_t prev_tx_bytes;
1637 	uint32_t prev_rx_bytes;
1638 	uint64_t expire_time;
1639 	uint32_t tput_pass_cnt;
1640 };
1641 
1642 /**
1643  * struct dp_swlm_params: Parameters for different modules in the
1644  *			  Software latency manager.
1645  * @rx_traffic_thresh: Threshold for RX traffic, to begin TCL register
1646  *			   write coalescing
1647  * @tx_traffic_thresh: Threshold for TX traffic, to begin TCL register
1648  *			   write coalescing
1649  * @sampling_time: Sampling time to test the throughput threshold
1650  * @time_flush_thresh: Time threshold to flush the TCL HP register write
1651  * @tx_thresh_multiplier: Multiplier to deduce the bytes threshold after
1652  *			      which the TCL HP register is written, thereby
1653  *			      ending the coalescing.
1654  * @tx_pkt_thresh: Threshold for TX packet count, to begin TCL register
1655  *		       write coalescing
1656  * @tcl: TCL ring specific params
1657  */
1658 
1659 struct dp_swlm_params {
1660 	uint32_t rx_traffic_thresh;
1661 	uint32_t tx_traffic_thresh;
1662 	uint32_t sampling_time;
1663 	uint32_t time_flush_thresh;
1664 	uint32_t tx_thresh_multiplier;
1665 	uint32_t tx_pkt_thresh;
1666 	struct dp_swlm_tcl_params tcl[MAX_TCL_DATA_RINGS];
1667 };
1668 
1669 /**
1670  * struct dp_swlm - Software latency manager context
1671  * @ops: SWLM ops pointers
1672  * @is_enabled: SWLM enabled/disabled
1673  * @is_init: SWLM module initialized
1674  * @stats: SWLM stats
1675  * @params: SWLM SRNG params
1676  * @tcl_flush_timer: flush timer for TCL register writes
1677  */
1678 struct dp_swlm {
1679 	struct dp_swlm_ops *ops;
1680 	uint8_t is_enabled:1,
1681 		is_init:1;
1682 	struct dp_swlm_stats stats;
1683 	struct dp_swlm_params params;
1684 };
1685 #endif
1686 
1687 #ifdef IPA_OFFLOAD
1688 /* IPA uC datapath offload Wlan Tx resources */
1689 struct ipa_dp_tx_rsc {
1690 	/* Resource info to be passed to IPA */
1691 	qdf_dma_addr_t ipa_tcl_ring_base_paddr;
1692 	void *ipa_tcl_ring_base_vaddr;
1693 	uint32_t ipa_tcl_ring_size;
1694 	qdf_dma_addr_t ipa_tcl_hp_paddr;
1695 	uint32_t alloc_tx_buf_cnt;
1696 
1697 	qdf_dma_addr_t ipa_wbm_ring_base_paddr;
1698 	void *ipa_wbm_ring_base_vaddr;
1699 	uint32_t ipa_wbm_ring_size;
1700 	qdf_dma_addr_t ipa_wbm_tp_paddr;
1701 	/* WBM2SW HP shadow paddr */
1702 	qdf_dma_addr_t ipa_wbm_hp_shadow_paddr;
1703 
1704 	/* TX buffers populated into the WBM ring */
1705 	void **tx_buf_pool_vaddr_unaligned;
1706 	qdf_dma_addr_t *tx_buf_pool_paddr_unaligned;
1707 };
1708 #endif
1709 
1710 struct dp_tx_msdu_info_s;
1711 /*
1712  * enum dp_context_type - DP Context Type
1713  * @DP_CONTEXT_TYPE_SOC: Context type DP SOC
1714  * @DP_CONTEXT_TYPE_PDEV: Context type DP PDEV
1715  * @DP_CONTEXT_TYPE_VDEV: Context type DP VDEV
1716  * @DP_CONTEXT_TYPE_PEER: Context type DP PEER
1717  * @DP_CONTEXT_TYPE_MON_SOC: Context type DP MON SOC
1718  * @DP_CONTEXT_TYPE_MON_PDEV: Context type DP MON PDEV
1719  *
1720  * Helper enums to be used to retrieve the size of the corresponding
1721  * data structure by passing the type.
1722  */
1723 enum dp_context_type {
1724 	DP_CONTEXT_TYPE_SOC,
1725 	DP_CONTEXT_TYPE_PDEV,
1726 	DP_CONTEXT_TYPE_VDEV,
1727 	DP_CONTEXT_TYPE_PEER,
1728 	DP_CONTEXT_TYPE_MON_SOC,
1729 	DP_CONTEXT_TYPE_MON_PDEV
1730 };
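/*
 * Illustrative use of the enum above (a sketch; 'soc' and the allocation
 * site are hypothetical): common code passes the context type to the
 * arch-specific size callback so target specific structures can be
 * allocated without knowing their layout, e.g.
 *
 *	qdf_size_t ctx_size =
 *		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_PDEV);
 *	struct dp_pdev *pdev = qdf_mem_malloc(ctx_size);
 */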
1731 
1732 /*
1733  * struct dp_arch_ops - DP target specific arch ops
1736  * @tx_hw_enqueue: enqueue TX data to HW
1737  * @tx_comp_get_params_from_hal_desc: get software tx descriptor and release
1738  * 				      source from HAL desc for wbm release ring
1739  * @dp_service_near_full_srngs: Handler for servicing the near full IRQ
1740  * @txrx_set_vdev_param: target specific ops while setting vdev params
1741  * @dp_srng_test_and_update_nf_params: Check if the srng is in near full state
1742  *				and set the near-full params.
1743  */
1744 struct dp_arch_ops {
1745 	/* INIT/DEINIT Arch Ops */
1746 	QDF_STATUS (*txrx_soc_attach)(struct dp_soc *soc,
1747 				      struct cdp_soc_attach_params *params);
1748 	QDF_STATUS (*txrx_soc_detach)(struct dp_soc *soc);
1749 	QDF_STATUS (*txrx_soc_init)(struct dp_soc *soc);
1750 	QDF_STATUS (*txrx_soc_deinit)(struct dp_soc *soc);
1751 	QDF_STATUS (*txrx_soc_srng_alloc)(struct dp_soc *soc);
1752 	QDF_STATUS (*txrx_soc_srng_init)(struct dp_soc *soc);
1753 	void (*txrx_soc_srng_deinit)(struct dp_soc *soc);
1754 	void (*txrx_soc_srng_free)(struct dp_soc *soc);
1755 	QDF_STATUS (*txrx_pdev_attach)(struct dp_pdev *pdev,
1756 				       struct cdp_pdev_attach_params *params);
1757 	QDF_STATUS (*txrx_pdev_detach)(struct dp_pdev *pdev);
1758 	QDF_STATUS (*txrx_vdev_attach)(struct dp_soc *soc,
1759 				       struct dp_vdev *vdev);
1760 	QDF_STATUS (*txrx_vdev_detach)(struct dp_soc *soc,
1761 				       struct dp_vdev *vdev);
1762 	QDF_STATUS (*txrx_peer_map_attach)(struct dp_soc *soc);
1763 	void (*txrx_peer_map_detach)(struct dp_soc *soc);
1764 	QDF_STATUS (*dp_rxdma_ring_sel_cfg)(struct dp_soc *soc);
1765 	void (*soc_cfg_attach)(struct dp_soc *soc);
1766 
1767 	/* TX RX Arch Ops */
1768 	QDF_STATUS (*tx_hw_enqueue)(struct dp_soc *soc, struct dp_vdev *vdev,
1769 				    struct dp_tx_desc_s *tx_desc,
1770 				    uint16_t fw_metadata,
1771 				    struct cdp_tx_exception_metadata *metadata,
1772 				    struct dp_tx_msdu_info_s *msdu_info);
1773 
1774 	void (*tx_comp_get_params_from_hal_desc)(struct dp_soc *soc,
1775 						 void *tx_comp_hal_desc,
1776 						 struct dp_tx_desc_s **desc);
1777 	void (*dp_tx_process_htt_completion)(struct dp_soc *soc,
1778 					     struct dp_tx_desc_s *tx_desc,
1779 					     uint8_t *status,
1780 					     uint8_t ring_id);
1781 
1782 	uint32_t (*dp_rx_process)(struct dp_intr *int_ctx,
1783 				  hal_ring_handle_t hal_ring_hdl,
1784 				  uint8_t reo_ring_num, uint32_t quota);
1785 
1786 	QDF_STATUS (*dp_tx_desc_pool_init)(struct dp_soc *soc,
1787 					   uint32_t num_elem,
1788 					   uint8_t pool_id);
1789 	void (*dp_tx_desc_pool_deinit)(
1790 				struct dp_soc *soc,
1791 				struct dp_tx_desc_pool_s *tx_desc_pool,
1792 				uint8_t pool_id);
1793 
1794 	QDF_STATUS (*dp_rx_desc_pool_init)(struct dp_soc *soc,
1795 					   struct rx_desc_pool *rx_desc_pool,
1796 					   uint32_t pool_id);
1797 	void (*dp_rx_desc_pool_deinit)(struct dp_soc *soc,
1798 				       struct rx_desc_pool *rx_desc_pool,
1799 				       uint32_t pool_id);
1800 
1801 	QDF_STATUS (*dp_wbm_get_rx_desc_from_hal_desc)(
1802 						struct dp_soc *soc,
1803 						void *ring_desc,
1804 						struct dp_rx_desc **r_rx_desc);
1805 
1806 	bool
1807 	(*dp_rx_intrabss_handle_nawds)(struct dp_soc *soc,
1808 				       struct dp_txrx_peer *ta_txrx_peer,
1809 				       qdf_nbuf_t nbuf_copy,
1810 				       struct cdp_tid_rx_stats *tid_stats);
1811 
1812 	struct dp_rx_desc *(*dp_rx_desc_cookie_2_va)(struct dp_soc *soc,
1813 						     uint32_t cookie);
1814 	uint32_t (*dp_service_near_full_srngs)(struct dp_soc *soc,
1815 					       struct dp_intr *int_ctx,
1816 					       uint32_t dp_budget);
1817 	void (*tx_implicit_rbm_set)(struct dp_soc *soc, uint8_t tx_ring_id,
1818 				    uint8_t bm_id);
1819 	uint16_t (*dp_rx_peer_metadata_peer_id_get)(struct dp_soc *soc,
1820 						    uint32_t peer_metadata);
1821 	/* Control Arch Ops */
1822 	QDF_STATUS (*txrx_set_vdev_param)(struct dp_soc *soc,
1823 					  struct dp_vdev *vdev,
1824 					  enum cdp_vdev_param_type param,
1825 					  cdp_config_param_type val);
1826 
1827 	/* Misc Arch Ops */
1828 	qdf_size_t (*txrx_get_context_size)(enum dp_context_type);
1829 	qdf_size_t (*txrx_get_mon_context_size)(enum dp_context_type);
1830 	int (*dp_srng_test_and_update_nf_params)(struct dp_soc *soc,
1831 						 struct dp_srng *dp_srng,
1832 						 int *max_reap_limit);
1833 
1834 	/* MLO ops */
1835 #ifdef WLAN_FEATURE_11BE_MLO
1836 #ifdef WLAN_MCAST_MLO
1837 	void (*dp_tx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
1838 				    qdf_nbuf_t nbuf);
1839 	bool (*dp_rx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
1840 				    struct dp_txrx_peer *peer, qdf_nbuf_t nbuf);
1841 #endif
1842 	void (*mlo_peer_find_hash_detach)(struct dp_soc *soc);
1843 	QDF_STATUS (*mlo_peer_find_hash_attach)(struct dp_soc *soc);
1844 	void (*mlo_peer_find_hash_add)(struct dp_soc *soc,
1845 				       struct dp_peer *peer);
1846 	void (*mlo_peer_find_hash_remove)(struct dp_soc *soc,
1847 					  struct dp_peer *peer);
1848 	struct dp_peer *(*mlo_peer_find_hash_find)(struct dp_soc *soc,
1849 						   uint8_t *peer_mac_addr,
1850 						   int mac_addr_is_aligned,
1851 						   enum dp_mod_id mod_id,
1852 						   uint8_t vdev_id);
1853 #endif
1854 	void (*get_rx_hash_key)(struct dp_soc *soc,
1855 				struct cdp_lro_hash_config *lro_hash);
1856 	void (*txrx_print_peer_stats)(struct cdp_peer_stats *peer_stats,
1857 				      enum peer_stats_type stats_type);
1858 	/* Dp peer reorder queue setup */
1859 	QDF_STATUS (*dp_peer_rx_reorder_queue_setup)(struct dp_soc *soc,
1860 						     struct dp_peer *peer,
1861 						     int tid,
1862 						     uint32_t ba_window_size);
1863 	struct dp_peer *(*dp_find_peer_by_destmac)(struct dp_soc *soc,
1864 						   uint8_t *dest_mac_addr,
1865 						   uint8_t vdev_id);
1866 	QDF_STATUS
1867 	(*dp_tx_compute_hw_delay)(struct dp_soc *soc,
1868 				  struct dp_vdev *vdev,
1869 				  struct hal_tx_completion_status *ts,
1870 				  uint32_t *delay_us);
1871 };
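/*
 * Illustrative dispatch through the ops table above (a sketch, not code
 * lifted from the TX path): common data path code calls the arch ops via
 * the SOC so that one code base serves multiple target generations, e.g.
 *
 *	QDF_STATUS status;
 *
 *	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc, fw_metadata,
 *					     tx_exc_metadata, msdu_info);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		(return the descriptor to its pool and drop the frame)
 */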
1872 
1873 /**
1874  * struct dp_soc_features: Data structure holding the SOC level feature flags.
1875  * @pn_in_reo_dest: PN provided by hardware in the REO destination ring.
1876  * @dmac_cmn_src_rxbuf_ring_enabled: Flag to indicate DMAC mode common Rx
1877  *				     buffer source rings
1878  * @rssi_dbm_conv_support: RSSI dBm conversion support param.
1879  */
1880 struct dp_soc_features {
1881 	uint8_t pn_in_reo_dest:1,
1882 		dmac_cmn_src_rxbuf_ring_enabled:1;
1883 	bool rssi_dbm_conv_support;
1884 };
1885 
1886 enum sysfs_printing_mode {
1887 	PRINTING_MODE_DISABLED = 0,
1888 	PRINTING_MODE_ENABLED
1889 };
1890 
1891 #ifdef WLAN_SYSFS_DP_STATS
1892 /**
1893  * struct sysfs_stats_config: Data structure holding stats sysfs config.
1894  * @rw_stats_lock: Lock to read and write to stat_type and pdev_id.
1895  * @sysfs_read_lock: Lock held while another stat req is being executed.
1896  * @sysfs_write_user_buffer: Lock to change buff len, max buf len
1897  * and *buf.
1898  * @sysfs_txrx_fw_request_done: Event to wait for firmware response.
1899  * @stat_type_requested: stat type requested.
1900  * @mac_id: mac id for which stat type are requested.
1901  * @printing_mode: Should a print go through.
1902  * @process_id: Process allowed to write to buffer.
1903  * @curr_buffer_length: Curr length of buffer written
1904  * @max_buffer_length: Max buffer length.
1905  * @buf: Sysfs buffer.
1906  */
1907 struct sysfs_stats_config {
1908 	/* lock held to read stats */
1909 	qdf_spinlock_t rw_stats_lock;
1910 	qdf_mutex_t sysfs_read_lock;
1911 	qdf_spinlock_t sysfs_write_user_buffer;
1912 	qdf_event_t sysfs_txrx_fw_request_done;
1913 	uint32_t stat_type_requested;
1914 	uint32_t mac_id;
1915 	enum sysfs_printing_mode printing_mode;
1916 	int process_id;
1917 	uint16_t curr_buffer_length;
1918 	uint16_t max_buffer_length;
1919 	char *buf;
1920 };
1921 #endif
1922 
1923 /* SOC level structure for data path */
1924 struct dp_soc {
1925 	/**
1926 	 * re-use memory section starts
1927 	 */
1928 
1929 	/* Common base structure - Should be the first member */
1930 	struct cdp_soc_t cdp_soc;
1931 
1932 	/* SoC Obj */
1933 	struct cdp_ctrl_objmgr_psoc *ctrl_psoc;
1934 
1935 	/* OS device abstraction */
1936 	qdf_device_t osdev;
1937 
1938 	/*cce disable*/
1939 	bool cce_disable;
1940 
1941 	/* WLAN config context */
1942 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx;
1943 
1944 	/* HTT handle for host-fw interaction */
1945 	struct htt_soc *htt_handle;
1946 
1947 	/* Common init done */
1948 	qdf_atomic_t cmn_init_done;
1949 
1950 	/* Opaque hif handle */
1951 	struct hif_opaque_softc *hif_handle;
1952 
1953 	/* PDEVs on this SOC */
1954 	struct dp_pdev *pdev_list[MAX_PDEV_CNT];
1955 
1956 	/* Ring used to replenish rx buffers (maybe to the firmware or MAC) */
1957 	struct dp_srng rx_refill_buf_ring[MAX_PDEV_CNT];
1958 
1959 	struct dp_srng rxdma_mon_desc_ring[MAX_NUM_LMAC_HW];
1960 
1961 	/* RXDMA error destination ring */
1962 	struct dp_srng rxdma_err_dst_ring[MAX_NUM_LMAC_HW];
1963 
1964 	/* RXDMA monitor buffer replenish ring */
1965 	struct dp_srng rxdma_mon_buf_ring[MAX_NUM_LMAC_HW];
1966 
1967 	/* RXDMA monitor destination ring */
1968 	struct dp_srng rxdma_mon_dst_ring[MAX_NUM_LMAC_HW];
1969 
1970 	/* RXDMA monitor status ring. TBD: Check format of this ring */
1971 	struct dp_srng rxdma_mon_status_ring[MAX_NUM_LMAC_HW];
1972 
1973 	/* Number of PDEVs */
1974 	uint8_t pdev_count;
1975 
1976 	/*ast override support in HW*/
1977 	bool ast_override_support;
1978 
1979 	/*number of hw dscp tid map*/
1980 	uint8_t num_hw_dscp_tid_map;
1981 
1982 	/* HAL SOC handle */
1983 	hal_soc_handle_t hal_soc;
1984 
1985 	/* rx monitor pkt tlv size */
1986 	uint16_t rx_mon_pkt_tlv_size;
1987 	/* rx pkt tlv size */
1988 	uint16_t rx_pkt_tlv_size;
1989 
1990 	struct dp_arch_ops arch_ops;
1991 
1992 	/* Device ID coming from Bus sub-system */
1993 	uint32_t device_id;
1994 
1995 	/* Link descriptor pages */
1996 	struct qdf_mem_multi_page_t link_desc_pages;
1997 
1998 	/* total link descriptors for regular RX and TX */
1999 	uint32_t total_link_descs;
2000 
2001 	/* Link descriptor Idle list for HW internal use (SRNG mode) */
2002 	struct dp_srng wbm_idle_link_ring;
2003 
2004 	/* Link descriptor Idle list for HW internal use (scatter buffer mode)
2005 	 */
2006 	qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS];
2007 	void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS];
2008 	uint32_t num_scatter_bufs;
2009 
2010 	/* Tx SW descriptor pool */
2011 	struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS];
2012 
2013 	/* Tx MSDU Extension descriptor pool */
2014 	struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS];
2015 
2016 	/* Tx TSO descriptor pool */
2017 	struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS];
2018 
2019 	/* Tx TSO Num of segments pool */
2020 	struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS];
2021 
2022 	/* REO destination rings */
2023 	struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS];
2024 
2025 	/* REO exception ring - TBD: see if this can be combined with reo_dest_ring */
2026 	struct dp_srng reo_exception_ring;
2027 
2028 	/* REO reinjection ring */
2029 	struct dp_srng reo_reinject_ring;
2030 
2031 	/* REO command ring */
2032 	struct dp_srng reo_cmd_ring;
2033 
2034 	/* REO command status ring */
2035 	struct dp_srng reo_status_ring;
2036 
2037 	/* WBM Rx release ring */
2038 	struct dp_srng rx_rel_ring;
2039 
2040 	/* TCL data ring */
2041 	struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS];
2042 
2043 	/* Number of Tx comp rings */
2044 	uint8_t num_tx_comp_rings;
2045 
2046 	/* Number of TCL data rings */
2047 	uint8_t num_tcl_data_rings;
2048 
2049 	/* TCL CMD_CREDIT ring */
2050 	bool init_tcl_cmd_cred_ring;
2051 
2052 	/* Used as a credit based ring on QCN9000, else as a command ring */
2053 	struct dp_srng tcl_cmd_credit_ring;
2054 
2055 	/* TCL command status ring */
2056 	struct dp_srng tcl_status_ring;
2057 
2058 	/* WBM Tx completion rings */
2059 	struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS];
2060 
2061 	/* Common WBM link descriptor release ring (SW to WBM) */
2062 	struct dp_srng wbm_desc_rel_ring;
2063 
2064 	/* DP Interrupts */
2065 	struct dp_intr intr_ctx[WLAN_CFG_INT_NUM_CONTEXTS];
2066 
2067 	/* Monitor mode mac id to dp_intr_id map */
2068 	int mon_intr_id_lmac_map[MAX_NUM_LMAC_HW];
2069 	/* Rx SW descriptor pool for RXDMA monitor buffer */
2070 	struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS];
2071 
2072 	/* Rx SW descriptor pool for RXDMA status buffer */
2073 	struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS];
2074 
2075 	/* Rx SW descriptor pool for RXDMA buffer */
2076 	struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS];
2077 
2078 	/* Number of REO destination rings */
2079 	uint8_t num_reo_dest_rings;
2080 
2081 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2082 	/* lock to control access to soc TX descriptors */
2083 	qdf_spinlock_t flow_pool_array_lock;
2084 
2085 	/* pause callback to pause TX queues as per flow control */
2086 	tx_pause_callback pause_cb;
2087 
2088 	/* flow pool related statistics */
2089 	struct dp_txrx_pool_stats pool_stats;
2090 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
2091 
2092 	uint32_t wbm_idle_scatter_buf_size;
2093 
2094 	/* VDEVs on this SOC */
2095 	struct dp_vdev *vdev_id_map[MAX_VDEV_CNT];
2096 
2097 	/* Tx H/W queues lock */
2098 	qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];
2099 
2100 	/* Tx ring map for interrupt processing */
2101 	uint8_t tx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
2102 
2103 	/* Rx ring map for interrupt processing */
2104 	uint8_t rx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
2105 
2106 	/* peer ID to peer object map (array of pointers to peer objects) */
2107 	struct dp_peer **peer_id_to_obj_map;
2108 
2109 	struct {
2110 		unsigned mask;
2111 		unsigned idx_bits;
2112 		TAILQ_HEAD(, dp_peer) * bins;
2113 	} peer_hash;
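	/*
	 * Illustrative lookup against the hash table above (a sketch;
	 * dp_peer_mac_hash() is a hypothetical helper): the peer MAC
	 * address is hashed and masked down to a bin index, then the bin
	 * is walked comparing MAC addresses, e.g.
	 *	idx = dp_peer_mac_hash(&mac_addr) & soc->peer_hash.mask;
	 *	walk soc->peer_hash.bins[idx] until the MAC matches.
	 */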
2114 
2115 	/* rx defrag state - TBD: do we need this per radio? */
2116 	struct {
2117 		struct {
2118 			TAILQ_HEAD(, dp_rx_tid_defrag) waitlist;
2119 			uint32_t timeout_ms;
2120 			uint32_t next_flush_ms;
2121 			qdf_spinlock_t defrag_lock;
2122 		} defrag;
2123 		struct {
2124 			int defrag_timeout_check;
2125 			int dup_check;
2126 		} flags;
2127 		TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list;
2128 		qdf_spinlock_t reo_cmd_lock;
2129 	} rx;
2130 
2131 	/* optional rx processing function */
2132 	void (*rx_opt_proc)(
2133 		struct dp_vdev *vdev,
2134 		struct dp_peer *peer,
2135 		unsigned tid,
2136 		qdf_nbuf_t msdu_list);
2137 
2138 	/* pool addr for mcast enhance buff */
2139 	struct {
2140 		int size;
2141 		uint32_t paddr;
2142 		uint32_t *vaddr;
2143 		struct dp_tx_me_buf_t *freelist;
2144 		int buf_in_use;
2145 		qdf_dma_mem_context(memctx);
2146 	} me_buf;
2147 
2148 	/* Protect peer hash table */
2149 	DP_MUTEX_TYPE peer_hash_lock;
2150 	/* Protect peer_id_to_objmap */
2151 	DP_MUTEX_TYPE peer_map_lock;
2152 
2153 	/* maximum number of supported peers */
2154 	uint32_t max_peers;
2155 	/* maximum value for peer_id */
2156 	uint32_t max_peer_id;
2157 
2158 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
2159 	uint32_t peer_id_shift;
2160 	uint32_t peer_id_mask;
2161 #endif
2162 
2163 	/* SoC level data path statistics */
2164 	struct dp_soc_stats stats;
2165 #ifdef WLAN_SYSFS_DP_STATS
2166 	/* sysfs config for DP stats */
2167 	struct sysfs_stats_config *sysfs_config;
2168 #endif
2169 	/* timestamp to keep track of msdu buffers received on reo err ring */
2170 	uint64_t rx_route_err_start_pkt_ts;
2171 
2172 	/* Num RX Route err in a given window to keep track of rate of errors */
2173 	uint32_t rx_route_err_in_window;
2174 
2175 	/* Enable processing of Tx completion status words */
2176 	bool process_tx_status;
2177 	bool process_rx_status;
2178 	struct dp_ast_entry **ast_table;
2179 	struct {
2180 		unsigned mask;
2181 		unsigned idx_bits;
2182 		TAILQ_HEAD(, dp_ast_entry) * bins;
2183 	} ast_hash;
2184 
2185 #ifdef DP_TX_HW_DESC_HISTORY
2186 	struct dp_tx_hw_desc_history *tx_hw_desc_history;
2187 #endif
2188 
2189 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
2190 	struct dp_rx_history *rx_ring_history[MAX_REO_DEST_RINGS];
2191 	struct dp_rx_refill_history *rx_refill_ring_history[MAX_PDEV_CNT];
2192 	struct dp_rx_err_history *rx_err_ring_history;
2193 	struct dp_rx_reinject_history *rx_reinject_ring_history;
2194 #endif
2195 
2196 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
2197 	struct dp_mon_status_ring_history *mon_status_ring_history;
2198 #endif
2199 
2200 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
2201 	struct dp_tx_tcl_history *tx_tcl_history;
2202 	struct dp_tx_comp_history *tx_comp_history;
2203 #endif
2204 
2205 	qdf_spinlock_t ast_lock;
2206 	/* Timer for AST entry ageout maintenance */
2207 	qdf_timer_t ast_aging_timer;
2208 
2209 	/*Timer counter for WDS AST entry ageout*/
2210 	uint8_t wds_ast_aging_timer_cnt;
2211 	bool pending_ageout;
2212 	bool ast_offload_support;
2213 	uint32_t max_ast_ageout_count;
2214 	uint8_t eapol_over_control_port;
2215 
2216 	uint8_t sta_mode_search_policy;
2217 	qdf_timer_t lmac_reap_timer;
2218 	uint8_t lmac_timer_init;
2219 	qdf_timer_t int_timer;
2220 	uint8_t intr_mode;
2221 	uint8_t lmac_polled_mode;
2222 
2223 	qdf_list_t reo_desc_freelist;
2224 	qdf_spinlock_t reo_desc_freelist_lock;
2225 
2226 	/* htt stats */
2227 	struct htt_t2h_stats htt_stats;
2228 
2229 	void *external_txrx_handle; /* External data path handle */
2230 #ifdef IPA_OFFLOAD
2231 	struct ipa_dp_tx_rsc ipa_uc_tx_rsc;
2232 #ifdef IPA_WDI3_TX_TWO_PIPES
2233 	/* Resources for the alternative IPA TX pipe */
2234 	struct ipa_dp_tx_rsc ipa_uc_tx_rsc_alt;
2235 #endif
2236 
2237 	/* IPA uC datapath offload Wlan Rx resources */
2238 	struct {
2239 		/* Resource info to be passed to IPA */
2240 		qdf_dma_addr_t ipa_reo_ring_base_paddr;
2241 		void *ipa_reo_ring_base_vaddr;
2242 		uint32_t ipa_reo_ring_size;
2243 		qdf_dma_addr_t ipa_reo_tp_paddr;
2244 
2245 		/* Resource info to be passed to firmware and IPA */
2246 		qdf_dma_addr_t ipa_rx_refill_buf_ring_base_paddr;
2247 		void *ipa_rx_refill_buf_ring_base_vaddr;
2248 		uint32_t ipa_rx_refill_buf_ring_size;
2249 		qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr;
2250 	} ipa_uc_rx_rsc;
2251 
2252 	qdf_atomic_t ipa_pipes_enabled;
2253 	bool ipa_first_tx_db_access;
2254 	qdf_spinlock_t ipa_rx_buf_map_lock;
2255 	bool ipa_rx_buf_map_lock_initialized;
2256 	uint8_t ipa_reo_ctx_lock_required[MAX_REO_DEST_RINGS];
2257 #endif
2258 
2259 #ifdef WLAN_FEATURE_STATS_EXT
2260 	struct {
2261 		uint32_t rx_mpdu_received;
2262 		uint32_t rx_mpdu_missed;
2263 	} ext_stats;
2264 	qdf_event_t rx_hw_stats_event;
2265 	qdf_spinlock_t rx_hw_stats_lock;
2266 	bool is_last_stats_ctx_init;
2267 #endif /* WLAN_FEATURE_STATS_EXT */
2268 
2269 	/* Indicates HTT map/unmap versions*/
2270 	uint8_t peer_map_unmap_versions;
2271 	/* Per peer per Tid ba window size support */
2272 	uint8_t per_tid_basize_max_tid;
2273 	/* Soc level flag to enable da_war */
2274 	uint8_t da_war_enabled;
2275 	/* number of active ast entries */
2276 	uint32_t num_ast_entries;
2277 	/* peer extended rate statistics context at soc level*/
2278 	struct cdp_soc_rate_stats_ctx *rate_stats_ctx;
2279 	/* peer extended rate statistics control flag */
2280 	bool peerstats_enabled;
2281 
2282 	/* 8021p PCP-TID map values */
2283 	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
2284 	/* TID map priority value */
2285 	uint8_t tidmap_prty;
2286 	/* Pointer to global per ring type specific configuration table */
2287 	struct wlan_srng_cfg *wlan_srng_cfg;
2288 	/* Num Tx outstanding on device */
2289 	qdf_atomic_t num_tx_outstanding;
2290 	/* Num Tx exception on device */
2291 	qdf_atomic_t num_tx_exception;
2292 	/* Num Tx allowed */
2293 	uint32_t num_tx_allowed;
2294 	/* Preferred HW mode */
2295 	uint8_t preferred_hw_mode;
2296 
2297 	/**
2298 	 * Flag to indicate whether WAR to address single cache entry
2299 	 * invalidation bug is enabled or not
2300 	 */
2301 	bool is_rx_fse_full_cache_invalidate_war_enabled;
2302 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
2303 	/**
2304 	 * Pointer to DP RX Flow FST at SOC level if
2305 	 * is_rx_flow_search_table_per_pdev is false
2306 	 * TBD: rx_fst[num_macs] if we decide to have per mac FST
2307 	 */
2308 	struct dp_rx_fst *rx_fst;
2309 #ifdef WLAN_SUPPORT_RX_FISA
2310 	uint8_t fisa_enable;
2311 	uint8_t fisa_lru_del_enable;
2312 	/**
2313 	 * Params used for controlling the fisa aggregation dynamically
2314 	 */
2315 	struct {
2316 		qdf_atomic_t skip_fisa;
2317 		uint8_t fisa_force_flush[MAX_REO_DEST_RINGS];
2318 	} skip_fisa_param;
2319 
2320 	/**
2321 	 * CMEM address and size for FST in CMEM. This is the address
2322 	 * shared during init time.
2323 	 */
2324 	uint64_t fst_cmem_base;
2325 	uint64_t fst_cmem_size;
2326 #endif
2327 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
2328 	/* SG supported for msdu continued packets from wbm release ring */
2329 	bool wbm_release_desc_rx_sg_support;
2330 	bool peer_map_attach_success;
2331 	/* Flag to disable mac1 ring interrupts */
2332 	bool disable_mac1_intr;
2333 	/* Flag to disable mac2 ring interrupts */
2334 	bool disable_mac2_intr;
2335 
2336 	struct {
2337 		/* 1st msdu in sg for msdu continued packets in wbm rel ring */
2338 		bool wbm_is_first_msdu_in_sg;
2339 		/* Wbm sg list head */
2340 		qdf_nbuf_t wbm_sg_nbuf_head;
2341 		/* Wbm sg list tail */
2342 		qdf_nbuf_t wbm_sg_nbuf_tail;
2343 		uint32_t wbm_sg_desc_msdu_len;
2344 	} wbm_sg_param;
2345 	/* Number of msdu exception descriptors */
2346 	uint32_t num_msdu_exception_desc;
2347 
2348 	/* RX buffer params */
2349 	struct rx_buff_pool rx_buff_pool[MAX_PDEV_CNT];
2350 	struct rx_refill_buff_pool rx_refill_buff_pool;
2351 	/* Save recent operation related variable */
2352 	struct dp_last_op_info last_op_info;
2353 	TAILQ_HEAD(, dp_peer) inactive_peer_list;
2354 	qdf_spinlock_t inactive_peer_list_lock;
2355 	TAILQ_HEAD(, dp_vdev) inactive_vdev_list;
2356 	qdf_spinlock_t inactive_vdev_list_lock;
2357 	/* lock to protect vdev_id_map table*/
2358 	qdf_spinlock_t vdev_map_lock;
2359 
2360 	/* Flow Search Table is in CMEM */
2361 	bool fst_in_cmem;
2362 
2363 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
2364 	struct dp_swlm swlm;
2365 #endif
2366 
2367 #ifdef FEATURE_RUNTIME_PM
2368 	/* DP Rx timestamp */
2369 	qdf_time_t rx_last_busy;
2370 	/* Dp runtime refcount */
2371 	qdf_atomic_t dp_runtime_refcount;
2372 	/* Dp tx pending count in RTPM */
2373 	qdf_atomic_t tx_pending_rtpm;
2374 #endif
2375 	/* Queue for invalid buffers allocated for RX */
2376 	qdf_nbuf_queue_t invalid_buf_queue;
2377 
2378 #ifdef FEATURE_MEC
2379 	/** @mec_lock: spinlock for MEC table */
2380 	qdf_spinlock_t mec_lock;
2381 	/** @mec_cnt: number of active mec entries */
2382 	qdf_atomic_t mec_cnt;
2383 	struct {
2384 		/** @mask: mask bits */
2385 		uint32_t mask;
2386 		/** @idx_bits: index to shift bits */
2387 		uint32_t idx_bits;
2388 		/** @bins: MEC table */
2389 		TAILQ_HEAD(, dp_mec_entry) * bins;
2390 	} mec_hash;
2391 #endif
2392 
2393 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
2394 	qdf_list_t reo_desc_deferred_freelist;
2395 	qdf_spinlock_t reo_desc_deferred_freelist_lock;
2396 	bool reo_desc_deferred_freelist_init;
2397 #endif
2398 	/* BM id for first WBM2SW  ring */
2399 	uint32_t wbm_sw0_bm_id;
2400 
2401 	/* Store arch_id from device_id */
2402 	uint16_t arch_id;
2403 
2404 	/* link desc ID start per device type */
2405 	uint32_t link_desc_id_start;
2406 
2407 	/* CMEM buffer target reserved for host usage */
2408 	uint64_t cmem_base;
2409 	/* CMEM size in bytes */
2410 	uint64_t cmem_total_size;
2411 	/* CMEM free size in bytes */
2412 	uint64_t cmem_avail_size;
2413 
2414 	/* SOC level feature flags */
2415 	struct dp_soc_features features;
2416 
2417 #ifdef WIFI_MONITOR_SUPPORT
2418 	struct dp_mon_soc *monitor_soc;
2419 #endif
2420 	uint8_t rxdma2sw_rings_not_supported:1,
2421 		wbm_sg_last_msdu_war:1,
2422 		mec_fw_offload:1,
2423 		multi_peer_grp_cmd_supported:1;
2424 
2425 	/* Number of Rx refill rings */
2426 	uint8_t num_rx_refill_buf_rings;
2427 #ifdef FEATURE_RUNTIME_PM
2428 	/* flag to indicate vote for runtime_pm for high tput case */
2429 	qdf_atomic_t rtpm_high_tput_flag;
2430 #endif
2431 	/* Buffer manager ID for idle link descs */
2432 	uint8_t idle_link_bm_id;
2433 	qdf_atomic_t ref_count;
2434 
2435 	unsigned long vdev_stats_id_map;
2436 	bool txmon_hw_support;
2437 
2438 #ifdef DP_UMAC_HW_RESET_SUPPORT
2439 	struct dp_soc_umac_reset_ctx umac_reset_ctx;
2440 #endif
2441 	/* PPDU to link_id mapping parameters */
2442 	uint8_t link_id_offset;
2443 	uint8_t link_id_bits;
2444 };
2445 
2446 #ifdef IPA_OFFLOAD
2447 /**
2448  * struct dp_ipa_resources - Resources needed for IPA
2449  */
2450 struct dp_ipa_resources {
2451 	qdf_shared_mem_t tx_ring;
2452 	uint32_t tx_num_alloc_buffer;
2453 
2454 	qdf_shared_mem_t tx_comp_ring;
2455 	qdf_shared_mem_t rx_rdy_ring;
2456 	qdf_shared_mem_t rx_refill_ring;
2457 
2458 	/* IPA UC doorbell registers paddr */
2459 	qdf_dma_addr_t tx_comp_doorbell_paddr;
2460 	uint32_t *tx_comp_doorbell_vaddr;
2461 	qdf_dma_addr_t rx_ready_doorbell_paddr;
2462 
2463 	bool is_db_ddr_mapped;
2464 
2465 #ifdef IPA_WDI3_TX_TWO_PIPES
2466 	qdf_shared_mem_t tx_alt_ring;
2467 	uint32_t tx_alt_ring_num_alloc_buffer;
2468 	qdf_shared_mem_t tx_alt_comp_ring;
2469 
2470 	/* IPA UC doorbell registers paddr */
2471 	qdf_dma_addr_t tx_alt_comp_doorbell_paddr;
2472 	uint32_t *tx_alt_comp_doorbell_vaddr;
2473 #endif
2474 };
2475 #endif
2476 
2477 #define MAX_RX_MAC_RINGS 2
2478 /* Same as NAC_MAX_CLIENT */
2479 #define DP_NAC_MAX_CLIENT  24
2480 
2481 /*
2482  * 24 bits cookie size
2483  * 10 bits page id 0 ~ 1023 for MCL
2484  * 3 bits page id 0 ~ 7 for WIN
2485  * WBM Idle List Desc size = 128,
2486  * Num descs per page = 4096/128 = 32 for MCL
2487  * Num descs per page = 2MB/128 = 16384 for WIN
2488  */
2489 /*
2490  * Macros to setup link descriptor cookies - for link descriptors, we just
2491  * need first 3 bits to store bank/page ID for WIN. The
2492  * remaining bytes will be used to set a unique ID, which will
2493  * be useful in debugging
2494  */
2495 #ifdef MAX_ALLOC_PAGE_SIZE
2496 #define LINK_DESC_PAGE_ID_MASK  0x007FE0
2497 #define LINK_DESC_ID_SHIFT      5
2498 #define LINK_DESC_COOKIE(_desc_id, _page_id, _desc_id_start) \
2499 	((((_page_id) + (_desc_id_start)) << LINK_DESC_ID_SHIFT) | (_desc_id))
2500 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
2501 	(((_cookie) & LINK_DESC_PAGE_ID_MASK) >> LINK_DESC_ID_SHIFT)
2502 #else
2503 #define LINK_DESC_PAGE_ID_MASK  0x7
2504 #define LINK_DESC_ID_SHIFT      3
2505 #define LINK_DESC_COOKIE(_desc_id, _page_id, _desc_id_start) \
2506 	((((_desc_id) + (_desc_id_start)) << LINK_DESC_ID_SHIFT) | (_page_id))
2507 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
2508 	((_cookie) & LINK_DESC_PAGE_ID_MASK)
2509 #endif
2510 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x8000
2511 #define LINK_DESC_ID_START_20_BITS_COOKIE 0x4000
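/*
 * Worked example for the non-MAX_ALLOC_PAGE_SIZE variant above (values are
 * illustrative only): with _desc_id_start = LINK_DESC_ID_START_21_BITS_COOKIE
 * (0x8000), _desc_id = 10 and _page_id = 2,
 *	LINK_DESC_COOKIE(10, 2, 0x8000)   = ((10 + 0x8000) << 3) | 2 = 0x40052
 *	LINK_DESC_COOKIE_PAGE_ID(0x40052) = 0x40052 & 0x7            = 2
 * i.e. the low LINK_DESC_ID_SHIFT bits carry the page ID while the upper
 * bits carry a unique descriptor ID that is useful in debugging.
 */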
2512 
2513 /* same as ieee80211_nac_param */
2514 enum dp_nac_param_cmd {
2515 	/* IEEE80211_NAC_PARAM_ADD */
2516 	DP_NAC_PARAM_ADD = 1,
2517 	/* IEEE80211_NAC_PARAM_DEL */
2518 	DP_NAC_PARAM_DEL,
2519 	/* IEEE80211_NAC_PARAM_LIST */
2520 	DP_NAC_PARAM_LIST,
2521 };
2522 
2523 /**
2524  * struct dp_neighbour_peer - neighbour peer list type for smart mesh
2525  * @neighbour_peers_macaddr: neighbour peer's mac address
2526  * @neighbour_peer_list_elem: neighbour peer list TAILQ element
2527  * @ast_entry: ast_entry for neighbour peer
2528  * @rssi: rssi value
2529  */
2530 struct dp_neighbour_peer {
2531 	/* MAC address of neighbour's peer */
2532 	union dp_align_mac_addr neighbour_peers_macaddr;
2533 	struct dp_vdev *vdev;
2534 	struct dp_ast_entry *ast_entry;
2535 	uint8_t rssi;
2536 	/* node in the list of neighbour's peer */
2537 	TAILQ_ENTRY(dp_neighbour_peer) neighbour_peer_list_elem;
2538 };
2539 
2540 #ifdef WLAN_TX_PKT_CAPTURE_ENH
2541 #define WLAN_TX_PKT_CAPTURE_ENH 1
2542 #define DP_TX_PPDU_PROC_THRESHOLD 8
2543 #define DP_TX_PPDU_PROC_TIMEOUT 10
2544 #endif
2545 
2546 /**
2547  * struct ppdu_info - PPDU Status info descriptor
2548  * @ppdu_id: Unique ppduid assigned by firmware for every tx packet
2549  * @sched_cmdid: schedule command id, which will be same in a burst
2550  * @max_ppdu_id: wrap around for ppdu id
2551  * @last_tlv_cnt: Keep track for missing ppdu tlvs
2552  * @last_user: last ppdu processed for user
2553  * @is_ampdu: set if Ampdu aggregate
2554  * @nbuf: ppdu descriptor payload
2555  * @ppdu_desc: ppdu descriptor
2556  * @ppdu_info_list_elem: linked list of ppdu tlvs
2557  * @ppdu_info_queue_elem: Singly linked list (queue) of ppdu tlvs
2558  * @compltn_common_tlv: Successful tlv counter from COMPLTN COMMON tlv
2559  * @ack_ba_tlv: Successful tlv counter from ACK BA tlv
2560  */
2561 struct ppdu_info {
2562 	uint32_t ppdu_id;
2563 	uint32_t sched_cmdid;
2564 	uint32_t max_ppdu_id;
2565 	uint32_t tsf_l32;
2566 	uint16_t tlv_bitmap;
2567 	uint16_t last_tlv_cnt;
2568 	uint16_t last_user:8,
2569 		 is_ampdu:1;
2570 	qdf_nbuf_t nbuf;
2571 	struct cdp_tx_completion_ppdu *ppdu_desc;
2572 #ifdef WLAN_TX_PKT_CAPTURE_ENH
2573 	union {
2574 		TAILQ_ENTRY(ppdu_info) ppdu_info_dlist_elem;
2575 		STAILQ_ENTRY(ppdu_info) ppdu_info_slist_elem;
2576 	} ulist;
2577 #define ppdu_info_list_elem ulist.ppdu_info_dlist_elem
2578 #define ppdu_info_queue_elem ulist.ppdu_info_slist_elem
2579 #else
2580 	TAILQ_ENTRY(ppdu_info) ppdu_info_list_elem;
2581 #endif
2582 	uint8_t compltn_common_tlv;
2583 	uint8_t ack_ba_tlv;
2584 	bool done;
2585 };
2586 
2587 /**
2588  * struct msdu_completion_info - wbm msdu completion info
2589  * @ppdu_id            - Unique ppduid assigned by firmware for every tx packet
2590  * @peer_id            - peer_id
2591  * @tid                - tid which used during transmit
2592  * @first_msdu         - first msdu indication
2593  * @last_msdu          - last msdu indication
2594  * @msdu_part_of_amsdu - msdu part of amsdu
2595  * @transmit_cnt       - retried count
2596  * @status             - transmit status
2597  * @tsf                - timestamp at which the msdu was transmitted
2598  */
2599 struct msdu_completion_info {
2600 	uint32_t ppdu_id;
2601 	uint16_t peer_id;
2602 	uint8_t tid;
2603 	uint8_t first_msdu:1,
2604 		last_msdu:1,
2605 		msdu_part_of_amsdu:1;
2606 	uint8_t transmit_cnt;
2607 	uint8_t status;
2608 	uint32_t tsf;
2609 };
2610 
2611 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
2612 struct rx_protocol_tag_map {
2613 	/* This is the user configured tag for the said protocol type */
2614 	uint16_t tag;
2615 };
2616 
2617 /**
2618  * struct rx_protocol_tag_stats - protocol statistics
2619  * @tag_ctr: number of rx msdus matching this tag
2620  * @mon_tag_ctr: number of msdus matching this tag in mon path
2621  */
2622 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
2623 struct rx_protocol_tag_stats {
2624 	uint32_t tag_ctr;
2625 #ifdef QCA_TEST_MON_PF_TAGS_STATS
2626 	uint32_t mon_tag_ctr;
2627 #endif
2628 };
2629 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
2630 
2631 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
2632 
2633 #ifdef WLAN_RX_PKT_CAPTURE_ENH
2634 /* Template data to be set for Enhanced RX Monitor packets */
2635 #define RX_MON_CAP_ENH_TRAILER 0xdeadc0dedeadda7a
2636 
2637 /**
2638  * struct dp_rx_mon_enh_trailer_data - Data structure to set a known pattern
2639  * at end of each MSDU in monitor-lite mode
2640  * @reserved1: reserved for future use
2641  * @reserved2: reserved for future use
2642  * @flow_tag: flow tag value read from skb->cb
2643  * @protocol_tag: protocol tag value read from skb->cb
2644  */
2645 struct dp_rx_mon_enh_trailer_data {
2646 	uint16_t reserved1;
2647 	uint16_t reserved2;
2648 	uint16_t flow_tag;
2649 	uint16_t protocol_tag;
2650 };
2651 #endif /* WLAN_RX_PKT_CAPTURE_ENH */
2652 
2653 #ifdef HTT_STATS_DEBUGFS_SUPPORT
2654 /* Number of debugfs entries created for HTT stats */
2655 #define PDEV_HTT_STATS_DBGFS_SIZE HTT_DBG_NUM_EXT_STATS
2656 
2657 /* struct pdev_htt_stats_dbgfs_priv - Structure to maintain debugfs information
2658  * of HTT stats
2659  * @pdev: dp pdev of debugfs entry
2660  * @stats_id: stats id of debugfs entry
2661  */
2662 struct pdev_htt_stats_dbgfs_priv {
2663 	struct dp_pdev *pdev;
2664 	uint16_t stats_id;
2665 };
2666 
2667 /* struct pdev_htt_stats_dbgfs_cfg - PDEV level data structure for debugfs
2668  * support for HTT stats
2669  * @debugfs_entry: qdf_debugfs directory entry
2670  * @m: qdf debugfs file handler
2671  * @pdev_htt_stats_dbgfs_ops: File operations of entry created
2672  * @priv: HTT stats debugfs private object
2673  * @htt_stats_dbgfs_event: HTT stats event for debugfs support
2674  * @lock: HTT stats debugfs lock
2675  * @htt_stats_dbgfs_msg_process: Function callback to print HTT stats
2676  */
2677 struct pdev_htt_stats_dbgfs_cfg {
2678 	qdf_dentry_t debugfs_entry[PDEV_HTT_STATS_DBGFS_SIZE];
2679 	qdf_debugfs_file_t m;
2680 	struct qdf_debugfs_fops
2681 			pdev_htt_stats_dbgfs_ops[PDEV_HTT_STATS_DBGFS_SIZE - 1];
2682 	struct pdev_htt_stats_dbgfs_priv priv[PDEV_HTT_STATS_DBGFS_SIZE - 1];
2683 	qdf_event_t htt_stats_dbgfs_event;
2684 	qdf_mutex_t lock;
2685 	void (*htt_stats_dbgfs_msg_process)(void *data, A_INT32 len);
2686 };
2687 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
2688 
2689 struct dp_srng_ring_state {
2690 	enum hal_ring_type ring_type;
2691 	uint32_t sw_head;
2692 	uint32_t sw_tail;
2693 	uint32_t hw_head;
2694 	uint32_t hw_tail;
2695 
2696 };
2697 
2698 struct dp_soc_srngs_state {
2699 	uint32_t seq_num;
2700 	uint32_t max_ring_id;
2701 	struct dp_srng_ring_state ring_state[DP_MAX_SRNGS];
2702 	TAILQ_ENTRY(dp_soc_srngs_state) list_elem;
2703 };
2704 
2705 #ifdef WLAN_FEATURE_11BE_MLO
2706 /* struct dp_mlo_sync_timestamp - PDEV level data structure for storing
2707  * MLO timestamp received via HTT msg.
2708  * @msg_type: This would be set to HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND
2709  * @pdev_id: pdev_id
2710  * @chip_id: chip_id
2711  * @mac_clk_freq: mac clock frequency of the mac HW block in MHz
2712  * @sync_tstmp_lo_us: lower 32 bits of the WLAN global time stamp (in us) at
2713  *                   which last sync interrupt was received
2714  * @sync_tstmp_hi_us: upper 32 bits of the WLAN global time stamp (in us) at
2715  *                   which last sync interrupt was received
2716  * @mlo_offset_lo_us: lower 32 bits of the MLO time stamp offset in us
2717  * @mlo_offset_hi_us: upper 32 bits of the MLO time stamp offset in us
2718  * @mlo_offset_clks:  MLO time stamp offset in clock ticks for sub us
2719  * @mlo_comp_us:      MLO time stamp compensation applied in us
2720  * @mlo_comp_clks:    MLO time stamp compensation applied in clock ticks
2721  *                   for sub us resolution
2722  * @mlo_comp_timer:   period of MLO compensation timer at which compensation
2723  *                   is applied, in us
2724  */
2725 struct dp_mlo_sync_timestamp {
2726 	uint32_t msg_type:8,
2727 		 pdev_id:2,
2728 		 chip_id:2,
2729 		 rsvd1:4,
2730 		 mac_clk_freq:16;
2731 	uint32_t sync_tstmp_lo_us;
2732 	uint32_t sync_tstmp_hi_us;
2733 	uint32_t mlo_offset_lo_us;
2734 	uint32_t mlo_offset_hi_us;
2735 	uint32_t mlo_offset_clks;
2736 	uint32_t mlo_comp_us:16,
2737 		 mlo_comp_clks:10,
2738 		 rsvd2:6;
2739 	uint32_t mlo_comp_timer:22,
2740 		 rsvd3:10;
2741 };
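/*
 * Illustrative reconstruction (a sketch; 'ts' is a hypothetical pointer to
 * the structure above) of the 64-bit values carried as lo/hi 32-bit halves:
 *
 *	uint64_t sync_tstmp_us = ((uint64_t)ts->sync_tstmp_hi_us << 32) |
 *				 ts->sync_tstmp_lo_us;
 *	uint64_t mlo_offset_us = ((uint64_t)ts->mlo_offset_hi_us << 32) |
 *				 ts->mlo_offset_lo_us;
 */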
2742 #endif
2743 
2744 /* PDEV level structure for data path */
2745 struct dp_pdev {
2746 	/**
2747 	 * Re-use Memory Section Starts
2748 	 */
2749 
2750 	/* PDEV Id */
2751 	int pdev_id;
2752 
2753 	/* LMAC Id */
2754 	int lmac_id;
2755 
2756 	/* Target pdev  Id */
2757 	int target_pdev_id;
2758 
2759 	/* TXRX SOC handle */
2760 	struct dp_soc *soc;
2761 
2762 	bool pdev_deinit;
2763 
2764 	/* pdev status down or up required to handle dynamic hw
2765 	 * mode switch between DBS and DBS_SBS.
2766 	 * 1 = down
2767 	 * 0 = up
2768 	 */
2769 	bool is_pdev_down;
2770 
2771 	/* Enhanced Stats is enabled */
2772 	bool enhanced_stats_en;
2773 
2774 	/* Second ring used to replenish rx buffers */
2775 	struct dp_srng rx_refill_buf_ring2;
2776 
2777 	/* Empty ring used by firmware to post rx buffers to the MAC */
2778 	struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];
2779 
2780 	int ch_band_lmac_id_mapping[REG_BAND_UNKNOWN];
2781 
2782 	/* wlan_cfg pdev ctxt*/
2783 	 struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx;
2784 
2785 	/**
2786 	 * TODO: See if we need a ring map here for LMAC rings.
2787 	 * 1. Monitor rings are currently planning to be processed on receiving
2788 	 * PPDU end interrupts and hence won't need ring based interrupts.
2789 	 * 2. Rx buffer rings will be replenished during REO destination
2790 	 * processing and doesn't require regular interrupt handling - we will
2791 	 * only handle low water mark interrupts which is not expected
2792 	 * frequently
2793 	 */
2794 
2795 	/* VDEV list */
2796 	TAILQ_HEAD(, dp_vdev) vdev_list;
2797 
2798 	/* vdev list lock */
2799 	qdf_spinlock_t vdev_list_lock;
2800 
2801 	/* Number of vdevs this device has */
2802 	uint16_t vdev_count;
2803 
2804 	/* PDEV transmit lock */
2805 	qdf_spinlock_t tx_lock;
2806 
2807 	/*tx_mutex for me*/
2808 	DP_MUTEX_TYPE tx_mutex;
2809 
2810 	/* msdu chain head & tail */
2811 	qdf_nbuf_t invalid_peer_head_msdu;
2812 	qdf_nbuf_t invalid_peer_tail_msdu;
2813 
2814 	/* Band steering  */
2815 	/* TBD */
2816 
2817 	/* PDEV level data path statistics */
2818 	struct cdp_pdev_stats stats;
2819 
2820 	/* Global RX decap mode for the device */
2821 	enum htt_pkt_type rx_decap_mode;
2822 
2823 	qdf_atomic_t num_tx_outstanding;
2824 	int32_t tx_descs_max;
2825 
2826 	qdf_atomic_t num_tx_exception;
2827 
2828 	/* MCL specific local peer handle */
2829 	struct {
2830 		uint8_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
2831 		uint8_t freelist;
2832 		qdf_spinlock_t lock;
2833 		struct dp_peer *map[OL_TXRX_NUM_LOCAL_PEER_IDS];
2834 	} local_peer_ids;
2835 
2836 	/* dscp_tid_map_*/
2837 	uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX];
2838 
2839 	/* operating channel */
2840 	struct {
2841 		uint8_t num;
2842 		uint8_t band;
2843 		uint16_t freq;
2844 	} operating_channel;
2845 
2846 	/* pool addr for mcast enhance buff */
2847 	struct {
2848 		int size;
2849 		uint32_t paddr;
2850 		char *vaddr;
2851 		struct dp_tx_me_buf_t *freelist;
2852 		int buf_in_use;
2853 		qdf_dma_mem_context(memctx);
2854 	} me_buf;
2855 
2856 	bool hmmc_tid_override_en;
2857 	uint8_t hmmc_tid;
2858 
2859 	/* Number of VAPs with mcast enhancement enabled */
2860 	qdf_atomic_t mc_num_vap_attached;
2861 
2862 	qdf_atomic_t stats_cmd_complete;
2863 
2864 #ifdef IPA_OFFLOAD
2865 	ipa_uc_op_cb_type ipa_uc_op_cb;
2866 	void *usr_ctxt;
2867 	struct dp_ipa_resources ipa_resource;
2868 #endif
2869 
2870 	/* TBD */
2871 
2872 	/* map this pdev to a particular Reo Destination ring */
2873 	enum cdp_host_reo_dest_ring reo_dest;
2874 
2875 	/* WDI event handlers */
2876 	struct wdi_event_subscribe_t **wdi_event_list;
2877 
2878 	bool cfr_rcc_mode;
2879 
2880 	/* enable time latency check for tx completion */
2881 	bool latency_capture_enable;
2882 
2883 	/* enable calculation of delay stats*/
2884 	bool delay_stats_flag;
2885 	void *dp_txrx_handle; /* Advanced data path handle */
2886 	uint32_t ppdu_id;
2887 	bool first_nbuf;
2888 	/* Current noise-floor reading for the pdev channel */
2889 	int16_t chan_noise_floor;
2890 
2891 	/*
2892 	 * For multiradio device, this flag indicates if
2893 	 * this radio is primary or secondary.
2894 	 *
2895 	 * For HK 1.0, this is used for WAR for the AST issue.
2896 	 * HK 1.x mandates creation of only 1 AST entry with same MAC address
2897 	 * across 2 radios. is_primary indicates the radio on which DP should
2898 	 * install HW AST entry if there is a request to add 2 AST entries
2899 	 * with same MAC address across 2 radios
2900 	 */
2901 	uint8_t is_primary;
2902 	struct cdp_tx_sojourn_stats sojourn_stats;
2903 	qdf_nbuf_t sojourn_buf;
2904 
2905 	union dp_rx_desc_list_elem_t *free_list_head;
2906 	union dp_rx_desc_list_elem_t *free_list_tail;
2907 	/* Cached peer_id from htt_peer_details_tlv */
2908 	uint16_t fw_stats_peer_id;
2909 
2910 	/* qdf_event for fw_peer_stats */
2911 	qdf_event_t fw_peer_stats_event;
2912 
2913 	/* User configured max number of tx buffers */
2914 	uint32_t num_tx_allowed;
2915 
2916 	/* unique cookie required for peer session */
2917 	uint32_t next_peer_cookie;
2918 
2919 	/*
2920 	 * Run time enabled when the first protocol tag is added,
2921 	 * run time disabled when the last protocol tag is deleted
2922 	 */
2923 	bool  is_rx_protocol_tagging_enabled;
2924 
2925 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
2926 	/*
2927 	 * The protocol type is used as array index to save
2928 	 * user provided tag info
2929 	 */
2930 	struct rx_protocol_tag_map rx_proto_tag_map[RX_PROTOCOL_TAG_MAX];
2931 
2932 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
2933 	/*
2934 	 * Track msdus received from each reo ring separately to avoid
2935 	 * simultaneous writes from different core
2936 	 */
2937 	struct rx_protocol_tag_stats
2938 		reo_proto_tag_stats[MAX_REO_DEST_RINGS][RX_PROTOCOL_TAG_MAX];
2939 	/* Track msdus received from exception ring separately */
2940 	struct rx_protocol_tag_stats
2941 		rx_err_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
2942 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
2943 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
2944 
2945 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
2946 	/**
2947 	 * Pointer to DP Flow FST at SOC level if
2948 	 * is_rx_flow_search_table_per_pdev is true
2949 	 */
2950 	struct dp_rx_fst *rx_fst;
2951 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
2952 
2953 #ifdef FEATURE_TSO_STATS
2954 	/* TSO Id to index into TSO packet information */
2955 	qdf_atomic_t tso_idx;
2956 #endif /* FEATURE_TSO_STATS */
2957 
2958 #ifdef WLAN_SUPPORT_DATA_STALL
2959 	data_stall_detect_cb data_stall_detect_callback;
2960 #endif /* WLAN_SUPPORT_DATA_STALL */
2961 
2962 	/* flag to indicate whether LRO hash command has been sent to FW */
2963 	uint8_t is_lro_hash_configured;
2964 
2965 #ifdef HTT_STATS_DEBUGFS_SUPPORT
2966 	/* HTT stats debugfs params */
2967 	struct pdev_htt_stats_dbgfs_cfg *dbgfs_cfg;
2968 #endif
2969 	struct {
2970 		qdf_work_t work;
2971 		qdf_workqueue_t *work_queue;
2972 		uint32_t seq_num;
2973 		uint8_t queue_depth;
2974 		qdf_spinlock_t list_lock;
2975 
2976 		TAILQ_HEAD(, dp_soc_srngs_state) list;
2977 	} bkp_stats;
2978 #ifdef WIFI_MONITOR_SUPPORT
2979 	struct dp_mon_pdev *monitor_pdev;
2980 #endif
2981 #ifdef WLAN_FEATURE_11BE_MLO
2982 	struct dp_mlo_sync_timestamp timestamp;
2983 #endif
2984 	/* Is isolation mode enabled */
2985 	bool  isolation;
2986 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
2987 	uint8_t is_first_wakeup_packet;
2988 #endif
2989 #ifdef CONNECTIVITY_PKTLOG
2990 	/* packetdump callback functions */
2991 	ol_txrx_pktdump_cb dp_tx_packetdump_cb;
2992 	ol_txrx_pktdump_cb dp_rx_packetdump_cb;
2993 #endif
2994 };
2995 
2996 struct dp_peer;
2997 
2998 #ifdef DP_RX_UDP_OVER_PEER_ROAM
2999 #define WLAN_ROAM_PEER_AUTH_STATUS_NONE 0x0
3000 /**
3001  * This macro is equivalent to macro ROAM_AUTH_STATUS_AUTHENTICATED used
3002  * in connection mgr
3003  */
3004 #define WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED 0x2
3005 #endif
3006 
3007 /* VDEV structure for data path state */
3008 struct dp_vdev {
3009 	/* OS device abstraction */
3010 	qdf_device_t osdev;
3011 
3012 	/* physical device that is the parent of this virtual device */
3013 	struct dp_pdev *pdev;
3014 
3015 	/* VDEV operating mode */
3016 	enum wlan_op_mode opmode;
3017 
3018 	/* VDEV subtype */
3019 	enum wlan_op_subtype subtype;
3020 
3021 	/* Tx encapsulation type for this VAP */
3022 	enum htt_cmn_pkt_type tx_encap_type;
3023 
3024 	/* Rx Decapsulation type for this VAP */
3025 	enum htt_cmn_pkt_type rx_decap_type;
3026 
3027 	/* WDS enabled */
3028 	bool wds_enabled;
3029 
3030 	/* MEC enabled */
3031 	bool mec_enabled;
3032 
3033 #ifdef QCA_SUPPORT_WDS_EXTENDED
3034 	bool wds_ext_enabled;
3035 #endif /* QCA_SUPPORT_WDS_EXTENDED */
3036 	bool drop_3addr_mcast;
3037 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
3038 	bool skip_bar_update;
3039 	unsigned long skip_bar_update_last_ts;
3040 #endif
3041 	/* WDS Aging timer period */
3042 	uint32_t wds_aging_timer_val;
3043 
3044 	/* NAWDS enabled */
3045 	bool nawds_enabled;
3046 
3047 	/* Multicast enhancement enabled */
3048 	uint8_t mcast_enhancement_en;
3049 
3050 	/* IGMP multicast enhancement enabled */
3051 	uint8_t igmp_mcast_enhanc_en;
3052 
3053 	/* vdev_id - ID used to specify a particular vdev to the target */
3054 	uint8_t vdev_id;
3055 
3056 	/* Default HTT meta data for this VDEV */
3057 	/* TBD: check alignment constraints */
3058 	uint16_t htt_tcl_metadata;
3059 
3060 	/* Mesh mode vdev */
3061 	uint32_t mesh_vdev;
3062 
3063 	/* Mesh mode rx filter setting */
3064 	uint32_t mesh_rx_filter;
3065 
3066 	/* DSCP-TID mapping table ID */
3067 	uint8_t dscp_tid_map_id;
3068 
3069 	/* Address search type to be set in TX descriptor */
3070 	uint8_t search_type;
3071 
3072 	/*
3073 	 * Flag to indicate if s/w tid classification should be
3074 	 * skipped
3075 	 */
3076 	uint8_t skip_sw_tid_classification;
3077 
3078 	/* Flag to enable peer authorization */
3079 	uint8_t peer_authorize;
3080 
3081 	/* AST hash value for BSS peer in HW valid for STA VAP*/
3082 	uint16_t bss_ast_hash;
3083 
3084 	/* vdev lmac_id */
3085 	int lmac_id;
3086 
3087 	bool multipass_en;
3088 
3089 	/* Address search flags to be configured in HAL descriptor */
3090 	uint8_t hal_desc_addr_search_flags;
3091 
3092 	/* Handle to the OS shim SW's virtual device */
3093 	ol_osif_vdev_handle osif_vdev;
3094 
3095 	/* MAC address */
3096 	union dp_align_mac_addr mac_addr;
3097 
3098 #ifdef WLAN_FEATURE_11BE_MLO
3099 	/* MLO MAC address corresponding to vdev */
3100 	union dp_align_mac_addr mld_mac_addr;
3101 #if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
3102 	bool mlo_vdev;
3103 #endif
3104 #endif
3105 
3106 	/* node in the pdev's list of vdevs */
3107 	TAILQ_ENTRY(dp_vdev) vdev_list_elem;
3108 
3109 	/* dp_peer list */
3110 	TAILQ_HEAD(, dp_peer) peer_list;
3111 	/* to protect peer_list */
3112 	DP_MUTEX_TYPE peer_list_lock;
3113 
3114 	/* RX call back function to flush GRO packets*/
3115 	ol_txrx_rx_gro_flush_ind_fp osif_gro_flush;
3116 	/* default RX call back function called by dp */
3117 	ol_txrx_rx_fp osif_rx;
3118 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
3119 	/* callback to receive eapol frames */
3120 	ol_txrx_rx_fp osif_rx_eapol;
3121 #endif
3122 	/* callback to deliver rx frames to the OS */
3123 	ol_txrx_rx_fp osif_rx_stack;
3124 	/* Callback to handle rx fisa frames */
3125 	ol_txrx_fisa_rx_fp osif_fisa_rx;
3126 	ol_txrx_fisa_flush_fp osif_fisa_flush;
3127 
3128 	/* call back function to flush out queued rx packets*/
3129 	ol_txrx_rx_flush_fp osif_rx_flush;
3130 	ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap;
3131 	ol_txrx_get_key_fp osif_get_key;
3132 	ol_txrx_tx_free_ext_fp osif_tx_free_ext;
3133 
3134 #ifdef notyet
3135 	/* callback to check if the msdu is an WAI (WAPI) frame */
3136 	ol_rx_check_wai_fp osif_check_wai;
3137 #endif
3138 
3139 	/* proxy arp function */
3140 	ol_txrx_proxy_arp_fp osif_proxy_arp;
3141 
3142 	ol_txrx_mcast_me_fp me_convert;
3143 
3144 	/* completion function used by this vdev*/
3145 	ol_txrx_completion_fp tx_comp;
3146 
3147 	ol_txrx_get_tsf_time get_tsf_time;
3148 
3149 	/* callback to classify critical packets */
3150 	ol_txrx_classify_critical_pkt_fp tx_classify_critical_pkt_cb;
3151 
3152 	/* deferred vdev deletion state */
3153 	struct {
3154 		/* VDEV delete pending */
3155 		int pending;
3156 		/*
3157 		* callback and a context argument to provide a
3158 		* notification for when the vdev is deleted.
3159 		*/
3160 		ol_txrx_vdev_delete_cb callback;
3161 		void *context;
3162 	} delete;
3163 
3164 	/* tx data delivery notification callback function */
3165 	struct {
3166 		ol_txrx_data_tx_cb func;
3167 		void *ctxt;
3168 	} tx_non_std_data_callback;
3169 
3170 
3171 	/* safe mode control to bypass the encrypt and decipher process*/
3172 	uint32_t safemode;
3173 
3174 	/* rx filter related */
3175 	uint32_t drop_unenc;
3176 #ifdef notyet
3177 	privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
3178 	uint32_t filters_num;
3179 #endif
3180 	/* TDLS Link status */
3181 	bool tdls_link_connected;
3182 	bool is_tdls_frame;
3183 
3184 	/* per vdev rx nbuf queue */
3185 	qdf_nbuf_queue_t rxq;
3186 
3187 	uint8_t tx_ring_id;
3188 	struct dp_tx_desc_pool_s *tx_desc;
3189 	struct dp_tx_ext_desc_pool_s *tx_ext_desc;
3190 
3191 	/* VDEV Stats */
3192 	struct cdp_vdev_stats stats;
3193 
3194 	/* Is this a proxySTA VAP */
3195 	uint8_t proxysta_vdev : 1, /* Is this a proxySTA VAP */
3196 		wrap_vdev : 1, /* Is this a QWRAP AP VAP */
3197 		isolation_vdev : 1, /* Is isolation mode enabled on this VAP */
3198 		reserved : 5; /* Reserved */
3199 
3200 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3201 	struct dp_tx_desc_pool_s *pool;
3202 #endif
3203 	/* AP BRIDGE enabled */
3204 	bool ap_bridge_enabled;
3205 
3206 	enum cdp_sec_type  sec_type;
3207 
3208 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
3209 	bool raw_mode_war;
3210 
3211 
3212 	/* AST hash index for BSS peer in HW valid for STA VAP*/
3213 	uint16_t bss_ast_idx;
3214 
3215 	/* Capture timestamp of previous tx packet enqueued */
3216 	uint64_t prev_tx_enq_tstamp;
3217 
3218 	/* Capture timestamp of previous rx packet delivered */
3219 	uint64_t prev_rx_deliver_tstamp;
3220 
3221 	/* 8021p PCP-TID mapping table ID */
3222 	uint8_t tidmap_tbl_id;
3223 
3224 	/* 8021p PCP-TID map values */
3225 	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
3226 
3227 	/* TIDmap priority */
3228 	uint8_t tidmap_prty;
3229 
3230 #ifdef QCA_MULTIPASS_SUPPORT
3231 	uint16_t *iv_vlan_map;
3232 
3233 	/* dp_peer special list */
3234 	TAILQ_HEAD(, dp_txrx_peer) mpass_peer_list;
3235 	DP_MUTEX_TYPE mpass_peer_mutex;
3236 #endif
3237 	/* Extended data path handle */
3238 	struct cdp_ext_vdev *vdev_dp_ext_handle;
3239 #ifdef VDEV_PEER_PROTOCOL_COUNT
3240 	/*
3241 	 * Rx-Ingress and Tx-Egress are in the lower level DP layer
3242 	 * Rx-Egress and Tx-ingress are handled in osif layer for DP
3243 	 * So
3244 	 * Rx-Egress and Tx-ingress mask definitions are in OSIF layer
3245 	 * Rx-Ingress and Tx-Egress definitions are here below
3246 	 */
3247 #define VDEV_PEER_PROTOCOL_RX_INGRESS_MASK 1
3248 #define VDEV_PEER_PROTOCOL_TX_INGRESS_MASK 2
3249 #define VDEV_PEER_PROTOCOL_RX_EGRESS_MASK 4
3250 #define VDEV_PEER_PROTOCOL_TX_EGRESS_MASK 8
3251 	bool peer_protocol_count_track;
3252 	int peer_protocol_count_dropmask;
3253 #endif
3254 	/* callback to collect connectivity stats */
3255 	ol_txrx_stats_rx_fp stats_cb;
3256 	uint32_t num_peers;
3257 	/* entry to inactive_list*/
3258 	TAILQ_ENTRY(dp_vdev) inactive_list_elem;
3259 
3260 #ifdef WLAN_SUPPORT_RX_FISA
3261 	/**
3262 	 * Params used for controlling the fisa aggregation dynamically
3263 	 */
3264 	uint8_t fisa_disallowed[MAX_REO_DEST_RINGS];
3265 	uint8_t fisa_force_flushed[MAX_REO_DEST_RINGS];
3266 #endif
3267 	/*
3268 	 * Refcount for VDEV currently incremented when
3269 	 * peer is created for VDEV
3270 	 */
3271 	qdf_atomic_t ref_cnt;
3272 	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
3273 	uint8_t num_latency_critical_conn;
3274 #ifdef WLAN_SUPPORT_MESH_LATENCY
3275 	uint8_t peer_tid_latency_enabled;
3276 	/* tid latency configuration parameters */
3277 	struct {
3278 		uint32_t service_interval;
3279 		uint32_t burst_size;
3280 		uint8_t latency_tid;
3281 	} mesh_tid_latency_config;
3282 #endif
3283 #ifdef WIFI_MONITOR_SUPPORT
3284 	struct dp_mon_vdev *monitor_vdev;
3285 #endif
3286 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(CONFIG_SAWF)
3287 	/* Delta between TQM clock and TSF clock */
3288 	uint32_t delta_tsf;
3289 #endif
3290 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
3291 	/* Indicate if uplink delay report is enabled or not */
3292 	qdf_atomic_t ul_delay_report;
3293 	/* accumulative delay for every TX completion */
3294 	qdf_atomic_t ul_delay_accum;
3295 	/* accumulative number of packets delay has accumulated */
3296 	qdf_atomic_t ul_pkts_accum;
3297 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
3298 
3299 	/* vdev_stats_id - ID used for stats collection by FW from HW*/
3300 	uint8_t vdev_stats_id;
3301 #ifdef HW_TX_DELAY_STATS_ENABLE
3302 	/* hw tx delay stats enable */
3303 	uint8_t hw_tx_delay_stats_enabled;
3304 #endif
3305 #ifdef DP_RX_UDP_OVER_PEER_ROAM
3306 	uint32_t roaming_peer_status;
3307 	union dp_align_mac_addr roaming_peer_mac;
3308 #endif
3309 };
3310 
3311 enum {
3312 	dp_sec_mcast = 0,
3313 	dp_sec_ucast
3314 };
3315 
3316 #ifdef WDS_VENDOR_EXTENSION
3317 typedef struct {
3318 	uint8_t	wds_tx_mcast_4addr:1,
3319 		wds_tx_ucast_4addr:1,
3320 		wds_rx_filter:1,      /* enforce rx filter */
3321 		wds_rx_ucast_4addr:1, /* when set, accept 4addr unicast frames    */
3322 		wds_rx_mcast_4addr:1;  /* when set, accept 4addr multicast frames  */
3323 
3324 } dp_ecm_policy;
3325 #endif
3326 
3327 /*
3328  * struct dp_peer_cached_bufq - cached_bufq to enqueue rx packets
3329  * @cached_bufq: nbuff list to enqueue rx packets
3330  * @bufq_lock: spinlock for nbuff list access
3331  * @thresh: maximum threshold for number of rx buff to enqueue
3332  * @entries: number of entries
3333  * @dropped: number of packets dropped
3334  */
3335 struct dp_peer_cached_bufq {
3336 	qdf_list_t cached_bufq;
3337 	qdf_spinlock_t bufq_lock;
3338 	uint32_t thresh;
3339 	uint32_t entries;
3340 	uint32_t dropped;
3341 };
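/*
 * Illustrative enqueue policy for the cached bufq above (a sketch;
 * 'bufq_info' and 'node' are hypothetical): rx packets arriving before the
 * peer is fully registered are queued up to @thresh entries, and anything
 * beyond that is accounted in @dropped.
 *
 *	qdf_spin_lock_bh(&bufq_info->bufq_lock);
 *	if (bufq_info->entries >= bufq_info->thresh) {
 *		bufq_info->dropped++;
 *	} else {
 *		qdf_list_insert_back(&bufq_info->cached_bufq, &node->node);
 *		bufq_info->entries++;
 *	}
 *	qdf_spin_unlock_bh(&bufq_info->bufq_lock);
 */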
3342 
3343 /**
3344  * enum dp_peer_ast_flowq
3345  * @DP_PEER_AST_FLOWQ_HI_PRIO: Hi Priority flow queue
3346  * @DP_PEER_AST_FLOWQ_LOW_PRIO: Low priority flow queue
3347  * @DP_PEER_AST_FLOWQ_UDP: flow queue type is UDP
3348  * @DP_PEER_AST_FLOWQ_NON_UDP: flow queue type is Non UDP
3349  */
3350 enum dp_peer_ast_flowq {
3351 	DP_PEER_AST_FLOWQ_HI_PRIO,
3352 	DP_PEER_AST_FLOWQ_LOW_PRIO,
3353 	DP_PEER_AST_FLOWQ_UDP,
3354 	DP_PEER_AST_FLOWQ_NON_UDP,
3355 	DP_PEER_AST_FLOWQ_MAX,
3356 };
3357 
3358 /*
3359  * struct dp_ast_flow_override_info - ast override info
3360  * @ast_idx - ast indexes in peer map message
3361  * @ast_valid_mask - ast valid mask for each ast index
3362  * @ast_flow_mask - ast flow mask for each ast index
3363  * @tid_valid_low_pri_mask - per tid mask for low priority flow
3364  * @tid_valid_hi_pri_mask - per tid mask for hi priority flow
3365  */
3366 struct dp_ast_flow_override_info {
3367 	uint16_t ast_idx[DP_PEER_AST_FLOWQ_MAX];
3368 	uint8_t ast_valid_mask;
3369 	uint8_t ast_flow_mask[DP_PEER_AST_FLOWQ_MAX];
3370 	uint8_t tid_valid_low_pri_mask;
3371 	uint8_t tid_valid_hi_pri_mask;
3372 };
3373 
3374 /*
3375  * struct dp_peer_ast_params - ast parameters for a msdu flow-queue
3376  * @ast_idx - ast index populated by FW
3377  * @is_valid - ast flow valid mask
3378  * @valid_tid_mask - per tid mask for this ast index
3379  * @flowQ - flow queue id associated with this ast index
3380  */
3381 struct dp_peer_ast_params {
3382 	uint16_t ast_idx;
3383 	uint8_t is_valid;
3384 	uint8_t valid_tid_mask;
3385 	uint8_t flowQ;
3386 };
3387 
3388 #ifdef WLAN_SUPPORT_SCS
3389 /* SCS procedures macros */
3390 /* SCS Procedures - SCS parameters
3391  * obtained from SCS request are stored
3392  * in a peer based database for traffic
3393  * classification.
3394  */
3395 #define IEEE80211_SCS_MAX_NO_OF_ELEM 10
3396 #endif
3397 
3398 #define DP_MLO_FLOW_INFO_MAX	3
3399 
3400 /**
3401  * struct dp_mlo_flow_override_info - Flow override info
3402  * @ast_idx: Primary TCL AST Index
3403  * @ast_idx_valid: Is AST index valid
3404  * @chip_id: CHIP ID
3405  * @tidmask: tidmask
3406  * @cache_set_num: Cache set number
3407  */
3408 struct dp_mlo_flow_override_info {
3409 	uint16_t ast_idx;
3410 	uint8_t ast_idx_valid;
3411 	uint8_t chip_id;
3412 	uint8_t tidmask;
3413 	uint8_t cache_set_num;
3414 };
3415 
3416 /**
3417  * struct dp_mlo_link_info - Link info
3418  * @peer_chip_id: Peer Chip ID
3419  * @vdev_id: Vdev ID
3420  */
3421 struct dp_mlo_link_info {
3422 	uint8_t peer_chip_id;
3423 	uint8_t vdev_id;
3424 };
3425 
3426 #ifdef WLAN_SUPPORT_MSCS
3427 /*MSCS Procedure based macros */
3428 #define IEEE80211_MSCS_MAX_ELEM_SIZE    5
3429 #define IEEE80211_TCLAS_MASK_CLA_TYPE_4  4
3430 /*
3431  * struct dp_peer_mscs_parameter - MSCS database obtained from
3432  * MSCS Request and Response in the control path. This data is used
3433  * by the AP to find out what priority to set based on the tuple
3434  * classification during packet processing.
3435  * @user_priority_bitmap - User priority bitmap obtained during
3436  * handshake
3437  * @user_priority_limit - User priority limit obtained during
3438  * handshake
3439  * @classifier_mask - params to be compared during processing
3440  */
3441 struct dp_peer_mscs_parameter {
3442 	uint8_t user_priority_bitmap;
3443 	uint8_t user_priority_limit;
3444 	uint8_t classifier_mask;
3445 };
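
/*
 * Illustrative sketch only, not a driver API: one way a classifier might
 * consult the MSCS parameters above, assuming @user_priority_bitmap is a
 * bitmap of user priorities learned in the handshake and
 * @user_priority_limit caps the priority that may be assigned.
 */
static inline bool
dp_peer_mscs_up_allowed(struct dp_peer_mscs_parameter *param, uint8_t up)
{
	return (param->user_priority_bitmap & (1 << up)) &&
	       (up <= param->user_priority_limit);
}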
3446 #endif
3447 
3448 #ifdef QCA_SUPPORT_WDS_EXTENDED
3449 #define WDS_EXT_PEER_INIT_BIT 0
3450 
3451 /**
 * struct dp_wds_ext_peer - WDS ext peer structure
 * This is used when the WDS extended feature is enabled both at
 * compile time and at run time. It is created when the first
 * 4-address frame is received from the WDS backhaul.
 * @osif_peer: Handle to the OS shim SW's peer
 * @init: wds ext netdev state
3459  */
3460 struct dp_wds_ext_peer {
3461 	ol_osif_peer_handle osif_peer;
3462 	unsigned long init;
3463 };
3464 #endif /* QCA_SUPPORT_WDS_EXTENDED */
3465 
3466 #ifdef WLAN_SUPPORT_MESH_LATENCY
/* Advanced Mesh latency feature based macros */
/*
 * struct dp_peer_mesh_latency_parameter - Mesh latency related
3470  * parameters. This data is updated per peer per TID based on
3471  * the flow tuple classification in external rule database
3472  * during packet processing.
3473  * @service_interval_dl - Service interval associated with TID in DL
3474  * @burst_size_dl - Burst size additive over multiple flows in DL
3475  * @service_interval_ul - Service interval associated with TID in UL
3476  * @burst_size_ul - Burst size additive over multiple flows in UL
3477  * @ac - custom ac derived from service interval
3478  * @msduq - MSDU queue number within TID
3479  */
3480 struct dp_peer_mesh_latency_parameter {
3481 	uint32_t service_interval_dl;
3482 	uint32_t burst_size_dl;
3483 	uint32_t service_interval_ul;
3484 	uint32_t burst_size_ul;
3485 	uint8_t ac;
3486 	uint8_t msduq;
3487 };
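
/*
 * Illustrative sketch only, not a driver API: looks up the mesh latency
 * parameters kept per TID, assuming a caller-provided array of DP_MAX_TIDS
 * entries as used by the peer structure later in this file.
 */
static inline struct dp_peer_mesh_latency_parameter *
dp_peer_mesh_latency_get(struct dp_peer_mesh_latency_parameter *params,
			 uint8_t tid)
{
	if (tid >= DP_MAX_TIDS)
		return NULL;

	return &params[tid];
}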
3488 #endif
3489 
3490 #ifdef WLAN_FEATURE_11BE_MLO
3491 /* Max number of links for MLO connection */
3492 #define DP_MAX_MLO_LINKS 3
3493 
3494 /**
3495  * struct dp_peer_link_info - link peer information for MLO
 * @mac_addr: MAC address
3497  * @vdev_id: Vdev ID for current link peer
3498  * @is_valid: flag for link peer info valid or not
3499  * @chip_id: chip id
3500  */
3501 struct dp_peer_link_info {
3502 	union dp_align_mac_addr mac_addr;
3503 	uint8_t vdev_id;
3504 	uint8_t is_valid;
3505 	uint8_t chip_id;
3506 };
3507 
3508 /**
 * struct dp_mld_link_peers - this structure is used to get link peers
 *			      pointer from mld peer
3511  * @link_peers: link peers pointer array
3512  * @num_links: number of link peers fetched
3513  */
3514 struct dp_mld_link_peers {
3515 	struct dp_peer *link_peers[DP_MAX_MLO_LINKS];
3516 	uint8_t num_links;
3517 };
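
/*
 * Illustrative sketch only, not a driver API: returns one entry from a
 * dp_mld_link_peers container after the caller has filled it from an MLD
 * peer, guarding against indices beyond @num_links.
 */
static inline struct dp_peer *
dp_mld_link_peers_get(struct dp_mld_link_peers *link_peers_info, uint8_t idx)
{
	if (idx >= link_peers_info->num_links)
		return NULL;

	return link_peers_info->link_peers[idx];
}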
3518 #endif
3519 
3520 typedef void *dp_txrx_ref_handle;
3521 
3522 /**
 * struct dp_peer_per_pkt_tx_stats - Peer Tx stats updated in per pkt
3524  *				Tx completion path
 * @ucast: Unicast Packet Count
 * @mcast: Multicast Packet Count
 * @bcast: Broadcast Packet Count
 * @nawds_mcast: NAWDS Multicast Packet Count
 * @tx_success: Successful Tx Packets
3530  * @nawds_mcast_drop: NAWDS Multicast Drop Count
3531  * @ofdma: Total Packets as ofdma
3532  * @non_amsdu_cnt: Number of MSDUs with no MSDU level aggregation
3533  * @amsdu_cnt: Number of MSDUs part of AMSDU
 * @fw_rem: Discarded by firmware
3535  * @fw_rem_notx: firmware_discard_untransmitted
3536  * @fw_rem_tx: firmware_discard_transmitted
3537  * @age_out: aged out in mpdu/msdu queues
3538  * @fw_reason1: discarded by firmware reason 1
3539  * @fw_reason2: discarded by firmware reason 2
 * @fw_reason3: discarded by firmware reason 3
 * @fw_rem_queue_disable: discarded by firmware because the queue was disabled
 * @fw_rem_no_match: dropped due to fw no match command
 * @drop_threshold: dropped due to HW threshold
 * @drop_link_desc_na: dropped due to resource not available in HW
3544  * @invalid_drop: Invalid msdu drop
3545  * @mcast_vdev_drop: MCAST drop configured for VDEV in HW
3546  * @invalid_rr: Invalid TQM release reason
3547  * @failed_retry_count: packets failed due to retry above 802.11 retry limit
 * @retry_count: packets successfully sent after one or more retries
3549  * @multiple_retry_count: packets successfully sent after more than one retry
3550  * @no_ack_count: no ack pkt count for different protocols
3551  * @tx_success_twt: Successful Tx Packets in TWT session
3552  * @last_tx_ts: last timestamp in jiffies when tx comp occurred
3553  * @avg_sojourn_msdu[CDP_DATA_TID_MAX]: Avg sojourn msdu stat
3554  * @protocol_trace_cnt: per-peer protocol counter
3555  * @release_src_not_tqm: Counter to keep track of release source is not TQM
3556  *			 in TX completion status processing
3557  */
3558 struct dp_peer_per_pkt_tx_stats {
3559 	struct cdp_pkt_info ucast;
3560 	struct cdp_pkt_info mcast;
3561 	struct cdp_pkt_info bcast;
3562 	struct cdp_pkt_info nawds_mcast;
3563 	struct cdp_pkt_info tx_success;
3564 	uint32_t nawds_mcast_drop;
3565 	uint32_t ofdma;
3566 	uint32_t non_amsdu_cnt;
3567 	uint32_t amsdu_cnt;
3568 	struct {
3569 		struct cdp_pkt_info fw_rem;
3570 		uint32_t fw_rem_notx;
3571 		uint32_t fw_rem_tx;
3572 		uint32_t age_out;
3573 		uint32_t fw_reason1;
3574 		uint32_t fw_reason2;
3575 		uint32_t fw_reason3;
3576 		uint32_t fw_rem_queue_disable;
3577 		uint32_t fw_rem_no_match;
3578 		uint32_t drop_threshold;
3579 		uint32_t drop_link_desc_na;
3580 		uint32_t invalid_drop;
3581 		uint32_t mcast_vdev_drop;
3582 		uint32_t invalid_rr;
3583 	} dropped;
3584 	uint32_t failed_retry_count;
3585 	uint32_t retry_count;
3586 	uint32_t multiple_retry_count;
3587 	uint32_t no_ack_count[QDF_PROTO_SUBTYPE_MAX];
3588 	struct cdp_pkt_info tx_success_twt;
3589 	unsigned long last_tx_ts;
3590 	qdf_ewma_tx_lag avg_sojourn_msdu[CDP_DATA_TID_MAX];
3591 #ifdef VDEV_PEER_PROTOCOL_COUNT
3592 	struct protocol_trace_count protocol_trace_cnt[CDP_TRACE_MAX];
3593 #endif
3594 	uint32_t release_src_not_tqm;
3595 };
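
/*
 * Illustrative sketch only, not a driver API: accounts one successful
 * unicast Tx completion in dp_peer_per_pkt_tx_stats, assuming cdp_pkt_info
 * carries num/bytes counters as it is used throughout this structure.
 */
static inline void
dp_peer_per_pkt_tx_stats_inc_ucast(struct dp_peer_per_pkt_tx_stats *stats,
				   uint32_t bytes)
{
	stats->ucast.num++;
	stats->ucast.bytes += bytes;
	stats->tx_success.num++;
	stats->tx_success.bytes += bytes;
}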
3596 
3597 /**
3598  * struct dp_peer_extd_tx_stats - Peer Tx stats updated in either
3599  *	per pkt Tx completion path when macro QCA_ENHANCED_STATS_SUPPORT is
3600  *	disabled or in HTT Tx PPDU completion path when macro is enabled
3601  * @stbc: Packets in STBC
3602  * @ldpc: Packets in LDPC
3603  * @retries: Packet retries
3604  * @pkt_type[DOT11_MAX]: pkt count for different .11 modes
3605  * @wme_ac_type[WME_AC_MAX]: Wireless Multimedia type Count
 * @excess_retries_per_ac[WME_AC_MAX]: Excess retries per Wireless Multimedia AC
3607  * @ampdu_cnt: completion of aggregation
3608  * @non_ampdu_cnt: tx completion not aggregated
3609  * @num_ppdu_cookie_valid: no. of valid ppdu cookies rcvd from FW
3610  * @tx_ppdus: ppdus in tx
3611  * @tx_mpdus_success: mpdus successful in tx
3612  * @tx_mpdus_tried: mpdus tried in tx
3613  * @tx_rate: Tx Rate in kbps
3614  * @last_tx_rate: Last tx rate for unicast packets
3615  * @last_tx_rate_mcs: Tx rate mcs for unicast packets
3616  * @mcast_last_tx_rate: Last tx rate for multicast packets
3617  * @mcast_last_tx_rate_mcs: Last tx rate mcs for multicast
3618  * @rnd_avg_tx_rate: Rounded average tx rate
3619  * @avg_tx_rate: Average TX rate
3620  * @tx_ratecode: Tx rate code of last frame
3621  * @pream_punct_cnt: Preamble Punctured count
3622  * @sgi_count[MAX_GI]: SGI count
3623  * @nss[SS_COUNT]: Packet count for different num_spatial_stream values
3624  * @bw[MAX_BW]: Packet Count for different bandwidths
3625  * @ru_start: RU start index
3626  * @ru_tones: RU tones size
3627  * @ru_loc: pkt info for RU location 26/ 52/ 106/ 242/ 484 counter
3628  * @transmit_type: pkt info for tx transmit type
3629  * @mu_group_id: mumimo mu group id
3630  * @last_ack_rssi: RSSI of last acked packet
3631  * @nss_info: NSS 1,2, ...8
3632  * @mcs_info: MCS index
3633  * @bw_info: Bandwidth
3634  *       <enum 0 bw_20_MHz>
3635  *       <enum 1 bw_40_MHz>
3636  *       <enum 2 bw_80_MHz>
3637  *       <enum 3 bw_160_MHz>
3638  * @gi_info: <enum 0     0_8_us_sgi > Legacy normal GI
3639  *       <enum 1     0_4_us_sgi > Legacy short GI
3640  *       <enum 2     1_6_us_sgi > HE related GI
3641  *       <enum 3     3_2_us_sgi > HE
3642  * @preamble_info: preamble
 * @retries_mpdu: number of mpdus successfully transmitted after retries
3644  * @mpdu_success_with_retries: mpdu retry count in case of successful tx
3645  * @su_be_ppdu_cnt: SU Tx packet count for 11BE
3646  * @mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX]: MU Tx packet count for 11BE
3647  * @punc_bw[MAX_PUNCTURED_MODE]: MSDU count for punctured bw
3648  */
3649 struct dp_peer_extd_tx_stats {
3650 	uint32_t stbc;
3651 	uint32_t ldpc;
3652 	uint32_t retries;
3653 	struct cdp_pkt_type pkt_type[DOT11_MAX];
3654 	uint32_t wme_ac_type[WME_AC_MAX];
3655 	uint32_t excess_retries_per_ac[WME_AC_MAX];
3656 	uint32_t ampdu_cnt;
3657 	uint32_t non_ampdu_cnt;
3658 	uint32_t num_ppdu_cookie_valid;
3659 	uint32_t tx_ppdus;
3660 	uint32_t tx_mpdus_success;
3661 	uint32_t tx_mpdus_tried;
3662 
3663 	uint32_t tx_rate;
3664 	uint32_t last_tx_rate;
3665 	uint32_t last_tx_rate_mcs;
3666 	uint32_t mcast_last_tx_rate;
3667 	uint32_t mcast_last_tx_rate_mcs;
3668 	uint64_t rnd_avg_tx_rate;
3669 	uint64_t avg_tx_rate;
3670 	uint16_t tx_ratecode;
3671 
3672 	uint32_t sgi_count[MAX_GI];
3673 	uint32_t pream_punct_cnt;
3674 	uint32_t nss[SS_COUNT];
3675 	uint32_t bw[MAX_BW];
3676 	uint32_t ru_start;
3677 	uint32_t ru_tones;
3678 	struct cdp_tx_pkt_info ru_loc[MAX_RU_LOCATIONS];
3679 
3680 	struct cdp_tx_pkt_info transmit_type[MAX_TRANSMIT_TYPES];
3681 	uint32_t mu_group_id[MAX_MU_GROUP_ID];
3682 
3683 	uint32_t last_ack_rssi;
3684 
3685 	uint32_t nss_info:4,
3686 		 mcs_info:4,
3687 		 bw_info:4,
3688 		 gi_info:4,
3689 		 preamble_info:4;
3690 
3691 	uint32_t retries_mpdu;
3692 	uint32_t mpdu_success_with_retries;
3693 #ifdef WLAN_FEATURE_11BE
3694 	struct cdp_pkt_type su_be_ppdu_cnt;
3695 	struct cdp_pkt_type mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX];
3696 	uint32_t punc_bw[MAX_PUNCTURED_MODE];
3697 #endif
3698 };
3699 
3700 /**
3701  * struct dp_peer_per_pkt_rx_stats - Peer Rx stats updated in per pkt Rx path
3702  * @rcvd_reo[CDP_MAX_RX_RINGS]: Packets received on the reo ring
3703  * @unicast: Total unicast packets
3704  * @multicast: Total multicast packets
3705  * @bcast:  Broadcast Packet Count
 * @raw: Raw Packets received
3707  * @nawds_mcast_drop: Total NAWDS multicast packets dropped
3708  * @mec_drop: Total MEC packets dropped
3709  * @last_rx_ts: last timestamp in jiffies when RX happened
3710  * @intra_bss.pkts: Intra BSS packets received
3711  * @intra_bss.fail: Intra BSS packets failed
 * @intra_bss.mdns_no_fwd: Intra BSS MDNS packets not forwarded
 * @mic_err: Rx MIC errors (CCMP)
 * @decrypt_err: Rx decryption errors
 * @fcserr: Rx FCS check failures
3716  * @pn_err: pn check failed
3717  * @oor_err: Rx OOR errors
3718  * @jump_2k_err: 2k jump errors
3719  * @rxdma_wifi_parse_err: rxdma wifi parse errors
3720  * @non_amsdu_cnt: Number of MSDUs with no MSDU level aggregation
3721  * @amsdu_cnt: Number of MSDUs part of AMSDU
3722  * @rx_retries: retries of packet in rx
3723  * @multipass_rx_pkt_drop: Dropped multipass rx pkt
3724  * @peer_unauth_rx_pkt_drop: Unauth rx packet drops
3725  * @policy_check_drop: policy check drops
3726  * @to_stack_twt: Total packets sent up the stack in TWT session
 * @protocol_trace_cnt: per-peer protocol counters
 * @mcast_3addr_drop: multicast 3-address packet drop count
3728  */
3729 struct dp_peer_per_pkt_rx_stats {
3730 	struct cdp_pkt_info rcvd_reo[CDP_MAX_RX_RINGS];
3731 	struct cdp_pkt_info unicast;
3732 	struct cdp_pkt_info multicast;
3733 	struct cdp_pkt_info bcast;
3734 	struct cdp_pkt_info raw;
3735 	uint32_t nawds_mcast_drop;
3736 	struct cdp_pkt_info mec_drop;
3737 	unsigned long last_rx_ts;
3738 	struct {
3739 		struct cdp_pkt_info pkts;
3740 		struct cdp_pkt_info fail;
3741 		uint32_t mdns_no_fwd;
3742 	} intra_bss;
3743 	struct {
3744 		uint32_t mic_err;
3745 		uint32_t decrypt_err;
3746 		uint32_t fcserr;
3747 		uint32_t pn_err;
3748 		uint32_t oor_err;
3749 		uint32_t jump_2k_err;
3750 		uint32_t rxdma_wifi_parse_err;
3751 	} err;
3752 	uint32_t non_amsdu_cnt;
3753 	uint32_t amsdu_cnt;
3754 	uint32_t rx_retries;
3755 	uint32_t multipass_rx_pkt_drop;
3756 	uint32_t peer_unauth_rx_pkt_drop;
3757 	uint32_t policy_check_drop;
3758 	struct cdp_pkt_info to_stack_twt;
3759 #ifdef VDEV_PEER_PROTOCOL_COUNT
3760 	struct protocol_trace_count protocol_trace_cnt[CDP_TRACE_MAX];
3761 #endif
3762 	uint32_t mcast_3addr_drop;
3763 };
3764 
3765 /**
3766  * struct dp_peer_extd_rx_stats - Peer Rx stats updated in either
3767  *	per pkt Rx path when macro QCA_ENHANCED_STATS_SUPPORT is disabled or in
 *	Rx monitor path when macro is enabled
3769  * @pkt_type[DOT11_MAX]: pkt counter for different .11 modes
3770  * @wme_ac_type[WME_AC_MAX]: Wireless Multimedia type Count
3771  * @mpdu_cnt_fcs_ok: SU Rx success mpdu count
3772  * @mpdu_cnt_fcs_err: SU Rx fail mpdu count
3773  * @non_ampdu_cnt: Number of MSDUs with no MPDU level aggregation
 * @ampdu_cnt: Number of MSDUs part of AMPDU
3775  * @rx_mpdus: mpdu in rx
3776  * @rx_ppdus: ppdu in rx
3777  * @su_ax_ppdu_cnt: SU Rx packet count for .11ax
3778  * @rx_mu[TXRX_TYPE_MU_MAX]: Rx MU stats
3779  * @reception_type[MAX_RECEPTION_TYPES]: Reception type of packets
3780  * @ppdu_cnt[MAX_RECEPTION_TYPES]: PPDU packet count in reception type
3781  * @sgi_count[MAX_GI]: sgi count
 * @nss[SS_COUNT]: packet count in spatial streams
3783  * @ppdu_nss[SS_COUNT]: PPDU packet count in spatial streams
3784  * @bw[MAX_BW]: Packet Count in different bandwidths
3785  * @rx_mpdu_cnt[MAX_MCS]: rx mpdu count per MCS rate
3786  * @rx_rate: Rx rate
3787  * @last_rx_rate: Previous rx rate
3788  * @rnd_avg_rx_rate: Rounded average rx rate
3789  * @avg_rx_rate: Average Rx rate
3790  * @rx_ratecode: Rx rate code of last frame
3791  * @avg_snr: Average snr
3792  * @rx_snr_measured_time: Time at which snr is measured
3793  * @snr: SNR of received signal
3794  * @last_snr: Previous snr
3795  * @nss_info: NSS 1,2, ...8
3796  * @mcs_info: MCS index
3797  * @bw_info: Bandwidth
3798  *       <enum 0 bw_20_MHz>
3799  *       <enum 1 bw_40_MHz>
3800  *       <enum 2 bw_80_MHz>
3801  *       <enum 3 bw_160_MHz>
3802  * @gi_info: <enum 0     0_8_us_sgi > Legacy normal GI
3803  *       <enum 1     0_4_us_sgi > Legacy short GI
3804  *       <enum 2     1_6_us_sgi > HE related GI
3805  *       <enum 3     3_2_us_sgi > HE
3806  * @preamble_info: preamble
3807  * @mpdu_retry_cnt: retries of mpdu in rx
3808  * @su_be_ppdu_cnt: SU Rx packet count for BE
3809  * @mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX]: MU rx packet count for BE
3810  * @punc_bw[MAX_PUNCTURED_MODE]: MSDU count for punctured bw
3811  */
3812 struct dp_peer_extd_rx_stats {
3813 	struct cdp_pkt_type pkt_type[DOT11_MAX];
3814 	uint32_t wme_ac_type[WME_AC_MAX];
3815 	uint32_t mpdu_cnt_fcs_ok;
3816 	uint32_t mpdu_cnt_fcs_err;
3817 	uint32_t non_ampdu_cnt;
3818 	uint32_t ampdu_cnt;
3819 	uint32_t rx_mpdus;
3820 	uint32_t rx_ppdus;
3821 
3822 	struct cdp_pkt_type su_ax_ppdu_cnt;
3823 	struct cdp_rx_mu rx_mu[TXRX_TYPE_MU_MAX];
3824 	uint32_t reception_type[MAX_RECEPTION_TYPES];
3825 	uint32_t ppdu_cnt[MAX_RECEPTION_TYPES];
3826 
3827 	uint32_t sgi_count[MAX_GI];
3828 	uint32_t nss[SS_COUNT];
3829 	uint32_t ppdu_nss[SS_COUNT];
3830 	uint32_t bw[MAX_BW];
3831 	uint32_t rx_mpdu_cnt[MAX_MCS];
3832 
3833 	uint32_t rx_rate;
3834 	uint32_t last_rx_rate;
3835 	uint32_t rnd_avg_rx_rate;
3836 	uint32_t avg_rx_rate;
3837 	uint32_t rx_ratecode;
3838 
3839 	uint32_t avg_snr;
3840 	uint32_t rx_snr_measured_time;
3841 	uint8_t snr;
3842 	uint8_t last_snr;
3843 
3844 	uint32_t nss_info:4,
3845 		 mcs_info:4,
3846 		 bw_info:4,
3847 		 gi_info:4,
3848 		 preamble_info:4;
3849 
3850 	uint32_t mpdu_retry_cnt;
3851 #ifdef WLAN_FEATURE_11BE
3852 	struct cdp_pkt_type su_be_ppdu_cnt;
3853 	struct cdp_pkt_type mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX];
3854 	uint32_t punc_bw[MAX_PUNCTURED_MODE];
3855 #endif
3856 };
3857 
3858 /**
3859  * struct dp_peer_per_pkt_stats - Per pkt stats for peer
3860  * @tx: Per pkt Tx stats
3861  * @rx: Per pkt Rx stats
3862  */
3863 struct dp_peer_per_pkt_stats {
3864 	struct dp_peer_per_pkt_tx_stats tx;
3865 	struct dp_peer_per_pkt_rx_stats rx;
3866 };
3867 
3868 /**
3869  * struct dp_peer_extd_stats - Stats from extended path for peer
3870  * @tx: Extended path tx stats
3871  * @rx: Extended path rx stats
3872  */
3873 struct dp_peer_extd_stats {
3874 	struct dp_peer_extd_tx_stats tx;
3875 	struct dp_peer_extd_rx_stats rx;
3876 };
3877 
3878 /**
3879  * struct dp_peer_stats - Peer stats
3880  * @per_pkt_stats: Per packet path stats
3881  * @extd_stats: Extended path stats
3882  */
3883 struct dp_peer_stats {
3884 	struct dp_peer_per_pkt_stats per_pkt_stats;
3885 #ifndef QCA_ENHANCED_STATS_SUPPORT
3886 	struct dp_peer_extd_stats extd_stats;
3887 #endif
3888 };
3889 
3890 /**
 * struct dp_txrx_peer - DP txrx_peer structure used in per pkt path
 * @tx_failed: Total Tx failure
 * @comp_pkt: Pkt Info for which completions were received
3894  * @to_stack: Total packets sent up the stack
3895  * @stats: Peer stats
3896  * @delay_stats: Peer delay stats
3897  * @jitter_stats: Peer jitter stats
3898  * @bw: bandwidth of peer connection
3899  * @mpdu_retry_threshold: MPDU retry threshold to increment tx bad count
3900  */
3901 struct dp_txrx_peer {
3902 	/* Core TxRx Peer */
3903 
3904 	/* VDEV to which this peer is associated */
3905 	struct dp_vdev *vdev;
3906 
3907 	/* peer ID for this peer */
3908 	uint16_t peer_id;
3909 
3910 	uint8_t authorize:1, /* Set when authorized */
3911 		in_twt:1, /* in TWT session */
3912 		hw_txrx_stats_en:1, /*Indicate HW offload vdev stats */
3913 		mld_peer:1; /* MLD peer*/
3914 
3915 	uint32_t tx_failed;
3916 	struct cdp_pkt_info comp_pkt;
3917 	struct cdp_pkt_info to_stack;
3918 
3919 	struct dp_peer_stats stats;
3920 
3921 	struct dp_peer_delay_stats *delay_stats;
3922 
3923 	struct cdp_peer_tid_stats *jitter_stats;
3924 
3925 	struct {
3926 		enum cdp_sec_type sec_type;
3927 		u_int32_t michael_key[2]; /* relevant for TKIP */
3928 	} security[2]; /* 0 -> multicast, 1 -> unicast */
3929 
3930 	uint16_t nawds_enabled:1, /* NAWDS flag */
3931 		bss_peer:1, /* set for bss peer */
3932 		isolation:1, /* enable peer isolation for this peer */
3933 		wds_enabled:1; /* WDS peer */
3934 #ifdef WDS_VENDOR_EXTENSION
3935 	dp_ecm_policy wds_ecm;
3936 #endif
3937 #ifdef PEER_CACHE_RX_PKTS
3938 	qdf_atomic_t flush_in_progress;
3939 	struct dp_peer_cached_bufq bufq_info;
3940 #endif
3941 #ifdef QCA_MULTIPASS_SUPPORT
3942 	/* node in the special peer list element */
3943 	TAILQ_ENTRY(dp_txrx_peer) mpass_peer_list_elem;
3944 	/* vlan id for key */
3945 	uint16_t vlan_id;
3946 #endif
3947 #ifdef QCA_SUPPORT_WDS_EXTENDED
3948 	struct dp_wds_ext_peer wds_ext;
3949 	ol_txrx_rx_fp osif_rx;
3950 #endif
3951 	struct dp_rx_tid_defrag rx_tid[DP_MAX_TIDS];
3952 #ifdef CONFIG_SAWF
3953 	struct dp_peer_sawf_stats *sawf_stats;
3954 #endif
3955 #ifdef DP_PEER_EXTENDED_API
3956 	enum cdp_peer_bw bw;
3957 	uint8_t mpdu_retry_threshold;
3958 #endif
3959 };
3960 
3961 /* Peer structure for data path state */
3962 struct dp_peer {
3963 	struct dp_txrx_peer *txrx_peer;
3964 #ifdef WIFI_MONITOR_SUPPORT
3965 	struct dp_mon_peer *monitor_peer;
3966 #endif
3967 	/* peer ID for this peer */
3968 	uint16_t peer_id;
3969 
3970 	/* VDEV to which this peer is associated */
3971 	struct dp_vdev *vdev;
3972 
3973 	struct dp_ast_entry *self_ast_entry;
3974 
3975 	qdf_atomic_t ref_cnt;
3976 
3977 	union dp_align_mac_addr mac_addr;
3978 
3979 	/* node in the vdev's list of peers */
3980 	TAILQ_ENTRY(dp_peer) peer_list_elem;
3981 	/* node in the hash table bin's list of peers */
3982 	TAILQ_ENTRY(dp_peer) hash_list_elem;
3983 
3984 	/* TID structures pointer */
3985 	struct dp_rx_tid *rx_tid;
3986 
3987 	/* TBD: No transmit TID state required? */
3988 
3989 	struct {
3990 		enum cdp_sec_type sec_type;
3991 		u_int32_t michael_key[2]; /* relevant for TKIP */
3992 	} security[2]; /* 0 -> multicast, 1 -> unicast */
3993 
3994 	/* NAWDS Flag and Bss Peer bit */
3995 	uint16_t bss_peer:1, /* set for bss peer */
3996 		authorize:1, /* Set when authorized */
3997 		valid:1, /* valid bit */
3998 		delete_in_progress:1, /* Indicate kickout sent */
3999 		sta_self_peer:1, /* Indicate STA self peer */
4000 		is_tdls_peer:1; /* Indicate TDLS peer */
4001 
4002 #ifdef WLAN_FEATURE_11BE_MLO
4003 	uint8_t first_link:1, /* first link peer for MLO */
4004 		primary_link:1; /* primary link for MLO */
4005 #endif
4006 
4007 	/* MCL specific peer local id */
4008 	uint16_t local_id;
4009 	enum ol_txrx_peer_state state;
4010 	qdf_spinlock_t peer_info_lock;
4011 
4012 	/* Peer calibrated stats */
4013 	struct cdp_calibr_stats stats;
4014 
4015 	TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
4016 	/* TBD */
4017 
4018 	/* Active Block ack sessions */
4019 	uint16_t active_ba_session_cnt;
4020 
4021 	/* Current HW buffersize setting */
4022 	uint16_t hw_buffer_size;
4023 
4024 	/*
4025 	 * Flag to check if sessions with 256 buffersize
4026 	 * should be terminated.
4027 	 */
4028 	uint8_t kill_256_sessions;
4029 	qdf_atomic_t is_default_route_set;
4030 
4031 #ifdef QCA_PEER_MULTIQ_SUPPORT
4032 	struct dp_peer_ast_params peer_ast_flowq_idx[DP_PEER_AST_FLOWQ_MAX];
4033 #endif
4034 	/* entry to inactive_list*/
4035 	TAILQ_ENTRY(dp_peer) inactive_list_elem;
4036 
4037 	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
4038 
4039 	uint8_t peer_state;
4040 	qdf_spinlock_t peer_state_lock;
4041 #ifdef WLAN_SUPPORT_SCS
4042 	struct cdp_scs_params scs[IEEE80211_SCS_MAX_NO_OF_ELEM];
4043 	bool scs_is_active;
4044 	uint8_t no_of_scs_sessions;
4045 #endif
4046 #ifdef WLAN_SUPPORT_MSCS
4047 	struct dp_peer_mscs_parameter mscs_ipv4_parameter, mscs_ipv6_parameter;
4048 	bool mscs_active;
4049 #endif
4050 #ifdef WLAN_SUPPORT_MESH_LATENCY
4051 	struct dp_peer_mesh_latency_parameter mesh_latency_params[DP_MAX_TIDS];
4052 #endif
4053 #ifdef WLAN_FEATURE_11BE_MLO
4054 	/* peer type */
4055 	enum cdp_peer_type peer_type;
4056 	/*---------for link peer---------*/
4057 	struct dp_peer *mld_peer;
4058 
4059 	/*---------for mld peer----------*/
4060 	struct dp_peer_link_info link_peers[DP_MAX_MLO_LINKS];
4061 	uint8_t num_links;
4062 	DP_MUTEX_TYPE link_peers_info_lock;
4063 #endif
4064 #ifdef CONFIG_SAWF_DEF_QUEUES
4065 	struct dp_peer_sawf *sawf;
4066 #endif
4067 };
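
/*
 * Illustrative sketch only, not the driver's reference API: shows how
 * @ref_cnt and the per-module @mod_refs array relate when an extra
 * reference is taken on a peer. The real code uses dedicated peer
 * reference helpers; this only demonstrates the bookkeeping.
 */
static inline void
dp_peer_take_mod_ref_example(struct dp_peer *peer, enum dp_mod_id mod_id)
{
	qdf_atomic_inc(&peer->ref_cnt);
	qdf_atomic_inc(&peer->mod_refs[mod_id]);
}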
4068 
4069 /*
4070  * dp_invalid_peer_msg
4071  * @nbuf: data buffer
4072  * @wh: 802.11 header
4073  * @vdev_id: id of vdev
4074  */
4075 struct dp_invalid_peer_msg {
4076 	qdf_nbuf_t nbuf;
4077 	struct ieee80211_frame *wh;
4078 	uint8_t vdev_id;
4079 };
4080 
4081 /*
 * struct dp_tx_me_buf_t - ME buffer
 * @next: pointer to next buffer
 * @data: Destination Mac address
 * @paddr_macbuf: physical address for dest_mac
4086  */
4087 struct dp_tx_me_buf_t {
4088 	/* Note: ME buf pool initialization logic expects next pointer to
	 * be the first element. Don't add anything before next */
4090 	struct dp_tx_me_buf_t *next;
4091 	uint8_t data[QDF_MAC_ADDR_SIZE];
4092 	qdf_dma_addr_t paddr_macbuf;
4093 };
4094 
4095 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
4096 struct hal_rx_fst;
4097 
4098 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
4099 struct dp_rx_fse {
4100 	/* HAL Rx Flow Search Entry which matches HW definition */
4101 	void *hal_rx_fse;
4102 	/* Toeplitz hash value */
4103 	uint32_t flow_hash;
4104 	/* Flow index, equivalent to hash value truncated to FST size */
4105 	uint32_t flow_id;
4106 	/* Stats tracking for this flow */
4107 	struct cdp_flow_stats stats;
4108 	/* Flag indicating whether flow is IPv4 address tuple */
4109 	uint8_t is_ipv4_addr_entry;
4110 	/* Flag indicating whether flow is valid */
4111 	uint8_t is_valid;
4112 };
4113 
4114 struct dp_rx_fst {
4115 	/* Software (DP) FST */
4116 	uint8_t *base;
4117 	/* Pointer to HAL FST */
4118 	struct hal_rx_fst *hal_rx_fst;
4119 	/* Base physical address of HAL RX HW FST */
4120 	uint64_t hal_rx_fst_base_paddr;
4121 	/* Maximum number of flows FSE supports */
4122 	uint16_t max_entries;
4123 	/* Num entries in flow table */
4124 	uint16_t num_entries;
4125 	/* SKID Length */
4126 	uint16_t max_skid_length;
4127 	/* Hash mask to obtain legitimate hash entry */
4128 	uint32_t hash_mask;
4129 	/* Timer for bundling of flows */
4130 	qdf_timer_t cache_invalidate_timer;
4131 	/**
4132 	 * Flag which tracks whether cache update
4133 	 * is needed on timer expiry
4134 	 */
4135 	qdf_atomic_t is_cache_update_pending;
4136 	/* Flag to indicate completion of FSE setup in HW/FW */
4137 	bool fse_setup_done;
4138 };
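
/*
 * Illustrative sketch only, not a driver API: truncates a computed flow
 * hash to a flow table index with @hash_mask, as described for @flow_id in
 * struct dp_rx_fse above.
 */
static inline uint32_t
dp_rx_fst_flow_idx_example(struct dp_rx_fst *fst, uint32_t flow_hash)
{
	return flow_hash & fst->hash_mask;
}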
4139 
4140 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_rx_fse)
#elif defined(WLAN_SUPPORT_RX_FISA)
4142 
4143 struct dp_fisa_stats {
4144 	/* flow index invalid from RX HW TLV */
4145 	uint32_t invalid_flow_index;
4146 	uint32_t reo_mismatch;
4147 };
4148 
4149 enum fisa_aggr_ret {
4150 	FISA_AGGR_DONE,
4151 	FISA_AGGR_NOT_ELIGIBLE,
4152 	FISA_FLUSH_FLOW
4153 };
4154 
4155 /**
4156  * struct fisa_pkt_hist - FISA Packet history structure
4157  * @tlv_hist: array of TLV history
 * @ts_hist: array of timestamps of fisa packets
4159  * @idx: index indicating the next location to be used in the array.
4160  */
4161 struct fisa_pkt_hist {
4162 	uint8_t *tlv_hist;
4163 	qdf_time_t ts_hist[FISA_FLOW_MAX_AGGR_COUNT];
4164 	uint32_t idx;
4165 };
4166 
4167 struct dp_fisa_rx_sw_ft {
4168 	/* HAL Rx Flow Search Entry which matches HW definition */
4169 	void *hw_fse;
4170 	/* hash value */
4171 	uint32_t flow_hash;
4172 	/* toeplitz hash value*/
4173 	uint32_t flow_id_toeplitz;
4174 	/* Flow index, equivalent to hash value truncated to FST size */
4175 	uint32_t flow_id;
4176 	/* Stats tracking for this flow */
4177 	struct cdp_flow_stats stats;
4178 	/* Flag indicating whether flow is IPv4 address tuple */
4179 	uint8_t is_ipv4_addr_entry;
4180 	/* Flag indicating whether flow is valid */
4181 	uint8_t is_valid;
4182 	uint8_t is_populated;
4183 	uint8_t is_flow_udp;
4184 	uint8_t is_flow_tcp;
4185 	qdf_nbuf_t head_skb;
4186 	uint16_t cumulative_l4_checksum;
4187 	uint16_t adjusted_cumulative_ip_length;
4188 	uint16_t cur_aggr;
4189 	uint16_t napi_flush_cumulative_l4_checksum;
4190 	uint16_t napi_flush_cumulative_ip_length;
4191 	qdf_nbuf_t last_skb;
4192 	uint32_t head_skb_ip_hdr_offset;
4193 	uint32_t head_skb_l4_hdr_offset;
4194 	struct cdp_rx_flow_tuple_info rx_flow_tuple_info;
4195 	uint8_t napi_id;
4196 	struct dp_vdev *vdev;
4197 	uint64_t bytes_aggregated;
4198 	uint32_t flush_count;
4199 	uint32_t aggr_count;
4200 	uint8_t do_not_aggregate;
4201 	uint16_t hal_cumultive_ip_len;
4202 	struct dp_soc *soc_hdl;
4203 	/* last aggregate count fetched from RX PKT TLV */
4204 	uint32_t last_hal_aggr_count;
4205 	uint32_t cur_aggr_gso_size;
4206 	struct udphdr *head_skb_udp_hdr;
4207 	uint16_t frags_cumulative_len;
4208 	/* CMEM parameters */
4209 	uint32_t cmem_offset;
4210 	uint32_t metadata;
4211 	uint32_t reo_dest_indication;
4212 	qdf_time_t flow_init_ts;
4213 	qdf_time_t last_accessed_ts;
4214 #ifdef WLAN_SUPPORT_RX_FISA_HIST
4215 	struct fisa_pkt_hist pkt_hist;
4216 #endif
4217 };
4218 
4219 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_fisa_rx_sw_ft)
4220 #define MAX_FSE_CACHE_FL_HST 10
4221 /**
4222  * struct fse_cache_flush_history - Debug history cache flush
4223  * @timestamp: Entry update timestamp
4224  * @flows_added: Number of flows added for this flush
4225  * @flows_deleted: Number of flows deleted for this flush
4226  */
4227 struct fse_cache_flush_history {
4228 	uint64_t timestamp;
4229 	uint32_t flows_added;
4230 	uint32_t flows_deleted;
4231 };
4232 
4233 struct dp_rx_fst {
4234 	/* Software (DP) FST */
4235 	uint8_t *base;
4236 	/* Pointer to HAL FST */
4237 	struct hal_rx_fst *hal_rx_fst;
4238 	/* Base physical address of HAL RX HW FST */
4239 	uint64_t hal_rx_fst_base_paddr;
4240 	/* Maximum number of flows FSE supports */
4241 	uint16_t max_entries;
4242 	/* Num entries in flow table */
4243 	uint16_t num_entries;
4244 	/* SKID Length */
4245 	uint16_t max_skid_length;
4246 	/* Hash mask to obtain legitimate hash entry */
4247 	uint32_t hash_mask;
4248 	/* Lock for adding/deleting entries of FST */
4249 	qdf_spinlock_t dp_rx_fst_lock;
4250 	uint32_t add_flow_count;
4251 	uint32_t del_flow_count;
4252 	uint32_t hash_collision_cnt;
4253 	struct dp_soc *soc_hdl;
4254 	qdf_atomic_t fse_cache_flush_posted;
4255 	qdf_timer_t fse_cache_flush_timer;
4256 	/* Allow FSE cache flush cmd to FW */
4257 	bool fse_cache_flush_allow;
4258 	struct fse_cache_flush_history cache_fl_rec[MAX_FSE_CACHE_FL_HST];
4259 	/* FISA DP stats */
4260 	struct dp_fisa_stats stats;
4261 
4262 	/* CMEM params */
4263 	qdf_work_t fst_update_work;
4264 	qdf_workqueue_t *fst_update_wq;
4265 	qdf_list_t fst_update_list;
4266 	uint32_t meta_counter;
4267 	uint32_t cmem_ba;
4268 	qdf_spinlock_t dp_rx_sw_ft_lock[MAX_REO_DEST_RINGS];
4269 	qdf_event_t cmem_resp_event;
4270 	bool flow_deletion_supported;
4271 	bool fst_in_cmem;
4272 	bool pm_suspended;
4273 };
4274 
4275 #endif /* WLAN_SUPPORT_RX_FISA */
4276 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
4277 
4278 #ifdef WLAN_FEATURE_STATS_EXT
4279 /*
 * struct dp_req_rx_hw_stats_t - RX peer HW stats query structure
 * @pending_tid_stats_cnt: pending tid stats count which waits for REO status
 * @is_query_timeout: flag to indicate whether the stats query timed out
4283  */
4284 struct dp_req_rx_hw_stats_t {
4285 	qdf_atomic_t pending_tid_stats_cnt;
4286 	bool is_query_timeout;
4287 };
4288 #endif
4289 /* soc level structure to declare arch specific ops for DP */
4290 
4291 
4292 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id);
4293 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc,
4294 					    uint32_t mac_id);
4295 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id);
4296 
4297 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
4298 void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc);
4299 #else
4300 static inline void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc) {}
4301 #endif
4302 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
4303 			 int ring_type, uint32_t num_entries,
4304 			 bool cached);
4305 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng);
4306 QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
4307 			int ring_type, int ring_num, int mac_id);
4308 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
4309 		    int ring_type, int ring_num);
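
/*
 * Illustrative sketch only, not a driver API: the SRNG life cycle implied
 * by the prototypes above - allocate the ring memory, initialize it
 * against HW, and free the allocation if initialization fails. Ring type,
 * ring number and the mac_id of 0 are placeholder values.
 */
static inline QDF_STATUS
dp_srng_setup_example(struct dp_soc *soc, struct dp_srng *srng,
		      int ring_type, int ring_num, uint32_t num_entries)
{
	QDF_STATUS status;

	status = dp_srng_alloc(soc, srng, ring_type, num_entries, false);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	status = dp_srng_init(soc, srng, ring_type, ring_num, 0);
	if (status != QDF_STATUS_SUCCESS)
		dp_srng_free(soc, srng);

	return status;
}
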
4310 void dp_print_peer_txrx_stats_be(struct cdp_peer_stats *peer_stats,
4311 				 enum peer_stats_type stats_type);
4312 void dp_print_peer_txrx_stats_li(struct cdp_peer_stats *peer_stats,
4313 				 enum peer_stats_type stats_type);
4314 
4315 enum timer_yield_status
4316 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
4317 			  uint64_t start_time);
4318 
/**
4320  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
4321  * @vdev: Datapath VDEV handle
4322  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4323  * @hash_based: pointer to hash value (enabled/disabled) to be populated
4324  *
4325  * Return: None
4326  */
4327 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
4328 				  enum cdp_host_reo_dest_ring *reo_dest,
4329 				  bool *hash_based);
4330 
4331 /**
 * dp_reo_remap_config() - configure reo remap register value based
 *                         on nss configuration.
 *		Based on the offload_radio value, the remap configuration
 *		below gets applied.
4336  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
4337  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
4338  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
4339  *		3 - both Radios handled by NSS (remap not required)
4340  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
4341  *
 * @soc: DP soc handle
 * @remap0: output parameter indicates reo remap 0 register value
4343  * @remap1: output parameter indicates reo remap 1 register value
4344  * @remap2: output parameter indicates reo remap 2 register value
4345  * Return: bool type, true if remap is configured else false.
4346  */
4347 
4348 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
4349 			 uint32_t *remap1, uint32_t *remap2);
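
/*
 * Illustrative usage sketch only: fetches the three REO remap values and
 * acts only when dp_reo_remap_config() reports that remapping is needed.
 * The actual register programming is target-specific and omitted here.
 */
static inline void dp_reo_remap_config_example(struct dp_soc *soc)
{
	uint32_t remap0 = 0, remap1 = 0, remap2 = 0;

	if (dp_reo_remap_config(soc, &remap0, &remap1, &remap2)) {
		/* program remap0/remap1/remap2 into the REO destination
		 * remap registers through the HAL for this target
		 */
	}
}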
4350 
4351 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
4352 /**
4353  * dp_tx_comp_get_prefetched_params_from_hal_desc() - Get prefetched TX desc
4354  * @soc: DP soc handle
4355  * @tx_comp_hal_desc: HAL TX Comp Descriptor
4356  * @r_tx_desc: SW Tx Descriptor retrieved from HAL desc.
4357  *
4358  * Return: None
4359  */
4360 void dp_tx_comp_get_prefetched_params_from_hal_desc(
4361 					struct dp_soc *soc,
4362 					void *tx_comp_hal_desc,
4363 					struct dp_tx_desc_s **r_tx_desc);
4364 #endif
4365 #endif /* _DP_TYPES_H_ */
4366