xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_types.h (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _DP_TYPES_H_
21 #define _DP_TYPES_H_
22 
23 #include <qdf_types.h>
24 #include <qdf_nbuf.h>
25 #include <qdf_lock.h>
26 #include <qdf_atomic.h>
27 #include <qdf_util.h>
28 #include <qdf_list.h>
29 #include <qdf_lro.h>
30 #include <queue.h>
31 #include <htt_common.h>
32 #include <htt.h>
33 #include <htt_stats.h>
34 #include <cdp_txrx_cmn.h>
35 #ifdef DP_MOB_DEFS
36 #include <cds_ieee80211_common.h>
37 #endif
38 #include <wdi_event_api.h>    /* WDI subscriber event list */
39 
40 #include "hal_hw_headers.h"
41 #include <hal_tx.h>
42 #include <hal_reo.h>
43 #include "wlan_cfg.h"
44 #include "hal_rx.h"
45 #include <hal_api.h>
46 #include <hal_api_mon.h>
48 //#include "hal_rx_flow.h"
49 
50 #define MAX_BW 8
51 #define MAX_RETRIES 4
52 #define MAX_RECEPTION_TYPES 4
53 
54 #define MINIDUMP_STR_SIZE 25
55 #ifndef REMOVE_PKT_LOG
56 #include <pktlog.h>
57 #endif
58 #include <dp_umac_reset.h>
59 
60 //#include "dp_tx.h"
61 
62 #define REPT_MU_MIMO 1
63 #define REPT_MU_OFDMA_MIMO 3
64 #define DP_VO_TID 6
65  /** MAX TID MAPS AVAILABLE PER PDEV */
66 #define DP_MAX_TID_MAPS 16
67 /** pad DSCP_TID_MAP_MAX with 6 to avoid an out-of-bounds (OOB) access */
68 #define DSCP_TID_MAP_MAX (64 + 6)
69 #define DP_IP_DSCP_SHIFT 2
70 #define DP_IP_DSCP_MASK 0x3f
71 #define DP_FC0_SUBTYPE_QOS 0x80
72 #define DP_QOS_TID 0x0f
73 #define DP_IPV6_PRIORITY_SHIFT 20
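/*
 * Illustrative sketch (not part of the driver): how the DSCP defines above
 * are typically used to derive a TID. The tos byte, map_id and the
 * pdev-level dscp_tid_map lookup are assumptions made for this example.
 *
 *	uint8_t tos  = ipv4_tos_byte;
 *	uint8_t dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
 *	uint8_t tid  = pdev->dscp_tid_map[map_id][dscp];
 *
 * For IPv6, DP_IPV6_PRIORITY_SHIFT locates the traffic class above the
 * 20-bit flow label in the first word of the header.
 */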
74 #define MAX_MON_LINK_DESC_BANKS 2
75 #define DP_VDEV_ALL 0xff
76 
77 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
78 #define WLAN_DP_RESET_MON_BUF_RING_FILTER
79 #define MAX_TXDESC_POOLS 6
80 #else
81 #define MAX_TXDESC_POOLS 4
82 #endif
83 
84 #define MAX_RXDESC_POOLS 4
85 #define MAX_PPE_TXDESC_POOLS 1
86 
87 /* Max no. of VDEV per PSOC */
88 #ifdef WLAN_PSOC_MAX_VDEVS
89 #define MAX_VDEV_CNT WLAN_PSOC_MAX_VDEVS
90 #else
91 #define MAX_VDEV_CNT 51
92 #endif
93 
94 /* Max no. of VDEVs, a PDEV can support */
95 #ifdef WLAN_PDEV_MAX_VDEVS
96 #define DP_PDEV_MAX_VDEVS WLAN_PDEV_MAX_VDEVS
97 #else
98 #define DP_PDEV_MAX_VDEVS 17
99 #endif
100 
101 #define EXCEPTION_DEST_RING_ID 0
102 #define MAX_IDLE_SCATTER_BUFS 16
103 #define DP_MAX_IRQ_PER_CONTEXT 12
104 #define DEFAULT_HW_PEER_ID 0xffff
105 
106 #define MAX_AST_AGEOUT_COUNT 128
107 
108 #ifdef TX_ADDR_INDEX_SEARCH
109 #define DP_TX_ADDR_SEARCH_ADDR_POLICY HAL_TX_ADDR_INDEX_SEARCH
110 #else
111 #define DP_TX_ADDR_SEARCH_ADDR_POLICY HAL_TX_ADDR_SEARCH_DEFAULT
112 #endif
113 
114 #define WBM_INT_ERROR_ALL 0
115 #define WBM_INT_ERROR_REO_NULL_BUFFER 1
116 #define WBM_INT_ERROR_REO_NULL_LINK_DESC 2
117 #define WBM_INT_ERROR_REO_NULL_MSDU_BUFF 3
118 #define WBM_INT_ERROR_REO_BUFF_REAPED 4
119 #define MAX_WBM_INT_ERROR_REASONS 5
120 
121 #define MAX_TX_HW_QUEUES MAX_TCL_DATA_RINGS
122 /* Maximum retries for Delba per tid per peer */
123 #define DP_MAX_DELBA_RETRY 3
124 
125 #ifdef AST_OFFLOAD_ENABLE
126 #define AST_OFFLOAD_ENABLE_STATUS 1
127 #else
128 #define AST_OFFLOAD_ENABLE_STATUS 0
129 #endif
130 
131 #ifdef FEATURE_MEC_OFFLOAD
132 #define FW_MEC_FW_OFFLOAD_ENABLED 1
133 #else
134 #define FW_MEC_FW_OFFLOAD_ENABLED 0
135 #endif
136 
137 #define PCP_TID_MAP_MAX 8
138 #define MAX_MU_USERS 37
139 
140 #define REO_CMD_EVENT_HIST_MAX 64
141 
142 #define DP_MAX_SRNGS 64
143 
144 /* 2G PHYB */
145 #define PHYB_2G_LMAC_ID 2
146 #define PHYB_2G_TARGET_PDEV_ID 2
147 
148 /* Flags for skipping s/w tid classification */
149 #define DP_TX_HW_DSCP_TID_MAP_VALID 0x1
150 #define DP_TXRX_HLOS_TID_OVERRIDE_ENABLED 0x2
151 #define DP_TX_MESH_ENABLED 0x4
152 #define DP_TX_INVALID_QOS_TAG 0xf
153 
154 #ifdef WLAN_SUPPORT_RX_FISA
155 #define FISA_FLOW_MAX_AGGR_COUNT        16 /* max flow aggregate count */
156 #endif
157 
158 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
159 #define DP_RX_REFILL_BUFF_POOL_SIZE  2048
160 #define DP_RX_REFILL_BUFF_POOL_BURST 64
161 #define DP_RX_REFILL_THRD_THRESHOLD  512
162 #endif
163 
164 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
165 #define DP_SKIP_BAR_UPDATE_TIMEOUT 5000
166 #endif
167 
168 #define DP_TX_MAGIC_PATTERN_INUSE	0xABCD1234
169 #define DP_TX_MAGIC_PATTERN_FREE	0xDEADBEEF
170 
171 #ifdef IPA_OFFLOAD
172 #define DP_PEER_REO_STATS_TID_SHIFT 16
173 #define DP_PEER_REO_STATS_TID_MASK 0xFFFF0000
174 #define DP_PEER_REO_STATS_PEER_ID_MASK 0x0000FFFF
175 #define DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid) \
176 	((comb_peer_id_tid & DP_PEER_REO_STATS_TID_MASK) >> \
177 	DP_PEER_REO_STATS_TID_SHIFT)
178 #define DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid) \
179 	(comb_peer_id_tid & DP_PEER_REO_STATS_PEER_ID_MASK)
180 #endif
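/*
 * Illustrative sketch (not part of the driver): the combined word decoded
 * by the macros above carries the tid in its upper 16 bits and the peer id
 * in its lower 16 bits. The packing shown here is an assumption for the
 * example; only the extraction macros are defined above.
 *
 *	uint32_t comb = ((uint32_t)tid << DP_PEER_REO_STATS_TID_SHIFT) |
 *			(peer_id & DP_PEER_REO_STATS_PEER_ID_MASK);
 *	uint16_t t    = DP_PEER_GET_REO_STATS_TID(comb);
 *	uint16_t pid  = DP_PEER_GET_REO_STATS_PEER_ID(comb);
 */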
181 
182 enum rx_pktlog_mode {
183 	DP_RX_PKTLOG_DISABLED = 0,
184 	DP_RX_PKTLOG_FULL,
185 	DP_RX_PKTLOG_LITE,
186 };
187 
188 /**
189  * enum m_copy_mode - Available mcopy modes
190  */
191 enum m_copy_mode {
192 	M_COPY_DISABLED = 0,
193 	M_COPY = 2,
194 	M_COPY_EXTENDED = 4,
195 };
196 
197 struct msdu_list {
198 	qdf_nbuf_t head;
199 	qdf_nbuf_t tail;
200 	uint32_t sum_len;
201 };
202 
203 struct dp_soc_cmn;
204 struct dp_pdev;
205 struct dp_vdev;
206 struct dp_tx_desc_s;
207 struct dp_soc;
208 union dp_rx_desc_list_elem_t;
209 struct cdp_peer_rate_stats_ctx;
210 struct cdp_soc_rate_stats_ctx;
211 struct dp_rx_fst;
212 struct dp_mon_filter;
213 struct dp_mon_mpdu;
214 #ifdef BE_PKTLOG_SUPPORT
215 struct dp_mon_filter_be;
216 #endif
217 struct dp_peer;
218 struct dp_txrx_peer;
219 
220 /**
221  * enum dp_peer_state - DP peer states
222  */
223 enum dp_peer_state {
224 	DP_PEER_STATE_NONE,
225 	DP_PEER_STATE_INIT,
226 	DP_PEER_STATE_ACTIVE,
227 	DP_PEER_STATE_LOGICAL_DELETE,
228 	DP_PEER_STATE_INACTIVE,
229 	DP_PEER_STATE_FREED,
230 	DP_PEER_STATE_INVALID,
231 };
232 
233 /**
234  * enum dp_mod_id - DP module IDs used to track reference holders
235  */
236 enum dp_mod_id {
237 	DP_MOD_ID_TX_RX,
238 	DP_MOD_ID_TX_COMP,
239 	DP_MOD_ID_RX,
240 	DP_MOD_ID_HTT_COMP,
241 	DP_MOD_ID_RX_ERR,
242 	DP_MOD_ID_TX_PPDU_STATS,
243 	DP_MOD_ID_RX_PPDU_STATS,
244 	DP_MOD_ID_CDP,
245 	DP_MOD_ID_GENERIC_STATS,
246 	DP_MOD_ID_TX_MULTIPASS,
247 	DP_MOD_ID_TX_CAPTURE,
248 	DP_MOD_ID_NSS_OFFLOAD,
249 	DP_MOD_ID_CONFIG,
250 	DP_MOD_ID_HTT,
251 	DP_MOD_ID_IPA,
252 	DP_MOD_ID_AST,
253 	DP_MOD_ID_MCAST2UCAST,
254 	DP_MOD_ID_CHILD,
255 	DP_MOD_ID_MESH,
256 	DP_MOD_ID_TX_EXCEPTION,
257 	DP_MOD_ID_TDLS,
258 	DP_MOD_ID_MISC,
259 	DP_MOD_ID_MSCS,
260 	DP_MOD_ID_TX,
261 	DP_MOD_ID_SAWF,
262 	DP_MOD_ID_REINJECT,
263 	DP_MOD_ID_SCS,
264 	DP_MOD_ID_UMAC_RESET,
265 	DP_MOD_ID_MAX,
266 };
267 
268 #define DP_PDEV_ITERATE_VDEV_LIST(_pdev, _vdev) \
269 	TAILQ_FOREACH((_vdev), &(_pdev)->vdev_list, vdev_list_elem)
270 
271 #define DP_VDEV_ITERATE_PEER_LIST(_vdev, _peer) \
272 	TAILQ_FOREACH((_peer), &(_vdev)->peer_list, peer_list_elem)
273 
274 #define DP_PEER_ITERATE_ASE_LIST(_peer, _ase, _temp_ase) \
275 	TAILQ_FOREACH_SAFE((_ase), &(_peer)->ast_entry_list, ase_list_elem, (_temp_ase))
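/*
 * Illustrative sketch (not part of the driver): walking the vdev list of a
 * pdev with one of the iterators above. Callers are expected to serialize
 * against list updates; the vdev_list_lock name used here is an assumption
 * for the example.
 *
 *	struct dp_vdev *vdev = NULL;
 *
 *	qdf_spin_lock_bh(&pdev->vdev_list_lock);
 *	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
 *		... per-vdev work ...
 *	}
 *	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
 */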
276 
277 #define DP_MUTEX_TYPE qdf_spinlock_t
278 
279 #define DP_FRAME_IS_MULTICAST(_a)  (*(_a) & 0x01)
280 #define DP_FRAME_IS_IPV4_MULTICAST(_a)  (*(_a) == 0x01)
281 
282 #define DP_FRAME_IS_IPV6_MULTICAST(_a)         \
283     ((_a)[0] == 0x33 &&                         \
284      (_a)[1] == 0x33)
285 
286 #define DP_FRAME_IS_BROADCAST(_a)              \
287     ((_a)[0] == 0xff &&                         \
288      (_a)[1] == 0xff &&                         \
289      (_a)[2] == 0xff &&                         \
290      (_a)[3] == 0xff &&                         \
291      (_a)[4] == 0xff &&                         \
292      (_a)[5] == 0xff)
293 #define DP_FRAME_IS_SNAP(_llc) ((_llc)->llc_dsap == 0xaa && \
294 		(_llc)->llc_ssap == 0xaa && \
295 		(_llc)->llc_un.type_snap.control == 0x3)
296 #define DP_FRAME_IS_LLC(typeorlen) ((typeorlen) >= 0x600)
297 #define DP_FRAME_FC0_TYPE_MASK 0x0c
298 #define DP_FRAME_FC0_TYPE_DATA 0x08
299 #define DP_FRAME_IS_DATA(_frame) \
300 	(((_frame)->i_fc[0] & DP_FRAME_FC0_TYPE_MASK) == DP_FRAME_FC0_TYPE_DATA)
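/*
 * Illustrative sketch (not part of the driver): classifying a destination
 * MAC address with the helpers above; "daddr" is assumed to point at the
 * 6-byte destination address of the frame.
 *
 *	if (DP_FRAME_IS_BROADCAST(daddr))
 *		... broadcast ...
 *	else if (DP_FRAME_IS_IPV6_MULTICAST(daddr) ||
 *		 DP_FRAME_IS_MULTICAST(daddr))
 *		... multicast ...
 *	else
 *		... unicast ...
 */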
301 
302 /**
303  * macros to convert hw mac id to sw mac id:
304  * mac ids used by hardware start from a value of 1 while
305  * those in host software start from a value of 0. Use the
306  * macros below to convert between mac ids used by software and
307  * hardware
308  */
309 #define DP_SW2HW_MACID(id) ((id) + 1)
310 #define DP_HW2SW_MACID(id) ((id) > 0 ? ((id) - 1) : 0)
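/*
 * Example: host mac ids are zero based while hardware mac ids start at 1,
 * so DP_SW2HW_MACID(0) == 1 and DP_HW2SW_MACID(1) == 0.
 */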
311 
312 /**
313  * Number of Tx Queues
314  * enum and macro to define how many threshold levels are used
315  * for the AC based flow control
316  */
317 #ifdef QCA_AC_BASED_FLOW_CONTROL
318 enum dp_fl_ctrl_threshold {
319 	DP_TH_BE_BK = 0,
320 	DP_TH_VI,
321 	DP_TH_VO,
322 	DP_TH_HI,
323 };
324 
325 #define FL_TH_MAX (4)
326 #define FL_TH_VI_PERCENTAGE (80)
327 #define FL_TH_VO_PERCENTAGE (60)
328 #define FL_TH_HI_PERCENTAGE (40)
329 #endif
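/*
 * Illustrative sketch (not part of the driver): how the per-AC percentages
 * above are typically applied to a base stop threshold when an AC based
 * flow pool is created. The stop_th array mirrors dp_tx_desc_pool_s below;
 * base_stop_th is an assumption for the example.
 *
 *	uint16_t stop_th[FL_TH_MAX];
 *
 *	stop_th[DP_TH_BE_BK] = base_stop_th;
 *	stop_th[DP_TH_VI] = (base_stop_th * FL_TH_VI_PERCENTAGE) / 100;
 *	stop_th[DP_TH_VO] = (base_stop_th * FL_TH_VO_PERCENTAGE) / 100;
 *	stop_th[DP_TH_HI] = (base_stop_th * FL_TH_HI_PERCENTAGE) / 100;
 */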
330 
331 /**
332  * enum dp_intr_mode
333  * @DP_INTR_INTEGRATED: Line interrupts
334  * @DP_INTR_MSI: MSI interrupts
335  * @DP_INTR_POLL: Polling
336  */
337 enum dp_intr_mode {
338 	DP_INTR_INTEGRATED = 0,
339 	DP_INTR_MSI,
340 	DP_INTR_POLL,
341 	DP_INTR_LEGACY_VIRTUAL_IRQ,
342 };
343 
344 /**
345  * enum dp_tx_frm_type
346  * @dp_tx_frm_std: Regular frame, no added header fragments
347  * @dp_tx_frm_tso: TSO segment, with a modified IP header added
348  * @dp_tx_frm_sg: SG segment
349  * @dp_tx_frm_audio: Audio frames, a custom LLC/SNAP header added
350  * @dp_tx_frm_me: Multicast to Unicast Converted frame
351  * @dp_tx_frm_raw: Raw Frame
352  */
353 enum dp_tx_frm_type {
354 	dp_tx_frm_std = 0,
355 	dp_tx_frm_tso,
356 	dp_tx_frm_sg,
357 	dp_tx_frm_audio,
358 	dp_tx_frm_me,
359 	dp_tx_frm_raw,
360 };
361 
362 /**
363  * enum dp_ast_type
364  * @dp_ast_type_wds: WDS peer AST type
365  * @dp_ast_type_static: static ast entry type
366  * @dp_ast_type_mec: Multicast echo ast entry type
367  */
368 enum dp_ast_type {
369 	dp_ast_type_wds = 0,
370 	dp_ast_type_static,
371 	dp_ast_type_mec,
372 };
373 
374 /**
375  * enum dp_nss_cfg
376  * @dp_nss_cfg_default: No radios are offloaded
377  * @dp_nss_cfg_first_radio: First radio offloaded
378  * @dp_nss_cfg_second_radio: Second radio offloaded
379  * @dp_nss_cfg_dbdc: Dual radios offloaded
380  * @dp_nss_cfg_dbtc: Three radios offloaded
381  */
382 enum dp_nss_cfg {
383 	dp_nss_cfg_default = 0x0,
384 	dp_nss_cfg_first_radio = 0x1,
385 	dp_nss_cfg_second_radio = 0x2,
386 	dp_nss_cfg_dbdc = 0x3,
387 	dp_nss_cfg_dbtc = 0x7,
388 	dp_nss_cfg_max
389 };
390 
391 #ifdef WLAN_TX_PKT_CAPTURE_ENH
392 #define DP_CPU_RING_MAP_1 1
393 #endif
394 
395 /**
396  * enum dp_cpu_ring_map_types - dp tx cpu ring map
397  * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
398  * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded
399  * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
400  * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
401  * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
402  * @DP_SINGLE_TX_RING_MAP: to avoid out of order all cpu mapped to single ring
403  * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
404  */
405 enum dp_cpu_ring_map_types {
406 	DP_NSS_DEFAULT_MAP,
407 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
408 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
409 	DP_NSS_DBDC_OFFLOADED_MAP,
410 	DP_NSS_DBTC_OFFLOADED_MAP,
411 #ifdef WLAN_TX_PKT_CAPTURE_ENH
412 	DP_SINGLE_TX_RING_MAP,
413 #endif
414 	DP_NSS_CPU_RING_MAP_MAX
415 };
416 
417 /**
418  * struct dp_rx_nbuf_frag_info - Holds vaddr and paddr of a buffer
419  *
420  * @paddr: Physical address of the allocated buffer.
421  * @nbuf: Allocated nbuf in case of the nbuf approach.
422  * @vaddr: Virtual address of the frag in case of the frag approach.
423  */
424 struct dp_rx_nbuf_frag_info {
425 	qdf_dma_addr_t paddr;
426 	union {
427 		qdf_nbuf_t nbuf;
428 		qdf_frag_t vaddr;
429 	} virt_addr;
430 };
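/*
 * Illustrative sketch (not part of the driver): only one member of the
 * virt_addr union is meaningful for a given entry, depending on whether
 * the RX buffer is tracked as a full nbuf or as a raw frag. The local
 * variables here are assumptions for the example.
 *
 *	struct dp_rx_nbuf_frag_info info = { 0 };
 *
 *	info.paddr = mapped_paddr;
 *	if (frag_mode)
 *		info.virt_addr.vaddr = frag_ptr;
 *	else
 *		info.virt_addr.nbuf = nbuf;
 */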
431 
432 /**
433  * enum dp_ctxt_type - context type
434  * @DP_PDEV_TYPE: PDEV context
435  * @DP_RX_RING_HIST_TYPE: Datapath rx ring history
436  * @DP_RX_ERR_RING_HIST_TYPE: Datapath rx error ring history
437  * @DP_RX_REINJECT_RING_HIST_TYPE: Datapath reinject ring history
 * @DP_TX_TCL_HIST_TYPE: Datapath TX TCL descriptor posting history
 * @DP_TX_COMP_HIST_TYPE: Datapath TX completion history
 * @DP_FISA_RX_FT_TYPE: Datapath FISA RX flow table
438  * @DP_RX_REFILL_RING_HIST_TYPE: Datapath rx refill ring history
439  * @DP_TX_HW_DESC_HIST_TYPE: Datapath TX HW descriptor history
440  * @DP_MON_SOC_TYPE: Datapath monitor soc context
441  * @DP_MON_PDEV_TYPE: Datapath monitor pdev context
442  * @DP_MON_STATUS_BUF_HIST_TYPE: DP monitor status buffer history
443  */
444 enum dp_ctxt_type {
445 	DP_PDEV_TYPE,
446 	DP_RX_RING_HIST_TYPE,
447 	DP_RX_ERR_RING_HIST_TYPE,
448 	DP_RX_REINJECT_RING_HIST_TYPE,
449 	DP_TX_TCL_HIST_TYPE,
450 	DP_TX_COMP_HIST_TYPE,
451 	DP_FISA_RX_FT_TYPE,
452 	DP_RX_REFILL_RING_HIST_TYPE,
453 	DP_TX_HW_DESC_HIST_TYPE,
454 	DP_MON_SOC_TYPE,
455 	DP_MON_PDEV_TYPE,
456 	DP_MON_STATUS_BUF_HIST_TYPE,
457 };
458 
459 /**
460  * enum dp_desc_type - source type for multiple pages allocation
461  * @DP_TX_DESC_TYPE: DP SW TX descriptor
462  * @DP_TX_PPEDS_DESC_TYPE: DP PPE-DS Tx descriptor
463  * @DP_TX_EXT_DESC_TYPE: DP TX msdu extension descriptor
464  * @DP_TX_EXT_DESC_LINK_TYPE: DP link descriptor for msdu ext_desc
465  * @DP_TX_TSO_DESC_TYPE: DP TX TSO descriptor
466  * @DP_TX_TSO_NUM_SEG_TYPE: DP TX number of segments
467  * @DP_RX_DESC_BUF_TYPE: DP RX SW descriptor
468  * @DP_RX_DESC_STATUS_TYPE: DP RX SW descriptor for monitor status
469  * @DP_HW_LINK_DESC_TYPE: DP HW link descriptor
470  * @DP_HW_CC_SPT_PAGE_TYPE: DP pages for HW CC secondary page table
471  */
472 enum dp_desc_type {
473 	DP_TX_DESC_TYPE,
474 	DP_TX_PPEDS_DESC_TYPE,
475 	DP_TX_EXT_DESC_TYPE,
476 	DP_TX_EXT_DESC_LINK_TYPE,
477 	DP_TX_TSO_DESC_TYPE,
478 	DP_TX_TSO_NUM_SEG_TYPE,
479 	DP_RX_DESC_BUF_TYPE,
480 	DP_RX_DESC_STATUS_TYPE,
481 	DP_HW_LINK_DESC_TYPE,
482 	DP_HW_CC_SPT_PAGE_TYPE,
483 };
484 
485 /**
486  * struct rx_desc_pool
487  * @pool_size: number of RX descriptor in the pool
488  * @elem_size: Element size
489  * @desc_pages: Multi page descriptors
490  * @array: pointer to array of RX descriptor
491  * @freelist: pointer to free RX descriptor link list
492  * @lock: Protection for the RX descriptor pool
493  * @owner: owner for nbuf
494  * @buf_size: Buffer size
495  * @buf_alignment: Buffer alignment
496  * @rx_mon_dest_frag_enable: Enable frag processing for mon dest buffer
497  * @desc_type: type of desc this pool serves
498  */
499 struct rx_desc_pool {
500 	uint32_t pool_size;
501 #ifdef RX_DESC_MULTI_PAGE_ALLOC
502 	uint16_t elem_size;
503 	struct qdf_mem_multi_page_t desc_pages;
504 #else
505 	union dp_rx_desc_list_elem_t *array;
506 #endif
507 	union dp_rx_desc_list_elem_t *freelist;
508 	qdf_spinlock_t lock;
509 	uint8_t owner;
510 	uint16_t buf_size;
511 	uint8_t buf_alignment;
512 	bool rx_mon_dest_frag_enable;
513 	enum dp_desc_type desc_type;
514 };
515 
516 /**
517  * struct dp_tx_ext_desc_elem_s
518  * @next: next extension descriptor pointer
519  * @vaddr: hlos virtual address pointer
520  * @paddr: physical address pointer for descriptor
521  * @flags: mark features for extension descriptor
522  * @me_buffer: Pointer to ME buffer - store this so that it can be freed on
523  *		Tx completion of ME packet
524  * @tso_desc: Pointer to Tso desc
525  * @tso_num_desc: Pointer to tso_num_desc
526  */
527 struct dp_tx_ext_desc_elem_s {
528 	struct dp_tx_ext_desc_elem_s *next;
529 	void *vaddr;
530 	qdf_dma_addr_t paddr;
531 	uint16_t flags;
532 	struct dp_tx_me_buf_t *me_buffer;
533 	struct qdf_tso_seg_elem_t *tso_desc;
534 	struct qdf_tso_num_seg_elem_t *tso_num_desc;
535 };
536 
537 /**
538  * struct dp_tx_ext_desc_pool_s - Tx Extension Descriptor Pool
539  * @elem_count: Number of descriptors in the pool
540  * @elem_size: Size of each descriptor
541  * @num_free: Number of free descriptors
542  * @msdu_ext_desc: MSDU extension descriptor
543  * @desc_pages: multiple page allocation information for actual descriptors
544  * @link_elem_size: size of the link descriptor in cacheable memory used for
545  * 		    chaining the extension descriptors
546  * @desc_link_pages: multiple page allocation information for link descriptors
547  */
548 struct dp_tx_ext_desc_pool_s {
549 	uint16_t elem_count;
550 	int elem_size;
551 	uint16_t num_free;
552 	struct qdf_mem_multi_page_t desc_pages;
553 	int link_elem_size;
554 	struct qdf_mem_multi_page_t desc_link_pages;
555 	struct dp_tx_ext_desc_elem_s *freelist;
556 	qdf_spinlock_t lock;
557 	qdf_dma_mem_context(memctx);
558 };
559 
560 /**
561  * struct dp_tx_desc_s - Tx Descriptor
562  * @next: Next in the chain of descriptors in freelist or in the completion list
563  * @nbuf: Buffer Address
564  * @msdu_ext_desc: MSDU extension descriptor
565  * @id: Descriptor ID
566  * @vdev_id: vdev_id of vdev over which the packet was transmitted
567  * @pdev: Handle to pdev
568  * @pool_id: Pool ID - used when releasing the descriptor
569  * @flags: Flags to track the state of descriptor and special frame handling
570  * @comp: Tx completion descriptor contents (WBM release ring entry)
571  * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet).
572  * 		   This is maintained in descriptor to allow more efficient
573  * 		   processing in completion event processing code.
574  * 		   This field is filled in with the htt_pkt_type enum.
575  * @buffer_src: buffer source TQM, REO, FW etc.
576  * @frm_type: Frame Type - ToDo check if this is redundant
577  * @pkt_offset: Offset from which the actual packet data starts
578  * @pool: handle to flow_pool this descriptor belongs to.
579  */
580 struct dp_tx_desc_s {
581 	struct dp_tx_desc_s *next;
582 	qdf_nbuf_t nbuf;
583 	uint16_t length;
584 #ifdef DP_TX_TRACKING
585 	uint32_t magic;
586 	uint64_t timestamp_tick;
587 #endif
588 	uint16_t flags;
589 	uint32_t id;
590 	qdf_dma_addr_t dma_addr;
591 	uint8_t vdev_id;
592 	uint8_t tx_status;
593 	uint16_t peer_id;
594 	struct dp_pdev *pdev;
595 	uint8_t tx_encap_type:2,
596 		buffer_src:3,
597 		reserved:3;
598 	uint8_t frm_type;
599 	uint8_t pkt_offset;
600 	uint8_t  pool_id;
601 	unsigned char *shinfo_addr;
602 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
603 	qdf_ktime_t timestamp;
604 	struct hal_tx_desc_comp_s comp;
605 };
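/*
 * Illustrative sketch (not part of the driver): when DP_TX_TRACKING is
 * enabled, the magic field above together with DP_TX_MAGIC_PATTERN_INUSE
 * and DP_TX_MAGIC_PATTERN_FREE can flag descriptors that are completed
 * after being freed. This is an assumed usage pattern, not the exact
 * driver flow.
 *
 *	tx_desc->magic = DP_TX_MAGIC_PATTERN_INUSE;	(on allocation)
 *	...
 *	if (tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE)
 *		... stale or double-freed descriptor ...
 *	tx_desc->magic = DP_TX_MAGIC_PATTERN_FREE;	(on free)
 */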
606 
607 #ifdef QCA_AC_BASED_FLOW_CONTROL
608 /**
609  * enum flow_pool_status - flow pool status
610  * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
611  *				and network queues are unpaused
612  * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
613  *			   and network queues are paused
 * @FLOW_POOL_BE_BK_PAUSED: pool is active and BE/BK network queues are paused
 * @FLOW_POOL_VI_PAUSED: pool is active and VI network queue is also paused
 * @FLOW_POOL_VO_PAUSED: pool is active and VO network queue is also paused
614  * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
615  * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
616  * @FLOW_POOL_ACTIVE_UNPAUSED_REATTACH: pool is reattached but network
617  *					queues are not paused
618  */
619 enum flow_pool_status {
620 	FLOW_POOL_ACTIVE_UNPAUSED = 0,
621 	FLOW_POOL_ACTIVE_PAUSED = 1,
622 	FLOW_POOL_BE_BK_PAUSED = 2,
623 	FLOW_POOL_VI_PAUSED = 3,
624 	FLOW_POOL_VO_PAUSED = 4,
625 	FLOW_POOL_INVALID = 5,
626 	FLOW_POOL_INACTIVE = 6,
627 	FLOW_POOL_ACTIVE_UNPAUSED_REATTACH = 7,
628 };
629 
630 #else
631 /**
632  * enum flow_pool_status - flow pool status
633  * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
634  *				and network queues are unpaused
635  * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
636  *			   and network queues are paused
637  * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
638  * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
639  */
640 enum flow_pool_status {
641 	FLOW_POOL_ACTIVE_UNPAUSED = 0,
642 	FLOW_POOL_ACTIVE_PAUSED = 1,
643 	FLOW_POOL_BE_BK_PAUSED = 2,
644 	FLOW_POOL_VI_PAUSED = 3,
645 	FLOW_POOL_VO_PAUSED = 4,
646 	FLOW_POOL_INVALID = 5,
647 	FLOW_POOL_INACTIVE = 6,
648 };
649 
650 #endif
651 
652 /**
653  * struct dp_tx_tso_seg_pool_s
654  * @pool_size: total number of pool elements
655  * @num_free: free element count
656  * @freelist: first free element pointer
657  * @desc_pages: multiple page allocation information for actual descriptors
658  * @lock: lock for accessing the pool
659  */
660 struct dp_tx_tso_seg_pool_s {
661 	uint16_t pool_size;
662 	uint16_t num_free;
663 	struct qdf_tso_seg_elem_t *freelist;
664 	struct qdf_mem_multi_page_t desc_pages;
665 	qdf_spinlock_t lock;
666 };
667 
668 /**
669  * struct dp_tx_tso_num_seg_pool_s - pool of TSO num-segment elements
670  * @num_seg_pool_size: total number of pool elements
671  * @num_free: free element count
672  * @freelist: first free element pointer
673  * @desc_pages: multiple page allocation information for actual descriptors
674  * @lock: lock for accessing the pool
675  */
676 
677 struct dp_tx_tso_num_seg_pool_s {
678 	uint16_t num_seg_pool_size;
679 	uint16_t num_free;
680 	struct qdf_tso_num_seg_elem_t *freelist;
681 	struct qdf_mem_multi_page_t desc_pages;
682 	/*tso mutex */
683 	qdf_spinlock_t lock;
684 };
685 
686 /**
687  * struct dp_tx_desc_pool_s - Tx Descriptor pool information
688  * @elem_size: Size of each descriptor in the pool
689  * @pool_size: Total number of descriptors in the pool
690  * @num_free: Number of free descriptors
691  * @num_allocated: Number of used descriptors
692  * @freelist: Chain of free descriptors
693  * @desc_pages: multiple page allocation information for actual descriptors
694  * @num_invalid_bin: Deleted pool with pending Tx completions.
695  * @flow_pool_array_lock: Lock when operating on flow_pool_array.
696  * @flow_pool_array: List of allocated flow pools
697  * @lock: Lock for descriptor allocation/free from/to the pool
698  */
699 struct dp_tx_desc_pool_s {
700 	uint16_t elem_size;
701 	uint32_t num_allocated;
702 	struct dp_tx_desc_s *freelist;
703 	struct qdf_mem_multi_page_t desc_pages;
704 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
705 	uint16_t pool_size;
706 	uint8_t flow_pool_id;
707 	uint8_t num_invalid_bin;
708 	uint16_t avail_desc;
709 	enum flow_pool_status status;
710 	enum htt_flow_type flow_type;
711 #ifdef QCA_AC_BASED_FLOW_CONTROL
712 	uint16_t stop_th[FL_TH_MAX];
713 	uint16_t start_th[FL_TH_MAX];
714 	qdf_time_t max_pause_time[FL_TH_MAX];
715 	qdf_time_t latest_pause_time[FL_TH_MAX];
716 #else
717 	uint16_t stop_th;
718 	uint16_t start_th;
719 #endif
720 	uint16_t pkt_drop_no_desc;
721 	qdf_spinlock_t flow_pool_lock;
722 	uint8_t pool_create_cnt;
723 	void *pool_owner_ctx;
724 #else
725 	uint16_t elem_count;
726 	uint32_t num_free;
727 	qdf_spinlock_t lock;
728 #endif
729 };
730 
731 /**
732  * struct dp_txrx_pool_stats - flow pool related statistics
733  * @pool_map_count: flow pool map received
734  * @pool_unmap_count: flow pool unmap received
735  * @pkt_drop_no_pool: packets dropped due to unavailability of pool
736  */
737 struct dp_txrx_pool_stats {
738 	uint16_t pool_map_count;
739 	uint16_t pool_unmap_count;
740 	uint16_t pkt_drop_no_pool;
741 };
742 
743 /**
744  * struct dp_srng - DP srng structure
745  * @hal_srng: hal_srng handle
746  * @base_vaddr_unaligned: un-aligned virtual base address of the srng ring
747  * @base_vaddr_aligned: aligned virtual base address of the srng ring
748  * @base_paddr_unaligned: un-aligned physical base address of the srng ring
749  * @base_paddr_aligned: aligned physical base address of the srng ring
750  * @alloc_size: size of the srng ring
751  * @cached: is the srng ring memory cached or un-cached memory
752  * @irq: irq number of the srng ring
753  * @num_entries: number of entries in the srng ring
754  * @is_mem_prealloc: Is this srng memory pre-allocated
755  * @crit_thresh: Critical threshold for near-full processing of this srng
756  * @safe_thresh: Safe threshold for near-full processing of this srng
757  * @near_full: Flag to indicate srng is near-full
758  */
759 struct dp_srng {
760 	hal_ring_handle_t hal_srng;
761 	void *base_vaddr_unaligned;
762 	void *base_vaddr_aligned;
763 	qdf_dma_addr_t base_paddr_unaligned;
764 	qdf_dma_addr_t base_paddr_aligned;
765 	uint32_t alloc_size;
766 	uint8_t cached;
767 	int irq;
768 	uint32_t num_entries;
769 #ifdef DP_MEM_PRE_ALLOC
770 	uint8_t is_mem_prealloc;
771 #endif
772 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
773 	uint16_t crit_thresh;
774 	uint16_t safe_thresh;
775 	qdf_atomic_t near_full;
776 #endif
777 };
778 
779 struct dp_rx_reorder_array_elem {
780 	qdf_nbuf_t head;
781 	qdf_nbuf_t tail;
782 };
783 
784 #define DP_RX_BA_INACTIVE 0
785 #define DP_RX_BA_ACTIVE 1
786 #define DP_RX_BA_IN_PROGRESS 2
787 struct dp_reo_cmd_info {
788 	uint16_t cmd;
789 	enum hal_reo_cmd_type cmd_type;
790 	void *data;
791 	void (*handler)(struct dp_soc *, void *, union hal_reo_status *);
792 	TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
793 };
794 
795 struct dp_peer_delay_stats {
796 	struct cdp_delay_tid_stats delay_tid_stats[CDP_MAX_DATA_TIDS]
797 						  [CDP_MAX_TXRX_CTX];
798 };
799 
800 /* Rx TID defrag*/
801 struct dp_rx_tid_defrag {
802 	/* TID */
803 	int tid;
804 
805 	/* only used for defrag right now */
806 	TAILQ_ENTRY(dp_rx_tid_defrag) defrag_waitlist_elem;
807 
808 	/* Store dst desc for reinjection */
809 	hal_ring_desc_t dst_ring_desc;
810 	struct dp_rx_desc *head_frag_desc;
811 
812 	/* Sequence and fragments that are being processed currently */
813 	uint32_t curr_seq_num;
814 	uint32_t curr_frag_num;
815 
816 	/* TODO: Check the following while adding defragmentation support */
817 	struct dp_rx_reorder_array_elem *array;
818 	/* base - single rx reorder element used for non-aggr cases */
819 	struct dp_rx_reorder_array_elem base;
820 	/* rx_tid lock */
821 	qdf_spinlock_t defrag_tid_lock;
822 
823 	/* head PN number */
824 	uint64_t pn128[2];
825 
826 	uint32_t defrag_timeout_ms;
827 
828 	/* defrag usage only, dp_peer pointer related with this tid */
829 	struct dp_txrx_peer *defrag_peer;
830 };
831 
832 /* Rx TID */
833 struct dp_rx_tid {
834 	/* TID */
835 	int tid;
836 
837 	/* Num of addba requests */
838 	uint32_t num_of_addba_req;
839 
840 	/* Num of addba responses */
841 	uint32_t num_of_addba_resp;
842 
843 	/* Num of delba requests */
844 	uint32_t num_of_delba_req;
845 
846 	/* Num of addba responses successful */
847 	uint32_t num_addba_rsp_success;
848 
849 	/* Num of addba responses failed */
850 	uint32_t num_addba_rsp_failed;
851 
852 	/* pn size */
853 	uint8_t pn_size;
854 	/* REO TID queue descriptors */
855 	void *hw_qdesc_vaddr_unaligned;
856 	void *hw_qdesc_vaddr_aligned;
857 	qdf_dma_addr_t hw_qdesc_paddr_unaligned;
858 	qdf_dma_addr_t hw_qdesc_paddr;
859 	uint32_t hw_qdesc_alloc_size;
860 
861 	/* RX ADDBA session state */
862 	int ba_status;
863 
864 	/* RX BA window size */
865 	uint16_t ba_win_size;
866 
867 	/* Starting sequence number in Addba request */
868 	uint16_t startseqnum;
869 	uint16_t dialogtoken;
870 	uint16_t statuscode;
871 	/* user defined ADDBA response status code */
872 	uint16_t userstatuscode;
873 
874 	/* rx_tid lock */
875 	qdf_spinlock_t tid_lock;
876 
877 	/* Store ppdu_id when 2k exception is received */
878 	uint32_t ppdu_id_2k;
879 
880 	/* Delba Tx completion status */
881 	uint8_t delba_tx_status;
882 
883 	/* Delba Tx retry count */
884 	uint8_t delba_tx_retry;
885 
886 	/* Delba stats */
887 	uint32_t delba_tx_success_cnt;
888 	uint32_t delba_tx_fail_cnt;
889 
890 	/* Delba reason code for retries */
891 	uint8_t delba_rcode;
892 
893 	/* Coex Override preserved window size (1 based) */
894 	uint16_t rx_ba_win_size_override;
895 #ifdef IPA_OFFLOAD
896 	/* rx msdu count per tid */
897 	struct cdp_pkt_info rx_msdu_cnt;
898 #endif
899 
900 };
901 
902 /**
903  * struct dp_intr_stats - DP Interrupt Stats for an interrupt context
904  * @num_tx_ring_masks: interrupts with tx_ring_mask set
905  * @num_rx_ring_masks: interrupts with rx_ring_mask set
906  * @num_rx_mon_ring_masks: interrupts with rx_mon_ring_mask set
907  * @num_rx_err_ring_masks: interrupts with rx_err_ring_mask set
908  * @num_rx_wbm_rel_ring_masks: interrupts with rx_wbm_rel_ring_mask set
909  * @num_reo_status_ring_masks: interrupts with reo_status_ring_mask set
910  * @num_rxdma2host_ring_masks: interrupts with rxdma2host_ring_mask set
911  * @num_host2rxdma_ring_masks: interrupts with host2rxdma_ring_mask set
912  * @num_host2rxdma_mon_ring_masks: interrupts with host2rxdma_mon_ring_mask set
913  * @num_rx_ring_near_full_masks: Near-full interrupts for REO DST ring
914  * @num_tx_comp_ring_near_full_masks: Near-full interrupts for TX completion
915  * @num_rx_wbm_rel_ring_near_full_masks: total number of times the wbm rel ring
916  *                                       near full interrupt was received
917  * @num_reo_status_ring_near_full_masks: total number of times the reo status
918  *                                       near full interrupt was received
919  * @num_near_full_masks: total number of times the near full interrupt
920  *                       was received
921  * @num_masks: total number of times the interrupt was received
922  * @num_host2txmon_ring_masks: interrupts with host2txmon_ring_mask set
925  * @num_tx_mon_ring_masks: interrupts with tx_mon_ring_mask set
926  *
927  * Counters for individual masks are incremented only if there are any packets
928  * on that ring.
929  */
930 struct dp_intr_stats {
931 	uint32_t num_tx_ring_masks[MAX_TCL_DATA_RINGS];
932 	uint32_t num_rx_ring_masks[MAX_REO_DEST_RINGS];
933 	uint32_t num_rx_mon_ring_masks;
934 	uint32_t num_rx_err_ring_masks;
935 	uint32_t num_rx_wbm_rel_ring_masks;
936 	uint32_t num_reo_status_ring_masks;
937 	uint32_t num_rxdma2host_ring_masks;
938 	uint32_t num_host2rxdma_ring_masks;
939 	uint32_t num_host2rxdma_mon_ring_masks;
940 	uint32_t num_rx_ring_near_full_masks[MAX_REO_DEST_RINGS];
941 	uint32_t num_tx_comp_ring_near_full_masks[MAX_TCL_DATA_RINGS];
942 	uint32_t num_rx_wbm_rel_ring_near_full_masks;
943 	uint32_t num_reo_status_ring_near_full_masks;
944 	uint32_t num_host2txmon_ring__masks;
945 	uint32_t num_near_full_masks;
946 	uint32_t num_masks;
947 	uint32_t num_tx_mon_ring_masks;
948 };
949 
950 #ifdef DP_UMAC_HW_RESET_SUPPORT
951 /**
952  * struct dp_intr_bkp - DP per interrupt context ring masks old state
953  * @tx_ring_mask: WBM Tx completion rings (0-2) associated with this napi ctxt
954  * @rx_ring_mask: Rx REO rings (0-3) associated with this interrupt context
955  * @rx_mon_ring_mask: Rx monitor ring mask (0-2)
956  * @rx_err_ring_mask: REO Exception Ring
957  * @rx_wbm_rel_ring_mask: WBM2SW Rx Release Ring
958  * @reo_status_ring_mask: REO command response ring
959  * @rxdma2host_ring_mask: RXDMA to host destination ring
960  * @host2rxdma_ring_mask: Host to RXDMA buffer ring
961  * @host2rxdma_mon_ring_mask: Host to RXDMA monitor  buffer ring
962  * @host2txmon_ring_mask: Tx monitor buffer ring
963  * @tx_mon_ring_mask: Tx monitor ring mask (0-2)
964  *
965  */
966 struct dp_intr_bkp {
967 	uint8_t tx_ring_mask;
968 	uint8_t rx_ring_mask;
969 	uint8_t rx_mon_ring_mask;
970 	uint8_t rx_err_ring_mask;
971 	uint8_t rx_wbm_rel_ring_mask;
972 	uint8_t reo_status_ring_mask;
973 	uint8_t rxdma2host_ring_mask;
974 	uint8_t host2rxdma_ring_mask;
975 	uint8_t host2rxdma_mon_ring_mask;
976 	uint8_t host2txmon_ring_mask;
977 	uint8_t tx_mon_ring_mask;
978 };
979 #endif
980 
981 /* per interrupt context  */
982 struct dp_intr {
983 	uint8_t tx_ring_mask;   /* WBM Tx completion rings (0-2)
984 				associated with this napi context */
985 	uint8_t rx_ring_mask;   /* Rx REO rings (0-3) associated
986 				with this interrupt context */
987 	uint8_t rx_mon_ring_mask;  /* Rx monitor ring mask (0-2) */
988 	uint8_t rx_err_ring_mask; /* REO Exception Ring */
989 	uint8_t rx_wbm_rel_ring_mask; /* WBM2SW Rx Release Ring */
990 	uint8_t reo_status_ring_mask; /* REO command response ring */
991 	uint8_t rxdma2host_ring_mask; /* RXDMA to host destination ring */
992 	uint8_t host2rxdma_ring_mask; /* Host to RXDMA buffer ring */
993 	/* Host to RXDMA monitor  buffer ring */
994 	uint8_t host2rxdma_mon_ring_mask;
995 	/* RX REO rings near full interrupt mask */
996 	uint8_t rx_near_full_grp_1_mask;
997 	/* RX REO rings near full interrupt mask */
998 	uint8_t rx_near_full_grp_2_mask;
999 	/* WBM TX completion rings near full interrupt mask */
1000 	uint8_t tx_ring_near_full_mask;
1001 	uint8_t host2txmon_ring_mask; /* Tx monitor buffer ring */
1002 	uint8_t tx_mon_ring_mask;  /* Tx monitor ring mask (0-2) */
1003 	struct dp_soc *soc;    /* Reference to SoC structure ,
1004 				to get DMA ring handles */
1005 	qdf_lro_ctx_t lro_ctx;
1006 	uint8_t dp_intr_id;
1007 
1008 	/* Interrupt Stats for individual masks */
1009 	struct dp_intr_stats intr_stats;
1010 	uint8_t umac_reset_intr_mask;  /* UMAC reset interrupt mask */
1011 };
1012 
1013 #define REO_DESC_FREELIST_SIZE 64
1014 #define REO_DESC_FREE_DEFER_MS 1000
1015 struct reo_desc_list_node {
1016 	qdf_list_node_t node;
1017 	unsigned long free_ts;
1018 	struct dp_rx_tid rx_tid;
1019 	bool resend_update_reo_cmd;
1020 	uint32_t pending_ext_desc_size;
1021 #ifdef REO_QDESC_HISTORY
1022 	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
1023 #endif
1024 };
1025 
1026 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
1027 #define REO_DESC_DEFERRED_FREELIST_SIZE 256
1028 #define REO_DESC_DEFERRED_FREE_MS 30000
1029 
1030 struct reo_desc_deferred_freelist_node {
1031 	qdf_list_node_t node;
1032 	unsigned long free_ts;
1033 	void *hw_qdesc_vaddr_unaligned;
1034 	qdf_dma_addr_t hw_qdesc_paddr;
1035 	uint32_t hw_qdesc_alloc_size;
1036 #ifdef REO_QDESC_HISTORY
1037 	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
1038 #endif /* REO_QDESC_HISTORY */
1039 };
1040 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
1041 
1042 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1043 /**
1044  * struct reo_cmd_event_record: Elements to record for each reo command
1045  * @cmd_type: reo command type
1046  * @cmd_return_status: reo command post status
1047  * @timestamp: record timestamp for the reo command
1048  */
1049 struct reo_cmd_event_record {
1050 	enum hal_reo_cmd_type cmd_type;
1051 	uint8_t cmd_return_status;
1052 	uint64_t timestamp;
1053 };
1054 
1055 /**
1056  * struct reo_cmd_event_history: Account for reo cmd events
1057  * @index: record number
1058  * @cmd_record: list of records
1059  */
1060 struct reo_cmd_event_history {
1061 	qdf_atomic_t index;
1062 	struct reo_cmd_event_record cmd_record[REO_CMD_EVENT_HIST_MAX];
1063 };
1064 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
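/*
 * Illustrative sketch (not part of the driver): histories such as the one
 * above are filled as power-of-two circular buffers, so the next slot can
 * be derived with a mask rather than a modulo. The hist pointer and status
 * variables are assumptions for the example.
 *
 *	uint32_t idx = qdf_atomic_inc_return(&hist->index) &
 *		       (REO_CMD_EVENT_HIST_MAX - 1);
 *
 *	hist->cmd_record[idx].cmd_type = cmd_type;
 *	hist->cmd_record[idx].cmd_return_status = status;
 *	hist->cmd_record[idx].timestamp = qdf_get_log_timestamp();
 */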
1065 
1066 /* SoC level data path statistics */
1067 struct dp_soc_stats {
1068 	struct {
1069 		uint32_t added;
1070 		uint32_t deleted;
1071 		uint32_t aged_out;
1072 		uint32_t map_err;
1073 		uint32_t ast_mismatch;
1074 	} ast;
1075 
1076 	struct {
1077 		uint32_t added;
1078 		uint32_t deleted;
1079 	} mec;
1080 
1081 	/* SOC level TX stats */
1082 	struct {
1083 		/* Total packets transmitted */
1084 		struct cdp_pkt_info egress[MAX_TCL_DATA_RINGS];
1085 		/* Enqueues per tcl ring */
1086 		uint32_t tcl_enq[MAX_TCL_DATA_RINGS];
1087 		/* packets dropped on tx because of no peer */
1088 		struct cdp_pkt_info tx_invalid_peer;
1089 		/* Number of times each tcl ring was found full */
1090 		uint32_t tcl_ring_full[MAX_TCL_DATA_RINGS];
1091 		/* Descriptors in use at soc */
1092 		uint32_t desc_in_use;
1093 		/* tqm_release_reason == FW removed */
1094 		uint32_t dropped_fw_removed;
1095 		/* tx completion release_src != TQM or FW */
1096 		uint32_t invalid_release_source;
1097 		/* tx completion wbm_internal_error */
1098 		uint32_t wbm_internal_error[MAX_WBM_INT_ERROR_REASONS];
1099 		/* tx completion non_wbm_internal_error */
1100 		uint32_t non_wbm_internal_err;
1101 		/* TX Comp loop packet limit hit */
1102 		uint32_t tx_comp_loop_pkt_limit_hit;
1103 		/* Head pointer Out of sync at the end of dp_tx_comp_handler */
1104 		uint32_t hp_oos2;
1105 		/* tx desc freed as part of vdev detach */
1106 		uint32_t tx_comp_exception;
1107 		/* TQM drops after/during peer delete */
1108 		uint64_t tqm_drop_no_peer;
1109 		/* Number of tx completions reaped per WBM2SW release ring */
1110 		uint32_t tx_comp[MAX_TCL_DATA_RINGS];
1111 		/* Number of tx completions force freed */
1112 		uint32_t tx_comp_force_freed;
1113 	} tx;
1114 
1115 	/* SOC level RX stats */
1116 	struct {
1117 		/* Total rx packets count */
1118 		struct cdp_pkt_info ingress;
1119 		/* Rx errors */
1120 		/* Total Packets in Rx Error ring */
1121 		uint32_t err_ring_pkts;
1122 		/* No of Fragments */
1123 		uint32_t rx_frags;
1124 		/* No of incomplete fragments in waitlist */
1125 		uint32_t rx_frag_wait;
1126 		/* Fragments dropped due to errors */
1127 		uint32_t rx_frag_err;
1128 		/* Fragments received OOR causing sequence num mismatch */
1129 		uint32_t rx_frag_oor;
1130 		/* Fragments dropped due to len errors in skb */
1131 		uint32_t rx_frag_err_len_error;
1132 		/* Fragments dropped due to no peer found */
1133 		uint32_t rx_frag_err_no_peer;
1134 		/* No of reinjected packets */
1135 		uint32_t reo_reinject;
1136 		/* Reap loop packet limit hit */
1137 		uint32_t reap_loop_pkt_limit_hit;
1138 		/* Head pointer Out of sync at the end of dp_rx_process */
1139 		uint32_t hp_oos2;
1140 		/* Rx ring near full */
1141 		uint32_t near_full;
1142 		/* Break ring reaping as not all scattered msdu received */
1143 		uint32_t msdu_scatter_wait_break;
1144 		/* Number of bar frames received */
1145 		uint32_t bar_frame;
1146 		/* Number of frames routed from rxdma */
1147 		uint32_t rxdma2rel_route_drop;
1148 		/* Number of frames routed from reo*/
1149 		uint32_t reo2rel_route_drop;
1150 
1151 		struct {
1152 			/* Invalid RBM error count */
1153 			uint32_t invalid_rbm;
1154 			/* Invalid VDEV Error count */
1155 			uint32_t invalid_vdev;
1156 			/* Invalid PDEV error count */
1157 			uint32_t invalid_pdev;
1158 
1159 			/* Packets delivered to stack with no related peer */
1160 			uint32_t pkt_delivered_no_peer;
1161 			/* Defrag peer uninit error count */
1162 			uint32_t defrag_peer_uninit;
1163 			/* Invalid sa_idx or da_idx*/
1164 			uint32_t invalid_sa_da_idx;
1165 			/* MSDU DONE failures */
1166 			uint32_t msdu_done_fail;
1167 			/* Invalid PEER Error count */
1168 			struct cdp_pkt_info rx_invalid_peer;
1169 			/* Invalid PEER ID count */
1170 			struct cdp_pkt_info rx_invalid_peer_id;
1171 			/* Invalid packet length */
1172 			struct cdp_pkt_info rx_invalid_pkt_len;
1173 			/* HAL ring access Fail error count */
1174 			uint32_t hal_ring_access_fail;
1175 			/* HAL ring access full Fail error count */
1176 			uint32_t hal_ring_access_full_fail;
1177 			/* RX DMA error count */
1178 			uint32_t rxdma_error[HAL_RXDMA_ERR_MAX];
1179 			/* RX REO DEST Desc Invalid Magic count */
1180 			uint32_t rx_desc_invalid_magic;
1181 			/* REO Error count */
1182 			uint32_t reo_error[HAL_REO_ERR_MAX];
1183 			/* HAL REO ERR Count */
1184 			uint32_t hal_reo_error[MAX_REO_DEST_RINGS];
1185 			/* HAL REO DEST Duplicate count */
1186 			uint32_t hal_reo_dest_dup;
1187 			/* HAL WBM RELEASE Duplicate count */
1188 			uint32_t hal_wbm_rel_dup;
1189 			/* HAL RXDMA error Duplicate count */
1190 			uint32_t hal_rxdma_err_dup;
1191 			/* ipa smmu map duplicate count */
1192 			uint32_t ipa_smmu_map_dup;
1193 			/* ipa smmu unmap duplicate count */
1194 			uint32_t ipa_smmu_unmap_dup;
1195 			/* ipa smmu unmap while ipa pipes is disabled */
1196 			uint32_t ipa_unmap_no_pipe;
1197 			/* REO cmd send fail/requeue count */
1198 			uint32_t reo_cmd_send_fail;
1199 			/* REO cmd send drain count */
1200 			uint32_t reo_cmd_send_drain;
1201 			/* RX msdu drop count due to scatter */
1202 			uint32_t scatter_msdu;
1203 			/* RX msdu drop count due to invalid cookie */
1204 			uint32_t invalid_cookie;
1205 			/* Count of stale cookie read in RX path */
1206 			uint32_t stale_cookie;
1207 			/* Delba sent count due to RX 2k jump */
1208 			uint32_t rx_2k_jump_delba_sent;
1209 			/* RX 2k jump msdu indicated to stack count */
1210 			uint32_t rx_2k_jump_to_stack;
1211 			/* RX 2k jump msdu dropped count */
1212 			uint32_t rx_2k_jump_drop;
1213 			/* REO ERR msdu buffer received */
1214 			uint32_t reo_err_msdu_buf_rcved;
1215 			/* REO ERR msdu buffer with invalid cookie received */
1216 			uint32_t reo_err_msdu_buf_invalid_cookie;
1217 			/* REO OOR msdu drop count */
1218 			uint32_t reo_err_oor_drop;
1219 			/* REO OOR msdu indicated to stack count */
1220 			uint32_t reo_err_oor_to_stack;
1221 			/* REO OOR scattered msdu count */
1222 			uint32_t reo_err_oor_sg_count;
1223 			/* REO ERR RAW mpdu drops */
1224 			uint32_t reo_err_raw_mpdu_drop;
1225 			/* RX msdu rejected count on delivery to vdev stack_fn*/
1226 			uint32_t rejected;
1227 			/* Incorrect msdu count in MPDU desc info */
1228 			uint32_t msdu_count_mismatch;
1229 			/* RX raw frame dropped count */
1230 			uint32_t raw_frm_drop;
1231 			/* Stale link desc cookie count*/
1232 			uint32_t invalid_link_cookie;
1233 			/* Nbuf sanity failure */
1234 			uint32_t nbuf_sanity_fail;
1235 			/* Duplicate link desc refilled */
1236 			uint32_t dup_refill_link_desc;
1237 			/* Incorrect msdu continuation bit in MSDU desc */
1238 			uint32_t msdu_continuation_err;
1239 			/* count of start sequence (ssn) updates */
1240 			uint32_t ssn_update_count;
1241 			/* count of bar handling fail */
1242 			uint32_t bar_handle_fail_count;
1243 			/* EAPOL drop count in intrabss scenario */
1244 			uint32_t intrabss_eapol_drop;
1245 			/* PN check failed for 2K-jump or OOR error */
1246 			uint32_t pn_in_dest_check_fail;
1247 			/* MSDU len err count */
1248 			uint32_t msdu_len_err;
1249 			/* Rx flush count */
1250 			uint32_t rx_flush_count;
1251 			/* Rx invalid tid count */
1252 			uint32_t rx_invalid_tid_err;
1253 		} err;
1254 
1255 		/* packet count per core - per ring */
1256 		uint64_t ring_packets[NR_CPUS][MAX_REO_DEST_RINGS];
1257 	} rx;
1258 
1259 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1260 	struct reo_cmd_event_history cmd_event_history;
1261 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1262 };
1263 
1264 union dp_align_mac_addr {
1265 	uint8_t raw[QDF_MAC_ADDR_SIZE];
1266 	struct {
1267 		uint16_t bytes_ab;
1268 		uint16_t bytes_cd;
1269 		uint16_t bytes_ef;
1270 	} align2;
1271 	struct {
1272 		uint32_t bytes_abcd;
1273 		uint16_t bytes_ef;
1274 	} align4;
1275 	struct __attribute__((__packed__)) {
1276 		uint16_t bytes_ab;
1277 		uint32_t bytes_cdef;
1278 	} align4_2;
1279 };
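/*
 * Illustrative sketch (not part of the driver): the aligned views above
 * allow a 6-byte MAC compare to be done with two integer compares instead
 * of a byte-wise memcmp. The helper name is hypothetical.
 *
 *	static inline bool dp_mac_addr_is_equal(union dp_align_mac_addr *a,
 *						union dp_align_mac_addr *b)
 *	{
 *		return (a->align4.bytes_abcd == b->align4.bytes_abcd) &&
 *		       (a->align4.bytes_ef == b->align4.bytes_ef);
 *	}
 */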
1280 
1281 /**
1282  * struct dp_ast_free_cb_params - HMWDS free callback cookie
1283  * @mac_addr: ast mac address
1284  * @peer_mac_addr: mac address of peer
1285  * @type: ast entry type
1286  * @vdev_id: vdev_id
1287  * @flags: ast flags
1288  */
1289 struct dp_ast_free_cb_params {
1290 	union dp_align_mac_addr mac_addr;
1291 	union dp_align_mac_addr peer_mac_addr;
1292 	enum cdp_txrx_ast_entry_type type;
1293 	uint8_t vdev_id;
1294 	uint32_t flags;
1295 };
1296 
1297 /*
1298  * dp_ast_entry
1299  *
1300  * @ast_idx: Hardware AST Index
1301  * @peer_id: Next Hop peer_id (for non-WDS nodes, this will point to the
1302  *           peer associated with this MAC address)
1303  * @mac_addr:  MAC Address for this AST entry
1304  * @next_hop: Set to 1 if this is for a WDS node
1305  * @is_active: flag to indicate active data traffic on this node
1306  *             (used for aging out/expiry)
1307  * @ase_list_elem: node in peer AST list
1308  * @is_bss: flag to indicate if entry corresponds to bss peer
1309  * @is_mapped: flag to indicate that we have mapped the AST entry
1310  *             in ast_table
1311  * @pdev_id: pdev ID
1312  * @vdev_id: vdev ID
1313  * @ast_hash_value: hash value in HW
1314  * @ref_cnt: reference count
1315  * @type: flag to indicate type of the entry(static/WDS/MEC)
1316  * @delete_in_progress: Flag to indicate that the delete command was sent to FW
1317  *                      and host is waiting for response from FW
1318  * @callback: ast free/unmap callback
1319  * @cookie: argument to callback
1320  * @hash_list_elem: node in soc AST hash list (mac address used as hash)
1321  */
1322 struct dp_ast_entry {
1323 	uint16_t ast_idx;
1324 	uint16_t peer_id;
1325 	union dp_align_mac_addr mac_addr;
1326 	bool next_hop;
1327 	bool is_active;
1328 	bool is_mapped;
1329 	uint8_t pdev_id;
1330 	uint8_t vdev_id;
1331 	uint16_t ast_hash_value;
1332 	qdf_atomic_t ref_cnt;
1333 	enum cdp_txrx_ast_entry_type type;
1334 	bool delete_in_progress;
1335 	txrx_ast_free_cb callback;
1336 	void *cookie;
1337 	TAILQ_ENTRY(dp_ast_entry) ase_list_elem;
1338 	TAILQ_ENTRY(dp_ast_entry) hash_list_elem;
1339 };
1340 
1341 /*
1342  * dp_mec_entry
1343  *
1344  * @mac_addr:  MAC Address for this MEC entry
1345  * @is_active: flag to indicate active data traffic on this node
1346  *             (used for aging out/expiry)
1347  * @pdev_id: pdev ID
1348  * @vdev_id: vdev ID
1349  * @hash_list_elem: node in soc MEC hash list (mac address used as hash)
1350  */
1351 struct dp_mec_entry {
1352 	union dp_align_mac_addr mac_addr;
1353 	bool is_active;
1354 	uint8_t pdev_id;
1355 	uint8_t vdev_id;
1356 
1357 	TAILQ_ENTRY(dp_mec_entry) hash_list_elem;
1358 };
1359 
1360 /* SOC level htt stats */
1361 struct htt_t2h_stats {
1362 	/* lock to protect htt_stats_msg update */
1363 	qdf_spinlock_t lock;
1364 
1365 	/* work queue to process htt stats */
1366 	qdf_work_t work;
1367 
1368 	/* T2H Ext stats message queue */
1369 	qdf_nbuf_queue_t msg;
1370 
1371 	/* number of completed stats in htt_stats_msg */
1372 	uint32_t num_stats;
1373 };
1374 
1375 struct link_desc_bank {
1376 	void *base_vaddr_unaligned;
1377 	void *base_vaddr;
1378 	qdf_dma_addr_t base_paddr_unaligned;
1379 	qdf_dma_addr_t base_paddr;
1380 	uint32_t size;
1381 };
1382 
1383 struct rx_buff_pool {
1384 	qdf_nbuf_queue_head_t emerg_nbuf_q;
1385 	uint32_t nbuf_fail_cnt;
1386 	bool is_initialized;
1387 };
1388 
1389 struct rx_refill_buff_pool {
1390 	bool is_initialized;
1391 	uint16_t head;
1392 	uint16_t tail;
1393 	struct dp_pdev *dp_pdev;
1394 	uint16_t max_bufq_len;
1395 	qdf_nbuf_t buf_elem[2048];
1396 };
1397 
1398 #ifdef DP_TX_HW_DESC_HISTORY
1399 #define DP_TX_HW_DESC_HIST_MAX 6144
1400 #define DP_TX_HW_DESC_HIST_PER_SLOT_MAX 2048
1401 #define DP_TX_HW_DESC_HIST_MAX_SLOTS 3
1402 #define DP_TX_HW_DESC_HIST_SLOT_SHIFT 11
1403 
1404 struct dp_tx_hw_desc_evt {
1405 	uint8_t tcl_desc[HAL_TX_DESC_LEN_BYTES];
1406 	uint8_t tcl_ring_id;
1407 	uint64_t posted;
1408 	uint32_t hp;
1409 	uint32_t tp;
1410 };
1411 
1412 /* struct dp_tx_hw_desc_history - TX HW desc history
1413  * @index: Index where the last entry is written
1414  * @entry: history entries
1415  */
1416 struct dp_tx_hw_desc_history {
1417 	qdf_atomic_t index;
1418 	uint16_t num_entries_per_slot;
1419 	uint16_t allocated;
1420 	struct dp_tx_hw_desc_evt *entry[DP_TX_HW_DESC_HIST_MAX_SLOTS];
1421 };
1422 #endif
1423 
1424 /*
1425  * enum dp_mon_status_process_event - Events for monitor status buffer record
1426  * @DP_MON_STATUS_BUF_REAP: Monitor status buffer is reaped from ring
1427  * @DP_MON_STATUS_BUF_ENQUEUE: Status buffer is enqueued to local queue
1428  * @DP_MON_STATUS_BUF_DEQUEUE: Status buffer is dequeued from local queue
1429  */
1430 enum dp_mon_status_process_event {
1431 	DP_MON_STATUS_BUF_REAP,
1432 	DP_MON_STATUS_BUF_ENQUEUE,
1433 	DP_MON_STATUS_BUF_DEQUEUE,
1434 };
1435 
1436 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
1437 #define DP_MON_STATUS_HIST_MAX	2048
1438 
1439 /**
1440  * struct dp_mon_stat_info_record - monitor status ring buffer info
1441  * @hbi: HW ring buffer info
1442  * @timestamp: timestamp when this entry was recorded
1443  * @event: event
1444  * @rx_desc: RX descriptor corresponding to the received buffer
1445  * @nbuf: buffer attached to rx_desc, if event is REAP, else the buffer
1446  *	  which was enqueued or dequeued.
1447  * @rx_desc_nbuf_data: nbuf data pointer.
1448  */
1449 struct dp_mon_stat_info_record {
1450 	struct hal_buf_info hbi;
1451 	uint64_t timestamp;
1452 	enum dp_mon_status_process_event event;
1453 	void *rx_desc;
1454 	qdf_nbuf_t nbuf;
1455 	uint8_t *rx_desc_nbuf_data;
1456 };
1457 
1458 /* struct dp_mon_status_ring_history - monitor status ring history
1459  * @index: Index where the last entry is written
1460  * @entry: history entries
1461  */
1462 struct dp_mon_status_ring_history {
1463 	qdf_atomic_t index;
1464 	struct dp_mon_stat_info_record entry[DP_MON_STATUS_HIST_MAX];
1465 };
1466 #endif
1467 
1468 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1469 /*
1470  * The logic for getting the current index of these histories depends on
1471  * this value being a power of 2.
1472  */
1473 #define DP_RX_HIST_MAX 2048
1474 #define DP_RX_ERR_HIST_MAX 2048
1475 #define DP_RX_REINJECT_HIST_MAX 1024
1476 #define DP_RX_REFILL_HIST_MAX 2048
1477 
1478 QDF_COMPILE_TIME_ASSERT(rx_history_size,
1479 			(DP_RX_HIST_MAX &
1480 			 (DP_RX_HIST_MAX - 1)) == 0);
1481 QDF_COMPILE_TIME_ASSERT(rx_err_history_size,
1482 			(DP_RX_ERR_HIST_MAX &
1483 			 (DP_RX_ERR_HIST_MAX - 1)) == 0);
1484 QDF_COMPILE_TIME_ASSERT(rx_reinject_history_size,
1485 			(DP_RX_REINJECT_HIST_MAX &
1486 			 (DP_RX_REINJECT_HIST_MAX - 1)) == 0);
1487 QDF_COMPILE_TIME_ASSERT(rx_refill_history_size,
1488 			(DP_RX_REFILL_HIST_MAX &
1489 			(DP_RX_REFILL_HIST_MAX - 1)) == 0);
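/*
 * Note: (N & (N - 1)) == 0 only holds when N is a power of two, e.g.
 * 2048 & 2047 == 0 while 2047 & 2046 != 0. That property is what lets the
 * history write index wrap with "idx & (SIZE - 1)" instead of a modulo.
 */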
1490 
1491 
1492 /**
1493  * struct dp_buf_info_record - ring buffer info
1494  * @hbi: HW ring buffer info
1495  * @timestamp: timestamp when this entry was recorded
1496  */
1497 struct dp_buf_info_record {
1498 	struct hal_buf_info hbi;
1499 	uint64_t timestamp;
1500 };
1501 
1502 /**
1503  * struct dp_refill_info_record - ring refill buffer info
1504  * @hp: HP value after refill
1505  * @tp: cached tail value during refill
1506  * @num_req: number of buffers requested to refill
1507  * @num_refill: number of buffers refilled to ring
1508  * @timestamp: timestamp when this entry was recorded
1509  */
1510 struct dp_refill_info_record {
1511 	uint32_t hp;
1512 	uint32_t tp;
1513 	uint32_t num_req;
1514 	uint32_t num_refill;
1515 	uint64_t timestamp;
1516 };
1517 
1518 /* struct dp_rx_history - rx ring history
1519  * @index: Index where the last entry is written
1520  * @entry: history entries
1521  */
1522 struct dp_rx_history {
1523 	qdf_atomic_t index;
1524 	struct dp_buf_info_record entry[DP_RX_HIST_MAX];
1525 };
1526 
1527 /* struct dp_rx_err_history - rx err ring history
1528  * @index: Index where the last entry is written
1529  * @entry: history entries
1530  */
1531 struct dp_rx_err_history {
1532 	qdf_atomic_t index;
1533 	struct dp_buf_info_record entry[DP_RX_ERR_HIST_MAX];
1534 };
1535 
1536 /* struct dp_rx_reinject_history - rx reinject ring history
1537  * @index: Index where the last entry is written
1538  * @entry: history entries
1539  */
1540 struct dp_rx_reinject_history {
1541 	qdf_atomic_t index;
1542 	struct dp_buf_info_record entry[DP_RX_REINJECT_HIST_MAX];
1543 };
1544 
1545 /* struct dp_rx_refill_history - rx buf refill history
1546  * @index: Index where the last entry is written
1547  * @entry: history entries
1548  */
1549 struct dp_rx_refill_history {
1550 	qdf_atomic_t index;
1551 	struct dp_refill_info_record entry[DP_RX_REFILL_HIST_MAX];
1552 };
1553 
1554 #endif
1555 
1556 enum dp_tx_event_type {
1557 	DP_TX_DESC_INVAL_EVT = 0,
1558 	DP_TX_DESC_MAP,
1559 	DP_TX_DESC_COOKIE,
1560 	DP_TX_DESC_FLUSH,
1561 	DP_TX_DESC_UNMAP,
1562 	DP_TX_COMP_UNMAP,
1563 	DP_TX_COMP_UNMAP_ERR,
1564 	DP_TX_COMP_MSDU_EXT,
1565 };
1566 
1567 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
1568 /* Size must be a power of 2, for bitwise index rotation */
1569 #define DP_TX_TCL_HISTORY_SIZE 0x4000
1570 #define DP_TX_TCL_HIST_PER_SLOT_MAX 2048
1571 #define DP_TX_TCL_HIST_MAX_SLOTS 8
1572 #define DP_TX_TCL_HIST_SLOT_SHIFT 11
1573 
1574 /* Size must be a power of 2, for bitwise index rotation */
1575 #define DP_TX_COMP_HISTORY_SIZE 0x4000
1576 #define DP_TX_COMP_HIST_PER_SLOT_MAX 2048
1577 #define DP_TX_COMP_HIST_MAX_SLOTS 8
1578 #define DP_TX_COMP_HIST_SLOT_SHIFT 11
1579 
1580 struct dp_tx_desc_event {
1581 	qdf_nbuf_t skb;
1582 	dma_addr_t paddr;
1583 	uint32_t sw_cookie;
1584 	enum dp_tx_event_type type;
1585 	uint64_t ts;
1586 };
1587 
1588 struct dp_tx_tcl_history {
1589 	qdf_atomic_t index;
1590 	uint16_t num_entries_per_slot;
1591 	uint16_t allocated;
1592 	struct dp_tx_desc_event *entry[DP_TX_TCL_HIST_MAX_SLOTS];
1593 };
1594 
1595 struct dp_tx_comp_history {
1596 	qdf_atomic_t index;
1597 	uint16_t num_entries_per_slot;
1598 	uint16_t allocated;
1599 	struct dp_tx_desc_event *entry[DP_TX_COMP_HIST_MAX_SLOTS];
1600 };
1601 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
1602 
1603 /* structure to record recent operation related variables */
1604 struct dp_last_op_info {
1605 	/* last link desc buf info through WBM release ring */
1606 	struct hal_buf_info wbm_rel_link_desc;
1607 	/* last link desc buf info through REO reinject ring */
1608 	struct hal_buf_info reo_reinject_link_desc;
1609 };
1610 
1611 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1612 
1613 /**
1614  * struct dp_swlm_tcl_data - params for tcl register write coalescing
1615  *			     decision making
1616  * @nbuf: TX packet
1617  * @tid: tid for transmitting the current packet
1618  * @num_ll_connections: Number of low latency connections on this vdev
1619  * @ring_id: TCL ring id
1620  * @pkt_len: Packet length
1621  *
1622  * This structure contains the information required by the software
1623  * latency manager to decide on whether to coalesce the current TCL
1624  * register write or not.
1625  */
1626 struct dp_swlm_tcl_data {
1627 	qdf_nbuf_t nbuf;
1628 	uint8_t tid;
1629 	uint8_t num_ll_connections;
1630 	uint8_t ring_id;
1631 	uint32_t pkt_len;
1632 };
1633 
1634 /**
1635  * union swlm_data - SWLM query data
1636  * @tcl_data: data for TCL query in SWLM
1637  */
1638 union swlm_data {
1639 	struct dp_swlm_tcl_data *tcl_data;
1640 };
1641 
1642 /**
1643  * struct dp_swlm_ops - SWLM ops
1644  * @tcl_wr_coalesce_check: handler to check if the current TCL register
1645  *			   write can be coalesced or not
1646  */
1647 struct dp_swlm_ops {
1648 	int (*tcl_wr_coalesce_check)(struct dp_soc *soc,
1649 				     struct dp_swlm_tcl_data *tcl_data);
1650 };
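/*
 * Illustrative sketch (not part of the driver): a minimal coalesce-check
 * hook matching the dp_swlm_ops callback above. The return convention
 * (non-zero meaning "coalesce this TCL HP write") and the policy shown are
 * assumptions for the example.
 *
 *	static int example_tcl_wr_coalesce_check(struct dp_soc *soc,
 *						 struct dp_swlm_tcl_data *td)
 *	{
 *		if (td->num_ll_connections || td->tid == DP_VO_TID)
 *			return 0;
 *		return 1;
 *	}
 */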
1651 
1652 /**
1653  * struct dp_swlm_stats - Stats for Software Latency manager.
1654  * @tcl.timer_flush_success: Num TCL HP writes success from timer context
1655  * @tcl.timer_flush_fail: Num TCL HP writes failure from timer context
1656  * @tcl.tid_fail: Num TCL register write coalescing skips, since the pkt
1657  *		 was being transmitted on a TID above coalescing threshold
1658  * @tcl.sp_frames: Num TCL register write coalescing skips, since the pkt
1659  *		  being transmitted was a special frame
1660  * @tcl.ll_connection: Num TCL register write coalescing skips, since the
1661  *		       vdev has low latency connections
1662  * @tcl.bytes_thresh_reached: Num TCL HP writes flush after the coalescing
1663  *			     bytes threshold was reached
1664  * @tcl.time_thresh_reached: Num TCL HP writes flush after the coalescing
1665  *			    session time expired
1666  * @tcl.tput_criteria_fail: Num TCL HP writes coalescing fails, since the
1667  *			   throughput did not meet session threshold
1668  * @tcl.coalesce_success: Num of TCL HP writes coalesced successfully.
1669  * @tcl.coalesce_fail: Num of TCL HP write coalesce attempts that failed
1670  */
1671 struct dp_swlm_stats {
1672 	struct {
1673 		uint32_t timer_flush_success;
1674 		uint32_t timer_flush_fail;
1675 		uint32_t tid_fail;
1676 		uint32_t sp_frames;
1677 		uint32_t ll_connection;
1678 		uint32_t bytes_thresh_reached;
1679 		uint32_t time_thresh_reached;
1680 		uint32_t tput_criteria_fail;
1681 		uint32_t coalesce_success;
1682 		uint32_t coalesce_fail;
1683 	} tcl[MAX_TCL_DATA_RINGS];
1684 };
1685 
1686 /**
1687  * struct dp_swlm_tcl_params: Parameters based on TCL for different modules
1688  *			      in the Software latency manager.
1689  * @soc: DP soc reference
1690  * @ring_id: TCL ring id
1691  * @flush_timer: Timer for flushing the coalesced TCL HP writes
1692  * @sampling_session_tx_bytes: Num bytes transmitted in the sampling time
1693  * @bytes_flush_thresh: Bytes threshold to flush the TCL HP register write
1694  * @coalesce_end_time: End timestamp for current coalescing session
1695  * @bytes_coalesced: Num bytes coalesced in the current session
1696  * @prev_tx_packets: Previous TX packets accounted
1697  * @prev_tx_bytes: Previous TX bytes accounted
1698  * @prev_rx_bytes: Previous RX bytes accounted
1699  * @expire_time: expiry time for sample
1700  * @tput_pass_cnt: threshold throughput pass counter
1701  */
1702 struct dp_swlm_tcl_params {
1703 	struct dp_soc *soc;
1704 	uint32_t ring_id;
1705 	qdf_timer_t flush_timer;
1706 	uint32_t sampling_session_tx_bytes;
1707 	uint32_t bytes_flush_thresh;
1708 	uint64_t coalesce_end_time;
1709 	uint32_t bytes_coalesced;
1710 	uint32_t prev_tx_packets;
1711 	uint32_t prev_tx_bytes;
1712 	uint32_t prev_rx_bytes;
1713 	uint64_t expire_time;
1714 	uint32_t tput_pass_cnt;
1715 };
1716 
1717 /**
1718  * struct dp_swlm_params: Parameters for different modules in the
1719  *			  Software latency manager.
1720  * @rx_traffic_thresh: Threshold for RX traffic, to begin TCL register
1721  *			   write coalescing
1722  * @tx_traffic_thresh: Threshold for TX traffic, to begin TCL register
1723  *			   write coalescing
1724  * @sampling_time: Sampling time to test the throughput threshold
1725  * @time_flush_thresh: Time threshold to flush the TCL HP register write
1726  * @tx_thresh_multiplier: Multiplier to deduce the bytes threshold after
1727  *			      which the TCL HP register is written, thereby
1728  *			      ending the coalescing.
1729  * @tx_pkt_thresh: Threshold for TX packet count, to begin TCL register
1730  *		       write coalescing
1731  * @tcl: TCL ring specific params
1732  */
1733 
1734 struct dp_swlm_params {
1735 	uint32_t rx_traffic_thresh;
1736 	uint32_t tx_traffic_thresh;
1737 	uint32_t sampling_time;
1738 	uint32_t time_flush_thresh;
1739 	uint32_t tx_thresh_multiplier;
1740 	uint32_t tx_pkt_thresh;
1741 	struct dp_swlm_tcl_params tcl[MAX_TCL_DATA_RINGS];
1742 };
1743 
1744 /**
1745  * struct dp_swlm - Software latency manager context
1746  * @ops: SWLM ops pointers
1747  * @is_enabled: SWLM enabled/disabled
1748  * @is_init: SWLM module initialized
1749  * @stats: SWLM stats
1750  * @params: SWLM SRNG params
1752  */
1753 struct dp_swlm {
1754 	struct dp_swlm_ops *ops;
1755 	uint8_t is_enabled:1,
1756 		is_init:1;
1757 	struct dp_swlm_stats stats;
1758 	struct dp_swlm_params params;
1759 };
1760 #endif
1761 
1762 #ifdef IPA_OFFLOAD
1763 /* IPA uC datapath offload Wlan Tx resources */
1764 struct ipa_dp_tx_rsc {
1765 	/* Resource info to be passed to IPA */
1766 	qdf_dma_addr_t ipa_tcl_ring_base_paddr;
1767 	void *ipa_tcl_ring_base_vaddr;
1768 	uint32_t ipa_tcl_ring_size;
1769 	qdf_dma_addr_t ipa_tcl_hp_paddr;
1770 	uint32_t alloc_tx_buf_cnt;
1771 
1772 	qdf_dma_addr_t ipa_wbm_ring_base_paddr;
1773 	void *ipa_wbm_ring_base_vaddr;
1774 	uint32_t ipa_wbm_ring_size;
1775 	qdf_dma_addr_t ipa_wbm_tp_paddr;
1776 	/* WBM2SW HP shadow paddr */
1777 	qdf_dma_addr_t ipa_wbm_hp_shadow_paddr;
1778 
1779 	/* TX buffers populated into the WBM ring */
1780 	void **tx_buf_pool_vaddr_unaligned;
1781 	qdf_dma_addr_t *tx_buf_pool_paddr_unaligned;
1782 };
1783 
1784 /* IPA uC datapath offload Wlan Rx resources */
1785 struct ipa_dp_rx_rsc {
1786 	/* Resource info to be passed to IPA */
1787 	qdf_dma_addr_t ipa_reo_ring_base_paddr;
1788 	void *ipa_reo_ring_base_vaddr;
1789 	uint32_t ipa_reo_ring_size;
1790 	qdf_dma_addr_t ipa_reo_tp_paddr;
1791 
1792 	/* Resource info to be passed to firmware and IPA */
1793 	qdf_dma_addr_t ipa_rx_refill_buf_ring_base_paddr;
1794 	void *ipa_rx_refill_buf_ring_base_vaddr;
1795 	uint32_t ipa_rx_refill_buf_ring_size;
1796 	qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr;
1797 };
1798 #endif
1799 
1800 struct dp_tx_msdu_info_s;
1801 /*
1802  * enum dp_context_type- DP Context Type
1803  * @DP_CONTEXT_TYPE_SOC: Context type DP SOC
1804  * @DP_CONTEXT_TYPE_PDEV: Context type DP PDEV
1805  * @DP_CONTEXT_TYPE_VDEV: Context type DP VDEV
1806  * @DP_CONTEXT_TYPE_PEER: Context type DP PEER
1807  * @DP_CONTEXT_TYPE_MON_SOC: Context type DP MON SOC
1808  * @DP_CONTEXT_TYPE_MON_PDEV: Context type DP MON PDEV
1809  *
1810  * Helper enums to be used to retrieve the size of the corresponding
1811  * data structure by passing the type.
1812  */
1813 enum dp_context_type {
1814 	DP_CONTEXT_TYPE_SOC,
1815 	DP_CONTEXT_TYPE_PDEV,
1816 	DP_CONTEXT_TYPE_VDEV,
1817 	DP_CONTEXT_TYPE_PEER,
1818 	DP_CONTEXT_TYPE_MON_SOC,
1819 	DP_CONTEXT_TYPE_MON_PDEV
1820 };
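/*
 * Illustrative sketch (not part of the driver): these enum values are
 * passed to the per-target size getters declared in struct dp_arch_ops
 * below, e.g. to size an allocation for a pdev context:
 *
 *	qdf_size_t ctx_size;
 *
 *	ctx_size = soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_PDEV);
 *	pdev = qdf_mem_malloc(ctx_size);
 */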
1821 
1822 /*
1823  * struct dp_arch_ops- DP target specific arch ops
1826  * @tx_hw_enqueue: enqueue TX data to HW
1827  * @tx_comp_get_params_from_hal_desc: get software tx descriptor and release
1828  * 				      source from HAL desc for wbm release ring
1829  * @dp_service_near_full_srngs: Handler for servicing the near full IRQ
1830  * @txrx_set_vdev_param: target specific ops while setting vdev params
1831  * @dp_srng_test_and_update_nf_params: Check if the srng is in near full state
1832  *				and set the near-full params.
1833  * @ipa_get_bank_id: Get TCL bank id used by IPA
1834  */
1835 struct dp_arch_ops {
1836 	/* INIT/DEINIT Arch Ops */
1837 	QDF_STATUS (*txrx_soc_attach)(struct dp_soc *soc,
1838 				      struct cdp_soc_attach_params *params);
1839 	QDF_STATUS (*txrx_soc_detach)(struct dp_soc *soc);
1840 	QDF_STATUS (*txrx_soc_init)(struct dp_soc *soc);
1841 	QDF_STATUS (*txrx_soc_deinit)(struct dp_soc *soc);
1842 	QDF_STATUS (*txrx_soc_srng_alloc)(struct dp_soc *soc);
1843 	QDF_STATUS (*txrx_soc_srng_init)(struct dp_soc *soc);
1844 	void (*txrx_soc_srng_deinit)(struct dp_soc *soc);
1845 	void (*txrx_soc_srng_free)(struct dp_soc *soc);
1846 	QDF_STATUS (*txrx_pdev_attach)(struct dp_pdev *pdev,
1847 				       struct cdp_pdev_attach_params *params);
1848 	QDF_STATUS (*txrx_pdev_detach)(struct dp_pdev *pdev);
1849 	QDF_STATUS (*txrx_vdev_attach)(struct dp_soc *soc,
1850 				       struct dp_vdev *vdev);
1851 	QDF_STATUS (*txrx_vdev_detach)(struct dp_soc *soc,
1852 				       struct dp_vdev *vdev);
1853 	QDF_STATUS (*txrx_peer_map_attach)(struct dp_soc *soc);
1854 	void (*txrx_peer_map_detach)(struct dp_soc *soc);
1855 	QDF_STATUS (*dp_rxdma_ring_sel_cfg)(struct dp_soc *soc);
1856 	void (*soc_cfg_attach)(struct dp_soc *soc);
1857 	void (*peer_get_reo_hash)(struct dp_vdev *vdev,
1858 				  struct cdp_peer_setup_info *setup_info,
1859 				  enum cdp_host_reo_dest_ring *reo_dest,
1860 				  bool *hash_based,
1861 				  uint8_t *lmac_peer_id_msb);
1862 	 bool (*reo_remap_config)(struct dp_soc *soc, uint32_t *remap0,
1863 				  uint32_t *remap1, uint32_t *remap2);
1864 
1865 	/* TX RX Arch Ops */
1866 	QDF_STATUS (*tx_hw_enqueue)(struct dp_soc *soc, struct dp_vdev *vdev,
1867 				    struct dp_tx_desc_s *tx_desc,
1868 				    uint16_t fw_metadata,
1869 				    struct cdp_tx_exception_metadata *metadata,
1870 				    struct dp_tx_msdu_info_s *msdu_info);
1871 
1872 	 void (*tx_comp_get_params_from_hal_desc)(struct dp_soc *soc,
1873 						  void *tx_comp_hal_desc,
1874 						  struct dp_tx_desc_s **desc);
1875 	void (*dp_tx_process_htt_completion)(struct dp_soc *soc,
1876 					     struct dp_tx_desc_s *tx_desc,
1877 					     uint8_t *status,
1878 					     uint8_t ring_id);
1879 
1880 	uint32_t (*dp_rx_process)(struct dp_intr *int_ctx,
1881 				  hal_ring_handle_t hal_ring_hdl,
1882 				  uint8_t reo_ring_num, uint32_t quota);
1883 
1884 	qdf_nbuf_t (*dp_tx_send_fast)(struct cdp_soc_t *soc_hdl,
1885 				      uint8_t vdev_id,
1886 				      qdf_nbuf_t nbuf);
1887 
1888 	QDF_STATUS (*dp_tx_desc_pool_init)(struct dp_soc *soc,
1889 					   uint32_t num_elem,
1890 					   uint8_t pool_id);
1891 	void (*dp_tx_desc_pool_deinit)(
1892 				struct dp_soc *soc,
1893 				struct dp_tx_desc_pool_s *tx_desc_pool,
1894 				uint8_t pool_id);
1895 
1896 	QDF_STATUS (*dp_rx_desc_pool_init)(struct dp_soc *soc,
1897 					   struct rx_desc_pool *rx_desc_pool,
1898 					   uint32_t pool_id);
1899 	void (*dp_rx_desc_pool_deinit)(struct dp_soc *soc,
1900 				       struct rx_desc_pool *rx_desc_pool,
1901 				       uint32_t pool_id);
1902 
1903 	QDF_STATUS (*dp_wbm_get_rx_desc_from_hal_desc)(
1904 						struct dp_soc *soc,
1905 						void *ring_desc,
1906 						struct dp_rx_desc **r_rx_desc);
1907 
1908 	bool
1909 	(*dp_rx_intrabss_handle_nawds)(struct dp_soc *soc,
1910 				       struct dp_txrx_peer *ta_txrx_peer,
1911 				       qdf_nbuf_t nbuf_copy,
1912 				       struct cdp_tid_rx_stats *tid_stats);
1913 
1914 	void (*dp_rx_word_mask_subscribe)(
1915 				struct dp_soc *soc,
1916 				uint32_t *msg_word,
1917 				void *rx_filter);
1918 
1919 	struct dp_rx_desc *(*dp_rx_desc_cookie_2_va)(struct dp_soc *soc,
1920 						     uint32_t cookie);
1921 	uint32_t (*dp_service_near_full_srngs)(struct dp_soc *soc,
1922 					       struct dp_intr *int_ctx,
1923 					       uint32_t dp_budget);
1924 	void (*tx_implicit_rbm_set)(struct dp_soc *soc, uint8_t tx_ring_id,
1925 				    uint8_t bm_id);
1926 	uint16_t (*dp_rx_peer_metadata_peer_id_get)(struct dp_soc *soc,
1927 						    uint32_t peer_metadata);
1928 	bool (*dp_rx_chain_msdus)(struct dp_soc *soc, qdf_nbuf_t nbuf,
1929 				  uint8_t *rx_tlv_hdr, uint8_t mac_id);
1930 	/* Control Arch Ops */
1931 	QDF_STATUS (*txrx_set_vdev_param)(struct dp_soc *soc,
1932 					  struct dp_vdev *vdev,
1933 					  enum cdp_vdev_param_type param,
1934 					  cdp_config_param_type val);
1935 
1936 	/* Misc Arch Ops */
1937 	qdf_size_t (*txrx_get_context_size)(enum dp_context_type);
1938 #ifdef WIFI_MONITOR_SUPPORT
1939 	qdf_size_t (*txrx_get_mon_context_size)(enum dp_context_type);
1940 #endif
1941 	int (*dp_srng_test_and_update_nf_params)(struct dp_soc *soc,
1942 						 struct dp_srng *dp_srng,
1943 						 int *max_reap_limit);
1944 
1945 	/* MLO ops */
1946 #ifdef WLAN_FEATURE_11BE_MLO
1947 #ifdef WLAN_MCAST_MLO
1948 	void (*dp_tx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
1949 				    qdf_nbuf_t nbuf);
1950 	bool (*dp_rx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
1951 				    struct dp_txrx_peer *peer, qdf_nbuf_t nbuf);
1952 #endif
1953 	void (*mlo_peer_find_hash_detach)(struct dp_soc *soc);
1954 	QDF_STATUS (*mlo_peer_find_hash_attach)(struct dp_soc *soc);
1955 	void (*mlo_peer_find_hash_add)(struct dp_soc *soc,
1956 				       struct dp_peer *peer);
1957 	void (*mlo_peer_find_hash_remove)(struct dp_soc *soc,
1958 					  struct dp_peer *peer);
1959 	struct dp_peer *(*mlo_peer_find_hash_find)(struct dp_soc *soc,
1960 						   uint8_t *peer_mac_addr,
1961 						   int mac_addr_is_aligned,
1962 						   enum dp_mod_id mod_id,
1963 						   uint8_t vdev_id);
1964 #endif
1965 	uint64_t (*get_reo_qdesc_addr)(hal_soc_handle_t hal_soc_hdl,
1966 				       uint8_t *dst_ring_desc,
1967 				       uint8_t *buf,
1968 				       struct dp_txrx_peer *peer,
1969 				       unsigned int tid);
1970 	void (*get_rx_hash_key)(struct dp_soc *soc,
1971 				struct cdp_lro_hash_config *lro_hash);
1972 	void (*txrx_print_peer_stats)(struct cdp_peer_stats *peer_stats,
1973 				      enum peer_stats_type stats_type);
1974 	/* Dp peer reorder queue setup */
1975 	QDF_STATUS (*dp_peer_rx_reorder_queue_setup)(struct dp_soc *soc,
1976 						     struct dp_peer *peer,
1977 						     int tid,
1978 						     uint32_t ba_window_size);
1979 	struct dp_peer *(*dp_find_peer_by_destmac)(struct dp_soc *soc,
1980 						   uint8_t *dest_mac_addr,
1981 						   uint8_t vdev_id);
1982 	void (*dp_bank_reconfig)(struct dp_soc *soc, struct dp_vdev *vdev);
1983 
1984 	void (*dp_reconfig_tx_vdev_mcast_ctrl)(struct dp_soc *soc,
1985 					       struct dp_vdev *vdev);
1986 
1987 	void (*dp_cc_reg_cfg_init)(struct dp_soc *soc, bool is_4k_align);
1988 
1989 	QDF_STATUS
1990 	(*dp_tx_compute_hw_delay)(struct dp_soc *soc,
1991 				  struct dp_vdev *vdev,
1992 				  struct hal_tx_completion_status *ts,
1993 				  uint32_t *delay_us);
1994 	void (*print_mlo_ast_stats)(struct dp_soc *soc);
1995 	void (*dp_partner_chips_map)(struct dp_soc *soc,
1996 				     struct dp_peer *peer,
1997 				     uint16_t peer_id);
1998 	void (*dp_partner_chips_unmap)(struct dp_soc *soc,
1999 				       uint16_t peer_id);
2000 
2001 #ifdef IPA_OFFLOAD
2002 	int8_t (*ipa_get_bank_id)(struct dp_soc *soc);
2003 #endif
2004 	void (*dp_txrx_ppeds_rings_status)(struct dp_soc *soc);
2005 	QDF_STATUS (*txrx_soc_ppeds_start)(struct dp_soc *soc);
2006 	void (*txrx_soc_ppeds_stop)(struct dp_soc *soc);
2007 };
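/*
 * The ops above are expected to be populated per target family during soc
 * attach; common DP code then dispatches through them, e.g. (illustrative
 * only, local variable names are placeholders):
 *
 *	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc, fw_metadata,
 *					     tx_exc_metadata, msdu_info);
 */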
2008 
2009 /**
2010  * struct dp_soc_features: Data structure holding the SOC level feature flags.
2011  * @pn_in_reo_dest: PN provided by hardware in the REO destination ring.
2012  * @dmac_cmn_src_rxbuf_ring_enabled: Flag to indicate DMAC mode common Rx
2013  *				     buffer source rings
2014  * @rssi_dbm_conv_support: RSSI to dBm conversion support param.
2015  * @umac_hw_reset_support: UMAC HW reset support
2016  */
2017 struct dp_soc_features {
2018 	uint8_t pn_in_reo_dest:1,
2019 		dmac_cmn_src_rxbuf_ring_enabled:1;
2020 	bool rssi_dbm_conv_support;
2021 	bool umac_hw_reset_support;
2022 };
2023 
2024 enum sysfs_printing_mode {
2025 	PRINTING_MODE_DISABLED = 0,
2026 	PRINTING_MODE_ENABLED
2027 };
2028 
2029 /**
2030  * typedef notify_pre_reset_fw_callback() - callback registered with the
2031  * data path, invoked as part of pre-reset (UMAC HW reset) handling
2032  */
2033 
2034 typedef void (*notify_pre_reset_fw_callback)(struct dp_soc *soc);
2035 
2036 #ifdef WLAN_SYSFS_DP_STATS
2037 /**
2038  * struct sysfs_stats_config: Data structure holding stats sysfs config.
2039  * @rw_stats_lock: Lock to read and write to stat_type and pdev_id.
2040  * @sysfs_read_lock: Lock held while another stat req is being executed.
2041  * @sysfs_write_user_buffer: Lock to change buff len, max buf len
2042  * and *buf.
2043  * @sysfs_txrx_fw_request_done: Event to wait for firmware response.
2044  * @stat_type_requested: stat type requested.
2045  * @mac_id: mac id for which the stat type is requested.
2046  * @printing_mode: Indicates whether a print should go through.
2047  * @process_id: Process allowed to write to buffer.
2048  * @curr_buffer_length: Curr length of buffer written
2049  * @max_buffer_length: Max buffer length.
2050  * @buf: Sysfs buffer.
2051  */
2052 struct sysfs_stats_config {
2053 	/* lock held to read stats */
2054 	qdf_spinlock_t rw_stats_lock;
2055 	qdf_mutex_t sysfs_read_lock;
2056 	qdf_spinlock_t sysfs_write_user_buffer;
2057 	qdf_event_t sysfs_txrx_fw_request_done;
2058 	uint32_t stat_type_requested;
2059 	uint32_t mac_id;
2060 	enum sysfs_printing_mode printing_mode;
2061 	int process_id;
2062 	uint16_t curr_buffer_length;
2063 	uint16_t max_buffer_length;
2064 	char *buf;
2065 };
2066 #endif
2067 
2068 /* SOC level structure for data path */
2069 struct dp_soc {
2070 	/**
2071 	 * re-use memory section starts
2072 	 */
2073 
2074 	/* Common base structure - Should be the first member */
2075 	struct cdp_soc_t cdp_soc;
2076 
2077 	/* SoC Obj */
2078 	struct cdp_ctrl_objmgr_psoc *ctrl_psoc;
2079 
2080 	/* OS device abstraction */
2081 	qdf_device_t osdev;
2082 
2083 	/*cce disable*/
2084 	bool cce_disable;
2085 
2086 	/* WLAN config context */
2087 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx;
2088 
2089 	/* HTT handle for host-fw interaction */
2090 	struct htt_soc *htt_handle;
2091 
2092 	/* Common init done */
2093 	qdf_atomic_t cmn_init_done;
2094 
2095 	/* Opaque hif handle */
2096 	struct hif_opaque_softc *hif_handle;
2097 
2098 	/* PDEVs on this SOC */
2099 	struct dp_pdev *pdev_list[MAX_PDEV_CNT];
2100 
2101 	/* Ring used to replenish rx buffers (maybe to the firmware or MAC) */
2102 	struct dp_srng rx_refill_buf_ring[MAX_PDEV_CNT];
2103 
2104 	struct dp_srng rxdma_mon_desc_ring[MAX_NUM_LMAC_HW];
2105 
2106 	/* RXDMA error destination ring */
2107 	struct dp_srng rxdma_err_dst_ring[MAX_NUM_LMAC_HW];
2108 
2109 	/* RXDMA monitor buffer replenish ring */
2110 	struct dp_srng rxdma_mon_buf_ring[MAX_NUM_LMAC_HW];
2111 
2112 	/* RXDMA monitor destination ring */
2113 	struct dp_srng rxdma_mon_dst_ring[MAX_NUM_LMAC_HW];
2114 
2115 	/* RXDMA monitor status ring. TBD: Check format of this ring */
2116 	struct dp_srng rxdma_mon_status_ring[MAX_NUM_LMAC_HW];
2117 
2118 	/* Number of PDEVs */
2119 	uint8_t pdev_count;
2120 
2121 	/*ast override support in HW*/
2122 	bool ast_override_support;
2123 
2124 	/*number of hw dscp tid map*/
2125 	uint8_t num_hw_dscp_tid_map;
2126 
2127 	/* HAL SOC handle */
2128 	hal_soc_handle_t hal_soc;
2129 
2130 	/* rx monitor pkt tlv size */
2131 	uint16_t rx_mon_pkt_tlv_size;
2132 	/* rx pkt tlv size */
2133 	uint16_t rx_pkt_tlv_size;
2134 
2135 	struct dp_arch_ops arch_ops;
2136 
2137 	/* Device ID coming from Bus sub-system */
2138 	uint32_t device_id;
2139 
2140 	/* Link descriptor pages */
2141 	struct qdf_mem_multi_page_t link_desc_pages;
2142 
2143 	/* total link descriptors for regular RX and TX */
2144 	uint32_t total_link_descs;
2145 
2146 	/* Link descriptor Idle list for HW internal use (SRNG mode) */
2147 	struct dp_srng wbm_idle_link_ring;
2148 
2149 	/* Link descriptor Idle list for HW internal use (scatter buffer mode)
2150 	 */
2151 	qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS];
2152 	void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS];
2153 	uint32_t num_scatter_bufs;
2154 
2155 	/* Tx SW descriptor pool */
2156 	struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS];
2157 
2158 	/* Tx MSDU Extension descriptor pool */
2159 	struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS];
2160 
2161 	/* Tx TSO descriptor pool */
2162 	struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS];
2163 
2164 	/* Tx TSO Num of segments pool */
2165 	struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS];
2166 
2167 	/* REO destination rings */
2168 	struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS];
2169 
2170 	/* REO exception ring - See if we should combine this with reo_dest_ring */
2171 	struct dp_srng reo_exception_ring;
2172 
2173 	/* REO reinjection ring */
2174 	struct dp_srng reo_reinject_ring;
2175 
2176 	/* REO command ring */
2177 	struct dp_srng reo_cmd_ring;
2178 
2179 	/* REO command status ring */
2180 	struct dp_srng reo_status_ring;
2181 
2182 	/* WBM Rx release ring */
2183 	struct dp_srng rx_rel_ring;
2184 
2185 	/* TCL data ring */
2186 	struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS];
2187 
2188 	/* Number of Tx comp rings */
2189 	uint8_t num_tx_comp_rings;
2190 
2191 	/* Number of TCL data rings */
2192 	uint8_t num_tcl_data_rings;
2193 
2194 	/* TCL CMD_CREDIT ring */
2195 	bool init_tcl_cmd_cred_ring;
2196 
2197 	/* It is used as credit based ring on QCN9000 else command ring */
2198 	struct dp_srng tcl_cmd_credit_ring;
2199 
2200 	/* TCL command status ring */
2201 	struct dp_srng tcl_status_ring;
2202 
2203 	/* WBM Tx completion rings */
2204 	struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS];
2205 
2206 	/* Common WBM link descriptor release ring (SW to WBM) */
2207 	struct dp_srng wbm_desc_rel_ring;
2208 
2209 	/* DP Interrupts */
2210 	struct dp_intr intr_ctx[WLAN_CFG_INT_NUM_CONTEXTS];
2211 
2212 	/* Monitor mode mac id to dp_intr_id map */
2213 	int mon_intr_id_lmac_map[MAX_NUM_LMAC_HW];
2214 	/* Rx SW descriptor pool for RXDMA monitor buffer */
2215 	struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS];
2216 
2217 	/* Rx SW descriptor pool for RXDMA status buffer */
2218 	struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS];
2219 
2220 	/* Rx SW descriptor pool for RXDMA buffer */
2221 	struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS];
2222 
2223 	/* Number of REO destination rings */
2224 	uint8_t num_reo_dest_rings;
2225 
2226 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2227 	/* lock to control access to soc TX descriptors */
2228 	qdf_spinlock_t flow_pool_array_lock;
2229 
2230 	/* pause callback to pause TX queues as per flow control */
2231 	tx_pause_callback pause_cb;
2232 
2233 	/* flow pool related statistics */
2234 	struct dp_txrx_pool_stats pool_stats;
2235 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
2236 
2237 	notify_pre_reset_fw_callback notify_fw_callback;
2238 
2239 	unsigned long service_rings_running;
2240 
2241 	uint32_t wbm_idle_scatter_buf_size;
2242 
2243 	/* VDEVs on this SOC */
2244 	struct dp_vdev *vdev_id_map[MAX_VDEV_CNT];
2245 
2246 	/* Tx H/W queues lock */
2247 	qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];
2248 
2249 	/* Tx ring map for interrupt processing */
2250 	uint8_t tx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
2251 
2252 	/* Rx ring map for interrupt processing */
2253 	uint8_t rx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
2254 
2255 	/* peer ID to peer object map (array of pointers to peer objects) */
2256 	struct dp_peer **peer_id_to_obj_map;
2257 
2258 	struct {
2259 		unsigned mask;
2260 		unsigned idx_bits;
2261 		TAILQ_HEAD(, dp_peer) * bins;
2262 	} peer_hash;
2263 
2264 	/* rx defrag state - TBD: do we need this per radio? */
2265 	struct {
2266 		struct {
2267 			TAILQ_HEAD(, dp_rx_tid_defrag) waitlist;
2268 			uint32_t timeout_ms;
2269 			uint32_t next_flush_ms;
2270 			qdf_spinlock_t defrag_lock;
2271 		} defrag;
2272 		struct {
2273 			int defrag_timeout_check;
2274 			int dup_check;
2275 		} flags;
2276 		TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list;
2277 		qdf_spinlock_t reo_cmd_lock;
2278 	} rx;
2279 
2280 	/* optional rx processing function */
2281 	void (*rx_opt_proc)(
2282 		struct dp_vdev *vdev,
2283 		struct dp_peer *peer,
2284 		unsigned tid,
2285 		qdf_nbuf_t msdu_list);
2286 
2287 	/* pool addr for mcast enhance buff */
2288 	struct {
2289 		int size;
2290 		uint32_t paddr;
2291 		uint32_t *vaddr;
2292 		struct dp_tx_me_buf_t *freelist;
2293 		int buf_in_use;
2294 		qdf_dma_mem_context(memctx);
2295 	} me_buf;
2296 
2297 	/* Protect peer hash table */
2298 	DP_MUTEX_TYPE peer_hash_lock;
2299 	/* Protect peer_id_to_objmap */
2300 	DP_MUTEX_TYPE peer_map_lock;
2301 
2302 	/* maximum number of supported peers */
2303 	uint32_t max_peers;
2304 	/* maximum value for peer_id */
2305 	uint32_t max_peer_id;
2306 
2307 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
2308 	uint32_t peer_id_shift;
2309 	uint32_t peer_id_mask;
2310 #endif
2311 
2312 	/* SoC level data path statistics */
2313 	struct dp_soc_stats stats;
2314 #ifdef WLAN_SYSFS_DP_STATS
2315 	/* sysfs config for DP stats */
2316 	struct sysfs_stats_config *sysfs_config;
2317 #endif
2318 	/* timestamp to keep track of msdu buffers received on reo err ring */
2319 	uint64_t rx_route_err_start_pkt_ts;
2320 
2321 	/* Num RX Route err in a given window to keep track of rate of errors */
2322 	uint32_t rx_route_err_in_window;
2323 
2324 	/* Enable processing of Tx completion status words */
2325 	bool process_tx_status;
2326 	bool process_rx_status;
2327 	struct dp_ast_entry **ast_table;
2328 	struct {
2329 		unsigned mask;
2330 		unsigned idx_bits;
2331 		TAILQ_HEAD(, dp_ast_entry) * bins;
2332 	} ast_hash;
2333 
2334 #ifdef DP_TX_HW_DESC_HISTORY
2335 	struct dp_tx_hw_desc_history tx_hw_desc_history;
2336 #endif
2337 
2338 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
2339 	struct dp_rx_history *rx_ring_history[MAX_REO_DEST_RINGS];
2340 	struct dp_rx_refill_history *rx_refill_ring_history[MAX_PDEV_CNT];
2341 	struct dp_rx_err_history *rx_err_ring_history;
2342 	struct dp_rx_reinject_history *rx_reinject_ring_history;
2343 #endif
2344 
2345 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
2346 	struct dp_mon_status_ring_history *mon_status_ring_history;
2347 #endif
2348 
2349 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
2350 	struct dp_tx_tcl_history tx_tcl_history;
2351 	struct dp_tx_comp_history tx_comp_history;
2352 #endif
2353 
2354 	qdf_spinlock_t ast_lock;
2355 	/*Timer for AST entry ageout maintenance */
2356 	qdf_timer_t ast_aging_timer;
2357 
2358 	/*Timer counter for WDS AST entry ageout*/
2359 	uint8_t wds_ast_aging_timer_cnt;
2360 	bool pending_ageout;
2361 	bool ast_offload_support;
2362 	bool host_ast_db_enable;
2363 	uint32_t max_ast_ageout_count;
2364 	uint8_t eapol_over_control_port;
2365 
2366 	uint8_t sta_mode_search_policy;
2367 	qdf_timer_t lmac_reap_timer;
2368 	uint8_t lmac_timer_init;
2369 	qdf_timer_t int_timer;
2370 	uint8_t intr_mode;
2371 	uint8_t lmac_polled_mode;
2372 
2373 	qdf_list_t reo_desc_freelist;
2374 	qdf_spinlock_t reo_desc_freelist_lock;
2375 
2376 	/* htt stats */
2377 	struct htt_t2h_stats htt_stats;
2378 
2379 	void *external_txrx_handle; /* External data path handle */
2380 #ifdef IPA_OFFLOAD
2381 	struct ipa_dp_tx_rsc ipa_uc_tx_rsc;
2382 #ifdef IPA_WDI3_TX_TWO_PIPES
2383 	/* Resources for the alternative IPA TX pipe */
2384 	struct ipa_dp_tx_rsc ipa_uc_tx_rsc_alt;
2385 #endif
2386 
2387 	struct ipa_dp_rx_rsc ipa_uc_rx_rsc;
2388 #ifdef IPA_WDI3_VLAN_SUPPORT
2389 	struct ipa_dp_rx_rsc ipa_uc_rx_rsc_alt;
2390 #endif
2391 	qdf_atomic_t ipa_pipes_enabled;
2392 	bool ipa_first_tx_db_access;
2393 	qdf_spinlock_t ipa_rx_buf_map_lock;
2394 	bool ipa_rx_buf_map_lock_initialized;
2395 	uint8_t ipa_reo_ctx_lock_required[MAX_REO_DEST_RINGS];
2396 #endif
2397 
2398 #ifdef WLAN_FEATURE_STATS_EXT
2399 	struct {
2400 		uint32_t rx_mpdu_received;
2401 		uint32_t rx_mpdu_missed;
2402 	} ext_stats;
2403 	qdf_event_t rx_hw_stats_event;
2404 	qdf_spinlock_t rx_hw_stats_lock;
2405 	bool is_last_stats_ctx_init;
2406 #endif /* WLAN_FEATURE_STATS_EXT */
2407 
2408 	/* Indicates HTT map/unmap versions*/
2409 	uint8_t peer_map_unmap_versions;
2410 	/* Per peer per Tid ba window size support */
2411 	uint8_t per_tid_basize_max_tid;
2412 	/* Soc level flag to enable da_war */
2413 	uint8_t da_war_enabled;
2414 	/* number of active ast entries */
2415 	uint32_t num_ast_entries;
2416 	/* peer extended rate statistics context at soc level*/
2417 	struct cdp_soc_rate_stats_ctx *rate_stats_ctx;
2418 	/* peer extended rate statistics control flag */
2419 	bool peerstats_enabled;
2420 
2421 	/* 8021p PCP-TID map values */
2422 	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
2423 	/* TID map priority value */
2424 	uint8_t tidmap_prty;
2425 	/* Pointer to global per ring type specific configuration table */
2426 	struct wlan_srng_cfg *wlan_srng_cfg;
2427 	/* Num Tx outstanding on device */
2428 	qdf_atomic_t num_tx_outstanding;
2429 	/* Num Tx exception on device */
2430 	qdf_atomic_t num_tx_exception;
2431 	/* Num Tx allowed */
2432 	uint32_t num_tx_allowed;
2433 	/* Preferred HW mode */
2434 	uint8_t preferred_hw_mode;
2435 
2436 	/**
2437 	 * Flag to indicate whether WAR to address single cache entry
2438 	 * invalidation bug is enabled or not
2439 	 */
2440 	bool is_rx_fse_full_cache_invalidate_war_enabled;
2441 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
2442 	/**
2443 	 * Pointer to DP RX Flow FST at SOC level if
2444 	 * is_rx_flow_search_table_per_pdev is false
2445 	 * TBD: rx_fst[num_macs] if we decide to have per mac FST
2446 	 */
2447 	struct dp_rx_fst *rx_fst;
2448 #ifdef WLAN_SUPPORT_RX_FISA
2449 	uint8_t fisa_enable;
2450 	uint8_t fisa_lru_del_enable;
2451 	/**
2452 	 * Params used for controlling the fisa aggregation dynamically
2453 	 */
2454 	struct {
2455 		qdf_atomic_t skip_fisa;
2456 		uint8_t fisa_force_flush[MAX_REO_DEST_RINGS];
2457 	} skip_fisa_param;
2458 
2459 	/**
2460 	 * CMEM address and size for FST in CMEM, This is the address
2461 	 * shared during init time.
2462 	 */
2463 	uint64_t fst_cmem_base;
2464 	uint64_t fst_cmem_size;
2465 #endif
2466 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
2467 	/* SG supported for msdu continued packets from wbm release ring */
2468 	bool wbm_release_desc_rx_sg_support;
2469 	bool peer_map_attach_success;
2470 	/* Flag to disable mac1 ring interrupts */
2471 	bool disable_mac1_intr;
2472 	/* Flag to disable mac2 ring interrupts */
2473 	bool disable_mac2_intr;
2474 
2475 	struct {
2476 		/* 1st msdu in sg for msdu continued packets in wbm rel ring */
2477 		bool wbm_is_first_msdu_in_sg;
2478 		/* Wbm sg list head */
2479 		qdf_nbuf_t wbm_sg_nbuf_head;
2480 		/* Wbm sg list tail */
2481 		qdf_nbuf_t wbm_sg_nbuf_tail;
2482 		uint32_t wbm_sg_desc_msdu_len;
2483 	} wbm_sg_param;
2484 	/* Number of msdu exception descriptors */
2485 	uint32_t num_msdu_exception_desc;
2486 
2487 	/* RX buffer params */
2488 	struct rx_buff_pool rx_buff_pool[MAX_PDEV_CNT];
2489 	struct rx_refill_buff_pool rx_refill_buff_pool;
2490 	/* Save recent operation related variable */
2491 	struct dp_last_op_info last_op_info;
2492 	TAILQ_HEAD(, dp_peer) inactive_peer_list;
2493 	qdf_spinlock_t inactive_peer_list_lock;
2494 	TAILQ_HEAD(, dp_vdev) inactive_vdev_list;
2495 	qdf_spinlock_t inactive_vdev_list_lock;
2496 	/* lock to protect vdev_id_map table*/
2497 	qdf_spinlock_t vdev_map_lock;
2498 
2499 	/* Flow Search Table is in CMEM */
2500 	bool fst_in_cmem;
2501 
2502 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
2503 	struct dp_swlm swlm;
2504 #endif
2505 
2506 #ifdef FEATURE_RUNTIME_PM
2507 	/* DP Rx timestamp */
2508 	qdf_time_t rx_last_busy;
2509 	/* Dp runtime refcount */
2510 	qdf_atomic_t dp_runtime_refcount;
2511 	/* Dp tx pending count in RTPM */
2512 	qdf_atomic_t tx_pending_rtpm;
2513 #endif
2514 	/* Invalid buffer that allocated for RX buffer */
2515 	qdf_nbuf_queue_t invalid_buf_queue;
2516 
2517 #ifdef FEATURE_MEC
2518 	/** @mec_lock: spinlock for MEC table */
2519 	qdf_spinlock_t mec_lock;
2520 	/** @mec_cnt: number of active mec entries */
2521 	qdf_atomic_t mec_cnt;
2522 	struct {
2523 		/** @mask: mask bits */
2524 		uint32_t mask;
2525 		/** @idx_bits: index to shift bits */
2526 		uint32_t idx_bits;
2527 		/** @bins: MEC table */
2528 		TAILQ_HEAD(, dp_mec_entry) * bins;
2529 	} mec_hash;
2530 #endif
2531 
2532 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
2533 	qdf_list_t reo_desc_deferred_freelist;
2534 	qdf_spinlock_t reo_desc_deferred_freelist_lock;
2535 	bool reo_desc_deferred_freelist_init;
2536 #endif
2537 	/* BM id for first WBM2SW  ring */
2538 	uint32_t wbm_sw0_bm_id;
2539 
2540 	/* Store arch_id from device_id */
2541 	uint16_t arch_id;
2542 
2543 	/* link desc ID start per device type */
2544 	uint32_t link_desc_id_start;
2545 
2546 	/* CMEM buffer target reserved for host usage */
2547 	uint64_t cmem_base;
2548 	/* CMEM size in bytes */
2549 	uint64_t cmem_total_size;
2550 	/* CMEM free size in bytes */
2551 	uint64_t cmem_avail_size;
2552 
2553 	/* SOC level feature flags */
2554 	struct dp_soc_features features;
2555 
2556 #ifdef WIFI_MONITOR_SUPPORT
2557 	struct dp_mon_soc *monitor_soc;
2558 #endif
2559 	uint8_t rxdma2sw_rings_not_supported:1,
2560 		wbm_sg_last_msdu_war:1,
2561 		mec_fw_offload:1,
2562 		multi_peer_grp_cmd_supported:1;
2563 
2564 	/* Number of Rx refill rings */
2565 	uint8_t num_rx_refill_buf_rings;
2566 #ifdef FEATURE_RUNTIME_PM
2567 	/* flag to indicate vote for runtime_pm for high tput case */
2568 	qdf_atomic_t rtpm_high_tput_flag;
2569 #endif
2570 	/* Buffer manager ID for idle link descs */
2571 	uint8_t idle_link_bm_id;
2572 	qdf_atomic_t ref_count;
2573 
2574 	unsigned long vdev_stats_id_map;
2575 	bool txmon_hw_support;
2576 
2577 #ifdef DP_UMAC_HW_RESET_SUPPORT
2578 	struct dp_soc_umac_reset_ctx umac_reset_ctx;
2579 #endif
2580 	/* PPDU to link_id mapping parameters */
2581 	uint8_t link_id_offset;
2582 	uint8_t link_id_bits;
2583 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
2584 	/* A flag used to decide whether to switch the rx link speed */
2585 	bool high_throughput;
2586 #endif
2587 	bool is_tx_pause;
2588 };
2589 
2590 #ifdef IPA_OFFLOAD
2591 /**
2592  * struct dp_ipa_resources - Resources needed for IPA
2593  */
2594 struct dp_ipa_resources {
2595 	qdf_shared_mem_t tx_ring;
2596 	uint32_t tx_num_alloc_buffer;
2597 
2598 	qdf_shared_mem_t tx_comp_ring;
2599 	qdf_shared_mem_t rx_rdy_ring;
2600 	qdf_shared_mem_t rx_refill_ring;
2601 
2602 	/* IPA UC doorbell registers paddr */
2603 	qdf_dma_addr_t tx_comp_doorbell_paddr;
2604 	uint32_t *tx_comp_doorbell_vaddr;
2605 	qdf_dma_addr_t rx_ready_doorbell_paddr;
2606 
2607 	bool is_db_ddr_mapped;
2608 
2609 #ifdef IPA_WDI3_TX_TWO_PIPES
2610 	qdf_shared_mem_t tx_alt_ring;
2611 	uint32_t tx_alt_ring_num_alloc_buffer;
2612 	qdf_shared_mem_t tx_alt_comp_ring;
2613 
2614 	/* IPA UC doorbell registers paddr */
2615 	qdf_dma_addr_t tx_alt_comp_doorbell_paddr;
2616 	uint32_t *tx_alt_comp_doorbell_vaddr;
2617 #endif
2618 #ifdef IPA_WDI3_VLAN_SUPPORT
2619 	qdf_shared_mem_t rx_alt_rdy_ring;
2620 	qdf_shared_mem_t rx_alt_refill_ring;
2621 	qdf_dma_addr_t rx_alt_ready_doorbell_paddr;
2622 #endif
2623 };
2624 #endif
2625 
2626 #define MAX_RX_MAC_RINGS 2
2627 /* Same as NAC_MAX_CLENT */
2628 #define DP_NAC_MAX_CLIENT  24
2629 
2630 /*
2631  * 24 bits cookie size
2632  * 10 bits page id 0 ~ 1023 for MCL
2633  * 3 bits page id 0 ~ 7 for WIN
2634  * WBM Idle List Desc size = 128,
2635  * Num descs per page = 4096/128 = 32 for MCL
2636  * Num descs per page = 2MB/128 = 16384 for WIN
2637  */
2638 /*
2639  * Macros to setup link descriptor cookies - for link descriptors, we just
2640  * need first 3 bits to store bank/page ID for WIN. The
2641  * remaining bits will be used to set a unique ID, which will
2642  * be useful in debugging
2643  */
2644 #ifdef MAX_ALLOC_PAGE_SIZE
2645 #if PAGE_SIZE == 4096
2646 #define LINK_DESC_PAGE_ID_MASK  0x007FE0
2647 #define LINK_DESC_ID_SHIFT      5
2648 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x8000
2649 #elif PAGE_SIZE == 65536
2650 #define LINK_DESC_PAGE_ID_MASK  0x007E00
2651 #define LINK_DESC_ID_SHIFT      9
2652 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x800
2653 #else
2654 #error "Unsupported kernel PAGE_SIZE"
2655 #endif
2656 #define LINK_DESC_COOKIE(_desc_id, _page_id, _desc_id_start) \
2657 	((((_page_id) + (_desc_id_start)) << LINK_DESC_ID_SHIFT) | (_desc_id))
2658 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
2659 	(((_cookie) & LINK_DESC_PAGE_ID_MASK) >> LINK_DESC_ID_SHIFT)
2660 #else
2661 #define LINK_DESC_PAGE_ID_MASK  0x7
2662 #define LINK_DESC_ID_SHIFT      3
2663 #define LINK_DESC_COOKIE(_desc_id, _page_id, _desc_id_start) \
2664 	((((_desc_id) + (_desc_id_start)) << LINK_DESC_ID_SHIFT) | (_page_id))
2665 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
2666 	((_cookie) & LINK_DESC_PAGE_ID_MASK)
2667 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x8000
2668 #endif
2669 #define LINK_DESC_ID_START_20_BITS_COOKIE 0x4000
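/*
 * Worked example for the default (non-MAX_ALLOC_PAGE_SIZE) variant above,
 * purely illustrative: with _desc_id_start = LINK_DESC_ID_START_21_BITS_COOKIE
 * (0x8000), _desc_id = 5 and _page_id = 2,
 *
 *	LINK_DESC_COOKIE(5, 2, 0x8000) = ((5 + 0x8000) << 3) | 2 = 0x4002a
 *	LINK_DESC_COOKIE_PAGE_ID(0x4002a) = 0x4002a & 0x7 = 2
 *
 * i.e. the page id lives in the low LINK_DESC_ID_SHIFT bits and the unique
 * descriptor id (offset by the id start) in the upper bits.
 */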
2670 
2671 /* same as ieee80211_nac_param */
2672 enum dp_nac_param_cmd {
2673 	/* IEEE80211_NAC_PARAM_ADD */
2674 	DP_NAC_PARAM_ADD = 1,
2675 	/* IEEE80211_NAC_PARAM_DEL */
2676 	DP_NAC_PARAM_DEL,
2677 	/* IEEE80211_NAC_PARAM_LIST */
2678 	DP_NAC_PARAM_LIST,
2679 };
2680 
2681 /**
2682  * struct dp_neighbour_peer - neighbour peer list type for smart mesh
2683  * @neighbour_peers_macaddr: neighbour peer's mac address
2684  * @neighbour_peer_list_elem: neighbour peer list TAILQ element
2685  * @ast_entry: ast_entry for neighbour peer
2686  * @rssi: rssi value
2687  */
2688 struct dp_neighbour_peer {
2689 	/* MAC address of neighbour's peer */
2690 	union dp_align_mac_addr neighbour_peers_macaddr;
2691 	struct dp_vdev *vdev;
2692 	struct dp_ast_entry *ast_entry;
2693 	uint8_t rssi;
2694 	/* node in the list of neighbour's peer */
2695 	TAILQ_ENTRY(dp_neighbour_peer) neighbour_peer_list_elem;
2696 };
2697 
2698 #ifdef WLAN_TX_PKT_CAPTURE_ENH
2699 #define WLAN_TX_PKT_CAPTURE_ENH 1
2700 #define DP_TX_PPDU_PROC_THRESHOLD 8
2701 #define DP_TX_PPDU_PROC_TIMEOUT 10
2702 #endif
2703 
2704 /**
2705  * struct ppdu_info - PPDU Status info descriptor
2706  * @ppdu_id: Unique ppduid assigned by firmware for every tx packet
2707  * @sched_cmdid: schedule command id, which will be same in a burst
2708  * @max_ppdu_id: wrap around for ppdu id
2709  * @last_tlv_cnt: Keep track for missing ppdu tlvs
2710  * @last_user: last ppdu processed for user
2711  * @is_ampdu: set if Ampdu aggregate
2712  * @nbuf: ppdu descriptor payload
2713  * @ppdu_desc: ppdu descriptor
2714  * @ppdu_info_list_elem: linked list of ppdu tlvs
2715  * @ppdu_info_queue_elem: Singly linked list (queue) of ppdu tlvs
2716  * @compltn_common_tlv: Successful tlv counter from COMPLTN COMMON tlv
2717  * @ack_ba_tlv: Successful tlv counter from ACK BA tlv
2718  */
2719 struct ppdu_info {
2720 	uint32_t ppdu_id;
2721 	uint32_t sched_cmdid;
2722 	uint32_t max_ppdu_id;
2723 	uint32_t tsf_l32;
2724 	uint16_t tlv_bitmap;
2725 	uint16_t last_tlv_cnt;
2726 	uint16_t last_user:8,
2727 		 is_ampdu:1;
2728 	qdf_nbuf_t nbuf;
2729 	struct cdp_tx_completion_ppdu *ppdu_desc;
2730 #ifdef WLAN_TX_PKT_CAPTURE_ENH
2731 	union {
2732 		TAILQ_ENTRY(ppdu_info) ppdu_info_dlist_elem;
2733 		STAILQ_ENTRY(ppdu_info) ppdu_info_slist_elem;
2734 	} ulist;
2735 #define ppdu_info_list_elem ulist.ppdu_info_dlist_elem
2736 #define ppdu_info_queue_elem ulist.ppdu_info_slist_elem
2737 #else
2738 	TAILQ_ENTRY(ppdu_info) ppdu_info_list_elem;
2739 #endif
2740 	uint8_t compltn_common_tlv;
2741 	uint8_t ack_ba_tlv;
2742 	bool done;
2743 };
2744 
2745 /**
2746  * struct msdu_completion_info - wbm msdu completion info
2747  * @ppdu_id            - Unique ppduid assigned by firmware for every tx packet
2748  * @peer_id            - peer_id
2749  * @tid                - tid which used during transmit
2750  * @first_msdu         - first msdu indication
2751  * @last_msdu          - last msdu indication
2752  * @msdu_part_of_amsdu - msdu part of amsdu
2753  * @transmit_cnt       - retried count
2754  * @status             - transmit status
2755  * @tsf                - timestamp at which the frame was transmitted
2756  */
2757 struct msdu_completion_info {
2758 	uint32_t ppdu_id;
2759 	uint16_t peer_id;
2760 	uint8_t tid;
2761 	uint8_t first_msdu:1,
2762 		last_msdu:1,
2763 		msdu_part_of_amsdu:1;
2764 	uint8_t transmit_cnt;
2765 	uint8_t status;
2766 	uint32_t tsf;
2767 };
2768 
2769 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
2770 struct rx_protocol_tag_map {
2771 	/* This is the user configured tag for the said protocol type */
2772 	uint16_t tag;
2773 };
2774 
2775 /**
2776  * struct rx_protocol_tag_stats - protocol statistics
2777  * @tag_ctr: number of rx msdus matching this tag
2779  */
2780 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
2781 struct rx_protocol_tag_stats {
2782 	uint32_t tag_ctr;
2783 };
2784 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
2785 
2786 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
2787 
2788 #ifdef WLAN_RX_PKT_CAPTURE_ENH
2789 /* Template data to be set for Enhanced RX Monitor packets */
2790 #define RX_MON_CAP_ENH_TRAILER 0xdeadc0dedeadda7a
2791 
2792 /**
2793  * struct dp_rx_mon_enh_trailer_data - Data structure to set a known pattern
2794  * at end of each MSDU in monitor-lite mode
2795  * @reserved1: reserved for future use
2796  * @reserved2: reserved for future use
2797  * @flow_tag: flow tag value read from skb->cb
2798  * @protocol_tag: protocol tag value read from skb->cb
2799  */
2800 struct dp_rx_mon_enh_trailer_data {
2801 	uint16_t reserved1;
2802 	uint16_t reserved2;
2803 	uint16_t flow_tag;
2804 	uint16_t protocol_tag;
2805 };
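/*
 * Illustrative sketch (not part of the driver): a consumer of monitor-lite
 * MSDUs could overlay this trailer on the last bytes of the frame to pick
 * up the tags; the rx monitor path is responsible for actually appending
 * and populating it (see RX_MON_CAP_ENH_TRAILER above):
 *
 *	struct dp_rx_mon_enh_trailer_data trailer;
 *
 *	qdf_mem_copy(&trailer,
 *		     qdf_nbuf_data(msdu) + qdf_nbuf_len(msdu) - sizeof(trailer),
 *		     sizeof(trailer));
 *	flow_tag = trailer.flow_tag;
 *	protocol_tag = trailer.protocol_tag;
 */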
2806 #endif /* WLAN_RX_PKT_CAPTURE_ENH */
2807 
2808 #ifdef HTT_STATS_DEBUGFS_SUPPORT
2809 /* Number of debugfs entries created for HTT stats */
2810 #define PDEV_HTT_STATS_DBGFS_SIZE HTT_DBG_NUM_EXT_STATS
2811 
2812 /* struct pdev_htt_stats_dbgfs_priv - Structure to maintain debugfs information
2813  * of HTT stats
2814  * @pdev: dp pdev of debugfs entry
2815  * @stats_id: stats id of debugfs entry
2816  */
2817 struct pdev_htt_stats_dbgfs_priv {
2818 	struct dp_pdev *pdev;
2819 	uint16_t stats_id;
2820 };
2821 
2822 /* struct pdev_htt_stats_dbgfs_cfg - PDEV level data structure for debugfs
2823  * support for HTT stats
2824  * @debugfs_entry: qdf_debugfs directory entry
2825  * @m: qdf debugfs file handler
2826  * @pdev_htt_stats_dbgfs_ops: File operations of entry created
2827  * @priv: HTT stats debugfs private object
2828  * @htt_stats_dbgfs_event: HTT stats event for debugfs support
2829  * @lock: HTT stats debugfs lock
2830  * @htt_stats_dbgfs_msg_process: Function callback to print HTT stats
2831  */
2832 struct pdev_htt_stats_dbgfs_cfg {
2833 	qdf_dentry_t debugfs_entry[PDEV_HTT_STATS_DBGFS_SIZE];
2834 	qdf_debugfs_file_t m;
2835 	struct qdf_debugfs_fops
2836 			pdev_htt_stats_dbgfs_ops[PDEV_HTT_STATS_DBGFS_SIZE - 1];
2837 	struct pdev_htt_stats_dbgfs_priv priv[PDEV_HTT_STATS_DBGFS_SIZE - 1];
2838 	qdf_event_t htt_stats_dbgfs_event;
2839 	qdf_mutex_t lock;
2840 	void (*htt_stats_dbgfs_msg_process)(void *data, A_INT32 len);
2841 };
2842 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
2843 
2844 struct dp_srng_ring_state {
2845 	enum hal_ring_type ring_type;
2846 	uint32_t sw_head;
2847 	uint32_t sw_tail;
2848 	uint32_t hw_head;
2849 	uint32_t hw_tail;
2850 
2851 };
2852 
2853 struct dp_soc_srngs_state {
2854 	uint32_t seq_num;
2855 	uint32_t max_ring_id;
2856 	struct dp_srng_ring_state ring_state[DP_MAX_SRNGS];
2857 	TAILQ_ENTRY(dp_soc_srngs_state) list_elem;
2858 };
2859 
2860 #ifdef WLAN_FEATURE_11BE_MLO
2861 /* struct dp_mlo_sync_timestamp - PDEV level data structure for storing
2862  * MLO timestamp received via HTT msg.
2863  * msg_type: This would be set to HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND
2864  * pdev_id: pdev_id
2865  * chip_id: chip_id
2866  * mac_clk_freq: mac clock frequency of the mac HW block in MHz
2867  * sync_tstmp_lo_us: lower 32 bits of the WLAN global time stamp (in us) at
2868  *                   which last sync interrupt was received
2869  * sync_tstmp_hi_us: upper 32 bits of the WLAN global time stamp (in us) at
2870  *                   which last sync interrupt was received
2871  * mlo_offset_lo_us: lower 32 bits of the MLO time stamp offset in us
2872  * mlo_offset_hi_us: upper 32 bits of the MLO time stamp offset in us
2873  * mlo_offset_clks:  MLO time stamp offset in clock ticks for sub us
2874  * mlo_comp_us:      MLO time stamp compensation applied in us
2875  * mlo_comp_clks:    MLO time stamp compensation applied in clock ticks
2876  *                   for sub us resolution
2877  * mlo_comp_timer:   period of MLO compensation timer at which compensation
2878  *                   is applied, in us
2879  */
2880 struct dp_mlo_sync_timestamp {
2881 	uint32_t msg_type:8,
2882 		 pdev_id:2,
2883 		 chip_id:2,
2884 		 rsvd1:4,
2885 		 mac_clk_freq:16;
2886 	uint32_t sync_tstmp_lo_us;
2887 	uint32_t sync_tstmp_hi_us;
2888 	uint32_t mlo_offset_lo_us;
2889 	uint32_t mlo_offset_hi_us;
2890 	uint32_t mlo_offset_clks;
2891 	uint32_t mlo_comp_us:16,
2892 		 mlo_comp_clks:10,
2893 		 rsvd2:6;
2894 	uint32_t mlo_comp_timer:22,
2895 		 rsvd3:10;
2896 };
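/*
 * Illustrative sketch (not part of the driver): the hi/lo pairs above are
 * 32-bit halves of 64-bit microsecond values and would typically be
 * recombined as:
 *
 *	uint64_t mlo_offset_us;
 *
 *	mlo_offset_us = ((uint64_t)ts->mlo_offset_hi_us << 32) |
 *			ts->mlo_offset_lo_us;
 */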
2897 #endif
2898 
2899 /* PDEV level structure for data path */
2900 struct dp_pdev {
2901 	/**
2902 	 * Re-use Memory Section Starts
2903 	 */
2904 
2905 	/* PDEV Id */
2906 	uint8_t pdev_id;
2907 
2908 	/* LMAC Id */
2909 	uint8_t lmac_id;
2910 
2911 	/* Target pdev  Id */
2912 	uint8_t target_pdev_id;
2913 
2914 	bool pdev_deinit;
2915 
2916 	/* TXRX SOC handle */
2917 	struct dp_soc *soc;
2918 
2919 	/* pdev status down or up required to handle dynamic hw
2920 	 * mode switch between DBS and DBS_SBS.
2921 	 * 1 = down
2922 	 * 0 = up
2923 	 */
2924 	bool is_pdev_down;
2925 
2926 	/* Enhanced Stats is enabled */
2927 	bool enhanced_stats_en;
2928 
2929 	/* Flag to indicate fast RX */
2930 	bool rx_fast_flag;
2931 
2932 	/* Second ring used to replenish rx buffers */
2933 	struct dp_srng rx_refill_buf_ring2;
2934 #ifdef IPA_WDI3_VLAN_SUPPORT
2935 	/* Third ring used to replenish rx buffers */
2936 	struct dp_srng rx_refill_buf_ring3;
2937 #endif
2938 
2939 	/* Empty ring used by firmware to post rx buffers to the MAC */
2940 	struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];
2941 
2942 	int ch_band_lmac_id_mapping[REG_BAND_UNKNOWN];
2943 
2944 	/* wlan_cfg pdev ctxt*/
2945 	 struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx;
2946 
2947 	/**
2948 	 * TODO: See if we need a ring map here for LMAC rings.
2949 	 * 1. Monitor rings are currently planning to be processed on receiving
2950 	 * PPDU end interrupts and hence won't need ring based interrupts.
2951 	 * 2. Rx buffer rings will be replenished during REO destination
2952 	 * processing and doesn't require regular interrupt handling - we will
2953 	 * only handle low water mark interrupts which is not expected
2954 	 * frequently
2955 	 */
2956 
2957 	/* VDEV list */
2958 	TAILQ_HEAD(, dp_vdev) vdev_list;
2959 
2960 	/* vdev list lock */
2961 	qdf_spinlock_t vdev_list_lock;
2962 
2963 	/* Number of vdevs this device has */
2964 	uint16_t vdev_count;
2965 
2966 	/* PDEV transmit lock */
2967 	qdf_spinlock_t tx_lock;
2968 
2969 	/*tx_mutex for me*/
2970 	DP_MUTEX_TYPE tx_mutex;
2971 
2972 	/* msdu chain head & tail */
2973 	qdf_nbuf_t invalid_peer_head_msdu;
2974 	qdf_nbuf_t invalid_peer_tail_msdu;
2975 
2976 	/* Band steering  */
2977 	/* TBD */
2978 
2979 	/* PDEV level data path statistics */
2980 	struct cdp_pdev_stats stats;
2981 
2982 	/* Global RX decap mode for the device */
2983 	enum htt_pkt_type rx_decap_mode;
2984 
2985 	qdf_atomic_t num_tx_outstanding;
2986 	int32_t tx_descs_max;
2987 
2988 	qdf_atomic_t num_tx_exception;
2989 
2990 	/* MCL specific local peer handle */
2991 	struct {
2992 		uint8_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
2993 		uint8_t freelist;
2994 		qdf_spinlock_t lock;
2995 		struct dp_peer *map[OL_TXRX_NUM_LOCAL_PEER_IDS];
2996 	} local_peer_ids;
2997 
2998 	/* dscp_tid_map */
2999 	uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX];
3000 
3001 	/* operating channel */
3002 	struct {
3003 		uint8_t num;
3004 		uint8_t band;
3005 		uint16_t freq;
3006 	} operating_channel;
3007 
3008 	/* pool addr for mcast enhance buff */
3009 	struct {
3010 		int size;
3011 		uint32_t paddr;
3012 		char *vaddr;
3013 		struct dp_tx_me_buf_t *freelist;
3014 		int buf_in_use;
3015 		qdf_dma_mem_context(memctx);
3016 	} me_buf;
3017 
3018 	bool hmmc_tid_override_en;
3019 	uint8_t hmmc_tid;
3020 
3021 	/* Number of VAPs with mcast enhancement enabled */
3022 	qdf_atomic_t mc_num_vap_attached;
3023 
3024 	qdf_atomic_t stats_cmd_complete;
3025 
3026 #ifdef IPA_OFFLOAD
3027 	ipa_uc_op_cb_type ipa_uc_op_cb;
3028 	void *usr_ctxt;
3029 	struct dp_ipa_resources ipa_resource;
3030 #endif
3031 
3032 	/* TBD */
3033 
3034 	/* map this pdev to a particular Reo Destination ring */
3035 	enum cdp_host_reo_dest_ring reo_dest;
3036 
3037 	/* WDI event handlers */
3038 	struct wdi_event_subscribe_t **wdi_event_list;
3039 
3040 	bool cfr_rcc_mode;
3041 
3042 	/* enable time latency check for tx completion */
3043 	bool latency_capture_enable;
3044 
3045 	/* enable calculation of delay stats*/
3046 	bool delay_stats_flag;
3047 	void *dp_txrx_handle; /* Advanced data path handle */
3048 	uint32_t ppdu_id;
3049 	bool first_nbuf;
3050 	/* Current noise-floor reading for the pdev channel */
3051 	int16_t chan_noise_floor;
3052 
3053 	/*
3054 	 * For multiradio device, this flag indicates if
3055 	 * this radio is primary or secondary.
3056 	 *
3057 	 * For HK 1.0, this is used for WAR for the AST issue.
3058 	 * HK 1.x mandates creation of only 1 AST entry with same MAC address
3059 	 * across 2 radios. is_primary indicates the radio on which DP should
3060 	 * install HW AST entry if there is a request to add 2 AST entries
3061 	 * with same MAC address across 2 radios
3062 	 */
3063 	uint8_t is_primary;
3064 	struct cdp_tx_sojourn_stats sojourn_stats;
3065 	qdf_nbuf_t sojourn_buf;
3066 
3067 	union dp_rx_desc_list_elem_t *free_list_head;
3068 	union dp_rx_desc_list_elem_t *free_list_tail;
3069 	/* Cached peer_id from htt_peer_details_tlv */
3070 	uint16_t fw_stats_peer_id;
3071 
3072 	/* qdf_event for fw_peer_stats */
3073 	qdf_event_t fw_peer_stats_event;
3074 
3075 	/* qdf_event for fw_stats */
3076 	qdf_event_t fw_stats_event;
3077 
3078 	/* qdf_event for fw_obss_stats */
3079 	qdf_event_t fw_obss_stats_event;
3080 
3081 	/* To check if request is already sent for obss stats */
3082 	bool pending_fw_obss_stats_response;
3083 
3084 	/* User configured max number of tx buffers */
3085 	uint32_t num_tx_allowed;
3086 
3087 	/* unique cookie required for peer session */
3088 	uint32_t next_peer_cookie;
3089 
3090 	/*
3091 	 * Run time enabled when the first protocol tag is added,
3092 	 * run time disabled when the last protocol tag is deleted
3093 	 */
3094 	bool  is_rx_protocol_tagging_enabled;
3095 
3096 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
3097 	/*
3098 	 * The protocol type is used as array index to save
3099 	 * user provided tag info
3100 	 */
3101 	struct rx_protocol_tag_map rx_proto_tag_map[RX_PROTOCOL_TAG_MAX];
3102 
3103 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
3104 	/*
3105 	 * Track msdus received from each reo ring separately to avoid
3106 	 * simultaneous writes from different cores
3107 	 */
3108 	struct rx_protocol_tag_stats
3109 		reo_proto_tag_stats[MAX_REO_DEST_RINGS][RX_PROTOCOL_TAG_MAX];
3110 	/* Track msdus received from exception ring separately */
3111 	struct rx_protocol_tag_stats
3112 		rx_err_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
3113 	struct rx_protocol_tag_stats
3114 		mon_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
3115 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
3116 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
3117 
3118 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
3119 	/**
3120 	 * Pointer to DP Flow FST at SOC level if
3121 	 * is_rx_flow_search_table_per_pdev is true
3122 	 */
3123 	struct dp_rx_fst *rx_fst;
3124 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
3125 
3126 #ifdef FEATURE_TSO_STATS
3127 	/* TSO Id to index into TSO packet information */
3128 	qdf_atomic_t tso_idx;
3129 #endif /* FEATURE_TSO_STATS */
3130 
3131 #ifdef WLAN_SUPPORT_DATA_STALL
3132 	data_stall_detect_cb data_stall_detect_callback;
3133 #endif /* WLAN_SUPPORT_DATA_STALL */
3134 
3135 	/* flag to indicate whether LRO hash command has been sent to FW */
3136 	uint8_t is_lro_hash_configured;
3137 
3138 #ifdef HTT_STATS_DEBUGFS_SUPPORT
3139 	/* HTT stats debugfs params */
3140 	struct pdev_htt_stats_dbgfs_cfg *dbgfs_cfg;
3141 #endif
3142 	struct {
3143 		qdf_work_t work;
3144 		qdf_workqueue_t *work_queue;
3145 		uint32_t seq_num;
3146 		uint8_t queue_depth;
3147 		qdf_spinlock_t list_lock;
3148 
3149 		TAILQ_HEAD(, dp_soc_srngs_state) list;
3150 	} bkp_stats;
3151 #ifdef WIFI_MONITOR_SUPPORT
3152 	struct dp_mon_pdev *monitor_pdev;
3153 #endif
3154 #ifdef WLAN_FEATURE_11BE_MLO
3155 	struct dp_mlo_sync_timestamp timestamp;
3156 #endif
3157 	/* Is isolation mode enabled */
3158 	bool  isolation;
3159 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
3160 	uint8_t is_first_wakeup_packet;
3161 #endif
3162 #ifdef CONNECTIVITY_PKTLOG
3163 	/* packetdump callback functions */
3164 	ol_txrx_pktdump_cb dp_tx_packetdump_cb;
3165 	ol_txrx_pktdump_cb dp_rx_packetdump_cb;
3166 #endif
3167 
3168 	/* Firmware Stats for TLV received from Firmware */
3169 	uint64_t fw_stats_tlv_bitmap_rcvd;
3170 
3171 	/* For Checking Pending Firmware Response */
3172 	bool pending_fw_stats_response;
3173 };
3174 
3175 struct dp_peer;
3176 
3177 #ifdef DP_RX_UDP_OVER_PEER_ROAM
3178 #define WLAN_ROAM_PEER_AUTH_STATUS_NONE 0x0
3179 /**
3180  * This macro is equivalent to macro ROAM_AUTH_STATUS_AUTHENTICATED used
3181  * in connection mgr
3182  */
3183 #define WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED 0x2
3184 #endif
3185 
3186 /* VDEV structure for data path state */
3187 struct dp_vdev {
3188 	/* OS device abstraction */
3189 	qdf_device_t osdev;
3190 
3191 	/* physical device that is the parent of this virtual device */
3192 	struct dp_pdev *pdev;
3193 
3194 	/* VDEV operating mode */
3195 	enum wlan_op_mode opmode;
3196 
3197 	/* VDEV subtype */
3198 	enum wlan_op_subtype subtype;
3199 
3200 	/* Tx encapsulation type for this VAP */
3201 	enum htt_cmn_pkt_type tx_encap_type;
3202 
3203 	/* Rx Decapsulation type for this VAP */
3204 	enum htt_cmn_pkt_type rx_decap_type;
3205 
3206 	/* WDS enabled */
3207 	bool wds_enabled;
3208 
3209 	/* MEC enabled */
3210 	bool mec_enabled;
3211 
3212 #ifdef QCA_SUPPORT_WDS_EXTENDED
3213 	bool wds_ext_enabled;
3214 #endif /* QCA_SUPPORT_WDS_EXTENDED */
3215 	bool drop_3addr_mcast;
3216 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
3217 	bool skip_bar_update;
3218 	unsigned long skip_bar_update_last_ts;
3219 #endif
3220 	/* WDS Aging timer period */
3221 	uint32_t wds_aging_timer_val;
3222 
3223 	/* NAWDS enabled */
3224 	bool nawds_enabled;
3225 
3226 	/* Multicast enhancement enabled */
3227 	uint8_t mcast_enhancement_en;
3228 
3229 	/* IGMP multicast enhancement enabled */
3230 	uint8_t igmp_mcast_enhanc_en;
3231 
3232 	/* vdev_id - ID used to specify a particular vdev to the target */
3233 	uint8_t vdev_id;
3234 
3235 	/* Default HTT meta data for this VDEV */
3236 	/* TBD: check alignment constraints */
3237 	uint16_t htt_tcl_metadata;
3238 
3239 	/* vdev lmac_id */
3240 	uint8_t lmac_id;
3241 
3242 	/* vdev bank_id */
3243 	uint8_t bank_id;
3244 
3245 	/* Mesh mode vdev */
3246 	uint32_t mesh_vdev;
3247 
3248 	/* Mesh mode rx filter setting */
3249 	uint32_t mesh_rx_filter;
3250 
3251 	/* DSCP-TID mapping table ID */
3252 	uint8_t dscp_tid_map_id;
3253 
3254 	/* Address search type to be set in TX descriptor */
3255 	uint8_t search_type;
3256 
3257 	/*
3258 	 * Flag to indicate if s/w tid classification should be
3259 	 * skipped
3260 	 */
3261 	uint8_t skip_sw_tid_classification;
3262 
3263 	/* Flag to enable peer authorization */
3264 	uint8_t peer_authorize;
3265 
3266 	/* AST hash value for BSS peer in HW valid for STA VAP*/
3267 	uint16_t bss_ast_hash;
3268 
3269 	/* AST hash index for BSS peer in HW valid for STA VAP*/
3270 	uint16_t bss_ast_idx;
3271 
3272 	bool multipass_en;
3273 
3274 	/* Address search flags to be configured in HAL descriptor */
3275 	uint8_t hal_desc_addr_search_flags;
3276 
3277 	/* Handle to the OS shim SW's virtual device */
3278 	ol_osif_vdev_handle osif_vdev;
3279 
3280 	/* MAC address */
3281 	union dp_align_mac_addr mac_addr;
3282 
3283 #ifdef WLAN_FEATURE_11BE_MLO
3284 	/* MLO MAC address corresponding to vdev */
3285 	union dp_align_mac_addr mld_mac_addr;
3286 #if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
3287 	bool mlo_vdev;
3288 #endif
3289 #endif
3290 
3291 	/* node in the pdev's list of vdevs */
3292 	TAILQ_ENTRY(dp_vdev) vdev_list_elem;
3293 
3294 	/* dp_peer list */
3295 	TAILQ_HEAD(, dp_peer) peer_list;
3296 	/* to protect peer_list */
3297 	DP_MUTEX_TYPE peer_list_lock;
3298 
3299 	/* RX call back function to flush GRO packets*/
3300 	ol_txrx_rx_gro_flush_ind_fp osif_gro_flush;
3301 	/* default RX call back function called by dp */
3302 	ol_txrx_rx_fp osif_rx;
3303 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
3304 	/* callback to receive eapol frames */
3305 	ol_txrx_rx_fp osif_rx_eapol;
3306 #endif
3307 	/* callback to deliver rx frames to the OS */
3308 	ol_txrx_rx_fp osif_rx_stack;
3309 	/* Callback to handle rx fisa frames */
3310 	ol_txrx_fisa_rx_fp osif_fisa_rx;
3311 	ol_txrx_fisa_flush_fp osif_fisa_flush;
3312 
3313 	/* call back function to flush out queued rx packets*/
3314 	ol_txrx_rx_flush_fp osif_rx_flush;
3315 	ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap;
3316 	ol_txrx_get_key_fp osif_get_key;
3317 	ol_txrx_tx_free_ext_fp osif_tx_free_ext;
3318 
3319 #ifdef notyet
3320 	/* callback to check if the msdu is an WAI (WAPI) frame */
3321 	ol_rx_check_wai_fp osif_check_wai;
3322 #endif
3323 
3324 	/* proxy arp function */
3325 	ol_txrx_proxy_arp_fp osif_proxy_arp;
3326 
3327 	ol_txrx_mcast_me_fp me_convert;
3328 
3329 	/* completion function used by this vdev*/
3330 	ol_txrx_completion_fp tx_comp;
3331 
3332 	ol_txrx_get_tsf_time get_tsf_time;
3333 
3334 	/* callback to classify critical packets */
3335 	ol_txrx_classify_critical_pkt_fp tx_classify_critical_pkt_cb;
3336 
3337 	/* deferred vdev deletion state */
3338 	struct {
3339 		/* VDEV delete pending */
3340 		int pending;
3341 		/*
3342 		* callback and a context argument to provide a
3343 		* notification for when the vdev is deleted.
3344 		*/
3345 		ol_txrx_vdev_delete_cb callback;
3346 		void *context;
3347 	} delete;
3348 
3349 	/* tx data delivery notification callback function */
3350 	struct {
3351 		ol_txrx_data_tx_cb func;
3352 		void *ctxt;
3353 	} tx_non_std_data_callback;
3354 
3355 
3356 	/* safe mode control to bypass the encrypt and decipher process*/
3357 	uint32_t safemode;
3358 
3359 	/* rx filter related */
3360 	uint32_t drop_unenc;
3361 #ifdef notyet
3362 	privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
3363 	uint32_t filters_num;
3364 #endif
3365 	/* TDLS Link status */
3366 	bool tdls_link_connected;
3367 	bool is_tdls_frame;
3368 
3369 	/* per vdev rx nbuf queue */
3370 	qdf_nbuf_queue_t rxq;
3371 
3372 	uint8_t tx_ring_id;
3373 	struct dp_tx_desc_pool_s *tx_desc;
3374 	struct dp_tx_ext_desc_pool_s *tx_ext_desc;
3375 
3376 	/* Capture timestamp of previous tx packet enqueued */
3377 	uint64_t prev_tx_enq_tstamp;
3378 
3379 	/* Capture timestamp of previous rx packet delivered */
3380 	uint64_t prev_rx_deliver_tstamp;
3381 
3382 	/* VDEV Stats */
3383 	struct cdp_vdev_stats stats;
3384 
3385 	/* Is this a proxySTA VAP */
3386 	uint8_t proxysta_vdev : 1, /* Is this a proxySTA VAP */
3387 		wrap_vdev : 1, /* Is this a QWRAP AP VAP */
3388 		isolation_vdev : 1, /* Is this an isolation VAP */
3389 		reserved : 5; /* Reserved */
3390 
3391 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3392 	struct dp_tx_desc_pool_s *pool;
3393 #endif
3394 	/* AP BRIDGE enabled */
3395 	bool ap_bridge_enabled;
3396 
3397 	enum cdp_sec_type  sec_type;
3398 
3399 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
3400 	bool raw_mode_war;
3401 
3402 
3403 	/* 8021p PCP-TID mapping table ID */
3404 	uint8_t tidmap_tbl_id;
3405 
3406 	/* 8021p PCP-TID map values */
3407 	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
3408 
3409 	/* TIDmap priority */
3410 	uint8_t tidmap_prty;
3411 
3412 #ifdef QCA_MULTIPASS_SUPPORT
3413 	uint16_t *iv_vlan_map;
3414 
3415 	/* dp_peer special list */
3416 	TAILQ_HEAD(, dp_txrx_peer) mpass_peer_list;
3417 	DP_MUTEX_TYPE mpass_peer_mutex;
3418 #endif
3419 	/* Extended data path handle */
3420 	struct cdp_ext_vdev *vdev_dp_ext_handle;
3421 #ifdef VDEV_PEER_PROTOCOL_COUNT
3422 	/*
3423 	 * Rx-ingress and Tx-egress are handled in the lower-level DP layer,
3424 	 * while Rx-egress and Tx-ingress are handled in the OSIF layer.
3425 	 * Hence the Rx-egress and Tx-ingress mask definitions live in the
3426 	 * OSIF layer, and the Rx-ingress and Tx-egress definitions are
3427 	 * given below.
3428 	 */
3429 #define VDEV_PEER_PROTOCOL_RX_INGRESS_MASK 1
3430 #define VDEV_PEER_PROTOCOL_TX_INGRESS_MASK 2
3431 #define VDEV_PEER_PROTOCOL_RX_EGRESS_MASK 4
3432 #define VDEV_PEER_PROTOCOL_TX_EGRESS_MASK 8
3433 	bool peer_protocol_count_track;
3434 	int peer_protocol_count_dropmask;
3435 #endif
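	/*
	 * Illustrative sketch (editorial example, not driver code): the DP
	 * layer consumes only the Rx-ingress and Tx-egress bits defined
	 * above, so a hypothetical caller that wants protocol-tracked frames
	 * dropped on both of those paths could program the drop mask as:
	 *
	 *	vdev->peer_protocol_count_dropmask =
	 *		VDEV_PEER_PROTOCOL_RX_INGRESS_MASK |
	 *		VDEV_PEER_PROTOCOL_TX_EGRESS_MASK;
	 *
	 * The Rx-egress and Tx-ingress bits are interpreted by the OSIF
	 * layer, so setting them here has no effect in DP.
	 */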
3436 	/* callback to collect connectivity stats */
3437 	ol_txrx_stats_rx_fp stats_cb;
3438 	uint32_t num_peers;
3439 	/* entry to inactive_list*/
3440 	TAILQ_ENTRY(dp_vdev) inactive_list_elem;
3441 
3442 #ifdef WLAN_SUPPORT_RX_FISA
3443 	/**
3444 	 * Params used for controlling the fisa aggregation dynamically
3445 	 */
3446 	uint8_t fisa_disallowed[MAX_REO_DEST_RINGS];
3447 	uint8_t fisa_force_flushed[MAX_REO_DEST_RINGS];
3448 #endif
3449 	/*
3450 	 * Refcount for VDEV currently incremented when
3451 	 * peer is created for VDEV
3452 	 */
3453 	qdf_atomic_t ref_cnt;
3454 	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
3455 	uint8_t num_latency_critical_conn;
3456 #ifdef WLAN_SUPPORT_MESH_LATENCY
3457 	uint8_t peer_tid_latency_enabled;
3458 	/* tid latency configuration parameters */
3459 	struct {
3460 		uint32_t service_interval;
3461 		uint32_t burst_size;
3462 		uint8_t latency_tid;
3463 	} mesh_tid_latency_config;
3464 #endif
3465 #ifdef WIFI_MONITOR_SUPPORT
3466 	struct dp_mon_vdev *monitor_vdev;
3467 #endif
3468 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
3469 	/* Delta between TQM clock and TSF clock */
3470 	uint32_t delta_tsf;
3471 #endif
3472 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
3473 	/* Indicate if uplink delay report is enabled or not */
3474 	qdf_atomic_t ul_delay_report;
3475 	/* accumulative delay for every TX completion */
3476 	qdf_atomic_t ul_delay_accum;
3477 	/* accumulative number of packets delay has accumulated */
3478 	qdf_atomic_t ul_pkts_accum;
3479 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
3480 
3481 	/* vdev_stats_id - ID used for stats collection by FW from HW*/
3482 	uint8_t vdev_stats_id;
3483 #ifdef HW_TX_DELAY_STATS_ENABLE
3484 	/* hw tx delay stats enable */
3485 	uint8_t hw_tx_delay_stats_enabled;
3486 #endif
3487 #ifdef DP_RX_UDP_OVER_PEER_ROAM
3488 	uint32_t roaming_peer_status;
3489 	union dp_align_mac_addr roaming_peer_mac;
3490 #endif
3491 #ifdef DP_TRAFFIC_END_INDICATION
3492 	/* per vdev feature enable/disable status */
3493 	bool traffic_end_ind_en;
3494 	/* per vdev nbuf queue for traffic end indication packets */
3495 	qdf_nbuf_queue_t end_ind_pkt_q;
3496 #endif
3497 };
3498 
3499 enum {
3500 	dp_sec_mcast = 0,
3501 	dp_sec_ucast
3502 };
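/*
 * Illustrative note (editorial example): dp_sec_mcast/dp_sec_ucast are the
 * indices into the two-entry security[] arrays kept in struct dp_peer and
 * struct dp_txrx_peer below (0 -> multicast, 1 -> unicast), e.g.:
 *
 *	enum cdp_sec_type ucast_sec =
 *		peer->security[dp_sec_ucast].sec_type;
 */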
3503 
3504 #ifdef WDS_VENDOR_EXTENSION
3505 typedef struct {
3506 	uint8_t	wds_tx_mcast_4addr:1,
3507 		wds_tx_ucast_4addr:1,
3508 		wds_rx_filter:1,      /* enforce rx filter */
3509 		wds_rx_ucast_4addr:1, /* when set, accept 4addr unicast frames    */
3510 		wds_rx_mcast_4addr:1;  /* when set, accept 4addr multicast frames  */
3511 
3512 } dp_ecm_policy;
3513 #endif
3514 
3515 /*
3516  * struct dp_peer_cached_bufq - cached_bufq to enqueue rx packets
3517  * @cached_bufq: nbuf list to enqueue rx packets
3518  * @bufq_lock: spinlock for nbuf list access
3519  * @thresh: maximum threshold for number of rx buffers to enqueue
3520  * @entries: number of entries
3521  * @dropped: number of packets dropped
3522  */
3523 struct dp_peer_cached_bufq {
3524 	qdf_list_t cached_bufq;
3525 	qdf_spinlock_t bufq_lock;
3526 	uint32_t thresh;
3527 	uint32_t entries;
3528 	uint32_t dropped;
3529 };
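/*
 * Illustrative sketch (editorial example, not the driver's actual enqueue
 * path): the fields above are enough to express the bounded-queue policy
 * this structure supports; assuming the caller holds bufq_lock, a minimal
 * admission check could look like:
 *
 *	static inline bool dp_cached_bufq_admit(struct dp_peer_cached_bufq *q)
 *	{
 *		if (q->entries >= q->thresh) {
 *			q->dropped++;
 *			return false;
 *		}
 *		q->entries++;
 *		return true;
 *	}
 */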
3530 
3531 /**
3532  * enum dp_peer_ast_flowq
3533  * @DP_PEER_AST_FLOWQ_HI_PRIO: Hi Priority flow queue
3534  * @DP_PEER_AST_FLOWQ_LOW_PRIO: Low priority flow queue
3535  * @DP_PEER_AST_FLOWQ_UDP: flow queue type is UDP
3536  * @DP_PEER_AST_FLOWQ_NON_UDP: flow queue type is Non UDP
3537  */
3538 enum dp_peer_ast_flowq {
3539 	DP_PEER_AST_FLOWQ_HI_PRIO,
3540 	DP_PEER_AST_FLOWQ_LOW_PRIO,
3541 	DP_PEER_AST_FLOWQ_UDP,
3542 	DP_PEER_AST_FLOWQ_NON_UDP,
3543 	DP_PEER_AST_FLOWQ_MAX,
3544 };
3545 
3546 /*
3547  * struct dp_ast_flow_override_info - ast override info
3548  * @ast_index - ast indexes in peer map message
3549  * @ast_valid_mask - ast valid mask for each ast index
3550  * @ast_flow_mask - ast flow mask for each ast index
3551  * @tid_valid_low_pri_mask - per tid mask for low priority flow
3552  * @tid_valid_hi_pri_mask - per tid mask for hi priority flow
3553  */
3554 struct dp_ast_flow_override_info {
3555 	uint16_t ast_idx[DP_PEER_AST_FLOWQ_MAX];
3556 	uint8_t ast_valid_mask;
3557 	uint8_t ast_flow_mask[DP_PEER_AST_FLOWQ_MAX];
3558 	uint8_t tid_valid_low_pri_mask;
3559 	uint8_t tid_valid_hi_pri_mask;
3560 };
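/*
 * Illustrative sketch (editorial example): ast_valid_mask is documented as
 * carrying one valid bit per ast_idx[] entry, indexed by enum
 * dp_peer_ast_flowq. Under that assumption, a hypothetical check for the
 * hi-priority AST index reported in the peer map message would be:
 *
 *	if (info->ast_valid_mask & (1 << DP_PEER_AST_FLOWQ_HI_PRIO))
 *		ast_idx = info->ast_idx[DP_PEER_AST_FLOWQ_HI_PRIO];
 *
 * where "info" is a struct dp_ast_flow_override_info pointer.
 */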
3561 
3562 /*
3563  * struct dp_peer_ast_params - ast parameters for a msdu flow-queue
3564  * @ast_index - ast index populated by FW
3565  * @is_valid - ast flow valid mask
3566  * @valid_tid_mask - per tid mask for this ast index
3567  * @flowQ - flow queue id associated with this ast index
3568  */
3569 struct dp_peer_ast_params {
3570 	uint16_t ast_idx;
3571 	uint8_t is_valid;
3572 	uint8_t valid_tid_mask;
3573 	uint8_t flowQ;
3574 };
3575 
3576 #define DP_MLO_FLOW_INFO_MAX	3
3577 
3578 /**
3579  * struct dp_mlo_flow_override_info - Flow override info
3580  * @ast_idx: Primary TCL AST Index
3581  * @ast_idx_valid: Is AST index valid
3582  * @chip_id: CHIP ID
3583  * @tidmask: tidmask
3584  * @cache_set_num: Cache set number
3585  */
3586 struct dp_mlo_flow_override_info {
3587 	uint16_t ast_idx;
3588 	uint8_t ast_idx_valid;
3589 	uint8_t chip_id;
3590 	uint8_t tidmask;
3591 	uint8_t cache_set_num;
3592 };
3593 
3594 /**
3595  * struct dp_mlo_link_info - Link info
3596  * @peer_chip_id: Peer Chip ID
3597  * @vdev_id: Vdev ID
3598  */
3599 struct dp_mlo_link_info {
3600 	uint8_t peer_chip_id;
3601 	uint8_t vdev_id;
3602 };
3603 
3604 #ifdef WLAN_SUPPORT_MSCS
3605 /*MSCS Procedure based macros */
3606 #define IEEE80211_MSCS_MAX_ELEM_SIZE    5
3607 #define IEEE80211_TCLAS_MASK_CLA_TYPE_4  4
3608 /*
3609  * struct dp_peer_mscs_parameter - MSCS database obtained from
3610  * MSCS Request and Response in the control path. This data is used
3611  * by the AP to find out what priority to set based on the tuple
3612  * classification during packet processing.
3613  * @user_priority_bitmap - User priority bitmap obtained during
3614  * handshake
3615  * @user_priority_limit - User priority limit obtained during
3616  * handshake
3617  * @classifier_mask - params to be compared during processing
3618  */
3619 struct dp_peer_mscs_parameter {
3620 	uint8_t user_priority_bitmap;
3621 	uint8_t user_priority_limit;
3622 	uint8_t classifier_mask;
3623 };
3624 #endif
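/*
 * Illustrative sketch (editorial example, not the driver's MSCS classifier):
 * per the description above, the negotiated bitmap says which user
 * priorities MSCS covers and the limit caps the value actually applied.
 * Assuming "up" is the user priority derived from the tuple classification,
 * a minimal decision could be:
 *
 *	if (mscs->user_priority_bitmap & (1 << up))
 *		up = (up > mscs->user_priority_limit) ?
 *			mscs->user_priority_limit : up;
 *
 * where "mscs" points to the relevant struct dp_peer_mscs_parameter.
 */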
3625 
3626 #ifdef QCA_SUPPORT_WDS_EXTENDED
3627 #define WDS_EXT_PEER_INIT_BIT 0
3628 
3629 /**
3630  * struct dp_wds_ext_peer - wds ext peer structure
3631  * This is used when the wds extended feature is enabled
3632  * both at compile time and at run time. It is created
3633  * when the first 4-address frame is received from the
3634  * wds backhaul.
3635  * @osif_peer: Handle to the OS shim SW's peer
3636  * @init: wds ext netdev state
3637  */
3638 struct dp_wds_ext_peer {
3639 	ol_osif_peer_handle osif_peer;
3640 	unsigned long init;
3641 };
3642 #endif /* QCA_SUPPORT_WDS_EXTENDED */
3643 
3644 #ifdef WLAN_SUPPORT_MESH_LATENCY
3645 /*Advanced Mesh latency feature based macros */
3646 /*
3647  * struct dp_peer_mesh_latency_parameter - Mesh latency related
3648  * parameters. This data is updated per peer per TID based on
3649  * the flow tuple classification in external rule database
3650  * during packet processing.
3651  * @service_interval_dl - Service interval associated with TID in DL
3652  * @burst_size_dl - Burst size additive over multiple flows in DL
3653  * @service_interval_ul - Service interval associated with TID in UL
3654  * @burst_size_ul - Burst size additive over multiple flows in UL
3655  * @ac - custom ac derived from service interval
3656  * @msduq - MSDU queue number within TID
3657  */
3658 struct dp_peer_mesh_latency_parameter {
3659 	uint32_t service_interval_dl;
3660 	uint32_t burst_size_dl;
3661 	uint32_t service_interval_ul;
3662 	uint32_t burst_size_ul;
3663 	uint8_t ac;
3664 	uint8_t msduq;
3665 };
3666 #endif
3667 
3668 #ifdef WLAN_FEATURE_11BE_MLO
3669 /* Max number of links for MLO connection */
3670 #define DP_MAX_MLO_LINKS 3
3671 
3672 /**
3673  * struct dp_peer_link_info - link peer information for MLO
3674  * @mac_addr: MAC address
3675  * @vdev_id: Vdev ID for current link peer
3676  * @is_valid: flag for link peer info valid or not
3677  * @chip_id: chip id
3678  */
3679 struct dp_peer_link_info {
3680 	union dp_align_mac_addr mac_addr;
3681 	uint8_t vdev_id;
3682 	uint8_t is_valid;
3683 	uint8_t chip_id;
3684 };
3685 
3686 /**
3687  * struct dp_mld_link_peers - this structure is used to get link peer
3688  *			       pointers from the mld peer
3689  * @link_peers: link peers pointer array
3690  * @num_links: number of link peers fetched
3691  */
3692 struct dp_mld_link_peers {
3693 	struct dp_peer *link_peers[DP_MAX_MLO_LINKS];
3694 	uint8_t num_links;
3695 };
3696 #endif
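/*
 * Illustrative sketch (editorial example): a dp_mld_link_peers snapshot
 * carries up to DP_MAX_MLO_LINKS link peer pointers and the number actually
 * fetched, so a caller would typically walk it as:
 *
 *	struct dp_mld_link_peers link_peers_info;
 *	uint8_t i;
 *
 *	// ... link_peers_info populated from the mld peer ...
 *	for (i = 0; i < link_peers_info.num_links; i++) {
 *		struct dp_peer *link_peer = link_peers_info.link_peers[i];
 *		// per-link-peer handling goes here
 *	}
 */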
3697 
3698 typedef void *dp_txrx_ref_handle;
3699 
3700 /**
3701  * struct dp_peer_per_pkt_tx_stats- Peer Tx stats updated in per pkt
3702  *				Tx completion path
3703  * @cdp_pkt_info ucast: Unicast Packet Count
3704  * @cdp_pkt_info mcast: Multicast Packet Count
3705  * @cdp_pkt_info bcast: Broadcast Packet Count
3706  * @cdp_pkt_info nawds_mcast: NAWDS Multicast Packet Count
3707  * @cdp_pkt_info tx_success: Successful Tx Packets
3708  * @nawds_mcast_drop: NAWDS Multicast Drop Count
3709  * @ofdma: Total packets transmitted as OFDMA
3710  * @non_amsdu_cnt: Number of MSDUs with no MSDU level aggregation
3711  * @amsdu_cnt: Number of MSDUs part of AMSDU
3712  * @cdp_pkt_info fw_rem: Discarded by firmware
3713  * @fw_rem_notx: firmware_discard_untransmitted
3714  * @fw_rem_tx: firmware_discard_transmitted
3715  * @age_out: aged out in mpdu/msdu queues
3716  * @fw_reason1: discarded by firmware reason 1
3717  * @fw_reason2: discarded by firmware reason 2
3718  * @fw_reason3: discarded by firmware reason 3
3719  * @fw_rem_no_match: dropped due to fw no match command
3720  * @drop_threshold: dropped due to HW threshold
3721  * @drop_link_desc_na: dropped due to link descriptors not available in HW
3722  * @invalid_drop: Invalid msdu drop
3723  * @mcast_vdev_drop: MCAST drop configured for VDEV in HW
3724  * @invalid_rr: Invalid TQM release reason
3725  * @failed_retry_count: packets failed due to retries above the 802.11 retry limit
3726  * @retry_count: packets successfully sent after one or more retries
3727  * @multiple_retry_count: packets successfully sent after more than one retry
3728  * @no_ack_count: no ack pkt count for different protocols
3729  * @tx_success_twt: Successful Tx Packets in TWT session
3730  * @last_tx_ts: last timestamp in jiffies when tx comp occurred
3731  * @avg_sojourn_msdu[CDP_DATA_TID_MAX]: Avg sojourn msdu stat
3732  * @protocol_trace_cnt: per-peer protocol counter
3733  * @release_src_not_tqm: Counter to keep track of release source is not TQM
3734  *			 in TX completion status processing
3735  */
3736 struct dp_peer_per_pkt_tx_stats {
3737 	struct cdp_pkt_info ucast;
3738 	struct cdp_pkt_info mcast;
3739 	struct cdp_pkt_info bcast;
3740 	struct cdp_pkt_info nawds_mcast;
3741 	struct cdp_pkt_info tx_success;
3742 	uint32_t nawds_mcast_drop;
3743 	uint32_t ofdma;
3744 	uint32_t non_amsdu_cnt;
3745 	uint32_t amsdu_cnt;
3746 	struct {
3747 		struct cdp_pkt_info fw_rem;
3748 		uint32_t fw_rem_notx;
3749 		uint32_t fw_rem_tx;
3750 		uint32_t age_out;
3751 		uint32_t fw_reason1;
3752 		uint32_t fw_reason2;
3753 		uint32_t fw_reason3;
3754 		uint32_t fw_rem_queue_disable;
3755 		uint32_t fw_rem_no_match;
3756 		uint32_t drop_threshold;
3757 		uint32_t drop_link_desc_na;
3758 		uint32_t invalid_drop;
3759 		uint32_t mcast_vdev_drop;
3760 		uint32_t invalid_rr;
3761 	} dropped;
3762 	uint32_t failed_retry_count;
3763 	uint32_t retry_count;
3764 	uint32_t multiple_retry_count;
3765 	uint32_t no_ack_count[QDF_PROTO_SUBTYPE_MAX];
3766 	struct cdp_pkt_info tx_success_twt;
3767 	unsigned long last_tx_ts;
3768 	qdf_ewma_tx_lag avg_sojourn_msdu[CDP_DATA_TID_MAX];
3769 #ifdef VDEV_PEER_PROTOCOL_COUNT
3770 	struct protocol_trace_count protocol_trace_cnt[CDP_TRACE_MAX];
3771 #endif
3772 	uint32_t release_src_not_tqm;
3773 };
3774 
3775 /**
3776  * struct dp_peer_extd_tx_stats - Peer Tx stats updated in either
3777  *	per pkt Tx completion path when macro QCA_ENHANCED_STATS_SUPPORT is
3778  *	disabled or in HTT Tx PPDU completion path when macro is enabled
3779  * @stbc: Packets in STBC
3780  * @ldpc: Packets in LDPC
3781  * @retries: Packet retries
3782  * @pkt_type[DOT11_MAX]: pkt count for different .11 modes
3783  * @wme_ac_type[WME_AC_MAX]: Wireless Multimedia type Count
3784  * @excess_retries_per_ac[WME_AC_MAX]: Excess retry count per access category
3785  * @ampdu_cnt: completion of aggregation
3786  * @non_ampdu_cnt: tx completion not aggregated
3787  * @num_ppdu_cookie_valid: no. of valid ppdu cookies rcvd from FW
3788  * @tx_ppdus: ppdus in tx
3789  * @tx_mpdus_success: mpdus successful in tx
3790  * @tx_mpdus_tried: mpdus tried in tx
3791  * @tx_rate: Tx Rate in kbps
3792  * @last_tx_rate: Last tx rate for unicast packets
3793  * @last_tx_rate_mcs: Tx rate mcs for unicast packets
3794  * @mcast_last_tx_rate: Last tx rate for multicast packets
3795  * @mcast_last_tx_rate_mcs: Last tx rate mcs for multicast
3796  * @rnd_avg_tx_rate: Rounded average tx rate
3797  * @avg_tx_rate: Average TX rate
3798  * @tx_ratecode: Tx rate code of last frame
3799  * @pream_punct_cnt: Preamble Punctured count
3800  * @sgi_count[MAX_GI]: SGI count
3801  * @nss[SS_COUNT]: Packet count for different num_spatial_stream values
3802  * @bw[MAX_BW]: Packet Count for different bandwidths
3803  * @ru_start: RU start index
3804  * @ru_tones: RU tones size
3805  * @ru_loc: pkt info for RU location 26/ 52/ 106/ 242/ 484 counter
3806  * @transmit_type: pkt info for tx transmit type
3807  * @mu_group_id: mumimo mu group id
3808  * @last_ack_rssi: RSSI of last acked packet
3809  * @nss_info: NSS 1,2, ...8
3810  * @mcs_info: MCS index
3811  * @bw_info: Bandwidth
3812  *       <enum 0 bw_20_MHz>
3813  *       <enum 1 bw_40_MHz>
3814  *       <enum 2 bw_80_MHz>
3815  *       <enum 3 bw_160_MHz>
3816  * @gi_info: <enum 0     0_8_us_sgi > Legacy normal GI
3817  *       <enum 1     0_4_us_sgi > Legacy short GI
3818  *       <enum 2     1_6_us_sgi > HE related GI
3819  *       <enum 3     3_2_us_sgi > HE
3820  * @preamble_info: preamble
3821  * @tx_ucast_total: total ucast count
3822  * @tx_ucast_success: total ucast success count
3823  * @retries_mpdu: number of MPDUs successfully transmitted after retries
3824  * @mpdu_success_with_retries: mpdu retry count in case of successful tx
3825  * @su_be_ppdu_cnt: SU Tx packet count for 11BE
3826  * @mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX]: MU Tx packet count for 11BE
3827  * @punc_bw[MAX_PUNCTURED_MODE]: MSDU count for punctured bw
3828  * @rts_success: RTS success count
3829  * @rts_failure: RTS failure count
3830  * @bar_cnt: Block ACK Request frame count
3831  * @ndpa_cnt: NDP announcement frame count
3832  */
3833 struct dp_peer_extd_tx_stats {
3834 	uint32_t stbc;
3835 	uint32_t ldpc;
3836 	uint32_t retries;
3837 	struct cdp_pkt_type pkt_type[DOT11_MAX];
3838 	uint32_t wme_ac_type[WME_AC_MAX];
3839 	uint32_t excess_retries_per_ac[WME_AC_MAX];
3840 	uint32_t ampdu_cnt;
3841 	uint32_t non_ampdu_cnt;
3842 	uint32_t num_ppdu_cookie_valid;
3843 	uint32_t tx_ppdus;
3844 	uint32_t tx_mpdus_success;
3845 	uint32_t tx_mpdus_tried;
3846 
3847 	uint32_t tx_rate;
3848 	uint32_t last_tx_rate;
3849 	uint32_t last_tx_rate_mcs;
3850 	uint32_t mcast_last_tx_rate;
3851 	uint32_t mcast_last_tx_rate_mcs;
3852 	uint64_t rnd_avg_tx_rate;
3853 	uint64_t avg_tx_rate;
3854 	uint16_t tx_ratecode;
3855 
3856 	uint32_t sgi_count[MAX_GI];
3857 	uint32_t pream_punct_cnt;
3858 	uint32_t nss[SS_COUNT];
3859 	uint32_t bw[MAX_BW];
3860 	uint32_t ru_start;
3861 	uint32_t ru_tones;
3862 	struct cdp_tx_pkt_info ru_loc[MAX_RU_LOCATIONS];
3863 
3864 	struct cdp_tx_pkt_info transmit_type[MAX_TRANSMIT_TYPES];
3865 	uint32_t mu_group_id[MAX_MU_GROUP_ID];
3866 
3867 	uint32_t last_ack_rssi;
3868 
3869 	uint32_t nss_info:4,
3870 		 mcs_info:4,
3871 		 bw_info:4,
3872 		 gi_info:4,
3873 		 preamble_info:4;
3874 
3875 	uint32_t retries_mpdu;
3876 	uint32_t mpdu_success_with_retries;
3877 	struct cdp_pkt_info tx_ucast_total;
3878 	struct cdp_pkt_info tx_ucast_success;
3879 #ifdef WLAN_FEATURE_11BE
3880 	struct cdp_pkt_type su_be_ppdu_cnt;
3881 	struct cdp_pkt_type mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX];
3882 	uint32_t punc_bw[MAX_PUNCTURED_MODE];
3883 #endif
3884 	uint32_t rts_success;
3885 	uint32_t rts_failure;
3886 	uint32_t bar_cnt;
3887 	uint32_t ndpa_cnt;
3888 };
3889 
3890 /**
3891  * struct dp_peer_per_pkt_rx_stats - Peer Rx stats updated in per pkt Rx path
3892  * @rcvd_reo[CDP_MAX_RX_RINGS]: Packets received on the reo ring
3893  * @rx_lmac[CDP_MAX_LMACS]: Packets received on each lmac
3894  * @unicast: Total unicast packets
3895  * @multicast: Total multicast packets
3896  * @bcast:  Broadcast Packet Count
3897  * @raw: Raw packets received
3898  * @nawds_mcast_drop: Total NAWDS multicast packets dropped
3899  * @mec_drop: Total MEC packets dropped
3900  * @last_rx_ts: last timestamp in jiffies when RX happened
3901  * @intra_bss.pkts: Intra BSS packets received
3902  * @intra_bss.fail: Intra BSS packets failed
3903  * @intra_bss.mdns_no_fwd: Intra BSS MDNS packets not forwarded
3904  * @mic_err: Rx MIC errors (CCMP)
3905  * @decrypt_err: Rx decryption errors
3906  * @fcserr: Rx FCS check failures
3907  * @pn_err: PN check failures
3908  * @oor_err: Rx OOR errors
3909  * @jump_2k_err: 2k jump errors
3910  * @rxdma_wifi_parse_err: rxdma wifi parse errors
3911  * @non_amsdu_cnt: Number of MSDUs with no MSDU level aggregation
3912  * @amsdu_cnt: Number of MSDUs part of AMSDU
3913  * @rx_retries: retries of packet in rx
3914  * @multipass_rx_pkt_drop: Dropped multipass rx pkt
3915  * @peer_unauth_rx_pkt_drop: Unauth rx packet drops
3916  * @policy_check_drop: policy check drops
3917  * @to_stack_twt: Total packets sent up the stack in TWT session
3918  * @protocol_trace_cnt: per-peer protocol counters
3919  */
3920 struct dp_peer_per_pkt_rx_stats {
3921 	struct cdp_pkt_info rcvd_reo[CDP_MAX_RX_RINGS];
3922 	struct cdp_pkt_info rx_lmac[CDP_MAX_LMACS];
3923 	struct cdp_pkt_info unicast;
3924 	struct cdp_pkt_info multicast;
3925 	struct cdp_pkt_info bcast;
3926 	struct cdp_pkt_info raw;
3927 	uint32_t nawds_mcast_drop;
3928 	struct cdp_pkt_info mec_drop;
3929 	unsigned long last_rx_ts;
3930 	struct {
3931 		struct cdp_pkt_info pkts;
3932 		struct cdp_pkt_info fail;
3933 		uint32_t mdns_no_fwd;
3934 	} intra_bss;
3935 	struct {
3936 		uint32_t mic_err;
3937 		uint32_t decrypt_err;
3938 		uint32_t fcserr;
3939 		uint32_t pn_err;
3940 		uint32_t oor_err;
3941 		uint32_t jump_2k_err;
3942 		uint32_t rxdma_wifi_parse_err;
3943 	} err;
3944 	uint32_t non_amsdu_cnt;
3945 	uint32_t amsdu_cnt;
3946 	uint32_t rx_retries;
3947 	uint32_t multipass_rx_pkt_drop;
3948 	uint32_t peer_unauth_rx_pkt_drop;
3949 	uint32_t policy_check_drop;
3950 	struct cdp_pkt_info to_stack_twt;
3951 #ifdef VDEV_PEER_PROTOCOL_COUNT
3952 	struct protocol_trace_count protocol_trace_cnt[CDP_TRACE_MAX];
3953 #endif
3954 	uint32_t mcast_3addr_drop;
3955 };
3956 
3957 /**
3958  * struct dp_peer_extd_rx_stats - Peer Rx stats updated in either
3959  *	per pkt Rx path when macro QCA_ENHANCED_STATS_SUPPORT is disabled or in
3960  *	Rx monitor path when the macro is enabled
3961  * @pkt_type[DOT11_MAX]: pkt counter for different .11 modes
3962  * @wme_ac_type[WME_AC_MAX]: Wireless Multimedia type Count
3963  * @mpdu_cnt_fcs_ok: SU Rx success mpdu count
3964  * @mpdu_cnt_fcs_err: SU Rx fail mpdu count
3965  * @non_ampdu_cnt: Number of MSDUs with no MPDU level aggregation
3966  * @ampdu_cnt: Number of MSDUs part of AMPDU
3967  * @rx_mpdus: mpdu in rx
3968  * @rx_ppdus: ppdu in rx
3969  * @su_ax_ppdu_cnt: SU Rx packet count for .11ax
3970  * @rx_mu[TXRX_TYPE_MU_MAX]: Rx MU stats
3971  * @reception_type[MAX_RECEPTION_TYPES]: Reception type of packets
3972  * @ppdu_cnt[MAX_RECEPTION_TYPES]: PPDU packet count in reception type
3973  * @sgi_count[MAX_GI]: sgi count
3974  * @nss[SS_COUNT]: packet count in spatial streams
3975  * @ppdu_nss[SS_COUNT]: PPDU packet count in spatial streams
3976  * @bw[MAX_BW]: Packet Count in different bandwidths
3977  * @rx_mpdu_cnt[MAX_MCS]: rx mpdu count per MCS rate
3978  * @rx_rate: Rx rate
3979  * @last_rx_rate: Previous rx rate
3980  * @rnd_avg_rx_rate: Rounded average rx rate
3981  * @avg_rx_rate: Average Rx rate
3982  * @rx_ratecode: Rx rate code of last frame
3983  * @avg_snr: Average snr
3984  * @rx_snr_measured_time: Time at which snr is measured
3985  * @snr: SNR of received signal
3986  * @last_snr: Previous snr
3987  * @nss_info: NSS 1,2, ...8
3988  * @mcs_info: MCS index
3989  * @bw_info: Bandwidth
3990  *       <enum 0 bw_20_MHz>
3991  *       <enum 1 bw_40_MHz>
3992  *       <enum 2 bw_80_MHz>
3993  *       <enum 3 bw_160_MHz>
3994  * @gi_info: <enum 0     0_8_us_sgi > Legacy normal GI
3995  *       <enum 1     0_4_us_sgi > Legacy short GI
3996  *       <enum 2     1_6_us_sgi > HE related GI
3997  *       <enum 3     3_2_us_sgi > HE
3998  * @preamble_info: preamble
3999  * @mpdu_retry_cnt: retries of mpdu in rx
4000  * @su_be_ppdu_cnt: SU Rx packet count for BE
4001  * @mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX]: MU rx packet count for BE
4002  * @punc_bw[MAX_PUNCTURED_MODE]: MSDU count for punctured bw
4003  * @bar_cnt: Block ACK Request frame count
4004  * @ndpa_cnt: NDP announcement frame count
4005  */
4006 struct dp_peer_extd_rx_stats {
4007 	struct cdp_pkt_type pkt_type[DOT11_MAX];
4008 	uint32_t wme_ac_type[WME_AC_MAX];
4009 	uint32_t mpdu_cnt_fcs_ok;
4010 	uint32_t mpdu_cnt_fcs_err;
4011 	uint32_t non_ampdu_cnt;
4012 	uint32_t ampdu_cnt;
4013 	uint32_t rx_mpdus;
4014 	uint32_t rx_ppdus;
4015 
4016 	struct cdp_pkt_type su_ax_ppdu_cnt;
4017 	struct cdp_rx_mu rx_mu[TXRX_TYPE_MU_MAX];
4018 	uint32_t reception_type[MAX_RECEPTION_TYPES];
4019 	uint32_t ppdu_cnt[MAX_RECEPTION_TYPES];
4020 
4021 	uint32_t sgi_count[MAX_GI];
4022 	uint32_t nss[SS_COUNT];
4023 	uint32_t ppdu_nss[SS_COUNT];
4024 	uint32_t bw[MAX_BW];
4025 	uint32_t rx_mpdu_cnt[MAX_MCS];
4026 
4027 	uint32_t rx_rate;
4028 	uint32_t last_rx_rate;
4029 	uint32_t rnd_avg_rx_rate;
4030 	uint32_t avg_rx_rate;
4031 	uint32_t rx_ratecode;
4032 
4033 	uint32_t avg_snr;
4034 	unsigned long rx_snr_measured_time;
4035 	uint8_t snr;
4036 	uint8_t last_snr;
4037 
4038 	uint32_t nss_info:4,
4039 		 mcs_info:4,
4040 		 bw_info:4,
4041 		 gi_info:4,
4042 		 preamble_info:4;
4043 
4044 	uint32_t mpdu_retry_cnt;
4045 #ifdef WLAN_FEATURE_11BE
4046 	struct cdp_pkt_type su_be_ppdu_cnt;
4047 	struct cdp_pkt_type mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX];
4048 	uint32_t punc_bw[MAX_PUNCTURED_MODE];
4049 #endif
4050 	uint32_t bar_cnt;
4051 	uint32_t ndpa_cnt;
4052 };
4053 
4054 /**
4055  * struct dp_peer_per_pkt_stats - Per pkt stats for peer
4056  * @tx: Per pkt Tx stats
4057  * @rx: Per pkt Rx stats
4058  */
4059 struct dp_peer_per_pkt_stats {
4060 	struct dp_peer_per_pkt_tx_stats tx;
4061 	struct dp_peer_per_pkt_rx_stats rx;
4062 };
4063 
4064 /**
4065  * struct dp_peer_extd_stats - Stats from extended path for peer
4066  * @tx: Extended path tx stats
4067  * @rx: Extended path rx stats
4068  */
4069 struct dp_peer_extd_stats {
4070 	struct dp_peer_extd_tx_stats tx;
4071 	struct dp_peer_extd_rx_stats rx;
4072 };
4073 
4074 /**
4075  * struct dp_peer_stats - Peer stats
4076  * @per_pkt_stats: Per packet path stats
4077  * @extd_stats: Extended path stats
4078  */
4079 struct dp_peer_stats {
4080 	struct dp_peer_per_pkt_stats per_pkt_stats;
4081 #ifndef QCA_ENHANCED_STATS_SUPPORT
4082 	struct dp_peer_extd_stats extd_stats;
4083 #endif
4084 };
4085 
4086 /**
4087  * struct dp_txrx_peer: DP txrx_peer structure used in per pkt path
4088  * @tx_failed: Total Tx failure
4089  * @cdp_pkt_info comp_pkt: Pkt Info for which completions were received
4090  * @to_stack: Total packets sent up the stack
4091  * @stats: Peer stats
4092  * @delay_stats: Peer delay stats
4093  * @jitter_stats: Peer jitter stats
4094  * @bw: bandwidth of peer connection
4095  * @mpdu_retry_threshold: MPDU retry threshold to increment tx bad count
4096  */
4097 struct dp_txrx_peer {
4098 	/* Core TxRx Peer */
4099 
4100 	/* VDEV to which this peer is associated */
4101 	struct dp_vdev *vdev;
4102 
4103 	/* peer ID for this peer */
4104 	uint16_t peer_id;
4105 
4106 	uint8_t authorize:1, /* Set when authorized */
4107 		in_twt:1, /* in TWT session */
4108 		hw_txrx_stats_en:1, /*Indicate HW offload vdev stats */
4109 		mld_peer:1; /* MLD peer*/
4110 
4111 	uint32_t tx_failed;
4112 	struct cdp_pkt_info comp_pkt;
4113 	struct cdp_pkt_info to_stack;
4114 
4115 	struct dp_peer_stats stats;
4116 
4117 	struct dp_peer_delay_stats *delay_stats;
4118 
4119 	struct cdp_peer_tid_stats *jitter_stats;
4120 
4121 	struct {
4122 		enum cdp_sec_type sec_type;
4123 		u_int32_t michael_key[2]; /* relevant for TKIP */
4124 	} security[2]; /* 0 -> multicast, 1 -> unicast */
4125 
4126 	uint16_t nawds_enabled:1, /* NAWDS flag */
4127 		bss_peer:1, /* set for bss peer */
4128 		isolation:1, /* enable peer isolation for this peer */
4129 		wds_enabled:1; /* WDS peer */
4130 #ifdef WDS_VENDOR_EXTENSION
4131 	dp_ecm_policy wds_ecm;
4132 #endif
4133 #ifdef PEER_CACHE_RX_PKTS
4134 	qdf_atomic_t flush_in_progress;
4135 	struct dp_peer_cached_bufq bufq_info;
4136 #endif
4137 #ifdef QCA_MULTIPASS_SUPPORT
4138 	/* node in the special peer list element */
4139 	TAILQ_ENTRY(dp_txrx_peer) mpass_peer_list_elem;
4140 	/* vlan id for key */
4141 	uint16_t vlan_id;
4142 #endif
4143 #ifdef QCA_SUPPORT_WDS_EXTENDED
4144 	struct dp_wds_ext_peer wds_ext;
4145 	ol_txrx_rx_fp osif_rx;
4146 #endif
4147 	struct dp_rx_tid_defrag rx_tid[DP_MAX_TIDS];
4148 #ifdef CONFIG_SAWF
4149 	struct dp_peer_sawf_stats *sawf_stats;
4150 #endif
4151 #ifdef DP_PEER_EXTENDED_API
4152 	enum cdp_peer_bw bw;
4153 	uint8_t mpdu_retry_threshold;
4154 #endif
4155 };
4156 
4157 /* Peer structure for data path state */
4158 struct dp_peer {
4159 	struct dp_txrx_peer *txrx_peer;
4160 #ifdef WIFI_MONITOR_SUPPORT
4161 	struct dp_mon_peer *monitor_peer;
4162 #endif
4163 	/* peer ID for this peer */
4164 	uint16_t peer_id;
4165 
4166 	/* VDEV to which this peer is associated */
4167 	struct dp_vdev *vdev;
4168 
4169 	struct dp_ast_entry *self_ast_entry;
4170 
4171 	qdf_atomic_t ref_cnt;
4172 
4173 	union dp_align_mac_addr mac_addr;
4174 
4175 	/* node in the vdev's list of peers */
4176 	TAILQ_ENTRY(dp_peer) peer_list_elem;
4177 	/* node in the hash table bin's list of peers */
4178 	TAILQ_ENTRY(dp_peer) hash_list_elem;
4179 
4180 	/* TID structures pointer */
4181 	struct dp_rx_tid *rx_tid;
4182 
4183 	/* TBD: No transmit TID state required? */
4184 
4185 	struct {
4186 		enum cdp_sec_type sec_type;
4187 		u_int32_t michael_key[2]; /* relevant for TKIP */
4188 	} security[2]; /* 0 -> multicast, 1 -> unicast */
4189 
4190 	/* NAWDS Flag and Bss Peer bit */
4191 	uint16_t bss_peer:1, /* set for bss peer */
4192 		authorize:1, /* Set when authorized */
4193 		valid:1, /* valid bit */
4194 		delete_in_progress:1, /* Indicate kickout sent */
4195 		sta_self_peer:1, /* Indicate STA self peer */
4196 		is_tdls_peer:1; /* Indicate TDLS peer */
4197 
4198 #ifdef WLAN_FEATURE_11BE_MLO
4199 	uint8_t first_link:1, /* first link peer for MLO */
4200 		primary_link:1; /* primary link for MLO */
4201 #endif
4202 
4203 	/* MCL specific peer local id */
4204 	uint16_t local_id;
4205 	enum ol_txrx_peer_state state;
4206 	qdf_spinlock_t peer_info_lock;
4207 
4208 	/* Peer calibrated stats */
4209 	struct cdp_calibr_stats stats;
4210 
4211 	TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
4212 	/* TBD */
4213 
4214 	/* Active Block ack sessions */
4215 	uint16_t active_ba_session_cnt;
4216 
4217 	/* Current HW buffersize setting */
4218 	uint16_t hw_buffer_size;
4219 
4220 	/*
4221 	 * Flag to check if sessions with 256 buffersize
4222 	 * should be terminated.
4223 	 */
4224 	uint8_t kill_256_sessions;
4225 	qdf_atomic_t is_default_route_set;
4226 
4227 #ifdef QCA_PEER_MULTIQ_SUPPORT
4228 	struct dp_peer_ast_params peer_ast_flowq_idx[DP_PEER_AST_FLOWQ_MAX];
4229 #endif
4230 	/* entry to inactive_list*/
4231 	TAILQ_ENTRY(dp_peer) inactive_list_elem;
4232 
4233 	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
4234 
4235 	uint8_t peer_state;
4236 	qdf_spinlock_t peer_state_lock;
4237 #ifdef WLAN_SUPPORT_MSCS
4238 	struct dp_peer_mscs_parameter mscs_ipv4_parameter, mscs_ipv6_parameter;
4239 	bool mscs_active;
4240 #endif
4241 #ifdef WLAN_SUPPORT_MESH_LATENCY
4242 	struct dp_peer_mesh_latency_parameter mesh_latency_params[DP_MAX_TIDS];
4243 #endif
4244 #ifdef WLAN_FEATURE_11BE_MLO
4245 	/* peer type */
4246 	enum cdp_peer_type peer_type;
4247 	/*---------for link peer---------*/
4248 	struct dp_peer *mld_peer;
4249 	/*---------for mld peer----------*/
4250 	struct dp_peer_link_info link_peers[DP_MAX_MLO_LINKS];
4251 	uint8_t num_links;
4252 	DP_MUTEX_TYPE link_peers_info_lock;
4253 #endif
4254 #ifdef CONFIG_SAWF_DEF_QUEUES
4255 	struct dp_peer_sawf *sawf;
4256 #endif
4257 };
4258 
4259 /*
4260  * dp_invalid_peer_msg
4261  * @nbuf: data buffer
4262  * @wh: 802.11 header
4263  * @vdev_id: id of vdev
4264  */
4265 struct dp_invalid_peer_msg {
4266 	qdf_nbuf_t nbuf;
4267 	struct ieee80211_frame *wh;
4268 	uint8_t vdev_id;
4269 };
4270 
4271 /*
4272  * dp_tx_me_buf_t: ME buffer
4273  * next: pointer to next buffer
4274  * data: Destination Mac address
4275  * paddr_macbuf: physical address for dest_mac
4276  */
4277 struct dp_tx_me_buf_t {
4278 	/* Note: ME buf pool initialization logic expects the next pointer to
4279 	 * be the first element. Don't add anything before next */
4280 	struct dp_tx_me_buf_t *next;
4281 	uint8_t data[QDF_MAC_ADDR_SIZE];
4282 	qdf_dma_addr_t paddr_macbuf;
4283 };
4284 
4285 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
4286 struct hal_rx_fst;
4287 
4288 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
4289 struct dp_rx_fse {
4290 	/* HAL Rx Flow Search Entry which matches HW definition */
4291 	void *hal_rx_fse;
4292 	/* Toeplitz hash value */
4293 	uint32_t flow_hash;
4294 	/* Flow index, equivalent to hash value truncated to FST size */
4295 	uint32_t flow_id;
4296 	/* Stats tracking for this flow */
4297 	struct cdp_flow_stats stats;
4298 	/* Flag indicating whether flow is IPv4 address tuple */
4299 	uint8_t is_ipv4_addr_entry;
4300 	/* Flag indicating whether flow is valid */
4301 	uint8_t is_valid;
4302 };
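/*
 * Illustrative sketch (editorial example): per the field comments above,
 * flow_id is the Toeplitz hash truncated to the FST size. Assuming a
 * power-of-two table whose hash_mask is (max_entries - 1), the relation is
 * simply:
 *
 *	fse->flow_id = fse->flow_hash & fst->hash_mask;
 *
 * where "fst" is the owning struct dp_rx_fst defined below; the actual
 * truncation is done in the Rx flow-tag implementation.
 */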
4303 
4304 struct dp_rx_fst {
4305 	/* Software (DP) FST */
4306 	uint8_t *base;
4307 	/* Pointer to HAL FST */
4308 	struct hal_rx_fst *hal_rx_fst;
4309 	/* Base physical address of HAL RX HW FST */
4310 	uint64_t hal_rx_fst_base_paddr;
4311 	/* Maximum number of flows FSE supports */
4312 	uint16_t max_entries;
4313 	/* Num entries in flow table */
4314 	uint16_t num_entries;
4315 	/* SKID Length */
4316 	uint16_t max_skid_length;
4317 	/* Hash mask to obtain legitimate hash entry */
4318 	uint32_t hash_mask;
4319 	/* Timer for bundling of flows */
4320 	qdf_timer_t cache_invalidate_timer;
4321 	/**
4322 	 * Flag which tracks whether cache update
4323 	 * is needed on timer expiry
4324 	 */
4325 	qdf_atomic_t is_cache_update_pending;
4326 	/* Flag to indicate completion of FSE setup in HW/FW */
4327 	bool fse_setup_done;
4328 };
4329 
4330 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_rx_fse)
4331 #elif WLAN_SUPPORT_RX_FISA
4332 
4333 /**
4334  * struct dp_fisa_reo_mismatch_stats - reo mismatch sub-case stats for FISA
4335  * @allow_cce_match: packet allowed as the REO mismatch is due to a CCE match
4336  * @allow_fse_metdata_mismatch: packet allowed since it belongs to the same
4337  *			flow and only the fse_metadata differs
4338  * @allow_non_aggr: packet allowed due to any other reason
4339  */
4340 struct dp_fisa_reo_mismatch_stats {
4341 	uint32_t allow_cce_match;
4342 	uint32_t allow_fse_metdata_mismatch;
4343 	uint32_t allow_non_aggr;
4344 };
4345 
4346 struct dp_fisa_stats {
4347 	/* flow index invalid from RX HW TLV */
4348 	uint32_t invalid_flow_index;
4349 	struct dp_fisa_reo_mismatch_stats reo_mismatch;
4350 };
4351 
4352 enum fisa_aggr_ret {
4353 	FISA_AGGR_DONE,
4354 	FISA_AGGR_NOT_ELIGIBLE,
4355 	FISA_FLUSH_FLOW
4356 };
4357 
4358 /**
4359  * struct fisa_pkt_hist - FISA Packet history structure
4360  * @tlv_hist: array of TLV history
4361  * @ts_hist: array of timestamps of fisa packets
4362  * @idx: index indicating the next location to be used in the array.
4363  */
4364 struct fisa_pkt_hist {
4365 	uint8_t *tlv_hist;
4366 	qdf_time_t ts_hist[FISA_FLOW_MAX_AGGR_COUNT];
4367 	uint32_t idx;
4368 };
4369 
4370 struct dp_fisa_rx_sw_ft {
4371 	/* HAL Rx Flow Search Entry which matches HW definition */
4372 	void *hw_fse;
4373 	/* hash value */
4374 	uint32_t flow_hash;
4375 	/* toeplitz hash value*/
4376 	uint32_t flow_id_toeplitz;
4377 	/* Flow index, equivalent to hash value truncated to FST size */
4378 	uint32_t flow_id;
4379 	/* Stats tracking for this flow */
4380 	struct cdp_flow_stats stats;
4381 	/* Flag indicating whether flow is IPv4 address tuple */
4382 	uint8_t is_ipv4_addr_entry;
4383 	/* Flag indicating whether flow is valid */
4384 	uint8_t is_valid;
4385 	uint8_t is_populated;
4386 	uint8_t is_flow_udp;
4387 	uint8_t is_flow_tcp;
4388 	qdf_nbuf_t head_skb;
4389 	uint16_t cumulative_l4_checksum;
4390 	uint16_t adjusted_cumulative_ip_length;
4391 	uint16_t cur_aggr;
4392 	uint16_t napi_flush_cumulative_l4_checksum;
4393 	uint16_t napi_flush_cumulative_ip_length;
4394 	qdf_nbuf_t last_skb;
4395 	uint32_t head_skb_ip_hdr_offset;
4396 	uint32_t head_skb_l4_hdr_offset;
4397 	struct cdp_rx_flow_tuple_info rx_flow_tuple_info;
4398 	uint8_t napi_id;
4399 	struct dp_vdev *vdev;
4400 	uint64_t bytes_aggregated;
4401 	uint32_t flush_count;
4402 	uint32_t aggr_count;
4403 	uint8_t do_not_aggregate;
4404 	uint16_t hal_cumultive_ip_len;
4405 	struct dp_soc *soc_hdl;
4406 	/* last aggregate count fetched from RX PKT TLV */
4407 	uint32_t last_hal_aggr_count;
4408 	uint32_t cur_aggr_gso_size;
4409 	qdf_net_udphdr_t *head_skb_udp_hdr;
4410 	uint16_t frags_cumulative_len;
4411 	/* CMEM parameters */
4412 	uint32_t cmem_offset;
4413 	uint32_t metadata;
4414 	uint32_t reo_dest_indication;
4415 	qdf_time_t flow_init_ts;
4416 	qdf_time_t last_accessed_ts;
4417 #ifdef WLAN_SUPPORT_RX_FISA_HIST
4418 	struct fisa_pkt_hist pkt_hist;
4419 #endif
4420 };
4421 
4422 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_fisa_rx_sw_ft)
4423 #define MAX_FSE_CACHE_FL_HST 10
4424 /**
4425  * struct fse_cache_flush_history - Debug history cache flush
4426  * @timestamp: Entry update timestamp
4427  * @flows_added: Number of flows added for this flush
4428  * @flows_deleted: Number of flows deleted for this flush
4429  */
4430 struct fse_cache_flush_history {
4431 	uint64_t timestamp;
4432 	uint32_t flows_added;
4433 	uint32_t flows_deleted;
4434 };
4435 
4436 struct dp_rx_fst {
4437 	/* Software (DP) FST */
4438 	uint8_t *base;
4439 	/* Pointer to HAL FST */
4440 	struct hal_rx_fst *hal_rx_fst;
4441 	/* Base physical address of HAL RX HW FST */
4442 	uint64_t hal_rx_fst_base_paddr;
4443 	/* Maximum number of flows FSE supports */
4444 	uint16_t max_entries;
4445 	/* Num entries in flow table */
4446 	uint16_t num_entries;
4447 	/* SKID Length */
4448 	uint16_t max_skid_length;
4449 	/* Hash mask to obtain legitimate hash entry */
4450 	uint32_t hash_mask;
4451 	/* Lock for adding/deleting entries of FST */
4452 	qdf_spinlock_t dp_rx_fst_lock;
4453 	uint32_t add_flow_count;
4454 	uint32_t del_flow_count;
4455 	uint32_t hash_collision_cnt;
4456 	struct dp_soc *soc_hdl;
4457 	qdf_atomic_t fse_cache_flush_posted;
4458 	qdf_timer_t fse_cache_flush_timer;
4459 	/* Allow FSE cache flush cmd to FW */
4460 	bool fse_cache_flush_allow;
4461 	struct fse_cache_flush_history cache_fl_rec[MAX_FSE_CACHE_FL_HST];
4462 	/* FISA DP stats */
4463 	struct dp_fisa_stats stats;
4464 
4465 	/* CMEM params */
4466 	qdf_work_t fst_update_work;
4467 	qdf_workqueue_t *fst_update_wq;
4468 	qdf_list_t fst_update_list;
4469 	uint32_t meta_counter;
4470 	uint32_t cmem_ba;
4471 	qdf_spinlock_t dp_rx_sw_ft_lock[MAX_REO_DEST_RINGS];
4472 	qdf_event_t cmem_resp_event;
4473 	bool flow_deletion_supported;
4474 	bool fst_in_cmem;
4475 	bool pm_suspended;
4476 };
4477 
4478 #endif /* WLAN_SUPPORT_RX_FISA */
4479 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
4480 
4481 #ifdef WLAN_FEATURE_STATS_EXT
4482 /*
4483  * dp_req_rx_hw_stats_t: RX peer HW stats query structure
4484  * @pending_tid_stats_cnt: pending tid stats count which waits for REO status
4485  * @is_query_timeout: flag to indicate whether the stats query timed out
4486  */
4487 struct dp_req_rx_hw_stats_t {
4488 	qdf_atomic_t pending_tid_stats_cnt;
4489 	bool is_query_timeout;
4490 };
4491 #endif
4492 /* soc level structure to declare arch specific ops for DP */
4493 
4494 
4495 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id);
4496 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc,
4497 					    uint32_t mac_id);
4498 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id);
4499 
4500 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
4501 void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc);
4502 #else
4503 static inline void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc) {}
4504 #endif
4505 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
4506 			 int ring_type, uint32_t num_entries,
4507 			 bool cached);
4508 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng);
4509 QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
4510 			int ring_type, int ring_num, int mac_id);
4511 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
4512 		    int ring_type, int ring_num);
4513 void dp_print_peer_txrx_stats_be(struct cdp_peer_stats *peer_stats,
4514 				 enum peer_stats_type stats_type);
4515 void dp_print_peer_txrx_stats_li(struct cdp_peer_stats *peer_stats,
4516 				 enum peer_stats_type stats_type);
4517 
4518 enum timer_yield_status
4519 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
4520 			  uint64_t start_time);
4521 
4522 /*
4523  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
4524  * @vdev: Datapath VDEV handle
4525  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
4526  * @hash_based: pointer to hash value (enabled/disabled) to be populated
4527  *
4528  * Return: None
4529  */
4530 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
4531 				  enum cdp_host_reo_dest_ring *reo_dest,
4532 				  bool *hash_based);
4533 
4534 /**
4535  * dp_reo_remap_config() - configure reo remap register value based
4536  *                         on the nss configuration.
4537  *		Based on the offload_radio value below, the remap
4538  *		configuration gets applied:
4539  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
4540  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
4541  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
4542  *		3 - both Radios handled by NSS (remap not required)
4543  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
4544  * @soc: DP soc handle
4545  * @remap0: output parameter indicates reo remap 0 register value
4546  * @remap1: output parameter indicates reo remap 1 register value
4547  * @remap2: output parameter indicates reo remap 2 register value
4548  * Return: bool type, true if remap is configured else false.
4549  */
4550 
4551 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
4552 			 uint32_t *remap1, uint32_t *remap2);
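/*
 * Illustrative usage sketch (editorial example): a caller typically passes
 * three locals and only programs the REO remap registers when a remap is
 * actually required:
 *
 *	uint32_t remap0 = 0, remap1 = 0, remap2 = 0;
 *
 *	if (dp_reo_remap_config(soc, &remap0, &remap1, &remap2)) {
 *		// write remap0/remap1/remap2 to the REO destination
 *		// remap registers through the HAL
 *	}
 */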
4553 
4554 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
4555 /**
4556  * dp_tx_comp_get_prefetched_params_from_hal_desc() - Get prefetched TX desc
4557  * @soc: DP soc handle
4558  * @tx_comp_hal_desc: HAL TX Comp Descriptor
4559  * @r_tx_desc: SW Tx Descriptor retrieved from HAL desc.
4560  *
4561  * Return: None
4562  */
4563 void dp_tx_comp_get_prefetched_params_from_hal_desc(
4564 					struct dp_soc *soc,
4565 					void *tx_comp_hal_desc,
4566 					struct dp_tx_desc_s **r_tx_desc);
4567 #endif
4568 #endif /* _DP_TYPES_H_ */
4569