xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_types.h (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _DP_TYPES_H_
21 #define _DP_TYPES_H_
22 
23 #include <qdf_types.h>
24 #include <qdf_nbuf.h>
25 #include <qdf_lock.h>
26 #include <qdf_atomic.h>
27 #include <qdf_util.h>
28 #include <qdf_list.h>
29 #include <qdf_lro.h>
30 #include <queue.h>
31 #include <htt_common.h>
32 #include <htt.h>
33 #include <htt_stats.h>
34 #include <cdp_txrx_cmn.h>
35 #ifdef DP_MOB_DEFS
36 #include <cds_ieee80211_common.h>
37 #endif
38 #include <wdi_event_api.h>    /* WDI subscriber event list */
39 
40 #include "hal_hw_headers.h"
41 #include <hal_tx.h>
42 #include <hal_reo.h>
43 #include "wlan_cfg.h"
44 #include "hal_rx.h"
45 #include <hal_api.h>
46 #include <hal_api_mon.h>
47 #include "hal_rx.h"
48 //#include "hal_rx_flow.h"
49 
50 #define MAX_BW 8
51 #define MAX_RETRIES 4
52 #define MAX_RECEPTION_TYPES 4
53 
54 #define MINIDUMP_STR_SIZE 25
55 #ifndef REMOVE_PKT_LOG
56 #include <pktlog.h>
57 #endif
58 #include <dp_umac_reset.h>
59 
60 //#include "dp_tx.h"
61 
62 #define REPT_MU_MIMO 1
63 #define REPT_MU_OFDMA_MIMO 3
64 #define DP_VO_TID 6
65  /** MAX TID MAPS AVAILABLE PER PDEV */
66 #define DP_MAX_TID_MAPS 16
67 /** pad DSCP_TID_MAP_MAX with 6 to avoid an out-of-bounds (OOB) access */
68 #define DSCP_TID_MAP_MAX (64 + 6)
69 #define DP_IP_DSCP_SHIFT 2
70 #define DP_IP_DSCP_MASK 0x3f
71 #define DP_FC0_SUBTYPE_QOS 0x80
72 #define DP_QOS_TID 0x0f
73 #define DP_IPV6_PRIORITY_SHIFT 20
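/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * how the DSCP defines above are typically used to derive a TID from an IPv4
 * TOS byte. The dscp_tid_map[] name is hypothetical and stands in for the
 * per-pdev DSCP-to-TID map maintained elsewhere in the datapath.
 *
 *	uint8_t dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
 *	uint8_t tid  = dscp_tid_map[dscp];	// dscp < 64, within DSCP_TID_MAP_MAX
 */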
74 #define MAX_MON_LINK_DESC_BANKS 2
75 #define DP_VDEV_ALL CDP_VDEV_ALL
76 
77 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
78 #define WLAN_DP_RESET_MON_BUF_RING_FILTER
79 #define MAX_TXDESC_POOLS 6
80 #else
81 #define MAX_TXDESC_POOLS 4
82 #endif
83 
84 /* Max no of descriptors to handle special frames like EAPOL */
85 #define MAX_TX_SPL_DESC 1024
86 
87 #define MAX_RXDESC_POOLS 4
88 #define MAX_PPE_TXDESC_POOLS 1
89 
90 /* Max no. of VDEVs per PSOC */
91 #ifdef WLAN_PSOC_MAX_VDEVS
92 #define MAX_VDEV_CNT WLAN_PSOC_MAX_VDEVS
93 #else
94 #define MAX_VDEV_CNT 51
95 #endif
96 
97 /* Max no. of VDEVs a PDEV can support */
98 #ifdef WLAN_PDEV_MAX_VDEVS
99 #define DP_PDEV_MAX_VDEVS WLAN_PDEV_MAX_VDEVS
100 #else
101 #define DP_PDEV_MAX_VDEVS 17
102 #endif
103 
104 #define EXCEPTION_DEST_RING_ID 0
105 #define MAX_IDLE_SCATTER_BUFS 16
106 #define DP_MAX_IRQ_PER_CONTEXT 12
107 #define DEFAULT_HW_PEER_ID 0xffff
108 
109 #define MAX_AST_AGEOUT_COUNT 128
110 
111 #ifdef TX_ADDR_INDEX_SEARCH
112 #define DP_TX_ADDR_SEARCH_ADDR_POLICY HAL_TX_ADDR_INDEX_SEARCH
113 #else
114 #define DP_TX_ADDR_SEARCH_ADDR_POLICY HAL_TX_ADDR_SEARCH_DEFAULT
115 #endif
116 
117 #define WBM_INT_ERROR_ALL 0
118 #define WBM_INT_ERROR_REO_NULL_BUFFER 1
119 #define WBM_INT_ERROR_REO_NULL_LINK_DESC 2
120 #define WBM_INT_ERROR_REO_NULL_MSDU_BUFF 3
121 #define WBM_INT_ERROR_REO_BUFF_REAPED 4
122 #define MAX_WBM_INT_ERROR_REASONS 5
123 
124 #define MAX_TX_HW_QUEUES MAX_TCL_DATA_RINGS
125 /* Maximum retries for Delba per tid per peer */
126 #define DP_MAX_DELBA_RETRY 3
127 
128 #ifdef AST_OFFLOAD_ENABLE
129 #define AST_OFFLOAD_ENABLE_STATUS 1
130 #else
131 #define AST_OFFLOAD_ENABLE_STATUS 0
132 #endif
133 
134 #ifdef FEATURE_MEC_OFFLOAD
135 #define FW_MEC_FW_OFFLOAD_ENABLED 1
136 #else
137 #define FW_MEC_FW_OFFLOAD_ENABLED 0
138 #endif
139 
140 #define PCP_TID_MAP_MAX 8
141 #define MAX_MU_USERS 37
142 
143 #define REO_CMD_EVENT_HIST_MAX 64
144 
145 #define DP_MAX_SRNGS 64
146 
147 /* 2G PHYB */
148 #define PHYB_2G_LMAC_ID 2
149 #define PHYB_2G_TARGET_PDEV_ID 2
150 
151 /* Flags for skipping s/w tid classification */
152 #define DP_TX_HW_DSCP_TID_MAP_VALID 0x1
153 #define DP_TXRX_HLOS_TID_OVERRIDE_ENABLED 0x2
154 #define DP_TX_MESH_ENABLED 0x4
155 #define DP_TX_INVALID_QOS_TAG 0xf
156 
157 #ifdef WLAN_SUPPORT_RX_FISA
158 #define FISA_FLOW_MAX_AGGR_COUNT        16 /* max flow aggregate count */
159 #endif
160 
161 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
162 #define DP_RX_REFILL_BUFF_POOL_SIZE  2048
163 #define DP_RX_REFILL_BUFF_POOL_BURST 64
164 #define DP_RX_REFILL_THRD_THRESHOLD  512
165 #endif
166 
167 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
168 #define DP_SKIP_BAR_UPDATE_TIMEOUT 5000
169 #endif
170 
171 #define DP_TX_MAGIC_PATTERN_INUSE	0xABCD1234
172 #define DP_TX_MAGIC_PATTERN_FREE	0xDEADBEEF
173 
174 #ifdef IPA_OFFLOAD
175 #define DP_PEER_REO_STATS_TID_SHIFT 16
176 #define DP_PEER_REO_STATS_TID_MASK 0xFFFF0000
177 #define DP_PEER_REO_STATS_PEER_ID_MASK 0x0000FFFF
178 #define DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid) \
179 	((comb_peer_id_tid & DP_PEER_REO_STATS_TID_MASK) >> \
180 	DP_PEER_REO_STATS_TID_SHIFT)
181 #define DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid) \
182 	(comb_peer_id_tid & DP_PEER_REO_STATS_PEER_ID_MASK)
183 #endif
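/*
 * Illustrative sketch (editorial addition): packing and unpacking the
 * combined peer_id/tid value consumed by the IPA REO stats macros above.
 * The peer_id occupies the lower 16 bits and the tid the upper 16 bits;
 * comb, tid and peer_id are hypothetical local variables.
 *
 *	uint32_t comb    = (tid << DP_PEER_REO_STATS_TID_SHIFT) | peer_id;
 *	uint16_t rx_tid  = DP_PEER_GET_REO_STATS_TID(comb);
 *	uint16_t rx_peer = DP_PEER_GET_REO_STATS_PEER_ID(comb);
 */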
184 
185 enum rx_pktlog_mode {
186 	DP_RX_PKTLOG_DISABLED = 0,
187 	DP_RX_PKTLOG_FULL,
188 	DP_RX_PKTLOG_LITE,
189 };
190 
191 /*
192  * enum m_copy_mode - Available mcopy modes
193  */
194 enum m_copy_mode {
195 	M_COPY_DISABLED = 0,
196 	M_COPY = 2,
197 	M_COPY_EXTENDED = 4,
198 };
199 
200 struct msdu_list {
201 	qdf_nbuf_t head;
202 	qdf_nbuf_t tail;
203 	uint32_t sum_len;
204 };
205 
206 struct dp_soc_cmn;
207 struct dp_pdev;
208 struct dp_vdev;
209 struct dp_tx_desc_s;
210 struct dp_soc;
211 union dp_rx_desc_list_elem_t;
212 struct cdp_peer_rate_stats_ctx;
213 struct cdp_soc_rate_stats_ctx;
214 struct dp_rx_fst;
215 struct dp_mon_filter;
216 struct dp_mon_mpdu;
217 #ifdef BE_PKTLOG_SUPPORT
218 struct dp_mon_filter_be;
219 #endif
220 struct dp_peer;
221 struct dp_txrx_peer;
222 
223 /**
224  * enum dp_peer_state - DP peer states
225  * @DP_PEER_STATE_NONE:
226  * @DP_PEER_STATE_INIT:
227  * @DP_PEER_STATE_ACTIVE:
228  * @DP_PEER_STATE_LOGICAL_DELETE:
229  * @DP_PEER_STATE_INACTIVE:
230  * @DP_PEER_STATE_FREED:
231  * @DP_PEER_STATE_INVALID:
232  */
233 enum dp_peer_state {
234 	DP_PEER_STATE_NONE,
235 	DP_PEER_STATE_INIT,
236 	DP_PEER_STATE_ACTIVE,
237 	DP_PEER_STATE_LOGICAL_DELETE,
238 	DP_PEER_STATE_INACTIVE,
239 	DP_PEER_STATE_FREED,
240 	DP_PEER_STATE_INVALID,
241 };
242 
243 /**
244  * enum dp_mod_id - DP module IDs
245  * @DP_MOD_ID_TX_RX:
246  * @DP_MOD_ID_TX_COMP:
247  * @DP_MOD_ID_RX:
248  * @DP_MOD_ID_HTT_COMP:
249  * @DP_MOD_ID_RX_ERR:
250  * @DP_MOD_ID_TX_PPDU_STATS:
251  * @DP_MOD_ID_RX_PPDU_STATS:
252  * @DP_MOD_ID_CDP:
253  * @DP_MOD_ID_GENERIC_STATS:
254  * @DP_MOD_ID_TX_MULTIPASS:
255  * @DP_MOD_ID_TX_CAPTURE:
256  * @DP_MOD_ID_NSS_OFFLOAD:
257  * @DP_MOD_ID_CONFIG:
258  * @DP_MOD_ID_HTT:
259  * @DP_MOD_ID_IPA:
260  * @DP_MOD_ID_AST:
261  * @DP_MOD_ID_MCAST2UCAST:
262  * @DP_MOD_ID_CHILD:
263  * @DP_MOD_ID_MESH:
264  * @DP_MOD_ID_TX_EXCEPTION:
265  * @DP_MOD_ID_TDLS:
266  * @DP_MOD_ID_MISC:
267  * @DP_MOD_ID_MSCS:
268  * @DP_MOD_ID_TX:
269  * @DP_MOD_ID_SAWF:
270  * @DP_MOD_ID_REINJECT:
271  * @DP_MOD_ID_SCS:
272  * @DP_MOD_ID_UMAC_RESET:
273  * @DP_MOD_ID_TX_MCAST:
274  * @DP_MOD_ID_DS:
275  * @DP_MOD_ID_MAX:
276  */
277 enum dp_mod_id {
278 	DP_MOD_ID_TX_RX,
279 	DP_MOD_ID_TX_COMP,
280 	DP_MOD_ID_RX,
281 	DP_MOD_ID_HTT_COMP,
282 	DP_MOD_ID_RX_ERR,
283 	DP_MOD_ID_TX_PPDU_STATS,
284 	DP_MOD_ID_RX_PPDU_STATS,
285 	DP_MOD_ID_CDP,
286 	DP_MOD_ID_GENERIC_STATS,
287 	DP_MOD_ID_TX_MULTIPASS,
288 	DP_MOD_ID_TX_CAPTURE,
289 	DP_MOD_ID_NSS_OFFLOAD,
290 	DP_MOD_ID_CONFIG,
291 	DP_MOD_ID_HTT,
292 	DP_MOD_ID_IPA,
293 	DP_MOD_ID_AST,
294 	DP_MOD_ID_MCAST2UCAST,
295 	DP_MOD_ID_CHILD,
296 	DP_MOD_ID_MESH,
297 	DP_MOD_ID_TX_EXCEPTION,
298 	DP_MOD_ID_TDLS,
299 	DP_MOD_ID_MISC,
300 	DP_MOD_ID_MSCS,
301 	DP_MOD_ID_TX,
302 	DP_MOD_ID_SAWF,
303 	DP_MOD_ID_REINJECT,
304 	DP_MOD_ID_SCS,
305 	DP_MOD_ID_UMAC_RESET,
306 	DP_MOD_ID_TX_MCAST,
307 	DP_MOD_ID_DS,
308 	DP_MOD_ID_MAX,
309 };
310 
311 #define DP_PDEV_ITERATE_VDEV_LIST(_pdev, _vdev) \
312 	TAILQ_FOREACH((_vdev), &(_pdev)->vdev_list, vdev_list_elem)
313 
314 #define DP_VDEV_ITERATE_PEER_LIST(_vdev, _peer) \
315 	TAILQ_FOREACH((_peer), &(_vdev)->peer_list, peer_list_elem)
316 
317 #define DP_PEER_ITERATE_ASE_LIST(_peer, _ase, _temp_ase) \
318 	TAILQ_FOREACH_SAFE((_ase), &(_peer)->ast_entry_list, ase_list_elem, (_temp_ase))
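/*
 * Illustrative sketch (editorial addition): typical usage of the iterator
 * macros above; the caller is expected to hold the corresponding list lock
 * while walking the list. pdev is a hypothetical struct dp_pdev pointer.
 *
 *	struct dp_vdev *vdev;
 *
 *	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
 *		// inspect or update each vdev attached to this pdev
 *	}
 */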
319 
320 #define DP_MUTEX_TYPE qdf_spinlock_t
321 
322 #define DP_FRAME_IS_MULTICAST(_a)  (*(_a) & 0x01)
323 #define DP_FRAME_IS_IPV4_MULTICAST(_a)  (*(_a) == 0x01)
324 
325 #define DP_FRAME_IS_IPV6_MULTICAST(_a)         \
326     ((_a)[0] == 0x33 &&                         \
327      (_a)[1] == 0x33)
328 
329 #define DP_FRAME_IS_BROADCAST(_a)              \
330     ((_a)[0] == 0xff &&                         \
331      (_a)[1] == 0xff &&                         \
332      (_a)[2] == 0xff &&                         \
333      (_a)[3] == 0xff &&                         \
334      (_a)[4] == 0xff &&                         \
335      (_a)[5] == 0xff)
336 #define DP_FRAME_IS_SNAP(_llc) ((_llc)->llc_dsap == 0xaa && \
337 		(_llc)->llc_ssap == 0xaa && \
338 		(_llc)->llc_un.type_snap.control == 0x3)
339 #define DP_FRAME_IS_LLC(typeorlen) ((typeorlen) >= 0x600)
340 #define DP_FRAME_FC0_TYPE_MASK 0x0c
341 #define DP_FRAME_FC0_TYPE_DATA 0x08
342 #define DP_FRAME_IS_DATA(_frame) \
343 	(((_frame)->i_fc[0] & DP_FRAME_FC0_TYPE_MASK) == DP_FRAME_FC0_TYPE_DATA)
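/*
 * Illustrative sketch (editorial addition): the address-class macros above
 * inspect the destination MAC address, while DP_FRAME_IS_DATA() checks the
 * 802.11 Frame Control field. eh, handle_bcast() and handle_mcast() are
 * hypothetical names used only for illustration.
 *
 *	uint8_t *da = eh->ether_dhost;		// destination MAC of an 802.3 header
 *
 *	if (DP_FRAME_IS_BROADCAST(da))
 *		handle_bcast();
 *	else if (DP_FRAME_IS_MULTICAST(da))
 *		handle_mcast();			// I/G bit set in the first byte
 */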
344 
345 /*
346  * macros to convert hw mac id to sw mac id:
347  * mac ids used by hardware start from a value of 1 while
348  * those in host software start from a value of 0. Use the
349  * macros below to convert between mac ids used by software and
350  * hardware
351  */
352 #define DP_SW2HW_MACID(id) ((id) + 1)
353 #define DP_HW2SW_MACID(id) ((id) > 0 ? ((id) - 1) : 0)
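/*
 * Illustrative sketch (editorial addition): converting between the 0-based
 * mac ids used in host software and the 1-based ids used by hardware.
 *
 *	int hw_mac = DP_SW2HW_MACID(0);		// 1
 *	int sw_mac = DP_HW2SW_MACID(hw_mac);	// back to 0; an input of 0 also maps to 0
 */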
354 
355 /*
356  * Number of Tx Queues
357  * enum and macro to define how many threshold levels are used
358  * for AC-based flow control
359  */
360 #ifdef QCA_AC_BASED_FLOW_CONTROL
361 enum dp_fl_ctrl_threshold {
362 	DP_TH_BE_BK = 0,
363 	DP_TH_VI,
364 	DP_TH_VO,
365 	DP_TH_HI,
366 };
367 
368 #define FL_TH_MAX (4)
369 #define FL_TH_VI_PERCENTAGE (80)
370 #define FL_TH_VO_PERCENTAGE (60)
371 #define FL_TH_HI_PERCENTAGE (40)
372 #endif
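/*
 * Illustrative sketch (editorial addition, assumption only): the percentage
 * defines above are meant to scale a base stop threshold so that higher
 * priority access categories keep transmitting longer as descriptors run
 * low. base_stop_th and stop_th[] are hypothetical names for that base value
 * and the per-AC threshold array.
 *
 *	stop_th[DP_TH_BE_BK] = base_stop_th;
 *	stop_th[DP_TH_VI]    = (base_stop_th * FL_TH_VI_PERCENTAGE) / 100;
 *	stop_th[DP_TH_VO]    = (base_stop_th * FL_TH_VO_PERCENTAGE) / 100;
 *	stop_th[DP_TH_HI]    = (base_stop_th * FL_TH_HI_PERCENTAGE) / 100;
 */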
373 
374 /**
375  * enum dp_intr_mode
376  * @DP_INTR_INTEGRATED: Line interrupts
377  * @DP_INTR_MSI: MSI interrupts
378  * @DP_INTR_POLL: Polling
379  * @DP_INTR_LEGACY_VIRTUAL_IRQ:
380  */
381 enum dp_intr_mode {
382 	DP_INTR_INTEGRATED = 0,
383 	DP_INTR_MSI,
384 	DP_INTR_POLL,
385 	DP_INTR_LEGACY_VIRTUAL_IRQ,
386 };
387 
388 /**
389  * enum dp_tx_frm_type
390  * @dp_tx_frm_std: Regular frame, no added header fragments
391  * @dp_tx_frm_tso: TSO segment, with a modified IP header added
392  * @dp_tx_frm_sg: SG segment
393  * @dp_tx_frm_audio: Audio frames, a custom LLC/SNAP header added
394  * @dp_tx_frm_me: Multicast to Unicast Converted frame
395  * @dp_tx_frm_raw: Raw Frame
396  * @dp_tx_frm_rmnet:
397  */
398 enum dp_tx_frm_type {
399 	dp_tx_frm_std = 0,
400 	dp_tx_frm_tso,
401 	dp_tx_frm_sg,
402 	dp_tx_frm_audio,
403 	dp_tx_frm_me,
404 	dp_tx_frm_raw,
405 	dp_tx_frm_rmnet,
406 };
407 
408 /**
409  * enum dp_ast_type
410  * @dp_ast_type_wds: WDS peer AST type
411  * @dp_ast_type_static: static ast entry type
412  * @dp_ast_type_mec: Multicast echo ast entry type
413  */
414 enum dp_ast_type {
415 	dp_ast_type_wds = 0,
416 	dp_ast_type_static,
417 	dp_ast_type_mec,
418 };
419 
420 /**
421  * enum dp_nss_cfg
422  * @dp_nss_cfg_default: No radios are offloaded
423  * @dp_nss_cfg_first_radio: First radio offloaded
424  * @dp_nss_cfg_second_radio: Second radio offloaded
425  * @dp_nss_cfg_dbdc: Dual radios offloaded
426  * @dp_nss_cfg_dbtc: Three radios offloaded
427  * @dp_nss_cfg_max: max value
428  */
429 enum dp_nss_cfg {
430 	dp_nss_cfg_default = 0x0,
431 	dp_nss_cfg_first_radio = 0x1,
432 	dp_nss_cfg_second_radio = 0x2,
433 	dp_nss_cfg_dbdc = 0x3,
434 	dp_nss_cfg_dbtc = 0x7,
435 	dp_nss_cfg_max
436 };
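/*
 * Illustrative sketch (editorial addition): dp_nss_cfg behaves like a bitmap
 * of NSS-offloaded radios (bit 0 = first radio, bit 1 = second radio,
 * dbdc = 0x3, dbtc = 0x7), so a per-pdev offload check can be written as
 * shown below; nss_cfg and pdev_id are hypothetical variables.
 *
 *	bool offloaded = !!(nss_cfg & (1 << pdev_id));
 */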
437 
438 #ifdef WLAN_TX_PKT_CAPTURE_ENH
439 #define DP_CPU_RING_MAP_1 1
440 #endif
441 
442 /**
443  * enum dp_cpu_ring_map_types - dp tx cpu ring map
444  * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
445  * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded
446  * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
447  * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
448  * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
449  * @DP_SINGLE_TX_RING_MAP: all CPUs mapped to a single ring to avoid out-of-order delivery
450  * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
451  */
452 enum dp_cpu_ring_map_types {
453 	DP_NSS_DEFAULT_MAP,
454 	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
455 	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
456 	DP_NSS_DBDC_OFFLOADED_MAP,
457 	DP_NSS_DBTC_OFFLOADED_MAP,
458 #ifdef WLAN_TX_PKT_CAPTURE_ENH
459 	DP_SINGLE_TX_RING_MAP,
460 #endif
461 	DP_NSS_CPU_RING_MAP_MAX
462 };
463 
464 /**
465  * struct dp_rx_nbuf_frag_info - Hold vaddr and paddr for a buffer
466  *
467  * @paddr: Physical address of buffer allocated.
468  * @virt_addr: union of virtual address representations
469  * @nbuf: Allocated nbuf in case of nbuf approach.
470  * @vaddr: Virtual address of frag allocated in case of frag approach.
471  */
472 struct dp_rx_nbuf_frag_info {
473 	qdf_dma_addr_t paddr;
474 	union {
475 		qdf_nbuf_t nbuf;
476 		qdf_frag_t vaddr;
477 	} virt_addr;
478 };
479 
480 /**
481  * enum dp_ctxt_type - context type
482  * @DP_PDEV_TYPE: PDEV context
483  * @DP_RX_RING_HIST_TYPE: Datapath rx ring history
484  * @DP_RX_ERR_RING_HIST_TYPE: Datapath rx error ring history
485  * @DP_RX_REINJECT_RING_HIST_TYPE: Datapath reinject ring history
486  * @DP_TX_TCL_HIST_TYPE:
487  * @DP_TX_COMP_HIST_TYPE:
488  * @DP_FISA_RX_FT_TYPE:
489  * @DP_RX_REFILL_RING_HIST_TYPE: Datapath rx refill ring history
490  * @DP_TX_HW_DESC_HIST_TYPE: Datapath TX HW descriptor history
491  * @DP_MON_SOC_TYPE: Datapath monitor soc context
492  * @DP_MON_PDEV_TYPE: Datapath monitor pdev context
493  * @DP_MON_STATUS_BUF_HIST_TYPE: DP monitor status buffer history
494  * @DP_CFG_EVENT_HIST_TYPE: DP config events history
495  */
496 enum dp_ctxt_type {
497 	DP_PDEV_TYPE,
498 	DP_RX_RING_HIST_TYPE,
499 	DP_RX_ERR_RING_HIST_TYPE,
500 	DP_RX_REINJECT_RING_HIST_TYPE,
501 	DP_TX_TCL_HIST_TYPE,
502 	DP_TX_COMP_HIST_TYPE,
503 	DP_FISA_RX_FT_TYPE,
504 	DP_RX_REFILL_RING_HIST_TYPE,
505 	DP_TX_HW_DESC_HIST_TYPE,
506 	DP_MON_SOC_TYPE,
507 	DP_MON_PDEV_TYPE,
508 	DP_MON_STATUS_BUF_HIST_TYPE,
509 	DP_CFG_EVENT_HIST_TYPE,
510 };
511 
512 /**
513  * enum dp_desc_type - source type for multiple pages allocation
514  * @DP_TX_DESC_TYPE: DP SW TX descriptor
515  * @DP_TX_PPEDS_DESC_TYPE: DP PPE-DS Tx descriptor
516  * @DP_TX_EXT_DESC_TYPE: DP TX msdu extension descriptor
517  * @DP_TX_EXT_DESC_LINK_TYPE: DP link descriptor for msdu ext_desc
518  * @DP_TX_TSO_DESC_TYPE: DP TX TSO descriptor
519  * @DP_TX_TSO_NUM_SEG_TYPE: DP TX number of segments
520  * @DP_RX_DESC_BUF_TYPE: DP RX SW descriptor
521  * @DP_RX_DESC_STATUS_TYPE: DP RX SW descriptor for monitor status
522  * @DP_HW_LINK_DESC_TYPE: DP HW link descriptor
523  * @DP_HW_CC_SPT_PAGE_TYPE: DP pages for HW CC secondary page table
524  */
525 enum dp_desc_type {
526 	DP_TX_DESC_TYPE,
527 	DP_TX_PPEDS_DESC_TYPE,
528 	DP_TX_EXT_DESC_TYPE,
529 	DP_TX_EXT_DESC_LINK_TYPE,
530 	DP_TX_TSO_DESC_TYPE,
531 	DP_TX_TSO_NUM_SEG_TYPE,
532 	DP_RX_DESC_BUF_TYPE,
533 	DP_RX_DESC_STATUS_TYPE,
534 	DP_HW_LINK_DESC_TYPE,
535 	DP_HW_CC_SPT_PAGE_TYPE,
536 };
537 
538 /**
539  * struct rx_desc_pool
540  * @pool_size: number of RX descriptors in the pool
541  * @elem_size: Element size
542  * @desc_pages: Multi page descriptors
543  * @array: pointer to array of RX descriptors
544  * @freelist: pointer to free RX descriptor linked list
545  * @lock: Protection for the RX descriptor pool
546  * @owner: owner for nbuf
547  * @buf_size: Buffer size
548  * @buf_alignment: Buffer alignment
549  * @rx_mon_dest_frag_enable: Enable frag processing for mon dest buffer
550  * @desc_type: type of desc this pool serves
551  */
552 struct rx_desc_pool {
553 	uint32_t pool_size;
554 #ifdef RX_DESC_MULTI_PAGE_ALLOC
555 	uint16_t elem_size;
556 	struct qdf_mem_multi_page_t desc_pages;
557 #else
558 	union dp_rx_desc_list_elem_t *array;
559 #endif
560 	union dp_rx_desc_list_elem_t *freelist;
561 	qdf_spinlock_t lock;
562 	uint8_t owner;
563 	uint16_t buf_size;
564 	uint8_t buf_alignment;
565 	bool rx_mon_dest_frag_enable;
566 	enum dp_desc_type desc_type;
567 };
568 
569 /**
570  * struct dp_tx_ext_desc_elem_s
571  * @next: next extension descriptor pointer
572  * @vaddr: hlos virtual address pointer
573  * @paddr: physical address pointer for descriptor
574  * @flags: mark features for extension descriptor
575  * @me_buffer: Pointer to ME buffer - store this so that it can be freed on
576  *		Tx completion of ME packet
577  * @tso_desc: Pointer to TSO segment descriptor
578  * @tso_num_desc: Pointer to tso_num_desc
579  */
580 struct dp_tx_ext_desc_elem_s {
581 	struct dp_tx_ext_desc_elem_s *next;
582 	void *vaddr;
583 	qdf_dma_addr_t paddr;
584 	uint16_t flags;
585 	struct dp_tx_me_buf_t *me_buffer;
586 	struct qdf_tso_seg_elem_t *tso_desc;
587 	struct qdf_tso_num_seg_elem_t *tso_num_desc;
588 };
589 
590 /*
591  * NB: intentionally not using kernel-doc comment because the kernel-doc
592  *     script does not handle the qdf_dma_mem_context macro
593  * struct dp_tx_ext_desc_pool_s - Tx Extension Descriptor Pool
594  * @elem_count: Number of descriptors in the pool
595  * @elem_size: Size of each descriptor
596  * @num_free: Number of free descriptors
597  * @desc_pages: multiple page allocation information for actual descriptors
598  * @link_elem_size: size of the link descriptor in cacheable memory used for
599  * 		    chaining the extension descriptors
600  * @desc_link_pages: multiple page allocation information for link descriptors
601  * @freelist:
602  * @lock:
603  * @memctx:
604  */
605 struct dp_tx_ext_desc_pool_s {
606 	uint16_t elem_count;
607 	int elem_size;
608 	uint16_t num_free;
609 	struct qdf_mem_multi_page_t desc_pages;
610 	int link_elem_size;
611 	struct qdf_mem_multi_page_t desc_link_pages;
612 	struct dp_tx_ext_desc_elem_s *freelist;
613 	qdf_spinlock_t lock;
614 	qdf_dma_mem_context(memctx);
615 };
616 
617 /**
618  * struct dp_tx_desc_s - Tx Descriptor
619  * @next: Next in the chain of descriptors in freelist or in the completion list
620  * @nbuf: Buffer Address
621  * @length:
622  * @magic:
623  * @timestamp_tick:
624  * @flags: Flags to track the state of descriptor and special frame handling
625  * @id: Descriptor ID
626  * @dma_addr:
627  * @vdev_id: vdev_id of vdev over which the packet was transmitted
628  * @tx_status:
629  * @peer_id:
630  * @pdev: Handle to pdev
631  * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet).
632  * 		   This is maintained in descriptor to allow more efficient
633  * 		   processing in completion event processing code.
634  * 		   This field is filled in with the htt_pkt_type enum.
635  * @buffer_src: buffer source TQM, REO, FW etc.
636  * @reserved:
637  * @frm_type: Frame Type - ToDo check if this is redundant
638  * @pkt_offset: Offset from which the actual packet data starts
639  * @pool_id: Pool ID - used when releasing the descriptor
640  * @shinfo_addr:
641  * @msdu_ext_desc: MSDU extension descriptor
642  * @timestamp:
643  * @comp:
644  */
645 struct dp_tx_desc_s {
646 	struct dp_tx_desc_s *next;
647 	qdf_nbuf_t nbuf;
648 	uint16_t length;
649 #ifdef DP_TX_TRACKING
650 	uint32_t magic;
651 	uint64_t timestamp_tick;
652 #endif
653 	uint16_t flags;
654 	uint32_t id;
655 	qdf_dma_addr_t dma_addr;
656 	uint8_t vdev_id;
657 	uint8_t tx_status;
658 	uint16_t peer_id;
659 	struct dp_pdev *pdev;
660 	uint8_t tx_encap_type:2,
661 		buffer_src:3,
662 		reserved:3;
663 	uint8_t frm_type;
664 	uint8_t pkt_offset;
665 	uint8_t  pool_id;
666 	unsigned char *shinfo_addr;
667 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
668 	qdf_ktime_t timestamp;
669 	struct hal_tx_desc_comp_s comp;
670 };
671 
672 #ifdef QCA_AC_BASED_FLOW_CONTROL
673 /**
674  * enum flow_pool_status - flow pool status
675  * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
676  *				and network queues are unpaused
677  * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
678  *			   and network queues are paused
679  * @FLOW_POOL_BE_BK_PAUSED:
680  * @FLOW_POOL_VI_PAUSED:
681  * @FLOW_POOL_VO_PAUSED:
682  * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
683  * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
684  * @FLOW_POOL_ACTIVE_UNPAUSED_REATTACH: pool is reattached but network
685  *					queues are not paused
686  */
687 enum flow_pool_status {
688 	FLOW_POOL_ACTIVE_UNPAUSED = 0,
689 	FLOW_POOL_ACTIVE_PAUSED = 1,
690 	FLOW_POOL_BE_BK_PAUSED = 2,
691 	FLOW_POOL_VI_PAUSED = 3,
692 	FLOW_POOL_VO_PAUSED = 4,
693 	FLOW_POOL_INVALID = 5,
694 	FLOW_POOL_INACTIVE = 6,
695 	FLOW_POOL_ACTIVE_UNPAUSED_REATTACH = 7,
696 };
697 
698 #else
699 /**
700  * enum flow_pool_status - flow pool status
701  * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
702  *				and network queues are unpaused
703  * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
704  *			   and network queues are paused
705  * @FLOW_POOL_BE_BK_PAUSED:
706  * @FLOW_POOL_VI_PAUSED:
707  * @FLOW_POOL_VO_PAUSED:
708  * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
709  * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
710  */
711 enum flow_pool_status {
712 	FLOW_POOL_ACTIVE_UNPAUSED = 0,
713 	FLOW_POOL_ACTIVE_PAUSED = 1,
714 	FLOW_POOL_BE_BK_PAUSED = 2,
715 	FLOW_POOL_VI_PAUSED = 3,
716 	FLOW_POOL_VO_PAUSED = 4,
717 	FLOW_POOL_INVALID = 5,
718 	FLOW_POOL_INACTIVE = 6,
719 };
720 
721 #endif
722 
723 /**
724  * struct dp_tx_tso_seg_pool_s
725  * @pool_size: total number of pool elements
726  * @num_free: free element count
727  * @freelist: first free element pointer
728  * @desc_pages: multiple page allocation information for actual descriptors
729  * @lock: lock for accessing the pool
730  */
731 struct dp_tx_tso_seg_pool_s {
732 	uint16_t pool_size;
733 	uint16_t num_free;
734 	struct qdf_tso_seg_elem_t *freelist;
735 	struct qdf_mem_multi_page_t desc_pages;
736 	qdf_spinlock_t lock;
737 };
738 
739 /**
740  * struct dp_tx_tso_num_seg_pool_s - TSO Num seg pool
741  * @num_seg_pool_size: total number of pool elements
742  * @num_free: free element count
743  * @freelist: first free element pointer
744  * @desc_pages: multiple page allocation information for actual descriptors
745  * @lock: lock for accessing the pool
746  */
747 
748 struct dp_tx_tso_num_seg_pool_s {
749 	uint16_t num_seg_pool_size;
750 	uint16_t num_free;
751 	struct qdf_tso_num_seg_elem_t *freelist;
752 	struct qdf_mem_multi_page_t desc_pages;
753 	/* TSO pool lock */
754 	qdf_spinlock_t lock;
755 };
756 
757 /**
758  * struct dp_tx_desc_pool_s - Tx Descriptor pool information
759  * @elem_size: Size of each descriptor in the pool
760  * @num_allocated: Number of used descriptors
761  * @freelist: Chain of free descriptors
762  * @desc_pages: multiple page allocation information for actual descriptors
763  * @pool_size: Total number of descriptors in the pool
764  * @flow_pool_id:
765  * @num_invalid_bin: Deleted pool with pending Tx completions.
766  * @avail_desc:
767  * @status:
768  * @flow_type:
769  * @stop_th:
770  * @start_th:
771  * @max_pause_time:
772  * @latest_pause_time:
773  * @pkt_drop_no_desc:
774  * @flow_pool_lock:
775  * @pool_create_cnt:
776  * @pool_owner_ctx:
777  * @elem_count:
778  * @num_free: Number of free descriptors
779  * @lock: Lock for descriptor allocation/free from/to the pool
780  */
781 struct dp_tx_desc_pool_s {
782 	uint16_t elem_size;
783 	uint32_t num_allocated;
784 	struct dp_tx_desc_s *freelist;
785 	struct qdf_mem_multi_page_t desc_pages;
786 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
787 	uint16_t pool_size;
788 	uint8_t flow_pool_id;
789 	uint8_t num_invalid_bin;
790 	uint16_t avail_desc;
791 	enum flow_pool_status status;
792 	enum htt_flow_type flow_type;
793 #ifdef QCA_AC_BASED_FLOW_CONTROL
794 	uint16_t stop_th[FL_TH_MAX];
795 	uint16_t start_th[FL_TH_MAX];
796 	qdf_time_t max_pause_time[FL_TH_MAX];
797 	qdf_time_t latest_pause_time[FL_TH_MAX];
798 #else
799 	uint16_t stop_th;
800 	uint16_t start_th;
801 #endif
802 	uint16_t pkt_drop_no_desc;
803 	qdf_spinlock_t flow_pool_lock;
804 	uint8_t pool_create_cnt;
805 	void *pool_owner_ctx;
806 #else
807 	uint16_t elem_count;
808 	uint32_t num_free;
809 	qdf_spinlock_t lock;
810 #endif
811 };
812 
813 /**
814  * struct dp_txrx_pool_stats - flow pool related statistics
815  * @pool_map_count: flow pool map received
816  * @pool_unmap_count: flow pool unmap received
817  * @pkt_drop_no_pool: packets dropped due to unavailability of pool
818  */
819 struct dp_txrx_pool_stats {
820 	uint16_t pool_map_count;
821 	uint16_t pool_unmap_count;
822 	uint16_t pkt_drop_no_pool;
823 };
824 
825 /**
826  * struct dp_srng - DP srng structure
827  * @hal_srng: hal_srng handle
828  * @base_vaddr_unaligned: un-aligned virtual base address of the srng ring
829  * @base_vaddr_aligned: aligned virtual base address of the srng ring
830  * @base_paddr_unaligned: un-aligned physical base address of the srng ring
831  * @base_paddr_aligned: aligned physical base address of the srng ring
832  * @alloc_size: size of the srng ring
833  * @cached: is the srng ring memory cached or un-cached memory
834  * @irq: irq number of the srng ring
835  * @num_entries: number of entries in the srng ring
836  * @is_mem_prealloc: Is this srng memory pre-allocated
837  * @crit_thresh: Critical threshold for near-full processing of this srng
838  * @safe_thresh: Safe threshold for near-full processing of this srng
839  * @near_full: Flag to indicate srng is near-full
840  */
841 struct dp_srng {
842 	hal_ring_handle_t hal_srng;
843 	void *base_vaddr_unaligned;
844 	void *base_vaddr_aligned;
845 	qdf_dma_addr_t base_paddr_unaligned;
846 	qdf_dma_addr_t base_paddr_aligned;
847 	uint32_t alloc_size;
848 	uint8_t cached;
849 	int irq;
850 	uint32_t num_entries;
851 #ifdef DP_MEM_PRE_ALLOC
852 	uint8_t is_mem_prealloc;
853 #endif
854 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
855 	uint16_t crit_thresh;
856 	uint16_t safe_thresh;
857 	qdf_atomic_t near_full;
858 #endif
859 };
860 
861 struct dp_rx_reorder_array_elem {
862 	qdf_nbuf_t head;
863 	qdf_nbuf_t tail;
864 };
865 
866 #define DP_RX_BA_INACTIVE 0
867 #define DP_RX_BA_ACTIVE 1
868 #define DP_RX_BA_IN_PROGRESS 2
869 struct dp_reo_cmd_info {
870 	uint16_t cmd;
871 	enum hal_reo_cmd_type cmd_type;
872 	void *data;
873 	void (*handler)(struct dp_soc *, void *, union hal_reo_status *);
874 	TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
875 };
876 
877 struct dp_peer_delay_stats {
878 	struct cdp_delay_tid_stats delay_tid_stats[CDP_MAX_DATA_TIDS]
879 						  [CDP_MAX_TXRX_CTX];
880 };
881 
882 /* Rx TID defrag*/
883 struct dp_rx_tid_defrag {
884 	/* TID */
885 	int tid;
886 
887 	/* only used for defrag right now */
888 	TAILQ_ENTRY(dp_rx_tid_defrag) defrag_waitlist_elem;
889 
890 	/* Store dst desc for reinjection */
891 	hal_ring_desc_t dst_ring_desc;
892 	struct dp_rx_desc *head_frag_desc;
893 
894 	/* Sequence and fragments that are being processed currently */
895 	uint32_t curr_seq_num;
896 	uint32_t curr_frag_num;
897 
898 	/* TODO: Check the following while adding defragmentation support */
899 	struct dp_rx_reorder_array_elem *array;
900 	/* base - single rx reorder element used for non-aggr cases */
901 	struct dp_rx_reorder_array_elem base;
902 	/* rx_tid lock */
903 	qdf_spinlock_t defrag_tid_lock;
904 
905 	/* head PN number */
906 	uint64_t pn128[2];
907 
908 	uint32_t defrag_timeout_ms;
909 
910 	/* defrag usage only, dp_peer pointer related with this tid */
911 	struct dp_txrx_peer *defrag_peer;
912 };
913 
914 /* Rx TID */
915 struct dp_rx_tid {
916 	/* TID */
917 	int tid;
918 
919 	/* Num of addba requests */
920 	uint32_t num_of_addba_req;
921 
922 	/* Num of addba responses */
923 	uint32_t num_of_addba_resp;
924 
925 	/* Num of delba requests */
926 	uint32_t num_of_delba_req;
927 
928 	/* Num of addba responses successful */
929 	uint32_t num_addba_rsp_success;
930 
931 	/* Num of addba responses failed */
932 	uint32_t num_addba_rsp_failed;
933 
934 	/* pn size */
935 	uint8_t pn_size;
936 	/* REO TID queue descriptors */
937 	void *hw_qdesc_vaddr_unaligned;
938 	void *hw_qdesc_vaddr_aligned;
939 	qdf_dma_addr_t hw_qdesc_paddr_unaligned;
940 	qdf_dma_addr_t hw_qdesc_paddr;
941 	uint32_t hw_qdesc_alloc_size;
942 
943 	/* RX ADDBA session state */
944 	int ba_status;
945 
946 	/* RX BA window size */
947 	uint16_t ba_win_size;
948 
949 	/* Starting sequence number in Addba request */
950 	uint16_t startseqnum;
951 	uint16_t dialogtoken;
952 	uint16_t statuscode;
953 	/* user defined ADDBA response status code */
954 	uint16_t userstatuscode;
955 
956 	/* rx_tid lock */
957 	qdf_spinlock_t tid_lock;
958 
959 	/* Store ppdu_id when 2k exception is received */
960 	uint32_t ppdu_id_2k;
961 
962 	/* Delba Tx completion status */
963 	uint8_t delba_tx_status;
964 
965 	/* Delba Tx retry count */
966 	uint8_t delba_tx_retry;
967 
968 	/* Delba stats */
969 	uint32_t delba_tx_success_cnt;
970 	uint32_t delba_tx_fail_cnt;
971 
972 	/* Delba reason code for retries */
973 	uint8_t delba_rcode;
974 
975 	/* Coex Override preserved window size, 1-based */
976 	uint16_t rx_ba_win_size_override;
977 #ifdef IPA_OFFLOAD
978 	/* rx msdu count per tid */
979 	struct cdp_pkt_info rx_msdu_cnt;
980 #endif
981 
982 };
983 
984 /**
985  * struct dp_intr_stats - DP Interrupt Stats for an interrupt context
986  * @num_tx_ring_masks: interrupts with tx_ring_mask set
987  * @num_rx_ring_masks: interrupts with rx_ring_mask set
988  * @num_rx_mon_ring_masks: interrupts with rx_mon_ring_mask set
989  * @num_rx_err_ring_masks: interrupts with rx_err_ring_mask set
990  * @num_rx_wbm_rel_ring_masks: interrupts with rx_wbm_rel_ring_mask set
991  * @num_reo_status_ring_masks: interrupts with reo_status_ring_mask set
992  * @num_rxdma2host_ring_masks: interrupts with rxdma2host_ring_mask set
993  * @num_host2rxdma_ring_masks: interrupts with host2rxdma_ring_mask set
994  * @num_host2rxdma_mon_ring_masks: interrupts with host2rxdma_mon_ring_mask set
995  * @num_rx_ring_near_full_masks: Near-full interrupts for REO DST ring
996  * @num_tx_comp_ring_near_full_masks: Near-full interrupts for TX completion
997  * @num_rx_wbm_rel_ring_near_full_masks: total number of times the wbm rel ring
998  *                                       near full interrupt was received
999  * @num_reo_status_ring_near_full_masks: total number of times the reo status
1000  *                                       near full interrupt was received
1001  * @num_host2txmon_ring__masks: interrupts with host2txmon_ring_mask set
1002  * @num_near_full_masks: total number of times the near full interrupt
1003  *                       was received
1004  * @num_masks: total number of times the interrupt was received
1005  * @num_tx_mon_ring_masks: interrupts with tx_mon_ring_mask set
1006  *
1007  * Counters for individual masks are incremented only if there are any
1008  * packets on that ring.
1011  */
1012 struct dp_intr_stats {
1013 	uint32_t num_tx_ring_masks[MAX_TCL_DATA_RINGS];
1014 	uint32_t num_rx_ring_masks[MAX_REO_DEST_RINGS];
1015 	uint32_t num_rx_mon_ring_masks;
1016 	uint32_t num_rx_err_ring_masks;
1017 	uint32_t num_rx_wbm_rel_ring_masks;
1018 	uint32_t num_reo_status_ring_masks;
1019 	uint32_t num_rxdma2host_ring_masks;
1020 	uint32_t num_host2rxdma_ring_masks;
1021 	uint32_t num_host2rxdma_mon_ring_masks;
1022 	uint32_t num_rx_ring_near_full_masks[MAX_REO_DEST_RINGS];
1023 	uint32_t num_tx_comp_ring_near_full_masks[MAX_TCL_DATA_RINGS];
1024 	uint32_t num_rx_wbm_rel_ring_near_full_masks;
1025 	uint32_t num_reo_status_ring_near_full_masks;
1026 	uint32_t num_host2txmon_ring__masks;
1027 	uint32_t num_near_full_masks;
1028 	uint32_t num_masks;
1029 	uint32_t num_tx_mon_ring_masks;
1030 };
1031 
1032 #ifdef DP_UMAC_HW_RESET_SUPPORT
1033 /**
1034  * struct dp_intr_bkp - DP per interrupt context ring masks old state
1035  * @tx_ring_mask: WBM Tx completion rings (0-2) associated with this napi ctxt
1036  * @rx_ring_mask: Rx REO rings (0-3) associated with this interrupt context
1037  * @rx_mon_ring_mask: Rx monitor ring mask (0-2)
1038  * @rx_err_ring_mask: REO Exception Ring
1039  * @rx_wbm_rel_ring_mask: WBM2SW Rx Release Ring
1040  * @reo_status_ring_mask: REO command response ring
1041  * @rxdma2host_ring_mask: RXDMA to host destination ring
1042  * @host2rxdma_ring_mask: Host to RXDMA buffer ring
1043  * @host2rxdma_mon_ring_mask: Host to RXDMA monitor  buffer ring
1044  * @host2txmon_ring_mask: Tx monitor buffer ring
1045  * @tx_mon_ring_mask: Tx monitor ring mask (0-2)
1046  *
1047  */
1048 struct dp_intr_bkp {
1049 	uint8_t tx_ring_mask;
1050 	uint8_t rx_ring_mask;
1051 	uint8_t rx_mon_ring_mask;
1052 	uint8_t rx_err_ring_mask;
1053 	uint8_t rx_wbm_rel_ring_mask;
1054 	uint8_t reo_status_ring_mask;
1055 	uint8_t rxdma2host_ring_mask;
1056 	uint8_t host2rxdma_ring_mask;
1057 	uint8_t host2rxdma_mon_ring_mask;
1058 	uint8_t host2txmon_ring_mask;
1059 	uint8_t tx_mon_ring_mask;
1060 };
1061 #endif
1062 
1063 /* per interrupt context  */
1064 struct dp_intr {
1065 	uint8_t tx_ring_mask;   /* WBM Tx completion rings (0-2)
1066 				associated with this napi context */
1067 	uint8_t rx_ring_mask;   /* Rx REO rings (0-3) associated
1068 				with this interrupt context */
1069 	uint8_t rx_mon_ring_mask;  /* Rx monitor ring mask (0-2) */
1070 	uint8_t rx_err_ring_mask; /* REO Exception Ring */
1071 	uint8_t rx_wbm_rel_ring_mask; /* WBM2SW Rx Release Ring */
1072 	uint8_t reo_status_ring_mask; /* REO command response ring */
1073 	uint8_t rxdma2host_ring_mask; /* RXDMA to host destination ring */
1074 	uint8_t host2rxdma_ring_mask; /* Host to RXDMA buffer ring */
1075 	/* Host to RXDMA monitor  buffer ring */
1076 	uint8_t host2rxdma_mon_ring_mask;
1077 	/* RX REO rings near full interrupt mask */
1078 	uint8_t rx_near_full_grp_1_mask;
1079 	/* RX REO rings near full interrupt mask */
1080 	uint8_t rx_near_full_grp_2_mask;
1081 	/* WBM TX completion rings near full interrupt mask */
1082 	uint8_t tx_ring_near_full_mask;
1083 	uint8_t host2txmon_ring_mask; /* Tx monitor buffer ring */
1084 	uint8_t tx_mon_ring_mask;  /* Tx monitor ring mask (0-2) */
1085 	struct dp_soc *soc;    /* Reference to SoC structure ,
1086 				to get DMA ring handles */
1087 	qdf_lro_ctx_t lro_ctx;
1088 	uint8_t dp_intr_id;
1089 
1090 	/* Interrupt Stats for individual masks */
1091 	struct dp_intr_stats intr_stats;
1092 	uint8_t umac_reset_intr_mask;  /* UMAC reset interrupt mask */
1093 };
1094 
1095 #define REO_DESC_FREELIST_SIZE 64
1096 #define REO_DESC_FREE_DEFER_MS 1000
1097 struct reo_desc_list_node {
1098 	qdf_list_node_t node;
1099 	unsigned long free_ts;
1100 	struct dp_rx_tid rx_tid;
1101 	bool resend_update_reo_cmd;
1102 	uint32_t pending_ext_desc_size;
1103 #ifdef REO_QDESC_HISTORY
1104 	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
1105 #endif
1106 };
1107 
1108 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
1109 #define REO_DESC_DEFERRED_FREELIST_SIZE 256
1110 #define REO_DESC_DEFERRED_FREE_MS 30000
1111 
1112 struct reo_desc_deferred_freelist_node {
1113 	qdf_list_node_t node;
1114 	unsigned long free_ts;
1115 	void *hw_qdesc_vaddr_unaligned;
1116 	qdf_dma_addr_t hw_qdesc_paddr;
1117 	uint32_t hw_qdesc_alloc_size;
1118 #ifdef REO_QDESC_HISTORY
1119 	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
1120 #endif /* REO_QDESC_HISTORY */
1121 };
1122 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
1123 
1124 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1125 /**
1126  * struct reo_cmd_event_record - Elements to record for each reo command
1127  * @cmd_type: reo command type
1128  * @cmd_return_status: reo command post status
1129  * @timestamp: record timestamp for the reo command
1130  */
1131 struct reo_cmd_event_record {
1132 	enum hal_reo_cmd_type cmd_type;
1133 	uint8_t cmd_return_status;
1134 	uint64_t timestamp;
1135 };
1136 
1137 /**
1138  * struct reo_cmd_event_history - Account for reo cmd events
1139  * @index: record number
1140  * @cmd_record: list of records
1141  */
1142 struct reo_cmd_event_history {
1143 	qdf_atomic_t index;
1144 	struct reo_cmd_event_record cmd_record[REO_CMD_EVENT_HIST_MAX];
1145 };
1146 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1147 
1148 /* SoC level data path statistics */
1149 struct dp_soc_stats {
1150 	struct {
1151 		uint32_t added;
1152 		uint32_t deleted;
1153 		uint32_t aged_out;
1154 		uint32_t map_err;
1155 		uint32_t ast_mismatch;
1156 	} ast;
1157 
1158 	struct {
1159 		uint32_t added;
1160 		uint32_t deleted;
1161 	} mec;
1162 
1163 	/* SOC level TX stats */
1164 	struct {
1165 		/* Total packets transmitted */
1166 		struct cdp_pkt_info egress[MAX_TCL_DATA_RINGS];
1167 		/* Enqueues per tcl ring */
1168 		uint32_t tcl_enq[MAX_TCL_DATA_RINGS];
1169 		/* packets dropped on tx because of no peer */
1170 		struct cdp_pkt_info tx_invalid_peer;
1171 		/* Number of times a tcl ring was full, per ring */
1172 		uint32_t tcl_ring_full[MAX_TCL_DATA_RINGS];
1173 		/* Descriptors in use at soc */
1174 		uint32_t desc_in_use;
1175 		/* tqm_release_reason == FW removed */
1176 		uint32_t dropped_fw_removed;
1177 		/* tx completion release_src != TQM or FW */
1178 		uint32_t invalid_release_source;
1179 		/* TX descriptor from completion ring Desc is not valid */
1180 		uint32_t invalid_tx_comp_desc;
1181 		/* tx completion wbm_internal_error */
1182 		uint32_t wbm_internal_error[MAX_WBM_INT_ERROR_REASONS];
1183 		/* tx completion non_wbm_internal_error */
1184 		uint32_t non_wbm_internal_err;
1185 		/* TX Comp loop packet limit hit */
1186 		uint32_t tx_comp_loop_pkt_limit_hit;
1187 		/* Head pointer Out of sync at the end of dp_tx_comp_handler */
1188 		uint32_t hp_oos2;
1189 		/* tx desc freed as part of vdev detach */
1190 		uint32_t tx_comp_exception;
1191 		/* TQM drops after/during peer delete */
1192 		uint64_t tqm_drop_no_peer;
1193 		/* Number of tx completions reaped per WBM2SW release ring */
1194 		uint32_t tx_comp[MAX_TCL_DATA_RINGS];
1195 		/* Number of tx completions force freed */
1196 		uint32_t tx_comp_force_freed;
1197 		/* Tx completion ring near full */
1198 		uint32_t near_full;
1199 		/* Tx drops with buffer src as HAL_TX_COMP_RELEASE_SOURCE_FW */
1200 		uint32_t fw2wbm_tx_drop;
1201 	} tx;
1202 
1203 	/* SOC level RX stats */
1204 	struct {
1205 		/* Total rx packets count */
1206 		struct cdp_pkt_info ingress;
1207 		/* Rx errors */
1208 		/* Total Packets in Rx Error ring */
1209 		uint32_t err_ring_pkts;
1210 		/* No of Fragments */
1211 		uint32_t rx_frags;
1212 		/* No of incomplete fragments in waitlist */
1213 		uint32_t rx_frag_wait;
1214 		/* Fragments dropped due to errors */
1215 		uint32_t rx_frag_err;
1216 		/* Fragments received OOR causing sequence num mismatch */
1217 		uint32_t rx_frag_oor;
1218 		/* Fragments dropped due to len errors in skb */
1219 		uint32_t rx_frag_err_len_error;
1220 		/* Fragments dropped due to no peer found */
1221 		uint32_t rx_frag_err_no_peer;
1222 		/* No of reinjected packets */
1223 		uint32_t reo_reinject;
1224 		/* Reap loop packet limit hit */
1225 		uint32_t reap_loop_pkt_limit_hit;
1226 		/* Head pointer Out of sync at the end of dp_rx_process */
1227 		uint32_t hp_oos2;
1228 		/* Rx ring near full */
1229 		uint32_t near_full;
1230 		/* Break ring reaping as not all scattered msdu received */
1231 		uint32_t msdu_scatter_wait_break;
1232 		/* Number of bar frames received */
1233 		uint32_t bar_frame;
1234 		/* Number of frames routed from rxdma */
1235 		/* Number of frames routed from rxdma that were dropped */
1236 		/* Number of frames routed from reo*/
1237 		/* Number of frames routed from reo that were dropped */
1238 		uint64_t fast_recycled;
1239 		/* Number of hw stats requested */
1240 		uint32_t rx_hw_stats_requested;
1241 		/* Number of hw stats request timeout */
1242 		uint32_t rx_hw_stats_timeout;
1243 
1244 		struct {
1245 			/* Invalid RBM error count */
1246 			uint32_t invalid_rbm;
1247 			/* Invalid VDEV Error count */
1248 			uint32_t invalid_vdev;
1249 			/* Invalid PDEV error count */
1250 			uint32_t invalid_pdev;
1251 
1252 			/* Packets delivered to stack that have no related peer */
1253 			uint32_t pkt_delivered_no_peer;
1254 			/* Defrag peer uninit error count */
1255 			uint32_t defrag_peer_uninit;
1256 			/* Invalid sa_idx or da_idx*/
1257 			uint32_t invalid_sa_da_idx;
1258 			/* MSDU DONE failures */
1259 			uint32_t msdu_done_fail;
1260 			/* Invalid PEER Error count */
1261 			struct cdp_pkt_info rx_invalid_peer;
1262 			/* Invalid PEER ID count */
1263 			struct cdp_pkt_info rx_invalid_peer_id;
1264 			/* Invalid packet length */
1265 			struct cdp_pkt_info rx_invalid_pkt_len;
1266 			/* HAL ring access Fail error count */
1267 			uint32_t hal_ring_access_fail;
1268 			/* HAL ring access full Fail error count */
1269 			uint32_t hal_ring_access_full_fail;
1270 			/* RX DMA error count */
1271 			uint32_t rxdma_error[HAL_RXDMA_ERR_MAX];
1272 			/* RX REO DEST Desc Invalid Magic count */
1273 			uint32_t rx_desc_invalid_magic;
1274 			/* REO Error count */
1275 			uint32_t reo_error[HAL_REO_ERR_MAX];
1276 			/* HAL REO ERR Count */
1277 			uint32_t hal_reo_error[MAX_REO_DEST_RINGS];
1278 			/* HAL REO DEST Duplicate count */
1279 			uint32_t hal_reo_dest_dup;
1280 			/* HAL WBM RELEASE Duplicate count */
1281 			uint32_t hal_wbm_rel_dup;
1282 			/* HAL RXDMA error Duplicate count */
1283 			uint32_t hal_rxdma_err_dup;
1284 			/* ipa smmu map duplicate count */
1285 			uint32_t ipa_smmu_map_dup;
1286 			/* ipa smmu unmap duplicate count */
1287 			uint32_t ipa_smmu_unmap_dup;
1288 			/* ipa smmu unmap while ipa pipes is disabled */
1289 			uint32_t ipa_unmap_no_pipe;
1290 			/* REO cmd send fail/requeue count */
1291 			uint32_t reo_cmd_send_fail;
1292 			/* REO cmd send drain count */
1293 			uint32_t reo_cmd_send_drain;
1294 			/* RX msdu drop count due to scatter */
1295 			uint32_t scatter_msdu;
1296 			/* RX msdu drop count due to invalid cookie */
1297 			uint32_t invalid_cookie;
1298 			/* Count of stale cookie read in RX path */
1299 			uint32_t stale_cookie;
1300 			/* Delba sent count due to RX 2k jump */
1301 			uint32_t rx_2k_jump_delba_sent;
1302 			/* RX 2k jump msdu indicated to stack count */
1303 			uint32_t rx_2k_jump_to_stack;
1304 			/* RX 2k jump msdu dropped count */
1305 			uint32_t rx_2k_jump_drop;
1306 			/* REO ERR msdu buffer received */
1307 			uint32_t reo_err_msdu_buf_rcved;
1308 			/* REO ERR msdu buffer with invalid cookie received */
1309 			uint32_t reo_err_msdu_buf_invalid_cookie;
1310 			/* REO OOR msdu drop count */
1311 			uint32_t reo_err_oor_drop;
1312 			/* REO OOR msdu indicated to stack count */
1313 			uint32_t reo_err_oor_to_stack;
1314 			/* REO OOR scattered msdu count */
1315 			uint32_t reo_err_oor_sg_count;
1316 			/* RX msdu rejected count on delivery to vdev stack_fn*/
1317 			uint32_t rejected;
1318 			/* Incorrect msdu count in MPDU desc info */
1319 			uint32_t msdu_count_mismatch;
1320 			/* RX raw frame dropped count */
1321 			uint32_t raw_frm_drop;
1322 			/* Stale link desc cookie count*/
1323 			uint32_t invalid_link_cookie;
1324 			/* Nbuf sanity failure */
1325 			uint32_t nbuf_sanity_fail;
1326 			/* Duplicate link desc refilled */
1327 			uint32_t dup_refill_link_desc;
1328 			/* Incorrect msdu continuation bit in MSDU desc */
1329 			uint32_t msdu_continuation_err;
1330 			/* count of start sequence (ssn) updates */
1331 			uint32_t ssn_update_count;
1332 			/* count of bar handling fail */
1333 			uint32_t bar_handle_fail_count;
1334 			/* EAPOL drop count in intrabss scenario */
1335 			uint32_t intrabss_eapol_drop;
1336 			/* PN check failed for 2K-jump or OOR error */
1337 			uint32_t pn_in_dest_check_fail;
1338 			/* MSDU len err count */
1339 			uint32_t msdu_len_err;
1340 			/* Rx flush count */
1341 			uint32_t rx_flush_count;
1342 			/* Rx invalid tid count */
1343 			uint32_t rx_invalid_tid_err;
1344 			/* Invalid address1 in defrag path*/
1345 			uint32_t defrag_ad1_invalid;
1346 			/* decrypt error drop */
1347 			uint32_t decrypt_err_drop;
1348 		} err;
1349 
1350 		/* packet count per core - per ring */
1351 		uint64_t ring_packets[NR_CPUS][MAX_REO_DEST_RINGS];
1352 	} rx;
1353 
1354 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1355 	struct reo_cmd_event_history cmd_event_history;
1356 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1357 };
1358 
1359 union dp_align_mac_addr {
1360 	uint8_t raw[QDF_MAC_ADDR_SIZE];
1361 	struct {
1362 		uint16_t bytes_ab;
1363 		uint16_t bytes_cd;
1364 		uint16_t bytes_ef;
1365 	} align2;
1366 	struct {
1367 		uint32_t bytes_abcd;
1368 		uint16_t bytes_ef;
1369 	} align4;
1370 	struct __attribute__((__packed__)) {
1371 		uint16_t bytes_ab;
1372 		uint32_t bytes_cdef;
1373 	} align4_2;
1374 };
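/*
 * Illustrative sketch (editorial addition): the union above lets a MAC
 * address comparison be done with two aligned loads instead of a
 * byte-by-byte memcmp; a and b are hypothetical pointers to
 * union dp_align_mac_addr.
 *
 *	bool same = (a->align4.bytes_abcd == b->align4.bytes_abcd) &&
 *		    (a->align4.bytes_ef == b->align4.bytes_ef);
 */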
1375 
1376 /**
1377  * struct dp_ast_free_cb_params - HMWDS free callback cookie
1378  * @mac_addr: ast mac address
1379  * @peer_mac_addr: mac address of peer
1380  * @type: ast entry type
1381  * @vdev_id: vdev_id
1382  * @flags: ast flags
1383  */
1384 struct dp_ast_free_cb_params {
1385 	union dp_align_mac_addr mac_addr;
1386 	union dp_align_mac_addr peer_mac_addr;
1387 	enum cdp_txrx_ast_entry_type type;
1388 	uint8_t vdev_id;
1389 	uint32_t flags;
1390 };
1391 
1392 /**
1393  * struct dp_ast_entry - AST entry
1394  *
1395  * @ast_idx: Hardware AST Index
1396  * @peer_id: Next Hop peer_id (for non-WDS nodes, this will point to the
1397  *           peer associated with this MAC address)
1398  * @mac_addr:  MAC Address for this AST entry
1399  * @next_hop: Set to 1 if this is for a WDS node
1400  * @is_active: flag to indicate active data traffic on this node
1401  *             (used for aging out/expiry)
1402  * @ase_list_elem: node in peer AST list
1403  * @is_bss: flag to indicate if entry corresponds to bss peer
1404  * @is_mapped: flag to indicate that we have mapped the AST entry
1405  *             in ast_table
1406  * @pdev_id: pdev ID
1407  * @vdev_id: vdev ID
1408  * @ast_hash_value: hash value in HW
1409  * @ref_cnt: reference count
1410  * @type: flag to indicate type of the entry(static/WDS/MEC)
1411  * @delete_in_progress: Flag to indicate that delete commands were sent to
1412  *                      FW and the host is waiting for a response from FW
1413  * @callback: ast free/unmap callback
1414  * @cookie: argument to callback
1415  * @hash_list_elem: node in soc AST hash list (mac address used as hash)
1416  */
1417 struct dp_ast_entry {
1418 	uint16_t ast_idx;
1419 	uint16_t peer_id;
1420 	union dp_align_mac_addr mac_addr;
1421 	bool next_hop;
1422 	bool is_active;
1423 	bool is_mapped;
1424 	uint8_t pdev_id;
1425 	uint8_t vdev_id;
1426 	uint16_t ast_hash_value;
1427 	qdf_atomic_t ref_cnt;
1428 	enum cdp_txrx_ast_entry_type type;
1429 	bool delete_in_progress;
1430 	txrx_ast_free_cb callback;
1431 	void *cookie;
1432 	TAILQ_ENTRY(dp_ast_entry) ase_list_elem;
1433 	TAILQ_ENTRY(dp_ast_entry) hash_list_elem;
1434 };
1435 
1436 /**
1437  * struct dp_mec_entry - MEC entry
1438  *
1439  * @mac_addr:  MAC Address for this MEC entry
1440  * @is_active: flag to indicate active data traffic on this node
1441  *             (used for aging out/expiry)
1442  * @pdev_id: pdev ID
1443  * @vdev_id: vdev ID
1444  * @hash_list_elem: node in soc MEC hash list (mac address used as hash)
1445  */
1446 struct dp_mec_entry {
1447 	union dp_align_mac_addr mac_addr;
1448 	bool is_active;
1449 	uint8_t pdev_id;
1450 	uint8_t vdev_id;
1451 
1452 	TAILQ_ENTRY(dp_mec_entry) hash_list_elem;
1453 };
1454 
1455 /* SOC level htt stats */
1456 struct htt_t2h_stats {
1457 	/* lock to protect htt_stats_msg update */
1458 	qdf_spinlock_t lock;
1459 
1460 	/* work queue to process htt stats */
1461 	qdf_work_t work;
1462 
1463 	/* T2H Ext stats message queue */
1464 	qdf_nbuf_queue_t msg;
1465 
1466 	/* number of completed stats in htt_stats_msg */
1467 	uint32_t num_stats;
1468 };
1469 
1470 struct link_desc_bank {
1471 	void *base_vaddr_unaligned;
1472 	void *base_vaddr;
1473 	qdf_dma_addr_t base_paddr_unaligned;
1474 	qdf_dma_addr_t base_paddr;
1475 	uint32_t size;
1476 };
1477 
1478 struct rx_buff_pool {
1479 	qdf_nbuf_queue_head_t emerg_nbuf_q;
1480 	uint32_t nbuf_fail_cnt;
1481 	bool is_initialized;
1482 };
1483 
1484 struct rx_refill_buff_pool {
1485 	bool is_initialized;
1486 	uint16_t head;
1487 	uint16_t tail;
1488 	struct dp_pdev *dp_pdev;
1489 	uint16_t max_bufq_len;
1490 	qdf_nbuf_t buf_elem[2048];
1491 };
1492 
1493 #ifdef DP_TX_HW_DESC_HISTORY
1494 #define DP_TX_HW_DESC_HIST_MAX 6144
1495 #define DP_TX_HW_DESC_HIST_PER_SLOT_MAX 2048
1496 #define DP_TX_HW_DESC_HIST_MAX_SLOTS 3
1497 #define DP_TX_HW_DESC_HIST_SLOT_SHIFT 11
1498 
1499 struct dp_tx_hw_desc_evt {
1500 	uint8_t tcl_desc[HAL_TX_DESC_LEN_BYTES];
1501 	uint8_t tcl_ring_id;
1502 	uint64_t posted;
1503 	uint32_t hp;
1504 	uint32_t tp;
1505 };
1506 
1507 /* struct dp_tx_hw_desc_history - TX HW desc history
1508  * @index: Index where the last entry is written
1509  * @entry: history entries
1510  */
1511 struct dp_tx_hw_desc_history {
1512 	qdf_atomic_t index;
1513 	uint16_t num_entries_per_slot;
1514 	uint16_t allocated;
1515 	struct dp_tx_hw_desc_evt *entry[DP_TX_HW_DESC_HIST_MAX_SLOTS];
1516 };
1517 #endif
1518 
1519 /**
1520  * enum dp_mon_status_process_event - Events for monitor status buffer record
1521  * @DP_MON_STATUS_BUF_REAP: Monitor status buffer is reaped from ring
1522  * @DP_MON_STATUS_BUF_ENQUEUE: Status buffer is enqueued to local queue
1523  * @DP_MON_STATUS_BUF_DEQUEUE: Status buffer is dequeued from local queue
1524  */
1525 enum dp_mon_status_process_event {
1526 	DP_MON_STATUS_BUF_REAP,
1527 	DP_MON_STATUS_BUF_ENQUEUE,
1528 	DP_MON_STATUS_BUF_DEQUEUE,
1529 };
1530 
1531 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
1532 #define DP_MON_STATUS_HIST_MAX	2048
1533 
1534 /**
1535  * struct dp_mon_stat_info_record - monitor stat ring buffer info
1536  * @hbi: HW ring buffer info
1537  * @timestamp: timestamp when this entry was recorded
1538  * @event: event
1539  * @rx_desc: RX descriptor corresponding to the received buffer
1540  * @nbuf: buffer attached to rx_desc, if event is REAP, else the buffer
1541  *	  which was enqueued or dequeued.
1542  * @rx_desc_nbuf_data: nbuf data pointer.
1543  */
1544 struct dp_mon_stat_info_record {
1545 	struct hal_buf_info hbi;
1546 	uint64_t timestamp;
1547 	enum dp_mon_status_process_event event;
1548 	void *rx_desc;
1549 	qdf_nbuf_t nbuf;
1550 	uint8_t *rx_desc_nbuf_data;
1551 };
1552 
1553 /* struct dp_mon_status_ring_history - monitor status ring history
1554  * @index: Index where the last entry is written
1555  * @entry: history entries
1556  */
1557 struct dp_mon_status_ring_history {
1558 	qdf_atomic_t index;
1559 	struct dp_mon_stat_info_record entry[DP_MON_STATUS_HIST_MAX];
1560 };
1561 #endif
1562 
1563 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1564 /*
1565  * The logic to get the current index of these histories depends on
1566  * the size being a power of 2.
1567  */
1568 #define DP_RX_HIST_MAX 2048
1569 #define DP_RX_ERR_HIST_MAX 2048
1570 #define DP_RX_REINJECT_HIST_MAX 1024
1571 #define DP_RX_REFILL_HIST_MAX 2048
1572 
1573 QDF_COMPILE_TIME_ASSERT(rx_history_size,
1574 			(DP_RX_HIST_MAX &
1575 			 (DP_RX_HIST_MAX - 1)) == 0);
1576 QDF_COMPILE_TIME_ASSERT(rx_err_history_size,
1577 			(DP_RX_ERR_HIST_MAX &
1578 			 (DP_RX_ERR_HIST_MAX - 1)) == 0);
1579 QDF_COMPILE_TIME_ASSERT(rx_reinject_history_size,
1580 			(DP_RX_REINJECT_HIST_MAX &
1581 			 (DP_RX_REINJECT_HIST_MAX - 1)) == 0);
1582 QDF_COMPILE_TIME_ASSERT(rx_refill_history_size,
1583 			(DP_RX_REFILL_HIST_MAX &
1584 			(DP_RX_REFILL_HIST_MAX - 1)) == 0);
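/*
 * Illustrative sketch (editorial addition): because the asserts above
 * guarantee power-of-2 sizes, a history write index can wrap with a bitwise
 * AND instead of a modulo; history is a hypothetical struct dp_rx_history
 * pointer.
 *
 *	uint32_t idx = qdf_atomic_inc_return(&history->index) &
 *			(DP_RX_HIST_MAX - 1);
 *	history->entry[idx].timestamp = qdf_get_log_timestamp();
 */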
1585 
1586 
1587 /**
1588  * struct dp_buf_info_record - ring buffer info
1589  * @hbi: HW ring buffer info
1590  * @timestamp: timestamp when this entry was recorded
1591  */
1592 struct dp_buf_info_record {
1593 	struct hal_buf_info hbi;
1594 	uint64_t timestamp;
1595 };
1596 
1597 /**
1598  * struct dp_refill_info_record - ring refill buffer info
1599  * @hp: HP value after refill
1600  * @tp: cached tail value during refill
1601  * @num_req: number of buffers requested to refill
1602  * @num_refill: number of buffers refilled to ring
1603  * @timestamp: timestamp when this entry was recorded
1604  */
1605 struct dp_refill_info_record {
1606 	uint32_t hp;
1607 	uint32_t tp;
1608 	uint32_t num_req;
1609 	uint32_t num_refill;
1610 	uint64_t timestamp;
1611 };
1612 
1613 /**
1614  * struct dp_rx_history - rx ring history
1615  * @index: Index where the last entry is written
1616  * @entry: history entries
1617  */
1618 struct dp_rx_history {
1619 	qdf_atomic_t index;
1620 	struct dp_buf_info_record entry[DP_RX_HIST_MAX];
1621 };
1622 
1623 /**
1624  * struct dp_rx_err_history - rx err ring history
1625  * @index: Index where the last entry is written
1626  * @entry: history entries
1627  */
1628 struct dp_rx_err_history {
1629 	qdf_atomic_t index;
1630 	struct dp_buf_info_record entry[DP_RX_ERR_HIST_MAX];
1631 };
1632 
1633 /**
1634  * struct dp_rx_reinject_history - rx reinject ring history
1635  * @index: Index where the last entry is written
1636  * @entry: history entries
1637  */
1638 struct dp_rx_reinject_history {
1639 	qdf_atomic_t index;
1640 	struct dp_buf_info_record entry[DP_RX_REINJECT_HIST_MAX];
1641 };
1642 
1643 /**
1644  * struct dp_rx_refill_history - rx buf refill history
1645  * @index: Index where the last entry is written
1646  * @entry: history entries
1647  */
1648 struct dp_rx_refill_history {
1649 	qdf_atomic_t index;
1650 	struct dp_refill_info_record entry[DP_RX_REFILL_HIST_MAX];
1651 };
1652 
1653 #endif
1654 
1655 /**
1656  * enum dp_cfg_event_type - Datapath config events type
1657  * @DP_CFG_EVENT_VDEV_ATTACH: vdev attach
1658  * @DP_CFG_EVENT_VDEV_DETACH: vdev detach
1659  * @DP_CFG_EVENT_VDEV_UNREF_DEL: vdev memory free after last ref is released
1660  * @DP_CFG_EVENT_PEER_CREATE: peer create
1661  * @DP_CFG_EVENT_PEER_DELETE: peer delete
1662  * @DP_CFG_EVENT_PEER_UNREF_DEL: peer memory free after last ref is released
1663  * @DP_CFG_EVENT_PEER_SETUP: peer setup
1664  * @DP_CFG_EVENT_MLO_ADD_LINK: add link peer to mld peer
1665  * @DP_CFG_EVENT_MLO_DEL_LINK: delete link peer from mld peer
1666  * @DP_CFG_EVENT_MLO_SETUP: MLO peer setup
1667  * @DP_CFG_EVENT_MLO_SETUP_VDEV_UPDATE: MLD peer vdev update
1668  * @DP_CFG_EVENT_PEER_MAP: peer map
1669  * @DP_CFG_EVENT_PEER_UNMAP: peer unmap
1670  * @DP_CFG_EVENT_MLO_PEER_MAP: MLD peer map
1671  * @DP_CFG_EVENT_MLO_PEER_UNMAP: MLD peer unmap
1672  */
1673 enum dp_cfg_event_type {
1674 	DP_CFG_EVENT_VDEV_ATTACH,
1675 	DP_CFG_EVENT_VDEV_DETACH,
1676 	DP_CFG_EVENT_VDEV_UNREF_DEL,
1677 	DP_CFG_EVENT_PEER_CREATE,
1678 	DP_CFG_EVENT_PEER_DELETE,
1679 	DP_CFG_EVENT_PEER_UNREF_DEL,
1680 	DP_CFG_EVENT_PEER_SETUP,
1681 	DP_CFG_EVENT_MLO_ADD_LINK,
1682 	DP_CFG_EVENT_MLO_DEL_LINK,
1683 	DP_CFG_EVENT_MLO_SETUP,
1684 	DP_CFG_EVENT_MLO_SETUP_VDEV_UPDATE,
1685 	DP_CFG_EVENT_PEER_MAP,
1686 	DP_CFG_EVENT_PEER_UNMAP,
1687 	DP_CFG_EVENT_MLO_PEER_MAP,
1688 	DP_CFG_EVENT_MLO_PEER_UNMAP,
1689 };
1690 
1691 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY
1692 /* Size must be a power of 2, for bitwise index rotation */
1693 #define DP_CFG_EVT_HISTORY_SIZE 0x800
1694 #define DP_CFG_EVT_HIST_PER_SLOT_MAX 256
1695 #define DP_CFG_EVT_HIST_MAX_SLOTS 8
1696 #define DP_CFG_EVT_HIST_SLOT_SHIFT 8
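/*
 * Illustrative sketch (editorial addition): the config event history is kept
 * in DP_CFG_EVT_HIST_MAX_SLOTS slots of DP_CFG_EVT_HIST_PER_SLOT_MAX entries
 * each, so a global index decomposes into a slot and an offset; index is a
 * hypothetical variable.
 *
 *	uint16_t slot   = index >> DP_CFG_EVT_HIST_SLOT_SHIFT;
 *	uint16_t offset = index & (DP_CFG_EVT_HIST_PER_SLOT_MAX - 1);
 */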
1697 
1698 /**
1699  * struct dp_vdev_attach_detach_desc - vdev ops descriptor
1700  * @vdev: DP vdev handle
1701  * @mac_addr: vdev mac address
1702  * @vdev_id: vdev id
1703  * @ref_count: vdev ref count
1704  */
1705 struct dp_vdev_attach_detach_desc {
1706 	struct dp_vdev *vdev;
1707 	union dp_align_mac_addr mac_addr;
1708 	uint8_t vdev_id;
1709 	int32_t ref_count;
1710 };
1711 
1712 /**
1713  * struct dp_peer_cmn_ops_desc - peer events descriptor
1714  * @vdev_id: vdev_id of the vdev on which peer exists
1715  * @is_reuse: indicates if it's a peer reuse case during peer create
1716  * @peer: DP peer handle
1717  * @vdev: DP vdev handle on which peer exists
1718  * @mac_addr: peer mac address
1719  * @vdev_mac_addr: vdev mac address
1720  * @vdev_ref_count: vdev ref count
1721  * @peer_ref_count: peer ref count
1722  */
1723 struct dp_peer_cmn_ops_desc {
1724 	uint8_t vdev_id : 5,
1725 		is_reuse : 1;
1726 	struct dp_peer *peer;
1727 	struct dp_vdev *vdev;
1728 	union dp_align_mac_addr mac_addr;
1729 	union dp_align_mac_addr vdev_mac_addr;
1730 	int32_t vdev_ref_count;
1731 	int32_t peer_ref_count;
1732 };
1733 
1734 /**
1735  * struct dp_mlo_add_del_link_desc - MLO add/del link event descriptor
1736  * @idx: index at which link peer got added in MLD peer's list
1737  * @num_links: num links added in the MLD peer's list
1738  * @action_result: indicates whether the add/del was successful
1739  * @link_peer: link peer handle
1740  * @mld_peer: MLD peer handle
1741  * @link_mac_addr: link peer mac address
1742  * @mld_mac_addr: MLD peer mac address
1743  */
1744 struct dp_mlo_add_del_link_desc {
1745 	uint8_t idx : 3,
1746 		num_links : 3,
1747 		action_result : 1,
1748 		reserved : 1;
1749 	struct dp_peer *link_peer;
1750 	struct dp_peer *mld_peer;
1751 	union dp_align_mac_addr link_mac_addr;
1752 	union dp_align_mac_addr mld_mac_addr;
1753 };
1754 
1755 /**
1756  * struct dp_mlo_setup_vdev_update_desc - MLD peer vdev update event desc
1757  * @mld_peer: MLD peer handle
1758  * @prev_vdev: previous vdev handle
1759  * @new_vdev: new vdev handle
1760  */
1761 struct dp_mlo_setup_vdev_update_desc {
1762 	struct dp_peer *mld_peer;
1763 	struct dp_vdev *prev_vdev;
1764 	struct dp_vdev *new_vdev;
1765 };
1766 
1767 /**
1768  * struct dp_rx_peer_map_unmap_desc - peer map/unmap event descriptor
1769  * @peer_id: peer id
1770  * @ml_peer_id: ML peer id, if it's an MLD peer
1771  * @hw_peer_id: hw peer id
1772  * @vdev_id: vdev id of the peer
1773  * @is_ml_peer: indicates whether this is an MLD peer
1774  * @mac_addr: mac address of the peer
1775  * @peer: peer handle
1776  */
1777 struct dp_rx_peer_map_unmap_desc {
1778 	uint16_t peer_id;
1779 	uint16_t ml_peer_id;
1780 	uint16_t hw_peer_id;
1781 	uint8_t vdev_id;
1782 	uint8_t is_ml_peer;
1783 	union dp_align_mac_addr mac_addr;
1784 	struct dp_peer *peer;
1785 };
1786 
1787 /**
1788  * struct dp_peer_setup_desc - peer setup event descriptor
1789  * @peer: DP peer handle
1790  * @vdev: vdev handle on which peer exists
1791  * @vdev_ref_count: vdev ref count
1792  * @mac_addr: peer mac address
1793  * @mld_mac_addr: MLD mac address
1794  * @is_first_link: is the current link the first link created
1795  * @is_primary_link: is the current link the primary link
1796  * @vdev_id: vdev id of the vdev on which the current link peer exists
1797  */
1798 struct dp_peer_setup_desc {
1799 	struct dp_peer *peer;
1800 	struct dp_vdev *vdev;
1801 	int32_t vdev_ref_count;
1802 	union dp_align_mac_addr mac_addr;
1803 	union dp_align_mac_addr mld_mac_addr;
1804 	uint8_t is_first_link : 1,
1805 		is_primary_link : 1,
1806 		vdev_id : 5,
1807 		reserved : 1;
1808 };
1809 
1810 /**
1811  * union dp_cfg_event_desc - DP config event descriptor
1812  * @vdev_evt: vdev events desc
1813  * @peer_cmn_evt: common peer events desc
1814  * @peer_setup_evt: peer setup event desc
1815  * @mlo_link_delink_evt: MLO link/delink event desc
1816  * @mlo_setup_vdev_update: MLD peer vdev update event desc
1817  * @peer_map_unmap_evt: peer map/unmap event desc
1818  */
1819 union dp_cfg_event_desc {
1820 	struct dp_vdev_attach_detach_desc vdev_evt;
1821 	struct dp_peer_cmn_ops_desc peer_cmn_evt;
1822 	struct dp_peer_setup_desc peer_setup_evt;
1823 	struct dp_mlo_add_del_link_desc mlo_link_delink_evt;
1824 	struct dp_mlo_setup_vdev_update_desc mlo_setup_vdev_update;
1825 	struct dp_rx_peer_map_unmap_desc peer_map_unmap_evt;
1826 };
1827 
1828 /**
1829  * struct dp_cfg_event - DP config event descriptor
1830  * @timestamp: timestamp at which event was recorded
1831  * @type: event type
1832  * @event_desc: event descriptor
1833  */
1834 struct dp_cfg_event {
1835 	uint64_t timestamp;
1836 	enum dp_cfg_event_type type;
1837 	union dp_cfg_event_desc event_desc;
1838 };
1839 
1840 /**
1841  * struct dp_cfg_event_history - DP config event history
1842  * @index: current index
1843  * @num_entries_per_slot: number of entries per slot
1844  * @allocated: Is the history allocated or not
1845  * @entry: event history descriptors
1846  */
1847 struct dp_cfg_event_history {
1848 	qdf_atomic_t index;
1849 	uint16_t num_entries_per_slot;
1850 	uint16_t allocated;
1851 	struct dp_cfg_event *entry[DP_CFG_EVT_HIST_MAX_SLOTS];
1852 };
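
/*
 * Example (illustrative sketch, not part of the driver): the config
 * event history spreads DP_CFG_EVT_HISTORY_SIZE entries across
 * DP_CFG_EVT_HIST_MAX_SLOTS separately allocated slots of
 * DP_CFG_EVT_HIST_PER_SLOT_MAX entries each, so a running index maps to
 * a (slot, offset) pair with a shift and a mask:
 *
 *	uint32_t idx  = qdf_atomic_inc_return(&hist->index) &
 *			(DP_CFG_EVT_HISTORY_SIZE - 1);
 *	uint32_t slot = idx >> DP_CFG_EVT_HIST_SLOT_SHIFT;
 *	uint32_t off  = idx & (DP_CFG_EVT_HIST_PER_SLOT_MAX - 1);
 *	struct dp_cfg_event *evt = &hist->entry[slot][off];
 *
 * With a slot shift of 8 and 256 entries per slot, index 0x305 lands in
 * slot 3 at offset 5. The TX TCL/completion histories below reuse the
 * same slot/offset scheme with their own shift values.
 */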
1853 #endif
1854 
1855 enum dp_tx_event_type {
1856 	DP_TX_DESC_INVAL_EVT = 0,
1857 	DP_TX_DESC_MAP,
1858 	DP_TX_DESC_COOKIE,
1859 	DP_TX_DESC_FLUSH,
1860 	DP_TX_DESC_UNMAP,
1861 	DP_TX_COMP_UNMAP,
1862 	DP_TX_COMP_UNMAP_ERR,
1863 	DP_TX_COMP_MSDU_EXT,
1864 };
1865 
1866 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
1867 /* Size must be a power of 2, for bitwise index rotation */
1868 #define DP_TX_TCL_HISTORY_SIZE 0x4000
1869 #define DP_TX_TCL_HIST_PER_SLOT_MAX 2048
1870 #define DP_TX_TCL_HIST_MAX_SLOTS 8
1871 #define DP_TX_TCL_HIST_SLOT_SHIFT 11
1872 
1873 /* Size must be a power of 2, for bitwise index rotation */
1874 #define DP_TX_COMP_HISTORY_SIZE 0x4000
1875 #define DP_TX_COMP_HIST_PER_SLOT_MAX 2048
1876 #define DP_TX_COMP_HIST_MAX_SLOTS 8
1877 #define DP_TX_COMP_HIST_SLOT_SHIFT 11
1878 
1879 struct dp_tx_desc_event {
1880 	qdf_nbuf_t skb;
1881 	dma_addr_t paddr;
1882 	uint32_t sw_cookie;
1883 	enum dp_tx_event_type type;
1884 	uint64_t ts;
1885 };
1886 
1887 struct dp_tx_tcl_history {
1888 	qdf_atomic_t index;
1889 	uint16_t num_entries_per_slot;
1890 	uint16_t allocated;
1891 	struct dp_tx_desc_event *entry[DP_TX_TCL_HIST_MAX_SLOTS];
1892 };
1893 
1894 struct dp_tx_comp_history {
1895 	qdf_atomic_t index;
1896 	uint16_t num_entries_per_slot;
1897 	uint16_t allocated;
1898 	struct dp_tx_desc_event *entry[DP_TX_COMP_HIST_MAX_SLOTS];
1899 };
1900 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
1901 
1902 /* structure to record recent operation related variable */
1903 struct dp_last_op_info {
1904 	/* last link desc buf info through WBM release ring */
1905 	struct hal_buf_info wbm_rel_link_desc;
1906 	/* last link desc buf info through REO reinject ring */
1907 	struct hal_buf_info reo_reinject_link_desc;
1908 };
1909 
1910 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1911 
1912 /**
1913  * struct dp_swlm_tcl_data - params for tcl register write coalescing
1914  *			     decision making
1915  * @nbuf: TX packet
1916  * @tid: tid for transmitting the current packet
1917  * @num_ll_connections: Number of low latency connections on this vdev
1918  * @ring_id: TCL ring id
1919  * @pkt_len: Packet length
1920  *
1921  * This structure contains the information required by the software
1922  * latency manager to decide on whether to coalesce the current TCL
1923  * register write or not.
1924  */
1925 struct dp_swlm_tcl_data {
1926 	qdf_nbuf_t nbuf;
1927 	uint8_t tid;
1928 	uint8_t num_ll_connections;
1929 	uint8_t ring_id;
1930 	uint32_t pkt_len;
1931 };
1932 
1933 /**
1934  * union swlm_data - SWLM query data
1935  * @tcl_data: data for TCL query in SWLM
1936  */
1937 union swlm_data {
1938 	struct dp_swlm_tcl_data *tcl_data;
1939 };
1940 
1941 /**
1942  * struct dp_swlm_ops - SWLM ops
1943  * @tcl_wr_coalesce_check: handler to check if the current TCL register
1944  *			   write can be coalesced or not
1945  */
1946 struct dp_swlm_ops {
1947 	int (*tcl_wr_coalesce_check)(struct dp_soc *soc,
1948 				     struct dp_swlm_tcl_data *tcl_data);
1949 };
1950 
1951 /**
1952  * struct dp_swlm_stats - Stats for Software Latency manager.
1953  * @tcl: TCL stats
1954  * @tcl.timer_flush_success: Num successful TCL HP writes from timer context
1955  * @tcl.timer_flush_fail: Num failed TCL HP writes from timer context
1956  * @tcl.tid_fail: Num TCL register write coalescing skips, since the pkt
1957  *		 was being transmitted on a TID above coalescing threshold
1958  * @tcl.sp_frames: Num TCL register write coalescing skips, since the pkt
1959  *		  being transmitted was a special frame
1960  * @tcl.ll_connection: Num TCL register write coalescing skips, since the
1961  *		       vdev has low latency connections
1962  * @tcl.bytes_thresh_reached: Num TCL HP writes flush after the coalescing
1963  *			     bytes threshold was reached
1964  * @tcl.time_thresh_reached: Num TCL HP writes flush after the coalescing
1965  *			    session time expired
1966  * @tcl.tput_criteria_fail: Num TCL HP write coalescing failures, since the
1967  *			   throughput did not meet the session threshold
1968  * @tcl.coalesce_success: Num of TCL HP writes coalesced successfully.
1969  * @tcl.coalesce_fail: Num of TCL HP write coalesces that failed
1970  */
1971 struct dp_swlm_stats {
1972 	struct {
1973 		uint32_t timer_flush_success;
1974 		uint32_t timer_flush_fail;
1975 		uint32_t tid_fail;
1976 		uint32_t sp_frames;
1977 		uint32_t ll_connection;
1978 		uint32_t bytes_thresh_reached;
1979 		uint32_t time_thresh_reached;
1980 		uint32_t tput_criteria_fail;
1981 		uint32_t coalesce_success;
1982 		uint32_t coalesce_fail;
1983 	} tcl[MAX_TCL_DATA_RINGS];
1984 };
1985 
1986 /**
1987  * struct dp_swlm_tcl_params - Parameters based on TCL for different modules
1988  *			      in the Software latency manager.
1989  * @soc: DP soc reference
1990  * @ring_id: TCL ring id
1991  * @flush_timer: Timer for flushing the coalesced TCL HP writes
1992  * @sampling_session_tx_bytes: Num bytes transmitted in the sampling time
1993  * @bytes_flush_thresh: Bytes threshold to flush the TCL HP register write
1994  * @coalesce_end_time: End timestamp for current coalescing session
1995  * @bytes_coalesced: Num bytes coalesced in the current session
1996  * @prev_tx_packets: Previous TX packets accounted
1997  * @prev_tx_bytes: Previous TX bytes accounted
1998  * @prev_rx_bytes: Previous RX bytes accounted
1999  * @expire_time: expiry time for sample
2000  * @tput_pass_cnt: threshold throughput pass counter
2001  */
2002 struct dp_swlm_tcl_params {
2003 	struct dp_soc *soc;
2004 	uint32_t ring_id;
2005 	qdf_timer_t flush_timer;
2006 	uint32_t sampling_session_tx_bytes;
2007 	uint32_t bytes_flush_thresh;
2008 	uint64_t coalesce_end_time;
2009 	uint32_t bytes_coalesced;
2010 	uint32_t prev_tx_packets;
2011 	uint32_t prev_tx_bytes;
2012 	uint32_t prev_rx_bytes;
2013 	uint64_t expire_time;
2014 	uint32_t tput_pass_cnt;
2015 };
2016 
2017 /**
2018  * struct dp_swlm_params - Parameters for different modules in the
2019  *			  Software latency manager.
2020  * @rx_traffic_thresh: Threshold for RX traffic, to begin TCL register
2021  *			   write coalescing
2022  * @tx_traffic_thresh: Threshold for TX traffic, to begin TCL register
2023  *			   write coalescing
2024  * @sampling_time: Sampling time to test the throughput threshold
2025  * @time_flush_thresh: Time threshold to flush the TCL HP register write
2026  * @tx_thresh_multiplier: Multiplier to deduce the bytes threshold after
2027  *			      which the TCL HP register is written, thereby
2028  *			      ending the coalescing.
2029  * @tx_pkt_thresh: Threshold for TX packet count, to begin TCL register
2030  *		       write coalescing
2031  * @tcl: TCL ring specific params
2032  */
2033 
2034 struct dp_swlm_params {
2035 	uint32_t rx_traffic_thresh;
2036 	uint32_t tx_traffic_thresh;
2037 	uint32_t sampling_time;
2038 	uint32_t time_flush_thresh;
2039 	uint32_t tx_thresh_multiplier;
2040 	uint32_t tx_pkt_thresh;
2041 	struct dp_swlm_tcl_params tcl[MAX_TCL_DATA_RINGS];
2042 };
2043 
2044 /**
2045  * struct dp_swlm - Software latency manager context
2046  * @ops: SWLM ops pointers
2047  * @is_enabled: SWLM enabled/disabled
2048  * @is_init: SWLM module initialized
2049  * @stats: SWLM stats
2050  * @params: SWLM SRNG params
2052  */
2053 struct dp_swlm {
2054 	struct dp_swlm_ops *ops;
2055 	uint8_t is_enabled:1,
2056 		is_init:1;
2057 	struct dp_swlm_stats stats;
2058 	struct dp_swlm_params params;
2059 };
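
/*
 * Example (illustrative sketch, not part of the driver): a coalescing
 * decision roughly combines the per-ring params and stats above; the
 * real policy sits behind ops->tcl_wr_coalesce_check(). The helper
 * below is hypothetical and returns non-zero when the TCL HP write can
 * be deferred (coalesced) and zero when it should go out immediately:
 *
 *	static int swlm_should_coalesce(struct dp_swlm *swlm,
 *					struct dp_swlm_tcl_data *tcl_data)
 *	{
 *		struct dp_swlm_tcl_params *t =
 *				&swlm->params.tcl[tcl_data->ring_id];
 *		struct dp_swlm_stats *s = &swlm->stats;
 *
 *		if (!swlm->is_enabled)
 *			return 0;
 *
 *		if (tcl_data->num_ll_connections) {
 *			s->tcl[tcl_data->ring_id].ll_connection++;
 *			return 0;
 *		}
 *
 *		t->bytes_coalesced += tcl_data->pkt_len;
 *		if (t->bytes_coalesced > t->bytes_flush_thresh) {
 *			s->tcl[tcl_data->ring_id].bytes_thresh_reached++;
 *			return 0;
 *		}
 *
 *		s->tcl[tcl_data->ring_id].coalesce_success++;
 *		return 1;
 *	}
 */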
2060 #endif
2061 
2062 #ifdef IPA_OFFLOAD
2063 /* IPA uC datapath offload Wlan Tx resources */
2064 struct ipa_dp_tx_rsc {
2065 	/* Resource info to be passed to IPA */
2066 	qdf_dma_addr_t ipa_tcl_ring_base_paddr;
2067 	void *ipa_tcl_ring_base_vaddr;
2068 	uint32_t ipa_tcl_ring_size;
2069 	qdf_dma_addr_t ipa_tcl_hp_paddr;
2070 	uint32_t alloc_tx_buf_cnt;
2071 
2072 	qdf_dma_addr_t ipa_wbm_ring_base_paddr;
2073 	void *ipa_wbm_ring_base_vaddr;
2074 	uint32_t ipa_wbm_ring_size;
2075 	qdf_dma_addr_t ipa_wbm_tp_paddr;
2076 	/* WBM2SW HP shadow paddr */
2077 	qdf_dma_addr_t ipa_wbm_hp_shadow_paddr;
2078 
2079 	/* TX buffers populated into the WBM ring */
2080 	void **tx_buf_pool_vaddr_unaligned;
2081 	qdf_dma_addr_t *tx_buf_pool_paddr_unaligned;
2082 };
2083 
2084 /* IPA uC datapath offload Wlan Rx resources */
2085 struct ipa_dp_rx_rsc {
2086 	/* Resource info to be passed to IPA */
2087 	qdf_dma_addr_t ipa_reo_ring_base_paddr;
2088 	void *ipa_reo_ring_base_vaddr;
2089 	uint32_t ipa_reo_ring_size;
2090 	qdf_dma_addr_t ipa_reo_tp_paddr;
2091 
2092 	/* Resource info to be passed to firmware and IPA */
2093 	qdf_dma_addr_t ipa_rx_refill_buf_ring_base_paddr;
2094 	void *ipa_rx_refill_buf_ring_base_vaddr;
2095 	uint32_t ipa_rx_refill_buf_ring_size;
2096 	qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr;
2097 };
2098 #endif
2099 
2100 struct dp_tx_msdu_info_s;
2101 /**
2102  * enum dp_context_type- DP Context Type
2103  * @DP_CONTEXT_TYPE_SOC: Context type DP SOC
2104  * @DP_CONTEXT_TYPE_PDEV: Context type DP PDEV
2105  * @DP_CONTEXT_TYPE_VDEV: Context type DP VDEV
2106  * @DP_CONTEXT_TYPE_PEER: Context type DP PEER
2107  * @DP_CONTEXT_TYPE_MON_SOC: Context type DP MON SOC
2108  * @DP_CONTEXT_TYPE_MON_PDEV: Context type DP MON PDEV
2109  *
2110  * Helper enums to be used to retrieve the size of the corresponding
2111  * data structure by passing the type.
2112  */
2113 enum dp_context_type {
2114 	DP_CONTEXT_TYPE_SOC,
2115 	DP_CONTEXT_TYPE_PDEV,
2116 	DP_CONTEXT_TYPE_VDEV,
2117 	DP_CONTEXT_TYPE_PEER,
2118 	DP_CONTEXT_TYPE_MON_SOC,
2119 	DP_CONTEXT_TYPE_MON_PDEV
2120 };
2121 
2122 /**
2123  * struct dp_arch_ops - DP target specific arch ops
2124  * @txrx_soc_attach:
2125  * @txrx_soc_detach:
2126  * @txrx_soc_init:
2127  * @txrx_soc_deinit:
2128  * @txrx_soc_srng_alloc:
2129  * @txrx_soc_srng_init:
2130  * @txrx_soc_srng_deinit:
2131  * @txrx_soc_srng_free:
2132  * @txrx_pdev_attach:
2133  * @txrx_pdev_detach:
2134  * @txrx_vdev_attach:
2135  * @txrx_vdev_detach:
2136  * @txrx_peer_map_attach:
2137  * @txrx_peer_map_detach:
2138  * @dp_rxdma_ring_sel_cfg:
2139  * @soc_cfg_attach:
2140  * @txrx_peer_setup:
2141  * @peer_get_reo_hash:
2142  * @reo_remap_config:
2143  * @tx_hw_enqueue: enqueue TX data to HW
2144  * @tx_comp_get_params_from_hal_desc: get software tx descriptor and release
2145  * 				      source from HAL desc for wbm release ring
2146  * @dp_tx_process_htt_completion:
2147  * @dp_rx_process:
2148  * @dp_tx_send_fast:
2149  * @dp_tx_desc_pool_init:
2150  * @dp_tx_desc_pool_deinit:
2151  * @dp_rx_desc_pool_init:
2152  * @dp_rx_desc_pool_deinit:
2153  * @dp_wbm_get_rx_desc_from_hal_desc:
2154  * @dp_rx_intrabss_mcast_handler:
2155  * @dp_rx_word_mask_subscribe:
2156  * @dp_rx_desc_cookie_2_va:
2157  * @dp_service_near_full_srngs: Handler for servicing the near full IRQ
2158  * @tx_implicit_rbm_set:
2159  * @dp_rx_peer_metadata_peer_id_get:
2160  * @dp_rx_chain_msdus:
2161  * @txrx_set_vdev_param: target specific ops while setting vdev params
2162  * @txrx_get_vdev_mcast_param: target specific ops for getting vdev
2163  *			       params related to multicast
2164  * @txrx_get_context_size:
2165  * @txrx_get_mon_context_size:
2166  * @dp_srng_test_and_update_nf_params: Check if the srng is in near full state
2167  *				and set the near-full params.
2168  * @dp_tx_mcast_handler:
2169  * @dp_rx_mcast_handler:
2170  * @dp_tx_is_mcast_primary:
2171  * @dp_soc_get_by_idle_bm_id:
2172  * @mlo_peer_find_hash_detach:
2173  * @mlo_peer_find_hash_attach:
2174  * @mlo_peer_find_hash_add:
2175  * @mlo_peer_find_hash_remove:
2176  * @mlo_peer_find_hash_find:
2177  * @get_reo_qdesc_addr:
2178  * @get_rx_hash_key:
2179  * @dp_set_rx_fst:
2180  * @dp_get_rx_fst:
2181  * @dp_rx_fst_deref:
2182  * @dp_rx_fst_ref:
2183  * @txrx_print_peer_stats:
2184  * @dp_peer_rx_reorder_queue_setup: Dp peer reorder queue setup
2185  * @dp_find_peer_by_destmac:
2186  * @dp_bank_reconfig:
2187  * @dp_rx_replenish_soc_get:
2188  * @dp_soc_get_num_soc:
2189  * @dp_reconfig_tx_vdev_mcast_ctrl:
2190  * @dp_cc_reg_cfg_init:
2191  * @dp_tx_compute_hw_delay:
2192  * @print_mlo_ast_stats:
2193  * @dp_partner_chips_map:
2194  * @dp_partner_chips_unmap:
2195  * @ipa_get_bank_id: Get TCL bank id used by IPA
2196  * @dp_txrx_ppeds_rings_status:
2197  * @dp_tx_ppeds_inuse_desc:
2198  * @dp_tx_ppeds_cfg_astidx_cache_mapping:
2199  * @txrx_soc_ppeds_start:
2200  * @txrx_soc_ppeds_stop:
2201  * @dp_register_ppeds_interrupts:
2202  * @dp_free_ppeds_interrupts:
2203  * @dp_rx_wbm_err_reap_desc: Reap WBM Error Ring Descriptor
2204  * @dp_rx_null_q_desc_handle: Handle Null Queue Exception Error
2205  */
2206 struct dp_arch_ops {
2207 	/* INIT/DEINIT Arch Ops */
2208 	QDF_STATUS (*txrx_soc_attach)(struct dp_soc *soc,
2209 				      struct cdp_soc_attach_params *params);
2210 	QDF_STATUS (*txrx_soc_detach)(struct dp_soc *soc);
2211 	QDF_STATUS (*txrx_soc_init)(struct dp_soc *soc);
2212 	QDF_STATUS (*txrx_soc_deinit)(struct dp_soc *soc);
2213 	QDF_STATUS (*txrx_soc_srng_alloc)(struct dp_soc *soc);
2214 	QDF_STATUS (*txrx_soc_srng_init)(struct dp_soc *soc);
2215 	void (*txrx_soc_srng_deinit)(struct dp_soc *soc);
2216 	void (*txrx_soc_srng_free)(struct dp_soc *soc);
2217 	QDF_STATUS (*txrx_pdev_attach)(struct dp_pdev *pdev,
2218 				       struct cdp_pdev_attach_params *params);
2219 	QDF_STATUS (*txrx_pdev_detach)(struct dp_pdev *pdev);
2220 	QDF_STATUS (*txrx_vdev_attach)(struct dp_soc *soc,
2221 				       struct dp_vdev *vdev);
2222 	QDF_STATUS (*txrx_vdev_detach)(struct dp_soc *soc,
2223 				       struct dp_vdev *vdev);
2224 	QDF_STATUS (*txrx_peer_map_attach)(struct dp_soc *soc);
2225 	void (*txrx_peer_map_detach)(struct dp_soc *soc);
2226 	QDF_STATUS (*dp_rxdma_ring_sel_cfg)(struct dp_soc *soc);
2227 	void (*soc_cfg_attach)(struct dp_soc *soc);
2228 	QDF_STATUS (*txrx_peer_setup)(struct dp_soc *soc,
2229 				      struct dp_peer *peer);
2230 	void (*peer_get_reo_hash)(struct dp_vdev *vdev,
2231 				  struct cdp_peer_setup_info *setup_info,
2232 				  enum cdp_host_reo_dest_ring *reo_dest,
2233 				  bool *hash_based,
2234 				  uint8_t *lmac_peer_id_msb);
2235 	 bool (*reo_remap_config)(struct dp_soc *soc, uint32_t *remap0,
2236 				  uint32_t *remap1, uint32_t *remap2);
2237 
2238 	/* TX RX Arch Ops */
2239 	QDF_STATUS (*tx_hw_enqueue)(struct dp_soc *soc, struct dp_vdev *vdev,
2240 				    struct dp_tx_desc_s *tx_desc,
2241 				    uint16_t fw_metadata,
2242 				    struct cdp_tx_exception_metadata *metadata,
2243 				    struct dp_tx_msdu_info_s *msdu_info);
2244 
2245 	void (*tx_comp_get_params_from_hal_desc)(struct dp_soc *soc,
2246 						 void *tx_comp_hal_desc,
2247 						 struct dp_tx_desc_s **desc);
2248 
2249 	qdf_nbuf_t (*dp_tx_mlo_mcast_send)(struct dp_soc *soc,
2250 					   struct dp_vdev *vdev,
2251 					   qdf_nbuf_t nbuf,
2252 					   struct cdp_tx_exception_metadata
2253 					   *tx_exc_metadata);
2254 
2255 	void (*dp_tx_process_htt_completion)(struct dp_soc *soc,
2256 					     struct dp_tx_desc_s *tx_desc,
2257 					     uint8_t *status,
2258 					     uint8_t ring_id);
2259 
2260 	uint32_t (*dp_rx_process)(struct dp_intr *int_ctx,
2261 				  hal_ring_handle_t hal_ring_hdl,
2262 				  uint8_t reo_ring_num, uint32_t quota);
2263 
2264 	qdf_nbuf_t (*dp_tx_send_fast)(struct cdp_soc_t *soc_hdl,
2265 				      uint8_t vdev_id,
2266 				      qdf_nbuf_t nbuf);
2267 
2268 	QDF_STATUS (*dp_tx_desc_pool_init)(struct dp_soc *soc,
2269 					   uint32_t num_elem,
2270 					   uint8_t pool_id);
2271 	void (*dp_tx_desc_pool_deinit)(
2272 				struct dp_soc *soc,
2273 				struct dp_tx_desc_pool_s *tx_desc_pool,
2274 				uint8_t pool_id);
2275 
2276 	QDF_STATUS (*dp_rx_desc_pool_init)(struct dp_soc *soc,
2277 					   struct rx_desc_pool *rx_desc_pool,
2278 					   uint32_t pool_id);
2279 	void (*dp_rx_desc_pool_deinit)(struct dp_soc *soc,
2280 				       struct rx_desc_pool *rx_desc_pool,
2281 				       uint32_t pool_id);
2282 
2283 	QDF_STATUS (*dp_wbm_get_rx_desc_from_hal_desc)(
2284 						struct dp_soc *soc,
2285 						void *ring_desc,
2286 						struct dp_rx_desc **r_rx_desc);
2287 
2288 	bool
2289 	(*dp_rx_intrabss_mcast_handler)(struct dp_soc *soc,
2290 					struct dp_txrx_peer *ta_txrx_peer,
2291 					qdf_nbuf_t nbuf_copy,
2292 					struct cdp_tid_rx_stats *tid_stats);
2293 
2294 	void (*dp_rx_word_mask_subscribe)(
2295 				struct dp_soc *soc,
2296 				uint32_t *msg_word,
2297 				void *rx_filter);
2298 
2299 	struct dp_rx_desc *(*dp_rx_desc_cookie_2_va)(struct dp_soc *soc,
2300 						     uint32_t cookie);
2301 	uint32_t (*dp_service_near_full_srngs)(struct dp_soc *soc,
2302 					       struct dp_intr *int_ctx,
2303 					       uint32_t dp_budget);
2304 	void (*tx_implicit_rbm_set)(struct dp_soc *soc, uint8_t tx_ring_id,
2305 				    uint8_t bm_id);
2306 	uint16_t (*dp_rx_peer_metadata_peer_id_get)(struct dp_soc *soc,
2307 						    uint32_t peer_metadata);
2308 	bool (*dp_rx_chain_msdus)(struct dp_soc *soc, qdf_nbuf_t nbuf,
2309 				  uint8_t *rx_tlv_hdr, uint8_t mac_id);
2310 	/* Control Arch Ops */
2311 	QDF_STATUS (*txrx_set_vdev_param)(struct dp_soc *soc,
2312 					  struct dp_vdev *vdev,
2313 					  enum cdp_vdev_param_type param,
2314 					  cdp_config_param_type val);
2315 
2316 	QDF_STATUS (*txrx_get_vdev_mcast_param)(struct dp_soc *soc,
2317 						struct dp_vdev *vdev,
2318 						cdp_config_param_type *val);
2319 
2320 	/* Misc Arch Ops */
2321 	qdf_size_t (*txrx_get_context_size)(enum dp_context_type);
2322 #ifdef WIFI_MONITOR_SUPPORT
2323 	qdf_size_t (*txrx_get_mon_context_size)(enum dp_context_type);
2324 #endif
2325 	int (*dp_srng_test_and_update_nf_params)(struct dp_soc *soc,
2326 						 struct dp_srng *dp_srng,
2327 						 int *max_reap_limit);
2328 
2329 	/* MLO ops */
2330 #ifdef WLAN_FEATURE_11BE_MLO
2331 #ifdef WLAN_MCAST_MLO
2332 	void (*dp_tx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
2333 				    qdf_nbuf_t nbuf);
2334 	bool (*dp_rx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
2335 				    struct dp_txrx_peer *peer, qdf_nbuf_t nbuf);
2336 	bool (*dp_tx_is_mcast_primary)(struct dp_soc *soc,
2337 				       struct dp_vdev *vdev);
2338 #endif
2339 	struct dp_soc * (*dp_soc_get_by_idle_bm_id)(struct dp_soc *soc,
2340 						    uint8_t bm_id);
2341 
2342 	void (*mlo_peer_find_hash_detach)(struct dp_soc *soc);
2343 	QDF_STATUS (*mlo_peer_find_hash_attach)(struct dp_soc *soc);
2344 	void (*mlo_peer_find_hash_add)(struct dp_soc *soc,
2345 				       struct dp_peer *peer);
2346 	void (*mlo_peer_find_hash_remove)(struct dp_soc *soc,
2347 					  struct dp_peer *peer);
2348 
2349 	struct dp_peer *(*mlo_peer_find_hash_find)(struct dp_soc *soc,
2350 						   uint8_t *peer_mac_addr,
2351 						   int mac_addr_is_aligned,
2352 						   enum dp_mod_id mod_id,
2353 						   uint8_t vdev_id);
2354 #endif
2355 	uint64_t (*get_reo_qdesc_addr)(hal_soc_handle_t hal_soc_hdl,
2356 				       uint8_t *dst_ring_desc,
2357 				       uint8_t *buf,
2358 				       struct dp_txrx_peer *peer,
2359 				       unsigned int tid);
2360 	void (*get_rx_hash_key)(struct dp_soc *soc,
2361 				struct cdp_lro_hash_config *lro_hash);
2362 	void (*dp_set_rx_fst)(struct dp_soc *soc, struct dp_rx_fst *fst);
2363 	struct dp_rx_fst *(*dp_get_rx_fst)(struct dp_soc *soc);
2364 	uint8_t (*dp_rx_fst_deref)(struct dp_soc *soc);
2365 	void (*dp_rx_fst_ref)(struct dp_soc *soc);
2366 	void (*txrx_print_peer_stats)(struct cdp_peer_stats *peer_stats,
2367 				      enum peer_stats_type stats_type);
2368 	QDF_STATUS (*dp_peer_rx_reorder_queue_setup)(struct dp_soc *soc,
2369 						     struct dp_peer *peer,
2370 						     int tid,
2371 						     uint32_t ba_window_size);
2372 	struct dp_peer *(*dp_find_peer_by_destmac)(struct dp_soc *soc,
2373 						   uint8_t *dest_mac_addr,
2374 						   uint8_t vdev_id);
2375 	void (*dp_bank_reconfig)(struct dp_soc *soc, struct dp_vdev *vdev);
2376 
2377 	struct dp_soc * (*dp_rx_replenish_soc_get)(struct dp_soc *soc,
2378 						   uint8_t chip_id);
2379 
2380 	uint8_t (*dp_soc_get_num_soc)(struct dp_soc *soc);
2381 	void (*dp_reconfig_tx_vdev_mcast_ctrl)(struct dp_soc *soc,
2382 					       struct dp_vdev *vdev);
2383 
2384 	void (*dp_cc_reg_cfg_init)(struct dp_soc *soc, bool is_4k_align);
2385 
2386 	QDF_STATUS
2387 	(*dp_tx_compute_hw_delay)(struct dp_soc *soc,
2388 				  struct dp_vdev *vdev,
2389 				  struct hal_tx_completion_status *ts,
2390 				  uint32_t *delay_us);
2391 	void (*print_mlo_ast_stats)(struct dp_soc *soc);
2392 	void (*dp_partner_chips_map)(struct dp_soc *soc,
2393 				     struct dp_peer *peer,
2394 				     uint16_t peer_id);
2395 	void (*dp_partner_chips_unmap)(struct dp_soc *soc,
2396 				       uint16_t peer_id);
2397 
2398 #ifdef IPA_OFFLOAD
2399 	int8_t (*ipa_get_bank_id)(struct dp_soc *soc);
2400 #endif
2401 #ifdef WLAN_SUPPORT_PPEDS
2402 	void (*dp_txrx_ppeds_rings_status)(struct dp_soc *soc);
2403 	void (*dp_tx_ppeds_inuse_desc)(struct dp_soc *soc);
2404 	void (*dp_tx_ppeds_cfg_astidx_cache_mapping)(struct dp_soc *soc,
2405 						     struct dp_vdev *vdev,
2406 						     bool peer_map);
2407 #endif
2408 	QDF_STATUS (*txrx_soc_ppeds_start)(struct dp_soc *soc);
2409 	void (*txrx_soc_ppeds_stop)(struct dp_soc *soc);
2410 	int (*dp_register_ppeds_interrupts)(struct dp_soc *soc,
2411 					    struct dp_srng *srng, int vector,
2412 					    int ring_type, int ring_num);
2413 	void (*dp_free_ppeds_interrupts)(struct dp_soc *soc,
2414 					 struct dp_srng *srng, int ring_type,
2415 					 int ring_num);
2416 	qdf_nbuf_t (*dp_rx_wbm_err_reap_desc)(struct dp_intr *int_ctx,
2417 					      struct dp_soc *soc,
2418 					      hal_ring_handle_t hal_ring_hdl,
2419 					      uint32_t quota,
2420 					      uint32_t *rx_bufs_used);
2421 	QDF_STATUS (*dp_rx_null_q_desc_handle)(struct dp_soc *soc,
2422 					       qdf_nbuf_t nbuf,
2423 					       uint8_t *rx_tlv_hdr,
2424 					       uint8_t pool_id,
2425 					       struct dp_txrx_peer *txrx_peer,
2426 					       bool is_reo_exception);
2427 };
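
/*
 * Example (illustrative sketch, not part of the driver): common code
 * reaches target-specific behaviour through these function pointers
 * (struct dp_soc embeds a struct dp_arch_ops), which are expected to be
 * filled in for the detected target when the soc is attached. A REO
 * destination ring would then be serviced roughly as:
 *
 *	uint32_t work_done;
 *
 *	work_done = soc->arch_ops.dp_rx_process(int_ctx, hal_ring_hdl,
 *						reo_ring_num, quota);
 *
 * where int_ctx, hal_ring_hdl, reo_ring_num and quota come from the
 * interrupt context being serviced; only the call shape is shown here.
 */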
2428 
2429 /**
2430  * struct dp_soc_features: Data structure holding the SOC level feature flags.
2431  * @pn_in_reo_dest: PN provided by hardware in the REO destination ring.
2432  * @dmac_cmn_src_rxbuf_ring_enabled: Flag to indicate DMAC mode common Rx
2433  *				     buffer source rings
2434  * @rssi_dbm_conv_support: Rssi dbm conversion support param.
2435  * @umac_hw_reset_support: UMAC HW reset support
2436  * @wds_ext_ast_override_enable:
2437  */
2438 struct dp_soc_features {
2439 	uint8_t pn_in_reo_dest:1,
2440 		dmac_cmn_src_rxbuf_ring_enabled:1;
2441 	bool rssi_dbm_conv_support;
2442 	bool umac_hw_reset_support;
2443 	bool wds_ext_ast_override_enable;
2444 };
2445 
2446 enum sysfs_printing_mode {
2447 	PRINTING_MODE_DISABLED = 0,
2448 	PRINTING_MODE_ENABLED
2449 };
2450 
2451 /**
2452  * typedef notify_pre_reset_fw_callback() - pre-reset callback
2453  * @soc: DP SoC
2454  */
2455 typedef void (*notify_pre_reset_fw_callback)(struct dp_soc *soc);
2456 
2457 #ifdef WLAN_SYSFS_DP_STATS
2458 /**
2459  * struct sysfs_stats_config: Data structure holding stats sysfs config.
2460  * @rw_stats_lock: Lock to read and write to stat_type and mac_id.
2461  * @sysfs_read_lock: Lock held while another stat req is being executed.
2462  * @sysfs_write_user_buffer: Lock to change buff len, max buf len
2463  * and *buf.
2464  * @sysfs_txrx_fw_request_done: Event to wait for firmware response.
2465  * @stat_type_requested: stat type requested.
2466  * @mac_id: mac id for which the stat type is requested.
2467  * @printing_mode: Should a print go through.
2468  * @process_id: Process allowed to write to buffer.
2469  * @curr_buffer_length: Curr length of buffer written
2470  * @max_buffer_length: Max buffer length.
2471  * @buf: Sysfs buffer.
2472  */
2473 struct sysfs_stats_config {
2474 	/* lock held to read stats */
2475 	qdf_spinlock_t rw_stats_lock;
2476 	qdf_mutex_t sysfs_read_lock;
2477 	qdf_spinlock_t sysfs_write_user_buffer;
2478 	qdf_event_t sysfs_txrx_fw_request_done;
2479 	uint32_t stat_type_requested;
2480 	uint32_t mac_id;
2481 	enum sysfs_printing_mode printing_mode;
2482 	int process_id;
2483 	uint16_t curr_buffer_length;
2484 	uint16_t max_buffer_length;
2485 	char *buf;
2486 };
2487 #endif
2488 
2489 /* SOC level structure for data path */
2490 struct dp_soc {
2491 	/**
2492 	 * re-use memory section starts
2493 	 */
2494 
2495 	/* Common base structure - Should be the first member */
2496 	struct cdp_soc_t cdp_soc;
2497 
2498 	/* SoC Obj */
2499 	struct cdp_ctrl_objmgr_psoc *ctrl_psoc;
2500 
2501 	/* OS device abstraction */
2502 	qdf_device_t osdev;
2503 
2504 	/*cce disable*/
2505 	bool cce_disable;
2506 
2507 	/* WLAN config context */
2508 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx;
2509 
2510 	/* HTT handle for host-fw interaction */
2511 	struct htt_soc *htt_handle;
2512 
2513 	/* Common init done */
2514 	qdf_atomic_t cmn_init_done;
2515 
2516 	/* Opaque hif handle */
2517 	struct hif_opaque_softc *hif_handle;
2518 
2519 	/* PDEVs on this SOC */
2520 	struct dp_pdev *pdev_list[MAX_PDEV_CNT];
2521 
2522 	/* Ring used to replenish rx buffers (maybe to the firmware or MAC) */
2523 	struct dp_srng rx_refill_buf_ring[MAX_PDEV_CNT];
2524 
2525 	struct dp_srng rxdma_mon_desc_ring[MAX_NUM_LMAC_HW];
2526 
2527 	/* RXDMA error destination ring */
2528 	struct dp_srng rxdma_err_dst_ring[MAX_NUM_LMAC_HW];
2529 
2530 	/* RXDMA monitor buffer replenish ring */
2531 	struct dp_srng rxdma_mon_buf_ring[MAX_NUM_LMAC_HW];
2532 
2533 	/* RXDMA monitor destination ring */
2534 	struct dp_srng rxdma_mon_dst_ring[MAX_NUM_LMAC_HW];
2535 
2536 	/* RXDMA monitor status ring. TBD: Check format of this ring */
2537 	struct dp_srng rxdma_mon_status_ring[MAX_NUM_LMAC_HW];
2538 
2539 	/* Number of PDEVs */
2540 	uint8_t pdev_count;
2541 
2542 	/*ast override support in HW*/
2543 	bool ast_override_support;
2544 
2545 	/*number of hw dscp tid map*/
2546 	uint8_t num_hw_dscp_tid_map;
2547 
2548 	/* HAL SOC handle */
2549 	hal_soc_handle_t hal_soc;
2550 
2551 	/* rx monitor pkt tlv size */
2552 	uint16_t rx_mon_pkt_tlv_size;
2553 	/* rx pkt tlv size */
2554 	uint16_t rx_pkt_tlv_size;
2555 	/* rx pkt tlv size in current operation mode */
2556 	uint16_t curr_rx_pkt_tlv_size;
2557 
2558 	struct dp_arch_ops arch_ops;
2559 
2560 	/* Device ID coming from Bus sub-system */
2561 	uint32_t device_id;
2562 
2563 	/* Link descriptor pages */
2564 	struct qdf_mem_multi_page_t link_desc_pages;
2565 
2566 	/* total link descriptors for regular RX and TX */
2567 	uint32_t total_link_descs;
2568 
2569 	/* Link descriptor Idle list for HW internal use (SRNG mode) */
2570 	struct dp_srng wbm_idle_link_ring;
2571 
2572 	/* Link descriptor Idle list for HW internal use (scatter buffer mode)
2573 	 */
2574 	qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS];
2575 	void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS];
2576 	uint32_t num_scatter_bufs;
2577 
2578 	/* Tx SW descriptor pool */
2579 	struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS];
2580 
2581 	/* Tx MSDU Extension descriptor pool */
2582 	struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS];
2583 
2584 	/* Tx TSO descriptor pool */
2585 	struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS];
2586 
2587 	/* Tx TSO Num of segments pool */
2588 	struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS];
2589 
2590 	/* REO destination rings */
2591 	struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS];
2592 
2593 	/* REO exception ring - See if we should combine this with reo_dest_ring */
2594 	struct dp_srng reo_exception_ring;
2595 
2596 	/* REO reinjection ring */
2597 	struct dp_srng reo_reinject_ring;
2598 
2599 	/* REO command ring */
2600 	struct dp_srng reo_cmd_ring;
2601 
2602 	/* REO command status ring */
2603 	struct dp_srng reo_status_ring;
2604 
2605 	/* WBM Rx release ring */
2606 	struct dp_srng rx_rel_ring;
2607 
2608 	/* TCL data ring */
2609 	struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS];
2610 
2611 	/* Number of Tx comp rings */
2612 	uint8_t num_tx_comp_rings;
2613 
2614 	/* Number of TCL data rings */
2615 	uint8_t num_tcl_data_rings;
2616 
2617 	/* TCL CMD_CREDIT ring */
2618 	bool init_tcl_cmd_cred_ring;
2619 
2620 	/* It is used as credit based ring on QCN9000 else command ring */
2621 	struct dp_srng tcl_cmd_credit_ring;
2622 
2623 	/* TCL command status ring */
2624 	struct dp_srng tcl_status_ring;
2625 
2626 	/* WBM Tx completion rings */
2627 	struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS];
2628 
2629 	/* Common WBM link descriptor release ring (SW to WBM) */
2630 	struct dp_srng wbm_desc_rel_ring;
2631 
2632 	/* DP Interrupts */
2633 	struct dp_intr intr_ctx[WLAN_CFG_INT_NUM_CONTEXTS];
2634 
2635 	/* Monitor mode mac id to dp_intr_id map */
2636 	int mon_intr_id_lmac_map[MAX_NUM_LMAC_HW];
2637 	/* Rx SW descriptor pool for RXDMA monitor buffer */
2638 	struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS];
2639 
2640 	/* Rx SW descriptor pool for RXDMA status buffer */
2641 	struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS];
2642 
2643 	/* Rx SW descriptor pool for RXDMA buffer */
2644 	struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS];
2645 
2646 	/* Number of REO destination rings */
2647 	uint8_t num_reo_dest_rings;
2648 
2649 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2650 	/* lock to control access to soc TX descriptors */
2651 	qdf_spinlock_t flow_pool_array_lock;
2652 
2653 	/* pause callback to pause TX queues as per flow control */
2654 	tx_pause_callback pause_cb;
2655 
2656 	/* flow pool related statistics */
2657 	struct dp_txrx_pool_stats pool_stats;
2658 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
2659 
2660 	notify_pre_reset_fw_callback notify_fw_callback;
2661 
2662 	unsigned long service_rings_running;
2663 
2664 	uint32_t wbm_idle_scatter_buf_size;
2665 
2666 	/* VDEVs on this SOC */
2667 	struct dp_vdev *vdev_id_map[MAX_VDEV_CNT];
2668 
2669 	/* Tx H/W queues lock */
2670 	qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];
2671 
2672 	/* Tx ring map for interrupt processing */
2673 	uint8_t tx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
2674 
2675 	/* Rx ring map for interrupt processing */
2676 	uint8_t rx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
2677 
2678 	/* peer ID to peer object map (array of pointers to peer objects) */
2679 	struct dp_peer **peer_id_to_obj_map;
2680 
2681 	struct {
2682 		unsigned mask;
2683 		unsigned idx_bits;
2684 		TAILQ_HEAD(, dp_peer) * bins;
2685 	} peer_hash;
2686 
2687 	/* rx defrag state – TBD: do we need this per radio? */
2688 	struct {
2689 		struct {
2690 			TAILQ_HEAD(, dp_rx_tid_defrag) waitlist;
2691 			uint32_t timeout_ms;
2692 			uint32_t next_flush_ms;
2693 			qdf_spinlock_t defrag_lock;
2694 		} defrag;
2695 		struct {
2696 			int defrag_timeout_check;
2697 			int dup_check;
2698 		} flags;
2699 		TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list;
2700 		qdf_spinlock_t reo_cmd_lock;
2701 	} rx;
2702 
2703 	/* optional rx processing function */
2704 	void (*rx_opt_proc)(
2705 		struct dp_vdev *vdev,
2706 		struct dp_peer *peer,
2707 		unsigned tid,
2708 		qdf_nbuf_t msdu_list);
2709 
2710 	/* pool addr for mcast enhance buff */
2711 	struct {
2712 		int size;
2713 		uint32_t paddr;
2714 		uint32_t *vaddr;
2715 		struct dp_tx_me_buf_t *freelist;
2716 		int buf_in_use;
2717 		qdf_dma_mem_context(memctx);
2718 	} me_buf;
2719 
2720 	/* Protect peer hash table */
2721 	DP_MUTEX_TYPE peer_hash_lock;
2722 	/* Protect peer_id_to_objmap */
2723 	DP_MUTEX_TYPE peer_map_lock;
2724 
2725 	/* maximum number of supported peers */
2726 	uint32_t max_peers;
2727 	/* maximum value for peer_id */
2728 	uint32_t max_peer_id;
2729 
2730 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
2731 	uint32_t peer_id_shift;
2732 	uint32_t peer_id_mask;
2733 #endif
2734 
2735 	/* SoC level data path statistics */
2736 	struct dp_soc_stats stats;
2737 #ifdef WLAN_SYSFS_DP_STATS
2738 	/* sysfs config for DP stats */
2739 	struct sysfs_stats_config *sysfs_config;
2740 #endif
2741 	/* timestamp to keep track of msdu buffers received on reo err ring */
2742 	uint64_t rx_route_err_start_pkt_ts;
2743 
2744 	/* Num RX Route err in a given window to keep track of rate of errors */
2745 	uint32_t rx_route_err_in_window;
2746 
2747 	/* Enable processing of Tx completion status words */
2748 	bool process_tx_status;
2749 	bool process_rx_status;
2750 	struct dp_ast_entry **ast_table;
2751 	struct {
2752 		unsigned mask;
2753 		unsigned idx_bits;
2754 		TAILQ_HEAD(, dp_ast_entry) * bins;
2755 	} ast_hash;
2756 
2757 #ifdef DP_TX_HW_DESC_HISTORY
2758 	struct dp_tx_hw_desc_history tx_hw_desc_history;
2759 #endif
2760 
2761 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
2762 	struct dp_rx_history *rx_ring_history[MAX_REO_DEST_RINGS];
2763 	struct dp_rx_refill_history *rx_refill_ring_history[MAX_PDEV_CNT];
2764 	struct dp_rx_err_history *rx_err_ring_history;
2765 	struct dp_rx_reinject_history *rx_reinject_ring_history;
2766 #endif
2767 
2768 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
2769 	struct dp_mon_status_ring_history *mon_status_ring_history;
2770 #endif
2771 
2772 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
2773 	struct dp_tx_tcl_history tx_tcl_history;
2774 	struct dp_tx_comp_history tx_comp_history;
2775 #endif
2776 
2777 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY
2778 	struct dp_cfg_event_history cfg_event_history;
2779 #endif
2780 
2781 	qdf_spinlock_t ast_lock;
2782 	/*Timer for AST entry ageout maintenance */
2783 	qdf_timer_t ast_aging_timer;
2784 
2785 	/*Timer counter for WDS AST entry ageout*/
2786 	uint8_t wds_ast_aging_timer_cnt;
2787 	bool pending_ageout;
2788 	bool ast_offload_support;
2789 	bool host_ast_db_enable;
2790 	uint32_t max_ast_ageout_count;
2791 	uint8_t eapol_over_control_port;
2792 
2793 	uint8_t sta_mode_search_policy;
2794 	qdf_timer_t lmac_reap_timer;
2795 	uint8_t lmac_timer_init;
2796 	qdf_timer_t int_timer;
2797 	uint8_t intr_mode;
2798 	uint8_t lmac_polled_mode;
2799 
2800 	qdf_list_t reo_desc_freelist;
2801 	qdf_spinlock_t reo_desc_freelist_lock;
2802 
2803 	/* htt stats */
2804 	struct htt_t2h_stats htt_stats;
2805 
2806 	void *external_txrx_handle; /* External data path handle */
2807 #ifdef IPA_OFFLOAD
2808 	struct ipa_dp_tx_rsc ipa_uc_tx_rsc;
2809 #ifdef IPA_WDI3_TX_TWO_PIPES
2810 	/* Resources for the alternative IPA TX pipe */
2811 	struct ipa_dp_tx_rsc ipa_uc_tx_rsc_alt;
2812 #endif
2813 
2814 	struct ipa_dp_rx_rsc ipa_uc_rx_rsc;
2815 #ifdef IPA_WDI3_VLAN_SUPPORT
2816 	struct ipa_dp_rx_rsc ipa_uc_rx_rsc_alt;
2817 #endif
2818 	qdf_atomic_t ipa_pipes_enabled;
2819 	bool ipa_first_tx_db_access;
2820 	qdf_spinlock_t ipa_rx_buf_map_lock;
2821 	bool ipa_rx_buf_map_lock_initialized;
2822 	uint8_t ipa_reo_ctx_lock_required[MAX_REO_DEST_RINGS];
2823 #endif
2824 
2825 #ifdef WLAN_FEATURE_STATS_EXT
2826 	struct {
2827 		uint32_t rx_mpdu_received;
2828 		uint32_t rx_mpdu_missed;
2829 	} ext_stats;
2830 	qdf_event_t rx_hw_stats_event;
2831 	qdf_spinlock_t rx_hw_stats_lock;
2832 	bool is_last_stats_ctx_init;
2833 #endif /* WLAN_FEATURE_STATS_EXT */
2834 
2835 	/* Indicates HTT map/unmap versions */
2836 	uint8_t peer_map_unmap_versions;
2837 	/* Per peer per Tid ba window size support */
2838 	uint8_t per_tid_basize_max_tid;
2839 	/* Soc level flag to enable da_war */
2840 	uint8_t da_war_enabled;
2841 	/* number of active ast entries */
2842 	uint32_t num_ast_entries;
2843 	/* peer extended rate statistics context at soc level*/
2844 	struct cdp_soc_rate_stats_ctx *rate_stats_ctx;
2845 	/* peer extended rate statistics control flag */
2846 	bool peerstats_enabled;
2847 
2848 	/* 8021p PCP-TID map values */
2849 	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
2850 	/* TID map priority value */
2851 	uint8_t tidmap_prty;
2852 	/* Pointer to global per ring type specific configuration table */
2853 	struct wlan_srng_cfg *wlan_srng_cfg;
2854 	/* Num Tx outstanding on device */
2855 	qdf_atomic_t num_tx_outstanding;
2856 	/* Num Tx exception on device */
2857 	qdf_atomic_t num_tx_exception;
2858 	/* Num Tx allowed */
2859 	uint32_t num_tx_allowed;
2860 	/* Num Regular Tx allowed */
2861 	uint32_t num_reg_tx_allowed;
2862 	/* Num Tx allowed for special frames*/
2863 	uint32_t num_tx_spl_allowed;
2864 	/* Preferred HW mode */
2865 	uint8_t preferred_hw_mode;
2866 
2867 	/**
2868 	 * Flag to indicate whether WAR to address single cache entry
2869 	 * invalidation bug is enabled or not
2870 	 */
2871 	bool is_rx_fse_full_cache_invalidate_war_enabled;
2872 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
2873 	/**
2874 	 * Pointer to DP RX Flow FST at SOC level if
2875 	 * is_rx_flow_search_table_per_pdev is false
2876 	 * TBD: rx_fst[num_macs] if we decide to have per mac FST
2877 	 */
2878 	struct dp_rx_fst *rx_fst;
2879 #ifdef WLAN_SUPPORT_RX_FISA
2880 	uint8_t fisa_enable;
2881 	uint8_t fisa_lru_del_enable;
2882 	/**
2883 	 * Params used for controlling the fisa aggregation dynamically
2884 	 */
2885 	struct {
2886 		qdf_atomic_t skip_fisa;
2887 		uint8_t fisa_force_flush[MAX_REO_DEST_RINGS];
2888 	} skip_fisa_param;
2889 
2890 	/**
2891 	 * CMEM address and size for FST in CMEM, This is the address
2892 	 * shared during init time.
2893 	 */
2894 	uint64_t fst_cmem_base;
2895 	uint64_t fst_cmem_size;
2896 #endif
2897 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
2898 	/* SG supported for msdu continued packets from wbm release ring */
2899 	bool wbm_release_desc_rx_sg_support;
2900 	bool peer_map_attach_success;
2901 	/* Flag to disable mac1 ring interrupts */
2902 	bool disable_mac1_intr;
2903 	/* Flag to disable mac2 ring interrupts */
2904 	bool disable_mac2_intr;
2905 
2906 	struct {
2907 		/* 1st msdu in sg for msdu continued packets in wbm rel ring */
2908 		bool wbm_is_first_msdu_in_sg;
2909 		/* Wbm sg list head */
2910 		qdf_nbuf_t wbm_sg_nbuf_head;
2911 		/* Wbm sg list tail */
2912 		qdf_nbuf_t wbm_sg_nbuf_tail;
2913 		uint32_t wbm_sg_desc_msdu_len;
2914 	} wbm_sg_param;
2915 	/* Number of msdu exception descriptors */
2916 	uint32_t num_msdu_exception_desc;
2917 
2918 	/* RX buffer params */
2919 	struct rx_buff_pool rx_buff_pool[MAX_PDEV_CNT];
2920 	struct rx_refill_buff_pool rx_refill_buff_pool;
2921 	/* Save recent operation related variable */
2922 	struct dp_last_op_info last_op_info;
2923 	TAILQ_HEAD(, dp_peer) inactive_peer_list;
2924 	qdf_spinlock_t inactive_peer_list_lock;
2925 	TAILQ_HEAD(, dp_vdev) inactive_vdev_list;
2926 	qdf_spinlock_t inactive_vdev_list_lock;
2927 	/* lock to protect vdev_id_map table*/
2928 	qdf_spinlock_t vdev_map_lock;
2929 
2930 	/* Flow Search Table is in CMEM */
2931 	bool fst_in_cmem;
2932 
2933 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
2934 	struct dp_swlm swlm;
2935 #endif
2936 
2937 #ifdef FEATURE_RUNTIME_PM
2938 	/* DP Rx timestamp */
2939 	qdf_time_t rx_last_busy;
2940 	/* Dp runtime refcount */
2941 	qdf_atomic_t dp_runtime_refcount;
2942 	/* Dp tx pending count in RTPM */
2943 	qdf_atomic_t tx_pending_rtpm;
2944 #endif
2945 	/* Invalid buffer that allocated for RX buffer */
2946 	qdf_nbuf_queue_t invalid_buf_queue;
2947 
2948 #ifdef FEATURE_MEC
2949 	/** @mec_lock: spinlock for MEC table */
2950 	qdf_spinlock_t mec_lock;
2951 	/** @mec_cnt: number of active mec entries */
2952 	qdf_atomic_t mec_cnt;
2953 	struct {
2954 		/** @mask: mask bits */
2955 		uint32_t mask;
2956 		/** @idx_bits: index to shift bits */
2957 		uint32_t idx_bits;
2958 		/** @bins: MEC table */
2959 		TAILQ_HEAD(, dp_mec_entry) * bins;
2960 	} mec_hash;
2961 #endif
2962 
2963 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
2964 	qdf_list_t reo_desc_deferred_freelist;
2965 	qdf_spinlock_t reo_desc_deferred_freelist_lock;
2966 	bool reo_desc_deferred_freelist_init;
2967 #endif
2968 	/* BM id for first WBM2SW ring */
2969 	uint32_t wbm_sw0_bm_id;
2970 
2971 	/* Store arch_id from device_id */
2972 	uint16_t arch_id;
2973 
2974 	/* link desc ID start per device type */
2975 	uint32_t link_desc_id_start;
2976 
2977 	/* CMEM buffer target reserved for host usage */
2978 	uint64_t cmem_base;
2979 	/* CMEM size in bytes */
2980 	uint64_t cmem_total_size;
2981 	/* CMEM free size in bytes */
2982 	uint64_t cmem_avail_size;
2983 
2984 	/* SOC level feature flags */
2985 	struct dp_soc_features features;
2986 
2987 #ifdef WIFI_MONITOR_SUPPORT
2988 	struct dp_mon_soc *monitor_soc;
2989 #endif
2990 	uint8_t rxdma2sw_rings_not_supported:1,
2991 		wbm_sg_last_msdu_war:1,
2992 		mec_fw_offload:1,
2993 		multi_peer_grp_cmd_supported:1;
2994 
2995 	/* Number of Rx refill rings */
2996 	uint8_t num_rx_refill_buf_rings;
2997 #ifdef FEATURE_RUNTIME_PM
2998 	/* flag to indicate vote for runtime_pm for high tput case */
2999 	qdf_atomic_t rtpm_high_tput_flag;
3000 #endif
3001 	/* Buffer manager ID for idle link descs */
3002 	uint8_t idle_link_bm_id;
3003 	qdf_atomic_t ref_count;
3004 
3005 	unsigned long vdev_stats_id_map;
3006 	bool txmon_hw_support;
3007 
3008 #ifdef DP_UMAC_HW_RESET_SUPPORT
3009 	struct dp_soc_umac_reset_ctx umac_reset_ctx;
3010 #endif
3011 	/* PPDU to link_id mapping parameters */
3012 	uint8_t link_id_offset;
3013 	uint8_t link_id_bits;
3014 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
3015 	/* A flag used to decide the switching of rx link speed */
3016 	bool high_throughput;
3017 #endif
3018 	bool is_tx_pause;
3019 
3020 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
3021 	/* number of IPv4 flows inserted */
3022 	qdf_atomic_t ipv4_fse_cnt;
3023 	/* number of IPv6 flows inserted */
3024 	qdf_atomic_t ipv6_fse_cnt;
3025 #endif
3026 	/* Reo queue ref table items */
3027 	struct reo_queue_ref_table reo_qref;
3028 };
3029 
3030 #ifdef IPA_OFFLOAD
3031 /**
3032  * struct dp_ipa_resources - Resources needed for IPA
3033  * @tx_ring:
3034  * @tx_num_alloc_buffer:
3035  * @tx_comp_ring:
3036  * @rx_rdy_ring:
3037  * @rx_refill_ring:
3038  * @tx_comp_doorbell_paddr: IPA UC doorbell registers paddr
3039  * @tx_comp_doorbell_vaddr:
3040  * @rx_ready_doorbell_paddr:
3041  * @is_db_ddr_mapped:
3042  * @tx_alt_ring:
3043  * @tx_alt_ring_num_alloc_buffer:
3044  * @tx_alt_comp_ring:
3045  * @tx_alt_comp_doorbell_paddr: IPA UC doorbell registers paddr
3046  * @tx_alt_comp_doorbell_vaddr:
3047  * @rx_alt_rdy_ring:
3048  * @rx_alt_refill_ring:
3049  * @rx_alt_ready_doorbell_paddr:
3050  */
3051 struct dp_ipa_resources {
3052 	qdf_shared_mem_t tx_ring;
3053 	uint32_t tx_num_alloc_buffer;
3054 
3055 	qdf_shared_mem_t tx_comp_ring;
3056 	qdf_shared_mem_t rx_rdy_ring;
3057 	qdf_shared_mem_t rx_refill_ring;
3058 
3059 	/* IPA UC doorbell registers paddr */
3060 	qdf_dma_addr_t tx_comp_doorbell_paddr;
3061 	uint32_t *tx_comp_doorbell_vaddr;
3062 	qdf_dma_addr_t rx_ready_doorbell_paddr;
3063 
3064 	bool is_db_ddr_mapped;
3065 
3066 #ifdef IPA_WDI3_TX_TWO_PIPES
3067 	qdf_shared_mem_t tx_alt_ring;
3068 	uint32_t tx_alt_ring_num_alloc_buffer;
3069 	qdf_shared_mem_t tx_alt_comp_ring;
3070 
3071 	/* IPA UC doorbell registers paddr */
3072 	qdf_dma_addr_t tx_alt_comp_doorbell_paddr;
3073 	uint32_t *tx_alt_comp_doorbell_vaddr;
3074 #endif
3075 #ifdef IPA_WDI3_VLAN_SUPPORT
3076 	qdf_shared_mem_t rx_alt_rdy_ring;
3077 	qdf_shared_mem_t rx_alt_refill_ring;
3078 	qdf_dma_addr_t rx_alt_ready_doorbell_paddr;
3079 #endif
3080 };
3081 #endif
3082 
3083 #define MAX_RX_MAC_RINGS 2
3084 /* Same as NAC_MAX_CLIENT */
3085 #define DP_NAC_MAX_CLIENT  24
3086 
3087 /*
3088  * 24 bits cookie size
3089  * 10 bits page id 0 ~ 1023 for MCL
3090  * 3 bits page id 0 ~ 7 for WIN
3091  * WBM Idle List Desc size = 128,
3092  * Num descs per page = 4096/128 = 32 for MCL
3093  * Num descs per page = 2MB/128 = 16384 for WIN
3094  */
3095 /*
3096  * Macros to setup link descriptor cookies - for link descriptors, we just
3097  * need first 3 bits to store bank/page ID for WIN. The
3098  * remaining bits will be used to set a unique ID, which will
3099  * be useful in debugging
3100  */
3101 #ifdef MAX_ALLOC_PAGE_SIZE
3102 #if PAGE_SIZE == 4096
3103 #define LINK_DESC_PAGE_ID_MASK  0x007FE0
3104 #define LINK_DESC_ID_SHIFT      5
3105 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x8000
3106 #elif PAGE_SIZE == 65536
3107 #define LINK_DESC_PAGE_ID_MASK  0x007E00
3108 #define LINK_DESC_ID_SHIFT      9
3109 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x800
3110 #else
3111 #error "Unsupported kernel PAGE_SIZE"
3112 #endif
3113 #define LINK_DESC_COOKIE(_desc_id, _page_id, _desc_id_start) \
3114 	((((_page_id) + (_desc_id_start)) << LINK_DESC_ID_SHIFT) | (_desc_id))
3115 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
3116 	(((_cookie) & LINK_DESC_PAGE_ID_MASK) >> LINK_DESC_ID_SHIFT)
3117 #else
3118 #define LINK_DESC_PAGE_ID_MASK  0x7
3119 #define LINK_DESC_ID_SHIFT      3
3120 #define LINK_DESC_COOKIE(_desc_id, _page_id, _desc_id_start) \
3121 	((((_desc_id) + (_desc_id_start)) << LINK_DESC_ID_SHIFT) | (_page_id))
3122 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
3123 	((_cookie) & LINK_DESC_PAGE_ID_MASK)
3124 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x8000
3125 #endif
3126 #define LINK_DESC_ID_START_20_BITS_COOKIE 0x4000
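
/*
 * Worked example (illustrative, not part of the driver): with the
 * default (non-MAX_ALLOC_PAGE_SIZE) definitions above, the page ID
 * occupies the low LINK_DESC_ID_SHIFT bits and the unique descriptor ID
 * sits above it:
 *
 *	LINK_DESC_COOKIE(5, 3, LINK_DESC_ID_START_21_BITS_COOKIE)
 *		= ((5 + 0x8000) << 3) | 3
 *		= 0x40028 | 0x3 = 0x4002b
 *
 *	LINK_DESC_COOKIE_PAGE_ID(0x4002b) = 0x4002b & 0x7 = 3
 *
 * The MAX_ALLOC_PAGE_SIZE variants swap which of desc_id and page_id
 * goes into the high and low fields, but they follow the same
 * compose/extract pattern.
 */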
3127 
3128 /* same as ieee80211_nac_param */
3129 enum dp_nac_param_cmd {
3130 	/* IEEE80211_NAC_PARAM_ADD */
3131 	DP_NAC_PARAM_ADD = 1,
3132 	/* IEEE80211_NAC_PARAM_DEL */
3133 	DP_NAC_PARAM_DEL,
3134 	/* IEEE80211_NAC_PARAM_LIST */
3135 	DP_NAC_PARAM_LIST,
3136 };
3137 
3138 /**
3139  * struct dp_neighbour_peer - neighbour peer list type for smart mesh
3140  * @neighbour_peers_macaddr: neighbour peer's mac address
3141  * @vdev: associated vdev
3142  * @ast_entry: ast_entry for neighbour peer
3143  * @rssi: rssi value
3144  * @neighbour_peer_list_elem: neighbour peer list TAILQ element
3145  */
3146 struct dp_neighbour_peer {
3147 	union dp_align_mac_addr neighbour_peers_macaddr;
3148 	struct dp_vdev *vdev;
3149 	struct dp_ast_entry *ast_entry;
3150 	uint8_t rssi;
3151 	TAILQ_ENTRY(dp_neighbour_peer) neighbour_peer_list_elem;
3152 };
3153 
3154 #ifdef WLAN_TX_PKT_CAPTURE_ENH
3155 #define WLAN_TX_PKT_CAPTURE_ENH 1
3156 #define DP_TX_PPDU_PROC_THRESHOLD 8
3157 #define DP_TX_PPDU_PROC_TIMEOUT 10
3158 #endif
3159 
3160 /**
3161  * struct ppdu_info - PPDU Status info descriptor
3162  * @ppdu_id: Unique ppduid assigned by firmware for every tx packet
3163  * @sched_cmdid: schedule command id, which will be same in a burst
3164  * @max_ppdu_id: wrap around for ppdu id
3165  * @tsf_l32:
3166  * @tlv_bitmap:
3167  * @last_tlv_cnt: Keep track for missing ppdu tlvs
3168  * @last_user: last ppdu processed for user
3169  * @is_ampdu: set if Ampdu aggregate
3170  * @nbuf: ppdu descriptor payload
3171  * @ppdu_desc: ppdu descriptor
3172  * @ulist: Union of lists
3173  * @ppdu_info_dlist_elem: linked list of ppdu tlvs
3174  * @ppdu_info_slist_elem: Singly linked list (queue) of ppdu tlvs
3175  * @ppdu_info_list_elem: linked list of ppdu tlvs
3176  * @ppdu_info_queue_elem: Singly linked list (queue) of ppdu tlvs
3177  * @compltn_common_tlv: Successful tlv counter from COMPLTN COMMON tlv
3178  * @ack_ba_tlv: Successful tlv counter from ACK BA tlv
3179  * @done:
3180  */
3181 struct ppdu_info {
3182 	uint32_t ppdu_id;
3183 	uint32_t sched_cmdid;
3184 	uint32_t max_ppdu_id;
3185 	uint32_t tsf_l32;
3186 	uint16_t tlv_bitmap;
3187 	uint16_t last_tlv_cnt;
3188 	uint16_t last_user:8,
3189 		 is_ampdu:1;
3190 	qdf_nbuf_t nbuf;
3191 	struct cdp_tx_completion_ppdu *ppdu_desc;
3192 #ifdef WLAN_TX_PKT_CAPTURE_ENH
3193 	union {
3194 		TAILQ_ENTRY(ppdu_info) ppdu_info_dlist_elem;
3195 		STAILQ_ENTRY(ppdu_info) ppdu_info_slist_elem;
3196 	} ulist;
3197 #define ppdu_info_list_elem ulist.ppdu_info_dlist_elem
3198 #define ppdu_info_queue_elem ulist.ppdu_info_slist_elem
3199 #else
3200 	TAILQ_ENTRY(ppdu_info) ppdu_info_list_elem;
3201 #endif
3202 	uint8_t compltn_common_tlv;
3203 	uint8_t ack_ba_tlv;
3204 	bool done;
3205 };
3206 
3207 /**
3208  * struct msdu_completion_info - wbm msdu completion info
3209  * @ppdu_id: Unique ppduid assigned by firmware for every tx packet
3210  * @peer_id: peer_id
3211  * @tid: tid which used during transmit
3212  * @first_msdu: first msdu indication
3213  * @last_msdu: last msdu indication
3214  * @msdu_part_of_amsdu: msdu part of amsdu
3215  * @transmit_cnt: retried count
3216  * @status: transmit status
3217  * @tsf: timestamp which it transmitted
3218  */
3219 struct msdu_completion_info {
3220 	uint32_t ppdu_id;
3221 	uint16_t peer_id;
3222 	uint8_t tid;
3223 	uint8_t first_msdu:1,
3224 		last_msdu:1,
3225 		msdu_part_of_amsdu:1;
3226 	uint8_t transmit_cnt;
3227 	uint8_t status;
3228 	uint32_t tsf;
3229 };
3230 
3231 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
3232 struct rx_protocol_tag_map {
3233 	/* This is the user configured tag for the said protocol type */
3234 	uint16_t tag;
3235 };
3236 
3237 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
3238 /**
3239  * struct rx_protocol_tag_stats - protocol statistics
3240  * @tag_ctr: number of rx msdus matching this tag
3241  */
3242 struct rx_protocol_tag_stats {
3243 	uint32_t tag_ctr;
3244 };
3245 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
3246 
3247 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
3248 
3249 #ifdef WLAN_RX_PKT_CAPTURE_ENH
3250 /* Template data to be set for Enhanced RX Monitor packets */
3251 #define RX_MON_CAP_ENH_TRAILER 0xdeadc0dedeadda7a
3252 
3253 /**
3254  * struct dp_rx_mon_enh_trailer_data - Data structure to set a known pattern
3255  * at end of each MSDU in monitor-lite mode
3256  * @reserved1: reserved for future use
3257  * @reserved2: reserved for future use
3258  * @flow_tag: flow tag value read from skb->cb
3259  * @protocol_tag: protocol tag value read from skb->cb
3260  */
3261 struct dp_rx_mon_enh_trailer_data {
3262 	uint16_t reserved1;
3263 	uint16_t reserved2;
3264 	uint16_t flow_tag;
3265 	uint16_t protocol_tag;
3266 };
3267 #endif /* WLAN_RX_PKT_CAPTURE_ENH */
3268 
3269 #ifdef HTT_STATS_DEBUGFS_SUPPORT
3270 /* Number of debugfs entries created for HTT stats */
3271 #define PDEV_HTT_STATS_DBGFS_SIZE HTT_DBG_NUM_EXT_STATS
3272 
3273 /**
3274  * struct pdev_htt_stats_dbgfs_priv - Structure to maintain debugfs information
3275  * of HTT stats
3276  * @pdev: dp pdev of debugfs entry
3277  * @stats_id: stats id of debugfs entry
3278  */
3279 struct pdev_htt_stats_dbgfs_priv {
3280 	struct dp_pdev *pdev;
3281 	uint16_t stats_id;
3282 };
3283 
3284 /**
3285  * struct pdev_htt_stats_dbgfs_cfg - PDEV level data structure for debugfs
3286  * support for HTT stats
3287  * @debugfs_entry: qdf_debugfs directory entry
3288  * @m: qdf debugfs file handler
3289  * @pdev_htt_stats_dbgfs_ops: File operations of entry created
3290  * @priv: HTT stats debugfs private object
3291  * @htt_stats_dbgfs_event: HTT stats event for debugfs support
3292  * @lock: HTT stats debugfs lock
3293  * @htt_stats_dbgfs_msg_process: Function callback to print HTT stats
3294  */
3295 struct pdev_htt_stats_dbgfs_cfg {
3296 	qdf_dentry_t debugfs_entry[PDEV_HTT_STATS_DBGFS_SIZE];
3297 	qdf_debugfs_file_t m;
3298 	struct qdf_debugfs_fops
3299 			pdev_htt_stats_dbgfs_ops[PDEV_HTT_STATS_DBGFS_SIZE - 1];
3300 	struct pdev_htt_stats_dbgfs_priv priv[PDEV_HTT_STATS_DBGFS_SIZE - 1];
3301 	qdf_event_t htt_stats_dbgfs_event;
3302 	qdf_mutex_t lock;
3303 	void (*htt_stats_dbgfs_msg_process)(void *data, A_INT32 len);
3304 };
3305 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
3306 
3307 struct dp_srng_ring_state {
3308 	enum hal_ring_type ring_type;
3309 	uint32_t sw_head;
3310 	uint32_t sw_tail;
3311 	uint32_t hw_head;
3312 	uint32_t hw_tail;
3313 
3314 };
3315 
3316 struct dp_soc_srngs_state {
3317 	uint32_t seq_num;
3318 	uint32_t max_ring_id;
3319 	struct dp_srng_ring_state ring_state[DP_MAX_SRNGS];
3320 	TAILQ_ENTRY(dp_soc_srngs_state) list_elem;
3321 };
3322 
3323 #ifdef WLAN_FEATURE_11BE_MLO
3324 /**
 * struct dp_mlo_sync_timestamp - PDEV level data structure for storing
3325  * MLO timestamp received via HTT msg.
3326  * @msg_type: This would be set to HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND
3327  * @pdev_id: pdev id
3328  * @chip_id: chip id
3329  * @mac_clk_freq: mac clock frequency of the mac HW block in MHz
3330  * @sync_tstmp_lo_us: lower 32 bits of the WLAN global time stamp (in us) at
3331  *                    which last sync interrupt was received
3332  * @sync_tstmp_hi_us: upper 32 bits of the WLAN global time stamp (in us) at
3333  *                    which last sync interrupt was received
3334  * @mlo_offset_lo_us: lower 32 bits of the MLO time stamp offset in us
3335  * @mlo_offset_hi_us: upper 32 bits of the MLO time stamp offset in us
3336  * @mlo_offset_clks:  MLO time stamp offset in clock ticks for sub us
3337  * @mlo_comp_us:      MLO time stamp compensation applied in us
3338  * @mlo_comp_clks:    MLO time stamp compensation applied in clock ticks
3339  *                    for sub us resolution
3340  * @mlo_comp_timer:   period of MLO compensation timer at which compensation
3341  *                    is applied, in us
3342  */
3343 struct dp_mlo_sync_timestamp {
3344 	uint32_t msg_type:8,
3345 		 pdev_id:2,
3346 		 chip_id:2,
3347 		 rsvd1:4,
3348 		 mac_clk_freq:16;
3349 	uint32_t sync_tstmp_lo_us;
3350 	uint32_t sync_tstmp_hi_us;
3351 	uint32_t mlo_offset_lo_us;
3352 	uint32_t mlo_offset_hi_us;
3353 	uint32_t mlo_offset_clks;
3354 	uint32_t mlo_comp_us:16,
3355 		 mlo_comp_clks:10,
3356 		 rsvd2:6;
3357 	uint32_t mlo_comp_timer:22,
3358 		 rsvd3:10;
3359 };
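/*
 * Illustrative sketch (not part of the driver): the 64-bit MLO timestamp
 * offset is carried as two 32-bit halves in struct dp_mlo_sync_timestamp;
 * this hypothetical helper shows how a consumer could recombine them.
 */
static inline uint64_t
dp_mlo_sync_timestamp_offset_us_example(const struct dp_mlo_sync_timestamp *ts)
{
	/* Recombine upper/lower 32 bits of the MLO offset (in us) */
	return ((uint64_t)ts->mlo_offset_hi_us << 32) | ts->mlo_offset_lo_us;
}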
3360 #endif
3361 
3362 /* PDEV level structure for data path */
3363 struct dp_pdev {
3364 	/**
3365 	 * Re-use Memory Section Starts
3366 	 */
3367 
3368 	/* PDEV Id */
3369 	uint8_t pdev_id;
3370 
3371 	/* LMAC Id */
3372 	uint8_t lmac_id;
3373 
3374 	/* Target pdev  Id */
3375 	uint8_t target_pdev_id;
3376 
3377 	bool pdev_deinit;
3378 
3379 	/* TXRX SOC handle */
3380 	struct dp_soc *soc;
3381 
3382 	/* pdev status down or up required to handle dynamic hw
3383 	 * mode switch between DBS and DBS_SBS.
3384 	 * 1 = down
3385 	 * 0 = up
3386 	 */
3387 	bool is_pdev_down;
3388 
3389 	/* Enhanced Stats is enabled */
3390 	bool enhanced_stats_en;
3391 
3392 	/* Flag to indicate fast RX */
3393 	bool rx_fast_flag;
3394 
3395 	/* Second ring used to replenish rx buffers */
3396 	struct dp_srng rx_refill_buf_ring2;
3397 #ifdef IPA_WDI3_VLAN_SUPPORT
3398 	/* Third ring used to replenish rx buffers */
3399 	struct dp_srng rx_refill_buf_ring3;
3400 #endif
3401 
3402 #ifdef FEATURE_DIRECT_LINK
3403 	/* Fourth ring used to replenish rx buffers */
3404 	struct dp_srng rx_refill_buf_ring4;
3405 #endif
3406 
3407 	/* Empty ring used by firmware to post rx buffers to the MAC */
3408 	struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];
3409 
3410 	int ch_band_lmac_id_mapping[REG_BAND_UNKNOWN];
3411 
3412 	/* wlan_cfg pdev ctxt */
3413 	struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx;
3414 
3415 	/**
3416 	 * TODO: See if we need a ring map here for LMAC rings.
3417 	 * 1. Monitor rings are currently planning to be processed on receiving
3418 	 * PPDU end interrupts and hence won't need ring based interrupts.
3419 	 * 2. Rx buffer rings will be replenished during REO destination
3420 	 * processing and don't require regular interrupt handling - we will
3421 	 * only handle low water mark interrupts which are not expected
3422 	 * frequently
3423 	 */
3424 
3425 	/* VDEV list */
3426 	TAILQ_HEAD(, dp_vdev) vdev_list;
3427 
3428 	/* vdev list lock */
3429 	qdf_spinlock_t vdev_list_lock;
3430 
3431 	/* Number of vdevs this device has */
3432 	uint16_t vdev_count;
3433 
3434 	/* PDEV transmit lock */
3435 	qdf_spinlock_t tx_lock;
3436 
3437 	/* tx_mutex for multicast enhancement (ME) */
3438 	DP_MUTEX_TYPE tx_mutex;
3439 
3440 	/* msdu chain head & tail */
3441 	qdf_nbuf_t invalid_peer_head_msdu;
3442 	qdf_nbuf_t invalid_peer_tail_msdu;
3443 
3444 	/* Band steering  */
3445 	/* TBD */
3446 
3447 	/* PDEV level data path statistics */
3448 	struct cdp_pdev_stats stats;
3449 
3450 	/* Global RX decap mode for the device */
3451 	enum htt_pkt_type rx_decap_mode;
3452 
3453 	qdf_atomic_t num_tx_outstanding;
3454 	int32_t tx_descs_max;
3455 
3456 	qdf_atomic_t num_tx_exception;
3457 
3458 	/* MCL specific local peer handle */
3459 	struct {
3460 		uint8_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
3461 		uint8_t freelist;
3462 		qdf_spinlock_t lock;
3463 		struct dp_peer *map[OL_TXRX_NUM_LOCAL_PEER_IDS];
3464 	} local_peer_ids;
3465 
3466 	/* dscp_tid_map */
3467 	uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX];
3468 
3469 	/* operating channel */
3470 	struct {
3471 		uint8_t num;
3472 		uint8_t band;
3473 		uint16_t freq;
3474 	} operating_channel;
3475 
3476 	/* pool addr for mcast enhance buff */
3477 	struct {
3478 		int size;
3479 		uint32_t paddr;
3480 		char *vaddr;
3481 		struct dp_tx_me_buf_t *freelist;
3482 		int buf_in_use;
3483 		qdf_dma_mem_context(memctx);
3484 	} me_buf;
3485 
3486 	bool hmmc_tid_override_en;
3487 	uint8_t hmmc_tid;
3488 
3489 	/* Number of VAPs with mcast enhancement enabled */
3490 	qdf_atomic_t mc_num_vap_attached;
3491 
3492 	qdf_atomic_t stats_cmd_complete;
3493 
3494 #ifdef IPA_OFFLOAD
3495 	ipa_uc_op_cb_type ipa_uc_op_cb;
3496 	void *usr_ctxt;
3497 	struct dp_ipa_resources ipa_resource;
3498 #endif
3499 
3500 	/* TBD */
3501 
3502 	/* map this pdev to a particular Reo Destination ring */
3503 	enum cdp_host_reo_dest_ring reo_dest;
3504 
3505 	/* WDI event handlers */
3506 	struct wdi_event_subscribe_t **wdi_event_list;
3507 
3508 	bool cfr_rcc_mode;
3509 
3510 	/* enable time latency check for tx completion */
3511 	bool latency_capture_enable;
3512 
3513 	/* enable calculation of delay stats*/
3514 	bool delay_stats_flag;
3515 	void *dp_txrx_handle; /* Advanced data path handle */
3516 	uint32_t ppdu_id;
3517 	bool first_nbuf;
3518 	/* Current noise-floor reading for the pdev channel */
3519 	int16_t chan_noise_floor;
3520 
3521 	/*
3522 	 * For multiradio device, this flag indicates if
3523 	 * this radio is primary or secondary.
3524 	 *
3525 	 * For HK 1.0, this is used for WAR for the AST issue.
3526 	 * HK 1.x mandates creation of only 1 AST entry with same MAC address
3527 	 * across 2 radios. is_primary indicates the radio on which DP should
3528 	 * install HW AST entry if there is a request to add 2 AST entries
3529 	 * with same MAC address across 2 radios
3530 	 */
3531 	uint8_t is_primary;
3532 	struct cdp_tx_sojourn_stats sojourn_stats;
3533 	qdf_nbuf_t sojourn_buf;
3534 
3535 	union dp_rx_desc_list_elem_t *free_list_head;
3536 	union dp_rx_desc_list_elem_t *free_list_tail;
3537 	/* Cached peer_id from htt_peer_details_tlv */
3538 	uint16_t fw_stats_peer_id;
3539 
3540 	/* qdf_event for fw_peer_stats */
3541 	qdf_event_t fw_peer_stats_event;
3542 
3543 	/* qdf_event for fw_stats */
3544 	qdf_event_t fw_stats_event;
3545 
3546 	/* qdf_event for fw_obss_stats */
3547 	qdf_event_t fw_obss_stats_event;
3548 
3549 	/* To check if request is already sent for obss stats */
3550 	bool pending_fw_obss_stats_response;
3551 
3552 	/* User configured max number of tx buffers */
3553 	uint32_t num_tx_allowed;
3554 
3555 	/*
3556 	 * User configured max num of tx buffers excluding the
3557 	 * number of buffers reserved for handling special frames
3558 	 */
3559 	uint32_t num_reg_tx_allowed;
3560 
3561 	/* User configured max number of tx buffers for the special frames*/
3562 	uint32_t num_tx_spl_allowed;
3563 
3564 	/* unique cookie required for peer session */
3565 	uint32_t next_peer_cookie;
3566 
3567 	/*
3568 	 * Run time enabled when the first protocol tag is added,
3569 	 * run time disabled when the last protocol tag is deleted
3570 	 */
3571 	bool  is_rx_protocol_tagging_enabled;
3572 
3573 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
3574 	/*
3575 	 * The protocol type is used as array index to save
3576 	 * user provided tag info
3577 	 */
3578 	struct rx_protocol_tag_map rx_proto_tag_map[RX_PROTOCOL_TAG_MAX];
3579 
3580 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
3581 	/*
3582 	 * Track msdus received from each reo ring separately to avoid
3583 	 * simultaneous writes from different cores
3584 	 */
3585 	struct rx_protocol_tag_stats
3586 		reo_proto_tag_stats[MAX_REO_DEST_RINGS][RX_PROTOCOL_TAG_MAX];
3587 	/* Track msdus received from exception ring separately */
3588 	struct rx_protocol_tag_stats
3589 		rx_err_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
3590 	struct rx_protocol_tag_stats
3591 		mon_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
3592 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
3593 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
3594 
3595 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
3596 	/**
3597 	 * Pointer to DP Flow FST at SOC level if
3598 	 * is_rx_flow_search_table_per_pdev is true
3599 	 */
3600 	struct dp_rx_fst *rx_fst;
3601 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
3602 
3603 #ifdef FEATURE_TSO_STATS
3604 	/* TSO Id to index into TSO packet information */
3605 	qdf_atomic_t tso_idx;
3606 #endif /* FEATURE_TSO_STATS */
3607 
3608 #ifdef WLAN_SUPPORT_DATA_STALL
3609 	data_stall_detect_cb data_stall_detect_callback;
3610 #endif /* WLAN_SUPPORT_DATA_STALL */
3611 
3612 	/* flag to indicate whether LRO hash command has been sent to FW */
3613 	uint8_t is_lro_hash_configured;
3614 
3615 #ifdef HTT_STATS_DEBUGFS_SUPPORT
3616 	/* HTT stats debugfs params */
3617 	struct pdev_htt_stats_dbgfs_cfg *dbgfs_cfg;
3618 #endif
3619 	struct {
3620 		qdf_work_t work;
3621 		qdf_workqueue_t *work_queue;
3622 		uint32_t seq_num;
3623 		uint8_t queue_depth;
3624 		qdf_spinlock_t list_lock;
3625 
3626 		TAILQ_HEAD(, dp_soc_srngs_state) list;
3627 	} bkp_stats;
3628 #ifdef WIFI_MONITOR_SUPPORT
3629 	struct dp_mon_pdev *monitor_pdev;
3630 #endif
3631 #ifdef WLAN_FEATURE_11BE_MLO
3632 	struct dp_mlo_sync_timestamp timestamp;
3633 #endif
3634 	/* Is isolation mode enabled */
3635 	bool  isolation;
3636 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
3637 	uint8_t is_first_wakeup_packet;
3638 #endif
3639 #ifdef CONNECTIVITY_PKTLOG
3640 	/* packetdump callback functions */
3641 	ol_txrx_pktdump_cb dp_tx_packetdump_cb;
3642 	ol_txrx_pktdump_cb dp_rx_packetdump_cb;
3643 #endif
3644 
3645 	/* Firmware Stats for TLV received from Firmware */
3646 	uint64_t fw_stats_tlv_bitmap_rcvd;
3647 
3648 	/* For Checking Pending Firmware Response */
3649 	bool pending_fw_stats_response;
3650 };
3651 
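/*
 * Illustrative sketch (not part of the driver): looks up the TID for a frame
 * using the per-pdev DSCP-to-TID table above. The helper name is
 * hypothetical; map_id would come from the vdev's dscp_tid_map_id.
 */
static inline uint8_t
dp_pdev_dscp_to_tid_example(struct dp_pdev *pdev, uint8_t map_id, uint8_t tos)
{
	/* DSCP occupies the upper six bits of the IPv4 TOS byte */
	uint8_t dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;

	return pdev->dscp_tid_map[map_id][dscp];
}
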
3652 struct dp_peer;
3653 
3654 #ifdef DP_RX_UDP_OVER_PEER_ROAM
3655 #define WLAN_ROAM_PEER_AUTH_STATUS_NONE 0x0
3656 /*
3657  * This macro is equivalent to macro ROAM_AUTH_STATUS_AUTHENTICATED used
3658  * in connection mgr
3659  */
3660 #define WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED 0x2
3661 #endif
3662 
3663 /* VDEV structure for data path state */
3664 struct dp_vdev {
3665 	/* OS device abstraction */
3666 	qdf_device_t osdev;
3667 
3668 	/* physical device that is the parent of this virtual device */
3669 	struct dp_pdev *pdev;
3670 
3671 	/* VDEV operating mode */
3672 	enum wlan_op_mode opmode;
3673 
3674 	/* VDEV subtype */
3675 	enum wlan_op_subtype subtype;
3676 
3677 	/* Tx encapsulation type for this VAP */
3678 	enum htt_cmn_pkt_type tx_encap_type;
3679 
3680 	/* Rx Decapsulation type for this VAP */
3681 	enum htt_cmn_pkt_type rx_decap_type;
3682 
3683 	/* WDS enabled */
3684 	bool wds_enabled;
3685 
3686 	/* MEC enabled */
3687 	bool mec_enabled;
3688 
3689 #ifdef QCA_SUPPORT_WDS_EXTENDED
3690 	bool wds_ext_enabled;
3691 	bool drop_tx_mcast;
3692 #endif /* QCA_SUPPORT_WDS_EXTENDED */
3693 	bool drop_3addr_mcast;
3694 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
3695 	bool skip_bar_update;
3696 	unsigned long skip_bar_update_last_ts;
3697 #endif
3698 	/* WDS Aging timer period */
3699 	uint32_t wds_aging_timer_val;
3700 
3701 	/* NAWDS enabled */
3702 	bool nawds_enabled;
3703 
3704 	/* Multicast enhancement enabled */
3705 	uint8_t mcast_enhancement_en;
3706 
3707 	/* IGMP multicast enhancement enabled */
3708 	uint8_t igmp_mcast_enhanc_en;
3709 
3710 	/* vdev_id - ID used to specify a particular vdev to the target */
3711 	uint8_t vdev_id;
3712 
3713 	/* Default HTT meta data for this VDEV */
3714 	/* TBD: check alignment constraints */
3715 	uint16_t htt_tcl_metadata;
3716 
3717 	/* vdev lmac_id */
3718 	uint8_t lmac_id;
3719 
3720 	/* vdev bank_id */
3721 	uint8_t bank_id;
3722 
3723 	/* Mesh mode vdev */
3724 	uint32_t mesh_vdev;
3725 
3726 	/* Mesh mode rx filter setting */
3727 	uint32_t mesh_rx_filter;
3728 
3729 	/* DSCP-TID mapping table ID */
3730 	uint8_t dscp_tid_map_id;
3731 
3732 	/* Address search type to be set in TX descriptor */
3733 	uint8_t search_type;
3734 
3735 	/*
3736 	 * Flag to indicate if s/w tid classification should be
3737 	 * skipped
3738 	 */
3739 	uint8_t skip_sw_tid_classification;
3740 
3741 	/* Flag to enable peer authorization */
3742 	uint8_t peer_authorize;
3743 
3744 	/* AST hash value for BSS peer in HW valid for STA VAP*/
3745 	uint16_t bss_ast_hash;
3746 
3747 	/* AST hash index for BSS peer in HW valid for STA VAP*/
3748 	uint16_t bss_ast_idx;
3749 
3750 	bool multipass_en;
3751 
3752 	/* Address search flags to be configured in HAL descriptor */
3753 	uint8_t hal_desc_addr_search_flags;
3754 
3755 	/* Handle to the OS shim SW's virtual device */
3756 	ol_osif_vdev_handle osif_vdev;
3757 
3758 	/* MAC address */
3759 	union dp_align_mac_addr mac_addr;
3760 
3761 #ifdef WLAN_FEATURE_11BE_MLO
3762 	/* MLO MAC address corresponding to vdev */
3763 	union dp_align_mac_addr mld_mac_addr;
3764 #if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
3765 	bool mlo_vdev;
3766 #endif
3767 #endif
3768 
3769 	/* node in the pdev's list of vdevs */
3770 	TAILQ_ENTRY(dp_vdev) vdev_list_elem;
3771 
3772 	/* dp_peer list */
3773 	TAILQ_HEAD(, dp_peer) peer_list;
3774 	/* to protect peer_list */
3775 	DP_MUTEX_TYPE peer_list_lock;
3776 
3777 	/* RX call back function to flush GRO packets*/
3778 	ol_txrx_rx_gro_flush_ind_fp osif_gro_flush;
3779 	/* default RX call back function called by dp */
3780 	ol_txrx_rx_fp osif_rx;
3781 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
3782 	/* callback to receive eapol frames */
3783 	ol_txrx_rx_fp osif_rx_eapol;
3784 #endif
3785 	/* callback to deliver rx frames to the OS */
3786 	ol_txrx_rx_fp osif_rx_stack;
3787 	/* Callback to handle rx fisa frames */
3788 	ol_txrx_fisa_rx_fp osif_fisa_rx;
3789 	ol_txrx_fisa_flush_fp osif_fisa_flush;
3790 
3791 	/* call back function to flush out queued rx packets*/
3792 	ol_txrx_rx_flush_fp osif_rx_flush;
3793 	ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap;
3794 	ol_txrx_get_key_fp osif_get_key;
3795 	ol_txrx_tx_free_ext_fp osif_tx_free_ext;
3796 
3797 #ifdef notyet
3798 	/* callback to check if the msdu is an WAI (WAPI) frame */
3799 	ol_rx_check_wai_fp osif_check_wai;
3800 #endif
3801 
3802 	/* proxy arp function */
3803 	ol_txrx_proxy_arp_fp osif_proxy_arp;
3804 
3805 	ol_txrx_mcast_me_fp me_convert;
3806 
3807 	/* completion function used by this vdev*/
3808 	ol_txrx_completion_fp tx_comp;
3809 
3810 	ol_txrx_get_tsf_time get_tsf_time;
3811 
3812 	/* callback to classify critical packets */
3813 	ol_txrx_classify_critical_pkt_fp tx_classify_critical_pkt_cb;
3814 
3815 	/* deferred vdev deletion state */
3816 	struct {
3817 		/* VDEV delete pending */
3818 		int pending;
3819 		/*
3820 		* callback and a context argument to provide a
3821 		* notification for when the vdev is deleted.
3822 		*/
3823 		ol_txrx_vdev_delete_cb callback;
3824 		void *context;
3825 	} delete;
3826 
3827 	/* tx data delivery notification callback function */
3828 	struct {
3829 		ol_txrx_data_tx_cb func;
3830 		void *ctxt;
3831 	} tx_non_std_data_callback;
3832 
3833 
3834 	/* safe mode control to bypass the encrypt and decipher process*/
3835 	uint32_t safemode;
3836 
3837 	/* rx filter related */
3838 	uint32_t drop_unenc;
3839 #ifdef notyet
3840 	privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
3841 	uint32_t filters_num;
3842 #endif
3843 	/* TDLS Link status */
3844 	bool tdls_link_connected;
3845 	bool is_tdls_frame;
3846 
3847 	/* per vdev rx nbuf queue */
3848 	qdf_nbuf_queue_t rxq;
3849 
3850 	uint8_t tx_ring_id;
3851 	struct dp_tx_desc_pool_s *tx_desc;
3852 	struct dp_tx_ext_desc_pool_s *tx_ext_desc;
3853 
3854 	/* Capture timestamp of previous tx packet enqueued */
3855 	uint64_t prev_tx_enq_tstamp;
3856 
3857 	/* Capture timestamp of previous rx packet delivered */
3858 	uint64_t prev_rx_deliver_tstamp;
3859 
3860 	/* VDEV Stats */
3861 	struct cdp_vdev_stats stats;
3862 
3863 	/* Is this a proxySTA VAP */
3864 	uint8_t proxysta_vdev : 1, /* Is this a proxySTA VAP */
3865 		wrap_vdev : 1, /* Is this a QWRAP AP VAP */
3866 		isolation_vdev : 1, /* Is this an isolation VAP */
3867 		reserved : 5; /* Reserved */
3868 
3869 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
3870 	struct dp_tx_desc_pool_s *pool;
3871 #endif
3872 	/* AP BRIDGE enabled */
3873 	bool ap_bridge_enabled;
3874 
3875 	enum cdp_sec_type  sec_type;
3876 
3877 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
3878 	bool raw_mode_war;
3879 
3880 
3881 	/* 8021p PCP-TID mapping table ID */
3882 	uint8_t tidmap_tbl_id;
3883 
3884 	/* 8021p PCP-TID map values */
3885 	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
3886 
3887 	/* TIDmap priority */
3888 	uint8_t tidmap_prty;
3889 
3890 #ifdef QCA_MULTIPASS_SUPPORT
3891 	uint16_t *iv_vlan_map;
3892 
3893 	/* dp_peer special list */
3894 	TAILQ_HEAD(, dp_txrx_peer) mpass_peer_list;
3895 	DP_MUTEX_TYPE mpass_peer_mutex;
3896 #endif
3897 	/* Extended data path handle */
3898 	struct cdp_ext_vdev *vdev_dp_ext_handle;
3899 #ifdef VDEV_PEER_PROTOCOL_COUNT
3900 	/*
3901 	 * Rx-Ingress and Tx-Egress are in the lower level DP layer
3902 	 * Rx-Egress and Tx-ingress are handled in osif layer for DP
3903 	 * So
3904 	 * Rx-Egress and Tx-ingress mask definitions are in OSIF layer
3905 	 * Rx-Ingress and Tx-Egress definitions are here below
3906 	 */
3907 #define VDEV_PEER_PROTOCOL_RX_INGRESS_MASK 1
3908 #define VDEV_PEER_PROTOCOL_TX_INGRESS_MASK 2
3909 #define VDEV_PEER_PROTOCOL_RX_EGRESS_MASK 4
3910 #define VDEV_PEER_PROTOCOL_TX_EGRESS_MASK 8
3911 	bool peer_protocol_count_track;
3912 	int peer_protocol_count_dropmask;
3913 #endif
3914 	/* callback to collect connectivity stats */
3915 	ol_txrx_stats_rx_fp stats_cb;
3916 	uint32_t num_peers;
3917 	/* entry to inactive_list*/
3918 	TAILQ_ENTRY(dp_vdev) inactive_list_elem;
3919 
3920 #ifdef WLAN_SUPPORT_RX_FISA
3921 	/**
3922 	 * Params used for controlling the fisa aggregation dynamically
3923 	 */
3924 	uint8_t fisa_disallowed[MAX_REO_DEST_RINGS];
3925 	uint8_t fisa_force_flushed[MAX_REO_DEST_RINGS];
3926 #endif
3927 	/*
3928 	 * Refcount for VDEV currently incremented when
3929 	 * peer is created for VDEV
3930 	 */
3931 	qdf_atomic_t ref_cnt;
3932 	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
3933 	uint8_t num_latency_critical_conn;
3934 #ifdef WLAN_SUPPORT_MESH_LATENCY
3935 	uint8_t peer_tid_latency_enabled;
3936 	/* tid latency configuration parameters */
3937 	struct {
3938 		uint32_t service_interval;
3939 		uint32_t burst_size;
3940 		uint8_t latency_tid;
3941 	} mesh_tid_latency_config;
3942 #endif
3943 #ifdef WIFI_MONITOR_SUPPORT
3944 	struct dp_mon_vdev *monitor_vdev;
3945 #endif
3946 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
3947 	/* Delta between TQM clock and TSF clock */
3948 	uint32_t delta_tsf;
3949 #endif
3950 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
3951 	/* Indicate if uplink delay report is enabled or not */
3952 	qdf_atomic_t ul_delay_report;
3953 	/* accumulative delay for every TX completion */
3954 	qdf_atomic_t ul_delay_accum;
3955 	/* accumulative number of packets delay has accumulated */
3956 	qdf_atomic_t ul_pkts_accum;
3957 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
3958 
3959 	/* vdev_stats_id - ID used for stats collection by FW from HW*/
3960 	uint8_t vdev_stats_id;
3961 #ifdef HW_TX_DELAY_STATS_ENABLE
3962 	/* hw tx delay stats enable */
3963 	uint8_t hw_tx_delay_stats_enabled;
3964 #endif
3965 #ifdef DP_RX_UDP_OVER_PEER_ROAM
3966 	uint32_t roaming_peer_status;
3967 	union dp_align_mac_addr roaming_peer_mac;
3968 #endif
3969 #ifdef DP_TRAFFIC_END_INDICATION
3970 	/* per vdev feature enable/disable status */
3971 	bool traffic_end_ind_en;
3972 	/* per vdev nbuf queue for traffic end indication packets */
3973 	qdf_nbuf_queue_t end_ind_pkt_q;
3974 #endif
3975 #ifdef FEATURE_DIRECT_LINK
3976 	/* Flag to indicate if to_fw should be set for tx pkts on this vdev */
3977 	bool to_fw;
3978 #endif
3979 };
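
/*
 * Illustrative sketch (not part of the driver): shows how the
 * VDEV_PEER_PROTOCOL_*_MASK bits above could be consulted against
 * peer_protocol_count_dropmask for the Rx-ingress direction handled in this
 * layer. The helper name is hypothetical.
 */
#ifdef VDEV_PEER_PROTOCOL_COUNT
static inline bool
dp_vdev_rx_ingress_proto_drop_example(struct dp_vdev *vdev)
{
	if (!vdev->peer_protocol_count_track)
		return false;

	return !!(vdev->peer_protocol_count_dropmask &
		  VDEV_PEER_PROTOCOL_RX_INGRESS_MASK);
}
#endif /* VDEV_PEER_PROTOCOL_COUNT */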
3980 
3981 enum {
3982 	dp_sec_mcast = 0,
3983 	dp_sec_ucast
3984 };
3985 
3986 #ifdef WDS_VENDOR_EXTENSION
3987 typedef struct {
3988 	uint8_t	wds_tx_mcast_4addr:1,
3989 		wds_tx_ucast_4addr:1,
3990 		wds_rx_filter:1,      /* enforce rx filter */
3991 		wds_rx_ucast_4addr:1, /* when set, accept 4addr unicast frames    */
3992 		wds_rx_mcast_4addr:1;  /* when set, accept 4addr multicast frames  */
3993 
3994 } dp_ecm_policy;
3995 #endif
3996 
3997 /**
3998  * struct dp_peer_cached_bufq - cached_bufq to enqueue rx packets
3999  * @cached_bufq: nbuf list to enqueue rx packets
4000  * @bufq_lock: spinlock for nbuf list access
4001  * @thresh: maximum threshold for number of rx bufs to enqueue
4002  * @entries: number of entries
4003  * @dropped: number of packets dropped
4004  */
4005 struct dp_peer_cached_bufq {
4006 	qdf_list_t cached_bufq;
4007 	qdf_spinlock_t bufq_lock;
4008 	uint32_t thresh;
4009 	uint32_t entries;
4010 	uint32_t dropped;
4011 };
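
/*
 * Illustrative sketch (not part of the driver): threshold accounting that
 * could be done before enqueuing an rx packet into cached_bufq. The helper
 * name is hypothetical; the real enqueue path also links the nbuf into the
 * cached_bufq list under the same lock.
 */
static inline bool
dp_peer_cached_bufq_can_enqueue_example(struct dp_peer_cached_bufq *bufq_info)
{
	bool ok;

	qdf_spin_lock_bh(&bufq_info->bufq_lock);
	if (bufq_info->entries >= bufq_info->thresh) {
		bufq_info->dropped++;
		ok = false;
	} else {
		bufq_info->entries++;
		ok = true;
	}
	qdf_spin_unlock_bh(&bufq_info->bufq_lock);

	return ok;
}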
4012 
4013 /**
4014  * enum dp_peer_ast_flowq
4015  * @DP_PEER_AST_FLOWQ_HI_PRIO: Hi Priority flow queue
4016  * @DP_PEER_AST_FLOWQ_LOW_PRIO: Low priority flow queue
4017  * @DP_PEER_AST_FLOWQ_UDP: flow queue type is UDP
4018  * @DP_PEER_AST_FLOWQ_NON_UDP: flow queue type is Non UDP
4019  * @DP_PEER_AST_FLOWQ_MAX: max value
4020  */
4021 enum dp_peer_ast_flowq {
4022 	DP_PEER_AST_FLOWQ_HI_PRIO,
4023 	DP_PEER_AST_FLOWQ_LOW_PRIO,
4024 	DP_PEER_AST_FLOWQ_UDP,
4025 	DP_PEER_AST_FLOWQ_NON_UDP,
4026 	DP_PEER_AST_FLOWQ_MAX,
4027 };
4028 
4029 /**
4030  * struct dp_ast_flow_override_info - ast override info
4031  * @ast_idx: ast indexes in peer map message
4032  * @ast_valid_mask: ast valid mask for each ast index
4033  * @ast_flow_mask: ast flow mask for each ast index
4034  * @tid_valid_low_pri_mask: per tid mask for low priority flow
4035  * @tid_valid_hi_pri_mask: per tid mask for hi priority flow
4036  */
4037 struct dp_ast_flow_override_info {
4038 	uint16_t ast_idx[DP_PEER_AST_FLOWQ_MAX];
4039 	uint8_t ast_valid_mask;
4040 	uint8_t ast_flow_mask[DP_PEER_AST_FLOWQ_MAX];
4041 	uint8_t tid_valid_low_pri_mask;
4042 	uint8_t tid_valid_hi_pri_mask;
4043 };
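
/*
 * Illustrative sketch (not part of the driver): shows how the per-flow-queue
 * valid bit in ast_valid_mask could be tested when picking an AST index from
 * the peer-map override info. The helper name is hypothetical.
 */
static inline bool
dp_ast_flowq_is_valid_example(struct dp_ast_flow_override_info *info,
			      enum dp_peer_ast_flowq flowq)
{
	if (flowq >= DP_PEER_AST_FLOWQ_MAX)
		return false;

	return !!(info->ast_valid_mask & (1 << flowq));
}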
4044 
4045 /**
4046  * struct dp_peer_ast_params - ast parameters for a msdu flow-queue
4047  * @ast_idx: ast index populated by FW
4048  * @is_valid: ast flow valid mask
4049  * @valid_tid_mask: per tid mask for this ast index
4050  * @flowQ: flow queue id associated with this ast index
4051  */
4052 struct dp_peer_ast_params {
4053 	uint16_t ast_idx;
4054 	uint8_t is_valid;
4055 	uint8_t valid_tid_mask;
4056 	uint8_t flowQ;
4057 };
4058 
4059 #define DP_MLO_FLOW_INFO_MAX	3
4060 
4061 /**
4062  * struct dp_mlo_flow_override_info - Flow override info
4063  * @ast_idx: Primary TCL AST Index
4064  * @ast_idx_valid: Is AST index valid
4065  * @chip_id: CHIP ID
4066  * @tidmask: tidmask
4067  * @cache_set_num: Cache set number
4068  */
4069 struct dp_mlo_flow_override_info {
4070 	uint16_t ast_idx;
4071 	uint8_t ast_idx_valid;
4072 	uint8_t chip_id;
4073 	uint8_t tidmask;
4074 	uint8_t cache_set_num;
4075 };
4076 
4077 /**
4078  * struct dp_mlo_link_info - Link info
4079  * @peer_chip_id: Peer Chip ID
4080  * @vdev_id: Vdev ID
4081  */
4082 struct dp_mlo_link_info {
4083 	uint8_t peer_chip_id;
4084 	uint8_t vdev_id;
4085 };
4086 
4087 #ifdef WLAN_SUPPORT_MSCS
4088 /*MSCS Procedure based macros */
4089 #define IEEE80211_MSCS_MAX_ELEM_SIZE    5
4090 #define IEEE80211_TCLAS_MASK_CLA_TYPE_4  4
4091 /**
4092  * struct dp_peer_mscs_parameter - MSCS database obtained from
4093  * MSCS Request and Response in the control path. This data is used
4094  * by the AP to find out what priority to set based on the tuple
4095  * classification during packet processing.
4096  * @user_priority_bitmap: User priority bitmap obtained during
4097  * handshake
4098  * @user_priority_limit: User priority limit obtained during
4099  * handshake
4100  * @classifier_mask: params to be compared during processing
4101  */
4102 struct dp_peer_mscs_parameter {
4103 	uint8_t user_priority_bitmap;
4104 	uint8_t user_priority_limit;
4105 	uint8_t classifier_mask;
4106 };
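
/*
 * Illustrative sketch (not part of the driver): shows how the negotiated MSCS
 * database could gate a candidate user priority - the bit must be set in the
 * user priority bitmap and must not exceed the negotiated limit. The helper
 * name is hypothetical.
 */
static inline bool
dp_mscs_user_priority_allowed_example(struct dp_peer_mscs_parameter *mscs,
				      uint8_t user_priority)
{
	if (!(mscs->user_priority_bitmap & (1 << user_priority)))
		return false;

	return user_priority <= mscs->user_priority_limit;
}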
4107 #endif
4108 
4109 #ifdef QCA_SUPPORT_WDS_EXTENDED
4110 #define WDS_EXT_PEER_INIT_BIT 0
4111 
4112 /**
4113  * struct dp_wds_ext_peer - wds ext peer structure
4114  * This is used when wds extended feature is enabled
4115  * both compile time and run time. It is created
4116  * when 1st 4 address frame is received from
4117  * wds backhaul.
4118  * @osif_peer: Handle to the OS shim SW's virtual device
4119  * @init: wds ext netdev state
4120  */
4121 struct dp_wds_ext_peer {
4122 	ol_osif_peer_handle osif_peer;
4123 	unsigned long init;
4124 };
4125 #endif /* QCA_SUPPORT_WDS_EXTENDED */
4126 
4127 #ifdef WLAN_SUPPORT_MESH_LATENCY
4128 /*Advanced Mesh latency feature based macros */
4129 
4130 /**
4131  * struct dp_peer_mesh_latency_parameter - Mesh latency related
4132  * parameters. This data is updated per peer per TID based on
4133  * the flow tuple classification in external rule database
4134  * during packet processing.
4135  * @service_interval_dl: Service interval associated with TID in DL
4136  * @burst_size_dl: Burst size additive over multiple flows in DL
4137  * @service_interval_ul: Service interval associated with TID in UL
4138  * @burst_size_ul: Burst size additive over multiple flows in UL
4139  * @ac: custom ac derived from service interval
4140  * @msduq: MSDU queue number within TID
4141  */
4142 struct dp_peer_mesh_latency_parameter {
4143 	uint32_t service_interval_dl;
4144 	uint32_t burst_size_dl;
4145 	uint32_t service_interval_ul;
4146 	uint32_t burst_size_ul;
4147 	uint8_t ac;
4148 	uint8_t msduq;
4149 };
4150 #endif
4151 
4152 #ifdef WLAN_FEATURE_11BE_MLO
4153 /* Max number of links for MLO connection */
4154 #define DP_MAX_MLO_LINKS 3
4155 
4156 /**
4157  * struct dp_peer_link_info - link peer information for MLO
4158  * @mac_addr: Mac address
4159  * @vdev_id: Vdev ID for current link peer
4160  * @is_valid: flag for link peer info valid or not
4161  * @chip_id: chip id
4162  */
4163 struct dp_peer_link_info {
4164 	union dp_align_mac_addr mac_addr;
4165 	uint8_t vdev_id;
4166 	uint8_t is_valid;
4167 	uint8_t chip_id;
4168 };
4169 
4170 /**
4171  * struct dp_mld_link_peers - this structure is used to get link peers
4172  *			      pointer from mld peer
4173  * @link_peers: link peers pointer array
4174  * @num_links: number of link peers fetched
4175  */
4176 struct dp_mld_link_peers {
4177 	struct dp_peer *link_peers[DP_MAX_MLO_LINKS];
4178 	uint8_t num_links;
4179 };
4180 #endif
4181 
4182 typedef void *dp_txrx_ref_handle;
4183 
4184 /**
4185  * struct dp_peer_per_pkt_tx_stats- Peer Tx stats updated in per pkt
4186  *				Tx completion path
4187  * @ucast: Unicast Packet Count
4188  * @mcast: Multicast Packet Count
4189  * @bcast: Broadcast Packet Count
4190  * @nawds_mcast: NAWDS Multicast Packet Count
4191  * @tx_success: Successful Tx Packets
4192  * @nawds_mcast_drop: NAWDS Multicast Drop Count
4193  * @ofdma: Total Packets as ofdma
4194  * @non_amsdu_cnt: Number of MSDUs with no MSDU level aggregation
4195  * @amsdu_cnt: Number of MSDUs part of AMSDU
4196  * @dropped: Dropped packet statistics
4197  * @dropped.fw_rem: Discarded by firmware
4198  * @dropped.fw_rem_notx: firmware_discard_untransmitted
4199  * @dropped.fw_rem_tx: firmware_discard_transmitted
4200  * @dropped.age_out: aged out in mpdu/msdu queues
4201  * @dropped.fw_reason1: discarded by firmware reason 1
4202  * @dropped.fw_reason2: discarded by firmware reason 2
4203  * @dropped.fw_reason3: discarded by firmware reason  3
 * @dropped.fw_rem_queue_disable: discarded by firmware due to queue disable
4204  * @dropped.fw_rem_no_match: dropped due to fw no match command
4205  * @dropped.drop_threshold: dropped due to HW threshold
4206  * @dropped.drop_link_desc_na: dropped due to resource not available in HW
4207  * @dropped.invalid_drop: Invalid msdu drop
4208  * @dropped.mcast_vdev_drop: MCAST drop configured for VDEV in HW
4209  * @dropped.invalid_rr: Invalid TQM release reason
4210  * @failed_retry_count: packets failed due to retry above 802.11 retry limit
4211  * @retry_count: packets successfully sent after one or more retries
4212  * @multiple_retry_count: packets successfully sent after more than one retry
4213  * @no_ack_count: no ack pkt count for different protocols
4214  * @tx_success_twt: Successful Tx Packets in TWT session
4215  * @last_tx_ts: last timestamp in jiffies when tx comp occurred
4216  * @avg_sojourn_msdu: Avg sojourn msdu stat
4217  * @protocol_trace_cnt: per-peer protocol counter
4218  * @release_src_not_tqm: Counter to keep track of release source is not TQM
4219  *			 in TX completion status processing
4220  */
4221 struct dp_peer_per_pkt_tx_stats {
4222 	struct cdp_pkt_info ucast;
4223 	struct cdp_pkt_info mcast;
4224 	struct cdp_pkt_info bcast;
4225 	struct cdp_pkt_info nawds_mcast;
4226 	struct cdp_pkt_info tx_success;
4227 	uint32_t nawds_mcast_drop;
4228 	uint32_t ofdma;
4229 	uint32_t non_amsdu_cnt;
4230 	uint32_t amsdu_cnt;
4231 	struct {
4232 		struct cdp_pkt_info fw_rem;
4233 		uint32_t fw_rem_notx;
4234 		uint32_t fw_rem_tx;
4235 		uint32_t age_out;
4236 		uint32_t fw_reason1;
4237 		uint32_t fw_reason2;
4238 		uint32_t fw_reason3;
4239 		uint32_t fw_rem_queue_disable;
4240 		uint32_t fw_rem_no_match;
4241 		uint32_t drop_threshold;
4242 		uint32_t drop_link_desc_na;
4243 		uint32_t invalid_drop;
4244 		uint32_t mcast_vdev_drop;
4245 		uint32_t invalid_rr;
4246 	} dropped;
4247 	uint32_t failed_retry_count;
4248 	uint32_t retry_count;
4249 	uint32_t multiple_retry_count;
4250 	uint32_t no_ack_count[QDF_PROTO_SUBTYPE_MAX];
4251 	struct cdp_pkt_info tx_success_twt;
4252 	unsigned long last_tx_ts;
4253 	qdf_ewma_tx_lag avg_sojourn_msdu[CDP_DATA_TID_MAX];
4254 #ifdef VDEV_PEER_PROTOCOL_COUNT
4255 	struct protocol_trace_count protocol_trace_cnt[CDP_TRACE_MAX];
4256 #endif
4257 	uint32_t release_src_not_tqm;
4258 };
4259 
4260 /**
4261  * struct dp_peer_extd_tx_stats - Peer Tx stats updated in either
4262  *	per pkt Tx completion path when macro QCA_ENHANCED_STATS_SUPPORT is
4263  *	disabled or in HTT Tx PPDU completion path when macro is enabled
4264  * @stbc: Packets in STBC
4265  * @ldpc: Packets in LDPC
4266  * @retries: Packet retries
4267  * @pkt_type: pkt count for different .11 modes
4268  * @wme_ac_type: Wireless Multimedia type Count
4269  * @excess_retries_per_ac: Excess retry count per Wireless Multimedia AC
4270  * @ampdu_cnt: completion of aggregation
4271  * @non_ampdu_cnt: tx completion not aggregated
4272  * @num_ppdu_cookie_valid: no. of valid ppdu cookies rcvd from FW
4273  * @tx_ppdus: ppdus in tx
4274  * @tx_mpdus_success: mpdus successful in tx
4275  * @tx_mpdus_tried: mpdus tried in tx
4276  * @tx_rate: Tx Rate in kbps
4277  * @last_tx_rate: Last tx rate for unicast packets
4278  * @last_tx_rate_mcs: Tx rate mcs for unicast packets
4279  * @mcast_last_tx_rate: Last tx rate for multicast packets
4280  * @mcast_last_tx_rate_mcs: Last tx rate mcs for multicast
4281  * @rnd_avg_tx_rate: Rounded average tx rate
4282  * @avg_tx_rate: Average TX rate
4283  * @tx_ratecode: Tx rate code of last frame
4284  * @pream_punct_cnt: Preamble Punctured count
4285  * @sgi_count: SGI count
4286  * @nss: Packet count for different num_spatial_stream values
4287  * @bw: Packet Count for different bandwidths
4288  * @ru_start: RU start index
4289  * @ru_tones: RU tones size
4290  * @ru_loc: pkt info for RU location 26/ 52/ 106/ 242/ 484 counter
4291  * @transmit_type: pkt info for tx transmit type
4292  * @mu_group_id: mumimo mu group id
4293  * @last_ack_rssi: RSSI of last acked packet
4294  * @nss_info: NSS 1,2, ...8
4295  * @mcs_info: MCS index
4296  * @bw_info: Bandwidth
4297  *       <enum 0 bw_20_MHz>
4298  *       <enum 1 bw_40_MHz>
4299  *       <enum 2 bw_80_MHz>
4300  *       <enum 3 bw_160_MHz>
4301  * @gi_info: <enum 0     0_8_us_sgi > Legacy normal GI
4302  *       <enum 1     0_4_us_sgi > Legacy short GI
4303  *       <enum 2     1_6_us_sgi > HE related GI
4304  *       <enum 3     3_2_us_sgi > HE
4305  * @preamble_info: preamble
4306  * @tx_ucast_total: total ucast count
4307  * @tx_ucast_success: total ucast success count
4308  * @retries_mpdu: mpdu number of successfully transmitted after retries
4309  * @mpdu_success_with_retries: mpdu retry count in case of successful tx
4310  * @su_be_ppdu_cnt: SU Tx packet count for 11BE
4311  * @mu_be_ppdu_cnt: MU Tx packet count for 11BE
4312  * @punc_bw: MSDU count for punctured bw
4313  * @rts_success: RTS success count
4314  * @rts_failure: RTS failure count
4315  * @bar_cnt: Block ACK Request frame count
4316  * @ndpa_cnt: NDP announcement frame count
4317  * @wme_ac_type_bytes: Wireless Multimedia bytes Count
4318  */
4319 struct dp_peer_extd_tx_stats {
4320 	uint32_t stbc;
4321 	uint32_t ldpc;
4322 	uint32_t retries;
4323 	struct cdp_pkt_type pkt_type[DOT11_MAX];
4324 	uint32_t wme_ac_type[WME_AC_MAX];
4325 	uint32_t excess_retries_per_ac[WME_AC_MAX];
4326 	uint32_t ampdu_cnt;
4327 	uint32_t non_ampdu_cnt;
4328 	uint32_t num_ppdu_cookie_valid;
4329 	uint32_t tx_ppdus;
4330 	uint32_t tx_mpdus_success;
4331 	uint32_t tx_mpdus_tried;
4332 
4333 	uint32_t tx_rate;
4334 	uint32_t last_tx_rate;
4335 	uint32_t last_tx_rate_mcs;
4336 	uint32_t mcast_last_tx_rate;
4337 	uint32_t mcast_last_tx_rate_mcs;
4338 	uint64_t rnd_avg_tx_rate;
4339 	uint64_t avg_tx_rate;
4340 	uint16_t tx_ratecode;
4341 
4342 	uint32_t sgi_count[MAX_GI];
4343 	uint32_t pream_punct_cnt;
4344 	uint32_t nss[SS_COUNT];
4345 	uint32_t bw[MAX_BW];
4346 	uint32_t ru_start;
4347 	uint32_t ru_tones;
4348 	struct cdp_tx_pkt_info ru_loc[MAX_RU_LOCATIONS];
4349 
4350 	struct cdp_tx_pkt_info transmit_type[MAX_TRANSMIT_TYPES];
4351 	uint32_t mu_group_id[MAX_MU_GROUP_ID];
4352 
4353 	uint32_t last_ack_rssi;
4354 
4355 	uint32_t nss_info:4,
4356 		 mcs_info:4,
4357 		 bw_info:4,
4358 		 gi_info:4,
4359 		 preamble_info:4;
4360 
4361 	uint32_t retries_mpdu;
4362 	uint32_t mpdu_success_with_retries;
4363 	struct cdp_pkt_info tx_ucast_total;
4364 	struct cdp_pkt_info tx_ucast_success;
4365 #ifdef WLAN_FEATURE_11BE
4366 	struct cdp_pkt_type su_be_ppdu_cnt;
4367 	struct cdp_pkt_type mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX];
4368 	uint32_t punc_bw[MAX_PUNCTURED_MODE];
4369 #endif
4370 	uint32_t rts_success;
4371 	uint32_t rts_failure;
4372 	uint32_t bar_cnt;
4373 	uint32_t ndpa_cnt;
4374 	uint64_t wme_ac_type_bytes[WME_AC_MAX];
4375 };
4376 
4377 /**
4378  * struct dp_peer_per_pkt_rx_stats - Peer Rx stats updated in per pkt Rx path
4379  * @rcvd_reo: Packets received on the reo ring
4380  * @rx_lmac: Packets received on each lmac
4381  * @unicast: Total unicast packets
4382  * @multicast: Total multicast packets
4383  * @bcast:  Broadcast Packet Count
4384  * @raw: Raw packets received
4385  * @nawds_mcast_drop: Total NAWDS multicast packets dropped
4386  * @mec_drop: Total MEC packets dropped
4387  * @last_rx_ts: last timestamp in jiffies when RX happened
4388  * @intra_bss: Intra BSS statistics
4389  * @intra_bss.pkts: Intra BSS packets received
4390  * @intra_bss.fail: Intra BSS packets failed
4391  * @intra_bss.mdns_no_fwd: Intra BSS MDNS packets not forwarded
4392  * @err: error counters
4393  * @err.mic_err: Rx MIC errors CCMP
4394  * @err.decrypt_err: Rx Decryption Errors CRC
4395  * @err.fcserr: Rx FCS check failed
4396  * @err.pn_err: pn check failed
4397  * @err.oor_err: Rx OOR errors
4398  * @err.jump_2k_err: 2k jump errors
4399  * @err.rxdma_wifi_parse_err: rxdma wifi parse errors
4400  * @non_amsdu_cnt: Number of MSDUs with no MSDU level aggregation
4401  * @amsdu_cnt: Number of MSDUs part of AMSDU
4402  * @rx_retries: retries of packet in rx
4403  * @multipass_rx_pkt_drop: Dropped multipass rx pkt
4404  * @peer_unauth_rx_pkt_drop: Unauth rx packet drops
4405  * @policy_check_drop: policy check drops
4406  * @to_stack_twt: Total packets sent up the stack in TWT session
4407  * @protocol_trace_cnt: per-peer protocol counters
4408  * @mcast_3addr_drop: Number of multicast 3-address packets dropped
4409  * @rx_total: total rx count
4410  */
4411 struct dp_peer_per_pkt_rx_stats {
4412 	struct cdp_pkt_info rcvd_reo[CDP_MAX_RX_RINGS];
4413 	struct cdp_pkt_info rx_lmac[CDP_MAX_LMACS];
4414 	struct cdp_pkt_info unicast;
4415 	struct cdp_pkt_info multicast;
4416 	struct cdp_pkt_info bcast;
4417 	struct cdp_pkt_info raw;
4418 	uint32_t nawds_mcast_drop;
4419 	struct cdp_pkt_info mec_drop;
4420 	unsigned long last_rx_ts;
4421 	struct {
4422 		struct cdp_pkt_info pkts;
4423 		struct cdp_pkt_info fail;
4424 		uint32_t mdns_no_fwd;
4425 	} intra_bss;
4426 	struct {
4427 		uint32_t mic_err;
4428 		uint32_t decrypt_err;
4429 		uint32_t fcserr;
4430 		uint32_t pn_err;
4431 		uint32_t oor_err;
4432 		uint32_t jump_2k_err;
4433 		uint32_t rxdma_wifi_parse_err;
4434 	} err;
4435 	uint32_t non_amsdu_cnt;
4436 	uint32_t amsdu_cnt;
4437 	uint32_t rx_retries;
4438 	uint32_t multipass_rx_pkt_drop;
4439 	uint32_t peer_unauth_rx_pkt_drop;
4440 	uint32_t policy_check_drop;
4441 	struct cdp_pkt_info to_stack_twt;
4442 #ifdef VDEV_PEER_PROTOCOL_COUNT
4443 	struct protocol_trace_count protocol_trace_cnt[CDP_TRACE_MAX];
4444 #endif
4445 	uint32_t mcast_3addr_drop;
4446 #ifdef IPA_OFFLOAD
4447 	struct cdp_pkt_info rx_total;
4448 #endif
4449 };
4450 
4451 /**
4452  * struct dp_peer_extd_rx_stats - Peer Rx stats updated in either
4453  *	per pkt Rx path when macro QCA_ENHANCED_STATS_SUPPORT is disabled or in
4454  *	Rx monitor path when macro is enabled
4455  * @pkt_type: pkt counter for different .11 modes
4456  * @wme_ac_type: Wireless Multimedia type Count
4457  * @mpdu_cnt_fcs_ok: SU Rx success mpdu count
4458  * @mpdu_cnt_fcs_err: SU Rx fail mpdu count
4459  * @non_ampdu_cnt: Number of MSDUs with no MPDU level aggregation
4460  * @ampdu_cnt: Number of MSDUs part of AMPDU
4461  * @rx_mpdus: mpdu in rx
4462  * @rx_ppdus: ppdu in rx
4463  * @su_ax_ppdu_cnt: SU Rx packet count for .11ax
4464  * @rx_mu: Rx MU stats
4465  * @reception_type: Reception type of packets
4466  * @ppdu_cnt: PPDU packet count in reception type
4467  * @sgi_count: sgi count
4468  * @nss: packet count in spatial streams
4469  * @ppdu_nss: PPDU packet count in spatial streams
4470  * @bw: Packet Count in different bandwidths
4471  * @rx_mpdu_cnt: rx mpdu count per MCS rate
4472  * @rx_rate: Rx rate
4473  * @last_rx_rate: Previous rx rate
4474  * @rnd_avg_rx_rate: Rounded average rx rate
4475  * @avg_rx_rate: Average Rx rate
4476  * @rx_ratecode: Rx rate code of last frame
4477  * @avg_snr: Average snr
4478  * @rx_snr_measured_time: Time at which snr is measured
4479  * @snr: SNR of received signal
4480  * @last_snr: Previous snr
4481  * @nss_info: NSS 1,2, ...8
4482  * @mcs_info: MCS index
4483  * @bw_info: Bandwidth
4484  *       <enum 0 bw_20_MHz>
4485  *       <enum 1 bw_40_MHz>
4486  *       <enum 2 bw_80_MHz>
4487  *       <enum 3 bw_160_MHz>
4488  * @gi_info: <enum 0     0_8_us_sgi > Legacy normal GI
4489  *       <enum 1     0_4_us_sgi > Legacy short GI
4490  *       <enum 2     1_6_us_sgi > HE related GI
4491  *       <enum 3     3_2_us_sgi > HE
4492  * @preamble_info: preamble
4493  * @mpdu_retry_cnt: retries of mpdu in rx
4494  * @su_be_ppdu_cnt: SU Rx packet count for BE
4495  * @mu_be_ppdu_cnt: MU rx packet count for BE
4496  * @punc_bw: MSDU count for punctured bw
4497  * @bar_cnt: Block ACK Request frame count
4498  * @ndpa_cnt: NDP announcement frame count
4499  * @wme_ac_type_bytes: Wireless Multimedia type Bytes Count
4500  */
4501 struct dp_peer_extd_rx_stats {
4502 	struct cdp_pkt_type pkt_type[DOT11_MAX];
4503 	uint32_t wme_ac_type[WME_AC_MAX];
4504 	uint32_t mpdu_cnt_fcs_ok;
4505 	uint32_t mpdu_cnt_fcs_err;
4506 	uint32_t non_ampdu_cnt;
4507 	uint32_t ampdu_cnt;
4508 	uint32_t rx_mpdus;
4509 	uint32_t rx_ppdus;
4510 
4511 	struct cdp_pkt_type su_ax_ppdu_cnt;
4512 	struct cdp_rx_mu rx_mu[TXRX_TYPE_MU_MAX];
4513 	uint32_t reception_type[MAX_RECEPTION_TYPES];
4514 	uint32_t ppdu_cnt[MAX_RECEPTION_TYPES];
4515 
4516 	uint32_t sgi_count[MAX_GI];
4517 	uint32_t nss[SS_COUNT];
4518 	uint32_t ppdu_nss[SS_COUNT];
4519 	uint32_t bw[MAX_BW];
4520 	uint32_t rx_mpdu_cnt[MAX_MCS];
4521 
4522 	uint32_t rx_rate;
4523 	uint32_t last_rx_rate;
4524 	uint32_t rnd_avg_rx_rate;
4525 	uint32_t avg_rx_rate;
4526 	uint32_t rx_ratecode;
4527 
4528 	uint32_t avg_snr;
4529 	unsigned long rx_snr_measured_time;
4530 	uint8_t snr;
4531 	uint8_t last_snr;
4532 
4533 	uint32_t nss_info:4,
4534 		 mcs_info:4,
4535 		 bw_info:4,
4536 		 gi_info:4,
4537 		 preamble_info:4;
4538 
4539 	uint32_t mpdu_retry_cnt;
4540 #ifdef WLAN_FEATURE_11BE
4541 	struct cdp_pkt_type su_be_ppdu_cnt;
4542 	struct cdp_pkt_type mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX];
4543 	uint32_t punc_bw[MAX_PUNCTURED_MODE];
4544 #endif
4545 	uint32_t bar_cnt;
4546 	uint32_t ndpa_cnt;
4547 	uint64_t wme_ac_type_bytes[WME_AC_MAX];
4548 };
4549 
4550 /**
4551  * struct dp_peer_per_pkt_stats - Per pkt stats for peer
4552  * @tx: Per pkt Tx stats
4553  * @rx: Per pkt Rx stats
4554  */
4555 struct dp_peer_per_pkt_stats {
4556 	struct dp_peer_per_pkt_tx_stats tx;
4557 	struct dp_peer_per_pkt_rx_stats rx;
4558 };
4559 
4560 /**
4561  * struct dp_peer_extd_stats - Stats from extended path for peer
4562  * @tx: Extended path tx stats
4563  * @rx: Extended path rx stats
4564  */
4565 struct dp_peer_extd_stats {
4566 	struct dp_peer_extd_tx_stats tx;
4567 	struct dp_peer_extd_rx_stats rx;
4568 };
4569 
4570 /**
4571  * struct dp_peer_stats - Peer stats
4572  * @per_pkt_stats: Per packet path stats
4573  * @extd_stats: Extended path stats
4574  */
4575 struct dp_peer_stats {
4576 	struct dp_peer_per_pkt_stats per_pkt_stats;
4577 #ifndef QCA_ENHANCED_STATS_SUPPORT
4578 	struct dp_peer_extd_stats extd_stats;
4579 #endif
4580 };
4581 
4582 /**
4583  * struct dp_txrx_peer: DP txrx_peer structure used in per pkt path
4584  * @vdev: VDEV to which this peer is associated
4585  * @peer_id: peer ID for this peer
4586  * @authorize: Set when authorized
4587  * @in_twt: in TWT session
4588  * @hw_txrx_stats_en: Indicate HW offload vdev stats
4589  * @mld_peer: MLD peer
4590  * @tx_failed: Total Tx failure
4591  * @comp_pkt: Pkt Info for which completions were received
4592  * @to_stack: Total packets sent up the stack
4593  * @stats: Peer stats
4594  * @delay_stats: Peer delay stats
4595  * @jitter_stats: Peer jitter stats
4596  * @security: Security credentials
4597  * @nawds_enabled: NAWDS flag
4598  * @bss_peer: set for bss peer
4599  * @isolation: enable peer isolation for this peer
4600  * @wds_enabled: WDS peer
4601  * @wds_ecm:
4602  * @flush_in_progress:
4603  * @bufq_info:
4604  * @mpass_peer_list_elem: node in the special peer list element
4605  * @vlan_id: vlan id for key
4606  * @wds_ext:
4607  * @osif_rx:
4608  * @rx_tid:
4609  * @sawf_stats:
4610  * @bw: bandwidth of peer connection
4611  * @mpdu_retry_threshold: MPDU retry threshold to increment tx bad count
4612  */
4613 struct dp_txrx_peer {
4614 	struct dp_vdev *vdev;
4615 	uint16_t peer_id;
4616 	uint8_t authorize:1,
4617 		in_twt:1,
4618 		hw_txrx_stats_en:1,
4619 		mld_peer:1;
4620 	uint32_t tx_failed;
4621 	struct cdp_pkt_info comp_pkt;
4622 	struct cdp_pkt_info to_stack;
4623 
4624 	struct dp_peer_stats stats;
4625 
4626 	struct dp_peer_delay_stats *delay_stats;
4627 
4628 	struct cdp_peer_tid_stats *jitter_stats;
4629 
4630 	struct {
4631 		enum cdp_sec_type sec_type;
4632 		u_int32_t michael_key[2]; /* relevant for TKIP */
4633 	} security[2]; /* 0 -> multicast, 1 -> unicast */
4634 
4635 	uint16_t nawds_enabled:1,
4636 		bss_peer:1,
4637 		isolation:1,
4638 		wds_enabled:1;
4639 #ifdef WDS_VENDOR_EXTENSION
4640 	dp_ecm_policy wds_ecm;
4641 #endif
4642 #ifdef PEER_CACHE_RX_PKTS
4643 	qdf_atomic_t flush_in_progress;
4644 	struct dp_peer_cached_bufq bufq_info;
4645 #endif
4646 #ifdef QCA_MULTIPASS_SUPPORT
4647 	TAILQ_ENTRY(dp_txrx_peer) mpass_peer_list_elem;
4648 	uint16_t vlan_id;
4649 #endif
4650 #ifdef QCA_SUPPORT_WDS_EXTENDED
4651 	struct dp_wds_ext_peer wds_ext;
4652 	ol_txrx_rx_fp osif_rx;
4653 #endif
4654 	struct dp_rx_tid_defrag rx_tid[DP_MAX_TIDS];
4655 #ifdef CONFIG_SAWF
4656 	struct dp_peer_sawf_stats *sawf_stats;
4657 #endif
4658 #ifdef DP_PEER_EXTENDED_API
4659 	enum cdp_peer_bw bw;
4660 	uint8_t mpdu_retry_threshold;
4661 #endif
4662 };
4663 
4664 /* Peer structure for data path state */
4665 struct dp_peer {
4666 	struct dp_txrx_peer *txrx_peer;
4667 #ifdef WIFI_MONITOR_SUPPORT
4668 	struct dp_mon_peer *monitor_peer;
4669 #endif
4670 	/* peer ID for this peer */
4671 	uint16_t peer_id;
4672 
4673 	/* VDEV to which this peer is associated */
4674 	struct dp_vdev *vdev;
4675 
4676 	struct dp_ast_entry *self_ast_entry;
4677 
4678 	qdf_atomic_t ref_cnt;
4679 
4680 	union dp_align_mac_addr mac_addr;
4681 
4682 	/* node in the vdev's list of peers */
4683 	TAILQ_ENTRY(dp_peer) peer_list_elem;
4684 	/* node in the hash table bin's list of peers */
4685 	TAILQ_ENTRY(dp_peer) hash_list_elem;
4686 
4687 	/* TID structures pointer */
4688 	struct dp_rx_tid *rx_tid;
4689 
4690 	/* TBD: No transmit TID state required? */
4691 
4692 	struct {
4693 		enum cdp_sec_type sec_type;
4694 		u_int32_t michael_key[2]; /* relevant for TKIP */
4695 	} security[2]; /* 0 -> multicast, 1 -> unicast */
4696 
4697 	/* NAWDS Flag and Bss Peer bit */
4698 	uint16_t bss_peer:1, /* set for bss peer */
4699 		authorize:1, /* Set when authorized */
4700 		valid:1, /* valid bit */
4701 		delete_in_progress:1, /* Indicate kickout sent */
4702 		sta_self_peer:1, /* Indicate STA self peer */
4703 		is_tdls_peer:1; /* Indicate TDLS peer */
4704 
4705 #ifdef WLAN_FEATURE_11BE_MLO
4706 	uint8_t first_link:1, /* first link peer for MLO */
4707 		primary_link:1; /* primary link for MLO */
4708 #endif
4709 
4710 	/* MCL specific peer local id */
4711 	uint16_t local_id;
4712 	enum ol_txrx_peer_state state;
4713 	qdf_spinlock_t peer_info_lock;
4714 
4715 	/* Peer calibrated stats */
4716 	struct cdp_calibr_stats stats;
4717 
4718 	TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
4719 	/* TBD */
4720 
4721 	/* Active Block ack sessions */
4722 	uint16_t active_ba_session_cnt;
4723 
4724 	/* Current HW buffersize setting */
4725 	uint16_t hw_buffer_size;
4726 
4727 	/*
4728 	 * Flag to check if sessions with 256 buffersize
4729 	 * should be terminated.
4730 	 */
4731 	uint8_t kill_256_sessions;
4732 	qdf_atomic_t is_default_route_set;
4733 
4734 #ifdef QCA_PEER_MULTIQ_SUPPORT
4735 	struct dp_peer_ast_params peer_ast_flowq_idx[DP_PEER_AST_FLOWQ_MAX];
4736 #endif
4737 	/* entry to inactive_list*/
4738 	TAILQ_ENTRY(dp_peer) inactive_list_elem;
4739 
4740 	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
4741 
4742 	uint8_t peer_state;
4743 	qdf_spinlock_t peer_state_lock;
4744 #ifdef WLAN_SUPPORT_MSCS
4745 	struct dp_peer_mscs_parameter mscs_ipv4_parameter, mscs_ipv6_parameter;
4746 	bool mscs_active;
4747 #endif
4748 #ifdef WLAN_SUPPORT_MESH_LATENCY
4749 	struct dp_peer_mesh_latency_parameter mesh_latency_params[DP_MAX_TIDS];
4750 #endif
4751 #ifdef WLAN_FEATURE_11BE_MLO
4752 	/* peer type */
4753 	enum cdp_peer_type peer_type;
4754 	/*---------for link peer---------*/
4755 	struct dp_peer *mld_peer;
4756 	/*---------for mld peer----------*/
4757 	struct dp_peer_link_info link_peers[DP_MAX_MLO_LINKS];
4758 	uint8_t num_links;
4759 	DP_MUTEX_TYPE link_peers_info_lock;
4760 #endif
4761 #ifdef CONFIG_SAWF_DEF_QUEUES
4762 	struct dp_peer_sawf *sawf;
4763 #endif
4764 	/* AST hash index for peer in HW */
4765 	uint16_t ast_idx;
4766 
4767 	/* AST hash value for peer in HW */
4768 	uint16_t ast_hash;
4769 };
4770 
4771 /**
4772  * struct dp_invalid_peer_msg - Invalid peer message
4773  * @nbuf: data buffer
4774  * @wh: 802.11 header
4775  * @vdev_id: id of vdev
4776  */
4777 struct dp_invalid_peer_msg {
4778 	qdf_nbuf_t nbuf;
4779 	struct ieee80211_frame *wh;
4780 	uint8_t vdev_id;
4781 };
4782 
4783 /**
4784  * struct dp_tx_me_buf_t - ME buffer
4785  * @next: pointer to next buffer
4786  * @data: Destination Mac address
4787  * @paddr_macbuf: physical address for dest_mac
4788  */
4789 struct dp_tx_me_buf_t {
4790 	/* Note: ME buf pool initialization logic expects next pointer to
4791 	 * be the first element. Don't add anything before next */
4792 	struct dp_tx_me_buf_t *next;
4793 	uint8_t data[QDF_MAC_ADDR_SIZE];
4794 	qdf_dma_addr_t paddr_macbuf;
4795 };
4796 
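/*
 * Illustrative sketch (not part of the driver): pops one entry off the
 * per-pdev multicast-enhancement buffer freelist; it relies on 'next' being
 * the first member, as noted above. The helper name is hypothetical and any
 * locking around the freelist is omitted.
 */
static inline struct dp_tx_me_buf_t *
dp_tx_me_buf_alloc_example(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = pdev->me_buf.freelist;

	if (buf) {
		pdev->me_buf.freelist = buf->next;
		pdev->me_buf.buf_in_use++;
	}

	return buf;
}
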
4797 #if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
4798 struct hal_rx_fst;
4799 
4800 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
4801 struct dp_rx_fse {
4802 	/* HAL Rx Flow Search Entry which matches HW definition */
4803 	void *hal_rx_fse;
4804 	/* Toeplitz hash value */
4805 	uint32_t flow_hash;
4806 	/* Flow index, equivalent to hash value truncated to FST size */
4807 	uint32_t flow_id;
4808 	/* Stats tracking for this flow */
4809 	struct cdp_flow_stats stats;
4810 	/* Flag indicating whether flow is IPv4 address tuple */
4811 	uint8_t is_ipv4_addr_entry;
4812 	/* Flag indicating whether flow is valid */
4813 	uint8_t is_valid;
4814 };
4815 
4816 struct dp_rx_fst {
4817 	/* Software (DP) FST */
4818 	uint8_t *base;
4819 	/* Pointer to HAL FST */
4820 	struct hal_rx_fst *hal_rx_fst;
4821 	/* Base physical address of HAL RX HW FST */
4822 	uint64_t hal_rx_fst_base_paddr;
4823 	/* Maximum number of flows FSE supports */
4824 	uint16_t max_entries;
4825 	/* Num entries in flow table */
4826 	uint16_t num_entries;
4827 	/* SKID Length */
4828 	uint16_t max_skid_length;
4829 	/* Hash mask to obtain legitimate hash entry */
4830 	uint32_t hash_mask;
4831 	/* Timer for bundling of flows */
4832 	qdf_timer_t cache_invalidate_timer;
4833 	/**
4834 	 * Flag which tracks whether cache update
4835 	 * is needed on timer expiry
4836 	 */
4837 	qdf_atomic_t is_cache_update_pending;
4838 	/* Flag to indicate completion of FSE setup in HW/FW */
4839 	bool fse_setup_done;
4840 };
4841 
4842 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_rx_fse)
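
/*
 * Illustrative sketch (not part of the driver): shows how a computed flow
 * hash could be folded into a table slot with hash_mask, and how
 * max_skid_length bounds the linear probe past an occupied slot. The helper
 * name is hypothetical, the cast of 'base' is an assumption, and any locking
 * is omitted.
 */
static inline struct dp_rx_fse *
dp_rx_fst_find_slot_example(struct dp_rx_fst *fst, uint32_t flow_hash)
{
	struct dp_rx_fse *fse_base = (struct dp_rx_fse *)fst->base;
	uint32_t idx = flow_hash & fst->hash_mask;
	uint16_t skid;

	for (skid = 0; skid < fst->max_skid_length; skid++) {
		struct dp_rx_fse *fse =
			&fse_base[(idx + skid) % fst->max_entries];

		/* Stop at a free slot or at an entry with a matching hash */
		if (!fse->is_valid || fse->flow_hash == flow_hash)
			return fse;
	}

	return NULL;
}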
4843 #elif WLAN_SUPPORT_RX_FISA
4844 
4845 /**
4846  * struct dp_fisa_reo_mismatch_stats - reo mismatch sub-case stats for FISA
4847  * @allow_cce_match: packet allowed due to cce mismatch
4848  * @allow_fse_metdata_mismatch: packet allowed since it belongs to same flow,
4849  *			only fse_metadata is not same.
4850  * @allow_non_aggr: packet allowed due to any other reason.
4851  */
4852 struct dp_fisa_reo_mismatch_stats {
4853 	uint32_t allow_cce_match;
4854 	uint32_t allow_fse_metdata_mismatch;
4855 	uint32_t allow_non_aggr;
4856 };
4857 
4858 struct dp_fisa_stats {
4859 	/* flow index invalid from RX HW TLV */
4860 	uint32_t invalid_flow_index;
4861 	/* workqueue deferred due to suspend */
4862 	uint32_t update_deferred;
4863 	struct dp_fisa_reo_mismatch_stats reo_mismatch;
4864 };
4865 
4866 enum fisa_aggr_ret {
4867 	FISA_AGGR_DONE,
4868 	FISA_AGGR_NOT_ELIGIBLE,
4869 	FISA_FLUSH_FLOW
4870 };
4871 
4872 /**
4873  * struct fisa_pkt_hist - FISA Packet history structure
4874  * @tlv_hist: array of TLV history
4875  * @ts_hist: array of timestamps of fisa packets
4876  * @idx: index indicating the next location to be used in the array.
4877  */
4878 struct fisa_pkt_hist {
4879 	uint8_t *tlv_hist;
4880 	qdf_time_t ts_hist[FISA_FLOW_MAX_AGGR_COUNT];
4881 	uint32_t idx;
4882 };
4883 
4884 struct dp_fisa_rx_sw_ft {
4885 	/* HAL Rx Flow Search Entry which matches HW definition */
4886 	void *hw_fse;
4887 	/* hash value */
4888 	uint32_t flow_hash;
4889 	/* toeplitz hash value*/
4890 	uint32_t flow_id_toeplitz;
4891 	/* Flow index, equivalent to hash value truncated to FST size */
4892 	uint32_t flow_id;
4893 	/* Stats tracking for this flow */
4894 	struct cdp_flow_stats stats;
4895 	/* Flag indicating whether flow is IPv4 address tuple */
4896 	uint8_t is_ipv4_addr_entry;
4897 	/* Flag indicating whether flow is valid */
4898 	uint8_t is_valid;
4899 	uint8_t is_populated;
4900 	uint8_t is_flow_udp;
4901 	uint8_t is_flow_tcp;
4902 	qdf_nbuf_t head_skb;
4903 	uint16_t cumulative_l4_checksum;
4904 	uint16_t adjusted_cumulative_ip_length;
4905 	uint16_t cur_aggr;
4906 	uint16_t napi_flush_cumulative_l4_checksum;
4907 	uint16_t napi_flush_cumulative_ip_length;
4908 	qdf_nbuf_t last_skb;
4909 	uint32_t head_skb_ip_hdr_offset;
4910 	uint32_t head_skb_l4_hdr_offset;
4911 	struct cdp_rx_flow_tuple_info rx_flow_tuple_info;
4912 	uint8_t napi_id;
4913 	struct dp_vdev *vdev;
4914 	uint64_t bytes_aggregated;
4915 	uint32_t flush_count;
4916 	uint32_t aggr_count;
4917 	uint8_t do_not_aggregate;
4918 	uint16_t hal_cumultive_ip_len;
4919 	struct dp_soc *soc_hdl;
4920 	/* last aggregate count fetched from RX PKT TLV */
4921 	uint32_t last_hal_aggr_count;
4922 	uint32_t cur_aggr_gso_size;
4923 	qdf_net_udphdr_t *head_skb_udp_hdr;
4924 	uint16_t frags_cumulative_len;
4925 	/* CMEM parameters */
4926 	uint32_t cmem_offset;
4927 	uint32_t metadata;
4928 	uint32_t reo_dest_indication;
4929 	qdf_time_t flow_init_ts;
4930 	qdf_time_t last_accessed_ts;
4931 #ifdef WLAN_SUPPORT_RX_FISA_HIST
4932 	struct fisa_pkt_hist pkt_hist;
4933 #endif
4934 };
4935 
4936 #define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_fisa_rx_sw_ft)
4937 #define MAX_FSE_CACHE_FL_HST 10
4938 /**
4939  * struct fse_cache_flush_history - Debug history cache flush
4940  * @timestamp: Entry update timestamp
4941  * @flows_added: Number of flows added for this flush
4942  * @flows_deleted: Number of flows deleted for this flush
4943  */
4944 struct fse_cache_flush_history {
4945 	uint64_t timestamp;
4946 	uint32_t flows_added;
4947 	uint32_t flows_deleted;
4948 };
4949 
4950 struct dp_rx_fst {
4951 	/* Software (DP) FST */
4952 	uint8_t *base;
4953 	/* Pointer to HAL FST */
4954 	struct hal_rx_fst *hal_rx_fst;
4955 	/* Base physical address of HAL RX HW FST */
4956 	uint64_t hal_rx_fst_base_paddr;
4957 	/* Maximum number of flows FSE supports */
4958 	uint16_t max_entries;
4959 	/* Num entries in flow table */
4960 	uint16_t num_entries;
4961 	/* SKID Length */
4962 	uint16_t max_skid_length;
4963 	/* Hash mask to obtain legitimate hash entry */
4964 	uint32_t hash_mask;
4965 	/* Lock for adding/deleting entries of FST */
4966 	qdf_spinlock_t dp_rx_fst_lock;
4967 	uint32_t add_flow_count;
4968 	uint32_t del_flow_count;
4969 	uint32_t hash_collision_cnt;
4970 	struct dp_soc *soc_hdl;
4971 	qdf_atomic_t fse_cache_flush_posted;
4972 	qdf_timer_t fse_cache_flush_timer;
4973 	/* Allow FSE cache flush cmd to FW */
4974 	bool fse_cache_flush_allow;
4975 	struct fse_cache_flush_history cache_fl_rec[MAX_FSE_CACHE_FL_HST];
4976 	/* FISA DP stats */
4977 	struct dp_fisa_stats stats;
4978 
4979 	/* CMEM params */
4980 	qdf_work_t fst_update_work;
4981 	qdf_workqueue_t *fst_update_wq;
4982 	qdf_list_t fst_update_list;
4983 	uint32_t meta_counter;
4984 	uint32_t cmem_ba;
4985 	qdf_spinlock_t dp_rx_sw_ft_lock[MAX_REO_DEST_RINGS];
4986 	qdf_event_t cmem_resp_event;
4987 	bool flow_deletion_supported;
4988 	bool fst_in_cmem;
4989 	qdf_atomic_t pm_suspended;
4990 	bool fst_wq_defer;
4991 };
4992 
4993 #endif /* WLAN_SUPPORT_RX_FISA */
4994 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
4995 
4996 #ifdef WLAN_FEATURE_STATS_EXT
4997 /**
4998  * struct dp_req_rx_hw_stats_t - RX peer HW stats query structure
4999  * @pending_tid_stats_cnt: pending tid stats count which waits for REO status
5000  * @is_query_timeout: flag to show is stats query timeout
5001  */
5002 struct dp_req_rx_hw_stats_t {
5003 	qdf_atomic_t pending_tid_stats_cnt;
5004 	bool is_query_timeout;
5005 };
5006 #endif
5007 /* soc level structure to declare arch specific ops for DP */
5008 
5009 /**
5010  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
5011  * @soc: DP SOC handle
5012  * @mac_id: mac id
5013  *
5014  * Return: none
5015  */
5016 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id);
5017 
5018 /**
5019  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
5020  * @soc: DP SOC handle
5021  * @mac_id: mac id
5022  *
5023  * Allocates memory pages for link descriptors; the page size is 4K for
5024  * MCL and 2MB for WIN. If the mac_id is invalid, link descriptor pages are
5025  * allocated for regular RX/TX, and if there is a proper mac_id, link
5026  * descriptors are allocated for RX monitor mode.
5027  *
5028  * Return: QDF_STATUS_SUCCESS: Success
5029  *	   QDF_STATUS_E_FAILURE: Failure
5030  */
5031 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc,
5032 					    uint32_t mac_id);
5033 
5034 /**
5035  * dp_link_desc_ring_replenish() - Replenish hw link desc rings
5036  * @soc: DP SOC handle
5037  * @mac_id: mac id
5038  *
5039  * Return: None
5040  */
5041 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id);
5042 
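/*
 * Illustrative sketch (not part of the driver): typical pairing of the link
 * descriptor bank APIs declared above - allocate the banks, then seed the
 * HW link descriptor ring via replenish; dp_hw_link_desc_pool_banks_free()
 * would undo the allocation on teardown. The wrapper name is hypothetical.
 */
static inline QDF_STATUS
dp_link_desc_banks_setup_example(struct dp_soc *soc, uint32_t mac_id)
{
	QDF_STATUS status;

	status = dp_hw_link_desc_pool_banks_alloc(soc, mac_id);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	dp_link_desc_ring_replenish(soc, mac_id);

	return QDF_STATUS_SUCCESS;
}
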
5043 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
5044 void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc);
5045 #else
5046 static inline void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc) {}
5047 #endif
5048 
5049 /**
5050  * dp_srng_alloc() - Allocate memory for SRNG
5051  * @soc  : Data path soc handle
5052  * @srng : SRNG pointer
5053  * @ring_type : Ring Type
5054  * @num_entries: Number of entries
5055  * @cached: cached flag variable
5056  *
5057  * Return: QDF_STATUS
5058  */
5059 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
5060 			 int ring_type, uint32_t num_entries,
5061 			 bool cached);
5062 
5063 /**
5064  * dp_srng_free() - Free SRNG memory
5065  * @soc: Data path soc handle
5066  * @srng: SRNG pointer
5067  *
5068  * Return: None
5069  */
5070 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng);
5071 
5072 /**
5073  * dp_srng_init() - Initialize SRNG
 * @soc: Data path soc handle
 * @srng: SRNG pointer
 * @ring_type: Ring Type
 * @ring_num: Ring number
 * @mac_id: mac id
5079  *
5080  * Return: QDF_STATUS
5081  */
5082 QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
5083 			int ring_type, int ring_num, int mac_id);
5084 
5085 /**
 * dp_srng_init_idx() - Initialize SRNG from a given ring index
 * @soc: Data path soc handle
 * @srng: SRNG pointer
 * @ring_type: Ring Type
 * @ring_num: Ring number
 * @mac_id: mac id
5092  * @idx: ring index
5093  *
5094  * Return: QDF_STATUS
5095  */
5096 QDF_STATUS dp_srng_init_idx(struct dp_soc *soc, struct dp_srng *srng,
5097 			    int ring_type, int ring_num, int mac_id,
5098 			    uint32_t idx);
5099 
5100 /**
5101  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
5102  * @soc: DP SOC handle
5103  * @srng: source ring structure
5104  * @ring_type: type of ring
5105  * @ring_num: ring number
5106  *
5107  * Return: None
5108  */
5109 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
5110 		    int ring_type, int ring_num);
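
/*
 * Usage sketch (illustrative only): expected pairing of the SRNG
 * alloc/init/deinit/free APIs above. The REO_DST ring type, entry count
 * and ring number are assumptions made for this sketch.
 *
 *	struct dp_srng ring = {0};
 *
 *	if (QDF_IS_STATUS_ERROR(dp_srng_alloc(soc, &ring, REO_DST,
 *					      1024, false)))
 *		return QDF_STATUS_E_NOMEM;
 *
 *	if (QDF_IS_STATUS_ERROR(dp_srng_init(soc, &ring, REO_DST, 0, 0))) {
 *		dp_srng_free(soc, &ring);
 *		return QDF_STATUS_E_FAILURE;
 *	}
 *
 *	// ...use the ring...
 *	dp_srng_deinit(soc, &ring, REO_DST, 0);
 *	dp_srng_free(soc, &ring);
 */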
5111 
/**
 * dp_print_peer_txrx_stats_be() - Print peer txrx stats (BE arch)
 * @peer_stats: Peer stats pointer
 * @stats_type: enum of peer stats type
 *
 * Return: None
 */
void dp_print_peer_txrx_stats_be(struct cdp_peer_stats *peer_stats,
				 enum peer_stats_type stats_type);

/**
 * dp_print_peer_txrx_stats_li() - Print peer txrx stats (LI arch)
 * @peer_stats: Peer stats pointer
 * @stats_type: enum of peer stats type
 *
 * Return: None
 */
void dp_print_peer_txrx_stats_li(struct cdp_peer_stats *peer_stats,
				 enum peer_stats_type stats_type);
5116 
5117 /**
5118  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
5119  * @soc: DP soc handle
5120  * @work_done: work done in softirq context
5121  * @start_time: start time for the softirq
5122  *
5123  * Return: enum with yield code
5124  */
5125 enum timer_yield_status
5126 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
5127 			  uint64_t start_time);
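
/*
 * Usage sketch (illustrative only): a timer/softirq handler can keep
 * polling until this helper indicates it should yield.
 * dp_example_poll_ring() is a hypothetical helper, and DP_TIMER_NO_YIELD
 * is assumed to be one of the timer_yield_status values.
 *
 *	uint64_t start = qdf_get_log_timestamp();
 *	uint32_t work = 0;
 *
 *	do {
 *		work += dp_example_poll_ring(soc);
 *	} while (dp_should_timer_irq_yield(soc, work, start) ==
 *		 DP_TIMER_NO_YIELD);
 */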
5128 
5129 /**
5130  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
5131  * @vdev: Datapath VDEV handle
5132  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5133  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5134  *
5135  * Return: None
5136  */
5137 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
5138 				  enum cdp_host_reo_dest_ring *reo_dest,
5139 				  bool *hash_based);
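
/*
 * Usage sketch (illustrative only): querying the per-vdev defaults before
 * applying any overrides.
 *
 *	enum cdp_host_reo_dest_ring reo_dest;
 *	bool hash_based;
 *
 *	dp_vdev_get_default_reo_hash(vdev, &reo_dest, &hash_based);
 */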
5140 
5141 /**
 * dp_reo_remap_config() - configure reo remap register value based on
 *                         nss configuration.
5144  * @soc: DP soc handle
5145  * @remap0: output parameter indicates reo remap 0 register value
5146  * @remap1: output parameter indicates reo remap 1 register value
5147  * @remap2: output parameter indicates reo remap 2 register value
5148  *
 * Based on the offload_radio value, the remap configuration below
 * is applied:
5151  *	0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
5152  *	1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
5153  *	2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
5154  *	3 - both Radios handled by NSS (remap not required)
5155  *	4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
5156  *
5157  * Return: bool type, true if remap is configured else false.
5158  */
5160 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
5161 			 uint32_t *remap1, uint32_t *remap2);
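
/*
 * Usage sketch (illustrative only): the remap words are consumed only when
 * the function reports that remapping is required. How they are programmed
 * into hardware is target specific and outside this header.
 *
 *	uint32_t remap0, remap1, remap2;
 *
 *	if (dp_reo_remap_config(soc, &remap0, &remap1, &remap2)) {
 *		// program the REO destination remap registers with
 *		// remap0/remap1/remap2 (target specific)
 *	}
 */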
5162 
5163 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
5164 /**
5165  * dp_tx_comp_get_prefetched_params_from_hal_desc() - Get prefetched TX desc
5166  * @soc: DP soc handle
5167  * @tx_comp_hal_desc: HAL TX Comp Descriptor
5168  * @r_tx_desc: SW Tx Descriptor retrieved from HAL desc.
5169  *
5170  * Return: None
5171  */
5172 void dp_tx_comp_get_prefetched_params_from_hal_desc(
5173 					struct dp_soc *soc,
5174 					void *tx_comp_hal_desc,
5175 					struct dp_tx_desc_s **r_tx_desc);
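
/*
 * Usage sketch (illustrative only): retrieving the prefetched SW Tx
 * descriptor inside the Tx completion loop. hal_desc is a hypothetical
 * pointer to the current HAL completion ring entry.
 *
 *	struct dp_tx_desc_s *tx_desc = NULL;
 *
 *	dp_tx_comp_get_prefetched_params_from_hal_desc(soc, hal_desc,
 *						       &tx_desc);
 *	if (tx_desc) {
 *		// ...process the completion using tx_desc...
 *	}
 */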
5176 #endif
5177 #endif /* _DP_TYPES_H_ */
5178