/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_TYPES_H_
#define _DP_TYPES_H_

#include <qdf_types.h>
#include <qdf_nbuf.h>
#include <qdf_lock.h>
#include <qdf_atomic.h>
#include <qdf_util.h>
#include <qdf_list.h>
#include <qdf_lro.h>
#include <queue.h>
#include <htt_common.h>

#include <cdp_txrx_cmn.h>
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <linux/ieee80211.h>
#endif

#ifndef CONFIG_WIN
#include <wdi_event_api.h>    /* WDI subscriber event list */
#endif

#include <hal_tx.h>
#include <hal_reo.h>
#include "wlan_cfg.h"
#include "hal_rx.h"
#include <hal_api.h>
#include <hal_api_mon.h>

#define MAX_BW 7
#define MAX_RETRIES 4
#define MAX_RECEPTION_TYPES 4

#ifndef REMOVE_PKT_LOG
#include <pktlog.h>
#endif

#define REPT_MU_MIMO 1
#define REPT_MU_OFDMA_MIMO 3
#define DP_VO_TID 6

#define DP_MAX_INTERRUPT_CONTEXTS 8
#define DP_MAX_TID_MAPS 16 /* MAX TID MAPS AVAILABLE PER PDEV */
#define DSCP_TID_MAP_MAX    (64)
#define DP_IP_DSCP_SHIFT 2
#define DP_IP_DSCP_MASK 0x3f
#define DP_FC0_SUBTYPE_QOS 0x80
#define DP_QOS_TID 0x0f
#define DP_IPV6_PRIORITY_SHIFT 20
#define MAX_MON_LINK_DESC_BANKS 2

#if defined(CONFIG_MCL)
#define MAX_PDEV_CNT 1
#else
#define MAX_PDEV_CNT 3
#endif

#define MAX_LINK_DESC_BANKS 8
#define MAX_TXDESC_POOLS 4
#define MAX_RXDESC_POOLS 4
#define MAX_REO_DEST_RINGS 4
#define MAX_TCL_DATA_RINGS 4
#define MAX_IDLE_SCATTER_BUFS 16
#define DP_MAX_IRQ_PER_CONTEXT 12
#define DEFAULT_HW_PEER_ID 0xffff

#define MAX_TX_HW_QUEUES MAX_TCL_DATA_RINGS

#ifndef REMOVE_PKT_LOG
enum rx_pktlog_mode {
	DP_RX_PKTLOG_DISABLED = 0,
	DP_RX_PKTLOG_FULL,
	DP_RX_PKTLOG_LITE,
};
#endif

struct dp_soc_cmn;
struct dp_pdev;
struct dp_vdev;
struct dp_tx_desc_s;
struct dp_soc;
union dp_rx_desc_list_elem_t;

#define DP_PDEV_ITERATE_VDEV_LIST(_pdev, _vdev) \
	TAILQ_FOREACH((_vdev), &(_pdev)->vdev_list, vdev_list_elem)

#define DP_VDEV_ITERATE_PEER_LIST(_vdev, _peer) \
	TAILQ_FOREACH((_peer), &(_vdev)->peer_list, peer_list_elem)

#define DP_PEER_ITERATE_ASE_LIST(_peer, _ase, _temp_ase) \
	TAILQ_FOREACH_SAFE((_ase), &(_peer)->ast_entry_list, ase_list_elem, (_temp_ase))
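
/*
 * Usage sketch for the iterators above (illustrative only; "pdev",
 * "vdev" and "peer" are assumed to be locals of the caller):
 *
 *	struct dp_vdev *vdev;
 *	struct dp_peer *peer;
 *
 *	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
 *		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
 *			(visits every peer of every vdev on this pdev)
 *		}
 *	}
 */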

#define DP_MUTEX_TYPE qdf_spinlock_t

#define DP_FRAME_IS_MULTICAST(_a)  (*(_a) & 0x01)
#define DP_FRAME_IS_IPV4_MULTICAST(_a)  (*(_a) == 0x01)

#define DP_FRAME_IS_IPV6_MULTICAST(_a)         \
    ((_a)[0] == 0x33 &&                         \
     (_a)[1] == 0x33)

#define DP_FRAME_IS_BROADCAST(_a)              \
    ((_a)[0] == 0xff &&                         \
     (_a)[1] == 0xff &&                         \
     (_a)[2] == 0xff &&                         \
     (_a)[3] == 0xff &&                         \
     (_a)[4] == 0xff &&                         \
     (_a)[5] == 0xff)
#define DP_FRAME_IS_SNAP(_llc) ((_llc)->llc_dsap == 0xaa && \
		(_llc)->llc_ssap == 0xaa && \
		(_llc)->llc_un.type_snap.control == 0x3)
#define DP_FRAME_IS_LLC(typeorlen) ((typeorlen) >= 0x600)
#define DP_FRAME_FC0_TYPE_MASK 0x0c
#define DP_FRAME_FC0_TYPE_DATA 0x08
#define DP_FRAME_IS_DATA(_frame) \
	(((_frame)->i_fc[0] & DP_FRAME_FC0_TYPE_MASK) == DP_FRAME_FC0_TYPE_DATA)
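
/*
 * Illustrative use of the frame classification macros above (not driver
 * code; assumes "wh" points to a struct ieee80211_frame and "daddr" to
 * the destination MAC address):
 *
 *	if (DP_FRAME_IS_DATA(wh)) {
 *		if (DP_FRAME_IS_BROADCAST(daddr))
 *			(count as broadcast data)
 *		else if (DP_FRAME_IS_MULTICAST(daddr))
 *			(count as multicast data)
 *	}
 */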

/**
 * Macros to convert between hw mac ids and sw mac ids:
 * mac ids used by hardware start from a value of 1 while
 * those in host software start from a value of 0. Use the
 * macros below to convert between the mac ids used by software
 * and hardware.
 */
#define DP_SW2HW_MACID(id) ((id) + 1)
#define DP_HW2SW_MACID(id) ((id) > 0 ? ((id) - 1) : 0)
#define DP_MAC_ADDR_LEN 6
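
/*
 * Example (illustrative): SW mac id 0 corresponds to HW mac id 1, so
 * DP_SW2HW_MACID(0) == 1 and DP_HW2SW_MACID(1) == 0. Note that
 * DP_HW2SW_MACID() clamps the (invalid) HW id 0 to SW id 0 rather than
 * returning -1.
 */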

/**
 * enum dp_intr_mode
 * @DP_INTR_LEGACY: Legacy/Line interrupts, for WIN
 * @DP_INTR_MSI: MSI interrupts, for MCL
 * @DP_INTR_POLL: Polling
 */
enum dp_intr_mode {
	DP_INTR_LEGACY = 0,
	DP_INTR_MSI,
	DP_INTR_POLL,
};

/**
 * enum dp_tx_frm_type
 * @dp_tx_frm_std: Regular frame, no added header fragments
 * @dp_tx_frm_tso: TSO segment, with a modified IP header added
 * @dp_tx_frm_sg: SG segment
 * @dp_tx_frm_audio: Audio frames, a custom LLC/SNAP header added
 * @dp_tx_frm_me: Multicast to Unicast Converted frame
 * @dp_tx_frm_raw: Raw Frame
 */
enum dp_tx_frm_type {
	dp_tx_frm_std = 0,
	dp_tx_frm_tso,
	dp_tx_frm_sg,
	dp_tx_frm_audio,
	dp_tx_frm_me,
	dp_tx_frm_raw,
};

/**
 * enum dp_ast_type
 * @dp_ast_type_wds: WDS peer AST type
 * @dp_ast_type_static: static ast entry type
 * @dp_ast_type_mec: Multicast echo ast entry type
 */
enum dp_ast_type {
	dp_ast_type_wds = 0,
	dp_ast_type_static,
	dp_ast_type_mec,
};

/**
 * enum dp_nss_cfg
 * @dp_nss_cfg_default: No radios are offloaded
 * @dp_nss_cfg_first_radio: First radio offloaded
 * @dp_nss_cfg_second_radio: Second radio offloaded
 * @dp_nss_cfg_dbdc: Dual radios offloaded
 */
enum dp_nss_cfg {
	dp_nss_cfg_default,
	dp_nss_cfg_first_radio,
	dp_nss_cfg_second_radio,
	dp_nss_cfg_dbdc,
};

/**
 * struct rx_desc_pool
 * @pool_size: number of RX descriptors in the pool
 * @array: pointer to the array of RX descriptors
 * @freelist: pointer to the free RX descriptor linked list
 */
struct rx_desc_pool {
	uint32_t pool_size;
	union dp_rx_desc_list_elem_t *array;
	union dp_rx_desc_list_elem_t *freelist;
};

/**
 * struct dp_tx_ext_desc_elem_s
 * @next: next extension descriptor pointer
 * @vaddr: hlos virtual address pointer
 * @paddr: physical address pointer for descriptor
 */
struct dp_tx_ext_desc_elem_s {
	struct dp_tx_ext_desc_elem_s *next;
	void *vaddr;
	qdf_dma_addr_t paddr;
};

/**
 * struct dp_tx_ext_desc_pool_s - Tx Extension Descriptor Pool
 * @elem_count: Number of descriptors in the pool
 * @elem_size: Size of each descriptor
 * @num_free: Number of free descriptors
 * @desc_pages: multiple page allocation information for actual descriptors
 * @link_elem_size: size of the link descriptor in cacheable memory used for
 * 		    chaining the extension descriptors
 * @desc_link_pages: multiple page allocation information for link descriptors
 * @freelist: chain of free descriptor elements
 * @lock: lock for accessing the pool
 * @memctx: memory context for DMA allocations
 */
struct dp_tx_ext_desc_pool_s {
	uint16_t elem_count;
	int elem_size;
	uint16_t num_free;
	struct qdf_mem_multi_page_t desc_pages;
	int link_elem_size;
	struct qdf_mem_multi_page_t desc_link_pages;
	struct dp_tx_ext_desc_elem_s *freelist;
	qdf_spinlock_t lock;
	qdf_dma_mem_context(memctx);
};
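
/*
 * Allocation sketch for the extension descriptor pool (simplified and
 * illustrative; the driver's real alloc/free helpers live in the dp_tx
 * code). Elements are popped from the freelist under "lock":
 *
 *	struct dp_tx_ext_desc_elem_s *elem;
 *
 *	qdf_spin_lock_bh(&pool->lock);
 *	elem = pool->freelist;
 *	if (elem) {
 *		pool->freelist = elem->next;
 *		pool->num_free--;
 *	}
 *	qdf_spin_unlock_bh(&pool->lock);
 */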

/**
 * struct dp_tx_desc_s - Tx Descriptor
 * @next: Next in the chain of descriptors in freelist or in the completion list
 * @nbuf: Buffer Address
 * @msdu_ext_desc: MSDU extension descriptor
 * @id: Descriptor ID
 * @vdev: vdev over which the packet was transmitted
 * @pdev: Handle to pdev
 * @pool_id: Pool ID - used when releasing the descriptor
 * @flags: Flags to track the state of descriptor and special frame handling
 * @comp: WBM completion descriptor contents - used when processing the
 * 	  Tx completion
 * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet).
 * 		   This is maintained in the descriptor to allow more efficient
 * 		   processing in completion event processing code.
 * 		   This field is filled in with the htt_pkt_type enum.
 * @frm_type: Frame Type - ToDo check if this is redundant
 * @pkt_offset: Offset from which the actual packet data starts
 * @me_buffer: Pointer to ME buffer - store this so that it can be freed on
 *		Tx completion of ME packet
 * @tso_desc: Pointer to the TSO segment descriptor for this packet
 * @tso_num_desc: Pointer to the TSO number-of-segments descriptor
 */
struct dp_tx_desc_s {
	struct dp_tx_desc_s *next;
	qdf_nbuf_t nbuf;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	uint32_t  id;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	uint8_t  pool_id;
	uint16_t flags;
	struct hal_tx_desc_comp_s comp;
	uint16_t tx_encap_type;
	uint8_t frm_type;
	uint8_t pkt_offset;
	void *me_buffer;
	void *tso_desc;
	void *tso_num_desc;
};
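
/*
 * Lifecycle note (informational, not normative): a dp_tx_desc_s is
 * popped from a pool freelist in the transmit path, its "id" and
 * "pool_id" travel with the hardware cookie, and on Tx completion
 * "comp" holds the WBM release descriptor contents consulted before
 * the descriptor is returned to its pool.
 */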

/**
 * enum flow_pool_status - flow pool status
 * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
 *				and network queues are unpaused
 * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
 *			   and network queues are paused
 * @FLOW_POOL_INVALID: pool is invalid (descriptors may only be returned)
 * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
 */
enum flow_pool_status {
	FLOW_POOL_ACTIVE_UNPAUSED = 0,
	FLOW_POOL_ACTIVE_PAUSED = 1,
	FLOW_POOL_INVALID = 2,
	FLOW_POOL_INACTIVE = 3,
};
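
/*
 * Typical state flow (illustrative): a pool becomes
 * FLOW_POOL_ACTIVE_UNPAUSED on flow pool map, toggles to/from
 * FLOW_POOL_ACTIVE_PAUSED as the available descriptor count crosses the
 * pool's stop/start thresholds, moves to FLOW_POOL_INVALID if the pool
 * is unmapped while Tx completions are still pending, and becomes
 * FLOW_POOL_INACTIVE once all descriptors have been returned.
 */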

/**
 * struct dp_tx_tso_seg_pool_s
 * @pool_size: total number of pool elements
 * @num_free: free element count
 * @freelist: first free element pointer
 * @lock: lock for accessing the pool
 */
struct dp_tx_tso_seg_pool_s {
	uint16_t pool_size;
	uint16_t num_free;
	struct qdf_tso_seg_elem_t *freelist;
	qdf_spinlock_t lock;
};

/**
 * struct dp_tx_tso_num_seg_pool_s
 * @num_seg_pool_size: total number of pool elements
 * @num_free: free element count
 * @freelist: first free element pointer
 * @lock: lock for accessing the pool
 */
struct dp_tx_tso_num_seg_pool_s {
	uint16_t num_seg_pool_size;
	uint16_t num_free;
	struct qdf_tso_num_seg_elem_t *freelist;
	/* TSO mutex */
	qdf_spinlock_t lock;
};

/**
 * struct dp_tx_desc_pool_s - Tx Descriptor pool information
 * @elem_size: Size of each descriptor in the pool
 * @pool_size: Total number of descriptors in the pool
 * @num_free: Number of free descriptors
 * @num_allocated: Number of used descriptors
 * @freelist: Chain of free descriptors
 * @desc_pages: multiple page allocation information for actual descriptors
 * @num_invalid_bin: Deleted pool with pending Tx completions.
 * @flow_pool_array_lock: Lock when operating on flow_pool_array.
 * @flow_pool_array: List of allocated flow pools
 * @lock: Lock for descriptor allocation/free from/to the pool
 */
struct dp_tx_desc_pool_s {
	uint16_t elem_size;
	uint32_t num_allocated;
	struct dp_tx_desc_s *freelist;
	struct qdf_mem_multi_page_t desc_pages;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	uint16_t pool_size;
	uint8_t flow_pool_id;
	uint8_t num_invalid_bin;
	uint16_t avail_desc;
	enum flow_pool_status status;
	enum htt_flow_type flow_type;
	uint16_t stop_th;
	uint16_t start_th;
	uint16_t pkt_drop_no_desc;
	qdf_spinlock_t flow_pool_lock;
	void *pool_owner_ctx;
#else
	uint16_t elem_count;
	uint32_t num_free;
	qdf_spinlock_t lock;
#endif
};

/**
 * struct dp_txrx_pool_stats - flow pool related statistics
 * @pool_map_count: flow pool map received
 * @pool_unmap_count: flow pool unmap received
 * @pkt_drop_no_pool: packets dropped due to unavailability of pool
 */
struct dp_txrx_pool_stats {
	uint16_t pool_map_count;
	uint16_t pool_unmap_count;
	uint16_t pkt_drop_no_pool;
};

struct dp_srng {
	void *hal_srng;		/* opaque HAL ring handle */
	void *base_vaddr_unaligned;	/* base VA of the (unaligned) ring allocation */
	qdf_dma_addr_t base_paddr_unaligned; /* matching base PA */
	uint32_t alloc_size;	/* allocated size in bytes */
	int irq;		/* IRQ for this ring, if applicable */
	uint32_t num_entries;	/* ring size in entries */
};

struct dp_rx_reorder_array_elem {
	qdf_nbuf_t head;
	qdf_nbuf_t tail;
};

#define DP_RX_BA_INACTIVE 0
#define DP_RX_BA_ACTIVE 1
struct dp_reo_cmd_info {
	uint16_t cmd;
	enum hal_reo_cmd_type cmd_type;
	void *data;
	void (*handler)(struct dp_soc *, void *, union hal_reo_status *);
	TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
};

/* Rx TID */
struct dp_rx_tid {
	/* TID */
	int tid;

	/* Num of addba requests */
	uint32_t num_of_addba_req;

	/* Num of addba responses */
	uint32_t num_of_addba_resp;

	/* Num of delba requests */
	uint32_t num_of_delba_req;

	/* pn size */
	uint8_t pn_size;
	/* REO TID queue descriptors */
	void *hw_qdesc_vaddr_unaligned;
	qdf_dma_addr_t hw_qdesc_paddr_unaligned;
	qdf_dma_addr_t hw_qdesc_paddr;
	uint32_t hw_qdesc_alloc_size;

	/* RX ADDBA session state */
	int ba_status;

	/* RX BA window size */
	uint16_t ba_win_size;

	/* TODO: Check the following while adding defragmentation support */
	struct dp_rx_reorder_array_elem *array;
	/* base - single rx reorder element used for non-aggr cases */
	struct dp_rx_reorder_array_elem base;

	/* only used for defrag right now */
	TAILQ_ENTRY(dp_rx_tid) defrag_waitlist_elem;

	/* Store dst desc for reinjection */
	void *dst_ring_desc;

	/* Sequence and fragments that are being processed currently */
	uint32_t curr_seq_num;
	uint32_t curr_frag_num;

	uint32_t defrag_timeout_ms;
	uint16_t dialogtoken;
	uint16_t statuscode;
	/* user defined ADDBA response status code */
	uint16_t userstatuscode;
};

/* per interrupt context */
struct dp_intr {
	uint8_t tx_ring_mask;   /* WBM Tx completion rings (0-2)
				associated with this napi context */
	uint8_t rx_ring_mask;   /* Rx REO rings (0-3) associated
				with this interrupt context */
	uint8_t rx_mon_ring_mask;  /* Rx monitor ring mask (0-2) */
	uint8_t rx_err_ring_mask; /* REO Exception Ring */
	uint8_t rx_wbm_rel_ring_mask; /* WBM2SW Rx Release Ring */
	uint8_t reo_status_ring_mask; /* REO command response ring */
	uint8_t rxdma2host_ring_mask; /* RXDMA to host destination ring */
	uint8_t host2rxdma_ring_mask; /* Host to RXDMA buffer ring */
	struct dp_soc *soc;    /* Reference to SoC structure,
				to get DMA ring handles */
	qdf_lro_ctx_t lro_ctx;
	uint8_t dp_intr_id;
};

#define REO_DESC_FREELIST_SIZE 64
#define REO_DESC_FREE_DEFER_MS 1000
struct reo_desc_list_node {
	qdf_list_node_t node;
	unsigned long free_ts;
	struct dp_rx_tid rx_tid;
};

/* SoC level data path statistics */
struct dp_soc_stats {
	struct {
		uint32_t added;
		uint32_t deleted;
		uint32_t aged_out;
	} ast;

	/* SOC level TX stats */
	struct {
		/* packets dropped on tx because of no peer */
		struct cdp_pkt_info tx_invalid_peer;
		/* TCL ring full occurrences, per ring */
		uint32_t tcl_ring_full[MAX_TCL_DATA_RINGS];
		/* Descriptors in use at soc */
		uint32_t desc_in_use;
		/* tqm_release_reason == FW removed */
		uint32_t dropped_fw_removed;

	} tx;

	/* SOC level RX stats */
	struct {
		/* Rx errors */
		/* Total Packets in Rx Error ring */
		uint32_t err_ring_pkts;
		/* No of Fragments */
		uint32_t rx_frags;
		struct {
			/* Invalid RBM error count */
			uint32_t invalid_rbm;
			/* Invalid VDEV Error count */
			uint32_t invalid_vdev;
			/* Invalid PDEV error count */
			uint32_t invalid_pdev;
			/* Invalid PEER Error count */
			struct cdp_pkt_info rx_invalid_peer;
			/* HAL ring access Fail error count */
			uint32_t hal_ring_access_fail;
			/* RX DMA error count */
			uint32_t rxdma_error[HAL_RXDMA_ERR_MAX];
			/* REO Error count */
			uint32_t reo_error[HAL_REO_ERR_MAX];
			/* HAL REO error count, per REO destination ring */
			uint32_t hal_reo_error[MAX_REO_DEST_RINGS];
		} err;

		/* packet count per core - per ring */
		uint64_t ring_packets[NR_CPUS][MAX_REO_DEST_RINGS];
	} rx;
};

union dp_align_mac_addr {
	uint8_t raw[DP_MAC_ADDR_LEN];
	struct {
		uint16_t bytes_ab;
		uint16_t bytes_cd;
		uint16_t bytes_ef;
	} align2;
	struct {
		uint32_t bytes_abcd;
		uint16_t bytes_ef;
	} align4;
	struct {
		uint16_t bytes_ab;
		uint32_t bytes_cdef;
	} align4_2;
};
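
/*
 * A minimal sketch of what this union enables (illustrative; the
 * driver's own comparison helper lives elsewhere): comparing MAC
 * addresses word-at-a-time instead of byte-at-a-time, assuming both
 * operands are at least 2-byte aligned:
 *
 *	static inline int dp_mac_addr_is_equal(union dp_align_mac_addr *a,
 *					       union dp_align_mac_addr *b)
 *	{
 *		return !((a->align2.bytes_ab ^ b->align2.bytes_ab) |
 *			 (a->align2.bytes_cd ^ b->align2.bytes_cd) |
 *			 (a->align2.bytes_ef ^ b->align2.bytes_ef));
 *	}
 */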

/*
 * dp_ast_entry
 *
 * @ast_idx: Hardware AST Index
 * @mac_addr:  MAC Address for this AST entry
 * @peer: Next Hop peer (for non-WDS nodes, this will point to the
 *        associated peer with this MAC address)
 * @next_hop: Set to 1 if this is for a WDS node
 * @is_active: flag to indicate active data traffic on this node
 *             (used for aging out/expiry)
 * @ase_list_elem: node in peer AST list
 * @is_bss: flag to indicate if entry corresponds to bss peer
 * @pdev_id: pdev ID
 * @vdev_id: vdev ID
 * @type: flag to indicate type of the entry (static/WDS/MEC)
 * @hash_list_elem: node in soc AST hash list (mac address used as hash)
 */
struct dp_ast_entry {
	uint16_t ast_idx;
	/* MAC address */
	union dp_align_mac_addr mac_addr;
	struct dp_peer *peer;
	bool next_hop;
	bool is_active;
	bool is_bss;
	uint8_t pdev_id;
	uint8_t vdev_id;
	enum cdp_txrx_ast_entry_type type;
	TAILQ_ENTRY(dp_ast_entry) ase_list_elem;
	TAILQ_ENTRY(dp_ast_entry) hash_list_elem;
};

/* SOC level htt stats */
struct htt_t2h_stats {
	/* lock to protect htt_stats_msg update */
	qdf_spinlock_t lock;

	/* work queue to process htt stats */
	qdf_work_t work;

	/* T2H Ext stats message queue */
	qdf_nbuf_queue_t msg;

	/* number of completed stats in htt_stats_msg */
	uint32_t num_stats;
};

/* SOC level structure for data path */
struct dp_soc {
	/* Common base structure - Should be the first member */
	struct cdp_soc_t cdp_soc;

	/* SoC Obj */
	void *ctrl_psoc;

	/* OS device abstraction */
	qdf_device_t osdev;

	/* WLAN config context */
	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx;

	/* HTT handle for host-fw interaction */
	void *htt_handle;

	/* Common init done */
	qdf_atomic_t cmn_init_done;

	/* Opaque hif handle */
	struct hif_opaque_softc *hif_handle;

	/* PDEVs on this SOC */
	struct dp_pdev *pdev_list[MAX_PDEV_CNT];

	/* Number of PDEVs */
	uint8_t pdev_count;

	/* cce disable */
	bool cce_disable;

	/* Link descriptor memory banks */
	struct {
		void *base_vaddr_unaligned;
		void *base_vaddr;
		qdf_dma_addr_t base_paddr_unaligned;
		qdf_dma_addr_t base_paddr;
		uint32_t size;
	} link_desc_banks[MAX_LINK_DESC_BANKS];

	/* Link descriptor Idle list for HW internal use (SRNG mode) */
	struct dp_srng wbm_idle_link_ring;

	/* Link descriptor Idle list for HW internal use (scatter buffer mode)
	 */
	qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS];
	void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS];
	uint32_t wbm_idle_scatter_buf_size;

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	qdf_spinlock_t flow_pool_array_lock;
	tx_pause_callback pause_cb;
	struct dp_txrx_pool_stats pool_stats;
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
	/* Tx SW descriptor pool */
	struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS];

	/* Tx MSDU Extension descriptor pool */
	struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS];

	/* Tx TSO descriptor pool */
	struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS];

	/* Tx TSO Num of segments pool */
	struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS];

	/* Tx H/W queues lock */
	qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];

	/* Rx SW descriptor pool for RXDMA buffer */
	struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS];

	/* Rx SW descriptor pool for RXDMA monitor buffer */
	struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS];

	/* Rx SW descriptor pool for RXDMA status buffer */
	struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS];

	/* DP rx desc lock */
	DP_MUTEX_TYPE rx_desc_mutex[MAX_RXDESC_POOLS];

	/* HAL SOC handle */
	void *hal_soc;

	/* DP Interrupts */
	struct dp_intr intr_ctx[DP_MAX_INTERRUPT_CONTEXTS];

	/* REO destination rings */
	struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS];

	/* Number of REO destination rings */
	uint8_t num_reo_dest_rings;

	/* REO exception ring - TODO: see if this should be combined with
	 * reo_dest_ring
	 */
	struct dp_srng reo_exception_ring;

	/* REO reinjection ring */
	struct dp_srng reo_reinject_ring;

	/* REO command ring */
	struct dp_srng reo_cmd_ring;

	/* REO command status ring */
	struct dp_srng reo_status_ring;

	/* WBM Rx release ring */
	struct dp_srng rx_rel_ring;

	/* Number of TCL data rings */
	uint8_t num_tcl_data_rings;

	/* TCL data ring */
	struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS];

	/* TCL command ring */
	struct dp_srng tcl_cmd_ring;

	/* TCL command status ring */
	struct dp_srng tcl_status_ring;

	/* WBM Tx completion rings */
	struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS];

	/* Common WBM link descriptor release ring (SW to WBM) */
	struct dp_srng wbm_desc_rel_ring;

	/* Tx ring map for interrupt processing */
	uint8_t tx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];

	/* Rx ring map for interrupt processing */
	uint8_t rx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];

	/* peer ID to peer object map (array of pointers to peer objects) */
	struct dp_peer **peer_id_to_obj_map;

	struct {
		unsigned mask;
		unsigned idx_bits;
		TAILQ_HEAD(, dp_peer) * bins;
	} peer_hash;

	/* rx defrag state - TBD: do we need this per radio? */
	struct {
		struct {
			TAILQ_HEAD(, dp_rx_tid) waitlist;
			uint32_t timeout_ms;
		} defrag;
		struct {
			int defrag_timeout_check;
			int dup_check;
		} flags;
		TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list;
		qdf_spinlock_t reo_cmd_lock;
	} rx;

	/* optional rx processing function */
	void (*rx_opt_proc)(
		struct dp_vdev *vdev,
		struct dp_peer *peer,
		unsigned tid,
		qdf_nbuf_t msdu_list);

	/* pool addr for mcast enhance buff */
	struct {
		int size;
		uint32_t paddr;
		uint32_t *vaddr;
		struct dp_tx_me_buf_t *freelist;
		int buf_in_use;
		qdf_dma_mem_context(memctx);
	} me_buf;

	/**
	 * peer ref mutex:
	 * 1. Protect peer object lookups until the returned peer object's
	 *	reference count is incremented.
	 * 2. Provide mutex when accessing peer object lookup structures.
	 */
	DP_MUTEX_TYPE peer_ref_mutex;

	/* maximum value for peer_id */
	int max_peers;

	/* SoC level data path statistics */
	struct dp_soc_stats stats;

	/* Enable processing of Tx completion status words */
	bool process_tx_status;
	bool process_rx_status;
	struct dp_ast_entry *ast_table[WLAN_UMAC_PSOC_MAX_PEERS * 2];
	struct {
		unsigned mask;
		unsigned idx_bits;
		TAILQ_HEAD(, dp_ast_entry) * bins;
	} ast_hash;

	qdf_spinlock_t ast_lock;
	qdf_timer_t wds_aging_timer;

	/* interrupt timer */
	qdf_timer_t mon_reap_timer;
	uint8_t reap_timer_init;
	qdf_timer_t int_timer;
	uint8_t intr_mode;

	qdf_list_t reo_desc_freelist;
	qdf_spinlock_t reo_desc_freelist_lock;

#ifdef QCA_SUPPORT_SON
	/* The timer to check station's inactivity status */
	os_timer_t pdev_bs_inact_timer;
	/* The current inactivity count reload value
	   based on overload condition */
	u_int16_t pdev_bs_inact_reload;

	/* The inactivity timer value when not overloaded */
	u_int16_t pdev_bs_inact_normal;

	/* The inactivity timer value when overloaded */
	u_int16_t pdev_bs_inact_overload;

	/* The inactivity timer check interval */
	u_int16_t pdev_bs_inact_interval;
	/* Inactivity timer */
#endif /* QCA_SUPPORT_SON */

	/* htt stats */
	struct htt_t2h_stats htt_stats;

	void *external_txrx_handle; /* External data path handle */
#ifdef IPA_OFFLOAD
	/* IPA uC datapath offload Wlan Tx resources */
	struct {
		/* Resource info to be passed to IPA */
		qdf_dma_addr_t ipa_tcl_ring_base_paddr;
		void *ipa_tcl_ring_base_vaddr;
		uint32_t ipa_tcl_ring_size;
		qdf_dma_addr_t ipa_tcl_hp_paddr;
		uint32_t alloc_tx_buf_cnt;

		qdf_dma_addr_t ipa_wbm_ring_base_paddr;
		void *ipa_wbm_ring_base_vaddr;
		uint32_t ipa_wbm_ring_size;
		qdf_dma_addr_t ipa_wbm_tp_paddr;

		/* TX buffers populated into the WBM ring */
		void **tx_buf_pool_vaddr_unaligned;
		qdf_dma_addr_t *tx_buf_pool_paddr_unaligned;
	} ipa_uc_tx_rsc;

	/* IPA uC datapath offload Wlan Rx resources */
	struct {
		/* Resource info to be passed to IPA */
		qdf_dma_addr_t ipa_reo_ring_base_paddr;
		void *ipa_reo_ring_base_vaddr;
		uint32_t ipa_reo_ring_size;
		qdf_dma_addr_t ipa_reo_tp_paddr;

		/* Resource info to be passed to firmware and IPA */
		qdf_dma_addr_t ipa_rx_refill_buf_ring_base_paddr;
		void *ipa_rx_refill_buf_ring_base_vaddr;
		uint32_t ipa_rx_refill_buf_ring_size;
		qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr;
	} ipa_uc_rx_rsc;
#endif
};

#ifdef IPA_OFFLOAD
/**
 * struct dp_ipa_resources - resources needed for IPA datapath offload
 */
struct dp_ipa_resources {
	qdf_dma_addr_t tx_ring_base_paddr;
	uint32_t tx_ring_size;
	uint32_t tx_num_alloc_buffer;

	qdf_dma_addr_t tx_comp_ring_base_paddr;
	uint32_t tx_comp_ring_size;

	qdf_dma_addr_t rx_rdy_ring_base_paddr;
	uint32_t rx_rdy_ring_size;

	qdf_dma_addr_t rx_refill_ring_base_paddr;
	uint32_t rx_refill_ring_size;

	/* IPA UC doorbell registers paddr */
	qdf_dma_addr_t tx_comp_doorbell_paddr;
	uint32_t *tx_comp_doorbell_vaddr;
	qdf_dma_addr_t rx_ready_doorbell_paddr;
};
#endif

#define MAX_RX_MAC_RINGS 2
/* Same as NAC_MAX_CLIENT */
#define DP_NAC_MAX_CLIENT  24

/*
 * Macros to setup link descriptor cookies - for link descriptors, we just
 * need the first 3 bits to store the bank ID. The remaining bits are used
 * to set a unique ID, which will be useful in debugging.
 */
#define LINK_DESC_BANK_ID_MASK 0x7
#define LINK_DESC_ID_SHIFT 3
#define LINK_DESC_ID_START 0x8000

#define LINK_DESC_COOKIE(_desc_id, _bank_id) \
	((((_desc_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_bank_id))

#define LINK_DESC_COOKIE_BANK_ID(_cookie) \
	((_cookie) & LINK_DESC_BANK_ID_MASK)
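
/*
 * Example (illustrative): for descriptor 5 in bank 2,
 * LINK_DESC_COOKIE(5, 2) == (((5 + 0x8000) << 3) | 2) == 0x4002a, and
 * LINK_DESC_COOKIE_BANK_ID(0x4002a) == 2.
 */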

/* same as ieee80211_nac_param */
enum dp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	DP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	DP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	DP_NAC_PARAM_LIST,
};

/**
 * struct dp_neighbour_peer - neighbour peer list type for smart mesh
 * @neighbour_peers_macaddr: neighbour peer's mac address
 * @neighbour_peer_list_elem: neighbour peer list TAILQ element
 */
struct dp_neighbour_peer {
	/* MAC address of neighbour's peer */
	union dp_align_mac_addr neighbour_peers_macaddr;
	/* node in the list of neighbour's peer */
	TAILQ_ENTRY(dp_neighbour_peer) neighbour_peer_list_elem;
};

/* PDEV level structure for data path */
struct dp_pdev {
	/* PDEV handle from OSIF layer TBD: see if we really need osif_pdev */
	void *osif_pdev;

	/* PDEV Id */
	int pdev_id;

	/* TXRX SOC handle */
	struct dp_soc *soc;

	/* Ring used to replenish rx buffers (maybe by the firmware or MAC) */
	struct dp_srng rx_refill_buf_ring;

	/* Second ring used to replenish rx buffers */
	struct dp_srng rx_refill_buf_ring2;

	/* Empty ring used by firmware to post rx buffers to the MAC */
	struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];

	/* wlan_cfg pdev ctxt */
	struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx;

	/* RXDMA monitor buffer replenish ring */
	struct dp_srng rxdma_mon_buf_ring;

	/* RXDMA monitor destination ring */
	struct dp_srng rxdma_mon_dst_ring;

	/* RXDMA monitor status ring. TBD: Check format of this ring */
	struct dp_srng rxdma_mon_status_ring;

	/* RXDMA monitor descriptor ring */
	struct dp_srng rxdma_mon_desc_ring;

	/* RXDMA error destination ring */
	struct dp_srng rxdma_err_dst_ring[MAX_RX_MAC_RINGS];

	/* Link descriptor memory banks */
	struct {
		void *base_vaddr_unaligned;
		void *base_vaddr;
		qdf_dma_addr_t base_paddr_unaligned;
		qdf_dma_addr_t base_paddr;
		uint32_t size;
	} link_desc_banks[MAX_MON_LINK_DESC_BANKS];


	/**
	 * TODO: See if we need a ring map here for LMAC rings.
	 * 1. Monitor rings are currently planned to be processed on receiving
	 * PPDU end interrupts and hence won't need ring based interrupts.
	 * 2. Rx buffer rings will be replenished during REO destination
	 * processing and don't require regular interrupt handling - we will
	 * only handle low water mark interrupts, which are not expected
	 * frequently
	 */

	/* VDEV list */
	TAILQ_HEAD(, dp_vdev) vdev_list;

	/* Number of vdevs this device has */
	uint16_t vdev_count;

	/* PDEV transmit lock */
	qdf_spinlock_t tx_lock;

#ifndef REMOVE_PKT_LOG
	bool pkt_log_init;
	/* Pktlog pdev */
	struct pktlog_dev_t *pl_dev;
#endif /* #ifndef REMOVE_PKT_LOG */

	/* Monitor mode interface and status storage */
	struct dp_vdev *monitor_vdev;

	/* monitor mode mutex */
	qdf_spinlock_t mon_mutex;

	/* tx_mutex for me */
	DP_MUTEX_TYPE tx_mutex;

	/* Smart Mesh */
	bool filter_neighbour_peers;
	/* smart mesh mutex */
	qdf_spinlock_t neighbour_peer_mutex;
	/* Neighbour peer list */
	TAILQ_HEAD(, dp_neighbour_peer) neighbour_peers_list;
	/* msdu chain head & tail */
	qdf_nbuf_t invalid_peer_head_msdu;
	qdf_nbuf_t invalid_peer_tail_msdu;

	/* Band steering */
	/* TBD */

	/* PDEV level data path statistics */
	struct cdp_pdev_stats stats;

	/* Global RX decap mode for the device */
	enum htt_pkt_type rx_decap_mode;

	/* Enhanced Stats is enabled */
	bool enhanced_stats_en;

	/* advanced filter mode and type */
	uint8_t mon_filter_mode;
	uint16_t fp_mgmt_filter;
	uint16_t fp_ctrl_filter;
	uint16_t fp_data_filter;
	uint16_t mo_mgmt_filter;
	uint16_t mo_ctrl_filter;
	uint16_t mo_data_filter;

	qdf_atomic_t num_tx_outstanding;

	qdf_atomic_t num_tx_exception;

	/* MCL specific local peer handle */
	struct {
		uint8_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
		uint8_t freelist;
		qdf_spinlock_t lock;
		struct dp_peer *map[OL_TXRX_NUM_LOCAL_PEER_IDS];
	} local_peer_ids;

	/* dscp_tid_map */
	uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX];
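
	/*
	 * Mapping sketch (illustrative; "tos" stands for the IPv4 TOS
	 * byte of an outgoing packet and "map_id" for the vdev's
	 * dscp_tid_map_id):
	 *
	 *	uint8_t dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
	 *	uint8_t tid = pdev->dscp_tid_map[map_id][dscp];
	 */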

	struct hal_rx_ppdu_info ppdu_info;

	/* operating channel */
	uint8_t operating_channel;

	qdf_nbuf_queue_t rx_status_q;
	uint32_t mon_ppdu_status;
	struct cdp_mon_status rx_mon_recv_status;

	/* pool addr for mcast enhance buff */
	struct {
		int size;
		uint32_t paddr;
		char *vaddr;
		struct dp_tx_me_buf_t *freelist;
		int buf_in_use;
		qdf_dma_mem_context(memctx);
	} me_buf;

	/* Number of VAPs with mcast enhancement enabled */
	qdf_atomic_t mc_num_vap_attached;

	qdf_atomic_t stats_cmd_complete;

#ifdef IPA_OFFLOAD
	ipa_uc_op_cb_type ipa_uc_op_cb;
	void *usr_ctxt;
	struct dp_ipa_resources ipa_resource;
#endif

	/* TBD */

	/* map this pdev to a particular Reo Destination ring */
	enum cdp_host_reo_dest_ring reo_dest;

#ifndef REMOVE_PKT_LOG
	/* Packet log mode */
	uint8_t rx_pktlog_mode;
#endif

	/* WDI event handlers */
	struct wdi_event_subscribe_t **wdi_event_list;

	/* ppdu_id of last received HTT TX stats */
	uint32_t last_ppdu_id;
	struct {
		uint8_t last_user;
		qdf_nbuf_t buf;
	} tx_ppdu_info;

	bool tx_sniffer_enable;
	/* mirror copy mode */
	bool mcopy_mode;

	struct {
		uint16_t tx_ppdu_id;
		uint16_t tx_peer_id;
		uint16_t rx_ppdu_id;
	} m_copy_id;

	/* To check if PPDU Tx stats are enabled for Pktlog */
	bool pktlog_ppdu_stats;

	void *dp_txrx_handle; /* Advanced data path handle */

#ifdef ATH_SUPPORT_NAC_RSSI
	bool nac_rssi_filtering;
#endif
};

struct dp_peer;

/* VDEV structure for data path state */
struct dp_vdev {
	/* OS device abstraction */
	qdf_device_t osdev;
	/* physical device that is the parent of this virtual device */
	struct dp_pdev *pdev;

	/* Handle to the OS shim SW's virtual device */
	ol_osif_vdev_handle osif_vdev;

	/* vdev_id - ID used to specify a particular vdev to the target */
	uint8_t vdev_id;

	/* MAC address */
	union dp_align_mac_addr mac_addr;

	/* node in the pdev's list of vdevs */
	TAILQ_ENTRY(dp_vdev) vdev_list_elem;

	/* dp_peer list */
	TAILQ_HEAD(, dp_peer) peer_list;

	/* callback to hand rx frames to the OS shim */
	ol_txrx_rx_fp osif_rx;
	ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap;
	ol_txrx_get_key_fp osif_get_key;
	ol_txrx_tx_free_ext_fp osif_tx_free_ext;

#ifdef notyet
	/* callback to check if the msdu is a WAI (WAPI) frame */
	ol_rx_check_wai_fp osif_check_wai;
#endif

	/* proxy arp function */
	ol_txrx_proxy_arp_fp osif_proxy_arp;

	/* callback to hand rx monitor 802.11 MPDU to the OS shim */
	ol_txrx_rx_mon_fp osif_rx_mon;

	ol_txrx_mcast_me_fp me_convert;
	/* deferred vdev deletion state */
	struct {
		/* VDEV delete pending */
		int pending;
		/*
		 * callback and a context argument to provide a
		 * notification for when the vdev is deleted.
		 */
		ol_txrx_vdev_delete_cb callback;
		void *context;
	} delete;

	/* tx data delivery notification callback function */
	struct {
		ol_txrx_data_tx_cb func;
		void *ctxt;
	} tx_non_std_data_callback;


	/* safe mode control to bypass the encrypt and decipher process */
	uint32_t safemode;

	/* rx filter related */
	uint32_t drop_unenc;
#ifdef notyet
	privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
	uint32_t filters_num;
#endif
	/* TDLS Link status */
	bool tdls_link_connected;
	bool is_tdls_frame;


	/* VDEV operating mode */
	enum wlan_op_mode opmode;

	/* Tx encapsulation type for this VAP */
	enum htt_cmn_pkt_type tx_encap_type;
	/* Rx Decapsulation type for this VAP */
	enum htt_cmn_pkt_type rx_decap_type;

	/* BSS peer */
	struct dp_peer *vap_bss_peer;

	/* WDS enabled */
	bool wds_enabled;

	/* WDS Aging timer period */
	uint32_t wds_aging_timer_val;

	/* NAWDS enabled */
	bool nawds_enabled;

	/* Default HTT meta data for this VDEV */
	/* TBD: check alignment constraints */
	uint16_t htt_tcl_metadata;

	/* Mesh mode vdev */
	uint32_t mesh_vdev;

	/* Mesh mode rx filter setting */
	uint32_t mesh_rx_filter;

	/* DSCP-TID mapping table ID */
	uint8_t dscp_tid_map_id;

	/* Multicast enhancement enabled */
	uint8_t mcast_enhancement_en;

	/* per vdev rx nbuf queue */
	qdf_nbuf_queue_t rxq;

	uint8_t tx_ring_id;
	struct dp_tx_desc_pool_s *tx_desc;
	struct dp_tx_ext_desc_pool_s *tx_ext_desc;

	/* VDEV Stats */
	struct cdp_vdev_stats stats;
	bool lro_enable;

	/* Is this a proxySTA VAP */
	bool proxysta_vdev;
	/* Is isolation mode enabled */
	bool isolation_vdev;

	/* Address search flags to be configured in HAL descriptor */
	uint8_t hal_desc_addr_search_flags;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	struct dp_tx_desc_pool_s *pool;
#endif
	/* AP BRIDGE enabled */
	uint32_t ap_bridge_enabled;

	enum cdp_sec_type  sec_type;

#ifdef ATH_SUPPORT_NAC_RSSI
	bool cdp_nac_rssi_enabled;
	struct {
		uint8_t bssid_mac[6];
		uint8_t client_mac[6];
		uint8_t  chan_num;
		uint8_t client_rssi_valid;
		uint8_t client_rssi;
		uint8_t vdev_id;
	} cdp_nac_rssi;
#endif
};


enum {
	dp_sec_mcast = 0,
	dp_sec_ucast
};

#ifdef WDS_VENDOR_EXTENSION
typedef struct {
	uint8_t	wds_tx_mcast_4addr:1,
		wds_tx_ucast_4addr:1,
		wds_rx_filter:1,      /* enforce rx filter */
		wds_rx_ucast_4addr:1, /* when set, accept 4addr unicast frames */
		wds_rx_mcast_4addr:1; /* when set, accept 4addr multicast frames */

} dp_ecm_policy;
#endif

/* Peer structure for data path state */
struct dp_peer {
	/* VDEV to which this peer is associated */
	struct dp_vdev *vdev;

	struct dp_ast_entry *self_ast_entry;

	qdf_atomic_t ref_cnt;

	/* TODO: See if multiple peer IDs are required in wifi3.0 */
	/* peer ID(s) for this peer */
	uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];

	union dp_align_mac_addr mac_addr;

	/* node in the vdev's list of peers */
	TAILQ_ENTRY(dp_peer) peer_list_elem;
	/* node in the hash table bin's list of peers */
	TAILQ_ENTRY(dp_peer) hash_list_elem;

	/* TID structures */
	struct dp_rx_tid rx_tid[DP_MAX_TIDS];

	/* TBD: No transmit TID state required? */

	struct {
		enum htt_sec_type sec_type;
		u_int32_t michael_key[2]; /* relevant for TKIP */
	} security[2]; /* 0 -> multicast, 1 -> unicast */

	/*
	 * rx proc function: this either is a copy of pdev's rx_opt_proc for
	 * regular rx processing, or has been redirected to a /dev/null discard
	 * function when peer deletion is in progress.
	 */
	void (*rx_opt_proc)(struct dp_vdev *vdev, struct dp_peer *peer,
		unsigned tid, qdf_nbuf_t msdu_list);

	/* set when node is authorized */
	uint8_t authorize:1;

	u_int8_t nac;

	/* Band steering: Set when node is inactive */
	uint8_t peer_bs_inact_flag:1;
	u_int16_t peer_bs_inact; /* inactivity mark count */

	/* NAWDS Flag and Bss Peer bit */
	uint8_t nawds_enabled:1,
				bss_peer:1,
				wapi:1,
				wds_enabled:1;

	/* MCL specific peer local id */
	uint16_t local_id;
	enum ol_txrx_peer_state state;
	qdf_spinlock_t peer_info_lock;

	qdf_time_t last_assoc_rcvd;
	qdf_time_t last_disassoc_rcvd;
	qdf_time_t last_deauth_rcvd;
	/* Peer Stats */
	struct cdp_peer_stats stats;

	TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
	/* TBD */

#ifdef WDS_VENDOR_EXTENSION
	dp_ecm_policy wds_ecm;
#endif
	bool delete_in_progress;
};

#ifdef CONFIG_WIN
/*
 * dp_invalid_peer_msg
 * @nbuf: data buffer
 * @wh: 802.11 header
 * @vdev_id: id of vdev
 */
struct dp_invalid_peer_msg {
	qdf_nbuf_t nbuf;
	struct ieee80211_frame *wh;
	uint8_t vdev_id;
};
#endif

/*
 * dp_tx_me_buf_t: ME buffer
 * next: pointer to next buffer
 * data: Destination MAC address
 */
struct dp_tx_me_buf_t {
	/* Note: ME buf pool initialization logic expects next pointer to
	 * be the first element. Don't add anything before next */
	struct dp_tx_me_buf_t *next;
	uint8_t data[DP_MAC_ADDR_LEN];
};
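
/*
 * Freelist sketch (illustrative; relies on "next" being the first
 * member, as the note above requires):
 *
 *	struct dp_tx_me_buf_t *buf = pdev->me_buf.freelist;
 *
 *	if (buf) {
 *		pdev->me_buf.freelist = buf->next;
 *		pdev->me_buf.buf_in_use++;
 *	}
 */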

#endif /* _DP_TYPES_H_ */