/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_TYPES_H_
#define _DP_TYPES_H_

#include <qdf_types.h>
#include <qdf_nbuf.h>
#include <qdf_lock.h>
#include <qdf_atomic.h>
#include <qdf_util.h>
#include <qdf_list.h>
#include <qdf_lro.h>
#include <queue.h>
#include <htt_common.h>

#include <cdp_txrx_cmn.h>
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <linux/ieee80211.h>
#endif

#ifndef CONFIG_WIN
#include <wdi_event_api.h>    /* WDI subscriber event list */
#endif

#include <hal_tx.h>
#include <hal_reo.h>
#include "wlan_cfg.h"
#include "hal_rx.h"
#include <hal_api.h>
#include <hal_api_mon.h>

#define MAX_BW 7
#define MAX_RETRIES 4
#define MAX_RECEPTION_TYPES 4

#ifndef REMOVE_PKT_LOG
#include <pktlog.h>
#endif

#define REPT_MU_MIMO 1
#define REPT_MU_OFDMA_MIMO 3
#define DP_VO_TID 6

#define DP_MAX_INTERRUPT_CONTEXTS 8
#define DP_MAX_TID_MAPS 16 /* MAX TID MAPS AVAILABLE PER PDEV */
#define DSCP_TID_MAP_MAX    (64)
#define DP_IP_DSCP_SHIFT 2
#define DP_IP_DSCP_MASK 0x3f
#define DP_FC0_SUBTYPE_QOS 0x80
#define DP_QOS_TID 0x0f
#define DP_IPV6_PRIORITY_SHIFT 20
#define MAX_MON_LINK_DESC_BANKS 2
#define DP_VDEV_ALL 0xff
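
/*
 * Illustrative sketch (not part of the driver API): how the DSCP defines
 * above are typically combined with a per-pdev DSCP-to-TID map to derive
 * a TID from an IPv4 TOS byte. The parameter names tos/dscp_tid_map are
 * assumptions for the example only.
 */
#if 0	/* example only */
static inline uint8_t
dp_example_tos_to_tid(uint8_t tos, uint8_t dscp_tid_map[DSCP_TID_MAP_MAX])
{
	/* DSCP occupies the upper six bits of the TOS byte */
	uint8_t dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;

	return dscp_tid_map[dscp];
}
#endif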

#if defined(CONFIG_MCL)
#define MAX_PDEV_CNT 1
#else
#define MAX_PDEV_CNT 3
#endif

#define MAX_LINK_DESC_BANKS 8
#define MAX_TXDESC_POOLS 4
#define MAX_RXDESC_POOLS 4
#define MAX_REO_DEST_RINGS 4
#define MAX_TCL_DATA_RINGS 4
#define MAX_IDLE_SCATTER_BUFS 16
#define DP_MAX_IRQ_PER_CONTEXT 12
#define DEFAULT_HW_PEER_ID 0xffff

#define MAX_TX_HW_QUEUES MAX_TCL_DATA_RINGS

#ifndef REMOVE_PKT_LOG
enum rx_pktlog_mode {
	DP_RX_PKTLOG_DISABLED = 0,
	DP_RX_PKTLOG_FULL,
	DP_RX_PKTLOG_LITE,
};
#endif

struct dp_soc_cmn;
struct dp_pdev;
struct dp_vdev;
struct dp_tx_desc_s;
struct dp_soc;
union dp_rx_desc_list_elem_t;

#define DP_PDEV_ITERATE_VDEV_LIST(_pdev, _vdev) \
	TAILQ_FOREACH((_vdev), &(_pdev)->vdev_list, vdev_list_elem)

#define DP_VDEV_ITERATE_PEER_LIST(_vdev, _peer) \
	TAILQ_FOREACH((_peer), &(_vdev)->peer_list, peer_list_elem)

#define DP_PEER_ITERATE_ASE_LIST(_peer, _ase, _temp_ase) \
	TAILQ_FOREACH_SAFE((_ase), &(_peer)->ast_entry_list, ase_list_elem, (_temp_ase))

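/*
 * Illustrative sketch (not part of the driver API): walking the pdev's
 * vdev list and each vdev's peer list with the iteration helpers above.
 * Locking (e.g. vdev_list_lock/peer_ref_mutex) is elided for brevity.
 */
#if 0	/* example only */
static inline void dp_example_walk_peers(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev;
	struct dp_peer *peer;

	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
			/* operate on peer here */
		}
	}
}
#endif
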
#define DP_MUTEX_TYPE qdf_spinlock_t

#define DP_FRAME_IS_MULTICAST(_a)  (*(_a) & 0x01)
#define DP_FRAME_IS_IPV4_MULTICAST(_a)  (*(_a) == 0x01)

#define DP_FRAME_IS_IPV6_MULTICAST(_a)         \
    ((_a)[0] == 0x33 &&                         \
     (_a)[1] == 0x33)

#define DP_FRAME_IS_BROADCAST(_a)              \
    ((_a)[0] == 0xff &&                         \
     (_a)[1] == 0xff &&                         \
     (_a)[2] == 0xff &&                         \
     (_a)[3] == 0xff &&                         \
     (_a)[4] == 0xff &&                         \
     (_a)[5] == 0xff)
#define DP_FRAME_IS_SNAP(_llc) ((_llc)->llc_dsap == 0xaa && \
		(_llc)->llc_ssap == 0xaa && \
		(_llc)->llc_un.type_snap.control == 0x3)
#define DP_FRAME_IS_LLC(typeorlen) ((typeorlen) >= 0x600)
#define DP_FRAME_FC0_TYPE_MASK 0x0c
#define DP_FRAME_FC0_TYPE_DATA 0x08
#define DP_FRAME_IS_DATA(_frame) \
	(((_frame)->i_fc[0] & DP_FRAME_FC0_TYPE_MASK) == DP_FRAME_FC0_TYPE_DATA)

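/*
 * Illustrative sketch (not part of the driver API): classifying a frame
 * by its destination MAC address with the helpers above. The checks are
 * ordered so that broadcast (which also has the multicast bit set) is
 * detected first.
 */
#if 0	/* example only */
static inline const char *dp_example_classify_da(uint8_t *da)
{
	if (DP_FRAME_IS_BROADCAST(da))
		return "broadcast";
	if (DP_FRAME_IS_IPV6_MULTICAST(da))
		return "ipv6-multicast";
	if (DP_FRAME_IS_MULTICAST(da))
		return "multicast";
	return "unicast";
}
#endif
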
/**
 * Macros to convert HW MAC ID to SW MAC ID:
 * MAC IDs used by hardware start from a value of 1 while
 * those in host software start from a value of 0. Use the
 * macros below to convert between MAC IDs used by software and
 * hardware.
 */
#define DP_SW2HW_MACID(id) ((id) + 1)
#define DP_HW2SW_MACID(id) ((id) > 0 ? ((id) - 1) : 0)
#define DP_MAC_ADDR_LEN 6

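/*
 * Worked example (illustrative): DP_SW2HW_MACID(0) == 1 maps the first
 * host MAC to HW MAC ID 1 and DP_HW2SW_MACID(1) == 0 maps it back; a HW
 * MAC ID of 0 is clamped to SW MAC ID 0 rather than going negative.
 */
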
/**
 * enum dp_intr_mode
 * @DP_INTR_LEGACY: Legacy/Line interrupts, for WIN
 * @DP_INTR_MSI: MSI interrupts, for MCL
 * @DP_INTR_POLL: Polling
 */
enum dp_intr_mode {
	DP_INTR_LEGACY = 0,
	DP_INTR_MSI,
	DP_INTR_POLL,
};

/**
 * enum dp_tx_frm_type
 * @dp_tx_frm_std: Regular frame, no added header fragments
 * @dp_tx_frm_tso: TSO segment, with a modified IP header added
 * @dp_tx_frm_sg: SG segment
 * @dp_tx_frm_audio: Audio frames, a custom LLC/SNAP header added
 * @dp_tx_frm_me: Multicast to Unicast Converted frame
 * @dp_tx_frm_raw: Raw Frame
 */
enum dp_tx_frm_type {
	dp_tx_frm_std = 0,
	dp_tx_frm_tso,
	dp_tx_frm_sg,
	dp_tx_frm_audio,
	dp_tx_frm_me,
	dp_tx_frm_raw,
};

/**
 * enum dp_ast_type
 * @dp_ast_type_wds: WDS peer AST entry type
 * @dp_ast_type_static: static AST entry type
 * @dp_ast_type_mec: Multicast echo AST entry type
 */
enum dp_ast_type {
	dp_ast_type_wds = 0,
	dp_ast_type_static,
	dp_ast_type_mec,
};

/**
 * enum dp_nss_cfg
 * @dp_nss_cfg_default: No radios are offloaded
 * @dp_nss_cfg_first_radio: First radio offloaded
 * @dp_nss_cfg_second_radio: Second radio offloaded
 * @dp_nss_cfg_dbdc: Dual radios offloaded
 */
enum dp_nss_cfg {
	dp_nss_cfg_default,
	dp_nss_cfg_first_radio,
	dp_nss_cfg_second_radio,
	dp_nss_cfg_dbdc,
};

/**
 * struct rx_desc_pool
 * @pool_size: number of RX descriptors in the pool
 * @array: pointer to the array of RX descriptors
 * @freelist: pointer to the free RX descriptor link list
 * @lock: protection for the RX descriptor pool
 * @owner: owner for nbuf
 */
struct rx_desc_pool {
	uint32_t pool_size;
	union dp_rx_desc_list_elem_t *array;
	union dp_rx_desc_list_elem_t *freelist;
	qdf_spinlock_t lock;
	uint8_t owner;
};

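/*
 * Illustrative sketch (not part of the driver API): popping one
 * descriptor off the pool freelist under the pool lock. The real
 * replenish path takes descriptors in batches; the next_free field used
 * here is an assumption standing in for the freelist linkage inside
 * union dp_rx_desc_list_elem_t.
 */
#if 0	/* example only */
static inline union dp_rx_desc_list_elem_t *
dp_example_rx_desc_pop(struct rx_desc_pool *pool)
{
	union dp_rx_desc_list_elem_t *elem;

	qdf_spin_lock_bh(&pool->lock);
	elem = pool->freelist;
	if (elem)
		pool->freelist = elem->next_free;	/* assumed field */
	qdf_spin_unlock_bh(&pool->lock);

	return elem;
}
#endif
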
/**
 * struct dp_tx_ext_desc_elem_s
 * @next: next extension descriptor pointer
 * @vaddr: hlos virtual address pointer
 * @paddr: physical address pointer for descriptor
 */
struct dp_tx_ext_desc_elem_s {
	struct dp_tx_ext_desc_elem_s *next;
	void *vaddr;
	qdf_dma_addr_t paddr;
};

/**
 * struct dp_tx_ext_desc_pool_s - Tx Extension Descriptor Pool
 * @elem_count: Number of descriptors in the pool
 * @elem_size: Size of each descriptor
 * @num_free: Number of free descriptors
 * @desc_pages: multiple page allocation information for actual descriptors
 * @link_elem_size: size of the link descriptor in cacheable memory used for
 * 		    chaining the extension descriptors
 * @desc_link_pages: multiple page allocation information for link descriptors
 * @freelist: first free extension descriptor in the chain
 * @lock: lock for accessing the pool
 * @memctx: DMA memory context
 */
struct dp_tx_ext_desc_pool_s {
	uint16_t elem_count;
	int elem_size;
	uint16_t num_free;
	struct qdf_mem_multi_page_t desc_pages;
	int link_elem_size;
	struct qdf_mem_multi_page_t desc_link_pages;
	struct dp_tx_ext_desc_elem_s *freelist;
	qdf_spinlock_t lock;
	qdf_dma_mem_context(memctx);
};

/**
 * struct dp_tx_desc_s - Tx Descriptor
 * @next: Next in the chain of descriptors in freelist or in the completion list
 * @nbuf: Buffer Address
 * @msdu_ext_desc: MSDU extension descriptor
 * @id: Descriptor ID
 * @vdev: vdev over which the packet was transmitted
 * @pdev: Handle to pdev
 * @pool_id: Pool ID - used when releasing the descriptor
 * @flags: Flags to track the state of descriptor and special frame handling
 * @comp: Tx completion status captured from the WBM completion descriptor
 * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet).
 * 		   This is maintained in descriptor to allow more efficient
 * 		   processing in completion event processing code.
 * 		   This field is filled in with the htt_pkt_type enum.
 * @frm_type: Frame Type - ToDo check if this is redundant
 * @pkt_offset: Offset from which the actual packet data starts
 * @me_buffer: Pointer to ME buffer - store this so that it can be freed on
 *	       Tx completion of ME packet
 * @tso_desc: Pointer to the TSO segment descriptor for this packet
 * @tso_num_desc: Pointer to the "number of TSO segments" descriptor
 */
struct dp_tx_desc_s {
	struct dp_tx_desc_s *next;
	qdf_nbuf_t nbuf;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	uint32_t  id;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	uint8_t  pool_id;
	uint16_t flags;
	struct hal_tx_desc_comp_s comp;
	uint16_t tx_encap_type;
	uint8_t frm_type;
	uint8_t pkt_offset;
	void *me_buffer;
	void *tso_desc;
	void *tso_num_desc;
};

/**
 * enum flow_pool_status - flow pool status
 * @FLOW_POOL_ACTIVE_UNPAUSED: pool is active (can take/put descriptors)
 *			       and network queues are unpaused
 * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
 *			     and network queues are paused
 * @FLOW_POOL_INVALID: pool is invalid (descriptors may only be returned)
 * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
 */
enum flow_pool_status {
	FLOW_POOL_ACTIVE_UNPAUSED = 0,
	FLOW_POOL_ACTIVE_PAUSED = 1,
	FLOW_POOL_INVALID = 2,
	FLOW_POOL_INACTIVE = 3,
};

/**
 * struct dp_tx_tso_seg_pool_s
 * @pool_size: total number of pool elements
 * @num_free: free element count
 * @freelist: first free element pointer
 * @lock: lock for accessing the pool
 */
struct dp_tx_tso_seg_pool_s {
	uint16_t pool_size;
	uint16_t num_free;
	struct qdf_tso_seg_elem_t *freelist;
	qdf_spinlock_t lock;
};

/**
 * struct dp_tx_tso_num_seg_pool_s
 * @num_seg_pool_size: total number of pool elements
 * @num_free: free element count
 * @freelist: first free element pointer
 * @lock: lock for accessing the pool
 */
struct dp_tx_tso_num_seg_pool_s {
	uint16_t num_seg_pool_size;
	uint16_t num_free;
	struct qdf_tso_num_seg_elem_t *freelist;
	/* tso mutex */
	qdf_spinlock_t lock;
};

/**
 * struct dp_tx_desc_pool_s - Tx Descriptor pool information
 * @elem_size: Size of each descriptor in the pool
 * @pool_size: Total number of descriptors in the pool
 * @num_free: Number of free descriptors
 * @num_allocated: Number of used descriptors
 * @freelist: Chain of free descriptors
 * @desc_pages: multiple page allocation information for actual descriptors
 * @num_invalid_bin: Deleted pool with pending Tx completions.
 * @flow_pool_array_lock: Lock when operating on flow_pool_array.
 * @flow_pool_array: List of allocated flow pools
 * @lock: Lock for descriptor allocation/free from/to the pool
 */
struct dp_tx_desc_pool_s {
	uint16_t elem_size;
	uint32_t num_allocated;
	struct dp_tx_desc_s *freelist;
	struct qdf_mem_multi_page_t desc_pages;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	uint16_t pool_size;
	uint8_t flow_pool_id;
	uint8_t num_invalid_bin;
	uint16_t avail_desc;
	enum flow_pool_status status;
	enum htt_flow_type flow_type;
	uint16_t stop_th;
	uint16_t start_th;
	uint16_t pkt_drop_no_desc;
	qdf_spinlock_t flow_pool_lock;
	uint8_t pool_create_cnt;
	void *pool_owner_ctx;
#else
	uint16_t elem_count;
	uint32_t num_free;
	qdf_spinlock_t lock;
#endif
};

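/*
 * Illustrative sketch (not part of the driver API, flow control V2
 * builds only): how the stop_th/start_th watermarks in the flow pool
 * are typically used to transition between the paused and unpaused
 * states above. The actual queue pause/resume plumbing through the
 * registered tx_pause_callback is elided.
 */
#if 0	/* example only */
static inline void
dp_example_flow_pool_update(struct dp_tx_desc_pool_s *pool)
{
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if (pool->status == FLOW_POOL_ACTIVE_UNPAUSED &&
	    pool->avail_desc <= pool->stop_th) {
		pool->status = FLOW_POOL_ACTIVE_PAUSED;
		/* pause the netdev queues via tx_pause_callback (elided) */
	} else if (pool->status == FLOW_POOL_ACTIVE_PAUSED &&
		   pool->avail_desc >= pool->start_th) {
		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		/* resume the netdev queues via tx_pause_callback (elided) */
	}
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#endif
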
/**
 * struct dp_txrx_pool_stats - flow pool related statistics
 * @pool_map_count: flow pool map received
 * @pool_unmap_count: flow pool unmap received
 * @pkt_drop_no_pool: packets dropped due to unavailability of pool
 */
struct dp_txrx_pool_stats {
	uint16_t pool_map_count;
	uint16_t pool_unmap_count;
	uint16_t pkt_drop_no_pool;
};

struct dp_srng {
	void *hal_srng;
	void *base_vaddr_unaligned;
	qdf_dma_addr_t base_paddr_unaligned;
	uint32_t alloc_size;
	int irq;
	uint32_t num_entries;
};

struct dp_rx_reorder_array_elem {
	qdf_nbuf_t head;
	qdf_nbuf_t tail;
};

#define DP_RX_BA_INACTIVE 0
#define DP_RX_BA_ACTIVE 1
struct dp_reo_cmd_info {
	uint16_t cmd;
	enum hal_reo_cmd_type cmd_type;
	void *data;
	void (*handler)(struct dp_soc *, void *, union hal_reo_status *);
	TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
};

/* Rx TID */
struct dp_rx_tid {
	/* TID */
	int tid;

	/* Num of addba requests */
	uint32_t num_of_addba_req;

	/* Num of addba responses */
	uint32_t num_of_addba_resp;

	/* Num of delba requests */
	uint32_t num_of_delba_req;

	/* pn size */
	uint8_t pn_size;
	/* REO TID queue descriptors */
	void *hw_qdesc_vaddr_unaligned;
	qdf_dma_addr_t hw_qdesc_paddr_unaligned;
	qdf_dma_addr_t hw_qdesc_paddr;
	uint32_t hw_qdesc_alloc_size;

	/* RX ADDBA session state */
	int ba_status;

	/* RX BA window size */
	uint16_t ba_win_size;

	/* TODO: Check the following while adding defragmentation support */
	struct dp_rx_reorder_array_elem *array;
	/* base - single rx reorder element used for non-aggr cases */
	struct dp_rx_reorder_array_elem base;

	/* only used for defrag right now */
	TAILQ_ENTRY(dp_rx_tid) defrag_waitlist_elem;

	/* Store dst desc for reinjection */
	void *dst_ring_desc;
	struct dp_rx_desc *head_frag_desc;

	/* Sequence and fragments that are being processed currently */
	uint32_t curr_seq_num;
	uint32_t curr_frag_num;

	uint32_t defrag_timeout_ms;
	uint16_t dialogtoken;
	uint16_t statuscode;
	/* user defined ADDBA response status code */
	uint16_t userstatuscode;
};

/* per interrupt context */
struct dp_intr {
	uint8_t tx_ring_mask;   /* WBM Tx completion rings (0-2)
				   associated with this napi context */
	uint8_t rx_ring_mask;   /* Rx REO rings (0-3) associated
				   with this interrupt context */
	uint8_t rx_mon_ring_mask;  /* Rx monitor ring mask (0-2) */
	uint8_t rx_err_ring_mask; /* REO Exception Ring */
	uint8_t rx_wbm_rel_ring_mask; /* WBM2SW Rx Release Ring */
	uint8_t reo_status_ring_mask; /* REO command response ring */
	uint8_t rxdma2host_ring_mask; /* RXDMA to host destination ring */
	uint8_t host2rxdma_ring_mask; /* Host to RXDMA buffer ring */
	struct dp_soc *soc;    /* Reference to SoC structure,
				  to get DMA ring handles */
	qdf_lro_ctx_t lro_ctx;
	uint8_t dp_intr_id;
};

#define REO_DESC_FREELIST_SIZE 64
#define REO_DESC_FREE_DEFER_MS 1000
struct reo_desc_list_node {
	qdf_list_node_t node;
	unsigned long free_ts;
	struct dp_rx_tid rx_tid;
};

/* SoC level data path statistics */
struct dp_soc_stats {
	struct {
		uint32_t added;
		uint32_t deleted;
		uint32_t aged_out;
	} ast;

	/* SOC level TX stats */
	struct {
		/* packets dropped on tx because of no peer */
		struct cdp_pkt_info tx_invalid_peer;
		/* descriptors in each tcl ring */
		uint32_t tcl_ring_full[MAX_TCL_DATA_RINGS];
		/* Descriptors in use at soc */
		uint32_t desc_in_use;
		/* tqm_release_reason == FW removed */
		uint32_t dropped_fw_removed;
	} tx;

	/* SOC level RX stats */
	struct {
		/* Rx errors */
		/* Total Packets in Rx Error ring */
		uint32_t err_ring_pkts;
		/* No of Fragments */
		uint32_t rx_frags;
		struct {
			/* Invalid RBM error count */
			uint32_t invalid_rbm;
			/* Invalid VDEV Error count */
			uint32_t invalid_vdev;
			/* Invalid PDEV error count */
			uint32_t invalid_pdev;
			/* Invalid PEER Error count */
			struct cdp_pkt_info rx_invalid_peer;
			/* HAL ring access Fail error count */
			uint32_t hal_ring_access_fail;
			/* RX DMA error count */
			uint32_t rxdma_error[HAL_RXDMA_ERR_MAX];
			/* REO Error count */
			uint32_t reo_error[HAL_REO_ERR_MAX];
			/* HAL REO ERR Count */
			uint32_t hal_reo_error[MAX_REO_DEST_RINGS];
		} err;

		/* packet count per core - per ring */
		uint64_t ring_packets[NR_CPUS][MAX_REO_DEST_RINGS];
	} rx;
};

union dp_align_mac_addr {
	uint8_t raw[DP_MAC_ADDR_LEN];
	struct {
		uint16_t bytes_ab;
		uint16_t bytes_cd;
		uint16_t bytes_ef;
	} align2;
	struct {
		uint32_t bytes_abcd;
		uint16_t bytes_ef;
	} align4;
	struct {
		uint16_t bytes_ab;
		uint32_t bytes_cdef;
	} align4_2;
};

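/*
 * Illustrative sketch (not part of the driver API): the union above
 * lets a 6-byte MAC comparison be done with two aligned integer
 * compares instead of a byte-wise memcmp.
 */
#if 0	/* example only */
static inline int
dp_example_mac_addr_cmp(union dp_align_mac_addr *a, union dp_align_mac_addr *b)
{
	return (a->align4.bytes_abcd == b->align4.bytes_abcd) &&
	       (a->align4.bytes_ef == b->align4.bytes_ef);
}
#endif
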
/*
 * dp_ast_entry
 *
 * @ast_idx: Hardware AST Index
 * @mac_addr: MAC Address for this AST entry
 * @peer: Next Hop peer (for non-WDS nodes, this will point to the
 *        associated peer with this MAC address)
 * @next_hop: Set to 1 if this is for a WDS node
 * @is_active: flag to indicate active data traffic on this node
 *             (used for aging out/expiry)
 * @ase_list_elem: node in peer AST list
 * @is_bss: flag to indicate if entry corresponds to bss peer
 * @pdev_id: pdev ID
 * @vdev_id: vdev ID
 * @type: flag to indicate type of the entry (static/WDS/MEC)
 * @hash_list_elem: node in soc AST hash list (mac address used as hash)
 */
struct dp_ast_entry {
	uint16_t ast_idx;
	/* MAC address */
	union dp_align_mac_addr mac_addr;
	struct dp_peer *peer;
	bool next_hop;
	bool is_active;
	bool is_bss;
	uint8_t pdev_id;
	uint8_t vdev_id;
	enum cdp_txrx_ast_entry_type type;
	TAILQ_ENTRY(dp_ast_entry) ase_list_elem;
	TAILQ_ENTRY(dp_ast_entry) hash_list_elem;
};

/* SOC level htt stats */
struct htt_t2h_stats {
	/* lock to protect htt_stats_msg update */
	qdf_spinlock_t lock;

	/* work queue to process htt stats */
	qdf_work_t work;

	/* T2H Ext stats message queue */
	qdf_nbuf_queue_t msg;

	/* number of completed stats in htt_stats_msg */
	uint32_t num_stats;
};

/* SOC level structure for data path */
struct dp_soc {
	/* Common base structure - Should be the first member */
	struct cdp_soc_t cdp_soc;

	/* SoC Obj */
	void *ctrl_psoc;

	/* OS device abstraction */
	qdf_device_t osdev;

	/* WLAN config context */
	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx;

	/* HTT handle for host-fw interaction */
	void *htt_handle;

	/* Common init done */
	qdf_atomic_t cmn_init_done;

	/* Opaque hif handle */
	struct hif_opaque_softc *hif_handle;

	/* PDEVs on this SOC */
	struct dp_pdev *pdev_list[MAX_PDEV_CNT];

	/* Number of PDEVs */
	uint8_t pdev_count;

	/* cce disable */
	bool cce_disable;

	/* Link descriptor memory banks */
	struct {
		void *base_vaddr_unaligned;
		void *base_vaddr;
		qdf_dma_addr_t base_paddr_unaligned;
		qdf_dma_addr_t base_paddr;
		uint32_t size;
	} link_desc_banks[MAX_LINK_DESC_BANKS];

	/* Link descriptor Idle list for HW internal use (SRNG mode) */
	struct dp_srng wbm_idle_link_ring;

	/* Link descriptor Idle list for HW internal use (scatter buffer mode)
	 */
	qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS];
	void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS];
	uint32_t wbm_idle_scatter_buf_size;

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	qdf_spinlock_t flow_pool_array_lock;
	tx_pause_callback pause_cb;
	struct dp_txrx_pool_stats pool_stats;
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
	/* Tx SW descriptor pool */
	struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS];

	/* Tx MSDU Extension descriptor pool */
	struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS];

	/* Tx TSO descriptor pool */
	struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS];

	/* Tx TSO Num of segments pool */
	struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS];

	/* Tx H/W queues lock */
	qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];

	/* Rx SW descriptor pool for RXDMA buffer */
	struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS];

	/* Rx SW descriptor pool for RXDMA monitor buffer */
	struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS];

	/* Rx SW descriptor pool for RXDMA status buffer */
	struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS];

	/* HAL SOC handle */
	void *hal_soc;

	/* DP Interrupts */
	struct dp_intr intr_ctx[DP_MAX_INTERRUPT_CONTEXTS];

	/* REO destination rings */
	struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS];

	/* Number of REO destination rings */
	uint8_t num_reo_dest_rings;

	/* REO exception ring - See if should combine this with reo_dest_ring */
	struct dp_srng reo_exception_ring;

	/* REO reinjection ring */
	struct dp_srng reo_reinject_ring;

	/* REO command ring */
	struct dp_srng reo_cmd_ring;

	/* REO command status ring */
	struct dp_srng reo_status_ring;

	/* WBM Rx release ring */
	struct dp_srng rx_rel_ring;

	/* Number of TCL data rings */
	uint8_t num_tcl_data_rings;

	/* TCL data ring */
	struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS];

	/* TCL command ring */
	struct dp_srng tcl_cmd_ring;

	/* TCL command status ring */
	struct dp_srng tcl_status_ring;

	/* WBM Tx completion rings */
	struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS];

	/* Common WBM link descriptor release ring (SW to WBM) */
	struct dp_srng wbm_desc_rel_ring;

	/* Tx ring map for interrupt processing */
	uint8_t tx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];

	/* Rx ring map for interrupt processing */
	uint8_t rx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];

	/* peer ID to peer object map (array of pointers to peer objects) */
	struct dp_peer **peer_id_to_obj_map;

	struct {
		unsigned mask;
		unsigned idx_bits;
		TAILQ_HEAD(, dp_peer) * bins;
	} peer_hash;

	/* rx defrag state - TBD: do we need this per radio? */
	struct {
		struct {
			TAILQ_HEAD(, dp_rx_tid) waitlist;
			uint32_t timeout_ms;
			qdf_spinlock_t defrag_lock;
		} defrag;
		struct {
			int defrag_timeout_check;
			int dup_check;
		} flags;
		TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list;
		qdf_spinlock_t reo_cmd_lock;
	} rx;

	/* optional rx processing function */
	void (*rx_opt_proc)(
		struct dp_vdev *vdev,
		struct dp_peer *peer,
		unsigned tid,
		qdf_nbuf_t msdu_list);

	/* pool addr for mcast enhance buff */
	struct {
		int size;
		uint32_t paddr;
		uint32_t *vaddr;
		struct dp_tx_me_buf_t *freelist;
		int buf_in_use;
		qdf_dma_mem_context(memctx);
	} me_buf;

	/**
	 * peer ref mutex:
	 * 1. Protect peer object lookups until the returned peer object's
	 *	reference count is incremented.
	 * 2. Provide mutex when accessing peer object lookup structures.
	 */
	DP_MUTEX_TYPE peer_ref_mutex;

	/* maximum value for peer_id */
	uint32_t max_peers;

	/* SoC level data path statistics */
	struct dp_soc_stats stats;

	/* Enable processing of Tx completion status words */
	bool process_tx_status;
	bool process_rx_status;
	struct dp_ast_entry *ast_table[WLAN_UMAC_PSOC_MAX_PEERS * 2];
	struct {
		unsigned mask;
		unsigned idx_bits;
		TAILQ_HEAD(, dp_ast_entry) * bins;
	} ast_hash;

	qdf_spinlock_t ast_lock;
	qdf_timer_t wds_aging_timer;

	/* interrupt timer */
	qdf_timer_t mon_reap_timer;
	uint8_t reap_timer_init;
	qdf_timer_t int_timer;
	uint8_t intr_mode;

	qdf_list_t reo_desc_freelist;
	qdf_spinlock_t reo_desc_freelist_lock;

#ifdef QCA_SUPPORT_SON
	/* The timer to check station's inactivity status */
	os_timer_t pdev_bs_inact_timer;
	/* The current inactivity count reload value
	   based on overload condition */
	u_int16_t pdev_bs_inact_reload;

	/* The inactivity timer value when not overloaded */
	u_int16_t pdev_bs_inact_normal;

	/* The inactivity timer value when overloaded */
	u_int16_t pdev_bs_inact_overload;

	/* The inactivity timer check interval */
	u_int16_t pdev_bs_inact_interval;
	/* Inactivity timer */
#endif /* QCA_SUPPORT_SON */

	/* htt stats */
	struct htt_t2h_stats htt_stats;

	void *external_txrx_handle; /* External data path handle */
#ifdef IPA_OFFLOAD
	/* IPA uC datapath offload Wlan Tx resources */
	struct {
		/* Resource info to be passed to IPA */
		qdf_dma_addr_t ipa_tcl_ring_base_paddr;
		void *ipa_tcl_ring_base_vaddr;
		uint32_t ipa_tcl_ring_size;
		qdf_dma_addr_t ipa_tcl_hp_paddr;
		uint32_t alloc_tx_buf_cnt;

		qdf_dma_addr_t ipa_wbm_ring_base_paddr;
		void *ipa_wbm_ring_base_vaddr;
		uint32_t ipa_wbm_ring_size;
		qdf_dma_addr_t ipa_wbm_tp_paddr;

		/* TX buffers populated into the WBM ring */
		void **tx_buf_pool_vaddr_unaligned;
		qdf_dma_addr_t *tx_buf_pool_paddr_unaligned;
	} ipa_uc_tx_rsc;

	/* IPA uC datapath offload Wlan Rx resources */
	struct {
		/* Resource info to be passed to IPA */
		qdf_dma_addr_t ipa_reo_ring_base_paddr;
		void *ipa_reo_ring_base_vaddr;
		uint32_t ipa_reo_ring_size;
		qdf_dma_addr_t ipa_reo_tp_paddr;

		/* Resource info to be passed to firmware and IPA */
		qdf_dma_addr_t ipa_rx_refill_buf_ring_base_paddr;
		void *ipa_rx_refill_buf_ring_base_vaddr;
		uint32_t ipa_rx_refill_buf_ring_size;
		qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr;
	} ipa_uc_rx_rsc;
#endif
};

#ifdef IPA_OFFLOAD
/**
 * struct dp_ipa_resources - Resources needed for IPA
 */
struct dp_ipa_resources {
	qdf_dma_addr_t tx_ring_base_paddr;
	uint32_t tx_ring_size;
	uint32_t tx_num_alloc_buffer;

	qdf_dma_addr_t tx_comp_ring_base_paddr;
	uint32_t tx_comp_ring_size;

	qdf_dma_addr_t rx_rdy_ring_base_paddr;
	uint32_t rx_rdy_ring_size;

	qdf_dma_addr_t rx_refill_ring_base_paddr;
	uint32_t rx_refill_ring_size;

	/* IPA UC doorbell registers paddr */
	qdf_dma_addr_t tx_comp_doorbell_paddr;
	uint32_t *tx_comp_doorbell_vaddr;
	qdf_dma_addr_t rx_ready_doorbell_paddr;
};
#endif

#define MAX_RX_MAC_RINGS 2
/* Same as NAC_MAX_CLIENT */
#define DP_NAC_MAX_CLIENT  24

/*
 * Macros to setup link descriptor cookies - for link descriptors, we just
 * need the first 3 bits to store the bank ID. The remaining bits are used
 * to set a unique ID, which will be useful in debugging.
 */
#define LINK_DESC_BANK_ID_MASK 0x7
#define LINK_DESC_ID_SHIFT 3
#define LINK_DESC_ID_START 0x8000

#define LINK_DESC_COOKIE(_desc_id, _bank_id) \
	((((_desc_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_bank_id))

#define LINK_DESC_COOKIE_BANK_ID(_cookie) \
	((_cookie) & LINK_DESC_BANK_ID_MASK)
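
/*
 * Worked example (illustrative): for descriptor ID 5 in bank 2,
 * LINK_DESC_COOKIE(5, 2) == (((5 + 0x8000) << 3) | 2) == 0x4002a, and
 * LINK_DESC_COOKIE_BANK_ID(0x4002a) == 2 recovers the bank ID from the
 * low three bits.
 */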

/* same as ieee80211_nac_param */
enum dp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	DP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	DP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	DP_NAC_PARAM_LIST,
};

/**
 * struct dp_neighbour_peer - neighbour peer list type for smart mesh
 * @neighbour_peers_macaddr: neighbour peer's mac address
 * @neighbour_peer_list_elem: neighbour peer list TAILQ element
 */
struct dp_neighbour_peer {
	/* MAC address of neighbour's peer */
	union dp_align_mac_addr neighbour_peers_macaddr;
	/* node in the list of neighbour's peer */
	TAILQ_ENTRY(dp_neighbour_peer) neighbour_peer_list_elem;
};

/**
 * struct ppdu_info - PPDU Status info descriptor
 * @ppdu_id: Unique ppdu id assigned by firmware for every tx packet
 * @max_ppdu_id: wrap around for ppdu id
 * @tlv_bitmap: Bitmap of the TLVs received for this PPDU
 * @last_tlv_cnt: Keep track of missing ppdu tlvs
 * @last_user: last ppdu processed for user
 * @is_ampdu: set if the frame is part of an A-MPDU aggregate
 * @nbuf: ppdu descriptor payload
 * @ppdu_desc: ppdu descriptor
 * @ppdu_info_list_elem: linked list of ppdu tlvs
 */
struct ppdu_info {
	uint32_t ppdu_id;
	uint32_t max_ppdu_id;
	uint16_t tlv_bitmap;
	uint16_t last_tlv_cnt;
	uint16_t last_user:8,
		 is_ampdu:1;
	qdf_nbuf_t nbuf;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	TAILQ_ENTRY(ppdu_info) ppdu_info_list_elem;
};

/* PDEV level structure for data path */
struct dp_pdev {
	/* PDEV handle from OSIF layer TBD: see if we really need osif_pdev */
	struct cdp_ctrl_objmgr_pdev *ctrl_pdev;

	/* PDEV Id */
	int pdev_id;

	/* TXRX SOC handle */
	struct dp_soc *soc;

	/* Ring used to replenish rx buffers (maybe to the firmware or MAC) */
	struct dp_srng rx_refill_buf_ring;

	/* Second ring used to replenish rx buffers */
	struct dp_srng rx_refill_buf_ring2;

	/* Empty ring used by firmware to post rx buffers to the MAC */
	struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];

	/* wlan_cfg pdev ctxt */
	struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx;

	/* RXDMA monitor buffer replenish ring */
	struct dp_srng rxdma_mon_buf_ring[NUM_RXDMA_RINGS_PER_PDEV];

	/* RXDMA monitor destination ring */
	struct dp_srng rxdma_mon_dst_ring[NUM_RXDMA_RINGS_PER_PDEV];

	/* RXDMA monitor status ring. TBD: Check format of this ring */
	struct dp_srng rxdma_mon_status_ring[NUM_RXDMA_RINGS_PER_PDEV];

	struct dp_srng rxdma_mon_desc_ring[NUM_RXDMA_RINGS_PER_PDEV];

	/* RXDMA error destination ring */
	struct dp_srng rxdma_err_dst_ring[NUM_RXDMA_RINGS_PER_PDEV];

	/* Link descriptor memory banks */
	struct {
		void *base_vaddr_unaligned;
		void *base_vaddr;
		qdf_dma_addr_t base_paddr_unaligned;
		qdf_dma_addr_t base_paddr;
		uint32_t size;
	} link_desc_banks[NUM_RXDMA_RINGS_PER_PDEV][MAX_MON_LINK_DESC_BANKS];


	/**
	 * TODO: See if we need a ring map here for LMAC rings.
	 * 1. Monitor rings are currently planned to be processed on receiving
	 * PPDU end interrupts and hence won't need ring based interrupts.
	 * 2. Rx buffer rings will be replenished during REO destination
	 * processing and don't require regular interrupt handling - we will
	 * only handle low water mark interrupts, which are not expected
	 * frequently
	 */

	/* VDEV list */
	TAILQ_HEAD(, dp_vdev) vdev_list;

	/* vdev list lock */
	qdf_spinlock_t vdev_list_lock;

	/* Number of vdevs this device has */
	uint16_t vdev_count;

	/* PDEV transmit lock */
	qdf_spinlock_t tx_lock;

#ifndef REMOVE_PKT_LOG
	bool pkt_log_init;
	/* Pktlog pdev */
	struct pktlog_dev_t *pl_dev;
#endif /* #ifndef REMOVE_PKT_LOG */

	/* Monitor mode interface and status storage */
	struct dp_vdev *monitor_vdev;

	/* monitor mode lock */
	qdf_spinlock_t mon_lock;

	/* tx_mutex for me */
	DP_MUTEX_TYPE tx_mutex;

	/* Smart Mesh */
	bool filter_neighbour_peers;
	/* smart mesh mutex */
	qdf_spinlock_t neighbour_peer_mutex;
	/* Neighbour peer list */
	TAILQ_HEAD(, dp_neighbour_peer) neighbour_peers_list;
	/* msdu chain head & tail */
	qdf_nbuf_t invalid_peer_head_msdu;
	qdf_nbuf_t invalid_peer_tail_msdu;

	/* Band steering */
	/* TBD */

	/* PDEV level data path statistics */
	struct cdp_pdev_stats stats;

	/* Global RX decap mode for the device */
	enum htt_pkt_type rx_decap_mode;

	/* Enhanced Stats is enabled */
	bool enhanced_stats_en;

	/* advanced filter mode and type */
	uint8_t mon_filter_mode;
	uint16_t fp_mgmt_filter;
	uint16_t fp_ctrl_filter;
	uint16_t fp_data_filter;
	uint16_t mo_mgmt_filter;
	uint16_t mo_ctrl_filter;
	uint16_t mo_data_filter;

	qdf_atomic_t num_tx_outstanding;

	qdf_atomic_t num_tx_exception;

	/* MCL specific local peer handle */
	struct {
		uint8_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
		uint8_t freelist;
		qdf_spinlock_t lock;
		struct dp_peer *map[OL_TXRX_NUM_LOCAL_PEER_IDS];
	} local_peer_ids;

	/* dscp_tid_map */
	uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX];

	struct hal_rx_ppdu_info ppdu_info;

	/* operating channel */
	uint8_t operating_channel;

	qdf_nbuf_queue_t rx_status_q;
	uint32_t mon_ppdu_status;
	struct cdp_mon_status rx_mon_recv_status;
	/* monitor mode status/destination ring PPDU and MPDU count */
	struct cdp_pdev_mon_stats rx_mon_stats;

	/* pool addr for mcast enhance buff */
	struct {
		int size;
		uint32_t paddr;
		char *vaddr;
		struct dp_tx_me_buf_t *freelist;
		int buf_in_use;
		qdf_dma_mem_context(memctx);
	} me_buf;

	/* Number of VAPs with mcast enhancement enabled */
	qdf_atomic_t mc_num_vap_attached;

	qdf_atomic_t stats_cmd_complete;

#ifdef IPA_OFFLOAD
	ipa_uc_op_cb_type ipa_uc_op_cb;
	void *usr_ctxt;
	struct dp_ipa_resources ipa_resource;
#endif

	/* TBD */

	/* map this pdev to a particular Reo Destination ring */
	enum cdp_host_reo_dest_ring reo_dest;

#ifndef REMOVE_PKT_LOG
	/* Packet log mode */
	uint8_t rx_pktlog_mode;
#endif

	/* WDI event handlers */
	struct wdi_event_subscribe_t **wdi_event_list;

	/* ppdu_id of last received HTT TX stats */
	uint32_t last_ppdu_id;
	struct {
		uint8_t last_user;
		qdf_nbuf_t buf;
	} tx_ppdu_info;

	bool tx_sniffer_enable;
	/* mirror copy mode */
	bool mcopy_mode;
	bool bpr_enable;
	struct {
		uint16_t tx_ppdu_id;
		uint16_t tx_peer_id;
		uint16_t rx_ppdu_id;
	} m_copy_id;

	/* To check if PPDU Tx stats are enabled for Pktlog */
	bool pktlog_ppdu_stats;

	void *dp_txrx_handle; /* Advanced data path handle */

#ifdef ATH_SUPPORT_NAC_RSSI
	bool nac_rssi_filtering;
#endif
	/* list of ppdu tlvs */
	TAILQ_HEAD(, ppdu_info) ppdu_info_list;
	uint32_t tlv_count;
	uint32_t list_depth;
	uint32_t ppdu_id;
	bool first_nbuf;
	struct {
		uint8_t *mgmt_buf; /* Ptr to mgmt. payload in HTT ppdu stats */
		uint32_t mgmt_buf_len; /* Len of mgmt. payload in ppdu stats */
		uint32_t ppdu_id;
	} mgmtctrl_frm_info;
};
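
/*
 * Illustrative sketch (not part of the driver API): local_peer_ids above
 * is an index-linked freelist, assuming the ol_txrx-style initialization
 * where pool[i] holds the index of the next free ID and the last entry
 * points to itself (that initialization lives outside this header).
 * With that layout, allocation and release are O(1) with no extra
 * memory.
 */
#if 0	/* example only */
static inline int
dp_example_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	if (pdev->local_peer_ids.pool[i] == i) {
		/* by convention, an entry pointing to itself means empty */
		qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
		return -1;
	}
	pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
	pdev->local_peer_ids.map[i] = peer;
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);

	return i;
}
#endif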

struct dp_peer;

/* VDEV structure for data path state */
struct dp_vdev {
	/* OS device abstraction */
	qdf_device_t osdev;
	/* physical device that is the parent of this virtual device */
	struct dp_pdev *pdev;

	/* Handle to the OS shim SW's virtual device */
	ol_osif_vdev_handle osif_vdev;

	/* Handle to the UMAC vdev object */
	struct cdp_ctrl_objmgr_vdev *ctrl_vdev;
	/* vdev_id - ID used to specify a particular vdev to the target */
	uint8_t vdev_id;

	/* MAC address */
	union dp_align_mac_addr mac_addr;

	/* node in the pdev's list of vdevs */
	TAILQ_ENTRY(dp_vdev) vdev_list_elem;

	/* dp_peer list */
	TAILQ_HEAD(, dp_peer) peer_list;

	/* callback to hand rx frames to the OS shim */
	ol_txrx_rx_fp osif_rx;
	ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap;
	ol_txrx_get_key_fp osif_get_key;
	ol_txrx_tx_free_ext_fp osif_tx_free_ext;

#ifdef notyet
	/* callback to check if the msdu is a WAI (WAPI) frame */
	ol_rx_check_wai_fp osif_check_wai;
#endif

	/* proxy arp function */
	ol_txrx_proxy_arp_fp osif_proxy_arp;

	/* callback to hand rx monitor 802.11 MPDU to the OS shim */
	ol_txrx_rx_mon_fp osif_rx_mon;

	ol_txrx_mcast_me_fp me_convert;

	/* completion function used by this vdev */
	ol_txrx_completion_fp tx_comp;

	/* deferred vdev deletion state */
	struct {
		/* VDEV delete pending */
		int pending;
		/*
		 * callback and a context argument to provide a
		 * notification for when the vdev is deleted.
		 */
		ol_txrx_vdev_delete_cb callback;
		void *context;
	} delete;

	/* tx data delivery notification callback function */
	struct {
		ol_txrx_data_tx_cb func;
		void *ctxt;
	} tx_non_std_data_callback;


	/* safe mode control to bypass the encrypt and decipher process */
	uint32_t safemode;

	/* rx filter related */
	uint32_t drop_unenc;
#ifdef notyet
	privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
	uint32_t filters_num;
#endif
	/* TDLS Link status */
	bool tdls_link_connected;
	bool is_tdls_frame;


	/* VDEV operating mode */
	enum wlan_op_mode opmode;

	/* Tx encapsulation type for this VAP */
	enum htt_cmn_pkt_type tx_encap_type;
	/* Rx Decapsulation type for this VAP */
	enum htt_cmn_pkt_type rx_decap_type;

	/* BSS peer */
	struct dp_peer *vap_bss_peer;

	/* WDS enabled */
	bool wds_enabled;

	/* WDS Aging timer period */
	uint32_t wds_aging_timer_val;

	/* NAWDS enabled */
	bool nawds_enabled;

	/* Default HTT meta data for this VDEV */
	/* TBD: check alignment constraints */
	uint16_t htt_tcl_metadata;

	/* Mesh mode vdev */
	uint32_t mesh_vdev;

	/* Mesh mode rx filter setting */
	uint32_t mesh_rx_filter;

	/* DSCP-TID mapping table ID */
	uint8_t dscp_tid_map_id;

	/* Multicast enhancement enabled */
	uint8_t mcast_enhancement_en;

	/* per vdev rx nbuf queue */
	qdf_nbuf_queue_t rxq;

	uint8_t tx_ring_id;
	struct dp_tx_desc_pool_s *tx_desc;
	struct dp_tx_ext_desc_pool_s *tx_ext_desc;

	/* VDEV Stats */
	struct cdp_vdev_stats stats;
	bool lro_enable;

	/* Is this a proxySTA VAP */
	bool proxysta_vdev;
	/* Is isolation mode enabled */
	bool isolation_vdev;

	/* Address search flags to be configured in HAL descriptor */
	uint8_t hal_desc_addr_search_flags;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	struct dp_tx_desc_pool_s *pool;
#endif
	/* AP BRIDGE enabled */
	uint32_t ap_bridge_enabled;

	enum cdp_sec_type sec_type;

#ifdef ATH_SUPPORT_NAC_RSSI
	bool cdp_nac_rssi_enabled;
	struct {
		uint8_t bssid_mac[6];
		uint8_t client_mac[6];
		uint8_t chan_num;
		uint8_t client_rssi_valid;
		uint8_t client_rssi;
		uint8_t vdev_id;
	} cdp_nac_rssi;
#endif
};


enum {
	dp_sec_mcast = 0,
	dp_sec_ucast
};

#ifdef WDS_VENDOR_EXTENSION
typedef struct {
	uint8_t	wds_tx_mcast_4addr:1,
		wds_tx_ucast_4addr:1,
		wds_rx_filter:1,      /* enforce rx filter */
		wds_rx_ucast_4addr:1, /* when set, accept 4addr unicast frames */
		wds_rx_mcast_4addr:1; /* when set, accept 4addr multicast frames */
} dp_ecm_policy;
#endif

/* Peer structure for data path state */
struct dp_peer {
	/* VDEV to which this peer is associated */
	struct dp_vdev *vdev;

	struct cdp_ctrl_objmgr_peer *ctrl_peer;

	struct dp_ast_entry *self_ast_entry;

	qdf_atomic_t ref_cnt;

	/* TODO: See if multiple peer IDs are required in wifi3.0 */
	/* peer ID(s) for this peer */
	uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];

	union dp_align_mac_addr mac_addr;

	/* node in the vdev's list of peers */
	TAILQ_ENTRY(dp_peer) peer_list_elem;
	/* node in the hash table bin's list of peers */
	TAILQ_ENTRY(dp_peer) hash_list_elem;

	/* TID structures */
	struct dp_rx_tid rx_tid[DP_MAX_TIDS];

	/* TBD: No transmit TID state required? */

	struct {
		enum htt_sec_type sec_type;
		u_int32_t michael_key[2]; /* relevant for TKIP */
	} security[2]; /* 0 -> multicast, 1 -> unicast */

	/*
	 * rx proc function: this either is a copy of pdev's rx_opt_proc for
	 * regular rx processing, or has been redirected to a /dev/null discard
	 * function when peer deletion is in progress.
	 */
	void (*rx_opt_proc)(struct dp_vdev *vdev, struct dp_peer *peer,
		unsigned tid, qdf_nbuf_t msdu_list);

	/* set when node is authorized */
	uint8_t authorize:1;

	u_int8_t nac;

	/* Band steering: set when node is inactive */
	uint8_t peer_bs_inact_flag:1;
	u_int16_t peer_bs_inact; /* inactivity mark count */

	/* NAWDS Flag and Bss Peer bit */
	uint8_t nawds_enabled:1,
		bss_peer:1,
		wapi:1,
		wds_enabled:1;

	/* MCL specific peer local id */
	uint16_t local_id;
	enum ol_txrx_peer_state state;
	qdf_spinlock_t peer_info_lock;

	qdf_time_t last_assoc_rcvd;
	qdf_time_t last_disassoc_rcvd;
	qdf_time_t last_deauth_rcvd;
	/* Peer Stats */
	struct cdp_peer_stats stats;

	TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
	/* TBD */

#ifdef WDS_VENDOR_EXTENSION
	dp_ecm_policy wds_ecm;
#endif
	bool delete_in_progress;
};

#ifdef CONFIG_WIN
/*
 * dp_invalid_peer_msg
 * @nbuf: data buffer
 * @wh: 802.11 header
 * @vdev_id: id of vdev
 */
struct dp_invalid_peer_msg {
	qdf_nbuf_t nbuf;
	struct ieee80211_frame *wh;
	uint8_t vdev_id;
};
#endif

/*
 * dp_tx_me_buf_t: ME buffer
 * @next: pointer to next buffer
 * @data: destination MAC address
 */
struct dp_tx_me_buf_t {
	/* Note: ME buf pool initialization logic expects next pointer to
	 * be the first element. Don't add anything before next */
	struct dp_tx_me_buf_t *next;
	uint8_t data[DP_MAC_ADDR_LEN];
};
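
/*
 * Illustrative sketch (not part of the driver API): because next is the
 * first member of dp_tx_me_buf_t, a freshly allocated array of ME
 * buffers can be threaded into a freelist in one pass, which is the
 * property the note above relies on.
 */
#if 0	/* example only */
static inline struct dp_tx_me_buf_t *
dp_example_me_buf_link(struct dp_tx_me_buf_t *bufs, int count)
{
	int i;

	for (i = 0; i < count - 1; i++)
		bufs[i].next = &bufs[i + 1];
	bufs[count - 1].next = NULL;

	return &bufs[0];	/* head of the freelist */
}
#endif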

#endif /* _DP_TYPES_H_ */