xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_types.h (revision d2cd9eab9b38f8dceb85c744ffada78cad4f5940)
/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_TYPES_H_
#define _DP_TYPES_H_

#include <qdf_types.h>
#include <qdf_nbuf.h>
#include <qdf_lock.h>
#include <qdf_atomic.h>
#include <qdf_util.h>
#include <qdf_list.h>
#include <qdf_lro.h>
#include <queue.h>
#include <htt_common.h>

#include <cdp_txrx_cmn.h>
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <ieee80211.h>
#endif

#ifndef CONFIG_WIN
#include <wdi_event_api.h>    /* WDI subscriber event list */
#endif

#include <hal_tx.h>
#include <hal_reo.h>
#include "wlan_cfg.h"
#include "hal_rx.h"
#include <hal_api.h>
#include <hal_api_mon.h>

#define MAX_TCL_RING 3
#define MAX_RXDMA_ERRORS 32
#define SUPPORTED_BW 4
#define SUPPORTED_RECEPTION_TYPES 4
#define REPT_MU_MIMO 1
#define REPT_MU_OFDMA_MIMO 3
#define REO_ERROR_TYPE_MAX (HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET+1)
#define DP_VO_TID 6

#define DP_MAX_INTERRUPT_CONTEXTS 8
#define DP_MAX_TID_MAPS 16 /* MAX TID MAPS AVAILABLE PER PDEV */
#define DSCP_TID_MAP_MAX    (64)
#define DP_IP_DSCP_SHIFT 2
#define DP_IP_DSCP_MASK 0x3f
#define DP_FC0_SUBTYPE_QOS 0x80
#define DP_QOS_TID 0x0f
#define DP_IPV6_PRIORITY_SHIFT 20
#define MAX_MON_LINK_DESC_BANKS 2

#if defined(CONFIG_MCL)
#define MAX_PDEV_CNT 1
#else
#define MAX_PDEV_CNT 3
#endif
#define MAX_LINK_DESC_BANKS 8
#define MAX_TXDESC_POOLS 4
#define MAX_RXDESC_POOLS 4
#define MAX_REO_DEST_RINGS 4
#define MAX_TCL_DATA_RINGS 4
#define DP_MAX_TX_RINGS 8
#define DP_MAX_RX_RINGS 8
#define MAX_IDLE_SCATTER_BUFS 16
#define DP_MAX_IRQ_PER_CONTEXT 12
#define DEFAULT_HW_PEER_ID 0xffff

#define MAX_TX_HW_QUEUES 3

struct dp_soc_cmn;
struct dp_pdev;
struct dp_vdev;
union dp_tx_desc_list_elem_t;
struct dp_soc;
union dp_rx_desc_list_elem_t;

#define DP_MUTEX_TYPE qdf_spinlock_t

#define DP_FRAME_IS_MULTICAST(_a)  (*(_a) & 0x01)
#define DP_FRAME_IS_IPV4_MULTICAST(_a)  (*(_a) == 0x01)

#define DP_FRAME_IS_IPV6_MULTICAST(_a)         \
    ((_a)[0] == 0x33 &&                         \
     (_a)[1] == 0x33)

#define DP_FRAME_IS_BROADCAST(_a)              \
    ((_a)[0] == 0xff &&                         \
     (_a)[1] == 0xff &&                         \
     (_a)[2] == 0xff &&                         \
     (_a)[3] == 0xff &&                         \
     (_a)[4] == 0xff &&                         \
     (_a)[5] == 0xff)
#define IS_LLC_PRESENT(typeorlen) ((typeorlen) >= 0x600)
#define DP_FRAME_FC0_TYPE_MASK 0x0c
#define DP_FRAME_FC0_TYPE_DATA 0x08
#define DP_FRAME_IS_DATA(_frame) \
	(((_frame)->i_fc[0] & DP_FRAME_FC0_TYPE_MASK) == DP_FRAME_FC0_TYPE_DATA)
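
/*
 * Illustrative sketch (not part of the original header): classifying a
 * received frame with the macros above. DP_FRAME_IS_BROADCAST() should be
 * checked before DP_FRAME_IS_MULTICAST(), since a broadcast address also
 * has the multicast bit set. The variables below are hypothetical; any
 * header with an i_fc[] frame control array (e.g. struct ieee80211_frame)
 * works with DP_FRAME_IS_DATA().
 *
 *	uint8_t *daddr = qdf_nbuf_data(nbuf);
 *
 *	if (DP_FRAME_IS_BROADCAST(daddr))
 *		stats.bcast++;
 *	else if (DP_FRAME_IS_MULTICAST(daddr))
 *		stats.mcast++;
 *
 *	if (DP_FRAME_IS_DATA(wh))
 *		example_process_data_frame(nbuf);
 */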

/**
 * Macros to convert hw mac id to sw mac id:
 * mac ids used by hardware start from a value of 1, while those in host
 * software start from a value of 0. Use the macros below to convert
 * between the mac ids used by software and hardware.
 */
#define DP_SW2HW_MACID(id) ((id) + 1)

#define DP_HW2SW_MACID(id) ((id) > 0 ? ((id) - 1) : 0)
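
/*
 * Illustrative examples (not part of the original header): software mac id
 * 0 corresponds to hardware mac id 1, and the reverse conversion clamps at
 * 0 so an unexpected hardware id of 0 cannot underflow:
 *
 *	DP_SW2HW_MACID(0)  evaluates to 1
 *	DP_HW2SW_MACID(1)  evaluates to 0
 *	DP_HW2SW_MACID(0)  evaluates to 0 (clamped)
 */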
#define DP_MAC_ADDR_LEN 6

/**
 * enum dp_tx_frm_type
 * @dp_tx_frm_std: Regular frame, no added header fragments
 * @dp_tx_frm_tso: TSO segment, with a modified IP header added
 * @dp_tx_frm_sg: SG segment
 * @dp_tx_frm_audio: Audio frames, a custom LLC/SNAP header added
 * @dp_tx_frm_me: Multicast to Unicast Converted frame
 * @dp_tx_frm_raw: Raw Frame
 */
enum dp_tx_frm_type {
	dp_tx_frm_std = 0,
	dp_tx_frm_tso,
	dp_tx_frm_sg,
	dp_tx_frm_audio,
	dp_tx_frm_me,
	dp_tx_frm_raw,
};

/**
 * struct rx_desc_pool
 * @pool_size: number of RX descriptors in the pool
 * @array: pointer to the array of RX descriptors
 * @freelist: pointer to the free RX descriptor linked list
 */
struct rx_desc_pool {
	uint32_t pool_size;
	union dp_rx_desc_list_elem_t *array;
	union dp_rx_desc_list_elem_t *freelist;
};

/**
 * struct dp_tx_ext_desc_elem_s
 * @next: next extension descriptor pointer
 * @vaddr: hlos virtual address pointer
 * @paddr: physical address of the descriptor
 */
struct dp_tx_ext_desc_elem_s {
	struct dp_tx_ext_desc_elem_s *next;
	void *vaddr;
	qdf_dma_addr_t paddr;
};

/**
 * struct dp_tx_ext_desc_pool_s - Tx Extension Descriptor Pool
 * @elem_count: Number of descriptors in the pool
 * @elem_size: Size of each descriptor
 * @num_free: Number of free descriptors
 * @desc_pages: multiple page allocation information for actual descriptors
 * @link_elem_size: size of the link descriptor in cacheable memory used for
 * 		    chaining the extension descriptors
 * @desc_link_pages: multiple page allocation information for link descriptors
 * @freelist: chain of free extension descriptor elements
 * @lock: lock for allocating/freeing descriptors from/to the pool
 * @memctx: DMA memory context
 */
struct dp_tx_ext_desc_pool_s {
	uint16_t elem_count;
	int elem_size;
	uint16_t num_free;
	struct qdf_mem_multi_page_t desc_pages;
	int link_elem_size;
	struct qdf_mem_multi_page_t desc_link_pages;
	struct dp_tx_ext_desc_elem_s *freelist;
	qdf_spinlock_t lock;
	qdf_dma_mem_context(memctx);
};

/**
 * struct dp_tx_desc_s - Tx Descriptor
 * @next: Next in the chain of descriptors in freelist or in the completion list
 * @nbuf: Buffer Address
 * @msdu_ext_desc: MSDU extension descriptor
 * @id: Descriptor ID
 * @vdev: vdev over which the packet was transmitted
 * @pdev: Handle to pdev
 * @pool_id: Pool ID - used when releasing the descriptor
 * @flags: Flags to track the state of descriptor and special frame handling
 * @comp: Completion status cached from the Tx completion ring
 * @tx_encap_type: Transmit encap type (e.g. Raw, Native Wi-Fi, Ethernet).
 * 		   This is maintained in the descriptor to allow more efficient
 * 		   processing in completion event processing code.
 * 		   This field is filled in with the htt_pkt_type enum.
 * @frm_type: Frame Type - ToDo check if this is redundant
 * @pkt_offset: Offset from which the actual packet data starts
 * @me_buffer: Pointer to ME buffer - store this so that it can be freed on
 *		Tx completion of ME packet
 * @tso_desc: Pointer to the TSO segment descriptor for this packet
 * @tso_num_desc: Pointer to the shared TSO num-segments descriptor
 */
struct dp_tx_desc_s {
	struct dp_tx_desc_s *next;
	qdf_nbuf_t nbuf;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	uint32_t  id;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	uint8_t  pool_id;
	uint8_t flags;
	struct hal_tx_desc_comp_s comp;
	uint16_t tx_encap_type;
	uint8_t frm_type;
	uint8_t pkt_offset;
	void *me_buffer;
	void *tso_desc;
	void *tso_num_desc;
};

/**
 * struct dp_tx_tso_seg_pool_s
 * @pool_size: total number of pool elements
 * @num_free: free element count
 * @freelist: first free element pointer
 * @lock: lock for accessing the pool
 */
struct dp_tx_tso_seg_pool_s {
	uint16_t pool_size;
	uint16_t num_free;
	struct qdf_tso_seg_elem_t *freelist;
	qdf_spinlock_t lock;
};

/**
 * struct dp_tx_tso_num_seg_pool_s
 * @num_seg_pool_size: total number of pool elements
 * @num_free: free element count
 * @freelist: first free element pointer
 * @lock: lock for accessing the pool
 */
struct dp_tx_tso_num_seg_pool_s {
	uint16_t num_seg_pool_size;
	uint16_t num_free;
	struct qdf_tso_num_seg_elem_t *freelist;
	/* tso mutex */
	qdf_spinlock_t lock;
};

/**
 * struct dp_tx_desc_pool_s - Tx Descriptor pool information
 * @elem_size: Size of each descriptor in the pool
 * @elem_count: Total number of descriptors in the pool
 * @num_allocated: Number of used descriptors
 * @num_free: Number of free descriptors
 * @freelist: Chain of free descriptors
 * @desc_pages: multiple page allocation information for actual descriptors
 * @lock: Lock for descriptor allocation/free from/to the pool
 */
struct dp_tx_desc_pool_s {
	uint16_t elem_size;
	uint16_t elem_count;
	uint32_t num_allocated;
	uint32_t num_free;
	struct dp_tx_desc_s *freelist;
	struct qdf_mem_multi_page_t desc_pages;
	qdf_spinlock_t lock;
};
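
/*
 * Illustrative sketch (not part of the original header) of the allocation
 * discipline this pool implies: descriptors are popped from the freelist
 * under the pool lock, keeping num_free and num_allocated consistent. The
 * function name is hypothetical; the real allocator lives elsewhere in the
 * DP Tx code.
 *
 *	static inline struct dp_tx_desc_s *
 *	example_tx_desc_alloc(struct dp_tx_desc_pool_s *pool)
 *	{
 *		struct dp_tx_desc_s *desc = NULL;
 *
 *		qdf_spin_lock_bh(&pool->lock);
 *		if (pool->freelist) {
 *			desc = pool->freelist;
 *			pool->freelist = desc->next;
 *			pool->num_free--;
 *			pool->num_allocated++;
 *		}
 *		qdf_spin_unlock_bh(&pool->lock);
 *
 *		return desc;
 *	}
 */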


struct dp_srng {
	void *hal_srng;
	void *base_vaddr_unaligned;
	qdf_dma_addr_t base_paddr_unaligned;
	uint32_t alloc_size;
	int irq;
};

struct dp_rx_reorder_array_elem {
	qdf_nbuf_t head;
	qdf_nbuf_t tail;
};

#define DP_RX_BA_INACTIVE 0
#define DP_RX_BA_ACTIVE 1
struct dp_reo_cmd_info {
	uint16_t cmd;
	enum hal_reo_cmd_type cmd_type;
	void *data;
	void (*handler)(struct dp_soc *, void *, union hal_reo_status *);
	TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
};
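
/*
 * Illustrative sketch (not part of the original header): outstanding REO
 * commands are kept on soc->rx.reo_cmd_list (under reo_cmd_lock), keyed by
 * the command number the hardware echoes back in the REO status ring; the
 * matching node's handler is then invoked. Names other than the struct
 * members are hypothetical.
 *
 *	struct dp_reo_cmd_info *cmd_info;
 *
 *	qdf_spin_lock_bh(&soc->rx.reo_cmd_lock);
 *	TAILQ_FOREACH(cmd_info, &soc->rx.reo_cmd_list, reo_cmd_list_elem) {
 *		if (cmd_info->cmd == completed_cmd_num) {
 *			cmd_info->handler(soc, cmd_info->data, &reo_status);
 *			break;
 *		}
 *	}
 *	qdf_spin_unlock_bh(&soc->rx.reo_cmd_lock);
 */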

/* Rx TID */
struct dp_rx_tid {
	/* TID */
	int tid;

	/* REO TID queue descriptors */
	void *hw_qdesc_vaddr_unaligned;
	qdf_dma_addr_t hw_qdesc_paddr_unaligned;
	qdf_dma_addr_t hw_qdesc_paddr;
	uint32_t hw_qdesc_alloc_size;

	/* RX ADDBA session state */
	int ba_status;

	/* RX BA window size */
	uint16_t ba_win_size;

	/* TODO: Check the following while adding defragmentation support */
	struct dp_rx_reorder_array_elem *array;
	/* base - single rx reorder element used for non-aggr cases */
	struct dp_rx_reorder_array_elem base;

	/* only used for defrag right now */
	TAILQ_ENTRY(dp_rx_tid) defrag_waitlist_elem;

	/* MSDU link pointers used for reinjection */
	struct hal_rx_msdu_link_ptr_info
		transcap_msdu_link_ptr[HAL_RX_MAX_SAVED_RING_DESC];

	struct hal_rx_mpdu_desc_info transcap_rx_mpdu_desc_info;
	uint8_t curr_ring_desc_idx;

	/* Sequence and fragments that are being processed currently */
	uint32_t curr_seq_num;
	uint32_t curr_frag_num;

	uint32_t defrag_timeout_ms;
	uint16_t dialogtoken;
	uint16_t statuscode;
};

/* per interrupt context */
struct dp_intr {
	uint8_t tx_ring_mask;   /* WBM Tx completion rings (0-2)
				associated with this napi context */
	uint8_t rx_ring_mask;   /* Rx REO rings (0-3) associated
				with this interrupt context */
	uint8_t rx_mon_ring_mask;  /* Rx monitor ring mask (0-2) */
	uint8_t rx_err_ring_mask; /* REO Exception Ring */
	uint8_t rx_wbm_rel_ring_mask; /* WBM2SW Rx Release Ring */
	uint8_t reo_status_ring_mask; /* REO command response ring */
	struct dp_soc *soc;    /* Reference to SoC structure,
				to get DMA ring handles */
	qdf_lro_ctx_t lro_ctx;
};
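
/*
 * Illustrative sketch (not part of the original header): each mask in
 * struct dp_intr is a bitmap of ring ids serviced by that interrupt
 * context, so one context can fan out to several rings. Function and
 * variable names are hypothetical.
 *
 *	struct dp_intr *int_ctx = &soc->intr_ctx[ctx_id];
 *	int ring;
 *
 *	for (ring = 0; ring < MAX_TCL_DATA_RINGS; ring++) {
 *		if (int_ctx->tx_ring_mask & (1 << ring))
 *			example_service_tx_comp_ring(soc, ring);
 *	}
 */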

#define REO_DESC_FREELIST_SIZE 64
#define REO_DESC_FREE_DEFER_MS 1000
struct reo_desc_list_node {
	qdf_list_node_t node;
	unsigned long free_ts;
	struct dp_rx_tid rx_tid;
};
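
/*
 * Illustrative sketch (not part of the original header): free_ts records
 * when a REO queue descriptor was queued for freeing, so the actual free
 * can be deferred until REO_DESC_FREE_DEFER_MS has elapsed, giving REO
 * time to stop referencing the descriptor. The timebase below assumes the
 * qdf_system_ticks() helpers; the free routine name is hypothetical.
 *
 *	unsigned long age_ms = qdf_system_ticks_to_msecs(
 *				qdf_system_ticks() - desc_node->free_ts);
 *
 *	if (age_ms > REO_DESC_FREE_DEFER_MS)
 *		example_free_rx_tid_qdesc(soc, &desc_node->rx_tid);
 */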

struct dp_ast_entry {
	uint16_t ast_idx;
	uint8_t mac_addr[DP_MAC_ADDR_LEN];
	uint8_t next_hop;
	struct dp_peer *peer;
	TAILQ_ENTRY(dp_ast_entry) ast_entry_elem;
};

/* SOC level structure for data path */
struct dp_soc {
	/* Common base structure - Should be the first member */
	struct cdp_soc_t cdp_soc;

	/* SoC/softc handle from OSIF layer */
	void *osif_soc;

	/* OS device abstraction */
	qdf_device_t osdev;

	/* WLAN config context */
	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx;

	/* HTT handle for host-fw interaction */
	void *htt_handle;

	/* Common init done */
	qdf_atomic_t cmn_init_done;

	/* Opaque hif handle */
	struct hif_opaque_softc *hif_handle;

	/* PDEVs on this SOC */
	struct dp_pdev *pdev_list[MAX_PDEV_CNT];

	/* Number of PDEVs */
	uint8_t pdev_count;

	/* Link descriptor memory banks */
	struct {
		void *base_vaddr_unaligned;
		void *base_vaddr;
		qdf_dma_addr_t base_paddr_unaligned;
		qdf_dma_addr_t base_paddr;
		uint32_t size;
	} link_desc_banks[MAX_LINK_DESC_BANKS];

	/* Link descriptor Idle list for HW internal use (SRNG mode) */
	struct dp_srng wbm_idle_link_ring;

	/* Link descriptor Idle list for HW internal use (scatter buffer mode)
	 */
	qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS];
	void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS];
	uint32_t wbm_idle_scatter_buf_size;

	/* Tx SW descriptor pool */
	struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS];

	/* Tx MSDU Extension descriptor pool */
	struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS];

	/* Tx TSO descriptor pool */
	struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS];

	/* Tx TSO Num of segments pool */
	struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS];

	/* Tx H/W queues lock */
	qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];

	/* Rx SW descriptor pool for RXDMA buffer */
	struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS];

	/* Rx SW descriptor pool for RXDMA monitor buffer */
	struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS];

	/* Rx SW descriptor pool for RXDMA status buffer */
	struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS];

	/* DP rx desc lock */
	DP_MUTEX_TYPE rx_desc_mutex[MAX_RXDESC_POOLS];

	/* HAL SOC handle */
	void *hal_soc;

	/* DP Interrupts */
	struct dp_intr intr_ctx[DP_MAX_INTERRUPT_CONTEXTS];

	/* REO destination rings */
	struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS];

	/* Number of REO destination rings */
	uint8_t num_reo_dest_rings;

	/* REO exception ring - TODO: see if we should combine this with
	 * reo_dest_ring
	 */
	struct dp_srng reo_exception_ring;

	/* REO reinjection ring */
	struct dp_srng reo_reinject_ring;

	/* REO command ring */
	struct dp_srng reo_cmd_ring;

	/* REO command status ring */
	struct dp_srng reo_status_ring;

	/* WBM Rx release ring */
	struct dp_srng rx_rel_ring;

	/* Number of TCL data rings */
	uint8_t num_tcl_data_rings;

	/* TCL data ring */
	struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS];

	/* TCL command ring */
	struct dp_srng tcl_cmd_ring;

	/* TCL command status ring */
	struct dp_srng tcl_status_ring;

	/* WBM Tx completion rings */
	struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS];

	/* Common WBM link descriptor release ring (SW to WBM) */
	struct dp_srng wbm_desc_rel_ring;

	/* Tx ring map for interrupt processing */
	struct dp_srng *tx_ring_map[DP_MAX_TX_RINGS];

	/* Rx ring map for interrupt processing */
	struct dp_srng *rx_ring_map[DP_MAX_RX_RINGS];

#ifndef CONFIG_WIN
	/* WDI event handlers */
	struct wdi_event_subscribe_t **wdi_event_list;
#endif

	/* peer ID to peer object map (array of pointers to peer objects) */
	struct dp_peer **peer_id_to_obj_map;

	struct {
		unsigned mask;
		unsigned idx_bits;
		TAILQ_HEAD(, dp_peer) *bins;
	} peer_hash;

	/* rx defrag state - TBD: do we need this per radio? */
	struct {
		struct {
			TAILQ_HEAD(, dp_rx_tid) waitlist;
			uint32_t timeout_ms;
		} defrag;
		struct {
			int defrag_timeout_check;
			int dup_check;
		} flags;
		TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list;
		qdf_spinlock_t reo_cmd_lock;
	} rx;

	/* optional rx processing function */
	void (*rx_opt_proc)(
		struct dp_vdev *vdev,
		struct dp_peer *peer,
		unsigned tid,
		qdf_nbuf_t msdu_list);

	/* pool addr for mcast enhance buff */
	struct {
		int size;
		uint32_t paddr;
		uint32_t *vaddr;
		struct dp_tx_me_buf_t *freelist;
		int buf_in_use;
		qdf_dma_mem_context(memctx);
	} me_buf;

	/**
	 * peer ref mutex:
	 * 1. Protect peer object lookups until the returned peer object's
	 *	reference count is incremented.
	 * 2. Provide mutex when accessing peer object lookup structures.
	 */
	DP_MUTEX_TYPE peer_ref_mutex;

	/* maximum value for peer_id */
	int max_peers;

	/* SoC level data path statistics */
	struct {
		/* SOC level TX stats */
		struct {
			/* packets dropped on tx because of no peer */
			struct cdp_pkt_info tx_invalid_peer;
			/* descriptors in each tcl ring */
			uint32_t tcl_ring_full[MAX_TCL_RING];
			/* Descriptors in use at soc */
			uint32_t desc_in_use;
		} tx;
		/* SOC level RX stats */
		struct {
			/* Rx errors */
			/* Total Packets in Rx Error ring */
			uint32_t err_ring_pkts;
			/* No of Fragments */
			uint32_t rx_frags;
			struct {
				/* Invalid RBM error count */
				uint32_t invalid_rbm;
				/* Invalid VDEV Error count */
				uint32_t invalid_vdev;
				/* Invalid PDEV error count */
				uint32_t invalid_pdev;
				/* Invalid PEER Error count */
				struct cdp_pkt_info rx_invalid_peer;
				/* HAL ring access Fail error count */
				uint32_t hal_ring_access_fail;
				/* RX DMA error count */
				uint32_t rxdma_error[MAX_RXDMA_ERRORS];
				/* REO Error count */
				uint32_t reo_error[REO_ERROR_TYPE_MAX];
				/* HAL REO ERR Count */
				uint32_t hal_reo_error[CDP_MAX_RX_RINGS];
			} err;

			/* packet count per core - per ring */
			uint64_t ring_packets[NR_CPUS][MAX_REO_DEST_RINGS];
		} rx;
	} stats;

	/* Enable processing of Tx completion status words */
	bool process_tx_status;

	struct dp_ast_entry *ast_table[WLAN_UMAC_PSOC_MAX_PEERS];

#ifdef DP_INTR_POLL_BASED
	/* interrupt timer */
	qdf_timer_t int_timer;
#endif
	qdf_list_t reo_desc_freelist;
	qdf_spinlock_t reo_desc_freelist_lock;
	/* Obj Mgr SoC */
	struct wlan_objmgr_psoc *psoc;
	qdf_nbuf_t invalid_peer_head_msdu;
	qdf_nbuf_t invalid_peer_tail_msdu;
#ifdef QCA_SUPPORT_SON
	/* The timer to check station's inactivity status */
	os_timer_t pdev_bs_inact_timer;
	/* The current inactivity count reload value
	   based on overload condition */
	u_int16_t pdev_bs_inact_reload;

	/* The inactivity timer value when not overloaded */
	u_int16_t pdev_bs_inact_normal;

	/* The inactivity timer value when overloaded */
	u_int16_t pdev_bs_inact_overload;

	/* The inactivity timer check interval */
	u_int16_t pdev_bs_inact_interval;
	/* Inactivity timer */
#endif /* QCA_SUPPORT_SON */
	/* T2H Ext stats message queue */
	qdf_nbuf_queue_t htt_stats_msg;
	/* T2H Ext stats message length */
	uint32_t htt_msg_len;
};
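
/*
 * Illustrative sketch (not part of the original header): peer_hash above
 * is an array of TAILQ bins sized by mask/idx_bits. One plausible way to
 * derive a bin index from a MAC address, consistent with how mask and
 * idx_bits are declared, is to XOR-fold the address and mask it down;
 * lookups then walk the bin under peer_ref_mutex.
 *
 *	unsigned index;
 *
 *	index = ma->align2.bytes_ab ^ ma->align2.bytes_cd ^
 *		ma->align2.bytes_ef;
 *	index ^= index >> soc->peer_hash.idx_bits;
 *	index &= soc->peer_hash.mask;
 *
 *	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem)
 *		... compare peer->mac_addr, take a reference on match ...
 */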
#define MAX_RX_MAC_RINGS 2
/* Same as NAC_MAX_CLIENT */
#define DP_NAC_MAX_CLIENT  24

/* same as ieee80211_nac_param */
enum dp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	DP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	DP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	DP_NAC_PARAM_LIST,
};

union dp_align_mac_addr {
	uint8_t raw[DP_MAC_ADDR_LEN];
	struct {
		uint16_t bytes_ab;
		uint16_t bytes_cd;
		uint16_t bytes_ef;
	} align2;
	struct {
		uint32_t bytes_abcd;
		uint16_t bytes_ef;
	} align4;
};
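
/*
 * Illustrative sketch (not part of the original header): the union allows
 * a MAC address comparison to run as two integer compares instead of a
 * byte-wise memcmp, provided both operands live in a dp_align_mac_addr.
 * The function name is hypothetical.
 *
 *	static inline int example_mac_addr_cmp(union dp_align_mac_addr *a,
 *					       union dp_align_mac_addr *b)
 *	{
 *		return !((a->align4.bytes_abcd == b->align4.bytes_abcd) &&
 *			 (a->align4.bytes_ef == b->align4.bytes_ef));
 *	}
 */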

/**
 * struct dp_neighbour_peer - neighbour peer list type for smart mesh
 * @neighbour_peers_macaddr: neighbour peer's mac address
 * @neighbour_peer_list_elem: neighbour peer list TAILQ element
 */
struct dp_neighbour_peer {
	/* MAC address of the neighbour peer */
	union dp_align_mac_addr neighbour_peers_macaddr;
	/* node in the list of neighbour peers */
	TAILQ_ENTRY(dp_neighbour_peer) neighbour_peer_list_elem;
};

/* PDEV level structure for data path */
struct dp_pdev {
	/* PDEV handle from OSIF layer TBD: see if we really need osif_pdev */
	void *osif_pdev;

	/* PDEV Id */
	int pdev_id;

	/* TXRX SOC handle */
	struct dp_soc *soc;

	/* Ring used to replenish rx buffers (maybe by the firmware or MAC) */
	struct dp_srng rx_refill_buf_ring;

	/* Empty ring used by firmware to post rx buffers to the MAC */
	struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];

	/* wlan_cfg pdev ctxt */
	struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx;

	/* RXDMA monitor buffer replenish ring */
	struct dp_srng rxdma_mon_buf_ring;

	/* RXDMA monitor destination ring */
	struct dp_srng rxdma_mon_dst_ring;

	/* RXDMA monitor status ring. TBD: Check format of this ring */
	struct dp_srng rxdma_mon_status_ring;

	struct dp_srng rxdma_mon_desc_ring;

	/* Link descriptor memory banks */
	struct {
		void *base_vaddr_unaligned;
		void *base_vaddr;
		qdf_dma_addr_t base_paddr_unaligned;
		qdf_dma_addr_t base_paddr;
		uint32_t size;
	} link_desc_banks[MAX_MON_LINK_DESC_BANKS];


	/**
	 * TODO: See if we need a ring map here for LMAC rings.
	 * 1. Monitor rings are currently planned to be processed on receiving
	 * PPDU end interrupts and hence won't need ring based interrupts.
	 * 2. Rx buffer rings will be replenished during REO destination
	 * processing and don't require regular interrupt handling - we will
	 * only handle low water mark interrupts, which are not expected
	 * frequently
	 */

	/* VDEV list */
	TAILQ_HEAD(, dp_vdev) vdev_list;

	/* Number of vdevs this device has */
	uint16_t vdev_count;

	/* PDEV transmit lock */
	qdf_spinlock_t tx_lock;

#ifdef notyet
	/* Pktlog pdev */
	ol_pktlog_dev_t *pl_dev;
#endif

	/* Monitor mode interface and status storage */
	struct dp_vdev *monitor_vdev;

	/* monitor mode mutex */
	qdf_spinlock_t mon_mutex;

	/* tx mutex for mcast enhancement */
	DP_MUTEX_TYPE tx_mutex;

	/* Smart Mesh */
	bool filter_neighbour_peers;
	/* smart mesh mutex */
	qdf_spinlock_t neighbour_peer_mutex;
	/* Neighbour peer list */
	TAILQ_HEAD(, dp_neighbour_peer) neighbour_peers_list;

	/* Band steering */
	/* TBD */

	/* PDEV level data path statistics */
	struct cdp_pdev_stats stats;

	/* Global RX decap mode for the device */
	enum htt_pkt_type rx_decap_mode;

	/* Enhanced Stats is enabled */
	bool ap_stats_tx_cal_enable;

	qdf_atomic_t num_tx_outstanding;

	qdf_atomic_t num_tx_exception;

	/* MCL specific local peer handle */
	struct {
		uint8_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
		uint8_t freelist;
		qdf_spinlock_t lock;
		struct dp_peer *map[OL_TXRX_NUM_LOCAL_PEER_IDS];
	} local_peer_ids;

	/* dscp_tid_map */
	uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX];

	struct hal_rx_ppdu_info ppdu_info;

	/* operating channel */
	uint8_t operating_channel;

	qdf_nbuf_queue_t rx_status_q;
	uint32_t mon_ppdu_id;
	uint32_t mon_ppdu_status;
	struct cdp_mon_status rx_mon_recv_status;

	/* pool addr for mcast enhance buff */
	struct {
		int size;
		uint32_t paddr;
		char *vaddr;
		struct dp_tx_me_buf_t *freelist;
		int buf_in_use;
		qdf_dma_mem_context(memctx);
	} me_buf;

	/* Number of VAPs with mcast enhancement enabled */
	qdf_atomic_t mc_num_vap_attached;

	/* TBD */

	/* map this pdev to a particular Reo Destination ring */
	enum cdp_host_reo_dest_ring reo_dest;
};
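
/*
 * Illustrative sketch (not part of the original header): the per-pdev
 * dscp_tid_map pairs with the DP_IP_DSCP_* macros near the top of this
 * file. For IPv4, the DSCP value occupies the upper six bits of the ToS
 * byte, and the vdev's dscp_tid_map_id selects one of the DP_MAX_TID_MAPS
 * tables. Variable names are hypothetical.
 *
 *	uint8_t tos  = ipv4_hdr_tos_byte;
 *	uint8_t dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
 *	uint8_t tid  = pdev->dscp_tid_map[vdev->dscp_tid_map_id][dscp];
 */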

struct dp_peer;

/* VDEV structure for data path state */
struct dp_vdev {
	/* OS device abstraction */
	qdf_device_t osdev;
	/* physical device that is the parent of this virtual device */
	struct dp_pdev *pdev;

	/* Handle to the OS shim SW's virtual device */
	ol_osif_vdev_handle osif_vdev;

	/* vdev_id - ID used to specify a particular vdev to the target */
	uint8_t vdev_id;

	/* MAC address */
	union dp_align_mac_addr mac_addr;

	/* node in the pdev's list of vdevs */
	TAILQ_ENTRY(dp_vdev) vdev_list_elem;

	/* dp_peer list */
	TAILQ_HEAD(, dp_peer) peer_list;

	/* callback to hand rx frames to the OS shim */
	ol_txrx_rx_fp osif_rx;
	ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap;
	ol_txrx_tx_free_ext_fp osif_tx_free_ext;

#ifdef notyet
	/* callback to check if the msdu is a WAI (WAPI) frame */
	ol_rx_check_wai_fp osif_check_wai;
#endif

	/* proxy arp function */
	ol_txrx_proxy_arp_fp osif_proxy_arp;

	/* callback to hand rx monitor 802.11 MPDU to the OS shim */
	ol_txrx_rx_mon_fp osif_rx_mon;

	ol_txrx_mcast_me_fp me_convert;
	/* deferred vdev deletion state */
	struct {
		/* VDEV delete pending */
		int pending;
		/*
		 * callback and a context argument to provide a
		 * notification for when the vdev is deleted.
		 */
		ol_txrx_vdev_delete_cb callback;
		void *context;
	} delete;

	/* safe mode control to bypass the encrypt and decipher process */
	uint32_t safemode;

	/* rx filter related */
	uint32_t drop_unenc;
#ifdef notyet
	privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
	uint32_t filters_num;
#endif
	/* TDLS Link status */
	bool tdls_link_connected;

	/* VDEV operating mode */
	enum wlan_op_mode opmode;

	/* Tx encapsulation type for this VAP */
	enum htt_cmn_pkt_type tx_encap_type;
	/* Rx Decapsulation type for this VAP */
	enum htt_cmn_pkt_type rx_decap_type;

	/* BSS peer */
	struct dp_peer *vap_bss_peer;

	/* WDS enabled */
	bool wds_enabled;

	/* NAWDS enabled */
	bool nawds_enabled;

	/* Default HTT meta data for this VDEV */
	/* TBD: check alignment constraints */
	uint16_t htt_tcl_metadata;

	/* Mesh mode vdev */
	uint32_t mesh_vdev;

	/* Mesh mode rx filter setting */
	uint32_t mesh_rx_filter;

	/* DSCP-TID mapping table ID */
	uint8_t dscp_tid_map_id;

	/* Multicast enhancement enabled */
	uint8_t mcast_enhancement_en;

	/* per vdev rx nbuf queue */
	qdf_nbuf_queue_t rxq;

	uint8_t tx_ring_id;
	struct dp_tx_desc_pool_s *tx_desc;
	struct dp_tx_ext_desc_pool_s *tx_ext_desc;

	/* VDEV Stats */
	struct cdp_vdev_stats stats;
	bool lro_enable;

	/* Is this a proxySTA VAP */
	bool proxysta_vdev;

	/* Address search flags to be configured in HAL descriptor */
	uint8_t hal_desc_addr_search_flags;
};


enum {
	dp_sec_mcast = 0,
	dp_sec_ucast
};

/* Peer structure for data path state */
struct dp_peer {
	/* VDEV to which this peer is associated */
	struct dp_vdev *vdev;

	struct dp_ast_entry self_ast_entry;

	qdf_atomic_t ref_cnt;

	/* TODO: See if multiple peer IDs are required in wifi3.0 */
	/* peer ID(s) for this peer */
	uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];

	union dp_align_mac_addr mac_addr;

	/* node in the vdev's list of peers */
	TAILQ_ENTRY(dp_peer) peer_list_elem;
	/* node in the hash table bin's list of peers */
	TAILQ_ENTRY(dp_peer) hash_list_elem;

	/* TID structures */
	struct dp_rx_tid rx_tid[DP_MAX_TIDS];

	/* TBD: No transmit TID state required? */

	struct {
		enum htt_sec_type sec_type;
		u_int32_t michael_key[2]; /* relevant for TKIP */
	} security[2]; /* 0 -> multicast, 1 -> unicast */

	/*
	 * rx proc function: this either is a copy of the soc's rx_opt_proc
	 * for regular rx processing, or has been redirected to a /dev/null
	 * discard function when peer deletion is in progress.
	 */
	void (*rx_opt_proc)(struct dp_vdev *vdev, struct dp_peer *peer,
		unsigned tid, qdf_nbuf_t msdu_list);

	/* set when node is authorized */
	uint8_t authorize:1;

	/* Band steering: Set when node is inactive */
	uint8_t peer_bs_inact_flag:1;
	u_int16_t peer_bs_inact; /* inactivity mark count */

	/* NAWDS Flag and Bss Peer bit */
	uint8_t nawds_enabled:1,
				bss_peer:1,
				wapi:1;

	/* MCL specific peer local id */
	uint16_t local_id;
	enum ol_txrx_peer_state state;
	qdf_spinlock_t peer_info_lock;

	qdf_time_t last_assoc_rcvd;
	qdf_time_t last_disassoc_rcvd;
	qdf_time_t last_deauth_rcvd;
	/* Peer Stats */
	struct cdp_peer_stats stats;

	TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
	/* TBD */
};
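
/*
 * Illustrative sketch (not part of the original header): the anonymous
 * dp_sec_mcast/dp_sec_ucast enum above indexes the two-entry security[]
 * array in struct dp_peer, so group and pairwise key state are fetched
 * the same way. The helper below is hypothetical.
 *
 *	int idx = is_unicast ? dp_sec_ucast : dp_sec_mcast;
 *
 *	if (peer->security[idx].sec_type == htt_sec_type_tkip)
 *		example_verify_michael_mic(
 *			peer->security[idx].michael_key, msdu);
 */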

#ifdef CONFIG_WIN
/*
 * dp_invalid_peer_msg
 * @nbuf: data buffer
 * @wh: 802.11 header
 * @vdev_id: id of vdev
 */
struct dp_invalid_peer_msg {
	qdf_nbuf_t nbuf;
	struct ieee80211_frame *wh;
	uint8_t vdev_id;
};
#endif

/*
 * dp_tx_me_buf_t: ME buffer
 * data: Destination MAC address
 * next: pointer to next buffer
 */
struct dp_tx_me_buf_t {
	uint8_t data[DP_MAC_ADDR_LEN];
	struct dp_tx_me_buf_t *next;
};
#endif /* _DP_TYPES_H_ */