xref: /wlan-dirver/qcacld-3.0/core/dp/txrx/ol_txrx_types.h (revision 1240fc7c98a5a5c6056b3d260d6ade7605f836ed)
1 /*
2  * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 /**
29  * @file ol_txrx_types.h
30  * @brief Define the major data types used internally by the host datapath SW.
31  */
32 #ifndef _OL_TXRX_TYPES__H_
33 #define _OL_TXRX_TYPES__H_
34 
35 #include <qdf_nbuf.h>           /* qdf_nbuf_t */
36 #include <qdf_mem.h>
37 #include <cds_queue.h>          /* TAILQ */
38 #include <a_types.h>            /* A_UINT8 */
39 #include <htt.h>                /* htt_sec_type, htt_pkt_type, etc. */
40 #include <qdf_atomic.h>         /* qdf_atomic_t */
41 #include <wdi_event_api.h>      /* wdi_event_subscribe */
42 #include <qdf_timer.h>		/* qdf_timer_t */
43 #include <qdf_lock.h>           /* qdf_spinlock */
44 #include <pktlog.h>             /* ol_pktlog_dev_handle */
45 #include <ol_txrx_stats.h>
46 #include <txrx.h>
47 #include "ol_txrx_htt_api.h"
48 #include "ol_htt_tx_api.h"
49 #include "ol_htt_rx_api.h"
50 #include "ol_txrx_ctrl_api.h" /* WLAN_MAX_STA_COUNT */
51 #include "ol_txrx_osif_api.h" /* ol_rx_callback */
52 #include "cdp_txrx_flow_ctrl_v2.h"
53 #include "cdp_txrx_peer_ops.h"
54 
55 /*
56  * The target may allocate multiple IDs for a peer.
57  * In particular, the target may allocate one ID to represent the
58  * multicast key the peer uses, and another ID to represent the
59  * unicast key the peer uses.
60  */
61 #define MAX_NUM_PEER_ID_PER_PEER 16
62 
63 /* OL_TXRX_NUM_EXT_TIDS -
64  * 16 "real" TIDs + 3 pseudo-TIDs for mgmt, mcast/bcast & non-QoS data
65  */
66 #define OL_TXRX_NUM_EXT_TIDS 19
67 
68 #define OL_TX_NUM_QOS_TIDS 16   /* 16 regular TIDs */
69 #define OL_TX_NON_QOS_TID 16
70 #define OL_TX_MGMT_TID    17
71 #define OL_TX_NUM_TIDS    18
72 #define OL_RX_MCAST_TID   18  /* Mcast TID only between f/w & host */
73 
74 #define OL_TX_VDEV_MCAST_BCAST    0 /* HTT_TX_EXT_TID_MCAST_BCAST */
75 #define OL_TX_VDEV_DEFAULT_MGMT   1 /* HTT_TX_EXT_TID_DEFALT_MGMT */
76 #define OL_TX_VDEV_NUM_QUEUES     2
77 
78 #define OL_TXRX_MGMT_TYPE_BASE htt_pkt_num_types
79 #define OL_TXRX_MGMT_NUM_TYPES 8
80 
81 #define OL_TX_MUTEX_TYPE qdf_spinlock_t
82 #define OL_RX_MUTEX_TYPE qdf_spinlock_t
83 
84 /* TXRX Histogram defines */
85 #define TXRX_DATA_HISTROGRAM_GRANULARITY      1000
86 #define TXRX_DATA_HISTROGRAM_NUM_INTERVALS    100
87 
88 #define OL_TXRX_INVALID_VDEV_ID		(-1)
89 
90 struct ol_txrx_pdev_t;
91 struct ol_txrx_vdev_t;
92 struct ol_txrx_peer_t;
93 
94 /* rx filter related */
95 #define MAX_PRIVACY_FILTERS           4 /* max privacy filters */
96 
97 enum privacy_filter {
98 	PRIVACY_FILTER_ALWAYS,
99 	PRIVACY_FILTER_KEY_UNAVAILABLE,
100 };
101 
102 enum privacy_filter_packet_type {
103 	PRIVACY_FILTER_PACKET_UNICAST,
104 	PRIVACY_FILTER_PACKET_MULTICAST,
105 	PRIVACY_FILTER_PACKET_BOTH
106 };
107 
108 struct privacy_exemption {
109 	/* ethertype -
110 	 * type of ethernet frames this filter applies to, in host byte order
111 	 */
112 	uint16_t ether_type;
113 	enum privacy_filter filter_type;
114 	enum privacy_filter_packet_type packet_type;
115 };
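
/*
 * Illustrative example (not part of the driver code): a privacy exemption
 * entry that lets EAPOL (ethertype 0x888E) frames through while the peer's
 * key has not yet been installed.  The values shown are hypothetical; the
 * actual filter list is populated by the control path.
 *
 *	struct privacy_exemption eapol_exempt = {
 *		.ether_type  = 0x888E,
 *		.filter_type = PRIVACY_FILTER_KEY_UNAVAILABLE,
 *		.packet_type = PRIVACY_FILTER_PACKET_UNICAST,
 *	};
 */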
116 
117 enum ol_tx_frm_type {
118 	OL_TX_FRM_STD = 0, /* regular frame - no added header fragments */
119 	OL_TX_FRM_TSO,     /* TSO segment, with a modified IP header added */
120 	OL_TX_FRM_AUDIO,   /* audio frames, with a custom LLC/SNAP hdr added */
121 	OL_TX_FRM_NO_FREE, /* frame requires special tx completion callback */
122 	ol_tx_frm_freed = 0xff, /* the tx desc is in free list */
123 };
124 
125 #if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
126 
127 #define MAX_NO_PEERS_IN_LIMIT (2*10 + 2)
128 
129 enum ol_tx_peer_bal_state {
130 	ol_tx_peer_bal_enable = 0,
131 	ol_tx_peer_bal_disable,
132 };
133 
134 enum ol_tx_peer_bal_timer_state {
135 	ol_tx_peer_bal_timer_disable = 0,
136 	ol_tx_peer_bal_timer_active,
137 	ol_tx_peer_bal_timer_inactive,
138 };
139 
140 struct ol_tx_limit_peer_t {
141 	u_int16_t limit_flag;
142 	u_int16_t peer_id;
143 	u_int16_t limit;
144 };
145 
146 enum tx_peer_level {
147 	TXRX_IEEE11_B = 0,
148 	TXRX_IEEE11_A_G,
149 	TXRX_IEEE11_N,
150 	TXRX_IEEE11_AC,
151 	TXRX_IEEE11_AX,
152 	TXRX_IEEE11_MAX,
153 };
154 
155 struct tx_peer_threshold {
156 	u_int32_t tput_thresh;
157 	u_int32_t tx_limit;
158 };
159 #endif
160 
161 
162 struct ol_tx_desc_t {
163 	qdf_nbuf_t netbuf;
164 	void *htt_tx_desc;
165 	uint16_t id;
166 	qdf_dma_addr_t htt_tx_desc_paddr;
167 	void *htt_frag_desc; /* struct msdu_ext_desc_t * */
168 	qdf_dma_addr_t htt_frag_desc_paddr;
169 	qdf_atomic_t ref_cnt;
170 	enum htt_tx_status status;
171 
172 #ifdef QCA_COMPUTE_TX_DELAY
173 	uint32_t entry_timestamp_ticks;
174 #endif
175 	/*
176 	 * Allow tx descriptors to be stored in (doubly-linked) lists.
177 	 * This is mainly used for HL tx queuing and scheduling, but is
178 	 * also used by LL+HL for batch processing of tx frames.
179 	 */
180 	TAILQ_ENTRY(ol_tx_desc_t) tx_desc_list_elem;
181 
182 	/*
183 	 * Remember whether the tx frame is a regular packet, or whether
184 	 * the driver added extra header fragments (e.g. a modified IP header
185 	 * for TSO fragments, or an added LLC/SNAP header for audio interworking
186 	 * data) that need to be handled in a special manner.
187 	 * This field is filled in with the ol_tx_frm_type enum.
188 	 */
189 	uint8_t pkt_type;
190 
191 	u_int8_t vdev_id;
192 
193 	struct ol_txrx_vdev_t *vdev;
194 
195 	void *txq;
196 
197 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
198 	/*
199 	 * used by tx encap, to restore the os buf start offset
200 	 * after tx complete
201 	 */
202 	uint8_t orig_l2_hdr_bytes;
203 #endif
204 
205 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
206 	struct ol_tx_flow_pool_t *pool;
207 #endif
208 	void *tso_desc;
209 	void *tso_num_desc;
210 };
211 
212 typedef TAILQ_HEAD(some_struct_name, ol_tx_desc_t) ol_tx_desc_list;
213 
214 union ol_tx_desc_list_elem_t {
215 	union ol_tx_desc_list_elem_t *next;
216 	struct ol_tx_desc_t tx_desc;
217 };
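
/*
 * Illustrative sketch (not the driver's actual allocator): the union above
 * lets a free tx descriptor double as a singly-linked freelist node, so
 * allocating a descriptor is a simple list pop.  Locking and the surrounding
 * pool bookkeeping are omitted; "freelist" stands for any list head of this
 * union type.
 *
 *	union ol_tx_desc_list_elem_t *elem = freelist;
 *	struct ol_tx_desc_t *desc = NULL;
 *
 *	if (elem) {
 *		freelist = elem->next;    // pop the element off the free list
 *		desc = &elem->tx_desc;    // reuse its storage as a descriptor
 *	}
 */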
218 
219 union ol_txrx_align_mac_addr_t {
220 	uint8_t raw[OL_TXRX_MAC_ADDR_LEN];
221 	struct {
222 		uint16_t bytes_ab;
223 		uint16_t bytes_cd;
224 		uint16_t bytes_ef;
225 	} align2;
226 	struct {
227 		uint32_t bytes_abcd;
228 		uint16_t bytes_ef;
229 	} align4;
230 };
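
/*
 * The aligned views above allow MAC addresses to be compared with a couple
 * of integer compares instead of a byte-wise memcmp, e.g. (assuming both
 * addresses are stored in this aligned form):
 *
 *	int match = (a->align4.bytes_abcd == b->align4.bytes_abcd) &&
 *		    (a->align4.bytes_ef == b->align4.bytes_ef);
 */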
231 
232 struct ol_rx_reorder_timeout_list_elem_t {
233 	TAILQ_ENTRY(ol_rx_reorder_timeout_list_elem_t)
234 	reorder_timeout_list_elem;
235 	uint32_t timestamp_ms;
236 	struct ol_txrx_peer_t *peer;
237 	uint8_t tid;
238 	uint8_t active;
239 };
240 
241 #define TXRX_TID_TO_WMM_AC(_tid) ( \
242 		(((_tid) >> 1) == 3) ? TXRX_WMM_AC_VO :	\
243 		(((_tid) >> 1) == 2) ? TXRX_WMM_AC_VI :	\
244 		(((_tid) ^ ((_tid) >> 1)) & 0x1) ? TXRX_WMM_AC_BK : \
245 		TXRX_WMM_AC_BE)
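
/*
 * For reference, the TID to access category mapping produced by the macro
 * above (it matches the usual 802.1D user-priority to WMM AC convention):
 *
 *	TID 0 -> BE,  TID 1 -> BK,  TID 2 -> BK,  TID 3 -> BE,
 *	TID 4 -> VI,  TID 5 -> VI,  TID 6 -> VO,  TID 7 -> VO
 */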
246 
247 enum {
248 	OL_TX_SCHED_WRR_ADV_CAT_BE,
249 	OL_TX_SCHED_WRR_ADV_CAT_BK,
250 	OL_TX_SCHED_WRR_ADV_CAT_VI,
251 	OL_TX_SCHED_WRR_ADV_CAT_VO,
252 	OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA,
253 	OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT,
254 	OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA,
255 	OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT,
256 
257 	OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES /* must be last */
258 };
259 
260 A_COMPILE_TIME_ASSERT(ol_tx_sched_htt_ac_values,
261 	/* check that regular WMM AC enum values match */
262 	((int)OL_TX_SCHED_WRR_ADV_CAT_VO == (int)HTT_AC_WMM_VO) &&
263 	((int)OL_TX_SCHED_WRR_ADV_CAT_VI == (int)HTT_AC_WMM_VI) &&
264 	((int)OL_TX_SCHED_WRR_ADV_CAT_BK == (int)HTT_AC_WMM_BK) &&
265 	((int)OL_TX_SCHED_WRR_ADV_CAT_BE == (int)HTT_AC_WMM_BE) &&
266 
267 	/* check that extension AC enum values match */
268 	((int)OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA
269 		== (int)HTT_AC_EXT_NON_QOS) &&
270 	((int)OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT
271 		== (int)HTT_AC_EXT_UCAST_MGMT) &&
272 	((int)OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA
273 		== (int)HTT_AC_EXT_MCAST_DATA) &&
274 	((int)OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT
275 		== (int)HTT_AC_EXT_MCAST_MGMT));
276 
277 struct ol_tx_reorder_cat_timeout_t {
278 	TAILQ_HEAD(, ol_rx_reorder_timeout_list_elem_t) virtual_timer_list;
279 	qdf_timer_t timer;
280 	uint32_t duration_ms;
281 	struct ol_txrx_pdev_t *pdev;
282 };
283 
284 enum ol_tx_scheduler_status {
285 	ol_tx_scheduler_idle = 0,
286 	ol_tx_scheduler_running,
287 };
288 
289 enum ol_tx_queue_status {
290 	ol_tx_queue_empty = 0,
291 	ol_tx_queue_active,
292 	ol_tx_queue_paused,
293 };
294 
295 struct ol_txrx_msdu_info_t {
296 	struct htt_msdu_info_t htt;
297 	struct ol_txrx_peer_t *peer;
298 	struct qdf_tso_info_t tso_info;
299 };
300 
301 enum {
302 	ol_tx_aggr_untried = 0,
303 	ol_tx_aggr_enabled,
304 	ol_tx_aggr_disabled,
305 	ol_tx_aggr_retry,
306 	ol_tx_aggr_in_progress,
307 };
308 
309 #define OL_TX_MAX_GROUPS_PER_QUEUE 1
310 #define OL_TX_MAX_VDEV_ID 16
311 #define OL_TXQ_GROUP_VDEV_ID_MASK_GET(_membership)           \
312 	(((_membership) & 0xffff0000) >> 16)
313 #define OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(_mask, _vdev_id)   \
314 	((_mask >> _vdev_id) & 0x01)
315 #define OL_TXQ_GROUP_AC_MASK_GET(_membership)           \
316 	((_membership) & 0x0000ffff)
317 #define OL_TXQ_GROUP_AC_BIT_MASK_GET(_mask, _ac_mask)   \
318 	((_mask >> _ac_mask) & 0x01)
319 #define OL_TXQ_GROUP_MEMBERSHIP_GET(_vdev_mask, _ac_mask)     \
320 	((_vdev_mask << 16) | _ac_mask)
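
/*
 * Worked example of the membership encoding above (hypothetical values):
 * a group containing vdevs 0 and 2 for all four access categories would
 * have
 *
 *	membership = OL_TXQ_GROUP_MEMBERSHIP_GET(0x0005, 0x000f)
 *		   = 0x0005000f
 *
 * and OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(0x0005, 2) evaluates to 1,
 * confirming that vdev 2 is a member.
 */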
321 
322 struct ol_tx_frms_queue_t {
323 	/* list_elem -
324 	 * Allow individual tx frame queues to be linked together into
325 	 * scheduler queues of tx frame queues
326 	 */
327 	TAILQ_ENTRY(ol_tx_frms_queue_t) list_elem;
328 	uint8_t aggr_state;
329 	struct {
330 		uint8_t total;
331 		/* pause requested by ctrl SW rather than txrx SW */
332 		uint8_t by_ctrl;
333 	} paused_count;
334 	uint8_t ext_tid;
335 	uint16_t frms;
336 	uint32_t bytes;
337 	ol_tx_desc_list head;
338 	enum ol_tx_queue_status flag;
339 	struct ol_tx_queue_group_t *group_ptrs[OL_TX_MAX_GROUPS_PER_QUEUE];
340 #if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
341 	struct ol_txrx_peer_t *peer;
342 #endif
343 };
344 
345 enum {
346 	ol_tx_log_entry_type_invalid,
347 	ol_tx_log_entry_type_queue_state,
348 	ol_tx_log_entry_type_enqueue,
349 	ol_tx_log_entry_type_dequeue,
350 	ol_tx_log_entry_type_drop,
351 	ol_tx_log_entry_type_queue_free,
352 
353 	ol_tx_log_entry_type_wrap,
354 };
355 
356 struct ol_tx_log_queue_state_var_sz_t {
357 	uint32_t active_bitmap;
358 	uint16_t credit;
359 	uint8_t num_cats_active;
360 	uint8_t data[1];
361 };
362 
363 struct ol_tx_log_queue_add_t {
364 	uint8_t num_frms;
365 	uint8_t tid;
366 	uint16_t peer_id;
367 	uint16_t num_bytes;
368 };
369 
370 struct ol_mac_addr {
371 	uint8_t mac_addr[OL_TXRX_MAC_ADDR_LEN];
372 };
373 
374 struct ol_tx_sched_t;
375 
376 #ifndef ol_txrx_local_peer_id_t
377 #define ol_txrx_local_peer_id_t uint8_t /* default */
378 #endif
379 
380 #ifdef QCA_COMPUTE_TX_DELAY
381 /*
382  * Delay histogram bins: 16 bins of 10 ms each to count delays
383  * from 0-160 ms, plus one overflow bin for delays > 160 ms.
384  */
385 #define QCA_TX_DELAY_HIST_INTERNAL_BINS 17
386 #define QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS 10
387 
388 struct ol_tx_delay_data {
389 	struct {
390 		uint64_t transmit_sum_ticks;
391 		uint64_t queue_sum_ticks;
392 		uint32_t transmit_num;
393 		uint32_t queue_num;
394 	} avgs;
395 	uint16_t hist_bins_queue[QCA_TX_DELAY_HIST_INTERNAL_BINS];
396 };
397 
398 #endif /* QCA_COMPUTE_TX_DELAY */
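
/*
 * Worked example for the delay histogram above: with 17 bins of 10 ms each,
 * a queuing delay of 37 ms is counted in bin 3 (covering 30-39 ms), and any
 * delay of 160 ms or more lands in the final overflow bin (bin 16).  A
 * minimal sketch of the bin selection (illustrative only; the driver works
 * in ticks and keeps bin-width multiplier/shift fields in the pdev):
 *
 *	int bin = delay_ms / QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS;
 *
 *	if (bin >= QCA_TX_DELAY_HIST_INTERNAL_BINS)
 *		bin = QCA_TX_DELAY_HIST_INTERNAL_BINS - 1;
 *	data->hist_bins_queue[bin]++;
 */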
399 
400 /* Thermal Mitigation */
401 enum throttle_phase {
402 	THROTTLE_PHASE_OFF,
403 	THROTTLE_PHASE_ON,
404 	/* Invalid */
405 	THROTTLE_PHASE_MAX,
406 };
407 
408 #define THROTTLE_TX_THRESHOLD (100)
409 
410 /*
411  * Threshold to stop/start the priority queue, in terms of a percentage of
412  * the actual flow start and stop thresholds. When the number of available
413  * descriptors falls below stop_priority_th, the priority queue is paused.
414  * When the number of available descriptors rises above start_priority_th,
415  * the priority queue is un-paused.
416  */
417 #define TX_PRIORITY_TH   (80)
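
/*
 * For example (hypothetical pool values, not taken from the driver): with
 * flow-control thresholds stop_th = 15 and start_th = 25, applying
 * TX_PRIORITY_TH as a percentage would give
 *
 *	stop_priority_th  = (TX_PRIORITY_TH * 15) / 100 = 12
 *	start_priority_th = (TX_PRIORITY_TH * 25) / 100 = 20
 *
 * so the priority queue pauses later (at fewer remaining descriptors) and
 * resumes earlier than the regular network queues.
 */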
418 
419 /*
420  * Maximum number of descriptors that a TSO jumbo packet with
421  * 64K aggregation can consume.
422  */
423 #define MAX_TSO_SEGMENT_DESC (44)
424 
425 typedef void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *usr_ctxt);
426 
427 struct ol_tx_queue_group_t {
428 	qdf_atomic_t credit;
429 	u_int32_t membership;
430 };
431 #define OL_TX_MAX_TXQ_GROUPS 2
432 
433 #define OL_TX_GROUP_STATS_LOG_SIZE 128
434 struct ol_tx_group_credit_stats_t {
435 	struct {
436 		struct {
437 			u_int16_t member_vdevs;
438 			u_int16_t credit;
439 		} grp[OL_TX_MAX_TXQ_GROUPS];
440 	} stats[OL_TX_GROUP_STATS_LOG_SIZE];
441 	u_int16_t last_valid_index;
442 	u_int16_t wrap_around;
443 };
444 
445 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
446 
447 /**
448  * enum flow_pool_status - flow pool status
449  * @FLOW_POOL_ACTIVE_UNPAUSED: pool is active (can take/put descriptors)
450  *				and network queues are unpaused
451  * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
452  *			   and network queues are paused
453  * @FLOW_POOL_INVALID: pool is invalid (descriptors can only be returned)
454  * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
455  * @FLOW_POOL_NON_PRIO_PAUSED: non-priority queues are paused
456  */
457 enum flow_pool_status {
458 	FLOW_POOL_ACTIVE_UNPAUSED = 0,
459 	FLOW_POOL_ACTIVE_PAUSED = 1,
460 	FLOW_POOL_NON_PRIO_PAUSED = 2,
461 	FLOW_POOL_INVALID = 3,
462 	FLOW_POOL_INACTIVE = 4
463 };
464 
465 /**
466  * struct ol_txrx_pool_stats - flow pool related statistics
467  * @pool_map_count: flow pool map received
468  * @pool_unmap_count: flow pool unmap received
469  * @pkt_drop_no_pool: packets dropped due to unavailability of pool
470  */
471 struct ol_txrx_pool_stats {
472 	uint16_t pool_map_count;
473 	uint16_t pool_unmap_count;
474 	uint16_t pkt_drop_no_pool;
475 };
476 
477 /**
478  * struct ol_tx_flow_pool_t - flow_pool info
479  * @flow_pool_list_elem: flow_pool_list element
480  * @flow_pool_lock: flow_pool lock
481  * @flow_pool_id: flow_pool id
482  * @flow_pool_size: flow_pool size
483  * @avail_desc: available descriptors
484  * @deficient_desc: deficient descriptors
485  * @status: flow pool status
486  * @flow_type: flow pool type
487  * @member_flow_id: member flow id
488  * @stop_th: stop threshold
489  * @start_th: start threshold
490  * @freelist: tx descriptor freelist
491  * @pkt_drop_no_desc: drop due to no descriptors
492  * @ref_cnt: pool's ref count
493  * @stop_priority_th: Threshold to stop priority queue
494  * @start_priority_th: Threshold to start priority queue
495  */
496 struct ol_tx_flow_pool_t {
497 	TAILQ_ENTRY(ol_tx_flow_pool_t) flow_pool_list_elem;
498 	qdf_spinlock_t flow_pool_lock;
499 	uint8_t flow_pool_id;
500 	uint16_t flow_pool_size;
501 	uint16_t avail_desc;
502 	uint16_t deficient_desc;
503 	enum flow_pool_status status;
504 	enum htt_flow_type flow_type;
505 	uint8_t member_flow_id;
506 	uint16_t stop_th;
507 	uint16_t start_th;
508 	union ol_tx_desc_list_elem_t *freelist;
509 	uint16_t pkt_drop_no_desc;
510 	qdf_atomic_t ref_cnt;
511 	uint16_t stop_priority_th;
512 	uint16_t start_priority_th;
513 };
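
/*
 * A minimal sketch of how the pool thresholds above are typically applied
 * (illustrative only, not the driver's actual routine; locking via
 * flow_pool_lock and the osif pause/unpause callbacks are omitted):
 *
 *	// after handing a descriptor out of the pool
 *	if (pool->avail_desc < pool->stop_th &&
 *	    pool->status == FLOW_POOL_ACTIVE_UNPAUSED)
 *		pool->status = FLOW_POOL_ACTIVE_PAUSED;   // pause net queues
 *
 *	// after a descriptor is returned to the pool
 *	if (pool->avail_desc > pool->start_th &&
 *	    pool->status == FLOW_POOL_ACTIVE_PAUSED)
 *		pool->status = FLOW_POOL_ACTIVE_UNPAUSED; // unpause net queues
 */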
514 
515 #endif
516 
517 /**
518  * struct ol_txrx_peer_id_map - Map of firmware peer_ids to peers on host
519  * @peer: Pointer to peer object
520  * @peer_id_ref_cnt: No. of firmware references to the peer_id
521  * @del_peer_id_ref_cnt: No. of outstanding unmap events for peer_id
522  *                       after the peer object is deleted on the host.
523  *
524  * peer_id is used as an index into the array of ol_txrx_peer_id_map.
525  */
526 struct ol_txrx_peer_id_map {
527 	struct ol_txrx_peer_t *peer;
528 	qdf_atomic_t peer_id_ref_cnt;
529 	qdf_atomic_t del_peer_id_ref_cnt;
530 };
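
/*
 * Illustrative lookup sketch: with peer_id used as the array index, finding
 * the host peer object for a firmware peer_id reduces to
 *
 *	struct ol_txrx_peer_t *peer =
 *		pdev->peer_id_to_obj_map[peer_id].peer;
 *
 * A real lookup would additionally need to validate peer_id against the map
 * size and take a reference on the peer under peer_ref_mutex before use.
 */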
531 
532 /**
533  * struct ol_txrx_stats_req_internal - internal bookkeeping for a
534  * requested set of statistics
535  */
536 struct ol_txrx_stats_req_internal {
537 	struct ol_txrx_stats_req base;
538 	TAILQ_ENTRY(ol_txrx_stats_req_internal) req_list_elem;
539 	int serviced; /* state of this request */
540 	int offset;
541 };
542 
543 /*
544  * As depicted in the diagram below, the pdev contains an array of
545  * NUM_EXT_TID ol_tx_active_queues_in_tid_t elements.
546  * Each element identifies all the tx queues that are active for
547  * the TID, from the different peers.
548  *
549  * Each peer contains an array of NUM_EXT_TID ol_tx_frms_queue_t elements.
550  * Each element identifies the tx frames for the TID that need to be sent
551  * to the peer.
552  *
553  *
554  *  pdev: ol_tx_active_queues_in_tid_t active_in_tids[NUM_EXT_TIDS]
555  *                                TID
556  *       0            1            2                     17
557  *  +============+============+============+==    ==+============+
558  *  | active (y) | active (n) | active (n) |        | active (y) |
559  *  |------------+------------+------------+--    --+------------|
560  *  | queues     | queues     | queues     |        | queues     |
561  *  +============+============+============+==    ==+============+
562  *       |                                               |
563  *    .--+-----------------------------------------------'
564  *    |  |
565  *    |  |     peer X:                            peer Y:
566  *    |  |     ol_tx_frms_queue_t                 ol_tx_frms_queue_t
567  *    |  |     tx_queues[NUM_EXT_TIDS]            tx_queues[NUM_EXT_TIDS]
568  *    |  | TID +======+                       TID +======+
569  *    |  `---->| next |-------------------------->| next |--X
570  *    |     0  | prev |   .------.   .------.  0  | prev |   .------.
571  *    |        | txq  |-->|txdesc|-->|txdesc|     | txq  |-->|txdesc|
572  *    |        +======+   `------'   `------'     +======+   `------'
573  *    |        | next |      |          |      1  | next |      |
574  *    |     1  | prev |      v          v         | prev |      v
575  *    |        | txq  |   .------.   .------.     | txq  |   .------.
576  *    |        +======+   |netbuf|   |netbuf|     +======+   |netbuf|
577  *    |        | next |   `------'   `------'     | next |   `------'
578  *    |     2  | prev |                        2  | prev |
579  *    |        | txq  |                           | txq  |
580  *    |        +======+                           +======+
581  *    |        |      |                           |      |
582  *    |
583  *    |
584  *    |        |      |                           |      |
585  *    |        +======+                           +======+
586  *    `------->| next |--X                        | next |
587  *          17 | prev |   .------.             17 | prev |
588  *             | txq  |-->|txdesc|                | txq  |
589  *             +======+   `------'                +======+
590  *                           |
591  *                           v
592  *                        .------.
593  *                        |netbuf|
594  *                        `------'
595  */
596 struct ol_txrx_pdev_t {
597 	/* ctrl_pdev - handle for querying config info */
598 	struct cdp_cfg *ctrl_pdev;
599 
600 	/* osdev - handle for mem alloc / free, map / unmap */
601 	qdf_device_t osdev;
602 
603 	htt_pdev_handle htt_pdev;
604 
605 #ifdef WLAN_FEATURE_FASTPATH
606 	struct CE_handle    *ce_tx_hdl; /* Handle to Tx packet posting CE */
607 	struct CE_handle    *ce_htt_msg_hdl; /* Handle to TxRx completion CE */
608 #endif /* WLAN_FEATURE_FASTPATH */
609 
610 	struct {
611 		int is_high_latency;
612 		int host_addba;
613 		int ll_pause_txq_limit;
614 		int default_tx_comp_req;
615 	} cfg;
616 
617 	/* WDI subscriber's event list */
618 	wdi_event_subscribe **wdi_event_list;
619 
620 #if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
621 	bool pkt_log_init;
622 	/* Pktlog pdev */
623 	struct pktlog_dev_t *pl_dev;
624 #endif /* #ifndef REMOVE_PKT_LOG */
625 
626 	enum ol_sec_type sec_types[htt_num_sec_types];
627 	/* standard frame type */
628 	enum wlan_frm_fmt frame_format;
629 	enum htt_pkt_type htt_pkt_type;
630 
631 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
632 	/* txrx encap/decap   */
633 	uint8_t sw_tx_encap;
634 	uint8_t sw_rx_decap;
635 	uint8_t target_tx_tran_caps;
636 	uint8_t target_rx_tran_caps;
637 	/* llc process */
638 	uint8_t sw_tx_llc_proc_enable;
639 	uint8_t sw_rx_llc_proc_enable;
640 	/* A-MSDU */
641 	uint8_t sw_subfrm_hdr_recovery_enable;
642 	/* Protected Frame bit handling */
643 	uint8_t sw_pf_proc_enable;
644 #endif
645 	/*
646 	 * target tx credit -
647 	 * not needed for LL, but used for HL download scheduler to keep
648 	 * track of roughly how much space is available in the target for
649 	 * tx frames
650 	 */
651 	qdf_atomic_t target_tx_credit;
652 	qdf_atomic_t orig_target_tx_credit;
653 
654 	/* Peer mac address to staid mapping */
655 	struct ol_mac_addr mac_to_staid[WLAN_MAX_STA_COUNT + 3];
656 
657 	/* ol_txrx_vdev list */
658 	TAILQ_HEAD(, ol_txrx_vdev_t) vdev_list;
659 
660 	TAILQ_HEAD(, ol_txrx_stats_req_internal) req_list;
661 	int req_list_depth;
662 	qdf_spinlock_t req_list_spinlock;
663 
664 	/* peer ID to peer object map (array of pointers to peer objects) */
665 	struct ol_txrx_peer_id_map *peer_id_to_obj_map;
666 
667 	struct {
668 		unsigned int mask;
669 		unsigned int idx_bits;
670 
671 		TAILQ_HEAD(, ol_txrx_peer_t) * bins;
672 	} peer_hash;
673 
674 	/* rx specific processing */
675 	struct {
676 		struct {
677 			TAILQ_HEAD(, ol_rx_reorder_t) waitlist;
678 			uint32_t timeout_ms;
679 		} defrag;
680 		struct {
681 			int defrag_timeout_check;
682 			int dup_check;
683 		} flags;
684 
685 		struct {
686 			struct ol_tx_reorder_cat_timeout_t
687 				access_cats[TXRX_NUM_WMM_AC];
688 		} reorder_timeout;
689 		qdf_spinlock_t mutex;
690 	} rx;
691 
692 	/* rx proc function */
693 	void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
694 			    struct ol_txrx_peer_t *peer,
695 			    unsigned int tid, qdf_nbuf_t msdu_list);
696 
697 	/* tx data delivery notification callback function */
698 	struct {
699 		ol_txrx_data_tx_cb func;
700 		void *ctxt;
701 	} tx_data_callback;
702 
703 	/* tx management delivery notification callback functions */
704 	struct {
705 		ol_txrx_mgmt_tx_cb download_cb;
706 		ol_txrx_mgmt_tx_cb ota_ack_cb;
707 		void *ctxt;
708 	} tx_mgmt_cb;
709 
710 	data_stall_detect_cb data_stall_detect_callback;
711 	/* packetdump callback functions */
712 	tp_ol_packetdump_cb ol_tx_packetdump_cb;
713 	tp_ol_packetdump_cb ol_rx_packetdump_cb;
714 
715 #ifdef WLAN_FEATURE_TSF_PLUS
716 	tp_ol_timestamp_cb ol_tx_timestamp_cb;
717 #endif
718 
719 	struct {
720 		uint16_t pool_size;
721 		uint16_t num_free;
722 		union ol_tx_desc_list_elem_t *array;
723 		union ol_tx_desc_list_elem_t *freelist;
724 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
725 		uint8_t num_invalid_bin;
726 		qdf_spinlock_t flow_pool_list_lock;
727 		TAILQ_HEAD(flow_pool_list_t, ol_tx_flow_pool_t) flow_pool_list;
728 #endif
729 		uint32_t page_size;
730 		uint16_t desc_reserved_size;
731 		uint8_t page_divider;
732 		uint32_t offset_filter;
733 		struct qdf_mem_multi_page_t desc_pages;
734 #ifdef DESC_DUP_DETECT_DEBUG
735 		unsigned long *free_list_bitmap;
736 #endif
737 	} tx_desc;
738 
739 	uint8_t is_mgmt_over_wmi_enabled;
740 #if defined(QCA_LL_TX_FLOW_CONTROL_V2)
741 	struct ol_txrx_pool_stats pool_stats;
742 	uint32_t num_msdu_desc;
743 #ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
744 	struct ol_tx_flow_pool_t *mgmt_pool;
745 #endif
746 #endif
747 
748 	struct {
749 		int (*cmp)(union htt_rx_pn_t *new,
750 			   union htt_rx_pn_t *old,
751 			   int is_unicast, int opmode);
752 		int len;
753 	} rx_pn[htt_num_sec_types];
754 
755 	/* tx mutex */
756 	OL_TX_MUTEX_TYPE tx_mutex;
757 
758 	/*
759 	 * peer ref mutex:
760 	 * 1. Protect peer object lookups until the returned peer object's
761 	 *    reference count is incremented.
762 	 * 2. Provide mutex when accessing peer object lookup structures.
763 	 */
764 	OL_RX_MUTEX_TYPE peer_ref_mutex;
765 
766 	/*
767 	 * last_real_peer_mutex:
768 	 * Protect lookups of any vdev's last_real_peer pointer until the
769 	 * reference count for the pointed-to peer object is incremented.
770 	 * This mutex could be in the vdev struct, but it's slightly simpler
771 	 * to have a single lock in the pdev struct.  Since the lock is only
772 	 * held for an extremely short time, and since it's very unlikely for
773 	 * two vdev's to concurrently access the lock, there's no real
774 	 * benefit to having a per-vdev lock.
775 	 */
776 	OL_RX_MUTEX_TYPE last_real_peer_mutex;
777 
778 	qdf_spinlock_t peer_map_unmap_lock;
779 
780 	struct {
781 		struct {
782 			struct {
783 				struct {
784 					uint64_t ppdus;
785 					uint64_t mpdus;
786 				} normal;
787 				struct {
788 					/*
789 					 * mpdu_bad is general -
790 					 * replace it with the specific counters
791 					 * below
792 					 */
793 					uint64_t mpdu_bad;
794 					/* uint64_t mpdu_fcs; */
795 					/* uint64_t mpdu_duplicate; */
796 					/* uint64_t mpdu_pn_replay; */
797 					/* uint64_t mpdu_bad_sender; */
798 					/* ^ comment: peer not found */
799 					/* uint64_t mpdu_flushed; */
800 					/* uint64_t msdu_defrag_mic_err; */
801 					uint64_t msdu_mc_dup_drop;
802 				} err;
803 			} rx;
804 		} priv;
805 		struct ol_txrx_stats pub;
806 	} stats;
807 
808 #if defined(ENABLE_RX_REORDER_TRACE)
809 	struct {
810 		uint32_t mask;
811 		uint32_t idx;
812 		uint64_t cnt;
813 #define TXRX_RX_REORDER_TRACE_SIZE_LOG2 8       /* 256 entries */
814 		struct {
815 			uint16_t reorder_idx;
816 			uint16_t seq_num;
817 			uint8_t num_mpdus;
818 			uint8_t tid;
819 		} *data;
820 	} rx_reorder_trace;
821 #endif /* ENABLE_RX_REORDER_TRACE */
822 
823 #if defined(ENABLE_RX_PN_TRACE)
824 	struct {
825 		uint32_t mask;
826 		uint32_t idx;
827 		uint64_t cnt;
828 #define TXRX_RX_PN_TRACE_SIZE_LOG2 5    /* 32 entries */
829 		struct {
830 			struct ol_txrx_peer_t *peer;
831 			uint32_t pn32;
832 			uint16_t seq_num;
833 			uint8_t unicast;
834 			uint8_t tid;
835 		} *data;
836 	} rx_pn_trace;
837 #endif /* ENABLE_RX_PN_TRACE */
838 
839 #if defined(PERE_IP_HDR_ALIGNMENT_WAR)
840 	bool host_80211_enable;
841 #endif
842 
843 	/*
844 	 * tx_sched only applies for HL, but is defined unconditionally
845 	 * rather than only if defined(CONFIG_HL_SUPPORT).
846 	 * This is because the struct only occupies a few bytes, and
847 	 * because wrapping every reference to the struct members in
848 	 * "defined(CONFIG_HL_SUPPORT)" conditional compilation would
849 	 * add unnecessary complexity.
850 	 * If this struct gets expanded to a non-trivial size, then it
851 	 * should be conditionally compiled to only apply if
852 	 * defined(CONFIG_HL_SUPPORT).
853 	 * The same reasoning applies to the tx_queue struct below.
854 	 */
855 	qdf_spinlock_t tx_queue_spinlock;
856 	struct {
857 		enum ol_tx_scheduler_status tx_sched_status;
858 		struct ol_tx_sched_t *scheduler;
859 		struct ol_tx_frms_queue_t *last_used_txq;
860 	} tx_sched;
861 	/*
862 	 * tx_queue only applies for HL, but is defined unconditionally to avoid
863 	 * wrapping references to tx_queue in "defined(CONFIG_HL_SUPPORT)"
864 	 * conditional compilation.
865 	 */
866 	struct {
867 		qdf_atomic_t rsrc_cnt;
868 		/* threshold_lo - when to start tx desc margin replenishment */
869 		uint16_t rsrc_threshold_lo;
870 		/*
871 		 * threshold_hi - where to stop during tx desc margin
872 		 * replenishment
873 		 */
874 		uint16_t rsrc_threshold_hi;
875 	} tx_queue;
876 
877 #if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT)
878 #define OL_TXQ_LOG_SIZE 512
879 	qdf_spinlock_t txq_log_spinlock;
880 	struct {
881 		int size;
882 		int oldest_record_offset;
883 		int offset;
884 		int allow_wrap;
885 		u_int32_t wrapped;
886 		/* aligned to u_int32_t boundary */
887 		u_int8_t data[OL_TXQ_LOG_SIZE];
888 	} txq_log;
889 #endif
890 
891 #ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
892 	qdf_spinlock_t peer_stat_mutex;
893 #endif
894 
895 	int rssi_update_shift;
896 	int rssi_new_weight;
897 #ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
898 	struct {
899 		ol_txrx_local_peer_id_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
900 		ol_txrx_local_peer_id_t freelist;
901 		qdf_spinlock_t lock;
902 		ol_txrx_peer_handle map[OL_TXRX_NUM_LOCAL_PEER_IDS];
903 	} local_peer_ids;
904 #endif
905 
906 #ifdef QCA_COMPUTE_TX_DELAY
907 #ifdef QCA_COMPUTE_TX_DELAY_PER_TID
908 #define QCA_TX_DELAY_NUM_CATEGORIES \
909 	(OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES)
910 #else
911 #define QCA_TX_DELAY_NUM_CATEGORIES 1
912 #endif
913 	struct {
914 		qdf_spinlock_t mutex;
915 		struct {
916 			struct ol_tx_delay_data copies[2]; /* ping-pong */
917 			int in_progress_idx;
918 			uint32_t avg_start_time_ticks;
919 		} cats[QCA_TX_DELAY_NUM_CATEGORIES];
920 		uint32_t tx_compl_timestamp_ticks;
921 		uint32_t avg_period_ticks;
922 		uint32_t hist_internal_bin_width_mult;
923 		uint32_t hist_internal_bin_width_shift;
924 	} tx_delay;
925 
926 	uint16_t packet_count[QCA_TX_DELAY_NUM_CATEGORIES];
927 	uint16_t packet_loss_count[QCA_TX_DELAY_NUM_CATEGORIES];
928 
929 #endif /* QCA_COMPUTE_TX_DELAY */
930 
931 	struct {
932 		qdf_spinlock_t mutex;
933 		/* timer used to monitor the throttle "on" phase and
934 		 * "off" phase
935 		 */
936 		qdf_timer_t phase_timer;
937 		/* timer used to send tx frames */
938 		qdf_timer_t tx_timer;
939 		/* This is the duration in ms of the throttling window; it
940 		 * includes an "on" phase and an "off" phase
941 		 */
942 		uint32_t throttle_period_ms;
943 		/* Current throttle level set by the client, e.g. level 0,
944 		 * level 1, etc.
945 		 */
946 		enum throttle_level current_throttle_level;
947 		/* Index that points to the phase within the throttle period */
948 		enum throttle_phase current_throttle_phase;
949 		/* Maximum number of frames to send to the target at one time */
950 		uint32_t tx_threshold;
951 		/* stores time in ms of on/off phase for each throttle level */
952 		int throttle_time_ms[THROTTLE_LEVEL_MAX][THROTTLE_PHASE_MAX];
953 		/* mark true if traffic is paused due to thermal throttling */
954 		bool is_paused;
955 	} tx_throttle;
956 
957 #if defined(FEATURE_TSO)
958 	struct {
959 		uint16_t pool_size;
960 		uint16_t num_free;
961 		struct qdf_tso_seg_elem_t *freelist;
962 		/* tso mutex */
963 		OL_TX_MUTEX_TYPE tso_mutex;
964 	} tso_seg_pool;
965 	struct {
966 		uint16_t num_seg_pool_size;
967 		uint16_t num_free;
968 		struct qdf_tso_num_seg_elem_t *freelist;
969 		/* tso mutex */
970 		OL_TX_MUTEX_TYPE tso_num_seg_mutex;
971 	} tso_num_seg_pool;
972 #endif
973 
974 #if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
975 	struct {
976 		enum ol_tx_peer_bal_state enabled;
977 		qdf_spinlock_t mutex;
978 		/* timer used to trigger more frames for bad peers */
979 		qdf_timer_t peer_bal_timer;
980 		/* This is the time in ms of the peer balance timer period */
981 		u_int32_t peer_bal_period_ms;
982 		/* This is the txq limit */
983 		u_int32_t peer_bal_txq_limit;
984 		/* This is the state of the peer balance timer */
985 		enum ol_tx_peer_bal_timer_state peer_bal_timer_state;
986 		/* This is the count of active peers which are under
987 		 * tx flow control
988 		 */
989 		u_int32_t peer_num;
990 		/* This is the list of peers which are under tx flow control */
991 		struct ol_tx_limit_peer_t limit_list[MAX_NO_PEERS_IN_LIMIT];
992 		/* This is the threshold configuration */
993 		struct tx_peer_threshold ctl_thresh[TXRX_IEEE11_MAX];
994 	} tx_peer_bal;
995 #endif /* CONFIG_HL_SUPPORT && QCA_BAD_PEER_TX_FLOW_CL */
996 
997 	struct ol_tx_queue_group_t txq_grps[OL_TX_MAX_TXQ_GROUPS];
998 #ifdef DEBUG_HL_LOGGING
999 	qdf_spinlock_t grp_stat_spinlock;
1000 	struct ol_tx_group_credit_stats_t grp_stats;
1001 #endif
1002 	int tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES];
1003 	uint8_t ocb_peer_valid;
1004 	struct ol_txrx_peer_t *ocb_peer;
1005 	tx_pause_callback pause_cb;
1006 
1007 	struct {
1008 		void (*lro_flush_cb)(void *);
1009 		qdf_atomic_t lro_dev_cnt;
1010 	} lro_info;
1011 	struct ol_txrx_peer_t *self_peer;
1012 	qdf_work_t peer_unmap_timer_work;
1013 
1014 #ifdef IPA_OFFLOAD
1015 	ipa_uc_op_cb_type ipa_uc_op_cb;
1016 	void *usr_ctxt;
1017 	struct ol_txrx_ipa_resources ipa_resource;
1018 #endif /* IPA_OFFLOAD */
1019 };
1020 
1021 struct ol_txrx_vdev_t {
1022 	struct ol_txrx_pdev_t *pdev; /* pdev - the physical device that is
1023 				      * the parent of this virtual device
1024 				      */
1025 	uint8_t vdev_id;             /* ID used to specify a particular vdev
1026 				      * to the target
1027 				      */
1028 	void *osif_dev;
1029 	union ol_txrx_align_mac_addr_t mac_addr; /* MAC address */
1030 	/* tx paused - NO LONGER NEEDED? */
1031 	TAILQ_ENTRY(ol_txrx_vdev_t) vdev_list_elem; /* node in the pdev's list
1032 						     * of vdevs
1033 						     */
1034 	TAILQ_HEAD(peer_list_t, ol_txrx_peer_t) peer_list;
1035 	struct ol_txrx_peer_t *last_real_peer; /* last real peer created for
1036 						* this vdev (not "self"
1037 						* pseudo-peer)
1038 						*/
1039 	ol_txrx_rx_fp rx; /* receive function used by this vdev */
1040 
1041 	struct {
1042 		/*
1043 		 * If the vdev object couldn't be deleted immediately because
1044 		 * it still had some peer objects left, remember that a delete
1045 		 * was requested, so it can be deleted once all its peers have
1046 		 * been deleted.
1047 		 */
1048 		int pending;
1049 		/*
1050 		 * Store a function pointer and a context argument to provide a
1051 		 * notification for when the vdev is deleted.
1052 		 */
1053 		ol_txrx_vdev_delete_cb callback;
1054 		void *context;
1055 		atomic_t detaching;
1056 	} delete;
1057 
1058 	/* safe mode control to bypass the encryption and decryption process */
1059 	uint32_t safemode;
1060 
1061 	/* rx filter related */
1062 	uint32_t drop_unenc;
1063 	struct privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
1064 	uint32_t num_filters;
1065 
1066 	enum wlan_op_mode opmode;
1067 
1068 #ifdef QCA_IBSS_SUPPORT
1069 	/* ibss mode related */
1070 	int16_t ibss_peer_num;  /* the number of active peers */
1071 	int16_t ibss_peer_heart_beat_timer; /* for detecting peer departure */
1072 #endif
1073 
1074 #if defined(CONFIG_HL_SUPPORT)
1075 	struct ol_tx_frms_queue_t txqs[OL_TX_VDEV_NUM_QUEUES];
1076 #endif
1077 
1078 	struct {
1079 		struct {
1080 			qdf_nbuf_t head;
1081 			qdf_nbuf_t tail;
1082 			int depth;
1083 		} txq;
1084 		uint32_t paused_reason;
1085 		qdf_spinlock_t mutex;
1086 		qdf_timer_t timer;
1087 		int max_q_depth;
1088 		bool is_q_paused;
1089 		bool is_q_timer_on;
1090 		uint32_t q_pause_cnt;
1091 		uint32_t q_unpause_cnt;
1092 		uint32_t q_overflow_cnt;
1093 	} ll_pause;
1094 	bool disable_intrabss_fwd;
1095 	qdf_atomic_t os_q_paused;
1096 	uint16_t tx_fl_lwm;
1097 	uint16_t tx_fl_hwm;
1098 	qdf_spinlock_t flow_control_lock;
1099 	ol_txrx_tx_flow_control_fp osif_flow_control_cb;
1100 	ol_txrx_tx_flow_control_is_pause_fp osif_flow_control_is_pause;
1101 	void *osif_fc_ctx;
1102 
1103 #if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
1104 	union ol_txrx_align_mac_addr_t hl_tdls_ap_mac_addr;
1105 	bool hlTdlsFlag;
1106 #endif
1107 
1108 #if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
1109 	qdf_atomic_t tx_desc_count;
1110 #endif
1111 
1112 	uint16_t wait_on_peer_id;
1113 	union ol_txrx_align_mac_addr_t last_peer_mac_addr;
1114 	qdf_event_t wait_delete_comp;
1115 #if defined(FEATURE_TSO)
1116 	struct {
1117 		int pool_elems; /* total number of elements in the pool */
1118 		int alloc_cnt; /* number of allocated elements */
1119 		uint32_t *freelist; /* free list of qdf_tso_seg_elem_t */
1120 	} tso_pool_t;
1121 #endif
1122 
1123 	/* last channel change event received */
1124 	struct {
1125 		bool is_valid;  /* whether the rest of the members are valid */
1126 		uint16_t mhz;
1127 		uint16_t band_center_freq1;
1128 		uint16_t band_center_freq2;
1129 		WLAN_PHY_MODE phy_mode;
1130 	} ocb_channel_event;
1131 
1132 	/* Information about the channels in the OCB schedule */
1133 	struct ol_txrx_ocb_chan_info *ocb_channel_info;
1134 	uint32_t ocb_channel_count;
1135 
1136 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
1137 	struct ol_tx_flow_pool_t *pool;
1138 #endif
1139 	/* intra bss forwarded tx and rx packets count */
1140 	uint64_t fwd_tx_packets;
1141 	uint64_t fwd_rx_packets;
1142 	bool is_wisa_mode_enable;
1143 	uint8_t mac_id;
1144 };
1145 
1146 struct ol_rx_reorder_array_elem_t {
1147 	qdf_nbuf_t head;
1148 	qdf_nbuf_t tail;
1149 };
1150 
1151 struct ol_rx_reorder_t {
1152 	uint8_t win_sz;
1153 	uint8_t win_sz_mask;
1154 	uint8_t num_mpdus;
1155 	struct ol_rx_reorder_array_elem_t *array;
1156 	/* base - single rx reorder element used for non-aggr cases */
1157 	struct ol_rx_reorder_array_elem_t base;
1158 #if defined(QCA_SUPPORT_OL_RX_REORDER_TIMEOUT)
1159 	struct ol_rx_reorder_timeout_list_elem_t timeout;
1160 #endif
1161 	/* only used for defrag right now */
1162 	TAILQ_ENTRY(ol_rx_reorder_t) defrag_waitlist_elem;
1163 	uint32_t defrag_timeout_ms;
1164 	/* get back to parent ol_txrx_peer_t when ol_rx_reorder_t is in a
1165 	 * waitlist
1166 	 */
1167 	uint16_t tid;
1168 };
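
/*
 * Illustrative sketch (not the driver's actual reorder routine): assuming
 * win_sz_mask is one less than a power of two, as the name suggests, an
 * MPDU's sequence number maps to a reorder slot with a simple mask:
 *
 *	unsigned int idx = seq_num & rx_reorder->win_sz_mask;
 *	struct ol_rx_reorder_array_elem_t *elem = &rx_reorder->array[idx];
 */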
1169 
1170 enum {
1171 	txrx_sec_mcast = 0,
1172 	txrx_sec_ucast
1173 };
1174 
1175 typedef A_STATUS (*ol_tx_filter_func)(struct ol_txrx_msdu_info_t *
1176 				      tx_msdu_info);
1177 
1178 #define OL_TXRX_PEER_SECURITY_MULTICAST  0
1179 #define OL_TXRX_PEER_SECURITY_UNICAST    1
1180 #define OL_TXRX_PEER_SECURITY_MAX        2
1181 
1182 
1183 /* Allow 6000 ms to receive peer unmap events after peer is deleted */
1184 #define OL_TXRX_PEER_UNMAP_TIMEOUT (6000)
1185 
1186 struct ol_txrx_cached_bufq_t {
1187 	/* cached_bufq is used to enqueue the pending RX frames from a peer
1188 	 * before the peer is registered for data service. The list will be
1189 	 * flushed to HDD once that station is registered.
1190 	 */
1191 	struct list_head cached_bufq;
1192 	/* mutual exclusion lock to access the cached_bufq queue */
1193 	qdf_spinlock_t bufq_lock;
1194 	/* # entries in queue after which subsequent adds will be dropped */
1195 	uint32_t thresh;
1196 	/* # entries currently present in cached_bufq */
1197 	uint32_t curr;
1198 	/* # max num of entries in the queue if bufq thresh was not in place */
1199 	uint32_t high_water_mark;
1200 	/* # max num of entries in the queue if we did not drop packets */
1201 	uint32_t qdepth_no_thresh;
1202 	/* # of packets (beyond threshold) dropped from cached_bufq */
1203 	uint32_t dropped;
1204 };
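
/*
 * A minimal sketch of the bookkeeping implied by the fields above
 * (illustrative only; the list manipulation under bufq_lock is omitted):
 *
 *	if (bufq_info->curr >= bufq_info->thresh) {
 *		bufq_info->dropped++;        // over threshold: drop the frame
 *	} else {
 *		// append the rx frame to cached_bufq, then account for it
 *		bufq_info->curr++;
 *	}
 */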
1205 
1206 struct ol_txrx_peer_t {
1207 	struct ol_txrx_vdev_t *vdev;
1208 
1209 	qdf_atomic_t ref_cnt;
1210 	qdf_atomic_t delete_in_progress;
1211 	qdf_atomic_t flush_in_progress;
1212 
1213 	/* The peer state tracking is used for HL systems
1214 	 * that don't support tx and rx filtering within the target.
1215 	 * In such systems, the peer's state determines what kind of
1216 	 * tx and rx filtering, if any, is done.
1217 	 * This variable doesn't apply to LL systems, or to HL systems for
1218 	 * which the target handles tx and rx filtering. However, it is
1219 	 * simplest to declare and update this variable unconditionally,
1220 	 * for all systems.
1221 	 */
1222 	enum ol_txrx_peer_state state;
1223 	qdf_spinlock_t peer_info_lock;
1224 
1225 	/* Wrapper around the cached_bufq list */
1226 	struct ol_txrx_cached_bufq_t bufq_info;
1227 
1228 	ol_tx_filter_func tx_filter;
1229 
1230 	/* peer ID(s) for this peer */
1231 	uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];
1232 #ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
1233 	uint16_t local_id;
1234 #endif
1235 
1236 	union ol_txrx_align_mac_addr_t mac_addr;
1237 
1238 	/* node in the vdev's list of peers */
1239 	TAILQ_ENTRY(ol_txrx_peer_t) peer_list_elem;
1240 	/* node in the hash table bin's list of peers */
1241 	TAILQ_ENTRY(ol_txrx_peer_t) hash_list_elem;
1242 
1243 	/*
1244 	 * per TID info -
1245 	 * stored in separate arrays to avoid alignment padding mem overhead
1246 	 */
1247 	struct ol_rx_reorder_t tids_rx_reorder[OL_TXRX_NUM_EXT_TIDS];
1248 	union htt_rx_pn_t tids_last_pn[OL_TXRX_NUM_EXT_TIDS];
1249 	uint8_t tids_last_pn_valid[OL_TXRX_NUM_EXT_TIDS];
1250 	uint8_t tids_rekey_flag[OL_TXRX_NUM_EXT_TIDS];
1251 	uint16_t tids_next_rel_idx[OL_TXRX_NUM_EXT_TIDS];
1252 	uint16_t tids_last_seq[OL_TXRX_NUM_EXT_TIDS];
1253 	uint16_t tids_mcast_last_seq[OL_TXRX_NUM_EXT_TIDS];
1254 
1255 	struct {
1256 		enum htt_sec_type sec_type;
1257 		uint32_t michael_key[2];        /* relevant for TKIP */
1258 	} security[2];          /* 0 -> multicast, 1 -> unicast */
1259 
1260 	/*
1261 	 * rx proc function: this either is a copy of pdev's rx_opt_proc for
1262 	 * regular rx processing, or has been redirected to a /dev/null discard
1263 	 * function when peer deletion is in progress.
1264 	 */
1265 	void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
1266 			    struct ol_txrx_peer_t *peer,
1267 			    unsigned int tid, qdf_nbuf_t msdu_list);
1268 
1269 #if defined(CONFIG_HL_SUPPORT)
1270 	struct ol_tx_frms_queue_t txqs[OL_TX_NUM_TIDS];
1271 #endif
1272 
1273 #ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
1274 	ol_txrx_peer_stats_t stats;
1275 #endif
1276 	int16_t rssi_dbm;
1277 
1278 	/* NAWDS Flag and Bss Peer bit */
1279 	uint16_t nawds_enabled:1, bss_peer:1, valid:1;
1280 
1281 	/* QoS info */
1282 	uint8_t qos_capable;
1283 	/* U-APSD tid mask */
1284 	uint8_t uapsd_mask;
1285 	/* flag indicating key installed */
1286 	uint8_t keyinstalled;
1287 
1288 	/* Bit to indicate if PN check is done in fw */
1289 	qdf_atomic_t fw_pn_check;
1290 
1291 #ifdef WLAN_FEATURE_11W
1292 	/* PN counter for Robust Management Frames */
1293 	uint64_t last_rmf_pn;
1294 	uint32_t rmf_pn_replays;
1295 	uint8_t last_rmf_pn_valid;
1296 #endif
1297 
1298 	/* Properties of the last received PPDU */
1299 	int16_t last_pkt_rssi_cmb;
1300 	int16_t last_pkt_rssi[4];
1301 	uint8_t last_pkt_legacy_rate;
1302 	uint8_t last_pkt_legacy_rate_sel;
1303 	uint32_t last_pkt_timestamp_microsec;
1304 	uint8_t last_pkt_timestamp_submicrosec;
1305 	uint32_t last_pkt_tsf;
1306 	uint8_t last_pkt_tid;
1307 	uint16_t last_pkt_center_freq;
1308 #if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
1309 	u_int16_t tx_limit;
1310 	u_int16_t tx_limit_flag;
1311 	u_int16_t tx_pause_flag;
1312 #endif
1313 	qdf_time_t last_assoc_rcvd;
1314 	qdf_time_t last_disassoc_rcvd;
1315 	qdf_time_t last_deauth_rcvd;
1316 	qdf_atomic_t fw_create_pending;
1317 	qdf_timer_t peer_unmap_timer;
1318 };
1319 
1320 struct ol_rx_remote_data {
1321 	qdf_nbuf_t msdu;
1322 	uint8_t mac_id;
1323 };
1324 
1325 #define INVALID_REORDER_INDEX 0xFFFF
1326 
1327 #endif /* _OL_TXRX_TYPES__H_ */
1328