/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: ol_txrx_types.h
 * Define the major data types used internally by the host datapath SW.
 */
#ifndef _OL_TXRX_TYPES__H_
#define _OL_TXRX_TYPES__H_

#include <qdf_nbuf.h>           /* qdf_nbuf_t */
#include <qdf_mem.h>
#include "queue.h"              /* TAILQ */
#include <a_types.h>            /* A_UINT8 */
#include <htt.h>                /* htt_sec_type, htt_pkt_type, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_t */
#include <wdi_event_api.h>      /* wdi_event_subscribe */
#include <qdf_timer.h>          /* qdf_timer_t */
#include <qdf_lock.h>           /* qdf_spinlock */
#include <ol_txrx_stats.h>
#include "ol_txrx_htt_api.h"
#include "ol_htt_tx_api.h"
#include "ol_htt_rx_api.h"
#include "ol_txrx_ctrl_api.h"   /* WLAN_MAX_STA_COUNT */
#include "ol_txrx_osif_api.h"   /* ol_rx_callback */
#include "cdp_txrx_flow_ctrl_v2.h"
#include "cdp_txrx_peer_ops.h"
#include <qdf_trace.h>
#include "qdf_hrtimer.h"

/*
 * The target may allocate multiple IDs for a peer.
 * In particular, the target may allocate one ID to represent the
 * multicast key the peer uses, and another ID to represent the
 * unicast key the peer uses.
 */
#define MAX_NUM_PEER_ID_PER_PEER 16

/* OL_TXRX_NUM_EXT_TIDS -
 * 16 "real" TIDs + 3 pseudo-TIDs for mgmt, mcast/bcast & non-QoS data
 */
#define OL_TXRX_NUM_EXT_TIDS 19

#define OL_TX_NUM_QOS_TIDS 16   /* 16 regular TIDs */
#define OL_TX_NON_QOS_TID 16
#define OL_TX_MGMT_TID    17
#define OL_TX_NUM_TIDS    18
#define OL_RX_MCAST_TID   18  /* Mcast TID only between f/w & host */

#define OL_TX_VDEV_MCAST_BCAST    0 /* HTT_TX_EXT_TID_MCAST_BCAST */
#define OL_TX_VDEV_DEFAULT_MGMT   1 /* HTT_TX_EXT_TID_DEFALT_MGMT */
#define OL_TX_VDEV_NUM_QUEUES     2

#define OL_TXRX_MGMT_TYPE_BASE htt_pkt_num_types
#define OL_TXRX_MGMT_NUM_TYPES 8

#define OL_TX_MUTEX_TYPE qdf_spinlock_t
#define OL_RX_MUTEX_TYPE qdf_spinlock_t

/* TXRX Histogram defines */
#define TXRX_DATA_HISTROGRAM_GRANULARITY      1000
#define TXRX_DATA_HISTROGRAM_NUM_INTERVALS    100

#define OL_TXRX_INVALID_VDEV_ID		(-1)
#define ETHERTYPE_OCB_TX   0x8151
#define ETHERTYPE_OCB_RX   0x8152

#define OL_TXRX_MAX_PDEV_CNT	1

struct ol_txrx_pdev_t;
struct ol_txrx_vdev_t;
struct ol_txrx_peer_t;

/* rx filter related */
#define MAX_PRIVACY_FILTERS           4 /* max privacy filters */

enum privacy_filter {
	PRIVACY_FILTER_ALWAYS,
	PRIVACY_FILTER_KEY_UNAVAILABLE,
};

enum privacy_filter_packet_type {
	PRIVACY_FILTER_PACKET_UNICAST,
	PRIVACY_FILTER_PACKET_MULTICAST,
	PRIVACY_FILTER_PACKET_BOTH
};

struct privacy_exemption {
	/* ethertype -
	 * type of ethernet frames this filter applies to, in host byte order
	 */
	uint16_t ether_type;
	enum privacy_filter filter_type;
	enum privacy_filter_packet_type packet_type;
};

enum ol_tx_frm_type {
	OL_TX_FRM_STD = 0, /* regular frame - no added header fragments */
	OL_TX_FRM_TSO,     /* TSO segment, with a modified IP header added */
	OL_TX_FRM_AUDIO,   /* audio frames, with a custom LLC/SNAP hdr added */
	OL_TX_FRM_NO_FREE, /* frame requires special tx completion callback */
	ol_tx_frm_freed = 0xff, /* the tx desc is in free list */
};

#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)

#define MAX_NO_PEERS_IN_LIMIT (2*10 + 2)

enum ol_tx_peer_bal_state {
	ol_tx_peer_bal_enable = 0,
	ol_tx_peer_bal_disable,
};

enum ol_tx_peer_bal_timer_state {
	ol_tx_peer_bal_timer_disable = 0,
	ol_tx_peer_bal_timer_active,
	ol_tx_peer_bal_timer_inactive,
};

struct ol_tx_limit_peer_t {
	u_int16_t limit_flag;
	u_int16_t peer_id;
	u_int16_t limit;
};

enum tx_peer_level {
	TXRX_IEEE11_B = 0,
	TXRX_IEEE11_A_G,
	TXRX_IEEE11_N,
	TXRX_IEEE11_AC,
	TXRX_IEEE11_AX,
	TXRX_IEEE11_MAX,
};

struct tx_peer_threshold {
	u_int32_t tput_thresh;
	u_int32_t tx_limit;
};
#endif


struct ol_tx_desc_t {
	qdf_nbuf_t netbuf;
	void *htt_tx_desc;
	uint16_t id;
	qdf_dma_addr_t htt_tx_desc_paddr;
	void *htt_frag_desc; /* struct msdu_ext_desc_t * */
	qdf_dma_addr_t htt_frag_desc_paddr;
	qdf_atomic_t ref_cnt;
	enum htt_tx_status status;

#ifdef QCA_COMPUTE_TX_DELAY
	uint32_t entry_timestamp_ticks;
#endif

#ifdef DESC_TIMESTAMP_DEBUG_INFO
	struct {
		uint64_t prev_tx_ts;
		uint64_t curr_tx_ts;
		uint64_t last_comp_ts;
	} desc_debug_info;
#endif

	/*
	 * Allow tx descriptors to be stored in (doubly-linked) lists.
	 * This is mainly used for HL tx queuing and scheduling, but is
	 * also used by LL+HL for batch processing of tx frames.
	 */
	TAILQ_ENTRY(ol_tx_desc_t) tx_desc_list_elem;

	/*
	 * Remember whether the tx frame is a regular packet, or whether
	 * the driver added extra header fragments (e.g. a modified IP header
	 * for TSO fragments, or an added LLC/SNAP header for audio interworking
	 * data) that need to be handled in a special manner.
	 * This field is filled in with the ol_tx_frm_type enum.
	 */
	uint8_t pkt_type;

	u_int8_t vdev_id;

	struct ol_txrx_vdev_t *vdev;

	void *txq;

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/*
	 * used by tx encap, to restore the os buf start offset
	 * after tx complete
	 */
	uint8_t orig_l2_hdr_bytes;
#endif

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	struct ol_tx_flow_pool_t *pool;
#endif
	void *tso_desc;
	void *tso_num_desc;
};

typedef TAILQ_HEAD(some_struct_name, ol_tx_desc_t) ol_tx_desc_list;

union ol_tx_desc_list_elem_t {
	union ol_tx_desc_list_elem_t *next;
	struct ol_tx_desc_t tx_desc;
};

union ol_txrx_align_mac_addr_t {
	uint8_t raw[QDF_MAC_ADDR_SIZE];
	struct {
		uint16_t bytes_ab;
		uint16_t bytes_cd;
		uint16_t bytes_ef;
	} align2;
	struct {
		uint32_t bytes_abcd;
		uint16_t bytes_ef;
	} align4;
};
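
/*
 * The union above lets MAC address comparisons use two aligned loads
 * rather than six byte loads.  A minimal sketch of how such a compare
 * could be written (the function name here is hypothetical; the
 * driver's actual helper may differ):
 *
 *   static inline int
 *   ol_mac_addr_is_equal(const union ol_txrx_align_mac_addr_t *a,
 *                        const union ol_txrx_align_mac_addr_t *b)
 *   {
 *           return (a->align4.bytes_abcd == b->align4.bytes_abcd) &&
 *                  (a->align4.bytes_ef == b->align4.bytes_ef);
 *   }
 *
 * This is valid only when both operands are 4-byte aligned, which is
 * the point of embedding this union (rather than a raw byte array)
 * in the peer and vdev structs.
 */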

struct ol_rx_reorder_timeout_list_elem_t {
	TAILQ_ENTRY(ol_rx_reorder_timeout_list_elem_t)
	reorder_timeout_list_elem;
	uint32_t timestamp_ms;
	struct ol_txrx_peer_t *peer;
	uint8_t tid;
	uint8_t active;
};

/* wait on peer deletion timeout value in milliseconds */
#define PEER_DELETION_TIMEOUT 500

enum txrx_wmm_ac {
	TXRX_WMM_AC_BE,
	TXRX_WMM_AC_BK,
	TXRX_WMM_AC_VI,
	TXRX_WMM_AC_VO,

	TXRX_NUM_WMM_AC
};

#define TXRX_TID_TO_WMM_AC(_tid) ( \
		(((_tid) >> 1) == 3) ? TXRX_WMM_AC_VO :	\
		(((_tid) >> 1) == 2) ? TXRX_WMM_AC_VI :	\
		(((_tid) ^ ((_tid) >> 1)) & 0x1) ? TXRX_WMM_AC_BK : \
		TXRX_WMM_AC_BE)
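
/*
 * Worked example of the TID -> AC mapping macro above (matching the
 * usual 802.1D user priority to WMM access category convention):
 *   TID 0, 3 -> TXRX_WMM_AC_BE
 *   TID 1, 2 -> TXRX_WMM_AC_BK
 *   TID 4, 5 -> TXRX_WMM_AC_VI
 *   TID 6, 7 -> TXRX_WMM_AC_VO
 * e.g. for TID 2: (2 >> 1) == 1, so neither the VO nor the VI arm
 * matches, and ((2 ^ 1) & 0x1) == 1, so the macro yields
 * TXRX_WMM_AC_BK.
 */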

enum {
	OL_TX_SCHED_WRR_ADV_CAT_BE,
	OL_TX_SCHED_WRR_ADV_CAT_BK,
	OL_TX_SCHED_WRR_ADV_CAT_VI,
	OL_TX_SCHED_WRR_ADV_CAT_VO,
	OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA,
	OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT,
	OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA,
	OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT,

	OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES /* must be last */
};

A_COMPILE_TIME_ASSERT(ol_tx_sched_htt_ac_values,
	/* check that regular WMM AC enum values match */
	((int)OL_TX_SCHED_WRR_ADV_CAT_VO == (int)HTT_AC_WMM_VO) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_VI == (int)HTT_AC_WMM_VI) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_BK == (int)HTT_AC_WMM_BK) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_BE == (int)HTT_AC_WMM_BE) &&

	/* check that extension AC enum values match */
	((int)OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA
		== (int)HTT_AC_EXT_NON_QOS) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT
		== (int)HTT_AC_EXT_UCAST_MGMT) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA
		== (int)HTT_AC_EXT_MCAST_DATA) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT
		== (int)HTT_AC_EXT_MCAST_MGMT));

struct ol_tx_reorder_cat_timeout_t {
	TAILQ_HEAD(, ol_rx_reorder_timeout_list_elem_t) virtual_timer_list;
	qdf_timer_t timer;
	uint32_t duration_ms;
	struct ol_txrx_pdev_t *pdev;
};

enum ol_tx_scheduler_status {
	ol_tx_scheduler_idle = 0,
	ol_tx_scheduler_running,
};

enum ol_tx_queue_status {
	ol_tx_queue_empty = 0,
	ol_tx_queue_active,
	ol_tx_queue_paused,
};

struct ol_txrx_msdu_info_t {
	struct htt_msdu_info_t htt;
	struct ol_txrx_peer_t *peer;
	struct qdf_tso_info_t tso_info;
};

enum {
	ol_tx_aggr_untried = 0,
	ol_tx_aggr_enabled,
	ol_tx_aggr_disabled,
	ol_tx_aggr_retry,
	ol_tx_aggr_in_progress,
};

#define OL_TX_MAX_GROUPS_PER_QUEUE 1
#define OL_TX_MAX_VDEV_ID 16
#define OL_TXQ_GROUP_VDEV_ID_MASK_GET(_membership)           \
	(((_membership) & 0xffff0000) >> 16)
#define OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(_mask, _vdev_id)   \
	((_mask >> _vdev_id) & 0x01)
#define OL_TXQ_GROUP_AC_MASK_GET(_membership)           \
	((_membership) & 0x0000ffff)
#define OL_TXQ_GROUP_AC_BIT_MASK_GET(_mask, _ac_mask)   \
	((_mask >> _ac_mask) & 0x01)
#define OL_TXQ_GROUP_MEMBERSHIP_GET(_vdev_mask, _ac_mask)     \
	((_vdev_mask << 16) | _ac_mask)
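
/*
 * The group membership word packs a vdev-ID bitmask into the upper
 * 16 bits and an AC bitmask into the lower 16 bits.  For example, a
 * group containing vdevs 0 and 2, for all four access categories:
 *
 *   membership = OL_TXQ_GROUP_MEMBERSHIP_GET(0x0005, 0x000f);
 *                                                  = 0x0005000f
 *   OL_TXQ_GROUP_VDEV_ID_MASK_GET(0x0005000f)      = 0x0005
 *   OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(0x0005, 2)   = 1
 *   OL_TXQ_GROUP_AC_MASK_GET(0x0005000f)           = 0x000f
 */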

struct ol_tx_frms_queue_t {
	/* list_elem -
	 * Allow individual tx frame queues to be linked together into
	 * scheduler queues of tx frame queues
	 */
	TAILQ_ENTRY(ol_tx_frms_queue_t) list_elem;
	uint8_t aggr_state;
	struct {
		uint8_t total;
		/* pause requested by ctrl SW rather than txrx SW */
		uint8_t by_ctrl;
	} paused_count;
	uint8_t ext_tid;
	uint16_t frms;
	uint32_t bytes;
	ol_tx_desc_list head;
	enum ol_tx_queue_status flag;
	struct ol_tx_queue_group_t *group_ptrs[OL_TX_MAX_GROUPS_PER_QUEUE];
#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
	struct ol_txrx_peer_t *peer;
#endif
};

enum {
	ol_tx_log_entry_type_invalid,
	ol_tx_log_entry_type_queue_state,
	ol_tx_log_entry_type_enqueue,
	ol_tx_log_entry_type_dequeue,
	ol_tx_log_entry_type_drop,
	ol_tx_log_entry_type_queue_free,

	ol_tx_log_entry_type_wrap,
};

struct ol_tx_log_queue_state_var_sz_t {
	uint32_t active_bitmap;
	uint16_t credit;
	uint8_t num_cats_active;
	uint8_t data[1];
};

struct ol_tx_log_queue_add_t {
	uint8_t num_frms;
	uint8_t tid;
	uint16_t peer_id;
	uint16_t num_bytes;
};

struct ol_mac_addr {
	uint8_t mac_addr[QDF_MAC_ADDR_SIZE];
};

struct ol_tx_sched_t;

#ifndef ol_txrx_local_peer_id_t
#define ol_txrx_local_peer_id_t uint8_t /* default */
#endif

#ifdef QCA_COMPUTE_TX_DELAY
/*
 * Delay histogram bins: 16 bins of 10 ms each to count delays
 * from 0-160 ms, plus one overflow bin for delays > 160 ms.
 */
#define QCA_TX_DELAY_HIST_INTERNAL_BINS 17
#define QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS 10
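
/*
 * Binning sketch: a delay sample is mapped to a bin index with the
 * multiply/shift pair kept in the pdev's tx_delay state (see
 * hist_internal_bin_width_mult/shift further below), roughly:
 *
 *   bin = (delay_ticks * hist_internal_bin_width_mult) >>
 *                 hist_internal_bin_width_shift;
 *   if (bin >= QCA_TX_DELAY_HIST_INTERNAL_BINS)
 *           bin = QCA_TX_DELAY_HIST_INTERNAL_BINS - 1;   (overflow bin)
 *
 * (Illustrative only; the exact tick-to-bin conversion lives in the
 * tx delay code, not in this header.)
 */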

struct ol_tx_delay_data {
	struct {
		uint64_t transmit_sum_ticks;
		uint64_t queue_sum_ticks;
		uint32_t transmit_num;
		uint32_t queue_num;
	} avgs;
	uint16_t hist_bins_queue[QCA_TX_DELAY_HIST_INTERNAL_BINS];
};

#endif /* QCA_COMPUTE_TX_DELAY */

/* Thermal Mitigation */
enum throttle_phase {
	THROTTLE_PHASE_OFF,
	THROTTLE_PHASE_ON,
	/* Invalid */
	THROTTLE_PHASE_MAX,
};

#define THROTTLE_TX_THRESHOLD (100)

/*
 * Thresholds for stopping/starting the priority queue, expressed as a
 * percentage of the actual flow-control start and stop thresholds.
 * When the number of available descriptors falls below
 * stop_priority_th, the priority queue is paused.  When the number of
 * available descriptors rises above start_priority_th, the priority
 * queue is unpaused.
 */
#define TX_PRIORITY_TH   (80)
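
/*
 * Example: with a flow-control stop threshold of 15 descriptors and a
 * start threshold of 25 (illustrative numbers; the actual thresholds
 * come from the cfg/ini layer), the derived priority-queue thresholds
 * at 80% would be:
 *   stop_priority_th  = (TX_PRIORITY_TH * 15) / 100 = 12
 *   start_priority_th = (TX_PRIORITY_TH * 25) / 100 = 20
 */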

/*
 * Maximum number of descriptors used by a TSO jumbo packet with
 * 64K aggregation.
 */
#define MAX_TSO_SEGMENT_DESC (44)
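
/*
 * Rough arithmetic behind the 44-descriptor bound: a 64KB TSO jumbo
 * packet split at a typical ~1500 byte MSS yields about
 * 65536 / 1500 = 43.7, i.e. up to 44 segments, each of which needs
 * its own tx descriptor.
 */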

struct ol_tx_queue_group_t {
	qdf_atomic_t credit;
	u_int32_t membership;
	int frm_count;
};
#define OL_TX_MAX_TXQ_GROUPS 2

#define OL_TX_GROUP_STATS_LOG_SIZE 128
struct ol_tx_group_credit_stats_t {
	struct {
		struct {
			u_int16_t member_vdevs;
			u_int16_t credit;
		} grp[OL_TX_MAX_TXQ_GROUPS];
	} stats[OL_TX_GROUP_STATS_LOG_SIZE];
	u_int16_t last_valid_index;
	u_int16_t wrap_around;
};


#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
/**
 * enum flow_pool_status - flow pool status
 * @FLOW_POOL_ACTIVE_UNPAUSED: pool is active (can take/put descriptors)
 *			       and network queues are unpaused
 * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
 *			     and network queues are paused
 * @FLOW_POOL_NON_PRIO_PAUSED: non-priority queues are paused
 * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
 * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
 */
enum flow_pool_status {
	FLOW_POOL_ACTIVE_UNPAUSED = 0,
	FLOW_POOL_ACTIVE_PAUSED = 1,
	FLOW_POOL_NON_PRIO_PAUSED = 2,
	FLOW_POOL_INVALID = 3,
	FLOW_POOL_INACTIVE = 4
};

/**
 * struct ol_txrx_pool_stats - flow pool related statistics
 * @pool_map_count: flow pool map received
 * @pool_unmap_count: flow pool unmap received
 * @pool_resize_count: flow pool resize command received
 * @pkt_drop_no_pool: packets dropped due to unavailability of pool
 */
struct ol_txrx_pool_stats {
	uint16_t pool_map_count;
	uint16_t pool_unmap_count;
	uint16_t pool_resize_count;
	uint16_t pkt_drop_no_pool;
};

/**
 * struct ol_tx_flow_pool_t - flow_pool info
 * @flow_pool_list_elem: flow_pool_list element
 * @flow_pool_lock: flow_pool lock
 * @flow_pool_id: flow_pool id
 * @flow_pool_size: flow_pool size
 * @avail_desc: available descriptors
 * @deficient_desc: deficient descriptors
 * @overflow_desc: overflow descriptors
 * @status: flow pool status
 * @flow_type: flow pool type
 * @member_flow_id: member flow id
 * @stop_th: stop threshold
 * @start_th: start threshold
 * @freelist: tx descriptor freelist
 * @pkt_drop_no_desc: drop due to no descriptors
 * @ref_cnt: pool's ref count
 * @stop_priority_th: Threshold to stop priority queue
 * @start_priority_th: Threshold to start priority queue
 */
struct ol_tx_flow_pool_t {
	TAILQ_ENTRY(ol_tx_flow_pool_t) flow_pool_list_elem;
	qdf_spinlock_t flow_pool_lock;
	uint8_t flow_pool_id;
	uint16_t flow_pool_size;
	uint16_t avail_desc;
	uint16_t deficient_desc;
	uint16_t overflow_desc;
	enum flow_pool_status status;
	enum htt_flow_type flow_type;
	uint8_t member_flow_id;
	uint16_t stop_th;
	uint16_t start_th;
	union ol_tx_desc_list_elem_t *freelist;
	uint16_t pkt_drop_no_desc;
	qdf_atomic_t ref_cnt;
	uint16_t stop_priority_th;
	uint16_t start_priority_th;
};
#endif

#define OL_TXRX_INVALID_PEER_UNMAP_COUNT 0xF
/*
 * struct ol_txrx_peer_id_map - Map of firmware peer_ids to peers on host
 * @peer: Pointer to peer object
 * @del_peer: Pointer to the deleted peer object (kept for pending
 *            unmap events)
 * @peer_id_ref_cnt: No. of firmware references to the peer_id
 * @del_peer_id_ref_cnt: No. of outstanding unmap events for peer_id
 *                       after the peer object is deleted on the host.
 * @peer_id_unmap_cnt: No. of unmap events received for this peer_id
 *
 * peer_id is used as an index into the array of ol_txrx_peer_id_map.
 */
struct ol_txrx_peer_id_map {
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_peer_t *del_peer;
	qdf_atomic_t peer_id_ref_cnt;
	qdf_atomic_t del_peer_id_ref_cnt;
	qdf_atomic_t peer_id_unmap_cnt;
};
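
/*
 * A minimal lookup sketch, assuming the pdev's peer_id_to_obj_map
 * array (declared further below) and that the caller holds the
 * appropriate peer reference/map locks:
 *
 *   struct ol_txrx_peer_t *peer = NULL;
 *
 *   if (peer_id < max_peers)
 *           peer = pdev->peer_id_to_obj_map[peer_id].peer;
 *
 * (max_peers is a stand-in for whatever bound the driver sizes the
 * map with; it is not a field of this header.)
 */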

/*
 * ol_txrx_stats_req_internal - internal record of a requested
 * statistics upload, tracked while the request is being serviced
 */
struct ol_txrx_stats_req_internal {
	struct ol_txrx_stats_req base;
	TAILQ_ENTRY(ol_txrx_stats_req_internal) req_list_elem;
	int serviced; /* state of this request */
	int offset;
};

struct ol_txrx_fw_stats_desc_t {
	struct ol_txrx_stats_req_internal *req;
	unsigned char desc_id;
};

struct ol_txrx_fw_stats_desc_elem_t {
	struct ol_txrx_fw_stats_desc_elem_t *next;
	struct ol_txrx_fw_stats_desc_t desc;
};

/**
 * struct ol_txrx_soc_t - soc reference structure
 * @cdp_soc: common base structure
 * @psoc: opaque handle for UMAC psoc object
 * @pdev_list: list of all the pdevs on a soc
 *
 * This is the reference to the soc and all the data
 * which is soc specific.
 */
struct ol_txrx_soc_t {
	/* Common base structure - Should be the first member */
	struct cdp_soc_t cdp_soc;

	struct cdp_ctrl_objmgr_psoc *psoc;
	struct ol_txrx_pdev_t *pdev_list[OL_TXRX_MAX_PDEV_CNT];
};

/*
 * As depicted in the diagram below, the pdev contains an array of
 * NUM_EXT_TIDS ol_tx_active_queues_in_tid_t elements.
 * Each element identifies all the tx queues that are active for
 * the TID, from the different peers.
 *
 * Each peer contains an array of NUM_EXT_TIDS ol_tx_frms_queue_t elements.
 * Each element identifies the tx frames for the TID that need to be sent
 * to the peer.
 *
 *
 *  pdev: ol_tx_active_queues_in_tid_t active_in_tids[NUM_EXT_TIDS]
 *                                TID
 *       0            1            2                     17
 *  +============+============+============+==    ==+============+
 *  | active (y) | active (n) | active (n) |        | active (y) |
 *  |------------+------------+------------+--    --+------------|
 *  | queues     | queues     | queues     |        | queues     |
 *  +============+============+============+==    ==+============+
 *       |                                               |
 *    .--+-----------------------------------------------'
 *    |  |
 *    |  |     peer X:                            peer Y:
 *    |  |     ol_tx_frms_queue_t                 ol_tx_frms_queue_t
 *    |  |     tx_queues[NUM_EXT_TIDS]            tx_queues[NUM_EXT_TIDS]
 *    |  | TID +======+                       TID +======+
 *    |  `---->| next |-------------------------->| next |--X
 *    |     0  | prev |   .------.   .------.  0  | prev |   .------.
 *    |        | txq  |-->|txdesc|-->|txdesc|     | txq  |-->|txdesc|
 *    |        +======+   `------'   `------'     +======+   `------'
 *    |        | next |      |          |      1  | next |      |
 *    |     1  | prev |      v          v         | prev |      v
 *    |        | txq  |   .------.   .------.     | txq  |   .------.
 *    |        +======+   |netbuf|   |netbuf|     +======+   |netbuf|
 *    |        | next |   `------'   `------'     | next |   `------'
 *    |     2  | prev |                        2  | prev |
 *    |        | txq  |                           | txq  |
 *    |        +======+                           +======+
 *    |        |      |                           |      |
 *    |
 *    |
 *    |        |      |                           |      |
 *    |        +======+                           +======+
 *    `------->| next |--X                        | next |
 *          17 | prev |   .------.             17 | prev |
 *             | txq  |-->|txdesc|                | txq  |
 *             +======+   `------'                +======+
 *                           |
 *                           v
 *                        .------.
 *                        |netbuf|
 *                        `------'
 */
struct ol_txrx_pdev_t {
	/* soc - reference to soc structure */
	struct ol_txrx_soc_t *soc;

	/* ctrl_pdev - handle for querying config info */
	struct cdp_cfg *ctrl_pdev;

	/* osdev - handle for mem alloc / free, map / unmap */
	qdf_device_t osdev;

	htt_pdev_handle htt_pdev;

#ifdef WLAN_FEATURE_FASTPATH
	struct CE_handle    *ce_tx_hdl; /* Handle to Tx packet posting CE */
	struct CE_handle    *ce_htt_msg_hdl; /* Handle to TxRx completion CE */
#endif /* WLAN_FEATURE_FASTPATH */

	struct {
		int is_high_latency;
		int host_addba;
		int ll_pause_txq_limit;
		int default_tx_comp_req;
		u8 credit_update_enabled;
		u8 request_tx_comp;
	} cfg;

	/* WDI subscriber's event list */
	wdi_event_subscribe **wdi_event_list;

#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
	bool pkt_log_init;
	/* Pktlog pdev */
	struct pktlog_dev_t *pl_dev;
#endif /* !REMOVE_PKT_LOG && !QVIT */

	/* Monitor mode interface */
	struct ol_txrx_vdev_t *monitor_vdev;

	enum ol_sec_type sec_types[htt_num_sec_types];
	/* standard frame type */
	enum wlan_frm_fmt frame_format;
	enum htt_pkt_type htt_pkt_type;

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/* txrx encap/decap */
	uint8_t sw_tx_encap;
	uint8_t sw_rx_decap;
	uint8_t target_tx_tran_caps;
	uint8_t target_rx_tran_caps;
	/* llc process */
	uint8_t sw_tx_llc_proc_enable;
	uint8_t sw_rx_llc_proc_enable;
	/* A-MSDU */
	uint8_t sw_subfrm_hdr_recovery_enable;
	/* Protected Frame bit handling */
	uint8_t sw_pf_proc_enable;
#endif
	/*
	 * target tx credit -
	 * not needed for LL, but used for HL download scheduler to keep
	 * track of roughly how much space is available in the target for
	 * tx frames
	 */
	qdf_atomic_t target_tx_credit;
	qdf_atomic_t orig_target_tx_credit;

	/*
	 * needed for SDIO HL, Genoa ADMA
	 */
	qdf_atomic_t pad_reserve_tx_credit;

	struct {
		uint16_t pool_size;
		struct ol_txrx_fw_stats_desc_elem_t *pool;
		struct ol_txrx_fw_stats_desc_elem_t *freelist;
		qdf_spinlock_t pool_lock;
		qdf_atomic_t initialized;
	} ol_txrx_fw_stats_desc_pool;

	/* Peer mac address to staid mapping */
	struct ol_mac_addr mac_to_staid[WLAN_MAX_STA_COUNT + 3];

	/* ol_txrx_vdev list */
	TAILQ_HEAD(, ol_txrx_vdev_t) vdev_list;

	/* Inactive peer list */
	TAILQ_HEAD(, ol_txrx_peer_t) inactive_peer_list;

	TAILQ_HEAD(, ol_txrx_stats_req_internal) req_list;
	int req_list_depth;
	qdf_spinlock_t req_list_spinlock;

	/* peer ID to peer object map (array of pointers to peer objects) */
	struct ol_txrx_peer_id_map *peer_id_to_obj_map;

	struct {
		unsigned int mask;
		unsigned int idx_bits;

		TAILQ_HEAD(, ol_txrx_peer_t) * bins;
	} peer_hash;

	/* rx specific processing */
	struct {
		struct {
			TAILQ_HEAD(, ol_rx_reorder_t) waitlist;
			uint32_t timeout_ms;
		} defrag;
		struct {
			int defrag_timeout_check;
			int dup_check;
		} flags;

		struct {
			struct ol_tx_reorder_cat_timeout_t
				access_cats[TXRX_NUM_WMM_AC];
		} reorder_timeout;
		qdf_spinlock_t mutex;
	} rx;

	/* rx proc function */
	void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
			    struct ol_txrx_peer_t *peer,
			    unsigned int tid, qdf_nbuf_t msdu_list);

	/* tx data delivery notification callback function */
	struct {
		ol_txrx_data_tx_cb func;
		void *ctxt;
	} tx_data_callback;

	/* tx management delivery notification callback functions */
	struct {
		ol_txrx_mgmt_tx_cb download_cb;
		ol_txrx_mgmt_tx_cb ota_ack_cb;
		void *ctxt;
	} tx_mgmt_cb;

	data_stall_detect_cb data_stall_detect_callback;
	/* packetdump callback functions */
	ol_txrx_pktdump_cb ol_tx_packetdump_cb;
	ol_txrx_pktdump_cb ol_rx_packetdump_cb;

#ifdef WLAN_FEATURE_TSF_PLUS_SOCK_TS
	tp_ol_timestamp_cb ol_tx_timestamp_cb;
#endif

	struct {
		uint16_t pool_size;
		uint16_t num_free;
		union ol_tx_desc_list_elem_t *array;
		union ol_tx_desc_list_elem_t *freelist;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
		uint8_t num_invalid_bin;
		qdf_spinlock_t flow_pool_list_lock;
		TAILQ_HEAD(flow_pool_list_t, ol_tx_flow_pool_t) flow_pool_list;
#endif
		uint32_t page_size;
		uint16_t desc_reserved_size;
		uint8_t page_divider;
		uint32_t offset_filter;
		struct qdf_mem_multi_page_t desc_pages;
#ifdef DESC_DUP_DETECT_DEBUG
		unsigned long *free_list_bitmap;
#endif
#ifdef QCA_LL_PDEV_TX_FLOW_CONTROL
		uint16_t stop_th;
		uint16_t start_th;
		uint16_t stop_priority_th;
		uint16_t start_priority_th;
		enum flow_pool_status status;
#endif
	} tx_desc;

	/* The pdev_id for this pdev */
	uint8_t id;

	uint8_t is_mgmt_over_wmi_enabled;
#if defined(QCA_LL_TX_FLOW_CONTROL_V2)
	struct ol_txrx_pool_stats pool_stats;
	uint32_t num_msdu_desc;
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
	struct ol_tx_flow_pool_t *mgmt_pool;
#endif
#endif

	struct {
		int (*cmp)(union htt_rx_pn_t *new,
			   union htt_rx_pn_t *old,
			   int is_unicast, int opmode, bool strict_chk);
		int len;
	} rx_pn[htt_num_sec_types];

	/* tx mutex */
	OL_TX_MUTEX_TYPE tx_mutex;

	/*
	 * peer ref mutex:
	 * 1. Protect peer object lookups until the returned peer object's
	 *    reference count is incremented.
	 * 2. Provide mutex when accessing peer object lookup structures.
	 */
	OL_RX_MUTEX_TYPE peer_ref_mutex;
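
	/*
	 * Typical usage sketch (illustrative): a lookup takes
	 * peer_ref_mutex, walks the hash bin, increments the found
	 * peer's ref_cnt, and only then releases the mutex, e.g.
	 *
	 *   qdf_spin_lock_bh(&pdev->peer_ref_mutex);
	 *   peer = <hash-bin lookup>;
	 *   if (peer)
	 *           qdf_atomic_inc(&peer->ref_cnt);
	 *   qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
	 *
	 * so the peer cannot be freed between the lookup and the
	 * reference acquisition.
	 */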

	/*
	 * last_real_peer_mutex:
	 * Protect lookups of any vdev's last_real_peer pointer until the
	 * reference count for the pointed-to peer object is incremented.
	 * This mutex could be in the vdev struct, but it's slightly simpler
	 * to have a single lock in the pdev struct.  Since the lock is only
	 * held for an extremely short time, and since it's very unlikely for
	 * two vdevs to concurrently access the lock, there's no real
	 * benefit to having a per-vdev lock.
	 */
	OL_RX_MUTEX_TYPE last_real_peer_mutex;

	qdf_spinlock_t peer_map_unmap_lock;

	ol_txrx_peer_unmap_sync_cb peer_unmap_sync_cb;

	struct {
		struct {
			struct {
				struct {
					uint64_t ppdus;
					uint64_t mpdus;
				} normal;
				struct {
					/*
					 * mpdu_bad is general -
					 * replace it with the specific counters
					 * below
					 */
					uint64_t mpdu_bad;
					/* uint64_t mpdu_fcs; */
					/* uint64_t mpdu_duplicate; */
					/* uint64_t mpdu_pn_replay; */
					/* uint64_t mpdu_bad_sender; */
					/* ^ comment: peer not found */
					/* uint64_t mpdu_flushed; */
					/* uint64_t msdu_defrag_mic_err; */
					uint64_t msdu_mc_dup_drop;
				} err;
			} rx;
		} priv;
		struct ol_txrx_stats pub;
	} stats;

#if defined(ENABLE_RX_REORDER_TRACE)
	struct {
		uint32_t mask;
		uint32_t idx;
		uint64_t cnt;
#define TXRX_RX_REORDER_TRACE_SIZE_LOG2 8       /* 256 entries */
		struct {
			uint16_t reorder_idx;
			uint16_t seq_num;
			uint8_t num_mpdus;
			uint8_t tid;
		} *data;
	} rx_reorder_trace;
#endif /* ENABLE_RX_REORDER_TRACE */

#if defined(ENABLE_RX_PN_TRACE)
	struct {
		uint32_t mask;
		uint32_t idx;
		uint64_t cnt;
#define TXRX_RX_PN_TRACE_SIZE_LOG2 5    /* 32 entries */
		struct {
			struct ol_txrx_peer_t *peer;
			uint32_t pn32;
			uint16_t seq_num;
			uint8_t unicast;
			uint8_t tid;
		} *data;
	} rx_pn_trace;
#endif /* ENABLE_RX_PN_TRACE */
	/*
	 * tx_sched only applies for HL, but is defined unconditionally
	 * rather than only if defined(CONFIG_HL_SUPPORT), because the
	 * struct only occupies a few bytes, and because wrapping every
	 * reference to its members in "defined(CONFIG_HL_SUPPORT)"
	 * conditional compilation would add complexity.
	 * If this struct gets expanded to a non-trivial size, then it
	 * should be conditionally compiled to only apply if
	 * defined(CONFIG_HL_SUPPORT).
	 */
	qdf_spinlock_t tx_queue_spinlock;
	struct {
		enum ol_tx_scheduler_status tx_sched_status;
		struct ol_tx_sched_t *scheduler;
	} tx_sched;
	/*
	 * tx_queue only applies for HL, but is defined unconditionally to avoid
	 * wrapping references to tx_queue in "defined(CONFIG_HL_SUPPORT)"
	 * conditional compilation.
	 */
	struct {
		qdf_atomic_t rsrc_cnt;
		/* threshold_lo - when to start tx desc margin replenishment */
		uint16_t rsrc_threshold_lo;
		/*
		 * threshold_hi - where to stop during tx desc margin
		 * replenishment
		 */
		uint16_t rsrc_threshold_hi;
	} tx_queue;

#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT)
#define OL_TXQ_LOG_SIZE 512
	qdf_spinlock_t txq_log_spinlock;
	struct {
		int size;
		int oldest_record_offset;
		int offset;
		int allow_wrap;
		u_int32_t wrapped;
		/* aligned to u_int32_t boundary */
		u_int8_t data[OL_TXQ_LOG_SIZE];
	} txq_log;
#endif

#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
	qdf_spinlock_t peer_stat_mutex;
#endif

	int rssi_update_shift;
	int rssi_new_weight;
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
	struct {
		ol_txrx_local_peer_id_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
		ol_txrx_local_peer_id_t freelist;
		qdf_spinlock_t lock;
		ol_txrx_peer_handle map[OL_TXRX_NUM_LOCAL_PEER_IDS];
	} local_peer_ids;
#endif

#ifdef QCA_COMPUTE_TX_DELAY
#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
#define QCA_TX_DELAY_NUM_CATEGORIES \
	(OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES)
#else
#define QCA_TX_DELAY_NUM_CATEGORIES 1
#endif
	struct {
		qdf_spinlock_t mutex;
		struct {
			struct ol_tx_delay_data copies[2]; /* ping-pong */
			int in_progress_idx;
			uint32_t avg_start_time_ticks;
		} cats[QCA_TX_DELAY_NUM_CATEGORIES];
		uint32_t tx_compl_timestamp_ticks;
		uint32_t avg_period_ticks;
		uint32_t hist_internal_bin_width_mult;
		uint32_t hist_internal_bin_width_shift;
	} tx_delay;

	uint16_t packet_count[QCA_TX_DELAY_NUM_CATEGORIES];
	uint16_t packet_loss_count[QCA_TX_DELAY_NUM_CATEGORIES];

#endif /* QCA_COMPUTE_TX_DELAY */

	struct {
		qdf_spinlock_t mutex;
		/* timer used to monitor the throttle "on" phase and
		 * "off" phase
		 */
		qdf_timer_t phase_timer;
		/* timer used to send tx frames */
		qdf_timer_t tx_timer;
		/* This is the time in ms of the throttling window; it
		 * includes an "on" phase and an "off" phase
		 */
		uint32_t throttle_period_ms;
		/* Current throttle level set by the client, e.g. level 0,
		 * level 1, etc.
		 */
		enum throttle_level current_throttle_level;
		/* Index that points to the phase within the throttle period */
		enum throttle_phase current_throttle_phase;
		/* Maximum number of frames to send to the target at one time */
		uint32_t tx_threshold;
		/* stores time in ms of on/off phase for each throttle level */
		int throttle_time_ms[THROTTLE_LEVEL_MAX][THROTTLE_PHASE_MAX];
		/* mark true if traffic is paused due to thermal throttling */
		bool is_paused;
		/* Save outstanding packet number */
		uint16_t prev_outstanding_num;
	} tx_throttle;
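
	/*
	 * Duty-cycle sketch (illustrative numbers only): with
	 * throttle_period_ms = 4000 and a level whose on/off split is
	 * 50%, throttle_time_ms[level][THROTTLE_PHASE_ON] and
	 * throttle_time_ms[level][THROTTLE_PHASE_OFF] would each be
	 * 2000; phase_timer alternates between the two phases for the
	 * current level, and tx_timer paces frame transmission within
	 * a phase.
	 */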

#if defined(FEATURE_TSO)
	struct {
		uint16_t pool_size;
		uint16_t num_free;
		struct qdf_tso_seg_elem_t *freelist;
		/* tso mutex */
		OL_TX_MUTEX_TYPE tso_mutex;
	} tso_seg_pool;
	struct {
		uint16_t num_seg_pool_size;
		uint16_t num_free;
		struct qdf_tso_num_seg_elem_t *freelist;
		/* tso mutex */
		OL_TX_MUTEX_TYPE tso_num_seg_mutex;
	} tso_num_seg_pool;
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
	struct {
		enum ol_tx_peer_bal_state enabled;
		qdf_spinlock_t mutex;
		/* timer used to trigger more frames for bad peers */
		qdf_timer_t peer_bal_timer;
		/* This is the time in ms of the peer balance timer period */
		u_int32_t peer_bal_period_ms;
		/* This is the txq limit */
		u_int32_t peer_bal_txq_limit;
		/* This is the state of the peer balance timer */
		enum ol_tx_peer_bal_timer_state peer_bal_timer_state;
		/* This is the count of active peers which are under
		 * tx flow control
		 */
		u_int32_t peer_num;
		/* This is the list of peers which are under tx flow control */
		struct ol_tx_limit_peer_t limit_list[MAX_NO_PEERS_IN_LIMIT];
		/* This is the threshold configuration */
		struct tx_peer_threshold ctl_thresh[TXRX_IEEE11_MAX];
	} tx_peer_bal;
#endif /* CONFIG_HL_SUPPORT && QCA_BAD_PEER_TX_FLOW_CL */

	struct ol_tx_queue_group_t txq_grps[OL_TX_MAX_TXQ_GROUPS];
#if defined(FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL) && \
	defined(FEATURE_HL_DBS_GROUP_CREDIT_SHARING)
	bool limit_lend;
	u16 min_reserve;
#endif
#ifdef DEBUG_HL_LOGGING
	qdf_spinlock_t grp_stat_spinlock;
	struct ol_tx_group_credit_stats_t grp_stats;
#endif
	int tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES];
	uint8_t ocb_peer_valid;
	struct ol_txrx_peer_t *ocb_peer;
	tx_pause_callback pause_cb;

	void (*offld_flush_cb)(void *);
	struct ol_txrx_peer_t *self_peer;

	/* dp debug fs */
	struct dentry *dpt_stats_log_dir;
	enum qdf_dpt_debugfs_state state;
	struct qdf_debugfs_fops dpt_debugfs_fops;

#ifdef IPA_OFFLOAD
	ipa_uc_op_cb_type ipa_uc_op_cb;
	void *usr_ctxt;
	struct ol_txrx_ipa_resources ipa_resource;
#endif /* IPA_OFFLOAD */
	bool new_htt_msg_format;
	uint8_t peer_id_unmap_ref_cnt;
	bool enable_peer_unmap_conf_support;
	bool enable_tx_compl_tsf64;
	uint64_t last_host_time;
	uint64_t last_tsf64_time;

	/* Current noise-floor reading for the pdev channel */
	int16_t chan_noise_floor;
	uint32_t total_bundle_queue_length;
};

#define OL_TX_HL_DEL_ACK_HASH_SIZE    256

/**
 * enum ol_tx_hl_packet_type - type for tcp packet
 * @TCP_PKT_ACK: TCP ACK frame
 * @TCP_PKT_NO_ACK: TCP frame, but not an ACK
 * @NO_TCP_PKT: not a TCP frame
 */
enum ol_tx_hl_packet_type {
	TCP_PKT_ACK,
	TCP_PKT_NO_ACK,
	NO_TCP_PKT
};

/**
 * struct packet_info - tcp packet information
 */
struct packet_info {
	/** @type: flag the packet type */
	enum ol_tx_hl_packet_type type;
	/** @stream_id: stream identifier */
	uint16_t stream_id;
	/** @ack_number: tcp ack number */
	uint32_t ack_number;
	/** @dst_ip: destination ip address */
	uint32_t dst_ip;
	/** @src_ip: source ip address */
	uint32_t src_ip;
	/** @dst_port: destination port */
	uint16_t dst_port;
	/** @src_port: source port */
	uint16_t src_port;
};

/**
 * struct tcp_stream_node - tcp stream node
 */
struct tcp_stream_node {
	/** @next: next tcp stream node */
	struct tcp_stream_node *next;
	/** @no_of_ack_replaced: count of replaced ack frames */
	uint8_t no_of_ack_replaced;
	/** @stream_id: stream identifier */
	uint16_t stream_id;
	/** @dst_ip: destination ip address */
	uint32_t dst_ip;
	/** @src_ip: source ip address */
	uint32_t src_ip;
	/** @dst_port: destination port */
	uint16_t dst_port;
	/** @src_port: source port */
	uint16_t src_port;
	/** @ack_number: tcp ack number */
	uint32_t ack_number;
	/** @head: points to the tcp ack frame */
	qdf_nbuf_t head;
};

/**
 * struct tcp_del_ack_hash_node - hash node for tcp delayed ack
 */
struct tcp_del_ack_hash_node {
	/** @hash_node_lock: spin lock */
	qdf_spinlock_t hash_node_lock;
	/** @no_of_entries: number of entries */
	uint8_t no_of_entries;
	/** @head: the head of the stream node list */
	struct tcp_stream_node *head;
};
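
/*
 * Hash placement sketch (an assumption for illustration, not
 * necessarily the driver's exact hash): a stream's bucket could be
 * derived from its stream_id, e.g.
 *
 *   idx = stream_id & (OL_TX_HL_DEL_ACK_HASH_SIZE - 1);
 *   node = &vdev->tcp_ack_hash.node[idx];
 *
 * with node->hash_node_lock taken before walking the tcp_stream_node
 * list at node->head.
 */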

struct ol_txrx_vdev_t {
	struct ol_txrx_pdev_t *pdev; /* pdev - the physical device that is
				      * the parent of this virtual device
				      */
	uint8_t vdev_id;             /* ID used to specify a particular vdev
				      * to the target
				      */
	void *osif_dev;

	void *ctrl_vdev; /* vdev objmgr handle */

	union ol_txrx_align_mac_addr_t mac_addr; /* MAC address */
	/* tx paused - NO LONGER NEEDED? */
	TAILQ_ENTRY(ol_txrx_vdev_t) vdev_list_elem; /* node in the pdev's list
						     * of vdevs
						     */
	TAILQ_HEAD(peer_list_t, ol_txrx_peer_t) peer_list;
	struct ol_txrx_peer_t *last_real_peer; /* last real peer created for
						* this vdev (not "self"
						* pseudo-peer)
						*/
	ol_txrx_rx_fp rx; /* receive function used by this vdev */
	ol_txrx_stats_rx_fp stats_rx; /* rx stats callback used by this vdev */

	struct {
		uint32_t txack_success;
		uint32_t txack_failed;
	} txrx_stats;

	/* completion function used by this vdev */
	ol_txrx_completion_fp tx_comp;

	/* delete notifier to DP component */
	ol_txrx_vdev_delete_cb vdev_del_notify;

	struct {
		/*
		 * If the vdev object couldn't be deleted immediately because
		 * it still had some peer objects left, remember that a delete
		 * was requested, so it can be deleted once all its peers have
		 * been deleted.
		 */
		int pending;
		/*
		 * Store a function pointer and a context argument to provide a
		 * notification for when the vdev is deleted.
		 */
		ol_txrx_vdev_delete_cb callback;
		void *context;
		atomic_t detaching;
	} delete;

	/* safe mode control to bypass the encrypt and decipher process */
	uint32_t safemode;

	/* rx filter related */
	uint32_t drop_unenc;
	struct privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
	uint32_t num_filters;

	enum wlan_op_mode opmode;
	enum wlan_op_subtype subtype;
	enum QDF_OPMODE qdf_opmode;

#ifdef QCA_IBSS_SUPPORT
	/* ibss mode related */
	int16_t ibss_peer_num;  /* the number of active peers */
	int16_t ibss_peer_heart_beat_timer; /* for detecting peer departure */
#endif

#if defined(CONFIG_HL_SUPPORT)
	struct ol_tx_frms_queue_t txqs[OL_TX_VDEV_NUM_QUEUES];
#endif

	struct {
		struct {
			qdf_nbuf_t head;
			qdf_nbuf_t tail;
			int depth;
		} txq;
		uint32_t paused_reason;
		qdf_spinlock_t mutex;
		qdf_timer_t timer;
		int max_q_depth;
		bool is_q_paused;
		bool is_q_timer_on;
		uint32_t q_pause_cnt;
		uint32_t q_unpause_cnt;
		uint32_t q_overflow_cnt;
	} ll_pause;
	bool disable_intrabss_fwd;
	qdf_atomic_t os_q_paused;
	uint16_t tx_fl_lwm;
	uint16_t tx_fl_hwm;
	qdf_spinlock_t flow_control_lock;
	ol_txrx_tx_flow_control_fp osif_flow_control_cb;
	ol_txrx_tx_flow_control_is_pause_fp osif_flow_control_is_pause;
	void *osif_fc_ctx;

#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
	/** @driver_del_ack_enabled: true if tcp delayed ack is enabled */
	bool driver_del_ack_enabled;
	/** @no_of_tcpack_replaced: number of tcp acks replaced */
	uint32_t no_of_tcpack_replaced;
	/** @no_of_tcpack: number of tcp ack frames */
	uint32_t no_of_tcpack;

	/** @tcp_ack_hash: hash table for tcp delayed ack running information */
	struct {
		/** @node: tcp ack frames are stored in this hash table */
		struct tcp_del_ack_hash_node node[OL_TX_HL_DEL_ACK_HASH_SIZE];
		/** @timer: timeout if no more tcp ack feeding */
		qdf_hrtimer_data_t timer;
		/** @is_timer_running: is the timer running? */
		qdf_atomic_t is_timer_running;
		/** @tcp_node_in_use_count: number of nodes in use */
		qdf_atomic_t tcp_node_in_use_count;
		/** @tcp_del_ack_tq: bh to handle the tcp delayed ack */
		qdf_bh_t tcp_del_ack_tq;
		/** @tcp_free_list: free list */
		struct tcp_stream_node *tcp_free_list;
		/** @tcp_free_list_lock: spin lock */
		qdf_spinlock_t tcp_free_list_lock;
	} tcp_ack_hash;
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
	union ol_txrx_align_mac_addr_t hl_tdls_ap_mac_addr;
	bool hlTdlsFlag;
#endif

#if defined(QCA_HL_NETDEV_FLOW_CONTROL)
	qdf_atomic_t tx_desc_count;
	int tx_desc_limit;
	int queue_restart_th;
	int queue_stop_th;
	int prio_q_paused;
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */

	uint16_t wait_on_peer_id;
	union ol_txrx_align_mac_addr_t last_peer_mac_addr;
	qdf_event_t wait_delete_comp;
#if defined(FEATURE_TSO)
	struct {
		int pool_elems; /* total number of elements in the pool */
		int alloc_cnt; /* number of allocated elements */
		uint32_t *freelist; /* free list of qdf_tso_seg_elem_t */
	} tso_pool_t;
#endif

	/* last channel change event received */
	struct {
		bool is_valid;  /* whether the rest of the members are valid */
		uint16_t mhz;
		uint16_t band_center_freq1;
		uint16_t band_center_freq2;
		WLAN_PHY_MODE phy_mode;
	} ocb_channel_event;

	/* Information about the schedules on the OCB channels */
	struct ol_txrx_ocb_chan_info *ocb_channel_info;
	uint32_t ocb_channel_count;

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	struct ol_tx_flow_pool_t *pool;
#endif
	/* intra bss forwarded tx and rx packets count */
	uint64_t fwd_tx_packets;
	uint64_t fwd_rx_packets;
	bool is_wisa_mode_enable;
	uint8_t mac_id;

	uint64_t no_of_bundle_sent_after_threshold;
	uint64_t no_of_bundle_sent_in_timer;
	uint64_t no_of_pkt_not_added_in_queue;
	bool bundling_required;
	struct {
		struct {
			qdf_nbuf_t head;
			qdf_nbuf_t tail;
			int depth;
		} txq;
		qdf_spinlock_t mutex;
		qdf_timer_t timer;
	} bundle_queue;
};

struct ol_rx_reorder_array_elem_t {
	qdf_nbuf_t head;
	qdf_nbuf_t tail;
};

struct ol_rx_reorder_t {
	uint8_t win_sz;
	uint8_t win_sz_mask;
	uint8_t num_mpdus;
	struct ol_rx_reorder_array_elem_t *array;
	/* base - single rx reorder element used for non-aggr cases */
	struct ol_rx_reorder_array_elem_t base;
#if defined(QCA_SUPPORT_OL_RX_REORDER_TIMEOUT)
	struct ol_rx_reorder_timeout_list_elem_t timeout;
#endif
	/* only used for defrag right now */
	TAILQ_ENTRY(ol_rx_reorder_t) defrag_waitlist_elem;
	uint32_t defrag_timeout_ms;
	/* get back to parent ol_txrx_peer_t when ol_rx_reorder_t is in a
	 * waitlist
	 */
	uint16_t tid;
};

enum {
	txrx_sec_mcast = 0,
	txrx_sec_ucast
};

typedef A_STATUS (*ol_tx_filter_func)(struct ol_txrx_msdu_info_t *
				      tx_msdu_info);

#define OL_TXRX_PEER_SECURITY_MULTICAST  0
#define OL_TXRX_PEER_SECURITY_UNICAST    1
#define OL_TXRX_PEER_SECURITY_MAX        2


/* Allow 6000 ms to receive peer unmap events after peer is deleted */
#define OL_TXRX_PEER_UNMAP_TIMEOUT (6000)

struct ol_txrx_cached_bufq_t {
	/* cached_bufq is used to enqueue the pending RX frames from a peer
	 * before the peer is registered for data service. The list will be
	 * flushed to HDD once that station is registered.
	 */
	struct list_head cached_bufq;
	/* mutual exclusion lock to access the cached_bufq queue */
	qdf_spinlock_t bufq_lock;
	/* # entries in queue after which subsequent adds will be dropped */
	uint32_t thresh;
	/* # entries present in cached_bufq */
	uint32_t curr;
	/* # max num of entries in the queue if bufq thresh was not in place */
	uint32_t high_water_mark;
	/* # max num of entries in the queue if we did not drop packets */
	uint32_t qdepth_no_thresh;
	/* # of packets (beyond threshold) dropped from cached_bufq */
	uint32_t dropped;
};

struct ol_txrx_peer_t {
	struct ol_txrx_vdev_t *vdev;

	/* UMAC peer objmgr handle */
	struct cdp_ctrl_objmgr_peer *ctrl_peer;

	qdf_atomic_t ref_cnt;
	qdf_atomic_t del_ref_cnt;
	qdf_atomic_t access_list[PEER_DEBUG_ID_MAX];
	qdf_atomic_t delete_in_progress;
	qdf_atomic_t flush_in_progress;

	/* The peer state tracking is used for HL systems
	 * that don't support tx and rx filtering within the target.
	 * In such systems, the peer's state determines what kind of
	 * tx and rx filtering, if any, is done.
	 * This variable doesn't apply to LL systems, or to HL systems for
	 * which the target handles tx and rx filtering. However, it is
	 * simplest to declare and update this variable unconditionally,
	 * for all systems.
	 */
	enum ol_txrx_peer_state state;
	qdf_spinlock_t peer_info_lock;

	/* Wrapper around the cached_bufq list */
	struct ol_txrx_cached_bufq_t bufq_info;

	ol_tx_filter_func tx_filter;

	/* peer ID(s) for this peer */
	uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
	uint16_t local_id;
#endif

	union ol_txrx_align_mac_addr_t mac_addr;

	/* node in the vdev's list of peers */
	TAILQ_ENTRY(ol_txrx_peer_t) peer_list_elem;
	/* node in the hash table bin's list of peers */
	TAILQ_ENTRY(ol_txrx_peer_t) hash_list_elem;
	/* node in the pdev's inactive list of peers */
	TAILQ_ENTRY(ol_txrx_peer_t) inactive_peer_list_elem;

	/*
	 * per TID info -
	 * stored in separate arrays to avoid alignment padding mem overhead
	 */
	struct ol_rx_reorder_t tids_rx_reorder[OL_TXRX_NUM_EXT_TIDS];
	union htt_rx_pn_t tids_last_pn[OL_TXRX_NUM_EXT_TIDS];
	uint8_t tids_last_pn_valid[OL_TXRX_NUM_EXT_TIDS];
	uint8_t tids_rekey_flag[OL_TXRX_NUM_EXT_TIDS];
	uint16_t tids_next_rel_idx[OL_TXRX_NUM_EXT_TIDS];
	uint16_t tids_last_seq[OL_TXRX_NUM_EXT_TIDS];
	uint16_t tids_mcast_last_seq[OL_TXRX_NUM_EXT_TIDS];

	struct {
		enum htt_sec_type sec_type;
		uint32_t michael_key[2];        /* relevant for TKIP */
	} security[2];          /* 0 -> multicast, 1 -> unicast */

	/*
	 * rx proc function: this either is a copy of pdev's rx_opt_proc for
	 * regular rx processing, or has been redirected to a /dev/null discard
	 * function when peer deletion is in progress.
	 */
	void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
			    struct ol_txrx_peer_t *peer,
			    unsigned int tid, qdf_nbuf_t msdu_list);

#if defined(CONFIG_HL_SUPPORT)
	struct ol_tx_frms_queue_t txqs[OL_TX_NUM_TIDS];
#endif

#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
	ol_txrx_peer_stats_t stats;
#endif
	int16_t rssi_dbm;

	/* NAWDS flag and BSS peer bit */
	uint16_t nawds_enabled:1, bss_peer:1, valid:1;

	/* QoS info */
	uint8_t qos_capable;
	/* U-APSD tid mask */
	uint8_t uapsd_mask;
	/* flag indicating key installed */
	uint8_t keyinstalled;

	/* Bit to indicate if PN check is done in fw */
	qdf_atomic_t fw_pn_check;

	/* PN counter for Robust Management Frames */
	uint64_t last_rmf_pn;
	uint32_t rmf_pn_replays;
	uint8_t last_rmf_pn_valid;

	/* Properties of the last received PPDU */
	int16_t last_pkt_rssi_cmb;
	int16_t last_pkt_rssi[4];
	uint8_t last_pkt_legacy_rate;
	uint8_t last_pkt_legacy_rate_sel;
	uint32_t last_pkt_timestamp_microsec;
	uint8_t last_pkt_timestamp_submicrosec;
	uint32_t last_pkt_tsf;
	uint8_t last_pkt_tid;
	uint16_t last_pkt_center_freq;
#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
	u_int16_t tx_limit;
	u_int16_t tx_limit_flag;
	u_int16_t tx_pause_flag;
#endif
	qdf_time_t last_assoc_rcvd;
	qdf_time_t last_disassoc_rcvd;
	qdf_time_t last_deauth_rcvd;
	qdf_atomic_t fw_create_pending;
	qdf_timer_t peer_unmap_timer;
	bool is_tdls_peer; /* Mark peer as tdls peer */
	bool tdls_offchan_enabled; /* TDLS OffChan operation in use */
};

struct ol_rx_remote_data {
	qdf_nbuf_t msdu;
	uint8_t mac_id;
};

struct ol_fw_data {
	void *data;
	uint32_t len;
};

#define INVALID_REORDER_INDEX 0xFFFF

#define SPS_DESC_SIZE 8

#endif /* _OL_TXRX_TYPES__H_ */