1  /*
2   * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
3   * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4   *
5   * Permission to use, copy, modify, and/or distribute this software for
6   * any purpose with or without fee is hereby granted, provided that the
7   * above copyright notice and this permission notice appear in all
8   * copies.
9   *
10   * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11   * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12   * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13   * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14   * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15   * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16   * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17   * PERFORMANCE OF THIS SOFTWARE.
18   */
19  
20  #include <qdf_atomic.h>         /* qdf_atomic_inc, etc. */
21  #include <qdf_lock.h>           /* qdf_os_spinlock */
22  #include <qdf_time.h>           /* qdf_system_ticks, etc. */
23  #include <qdf_nbuf.h>           /* qdf_nbuf_t */
24  #include <qdf_net_types.h>      /* QDF_NBUF_TX_EXT_TID_INVALID */
25  
26  #include "queue.h"          /* TAILQ */
27  #ifdef QCA_COMPUTE_TX_DELAY
28  #include <enet.h>               /* ethernet_hdr_t, etc. */
29  #include <ipv6_defs.h>          /* ipv6_traffic_class */
30  #endif
31  
32  #include <ol_txrx_api.h>        /* ol_txrx_vdev_handle, etc. */
33  #include <ol_htt_tx_api.h>      /* htt_tx_compl_desc_id */
34  #include <ol_txrx_htt_api.h>    /* htt_tx_status */
35  
36  #include <ol_ctrl_txrx_api.h>
37  #include <cdp_txrx_tx_delay.h>
38  #include <ol_txrx_types.h>      /* ol_txrx_vdev_t, etc */
39  #include <ol_tx_desc.h>         /* ol_tx_desc_find, ol_tx_desc_frame_free */
40  #ifdef QCA_COMPUTE_TX_DELAY
41  #include <ol_tx_classify.h>     /* ol_tx_dest_addr_find */
42  #endif
43  #include <ol_txrx_internal.h>   /* OL_TX_DESC_NO_REFS, etc. */
44  #include <ol_osif_txrx_api.h>
45  #include <ol_tx.h>              /* ol_tx_reinject */
46  #include <ol_tx_send.h>
47  
48  #include <ol_cfg.h>             /* ol_cfg_is_high_latency */
49  #include <ol_tx_sched.h>
50  #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
51  #include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
52  #endif
53  #include <ol_tx_queue.h>
54  #include <ol_txrx.h>
55  #include <pktlog_ac_fmt.h>
56  #include <cdp_txrx_handle.h>
57  #include <wlan_reg_services_api.h>
58  #include "qdf_hrtimer.h"
59  
60  /* High/Low tx resource count in percentage */
61  /* Set default high threshold to 15% */
62  #ifndef TX_RESOURCE_HIGH_TH_IN_PER
63  #define TX_RESOURCE_HIGH_TH_IN_PER 15
64  #endif
65  
66  /* Set default low threshold to 5% */
67  #ifndef TX_RESOURCE_LOW_TH_IN_PER
68  #define TX_RESOURCE_LOW_TH_IN_PER 5
69  #endif
70  
71  #ifdef QCA_HL_NETDEV_FLOW_CONTROL
72  static u16 ol_txrx_tx_desc_alloc_table[TXRX_FC_MAX] = {
73  	[TXRX_FC_5GH_80M_2x2] = 2000,
74  	[TXRX_FC_2GH_40M_2x2] = 800,
75  };
76  #endif /* QCA_HL_NETDEV_FLOW_CONTROL */
77  
78  /* tx filtering is handled within the target FW */
79  #define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */
80  
81  u_int16_t
ol_tx_desc_pool_size_hl(struct cdp_cfg *ctrl_pdev)
83  {
84  	uint16_t desc_pool_size;
85  	uint16_t steady_state_tx_lifetime_ms;
86  	uint16_t safety_factor;
87  
88  	/*
89  	 * Steady-state tx latency:
90  	 *     roughly 1-2 ms flight time
91  	 *   + roughly 1-2 ms prep time,
92  	 *   + roughly 1-2 ms target->host notification time.
93  	 * = roughly 6 ms total
94  	 * Thus, steady state number of frames =
95  	 * steady state max throughput / frame size * tx latency, e.g.
96  	 * 1 Gbps / 1500 bytes * 6 ms = 500
97  	 *
98  	 */
99  	steady_state_tx_lifetime_ms = 6;
100  
101  	safety_factor = 8;
102  
103  	desc_pool_size =
104  		ol_cfg_max_thruput_mbps(ctrl_pdev) *
105  		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
106  		(8 * OL_TX_AVG_FRM_BYTES) *
107  		steady_state_tx_lifetime_ms *
108  		safety_factor;
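	/*
	 * Worked example (illustrative only, assuming OL_TX_AVG_FRM_BYTES is
	 * roughly 1500 and a configured max throughput of 800 Mbps):
	 *   800 * 1000 / (8 * 1500) = 66 frames per ms of tx latency,
	 *   66 * 6 ms * 8 (safety factor) = ~3168 descriptors,
	 * which is then clamped by the MIN/MAX bounds below.
	 */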
109  
110  	/* minimum */
111  	if (desc_pool_size < OL_TX_DESC_POOL_SIZE_MIN_HL)
112  		desc_pool_size = OL_TX_DESC_POOL_SIZE_MIN_HL;
113  
114  	/* maximum */
115  	if (desc_pool_size > OL_TX_DESC_POOL_SIZE_MAX_HL)
116  		desc_pool_size = OL_TX_DESC_POOL_SIZE_MAX_HL;
117  
118  	return desc_pool_size;
119  }
120  
121  #ifdef CONFIG_TX_DESC_HI_PRIO_RESERVE
122  
/**
 * ol_tx_hl_desc_alloc() - Allocate and initialize a tx descriptor
 *                         for an HL system.
 * @pdev: the data physical device sending the data
 * @vdev: the virtual device sending the data
 * @msdu: the tx frame
 * @msdu_info: the tx meta data
 *
 * Once the count of free descriptors drops to the high-priority reserve
 * (TXRX_HL_TX_DESC_HI_PRIO_RESERVED), only DHCP and EAPOL frames are still
 * allowed to take a descriptor from the pool.
 *
 * Return: the tx descriptor, or NULL if none could be allocated
 */
133  static inline
struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
135  					 struct ol_txrx_vdev_t *vdev,
136  					 qdf_nbuf_t msdu,
137  					 struct ol_txrx_msdu_info_t *msdu_info)
138  {
139  	struct ol_tx_desc_t *tx_desc = NULL;
140  
141  	if (qdf_atomic_read(&pdev->tx_queue.rsrc_cnt) >
142  	    TXRX_HL_TX_DESC_HI_PRIO_RESERVED) {
143  		tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
144  	} else if (qdf_nbuf_is_ipv4_pkt(msdu) == true) {
145  		if ((QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
146  		    QDF_NBUF_CB_PACKET_TYPE_DHCP) ||
147  		    (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
148  		    QDF_NBUF_CB_PACKET_TYPE_EAPOL)) {
149  			tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
150  			ol_txrx_info("Got tx desc from resv pool");
151  		}
152  	}
153  	return tx_desc;
154  }
155  
156  #elif defined(QCA_HL_NETDEV_FLOW_CONTROL)
bool ol_tx_desc_is_high_prio(qdf_nbuf_t msdu)
158  {
159  	enum qdf_proto_subtype proto_subtype;
160  	bool high_prio = false;
161  
162  	if (qdf_nbuf_is_ipv4_pkt(msdu) == true) {
163  		if ((QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
164  		    QDF_NBUF_CB_PACKET_TYPE_DHCP) ||
165  		    (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
166  		    QDF_NBUF_CB_PACKET_TYPE_EAPOL))
167  			high_prio = true;
168  	} else if (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
169  		   QDF_NBUF_CB_PACKET_TYPE_ARP) {
170  		high_prio = true;
171  	} else if ((QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
172  		   QDF_NBUF_CB_PACKET_TYPE_ICMPv6)) {
173  		proto_subtype = qdf_nbuf_get_icmpv6_subtype(msdu);
		switch (proto_subtype) {
		case QDF_PROTO_ICMPV6_NA:
		case QDF_PROTO_ICMPV6_NS:
			high_prio = true;
			break;
		default:
			high_prio = false;
		}
181  	}
182  	return high_prio;
183  }
184  
185  static inline
struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
187  					 struct ol_txrx_vdev_t *vdev,
188  					 qdf_nbuf_t msdu,
189  					 struct ol_txrx_msdu_info_t *msdu_info)
190  {
191  	struct ol_tx_desc_t *tx_desc =
192  			ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
193  
194  	if (!tx_desc)
195  		return NULL;
196  
197  	qdf_spin_lock_bh(&pdev->tx_mutex);
198  	/* return if TX flow control disabled */
199  	if (vdev->tx_desc_limit == 0) {
200  		qdf_spin_unlock_bh(&pdev->tx_mutex);
201  		return tx_desc;
202  	}
203  
204  	if (!qdf_atomic_read(&vdev->os_q_paused) &&
205  	    (qdf_atomic_read(&vdev->tx_desc_count) >= vdev->queue_stop_th)) {
206  		/*
207  		 * Pause normal priority
208  		 * netdev queues if tx desc limit crosses
209  		 */
210  		pdev->pause_cb(vdev->vdev_id,
211  			       WLAN_STOP_NON_PRIORITY_QUEUE,
212  			       WLAN_DATA_FLOW_CONTROL);
213  		qdf_atomic_set(&vdev->os_q_paused, 1);
214  	} else if (ol_tx_desc_is_high_prio(msdu) && !vdev->prio_q_paused &&
215  		   (qdf_atomic_read(&vdev->tx_desc_count)
216  		   == vdev->tx_desc_limit)) {
217  		/* Pause high priority queue */
218  		pdev->pause_cb(vdev->vdev_id,
219  			       WLAN_NETIF_PRIORITY_QUEUE_OFF,
220  			       WLAN_DATA_FLOW_CONTROL_PRIORITY);
221  		vdev->prio_q_paused = 1;
222  	}
223  	qdf_spin_unlock_bh(&pdev->tx_mutex);
224  
225  	return tx_desc;
226  }
227  
228  #else
229  
230  static inline
struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
232  					 struct ol_txrx_vdev_t *vdev,
233  					 qdf_nbuf_t msdu,
234  					 struct ol_txrx_msdu_info_t *msdu_info)
235  {
236  	struct ol_tx_desc_t *tx_desc = NULL;
237  
238  	tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
239  	return tx_desc;
240  }
241  #endif
242  
243  static inline uint16_t
ol_txrx_rsrc_threshold_lo(int desc_pool_size)
245  {
246  	int threshold_low;
247  
248  	/* always maintain a 5% margin of unallocated descriptors */
249  	threshold_low = ((TX_RESOURCE_LOW_TH_IN_PER) *
250  			 desc_pool_size) / 100;
251  
252  	return threshold_low;
253  }
254  
255  static inline uint16_t
ol_txrx_rsrc_threshold_hi(int desc_pool_size)
257  {
258  	int threshold_high;
259  	/* when freeing up descriptors, keep going until
260  	 * there's a 15% margin
261  	 */
262  	threshold_high = ((TX_RESOURCE_HIGH_TH_IN_PER) *
263  			  desc_pool_size) / 100;
264  
265  	return threshold_high;
266  }
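/*
 * Illustrative example for the two thresholds above: with a descriptor pool
 * of 1000 entries and the default percentages, rsrc_threshold_lo = 50 and
 * rsrc_threshold_hi = 150.  The HL flow-control code that consumes these
 * values (outside this file) is expected to stop handing out descriptors
 * near the low watermark and keep freeing until the pool climbs back above
 * the high watermark.
 */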
267  
void ol_tx_init_pdev(ol_txrx_pdev_handle pdev)
269  {
270  	uint16_t desc_pool_size, i;
271  
272  	desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
273  
274  	qdf_atomic_init(&pdev->tx_queue.rsrc_cnt);
275  	qdf_atomic_add(desc_pool_size, &pdev->tx_queue.rsrc_cnt);
276  
277  	pdev->tx_queue.rsrc_threshold_lo =
278  		ol_txrx_rsrc_threshold_lo(desc_pool_size);
279  	pdev->tx_queue.rsrc_threshold_hi =
280  		ol_txrx_rsrc_threshold_hi(desc_pool_size);
281  
282  	for (i = 0 ; i < OL_TX_MAX_TXQ_GROUPS; i++)
283  		qdf_atomic_init(&pdev->txq_grps[i].credit);
284  
285  	ol_tx_target_credit_init(pdev, desc_pool_size);
286  }
287  
288  #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
static inline int ol_tx_encap_wrapper(struct ol_txrx_pdev_t *pdev,
290  				      ol_txrx_vdev_handle vdev,
291  				      struct ol_tx_desc_t *tx_desc,
292  				      qdf_nbuf_t msdu,
293  				      struct ol_txrx_msdu_info_t *tx_msdu_info)
294  {
295  	if (OL_TX_ENCAP(vdev, tx_desc, msdu, tx_msdu_info) != A_OK) {
296  		qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
297  		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
298  		if (tx_msdu_info->peer) {
299  			/* remove the peer reference added above */
300  			ol_txrx_peer_release_ref(tx_msdu_info->peer,
301  						 PEER_DEBUG_ID_OL_INTERNAL);
302  		}
303  		return -EINVAL;
304  	}
305  
306  	return 0;
307  }
308  #else
static inline int ol_tx_encap_wrapper(struct ol_txrx_pdev_t *pdev,
310  				      ol_txrx_vdev_handle vdev,
311  				      struct ol_tx_desc_t *tx_desc,
312  				      qdf_nbuf_t msdu,
313  				      struct ol_txrx_msdu_info_t *tx_msdu_info)
314  {
315  	/* no-op */
316  	return 0;
317  }
318  #endif
319  
/**
 * parse_ocb_tx_header() - check for an OCB TX control header on a packet
 *                         and extract it if present
 * @msdu:    Pointer to OS packet (qdf_nbuf_t)
 * @tx_ctrl: Pointer to the extracted TX control header
 *
 * Return: true if OCB parsing is successful
 */
327  #ifdef WLAN_FEATURE_DSRC
328  #define OCB_HEADER_VERSION     1
static bool parse_ocb_tx_header(qdf_nbuf_t msdu,
330  				struct ocb_tx_ctrl_hdr_t *tx_ctrl)
331  {
332  	qdf_ether_header_t *eth_hdr_p;
333  	struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;
334  
335  	/* Check if TX control header is present */
336  	eth_hdr_p = (qdf_ether_header_t *)qdf_nbuf_data(msdu);
337  	if (eth_hdr_p->ether_type != QDF_SWAP_U16(ETHERTYPE_OCB_TX))
338  		/* TX control header is not present. Nothing to do.. */
339  		return true;
340  
341  	/* Remove the ethernet header */
342  	qdf_nbuf_pull_head(msdu, sizeof(qdf_ether_header_t));
343  
344  	/* Parse the TX control header */
345  	tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *)qdf_nbuf_data(msdu);
346  
347  	if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
348  		if (tx_ctrl)
349  			qdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
350  				     sizeof(*tx_ctrl_hdr));
351  	} else {
352  		/* The TX control header is invalid. */
353  		return false;
354  	}
355  
356  	/* Remove the TX control header */
357  	qdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
358  	return true;
359  }
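/*
 * Sketch of the frame layout assumed by parse_ocb_tx_header() (illustrative;
 * only the version and length fields of the control header are used here):
 *
 *   | 802.3 header with ethertype | struct ocb_tx_ctrl_hdr_t | payload |
 *   | ETHERTYPE_OCB_TX            | (version, length, ...)   |         |
 *
 * Both the ethernet header and the control header are pulled off the nbuf
 * before the frame continues down the tx path.
 */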
360  #else
static bool parse_ocb_tx_header(qdf_nbuf_t msdu,
362  				struct ocb_tx_ctrl_hdr_t *tx_ctrl)
363  {
364  	return true;
365  }
366  #endif
367  
368  /**
369   * ol_txrx_mgmt_tx_desc_alloc() - Allocate and initialize a tx descriptor
 *				 for a management frame
371   * @pdev: the data physical device sending the data
372   * @vdev: the virtual device sending the data
373   * @tx_mgmt_frm: the tx management frame
374   * @tx_msdu_info: the tx meta data
375   *
376   * Return: the tx descriptor
377   */
378  struct ol_tx_desc_t *
ol_txrx_mgmt_tx_desc_alloc(
380  	struct ol_txrx_pdev_t *pdev,
381  	struct ol_txrx_vdev_t *vdev,
382  	qdf_nbuf_t tx_mgmt_frm,
383  	struct ol_txrx_msdu_info_t *tx_msdu_info)
384  {
385  	struct ol_tx_desc_t *tx_desc;
386  
387  	tx_msdu_info->htt.action.tx_comp_req = 1;
388  	tx_desc = ol_tx_desc_hl(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
389  	return tx_desc;
390  }
391  
392  /**
393   * ol_txrx_mgmt_send_frame() - send a management frame
394   * @vdev: virtual device sending the frame
395   * @tx_desc: tx desc
396   * @tx_mgmt_frm: management frame to send
397   * @tx_msdu_info: the tx meta data
 * @chanfreq: channel frequency for the frame (set in the HTT tx descriptor)
399   *
400   * Return:
401   *      0 -> the frame is accepted for transmission, -OR-
402   *      1 -> the frame was not accepted
403   */
int ol_txrx_mgmt_send_frame(
405  	struct ol_txrx_vdev_t *vdev,
406  	struct ol_tx_desc_t *tx_desc,
407  	qdf_nbuf_t tx_mgmt_frm,
408  	struct ol_txrx_msdu_info_t *tx_msdu_info,
409  	uint16_t chanfreq)
410  {
411  	struct ol_txrx_pdev_t *pdev = vdev->pdev;
412  	struct ol_tx_frms_queue_t *txq;
413  	int status = 1;
414  
415  	/*
416  	 * 1.  Look up the peer and queue the frame in the peer's mgmt queue.
417  	 * 2.  Invoke the download scheduler.
418  	 */
419  	txq = ol_tx_classify_mgmt(vdev, tx_desc, tx_mgmt_frm, tx_msdu_info);
420  	if (!txq) {
421  		/* TXRX_STATS_MSDU_LIST_INCR(vdev->pdev, tx.dropped.no_txq,
422  		 *			     msdu);
423  		 */
424  		qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
425  		ol_tx_desc_frame_free_nonstd(vdev->pdev, tx_desc,
426  					     1 /* error */);
427  		goto out; /* can't accept the tx mgmt frame */
428  	}
429  	/* Initialize the HTT tx desc l2 header offset field.
430  	 * Even though tx encap does not apply to mgmt frames,
431  	 * htt_tx_desc_mpdu_header still needs to be called,
432  	 * to specify that there was no L2 header added by tx encap,
433  	 * so the frame's length does not need to be adjusted to account for
434  	 * an added L2 header.
435  	 */
436  	htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
437  	if (qdf_unlikely(htt_tx_desc_init(
438  			pdev->htt_pdev, tx_desc->htt_tx_desc,
439  			tx_desc->htt_tx_desc_paddr,
440  			ol_tx_desc_id(pdev, tx_desc),
441  			tx_mgmt_frm,
442  			&tx_msdu_info->htt, &tx_msdu_info->tso_info, NULL, 0)))
443  		goto out;
444  	htt_tx_desc_display(tx_desc->htt_tx_desc);
445  	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
446  
447  	ol_tx_enqueue(vdev->pdev, txq, tx_desc, tx_msdu_info);
448  	ol_tx_sched(vdev->pdev);
449  	status = 0;
450  out:
451  	if (tx_msdu_info->peer) {
452  		/* remove the peer reference added above */
453  		ol_txrx_peer_release_ref(tx_msdu_info->peer,
454  					 PEER_DEBUG_ID_OL_INTERNAL);
455  	}
456  
457  	return status;
458  }
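/*
 * Minimal usage sketch of the two-step management tx path above (illustrative
 * only; the caller context, tx_msdu_info setup and error handling are
 * assumed):
 *
 *   tx_desc = ol_txrx_mgmt_tx_desc_alloc(pdev, vdev, mgmt_nbuf,
 *                                        &tx_msdu_info);
 *   if (tx_desc)
 *           status = ol_txrx_mgmt_send_frame(vdev, tx_desc, mgmt_nbuf,
 *                                            &tx_msdu_info, chanfreq);
 */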
459  
/**
 * ol_tx_hl_base() - send tx frames for an HL system.
 * @vdev: the virtual device sending the data
 * @tx_spec: indicate what non-standard transmission actions to apply
 * @msdu_list: the tx frames to send
 * @tx_comp_req: whether a tx completion notification is required
 * @call_sched: will schedule the tx if true
 *
 * Return: NULL if all MSDUs are accepted; otherwise the list of MSDUs
 *         that could not be accepted (e.g. on tx descriptor exhaustion)
 */
470  static inline qdf_nbuf_t
ol_tx_hl_base(
472  	ol_txrx_vdev_handle vdev,
473  	enum ol_tx_spec tx_spec,
474  	qdf_nbuf_t msdu_list,
475  	int tx_comp_req,
476  	bool call_sched)
477  {
478  	struct ol_txrx_pdev_t *pdev = vdev->pdev;
479  	qdf_nbuf_t msdu = msdu_list;
480  	struct ol_txrx_msdu_info_t tx_msdu_info;
481  	struct ocb_tx_ctrl_hdr_t tx_ctrl;
482  	htt_pdev_handle htt_pdev = pdev->htt_pdev;
483  
484  	tx_msdu_info.tso_info.is_tso = 0;
485  
486  	/*
487  	 * The msdu_list variable could be used instead of the msdu var,
488  	 * but just to clarify which operations are done on a single MSDU
489  	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
490  	 * within the list.
491  	 */
492  	while (msdu) {
493  		qdf_nbuf_t next;
494  		struct ol_tx_frms_queue_t *txq;
495  		struct ol_tx_desc_t *tx_desc = NULL;
496  
497  		qdf_mem_zero(&tx_ctrl, sizeof(tx_ctrl));
498  		tx_msdu_info.peer = NULL;
499  		/*
500  		 * The netbuf will get stored into a (peer-TID) tx queue list
501  		 * inside the ol_tx_classify_store function or else dropped,
502  		 * so store the next pointer immediately.
503  		 */
504  		next = qdf_nbuf_next(msdu);
505  
506  		tx_desc = ol_tx_hl_desc_alloc(pdev, vdev, msdu, &tx_msdu_info);
507  
508  		if (!tx_desc) {
509  			/*
510  			 * If we're out of tx descs, there's no need to try
511  			 * to allocate tx descs for the remaining MSDUs.
512  			 */
513  			TXRX_STATS_MSDU_LIST_INCR(pdev, tx.dropped.host_reject,
514  						  msdu);
515  			return msdu; /* the list of unaccepted MSDUs */
516  		}
517  
518  		/* OL_TXRX_PROT_AN_LOG(pdev->prot_an_tx_sent, msdu);*/
519  
520  		qdf_dp_trace_log_pkt(vdev->vdev_id, msdu, QDF_TX,
521  				     QDF_TRACE_DEFAULT_PDEV_ID,
522  				     vdev->qdf_opmode);
523  		DPTRACE(qdf_dp_trace_data_pkt(msdu, QDF_TRACE_DEFAULT_PDEV_ID,
524  					      QDF_DP_TRACE_TX_PACKET_RECORD,
525  					      tx_desc->id, QDF_TX));
526  
527  		if (tx_spec != OL_TX_SPEC_STD) {
528  #if defined(FEATURE_WLAN_TDLS)
529  			if (tx_spec & OL_TX_SPEC_NO_FREE) {
530  				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
531  			} else if (tx_spec & OL_TX_SPEC_TSO) {
532  #else
533  				if (tx_spec & OL_TX_SPEC_TSO) {
534  #endif
535  					tx_desc->pkt_type = OL_TX_FRM_TSO;
536  				}
537  				if (ol_txrx_tx_is_raw(tx_spec)) {
538  					/* CHECK THIS: does this need
539  					 * to happen after htt_tx_desc_init?
540  					 */
541  					/* different types of raw frames */
542  					u_int8_t sub_type =
543  						ol_txrx_tx_raw_subtype(
544  								tx_spec);
545  					htt_tx_desc_type(htt_pdev,
546  							 tx_desc->htt_tx_desc,
547  							 htt_pkt_type_raw,
548  							 sub_type);
549  				}
550  			}
551  
552  			tx_msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
553  			tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
554  			tx_msdu_info.htt.info.frame_type = htt_frm_type_data;
555  			tx_msdu_info.htt.info.l2_hdr_type = pdev->htt_pkt_type;
556  
557  			if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(msdu)
558  									== 1) {
559  				tx_msdu_info.htt.action.tx_comp_req = 1;
560  				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
561  			} else {
562  				tx_msdu_info.htt.action.tx_comp_req =
563  								tx_comp_req;
564  			}
565  
566  			/* If the vdev is in OCB mode,
567  			 * parse the tx control header.
568  			 */
569  			if (vdev->opmode == wlan_op_mode_ocb) {
570  				if (!parse_ocb_tx_header(msdu, &tx_ctrl)) {
					/* There was an error parsing
					 * the header. Skip this packet.
					 */
574  					goto MSDU_LOOP_BOTTOM;
575  				}
576  			}
577  
578  			txq = ol_tx_classify(vdev, tx_desc, msdu,
579  					     &tx_msdu_info);
580  
581  			/* initialize the HW tx descriptor */
582  			htt_tx_desc_init(
583  					pdev->htt_pdev, tx_desc->htt_tx_desc,
584  					tx_desc->htt_tx_desc_paddr,
585  					ol_tx_desc_id(pdev, tx_desc),
586  					msdu,
587  					&tx_msdu_info.htt,
588  					&tx_msdu_info.tso_info,
589  					&tx_ctrl,
590  					vdev->opmode == wlan_op_mode_ocb);
591  
592  			if ((!txq) || TX_FILTER_CHECK(&tx_msdu_info)) {
593  				/* drop this frame,
594  				 * but try sending subsequent frames
595  				 */
596  				/* TXRX_STATS_MSDU_LIST_INCR(pdev,
597  				 * tx.dropped.no_txq, msdu);
598  				 */
599  				qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
600  				ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
601  				if (tx_msdu_info.peer) {
602  					/* remove the peer reference
603  					 * added above
604  					 */
605  					ol_txrx_peer_release_ref(
606  						tx_msdu_info.peer,
607  						PEER_DEBUG_ID_OL_INTERNAL);
608  				}
609  				goto MSDU_LOOP_BOTTOM;
610  			}
611  
612  			if (tx_msdu_info.peer) {
				/*
				 * If the peer is in a disconnected state,
				 * drop all data packets destined for that
				 * peer.
				 */
617  				if (tx_msdu_info.peer->state ==
618  						OL_TXRX_PEER_STATE_DISC) {
619  					qdf_atomic_inc(
620  						&pdev->tx_queue.rsrc_cnt);
621  					ol_tx_desc_frame_free_nonstd(pdev,
622  								     tx_desc,
623  								     1);
624  					ol_txrx_peer_release_ref(
625  						tx_msdu_info.peer,
626  						PEER_DEBUG_ID_OL_INTERNAL);
627  					msdu = next;
628  					continue;
629  				} else if (tx_msdu_info.peer->state !=
630  						OL_TXRX_PEER_STATE_AUTH) {
631  					if (tx_msdu_info.htt.info.ethertype !=
632  						ETHERTYPE_PAE &&
633  						tx_msdu_info.htt.info.ethertype
634  							!= ETHERTYPE_WAI) {
635  						qdf_atomic_inc(
636  							&pdev->tx_queue.
637  								rsrc_cnt);
638  						ol_tx_desc_frame_free_nonstd(
639  								pdev,
640  								tx_desc, 1);
641  						ol_txrx_peer_release_ref(
642  						 tx_msdu_info.peer,
643  						 PEER_DEBUG_ID_OL_INTERNAL);
644  						msdu = next;
645  						continue;
646  					}
647  				}
648  			}
			/*
			 * Initialize the HTT tx desc L2 header offset field.
			 * htt_tx_desc_mpdu_header() needs to be called to make
			 * sure the L2 header size is initialized correctly,
			 * to handle cases where TX encap is disabled or where
			 * TX encap fails to add an L2 header.
			 */
656  			htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
657  
658  			/*
659  			 * Note: when the driver is built without support for
			 * SW tx encap, the following call is a no-op.
661  			 * When the driver is built with support for SW tx
662  			 * encap, it performs encap, and if an error is
663  			 * encountered, jumps to the MSDU_LOOP_BOTTOM label.
664  			 */
665  			if (ol_tx_encap_wrapper(pdev, vdev, tx_desc, msdu,
666  						&tx_msdu_info))
667  				goto MSDU_LOOP_BOTTOM;
668  
669  			/*
670  			 * If debug display is enabled, show the meta-data
671  			 * being downloaded to the target via the
672  			 * HTT tx descriptor.
673  			 */
674  			htt_tx_desc_display(tx_desc->htt_tx_desc);
675  
676  			ol_tx_enqueue(pdev, txq, tx_desc, &tx_msdu_info);
677  			if (tx_msdu_info.peer) {
678  				OL_TX_PEER_STATS_UPDATE(tx_msdu_info.peer,
679  							msdu);
680  				/* remove the peer reference added above */
681  				ol_txrx_peer_release_ref
682  						(tx_msdu_info.peer,
683  						 PEER_DEBUG_ID_OL_INTERNAL);
684  			}
685  MSDU_LOOP_BOTTOM:
686  			msdu = next;
687  		}
688  
689  		if (call_sched)
690  			ol_tx_sched(pdev);
691  		return NULL; /* all MSDUs were accepted */
692  }
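/*
 * Per-MSDU pipeline implemented by ol_tx_hl_base() above, summarized for
 * reference (descriptive only, no new behaviour):
 *   1. ol_tx_hl_desc_alloc() - reserve a tx descriptor (stop on exhaustion)
 *   2. ol_tx_classify()      - pick the (peer, TID) tx frames queue
 *   3. htt_tx_desc_init()    - fill in the HTT/target tx descriptor
 *   4. ol_tx_enqueue()       - queue the frame on the selected txq
 *   5. ol_tx_sched()         - optionally kick the download scheduler
 */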
693  
694  #ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
695  
696  /**
697   * ol_tx_pdev_reset_driver_del_ack() - reset driver delayed ack enabled flag
698   * @soc_hdl: soc handle
699   * @pdev_id: datapath pdev identifier
700   *
701   * Return: none
702   */
703  void
704  ol_tx_pdev_reset_driver_del_ack(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
705  {
706  	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
707  	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
708  	struct ol_txrx_vdev_t *vdev;
709  
710  	if (!pdev)
711  		return;
712  
713  	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
714  		vdev->driver_del_ack_enabled = false;
715  
716  		dp_debug("vdev_id %d driver_del_ack_enabled %d",
717  			 vdev->vdev_id, vdev->driver_del_ack_enabled);
718  	}
719  }
720  
721  /**
722   * ol_tx_vdev_set_driver_del_ack_enable() - set driver delayed ack enabled flag
723   * @soc_hdl: datapath soc handle
724   * @vdev_id: vdev id
725   * @rx_packets: number of rx packets
726   * @time_in_ms: time in ms
727   * @high_th: high threshold
728   * @low_th: low threshold
729   *
730   * Return: none
731   */
732  void
733  ol_tx_vdev_set_driver_del_ack_enable(struct cdp_soc_t *soc_hdl,
734  				     uint8_t vdev_id,
735  				     unsigned long rx_packets,
736  				     uint32_t time_in_ms,
737  				     uint32_t high_th,
738  				     uint32_t low_th)
739  {
740  	struct ol_txrx_vdev_t *vdev =
741  			(struct ol_txrx_vdev_t *)
742  			ol_txrx_get_vdev_from_vdev_id(vdev_id);
743  	bool old_driver_del_ack_enabled;
744  
745  	if ((!vdev) || (low_th > high_th))
746  		return;
747  
748  	old_driver_del_ack_enabled = vdev->driver_del_ack_enabled;
749  	if (rx_packets > high_th)
750  		vdev->driver_del_ack_enabled = true;
751  	else if (rx_packets < low_th)
752  		vdev->driver_del_ack_enabled = false;
753  
754  	if (old_driver_del_ack_enabled != vdev->driver_del_ack_enabled) {
755  		dp_debug("vdev_id %d driver_del_ack_enabled %d rx_packets %ld time_in_ms %d high_th %d low_th %d",
756  			 vdev->vdev_id, vdev->driver_del_ack_enabled,
757  			 rx_packets, time_in_ms, high_th, low_th);
758  	}
759  }
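/*
 * Note on the hysteresis above (descriptive, based only on the code in this
 * function): the decision is driven purely by rx_packets versus the two
 * thresholds; counts above high_th enable delayed-ack handling, counts
 * below low_th disable it, and values in between keep the previous state.
 * time_in_ms is only used in the debug print here.
 */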
760  
761  /**
762   * ol_tx_hl_send_all_tcp_ack() - send all queued tcp ack packets
763   * @vdev: vdev handle
764   *
765   * Return: none
766   */
767  void ol_tx_hl_send_all_tcp_ack(struct ol_txrx_vdev_t *vdev)
768  {
769  	int i;
770  	struct tcp_stream_node *tcp_node_list;
771  	struct tcp_stream_node *temp;
772  	struct ol_txrx_pdev_t *pdev = vdev->pdev;
773  
774  	for (i = 0; i < OL_TX_HL_DEL_ACK_HASH_SIZE; i++) {
775  		tcp_node_list = NULL;
776  		qdf_spin_lock_bh(&vdev->tcp_ack_hash.node[i].hash_node_lock);
777  		if (vdev->tcp_ack_hash.node[i].no_of_entries)
778  			tcp_node_list = vdev->tcp_ack_hash.node[i].head;
779  
780  		vdev->tcp_ack_hash.node[i].no_of_entries = 0;
781  		vdev->tcp_ack_hash.node[i].head = NULL;
782  		qdf_spin_unlock_bh(&vdev->tcp_ack_hash.node[i].hash_node_lock);
783  
784  		/* Send all packets */
785  		while (tcp_node_list) {
786  			int tx_comp_req = pdev->cfg.default_tx_comp_req ||
787  						pdev->cfg.request_tx_comp;
788  			qdf_nbuf_t msdu_list;
789  
790  			temp = tcp_node_list;
791  			tcp_node_list = temp->next;
792  
793  			msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
794  						  temp->head,
795  						  tx_comp_req, false);
796  			if (msdu_list)
797  				qdf_nbuf_tx_free(msdu_list, 1/*error*/);
798  			ol_txrx_vdev_free_tcp_node(vdev, temp);
799  		}
800  	}
801  	ol_tx_sched(vdev->pdev);
802  }
803  
804  /**
805   * tcp_del_ack_tasklet() - tasklet function to send ack packets
806   * @data: vdev handle
807   *
808   * Return: none
809   */
810  void tcp_del_ack_tasklet(void *data)
811  {
812  	struct ol_txrx_vdev_t *vdev = data;
813  
814  	ol_tx_hl_send_all_tcp_ack(vdev);
815  }
816  
817  /**
818   * ol_tx_get_stream_id() - get stream_id from packet info
819   * @info: packet info
820   *
821   * Return: stream_id
822   */
823  uint16_t ol_tx_get_stream_id(struct packet_info *info)
824  {
825  	return ((info->dst_port + info->dst_ip + info->src_port + info->src_ip)
826  					 & (OL_TX_HL_DEL_ACK_HASH_SIZE - 1));
827  }
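/*
 * Illustrative note: the bitwise AND above only behaves like a modulo when
 * OL_TX_HL_DEL_ACK_HASH_SIZE is a power of two, which the hash table
 * definition is assumed to guarantee.  For example, with a hash size of 16
 * the sum of the ports and IP addresses simply maps to (sum & 0xf).
 */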
828  
829  /**
830   * ol_tx_is_tcp_ack() - check whether the packet is tcp ack frame
831   * @msdu: packet
832   *
833   * Return: true if the packet is tcp ack frame
834   */
835  static bool
836  ol_tx_is_tcp_ack(qdf_nbuf_t msdu)
837  {
838  	uint16_t ether_type;
839  	uint8_t  protocol;
840  	uint8_t  flag, ip_header_len, tcp_header_len;
841  	uint32_t seg_len;
842  	uint8_t  *skb_data;
843  	uint32_t skb_len;
844  	bool tcp_acked = false;
845  	uint32_t tcp_header_off;
846  
847  	qdf_nbuf_peek_header(msdu, &skb_data, &skb_len);
848  	if (skb_len < (QDF_NBUF_TRAC_IPV4_OFFSET +
849  	    QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
850  	    QDF_NBUF_TRAC_TCP_FLAGS_OFFSET))
851  		goto exit;
852  
853  	ether_type = (uint16_t)(*(uint16_t *)
854  			(skb_data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
	/* single-byte field: read it as a uint8_t to stay endian-safe */
	protocol = (uint8_t)(*(uint8_t *)
			(skb_data + QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
857  
858  	if ((QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE) == ether_type) &&
859  	    (protocol == QDF_NBUF_TRAC_TCP_TYPE)) {
860  		ip_header_len = ((uint8_t)(*(uint8_t *)
861  				(skb_data + QDF_NBUF_TRAC_IPV4_OFFSET)) &
862  				QDF_NBUF_TRAC_IPV4_HEADER_MASK) << 2;
863  		tcp_header_off = QDF_NBUF_TRAC_IPV4_OFFSET + ip_header_len;
864  
865  		tcp_header_len = ((uint8_t)(*(uint8_t *)
866  			(skb_data + tcp_header_off +
867  			QDF_NBUF_TRAC_TCP_HEADER_LEN_OFFSET))) >> 2;
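		/*
		 * Descriptive note: the IPv4 header length is IHL * 4, i.e.
		 * (low nibble) << 2, and the TCP header length is the data
		 * offset * 4.  Since the data offset occupies the high nibble
		 * and the reserved low bits are assumed to be zero,
		 * (byte >> 4) * 4 collapses to the byte >> 2 used above.
		 * The same trick is used in ol_tx_get_packet_info() below.
		 */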
868  		seg_len = skb_len - tcp_header_len - tcp_header_off;
869  		flag = (uint8_t)(*(uint8_t *)
870  			(skb_data + tcp_header_off +
871  			QDF_NBUF_TRAC_TCP_FLAGS_OFFSET));
872  
873  		if ((flag == QDF_NBUF_TRAC_TCP_ACK_MASK) && (seg_len == 0))
874  			tcp_acked = true;
875  	}
876  
877  exit:
878  
879  	return tcp_acked;
880  }
881  
882  /**
883   * ol_tx_get_packet_info() - update packet info for passed msdu
884   * @msdu: packet
885   * @info: packet info
886   *
887   * Return: none
888   */
889  void ol_tx_get_packet_info(qdf_nbuf_t msdu, struct packet_info *info)
890  {
891  	uint16_t ether_type;
892  	uint8_t  protocol;
893  	uint8_t  flag, ip_header_len, tcp_header_len;
894  	uint32_t seg_len;
895  	uint8_t  *skb_data;
896  	uint32_t skb_len;
897  	uint32_t tcp_header_off;
898  
899  	info->type = NO_TCP_PKT;
900  
901  	qdf_nbuf_peek_header(msdu, &skb_data, &skb_len);
902  	if (skb_len < (QDF_NBUF_TRAC_IPV4_OFFSET +
903  	    QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
904  	    QDF_NBUF_TRAC_TCP_FLAGS_OFFSET))
905  		return;
906  
907  	ether_type = (uint16_t)(*(uint16_t *)
908  			(skb_data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
	/* single-byte field: read it as a uint8_t to stay endian-safe */
	protocol = (uint8_t)(*(uint8_t *)
			(skb_data + QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
911  
912  	if ((QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE) == ether_type) &&
913  	    (protocol == QDF_NBUF_TRAC_TCP_TYPE)) {
914  		ip_header_len = ((uint8_t)(*(uint8_t *)
915  				(skb_data + QDF_NBUF_TRAC_IPV4_OFFSET)) &
916  				QDF_NBUF_TRAC_IPV4_HEADER_MASK) << 2;
917  		tcp_header_off = QDF_NBUF_TRAC_IPV4_OFFSET + ip_header_len;
918  
919  		tcp_header_len = ((uint8_t)(*(uint8_t *)
920  			(skb_data + tcp_header_off +
921  			QDF_NBUF_TRAC_TCP_HEADER_LEN_OFFSET))) >> 2;
922  		seg_len = skb_len - tcp_header_len - tcp_header_off;
923  		flag = (uint8_t)(*(uint8_t *)
924  			(skb_data + tcp_header_off +
925  			QDF_NBUF_TRAC_TCP_FLAGS_OFFSET));
926  
927  		info->src_ip = QDF_SWAP_U32((uint32_t)(*(uint32_t *)
928  			(skb_data + QDF_NBUF_TRAC_IPV4_SRC_ADDR_OFFSET)));
929  		info->dst_ip = QDF_SWAP_U32((uint32_t)(*(uint32_t *)
930  			(skb_data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET)));
931  		info->src_port = QDF_SWAP_U16((uint16_t)(*(uint16_t *)
932  				(skb_data + tcp_header_off +
933  				QDF_NBUF_TRAC_TCP_SPORT_OFFSET)));
934  		info->dst_port = QDF_SWAP_U16((uint16_t)(*(uint16_t *)
935  				(skb_data + tcp_header_off +
936  				QDF_NBUF_TRAC_TCP_DPORT_OFFSET)));
937  		info->stream_id = ol_tx_get_stream_id(info);
938  
939  		if ((flag == QDF_NBUF_TRAC_TCP_ACK_MASK) && (seg_len == 0)) {
940  			info->type = TCP_PKT_ACK;
941  			info->ack_number = (uint32_t)(*(uint32_t *)
942  				(skb_data + tcp_header_off +
943  				QDF_NBUF_TRAC_TCP_ACK_OFFSET));
944  			info->ack_number = QDF_SWAP_U32(info->ack_number);
945  		} else {
946  			info->type = TCP_PKT_NO_ACK;
947  		}
948  	}
949  }
950  
951  /**
952   * ol_tx_hl_find_and_send_tcp_stream() - find and send tcp stream for passed
953   *                                       stream info
954   * @vdev: vdev handle
955   * @info: packet info
956   *
957   * Return: none
958   */
959  void ol_tx_hl_find_and_send_tcp_stream(struct ol_txrx_vdev_t *vdev,
960  				       struct packet_info *info)
961  {
962  	uint8_t no_of_entries;
963  	struct tcp_stream_node *node_to_be_remove = NULL;
964  	struct ol_txrx_pdev_t *pdev = vdev->pdev;
965  
966  	/* remove tcp node from hash */
967  	qdf_spin_lock_bh(&vdev->tcp_ack_hash.node[info->stream_id].
968  			hash_node_lock);
969  
970  	no_of_entries = vdev->tcp_ack_hash.node[info->stream_id].
971  			no_of_entries;
972  	if (no_of_entries > 1) {
973  		/* collision case */
974  		struct tcp_stream_node *head =
975  			vdev->tcp_ack_hash.node[info->stream_id].head;
976  		struct tcp_stream_node *temp;
977  
978  		if ((head->dst_ip == info->dst_ip) &&
979  		    (head->src_ip == info->src_ip) &&
980  		    (head->src_port == info->src_port) &&
981  		    (head->dst_port == info->dst_port)) {
982  			node_to_be_remove = head;
983  			vdev->tcp_ack_hash.node[info->stream_id].head =
984  				head->next;
985  			vdev->tcp_ack_hash.node[info->stream_id].
986  				no_of_entries--;
987  		} else {
988  			temp = head;
989  			while (temp->next) {
990  				if ((temp->next->dst_ip == info->dst_ip) &&
991  				    (temp->next->src_ip == info->src_ip) &&
992  				    (temp->next->src_port == info->src_port) &&
993  				    (temp->next->dst_port == info->dst_port)) {
994  					node_to_be_remove = temp->next;
995  					temp->next = temp->next->next;
996  					vdev->tcp_ack_hash.
997  						node[info->stream_id].
998  						no_of_entries--;
999  					break;
1000  				}
1001  				temp = temp->next;
1002  			}
1003  		}
1004  	} else if (no_of_entries == 1) {
1005  		/* Only one tcp_node */
1006  		node_to_be_remove =
1007  			 vdev->tcp_ack_hash.node[info->stream_id].head;
1008  		vdev->tcp_ack_hash.node[info->stream_id].head = NULL;
1009  		vdev->tcp_ack_hash.node[info->stream_id].no_of_entries = 0;
1010  	}
1011  	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.
1012  			  node[info->stream_id].hash_node_lock);
1013  
1014  	/* send packets */
1015  	if (node_to_be_remove) {
1016  		int tx_comp_req = pdev->cfg.default_tx_comp_req ||
1017  					pdev->cfg.request_tx_comp;
1018  		qdf_nbuf_t msdu_list;
1019  
1020  		msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
1021  					  node_to_be_remove->head,
1022  					  tx_comp_req, true);
1023  		if (msdu_list)
1024  			qdf_nbuf_tx_free(msdu_list, 1/*error*/);
1025  		ol_txrx_vdev_free_tcp_node(vdev, node_to_be_remove);
1026  	}
1027  }
1028  
1029  static struct tcp_stream_node *
1030  ol_tx_hl_rep_tcp_ack(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu,
1031  		     struct packet_info *info, bool *is_found,
1032  		     bool *start_timer)
1033  {
1034  	struct tcp_stream_node *node_to_be_remove = NULL;
1035  	struct tcp_stream_node *head =
1036  		 vdev->tcp_ack_hash.node[info->stream_id].head;
1037  	struct tcp_stream_node *temp;
1038  
1039  	if ((head->dst_ip == info->dst_ip) &&
1040  	    (head->src_ip == info->src_ip) &&
1041  	    (head->src_port == info->src_port) &&
1042  	    (head->dst_port == info->dst_port)) {
1043  		*is_found = true;
1044  		if ((head->ack_number < info->ack_number) &&
1045  		    (head->no_of_ack_replaced <
1046  		    ol_cfg_get_del_ack_count_value(vdev->pdev->ctrl_pdev))) {
1047  			/* replace ack packet */
1048  			qdf_nbuf_tx_free(head->head, 1);
1049  			head->head = msdu;
1050  			head->ack_number = info->ack_number;
1051  			head->no_of_ack_replaced++;
1052  			*start_timer = true;
1053  
1054  			vdev->no_of_tcpack_replaced++;
1055  
1056  			if (head->no_of_ack_replaced ==
1057  			    ol_cfg_get_del_ack_count_value(
1058  			    vdev->pdev->ctrl_pdev)) {
1059  				node_to_be_remove = head;
1060  				vdev->tcp_ack_hash.node[info->stream_id].head =
1061  					head->next;
1062  				vdev->tcp_ack_hash.node[info->stream_id].
1063  					no_of_entries--;
1064  			}
1065  		} else {
1066  			/* append and send packets */
1067  			head->head->next = msdu;
1068  			node_to_be_remove = head;
1069  			vdev->tcp_ack_hash.node[info->stream_id].head =
1070  				head->next;
1071  			vdev->tcp_ack_hash.node[info->stream_id].
1072  				no_of_entries--;
1073  		}
1074  	} else {
1075  		temp = head;
1076  		while (temp->next) {
1077  			if ((temp->next->dst_ip == info->dst_ip) &&
1078  			    (temp->next->src_ip == info->src_ip) &&
1079  			    (temp->next->src_port == info->src_port) &&
1080  			    (temp->next->dst_port == info->dst_port)) {
1081  				*is_found = true;
1082  				if ((temp->next->ack_number <
1083  					info->ack_number) &&
1084  				    (temp->next->no_of_ack_replaced <
1085  					 ol_cfg_get_del_ack_count_value(
1086  					 vdev->pdev->ctrl_pdev))) {
1087  					/* replace ack packet */
1088  					qdf_nbuf_tx_free(temp->next->head, 1);
1089  					temp->next->head  = msdu;
1090  					temp->next->ack_number =
1091  						info->ack_number;
1092  					temp->next->no_of_ack_replaced++;
1093  					*start_timer = true;
1094  
1095  					vdev->no_of_tcpack_replaced++;
1096  
1097  					if (temp->next->no_of_ack_replaced ==
1098  					   ol_cfg_get_del_ack_count_value(
1099  					   vdev->pdev->ctrl_pdev)) {
1100  						node_to_be_remove = temp->next;
1101  						temp->next = temp->next->next;
1102  						vdev->tcp_ack_hash.
1103  							node[info->stream_id].
1104  							no_of_entries--;
1105  					}
1106  				} else {
1107  					/* append and send packets */
1108  					temp->next->head->next = msdu;
1109  					node_to_be_remove = temp->next;
1110  					temp->next = temp->next->next;
1111  					vdev->tcp_ack_hash.
1112  						node[info->stream_id].
1113  						no_of_entries--;
1114  				}
1115  				break;
1116  			}
1117  			temp = temp->next;
1118  		}
1119  	}
1120  	return node_to_be_remove;
1121  }
1122  
1123  /**
1124   * ol_tx_hl_find_and_replace_tcp_ack() - find and replace tcp ack packet for
1125   *                                       passed packet info
1126   * @vdev: vdev handle
1127   * @msdu: packet
1128   * @info: packet info
1129   *
1130   * Return: none
1131   */
1132  void ol_tx_hl_find_and_replace_tcp_ack(struct ol_txrx_vdev_t *vdev,
1133  				       qdf_nbuf_t msdu,
1134  				       struct packet_info *info)
1135  {
1136  	uint8_t no_of_entries;
1137  	struct tcp_stream_node *node_to_be_remove = NULL;
1138  	bool is_found = false, start_timer = false;
1139  	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1140  
1141  	/* replace ack if required or send packets */
1142  	qdf_spin_lock_bh(&vdev->tcp_ack_hash.node[info->stream_id].
1143  			hash_node_lock);
1144  
1145  	no_of_entries = vdev->tcp_ack_hash.node[info->stream_id].no_of_entries;
1146  	if (no_of_entries > 0) {
1147  		node_to_be_remove = ol_tx_hl_rep_tcp_ack(vdev, msdu, info,
1148  							 &is_found,
1149  							 &start_timer);
1150  	}
1151  
1152  	if (no_of_entries == 0 || !is_found) {
1153  		/* Alloc new tcp node */
1154  		struct tcp_stream_node *new_node;
1155  
1156  		new_node = ol_txrx_vdev_alloc_tcp_node(vdev);
1157  		if (!new_node) {
1158  			qdf_spin_unlock_bh(&vdev->tcp_ack_hash.
1159  					  node[info->stream_id].hash_node_lock);
1160  			dp_alert("Malloc failed");
1161  			return;
1162  		}
1163  		new_node->stream_id = info->stream_id;
1164  		new_node->dst_ip = info->dst_ip;
1165  		new_node->src_ip = info->src_ip;
1166  		new_node->dst_port = info->dst_port;
1167  		new_node->src_port = info->src_port;
1168  		new_node->ack_number = info->ack_number;
1169  		new_node->head = msdu;
1170  		new_node->next = NULL;
1171  		new_node->no_of_ack_replaced = 0;
1172  
1173  		start_timer = true;
1174  		/* insert new_node */
1175  		if (!vdev->tcp_ack_hash.node[info->stream_id].head) {
1176  			vdev->tcp_ack_hash.node[info->stream_id].head =
1177  				new_node;
1178  			vdev->tcp_ack_hash.node[info->stream_id].
1179  				no_of_entries = 1;
1180  		} else {
1181  			struct tcp_stream_node *temp =
1182  				 vdev->tcp_ack_hash.node[info->stream_id].head;
1183  			while (temp->next)
1184  				temp = temp->next;
1185  
1186  			temp->next = new_node;
1187  			vdev->tcp_ack_hash.node[info->stream_id].
1188  				no_of_entries++;
1189  		}
1190  	}
1191  	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.node[info->stream_id].
1192  			  hash_node_lock);
1193  
1194  	/* start timer */
1195  	if (start_timer &&
1196  	    (!qdf_atomic_read(&vdev->tcp_ack_hash.is_timer_running))) {
1197  		qdf_hrtimer_start(&vdev->tcp_ack_hash.timer,
1198  				  qdf_ns_to_ktime((
1199  						ol_cfg_get_del_ack_timer_value(
1200  						vdev->pdev->ctrl_pdev) *
1201  						1000000)),
1202  			__QDF_HRTIMER_MODE_REL);
1203  		qdf_atomic_set(&vdev->tcp_ack_hash.is_timer_running, 1);
1204  	}
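	/*
	 * Note: ol_cfg_get_del_ack_timer_value() is assumed to return the
	 * timeout in milliseconds; the * 1000000 above converts it to
	 * nanoseconds for qdf_ns_to_ktime().
	 */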
1205  
1206  	/* send packets */
1207  	if (node_to_be_remove) {
1208  		int tx_comp_req = pdev->cfg.default_tx_comp_req ||
1209  					pdev->cfg.request_tx_comp;
1210  		qdf_nbuf_t msdu_list = NULL;
1211  
1212  		msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
1213  					  node_to_be_remove->head,
1214  					  tx_comp_req, true);
1215  		if (msdu_list)
1216  			qdf_nbuf_tx_free(msdu_list, 1/*error*/);
1217  		ol_txrx_vdev_free_tcp_node(vdev, node_to_be_remove);
1218  	}
1219  }
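/*
 * Overall TCP delayed-ack flow implemented by the functions above
 * (descriptive summary, no new behaviour):
 *
 *   ol_tx_hl()
 *     -> ol_tx_get_packet_info()               classify the outgoing frame
 *     -> TCP_PKT_ACK:    ol_tx_hl_find_and_replace_tcp_ack()
 *                          hold/replace the pure ack and arm the hrtimer
 *     -> TCP_PKT_NO_ACK: ol_tx_hl_find_and_send_tcp_stream()
 *                          flush any ack held for that stream first
 *   hrtimer expiry -> tcp_del_ack_tasklet() -> ol_tx_hl_send_all_tcp_ack()
 */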
1220  
1221  /**
1222   * ol_tx_hl_vdev_tcp_del_ack_timer() - delayed ack timer function
1223   * @timer: timer handle
1224   *
 * Return: qdf hrtimer restart status (QDF_HRTIMER_NORESTART here)
1226   */
1227  enum qdf_hrtimer_restart_status
1228  ol_tx_hl_vdev_tcp_del_ack_timer(qdf_hrtimer_data_t *timer)
1229  {
1230  	struct ol_txrx_vdev_t *vdev = qdf_container_of(timer,
1231  						       struct ol_txrx_vdev_t,
1232  						       tcp_ack_hash.timer);
1233  	enum qdf_hrtimer_restart_status ret = QDF_HRTIMER_NORESTART;
1234  
1235  	qdf_sched_bh(&vdev->tcp_ack_hash.tcp_del_ack_tq);
1236  	qdf_atomic_set(&vdev->tcp_ack_hash.is_timer_running, 0);
1237  	return ret;
1238  }
1239  
1240  /**
1241   * ol_tx_hl_del_ack_queue_flush_all() - drop all queued packets
1242   * @vdev: vdev handle
1243   *
1244   * Return: none
1245   */
1246  void ol_tx_hl_del_ack_queue_flush_all(struct ol_txrx_vdev_t *vdev)
1247  {
1248  	int i;
1249  	struct tcp_stream_node *tcp_node_list;
1250  	struct tcp_stream_node *temp;
1251  
1252  	qdf_hrtimer_cancel(&vdev->tcp_ack_hash.timer);
1253  	for (i = 0; i < OL_TX_HL_DEL_ACK_HASH_SIZE; i++) {
1254  		tcp_node_list = NULL;
1255  		qdf_spin_lock_bh(&vdev->tcp_ack_hash.node[i].hash_node_lock);
1256  
1257  		if (vdev->tcp_ack_hash.node[i].no_of_entries)
1258  			tcp_node_list = vdev->tcp_ack_hash.node[i].head;
1259  
1260  		vdev->tcp_ack_hash.node[i].no_of_entries = 0;
1261  		vdev->tcp_ack_hash.node[i].head = NULL;
1262  		qdf_spin_unlock_bh(&vdev->tcp_ack_hash.node[i].hash_node_lock);
1263  
1264  		/* free all packets */
1265  		while (tcp_node_list) {
1266  			temp = tcp_node_list;
1267  			tcp_node_list = temp->next;
1268  
1269  			qdf_nbuf_tx_free(temp->head, 1/*error*/);
1270  			ol_txrx_vdev_free_tcp_node(vdev, temp);
1271  		}
1272  	}
1273  	ol_txrx_vdev_deinit_tcp_del_ack(vdev);
1274  }
1275  
1276  /**
1277   * ol_txrx_vdev_init_tcp_del_ack() - initialize tcp delayed ack structure
1278   * @vdev: vdev handle
1279   *
1280   * Return: none
1281   */
1282  void ol_txrx_vdev_init_tcp_del_ack(struct ol_txrx_vdev_t *vdev)
1283  {
1284  	int i;
1285  
1286  	vdev->driver_del_ack_enabled = false;
1287  
1288  	dp_debug("vdev-id=%u, driver_del_ack_enabled=%d",
1289  		 vdev->vdev_id,
1290  		 vdev->driver_del_ack_enabled);
1291  
1292  	vdev->no_of_tcpack = 0;
1293  	vdev->no_of_tcpack_replaced = 0;
1294  
1295  	qdf_hrtimer_init(&vdev->tcp_ack_hash.timer,
1296  			 ol_tx_hl_vdev_tcp_del_ack_timer,
1297  			 __QDF_CLOCK_MONOTONIC,
1298  			 __QDF_HRTIMER_MODE_REL,
1299  			 QDF_CONTEXT_HARDWARE
1300  			 );
1301  	qdf_create_bh(&vdev->tcp_ack_hash.tcp_del_ack_tq,
1302  		      tcp_del_ack_tasklet,
1303  		      vdev);
1304  	qdf_atomic_init(&vdev->tcp_ack_hash.is_timer_running);
1305  	qdf_atomic_init(&vdev->tcp_ack_hash.tcp_node_in_use_count);
1306  	qdf_spinlock_create(&vdev->tcp_ack_hash.tcp_free_list_lock);
1307  	vdev->tcp_ack_hash.tcp_free_list = NULL;
1308  	for (i = 0; i < OL_TX_HL_DEL_ACK_HASH_SIZE; i++) {
1309  		qdf_spinlock_create(&vdev->tcp_ack_hash.node[i].hash_node_lock);
1310  		vdev->tcp_ack_hash.node[i].no_of_entries = 0;
1311  		vdev->tcp_ack_hash.node[i].head = NULL;
1312  	}
1313  }
1314  
1315  /**
1316   * ol_txrx_vdev_deinit_tcp_del_ack() - deinitialize tcp delayed ack structure
1317   * @vdev: vdev handle
1318   *
1319   * Return: none
1320   */
1321  void ol_txrx_vdev_deinit_tcp_del_ack(struct ol_txrx_vdev_t *vdev)
1322  {
1323  	struct tcp_stream_node *temp;
1324  
1325  	qdf_destroy_bh(&vdev->tcp_ack_hash.tcp_del_ack_tq);
1326  
1327  	qdf_spin_lock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
1328  	while (vdev->tcp_ack_hash.tcp_free_list) {
1329  		temp = vdev->tcp_ack_hash.tcp_free_list;
1330  		vdev->tcp_ack_hash.tcp_free_list = temp->next;
1331  		qdf_mem_free(temp);
1332  	}
1333  	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
1334  }
1335  
1336  /**
 * ol_txrx_vdev_free_tcp_node() - return a tcp node to the free list
1338   * @vdev: vdev handle
1339   * @node: tcp stream node
1340   *
1341   * Return: none
1342   */
1343  void ol_txrx_vdev_free_tcp_node(struct ol_txrx_vdev_t *vdev,
1344  				struct tcp_stream_node *node)
1345  {
1346  	qdf_atomic_dec(&vdev->tcp_ack_hash.tcp_node_in_use_count);
1347  
1348  	qdf_spin_lock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
1349  	if (vdev->tcp_ack_hash.tcp_free_list) {
1350  		node->next = vdev->tcp_ack_hash.tcp_free_list;
1351  		vdev->tcp_ack_hash.tcp_free_list = node;
1352  	} else {
1353  		vdev->tcp_ack_hash.tcp_free_list = node;
1354  		node->next = NULL;
1355  	}
1356  	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
1357  }
1358  
1359  /**
1360   * ol_txrx_vdev_alloc_tcp_node() - allocate tcp node
1361   * @vdev: vdev handle
1362   *
1363   * Return: tcp stream node
1364   */
1365  struct tcp_stream_node *ol_txrx_vdev_alloc_tcp_node(struct ol_txrx_vdev_t *vdev)
1366  {
1367  	struct tcp_stream_node *node = NULL;
1368  
1369  	qdf_spin_lock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
1370  	if (vdev->tcp_ack_hash.tcp_free_list) {
1371  		node = vdev->tcp_ack_hash.tcp_free_list;
1372  		vdev->tcp_ack_hash.tcp_free_list = node->next;
1373  	}
1374  	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
1375  
1376  	if (!node) {
		node = qdf_mem_malloc(sizeof(struct tcp_stream_node));
1378  		if (!node)
1379  			return NULL;
1380  	}
1381  	qdf_atomic_inc(&vdev->tcp_ack_hash.tcp_node_in_use_count);
1382  	return node;
1383  }
1384  
1385  qdf_nbuf_t
1386  ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
1387  {
1388  	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1389  	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
1390  				pdev->cfg.request_tx_comp;
1391  	struct packet_info pkt_info;
1392  	qdf_nbuf_t temp;
1393  
1394  	if (ol_tx_is_tcp_ack(msdu_list))
1395  		vdev->no_of_tcpack++;
1396  
	/* check whether TCP delayed-ack handling is enabled through INI */
1398  	if (!ol_cfg_get_del_ack_enable_value(vdev->pdev->ctrl_pdev) ||
1399  	    (!vdev->driver_del_ack_enabled)) {
1400  		if (qdf_atomic_read(&vdev->tcp_ack_hash.tcp_node_in_use_count))
1401  			ol_tx_hl_send_all_tcp_ack(vdev);
1402  
1403  		return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
1404  				    tx_comp_req, true);
1405  	}
1406  
1407  	ol_tx_get_packet_info(msdu_list, &pkt_info);
1408  
1409  	if (pkt_info.type == TCP_PKT_NO_ACK) {
1410  		ol_tx_hl_find_and_send_tcp_stream(vdev, &pkt_info);
1411  		temp = ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
1412  				     tx_comp_req, true);
1413  		return temp;
1414  	}
1415  
1416  	if (pkt_info.type == TCP_PKT_ACK) {
1417  		ol_tx_hl_find_and_replace_tcp_ack(vdev, msdu_list, &pkt_info);
1418  		return NULL;
1419  	}
1420  
1421  	temp = ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
1422  			     tx_comp_req, true);
1423  	return temp;
1424  }
1425  #else
1426  
1427  #ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
1428  void
1429  ol_tx_pdev_reset_bundle_require(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1430  {
1431  	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1432  	struct ol_txrx_pdev_t *pdev = ol_txrx_get_pdev_from_pdev_id(soc,
1433  								    pdev_id);
1434  	struct ol_txrx_vdev_t *vdev;
1435  
1436  	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
1437  		vdev->bundling_required = false;
1438  		ol_txrx_info("vdev_id %d bundle_require %d",
1439  			     vdev->vdev_id, vdev->bundling_required);
1440  	}
1441  }
1442  
1443  void
1444  ol_tx_vdev_set_bundle_require(uint8_t vdev_id, unsigned long tx_bytes,
1445  			      uint32_t time_in_ms, uint32_t high_th,
1446  			      uint32_t low_th)
1447  {
1448  	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)
1449  				ol_txrx_get_vdev_from_vdev_id(vdev_id);
1450  	bool old_bundle_required;
1451  
1452  	if ((!vdev) || (low_th > high_th))
1453  		return;
1454  
1455  	old_bundle_required = vdev->bundling_required;
1456  	if (tx_bytes > ((high_th * time_in_ms * 1500) / 1000))
1457  		vdev->bundling_required = true;
1458  	else if (tx_bytes < ((low_th * time_in_ms * 1500) / 1000))
1459  		vdev->bundling_required = false;
1460  
1461  	if (old_bundle_required != vdev->bundling_required)
1462  		ol_txrx_info("vdev_id %d bundle_require %d tx_bytes %ld time_in_ms %d high_th %d low_th %d",
1463  			     vdev->vdev_id, vdev->bundling_required, tx_bytes,
1464  			     time_in_ms, high_th, low_th);
1465  }
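/*
 * Illustrative reading of the comparison above (the 1500-byte packet size
 * and the /1000 ms-to-s conversion are assumptions about units, not taken
 * from a spec): high_th and low_th appear to be packet rates per second, so
 * "high_th * time_in_ms * 1500 / 1000" approximates the byte count that
 * high_th full-sized packets would produce over the time_in_ms window.
 */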
1466  
1467  /**
1468   * ol_tx_hl_queue_flush_all() - drop all packets in vdev bundle queue
1469   * @vdev: vdev handle
1470   *
1471   * Return: none
1472   */
1473  void
1474  ol_tx_hl_queue_flush_all(struct ol_txrx_vdev_t *vdev)
1475  {
1476  	qdf_spin_lock_bh(&vdev->bundle_queue.mutex);
1477  	if (vdev->bundle_queue.txq.depth != 0) {
1478  		qdf_timer_stop(&vdev->bundle_queue.timer);
1479  		vdev->pdev->total_bundle_queue_length -=
1480  				vdev->bundle_queue.txq.depth;
1481  		qdf_nbuf_tx_free(vdev->bundle_queue.txq.head, 1/*error*/);
1482  		vdev->bundle_queue.txq.depth = 0;
1483  		vdev->bundle_queue.txq.head = NULL;
1484  		vdev->bundle_queue.txq.tail = NULL;
1485  	}
1486  	qdf_spin_unlock_bh(&vdev->bundle_queue.mutex);
1487  }
1488  
1489  /**
1490   * ol_tx_hl_vdev_queue_append() - append pkt in tx queue
1491   * @vdev: vdev handle
1492   * @msdu_list: msdu list
1493   *
1494   * Return: none
1495   */
1496  static void
1497  ol_tx_hl_vdev_queue_append(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu_list)
1498  {
1499  	qdf_spin_lock_bh(&vdev->bundle_queue.mutex);
1500  
1501  	if (!vdev->bundle_queue.txq.head) {
1502  		qdf_timer_start(
1503  			&vdev->bundle_queue.timer,
1504  			ol_cfg_get_bundle_timer_value(vdev->pdev->ctrl_pdev));
1505  		vdev->bundle_queue.txq.head = msdu_list;
1506  		vdev->bundle_queue.txq.tail = msdu_list;
1507  	} else {
1508  		qdf_nbuf_set_next(vdev->bundle_queue.txq.tail, msdu_list);
1509  	}
1510  
1511  	while (qdf_nbuf_next(msdu_list)) {
1512  		vdev->bundle_queue.txq.depth++;
1513  		vdev->pdev->total_bundle_queue_length++;
1514  		msdu_list = qdf_nbuf_next(msdu_list);
1515  	}
1516  
1517  	vdev->bundle_queue.txq.depth++;
1518  	vdev->pdev->total_bundle_queue_length++;
1519  	vdev->bundle_queue.txq.tail = msdu_list;
1520  	qdf_spin_unlock_bh(&vdev->bundle_queue.mutex);
1521  }
1522  
1523  /**
1524   * ol_tx_hl_vdev_queue_send_all() - send all packets in vdev bundle queue
1525   * @vdev: vdev handle
1526   * @call_sched: invoke scheduler
1527   *
 * Return: NULL if all queued packets were accepted, otherwise the list of
 *         packets that could not be accepted
1529   */
1530  static qdf_nbuf_t
1531  ol_tx_hl_vdev_queue_send_all(struct ol_txrx_vdev_t *vdev, bool call_sched,
1532  			     bool in_timer_context)
1533  {
1534  	qdf_nbuf_t msdu_list = NULL;
1535  	qdf_nbuf_t skb_list_head, skb_list_tail;
1536  	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1537  	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
1538  				pdev->cfg.request_tx_comp;
1539  	int pkt_to_sent;
1540  
1541  	qdf_spin_lock_bh(&vdev->bundle_queue.mutex);
1542  
1543  	if (!vdev->bundle_queue.txq.depth) {
1544  		qdf_spin_unlock_bh(&vdev->bundle_queue.mutex);
1545  		return msdu_list;
1546  	}
1547  
1548  	if (likely((qdf_atomic_read(&vdev->tx_desc_count) +
1549  		    vdev->bundle_queue.txq.depth) <
1550  		    vdev->queue_stop_th)) {
1551  		qdf_timer_stop(&vdev->bundle_queue.timer);
1552  		vdev->pdev->total_bundle_queue_length -=
1553  				vdev->bundle_queue.txq.depth;
1554  		msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
1555  					  vdev->bundle_queue.txq.head,
1556  					  tx_comp_req, call_sched);
1557  		vdev->bundle_queue.txq.depth = 0;
1558  		vdev->bundle_queue.txq.head = NULL;
1559  		vdev->bundle_queue.txq.tail = NULL;
1560  	} else {
1561  		pkt_to_sent = vdev->queue_stop_th -
1562  			qdf_atomic_read(&vdev->tx_desc_count);
1563  
1564  		if (pkt_to_sent) {
1565  			skb_list_head = vdev->bundle_queue.txq.head;
1566  
1567  			while (pkt_to_sent) {
1568  				skb_list_tail =
1569  					vdev->bundle_queue.txq.head;
1570  				vdev->bundle_queue.txq.head =
1571  				    qdf_nbuf_next(vdev->bundle_queue.txq.head);
1572  				vdev->pdev->total_bundle_queue_length--;
1573  				vdev->bundle_queue.txq.depth--;
1574  				pkt_to_sent--;
1575  				if (!vdev->bundle_queue.txq.head) {
1576  					qdf_timer_stop(
1577  						&vdev->bundle_queue.timer);
1578  					break;
1579  				}
1580  			}
1581  
1582  			qdf_nbuf_set_next(skb_list_tail, NULL);
1583  			msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
1584  						  skb_list_head, tx_comp_req,
1585  						  call_sched);
1586  		}
1587  
1588  		if (in_timer_context &&	vdev->bundle_queue.txq.head) {
1589  			qdf_timer_start(
1590  				&vdev->bundle_queue.timer,
1591  				ol_cfg_get_bundle_timer_value(
1592  					vdev->pdev->ctrl_pdev));
1593  		}
1594  	}
1595  	qdf_spin_unlock_bh(&vdev->bundle_queue.mutex);
1596  
1597  	return msdu_list;
1598  }
1599  
1600  /**
1601   * ol_tx_hl_pdev_queue_send_all() - send all packets from all vdev bundle queue
1602   * @pdev: pdev handle
1603   *
1604   * Return: NULL for success
1605   */
1606  qdf_nbuf_t
1607  ol_tx_hl_pdev_queue_send_all(struct ol_txrx_pdev_t *pdev)
1608  {
1609  	struct ol_txrx_vdev_t *vdev;
1610  	qdf_nbuf_t msdu_list;
1611  
1612  	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
1613  		msdu_list = ol_tx_hl_vdev_queue_send_all(vdev, false, false);
1614  		if (msdu_list)
1615  			qdf_nbuf_tx_free(msdu_list, 1/*error*/);
1616  	}
1617  	ol_tx_sched(pdev);
1618  	return NULL; /* all msdus were accepted */
1619  }
1620  
1621  /**
1622   * ol_tx_hl_vdev_bundle_timer() - bundle timer function
1623   * @vdev: vdev handle
1624   *
1625   * Return: none
1626   */
1627  void
1628  ol_tx_hl_vdev_bundle_timer(void *ctx)
1629  {
1630  	qdf_nbuf_t msdu_list;
1631  	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)ctx;
1632  
1633  	vdev->no_of_bundle_sent_in_timer++;
1634  	msdu_list = ol_tx_hl_vdev_queue_send_all(vdev, true, true);
1635  	if (msdu_list)
1636  		qdf_nbuf_tx_free(msdu_list, 1/*error*/);
1637  }
1638  
1639  qdf_nbuf_t
1640  ol_tx_hl(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu_list)
1641  {
1642  	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1643  	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
1644  				pdev->cfg.request_tx_comp;
1645  
1646  	/* No queuing for high priority packets */
1647  	if (ol_tx_desc_is_high_prio(msdu_list)) {
1648  		vdev->no_of_pkt_not_added_in_queue++;
1649  		return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
1650  					     tx_comp_req, true);
1651  	} else if (vdev->bundling_required &&
1652  	    (ol_cfg_get_bundle_size(vdev->pdev->ctrl_pdev) > 1)) {
1653  		ol_tx_hl_vdev_queue_append(vdev, msdu_list);
1654  
1655  		if (pdev->total_bundle_queue_length >=
1656  		    ol_cfg_get_bundle_size(vdev->pdev->ctrl_pdev)) {
1657  			vdev->no_of_bundle_sent_after_threshold++;
1658  			return ol_tx_hl_pdev_queue_send_all(pdev);
1659  		}
1660  	} else {
1661  		if (vdev->bundle_queue.txq.depth != 0) {
1662  			ol_tx_hl_vdev_queue_append(vdev, msdu_list);
1663  			return ol_tx_hl_vdev_queue_send_all(vdev, true, false);
1664  		} else {
1665  			vdev->no_of_pkt_not_added_in_queue++;
1666  			return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
1667  					     tx_comp_req, true);
1668  		}
1669  	}
1670  
1671  	return NULL; /* all msdus were accepted */
1672  }
1673  
1674  #else
1675  
1676  qdf_nbuf_t
1677  ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
1678  {
1679  	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1680  	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
1681  				pdev->cfg.request_tx_comp;
1682  
1683  	return ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
1684  			     msdu_list, tx_comp_req, true);
1685  }
1686  #endif
1687  #endif
1688  
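/**
 * ol_tx_non_std_hl() - send a list of non-standard tx frames (HL)
 * @vdev: the virtual device transmitting the data
 * @tx_spec: what non-standard handling to apply to the tx data
 * @msdu_list: list of tx frames to transmit
 *
 * A tx completion is requested even if not configured by default when
 * the caller retains ownership of the frames (OL_TX_SPEC_NO_FREE) and a
 * tx data callback is registered.
 *
 * Return: list of MSDUs that were not accepted, or NULL if all were accepted
 */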
1689  qdf_nbuf_t ol_tx_non_std_hl(struct ol_txrx_vdev_t *vdev,
1690  			    enum ol_tx_spec tx_spec,
1691  			    qdf_nbuf_t msdu_list)
1692  {
1693  	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1694  	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
1695  				pdev->cfg.request_tx_comp;
1696  
1697  	if (!tx_comp_req) {
1698  		if ((tx_spec == OL_TX_SPEC_NO_FREE) &&
1699  		    (pdev->tx_data_callback.func))
1700  			tx_comp_req = 1;
1701  	}
1702  	return ol_tx_hl_base(vdev, tx_spec, msdu_list, tx_comp_req, true);
1703  }
1704  
1705  #ifdef FEATURE_WLAN_TDLS
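/**
 * ol_txrx_copy_mac_addr_raw() - cache the MAC address of the last real peer
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of the virtual device
 * @bss_addr: BSS MAC address to match against
 *
 * If @bss_addr matches the MAC address of the vdev's last real peer, copy
 * that address into vdev->hl_tdls_ap_mac_addr for later TDLS use.
 *
 * Return: None
 */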
1706  void ol_txrx_copy_mac_addr_raw(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1707  			       uint8_t *bss_addr)
1708  {
1709  	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1710  	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
1711  								     vdev_id);
1712  
1713  	if (!vdev)
1714  		return;
1715  
1716  	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
1717  	if (bss_addr && vdev->last_real_peer &&
1718  	    !qdf_mem_cmp((u8 *)bss_addr,
1719  			     vdev->last_real_peer->mac_addr.raw,
1720  			     QDF_MAC_ADDR_SIZE))
1721  		qdf_mem_copy(vdev->hl_tdls_ap_mac_addr.raw,
1722  			     vdev->last_real_peer->mac_addr.raw,
1723  			     QDF_MAC_ADDR_SIZE);
1724  	qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
1725  }
1726  
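/**
 * ol_txrx_add_last_real_peer() - restore the last real peer of a vdev
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the physical device
 * @vdev_id: id of the virtual device
 *
 * Look up the peer matching the cached TDLS AP MAC address and, if the
 * vdev currently has no last real peer, record it and clear the cached
 * address.
 *
 * Return: None
 */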
1727  void
1728  ol_txrx_add_last_real_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1729  			   uint8_t vdev_id)
1730  {
1731  	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1732  	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
1733  	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
1734  								     vdev_id);
1735  	ol_txrx_peer_handle peer;
1736  
1737  	if (!pdev || !vdev)
1738  		return;
1739  
1740  	peer = ol_txrx_find_peer_by_addr(
1741  		(struct cdp_pdev *)pdev,
1742  		vdev->hl_tdls_ap_mac_addr.raw);
1743  
1744  	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
1745  	if (!vdev->last_real_peer && peer &&
1746  	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID)) {
1747  		vdev->last_real_peer = peer;
1748  		qdf_mem_zero(vdev->hl_tdls_ap_mac_addr.raw,
1749  			     QDF_MAC_ADDR_SIZE);
1750  	}
1751  	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
1752  }
1753  
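/**
 * is_vdev_restore_last_peer() - check if a peer is the vdev's last real peer
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of the virtual device
 * @peer_mac: MAC address of the peer to check
 *
 * Return: true if the peer matching @peer_mac is the vdev's last real peer,
 *	   false otherwise
 */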
1754  bool is_vdev_restore_last_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1755  			       uint8_t *peer_mac)
1756  {
1757  	struct ol_txrx_peer_t *peer;
1758  	struct ol_txrx_pdev_t *pdev;
1759  	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1760  	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
1761  								     vdev_id);
1762  
1763  	if (!vdev)
1764  		return false;
1765  
1766  	pdev = vdev->pdev;
1767  	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev, peer_mac);
1768  
1769  	return vdev->last_real_peer && (vdev->last_real_peer == peer);
1770  }
1771  
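/**
 * ol_txrx_update_last_real_peer() - conditionally restore the last real peer
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the physical device
 * @vdev_id: id of the virtual device
 * @restore_last_peer: whether the last real peer should be restored
 *
 * When @restore_last_peer is set and the vdev has no last real peer, look
 * up the peer matching the cached TDLS AP MAC address, record it and
 * clear the cached address.
 *
 * Return: None
 */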
1772  void ol_txrx_update_last_real_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1773  				   uint8_t vdev_id, bool restore_last_peer)
1774  {
1775  	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1776  	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
1777  	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
1778  								     vdev_id);
1779  	struct ol_txrx_peer_t *peer;
1780  
1781  	if (!restore_last_peer || !pdev || !vdev)
1782  		return;
1783  
1784  	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
1785  					 vdev->hl_tdls_ap_mac_addr.raw);
1786  
1787  	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
1788  	if (!vdev->last_real_peer && peer &&
1789  	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID)) {
1790  		vdev->last_real_peer = peer;
1791  		qdf_mem_zero(vdev->hl_tdls_ap_mac_addr.raw,
1792  			     QDF_MAC_ADDR_SIZE);
1793  	}
1794  	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
1795  }
1796  
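/**
 * ol_txrx_set_peer_as_tdls_peer() - mark or unmark a peer as a TDLS peer
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of the virtual device
 * @peer_mac: MAC address of the peer
 * @val: true to mark the peer as a TDLS peer, false to clear the flag
 *
 * Return: None
 */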
1797  void ol_txrx_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1798  				   uint8_t *peer_mac, bool val)
1799  {
1800  	ol_txrx_peer_handle peer;
1801  	struct ol_txrx_pdev_t *pdev;
1802  	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1803  	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
1804  								     vdev_id);
1805  
1806  	if (!vdev)
1807  		return;
1808  
1809  	pdev = vdev->pdev;
1810  	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev, peer_mac);
1811  
	/* Bail out if no peer matches the given MAC address */
	if (!peer)
		return;

	ol_txrx_info_high("peer %pK, peer->ref_cnt %d",
			  peer, qdf_atomic_read(&peer->ref_cnt));

	/* Mark peer as tdls */
	peer->is_tdls_peer = val;
1818  }
1819  
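/**
 * ol_txrx_set_tdls_offchan_enabled() - set TDLS off-channel status of a peer
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of the virtual device
 * @peer_mac: MAC address of the TDLS peer
 * @val: true to enable TDLS off-channel operation, false to disable it
 *
 * Return: None
 */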
1820  void ol_txrx_set_tdls_offchan_enabled(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1821  				      uint8_t *peer_mac, bool val)
1822  {
1823  	ol_txrx_peer_handle peer;
1824  	struct ol_txrx_pdev_t *pdev;
1825  	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1826  	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
1827  								     vdev_id);
1828  
1829  	if (!vdev)
1830  		return;
1831  
1832  	pdev = vdev->pdev;
1833  	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev, peer_mac);
1834  
	/* Bail out if no peer matches the given MAC address */
	if (!peer)
		return;

	ol_txrx_info_high("peer %pK, peer->ref_cnt %d",
			  peer, qdf_atomic_read(&peer->ref_cnt));

	/* Set TDLS Offchan operation enable/disable */
	if (peer->is_tdls_peer)
		peer->tdls_offchan_enabled = val;
1841  }
1842  #endif
1843  
1844  #if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)
1845  /**
1846   * ol_txrx_pdev_txq_log_init() - initialise pdev txq logs
1847   * @pdev: the physical device object
1848   *
1849   * Return: None
1850   */
1851  void ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
1852  {
1853  	qdf_spinlock_create(&pdev->txq_log_spinlock);
1854  	pdev->txq_log.size = OL_TXQ_LOG_SIZE;
1855  	pdev->txq_log.oldest_record_offset = 0;
1856  	pdev->txq_log.offset = 0;
1857  	pdev->txq_log.allow_wrap = 1;
1858  	pdev->txq_log.wrapped = 0;
1859  }
1860  
1861  /**
 * ol_txrx_pdev_txq_log_destroy() - destroy the txq log spinlock for the pdev
1863   * @pdev: the physical device object
1864   *
1865   * Return: None
1866   */
1867  void ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
1868  {
1869  	qdf_spinlock_destroy(&pdev->txq_log_spinlock);
1870  }
1871  #endif
1872  
1873  #if defined(DEBUG_HL_LOGGING)
1874  
1875  /**
 * ol_txrx_pdev_grp_stats_init() - initialise the pdev group stats and spinlock
1877   * @pdev: the physical device object
1878   *
1879   * Return: None
1880   */
1881  void ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
1882  {
1883  	qdf_spinlock_create(&pdev->grp_stat_spinlock);
1884  	pdev->grp_stats.last_valid_index = -1;
1885  	pdev->grp_stats.wrap_around = 0;
1886  }
1887  
1888  /**
1889   * ol_txrx_pdev_grp_stat_destroy() - destroy group stat spinlock for pdev
1890   * @pdev: the physical device object
1891   *
1892   * Return: None
1893   */
1894  void ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
1895  {
1896  	qdf_spinlock_destroy(&pdev->grp_stat_spinlock);
1897  }
1898  #endif
1899  
1900  #if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
1901  
1902  /**
1903   * ol_txrx_hl_tdls_flag_reset() - reset tdls flag for vdev
1904   * @soc_hdl: Datapath soc handle
1905   * @vdev_id: id of vdev
 * @flag: new value of the vdev TDLS flag
1907   *
1908   * Return: None
1909   */
1910  void
1911  ol_txrx_hl_tdls_flag_reset(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1912  			   bool flag)
1913  {
1914  	struct ol_txrx_vdev_t *vdev =
1915  		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
1916  	if (!vdev) {
1917  		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1918  			  "%s: Invalid vdev_id %d", __func__, vdev_id);
1919  		return;
1920  	}
1921  
1922  	vdev->hlTdlsFlag = flag;
1923  }
1924  #endif
1925  
1926  /**
1927   * ol_txrx_vdev_txqs_init() - initialise vdev tx queues
1928   * @vdev: the virtual device object
1929   *
1930   * Return: None
1931   */
1932  void ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
1933  {
1934  	uint8_t i;
1935  
1936  	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
1937  		TAILQ_INIT(&vdev->txqs[i].head);
1938  		vdev->txqs[i].paused_count.total = 0;
1939  		vdev->txqs[i].frms = 0;
1940  		vdev->txqs[i].bytes = 0;
1941  		vdev->txqs[i].ext_tid = OL_TX_NUM_TIDS + i;
1942  		vdev->txqs[i].flag = ol_tx_queue_empty;
1943  		/* aggregation is not applicable for vdev tx queues */
1944  		vdev->txqs[i].aggr_state = ol_tx_aggr_disabled;
1945  		ol_tx_txq_set_group_ptr(&vdev->txqs[i], NULL);
1946  		ol_txrx_set_txq_peer(&vdev->txqs[i], NULL);
1947  	}
1948  }
1949  
1950  /**
1951   * ol_txrx_vdev_tx_queue_free() - free vdev tx queues
1952   * @vdev: the virtual device object
1953   *
1954   * Return: None
1955   */
1956  void ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
1957  {
1958  	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1959  	struct ol_tx_frms_queue_t *txq;
1960  	int i;
1961  
1962  	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
1963  		txq = &vdev->txqs[i];
1964  		ol_tx_queue_free(pdev, txq, (i + OL_TX_NUM_TIDS), false);
1965  	}
1966  }
1967  
1968  /**
1969   * ol_txrx_peer_txqs_init() - initialise peer tx queues
1970   * @pdev: the physical device object
1971   * @peer: peer object
1972   *
1973   * Return: None
1974   */
1975  void ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
1976  			    struct ol_txrx_peer_t *peer)
1977  {
1978  	uint8_t i;
1979  	struct ol_txrx_vdev_t *vdev = peer->vdev;
1980  
1981  	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
1982  	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
1983  		TAILQ_INIT(&peer->txqs[i].head);
1984  		peer->txqs[i].paused_count.total = 0;
1985  		peer->txqs[i].frms = 0;
1986  		peer->txqs[i].bytes = 0;
1987  		peer->txqs[i].ext_tid = i;
1988  		peer->txqs[i].flag = ol_tx_queue_empty;
1989  		peer->txqs[i].aggr_state = ol_tx_aggr_untried;
1990  		ol_tx_set_peer_group_ptr(pdev, peer, vdev->vdev_id, i);
1991  		ol_txrx_set_txq_peer(&peer->txqs[i], peer);
1992  	}
1993  	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
1994  
1995  	/* aggregation is not applicable for mgmt and non-QoS tx queues */
1996  	for (i = OL_TX_NUM_QOS_TIDS; i < OL_TX_NUM_TIDS; i++)
1997  		peer->txqs[i].aggr_state = ol_tx_aggr_disabled;
1998  
1999  	ol_txrx_peer_pause(peer);
2000  }
2001  
2002  /**
2003   * ol_txrx_peer_tx_queue_free() - free peer tx queues
2004   * @pdev: the physical device object
2005   * @peer: peer object
2006   *
2007   * Return: None
2008   */
2009  void ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
2010  				struct ol_txrx_peer_t *peer)
2011  {
2012  	struct ol_tx_frms_queue_t *txq;
2013  	uint8_t i;
2014  
2015  	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
2016  		txq = &peer->txqs[i];
2017  		ol_tx_queue_free(pdev, txq, i, true);
2018  	}
2019  }
2020  
2021  #ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
2022  
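/*
 * Each tx queue group tracks a credit count.  Callers either adjust the
 * count by a signed delta or overwrite it with an absolute value.
 *
 * Illustrative example (values chosen only for illustration): with
 * group->credit currently 10,
 *	ol_txrx_update_group_credit(group, 5, 0);   credit becomes 15
 *	ol_txrx_update_group_credit(group, -3, 0);  credit becomes 12
 *	ol_txrx_update_group_credit(group, 20, 1);  credit becomes 20
 */
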
/**
 * ol_txrx_update_group_credit() - update the credit of a tx queue group
 * @group: tx queue group whose credit is to be updated
 * @credit: signed credit delta, or the absolute credit value if @absolute is set
 * @absolute: when set, overwrite the group credit instead of adjusting it
 *
 * Return: None
 */
2031  void ol_txrx_update_group_credit(
2032  		struct ol_tx_queue_group_t *group,
2033  		int32_t credit,
2034  		u_int8_t absolute)
2035  {
2036  	if (absolute)
2037  		qdf_atomic_set(&group->credit, credit);
2038  	else
2039  		qdf_atomic_add(credit, &group->credit);
2040  }
2041  
/**
 * ol_txrx_update_tx_queue_groups() - update the tx queue group membership
 *				      and credit when the vdev id mask or
 *				      ac mask has changed
 * @pdev: the data physical device
 * @group_id: TXQ group id
 * @credit: TXQ group credit count
 * @absolute: TXQ group absolute update flag
 * @vdev_id_mask: TXQ group vdev id mask
 * @ac_mask: TXQ group access category mask
 *
 * Return: None
 */
2054  void ol_txrx_update_tx_queue_groups(
2055  		ol_txrx_pdev_handle pdev,
2056  		u_int8_t group_id,
2057  		int32_t credit,
2058  		u_int8_t absolute,
2059  		u_int32_t vdev_id_mask,
2060  		u_int32_t ac_mask
2061  		)
2062  {
2063  	struct ol_tx_queue_group_t *group;
2064  	u_int32_t group_vdev_bit_mask, vdev_bit_mask, group_vdev_id_mask;
2065  	u_int32_t membership;
2066  	struct ol_txrx_vdev_t *vdev;
2067  
2068  	if (group_id >= OL_TX_MAX_TXQ_GROUPS) {
2069  		ol_txrx_warn("invalid group_id=%u, ignore update", group_id);
2070  		return;
2071  	}
2072  
2073  	group = &pdev->txq_grps[group_id];
2074  
2075  	membership = OL_TXQ_GROUP_MEMBERSHIP_GET(vdev_id_mask, ac_mask);
2076  
2077  	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	/*
	 * If the membership (vdev id mask and ac mask) matches, there is
	 * no need to update the tx queue groups.
	 */
2082  	if (group->membership == membership)
2083  		/* Update Credit Only */
2084  		goto credit_update;
2085  
2086  	credit += ol_txrx_distribute_group_credits(pdev, group_id,
2087  						   vdev_id_mask);
	/*
	 * The membership (vdev id mask and ac mask) does not match.
	 * TODO: the ac mask is ignored for now.
	 */
2092  	qdf_assert(ac_mask == 0xffff);
2093  	group_vdev_id_mask =
2094  		OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);
2095  
2096  	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2097  		group_vdev_bit_mask =
2098  			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
2099  					group_vdev_id_mask, vdev->vdev_id);
2100  		vdev_bit_mask =
2101  			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
2102  					vdev_id_mask, vdev->vdev_id);
2103  
2104  		if (group_vdev_bit_mask != vdev_bit_mask) {
2105  			/*
2106  			 * Change in vdev tx queue group
2107  			 */
2108  			if (!vdev_bit_mask) {
2109  				/* Set Group Pointer (vdev and peer) to NULL */
2110  				ol_txrx_info("Group membership removed for vdev_id %d from group_id %d",
2111  					     vdev->vdev_id, group_id);
2112  				ol_tx_set_vdev_group_ptr(
2113  						pdev, vdev->vdev_id, NULL);
2114  			} else {
2115  				/* Set Group Pointer (vdev and peer) */
2116  				ol_txrx_info("Group membership updated for vdev_id %d to group_id %d",
2117  					     vdev->vdev_id, group_id);
2118  				ol_tx_set_vdev_group_ptr(
2119  						pdev, vdev->vdev_id, group);
2120  			}
2121  		}
2122  	}
2123  	/* Update membership */
2124  	group->membership = membership;
2125  	ol_txrx_info("Group membership updated for group_id %d membership 0x%x",
2126  		     group_id, group->membership);
2127  credit_update:
2128  	/* Update Credit */
2129  	ol_txrx_update_group_credit(group, credit, absolute);
2130  	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
2131  }
2132  #endif
2133  
2134  #if defined(FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL) && \
2135  	defined(FEATURE_HL_DBS_GROUP_CREDIT_SHARING)
2136  #define MIN_INIT_GROUP_CREDITS	10
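/**
 * ol_txrx_distribute_group_credits() - rebalance credits between the two
 *					DBS tx queue groups
 * @pdev: the data physical device
 * @group_id: id of the group whose vdev membership is changing
 * @vdevid_mask_new: new vdev id mask of the group
 *
 * When the first vdev joins an empty group, seed it with credits taken
 * from the other group: MIN_INIT_GROUP_CREDITS if the other group has at
 * least that many, otherwise half of the other group's credits; if the
 * other group has no members, seed with all target tx credits.  When the
 * last vdev leaves a group, transfer its remaining credits to the other
 * group and zero its own credit.
 *
 * Illustrative example: if the other group holds 30 credits, the newly
 * populated group is seeded with 10; if it holds only 6, it is seeded
 * with 3.
 *
 * Return: number of credits to add to the updated group's credit
 */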
2137  int ol_txrx_distribute_group_credits(struct ol_txrx_pdev_t *pdev,
2138  				     u8 group_id,
2139  				     u32 vdevid_mask_new)
2140  {
2141  	struct ol_tx_queue_group_t *grp = &pdev->txq_grps[group_id];
2142  	struct ol_tx_queue_group_t *grp_nxt = &pdev->txq_grps[!group_id];
2143  	int creds_nxt = qdf_atomic_read(&grp_nxt->credit);
2144  	int vdevid_mask = OL_TXQ_GROUP_VDEV_ID_MASK_GET(grp->membership);
2145  	int vdevid_mask_othgrp =
2146  		OL_TXQ_GROUP_VDEV_ID_MASK_GET(grp_nxt->membership);
2147  	int creds_distribute = 0;
2148  
2149  	/* if vdev added to the group is the first vdev */
2150  	if ((vdevid_mask == 0) && (vdevid_mask_new != 0)) {
2151  		/* if other group has members */
2152  		if (vdevid_mask_othgrp) {
2153  			if (creds_nxt < MIN_INIT_GROUP_CREDITS)
2154  				creds_distribute = creds_nxt / 2;
2155  			else
2156  				creds_distribute = MIN_INIT_GROUP_CREDITS;
2157  
2158  			ol_txrx_update_group_credit(grp_nxt, -creds_distribute,
2159  						    0);
2160  		} else {
2161  			/*
2162  			 * Other grp has no members, give all credits to this
2163  			 * grp.
2164  			 */
2165  			creds_distribute =
2166  				qdf_atomic_read(&pdev->target_tx_credit);
2167  		}
2168  	/* if all vdevs are removed from this grp */
2169  	} else if ((vdevid_mask != 0) && (vdevid_mask_new == 0)) {
		if (vdevid_mask_othgrp)
			/* Transfer credits to other grp */
			ol_txrx_update_group_credit(
					grp_nxt,
					qdf_atomic_read(&grp->credit),
					0);
2176  		/* Set current grp credits to zero */
2177  		ol_txrx_update_group_credit(grp, 0, 1);
2178  	}
2179  
2180  	return creds_distribute;
2181  }
2182  #endif /*
2183  	* FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL &&
2184  	* FEATURE_HL_DBS_GROUP_CREDIT_SHARING
2185  	*/
2186  
2187  #ifdef QCA_HL_NETDEV_FLOW_CONTROL
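/**
 * ol_txrx_register_hl_flow_control() - register the netif flow control callback
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the physical device
 * @flowcontrol: callback used to pause/unpause the OS netif queues
 *
 * Return: 0 on success, QDF_STATUS_E_INVAL if the pdev or the callback is NULL
 */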
2188  int ol_txrx_register_hl_flow_control(struct cdp_soc_t *soc_hdl,
2189  				     uint8_t pdev_id,
2190  				     tx_pause_callback flowcontrol)
2191  {
2192  	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2193  	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
2194  	u32 desc_pool_size;
2195  
2196  	if (!pdev || !flowcontrol) {
2197  		ol_txrx_err("pdev or pause_cb is NULL");
2198  		return QDF_STATUS_E_INVAL;
2199  	}
2200  
2201  	desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
	/*
	 * Assert that the tx descriptor pool is large enough for the worst
	 * case: a maximum of 2 sessions are allowed on a band.
	 */
2206  	QDF_ASSERT((2 * ol_txrx_tx_desc_alloc_table[TXRX_FC_5GH_80M_2x2] +
2207  		    ol_txrx_tx_desc_alloc_table[TXRX_FC_2GH_40M_2x2])
2208  		    <= desc_pool_size);
2209  
2210  	pdev->pause_cb = flowcontrol;
2211  	return 0;
2212  }
2213  
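/**
 * ol_txrx_set_vdev_os_queue_status() - update the OS queue status of a vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of the virtual device
 * @action: netif action, either WLAN_NETIF_PRIORITY_QUEUE_ON or
 *	    WLAN_WAKE_NON_PRIORITY_QUEUE
 *
 * Return: 0 on success, -EINVAL on an invalid vdev or action
 */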
2214  int ol_txrx_set_vdev_os_queue_status(struct cdp_soc_t *soc_hdl, u8 vdev_id,
2215  				     enum netif_action_type action)
2216  {
2217  	struct ol_txrx_vdev_t *vdev =
2218  	(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
2219  
2220  	if (!vdev) {
2221  		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2222  			  "%s: Invalid vdev_id %d", __func__, vdev_id);
2223  		return -EINVAL;
2224  	}
2225  
2226  	switch (action) {
2227  	case WLAN_NETIF_PRIORITY_QUEUE_ON:
2228  		qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
2229  		vdev->prio_q_paused = 0;
2230  		qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
2231  		break;
2232  	case WLAN_WAKE_NON_PRIORITY_QUEUE:
2233  		qdf_atomic_set(&vdev->os_q_paused, 0);
2234  		break;
2235  	default:
2236  		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2237  			  "%s: Invalid action %d", __func__, action);
2238  		return -EINVAL;
2239  	}
2240  	return 0;
2241  }
2242  
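/**
 * ol_txrx_set_vdev_tx_desc_limit() - set the tx descriptor limit of a vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of the virtual device
 * @chan_freq: operating channel frequency, used to select the band limit
 *
 * The limit is taken from ol_txrx_tx_desc_alloc_table based on the band,
 * and the queue stop/restart thresholds are derived from it.
 *
 * Return: 0 on success, -EINVAL on an invalid vdev
 */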
2243  int ol_txrx_set_vdev_tx_desc_limit(struct cdp_soc_t *soc_hdl, u8 vdev_id,
2244  				   u32 chan_freq)
2245  {
2246  	struct ol_txrx_vdev_t *vdev =
2247  	(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
2248  	enum ol_txrx_fc_limit_id fc_limit_id;
2249  	u32 td_limit;
2250  
2251  	if (!vdev) {
2252  		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2253  			  "%s: Invalid vdev_id %d", __func__, vdev_id);
2254  		return -EINVAL;
2255  	}
2256  
2257  	/* TODO: Handle no of spatial streams and channel BW */
2258  	if (WLAN_REG_IS_5GHZ_CH_FREQ(chan_freq))
2259  		fc_limit_id = TXRX_FC_5GH_80M_2x2;
2260  	else
2261  		fc_limit_id = TXRX_FC_2GH_40M_2x2;
2262  
2263  	qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
2264  	td_limit = ol_txrx_tx_desc_alloc_table[fc_limit_id];
2265  	vdev->tx_desc_limit = td_limit;
2266  	vdev->queue_stop_th = td_limit - TXRX_HL_TX_DESC_HI_PRIO_RESERVED;
2267  	vdev->queue_restart_th = td_limit - TXRX_HL_TX_DESC_QUEUE_RESTART_TH;
2268  	qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
2269  
2270  	return 0;
2271  }
2272  
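/**
 * ol_tx_dump_flow_pool_info_compact() - log per-vdev flow control state and
 *					 per-group credits in a compact format
 * @pdev: the physical device object
 *
 * Return: None
 */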
2273  void ol_tx_dump_flow_pool_info_compact(struct ol_txrx_pdev_t *pdev)
2274  {
2275  	char *comb_log_str;
2276  	int bytes_written = 0;
2277  	uint32_t free_size;
2278  	struct ol_txrx_vdev_t *vdev;
2279  	int i = 0;
2280  
2281  	free_size = WLAN_MAX_VDEVS * 100;
2282  	comb_log_str = qdf_mem_malloc(free_size);
2283  	if (!comb_log_str)
2284  		return;
2285  
2286  	qdf_spin_lock_bh(&pdev->tx_mutex);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		int len;

		len = snprintf(&comb_log_str[bytes_written],
			       free_size, "%d (%d,%d)(%d,%d)(%d,%d) |",
			       vdev->vdev_id, vdev->tx_desc_limit,
			       qdf_atomic_read(&vdev->tx_desc_count),
			       qdf_atomic_read(&vdev->os_q_paused),
			       vdev->prio_q_paused, vdev->queue_stop_th,
			       vdev->queue_restart_th);
		/* account only for this entry, not the running total */
		bytes_written += len;
		free_size -= len;
	}
2297  	qdf_spin_unlock_bh(&pdev->tx_mutex);
2298  	qdf_nofl_debug("STATS | FC: %s", comb_log_str);
2299  
2300  	free_size = WLAN_MAX_VDEVS * 100;
2301  	bytes_written = 0;
2302  	qdf_mem_zero(comb_log_str, free_size);
2303  
	bytes_written = snprintf(&comb_log_str[bytes_written], free_size,
				 "%d ",
				 qdf_atomic_read(&pdev->target_tx_credit));
	free_size -= bytes_written;
	for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
		int len;

		len = snprintf(&comb_log_str[bytes_written],
			       free_size, "|%d, (0x%x, %d)", i,
			       OL_TXQ_GROUP_VDEV_ID_MASK_GET(
			       pdev->txq_grps[i].membership),
			       qdf_atomic_read(
			       &pdev->txq_grps[i].credit));
		/* account only for this entry, not the running total */
		bytes_written += len;
		free_size -= len;
	}
2316  	qdf_nofl_debug("STATS | CREDIT: %s", comb_log_str);
2317  	qdf_mem_free(comb_log_str);
2318  }
2319  
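/**
 * ol_tx_dump_flow_pool_info() - log per-vdev flow control state and
 *				 bundling counters
 * @soc_hdl: datapath soc handle
 *
 * Return: None
 */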
2320  void ol_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl)
2321  {
2322  	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2323  	ol_txrx_pdev_handle pdev;
2324  	struct ol_txrx_vdev_t *vdev;
2325  
2326  	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
2327  	if (!pdev) {
2328  		ol_txrx_err("pdev is NULL");
2329  		return;
2330  	}
2331  
2332  	qdf_spin_lock_bh(&pdev->tx_mutex);
2333  	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2334  		txrx_nofl_info("vdev_id %d", vdev->vdev_id);
2335  		txrx_nofl_info("limit %d available %d stop_threshold %d restart_threshold %d",
2336  			       vdev->tx_desc_limit,
2337  			       qdf_atomic_read(&vdev->tx_desc_count),
2338  			       vdev->queue_stop_th, vdev->queue_restart_th);
2339  		txrx_nofl_info("q_paused %d prio_q_paused %d",
2340  			       qdf_atomic_read(&vdev->os_q_paused),
2341  			       vdev->prio_q_paused);
2342  		txrx_nofl_info("no_of_bundle_sent_after_threshold %lld",
2343  			       vdev->no_of_bundle_sent_after_threshold);
2344  		txrx_nofl_info("no_of_bundle_sent_in_timer %lld",
2345  			       vdev->no_of_bundle_sent_in_timer);
2346  		txrx_nofl_info("no_of_pkt_not_added_in_queue %lld",
2347  			       vdev->no_of_pkt_not_added_in_queue);
2348  	}
2349  	qdf_spin_unlock_bh(&pdev->tx_mutex);
2350  }
2351  #endif /* QCA_HL_NETDEV_FLOW_CONTROL */
2352