xref: /wlan-dirver/qcacld-3.0/core/dp/txrx/ol_tx_queue.c (revision c9c48ca4083fb01ec092e64d20a4c9151f3eb28c)
1 /*
2  * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 #include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
29 #include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
30 #include <ol_cfg.h>             /* ol_cfg_addba_retry */
31 #include <htt.h>                /* HTT_TX_EXT_TID_MGMT */
32 #include <ol_htt_tx_api.h>      /* htt_tx_desc_tid */
33 #include <ol_txrx_api.h>        /* ol_txrx_vdev_handle */
34 #include <ol_txrx_ctrl_api.h>   /* ol_txrx_sync, ol_tx_addba_conf */
35 #include <cdp_txrx_tx_throttle.h>
36 #include <ol_ctrl_txrx_api.h>   /* ol_ctrl_addba_req */
37 #include <ol_txrx_internal.h>   /* TXRX_ASSERT1, etc. */
38 #include <ol_tx_desc.h>         /* ol_tx_desc, ol_tx_desc_frame_list_free */
39 #include <ol_tx.h>              /* ol_tx_vdev_ll_pause_queue_send */
40 #include <ol_tx_sched.h>	/* ol_tx_sched_notify, etc. */
41 #include <ol_tx_queue.h>
42 #include <ol_txrx.h>          /* ol_tx_desc_pool_size_hl */
43 #include <ol_txrx_dbg.h>        /* ENABLE_TX_QUEUE_LOG */
44 #include <qdf_types.h>          /* bool */
45 #include "cdp_txrx_flow_ctrl_legacy.h"
46 #include <ol_txrx_peer_find.h>
47 
48 #if defined(CONFIG_HL_SUPPORT)
49 
50 #ifndef offsetof
51 #define offsetof(type, field)   ((qdf_size_t)(&((type *)0)->field))
52 #endif
53 
54 /*--- function prototypes for optional host ADDBA negotiation ---------------*/
55 
56 #define OL_TX_QUEUE_ADDBA_CHECK(pdev, txq, tx_msdu_info) /* no-op */
57 
58 #ifndef container_of
59 #define container_of(ptr, type, member) ((type *)( \
60 			(char *)(ptr) - (char *)(&((type *)0)->member)))
61 #endif
62 /*--- function definitions --------------------------------------------------*/
63 
64 /**
65  * ol_tx_queue_vdev_flush() - flush the vdev's pending tx queue frames,
66  *			      whether or not they are queued in the TX scheduler
67  * @pdev: the physical device object
68  * @vdev: the virtual device object
69  *
70  * Return: None
71  */
72 static void
73 ol_tx_queue_vdev_flush(struct ol_txrx_pdev_t *pdev, struct ol_txrx_vdev_t *vdev)
74 {
75 #define PEER_ARRAY_COUNT        10
76 	struct ol_tx_frms_queue_t *txq;
77 	struct ol_txrx_peer_t *peer, *peers[PEER_ARRAY_COUNT];
78 	int i, j, peer_count;
79 
80 	/* flush VDEV TX queues */
81 	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
82 		txq = &vdev->txqs[i];
83 		/*
84 		 * The MCAST_BCAST/DEFAULT_MGMT txqs are inserted into the
85 		 * scheduler with tid HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST/
86 		 * HTT_TX_EXT_TID_MGMT respectively, so use the same tid
87 		 * when flushing them.
88 		 */
89 		if (i == OL_TX_VDEV_MCAST_BCAST)
90 			ol_tx_queue_free(pdev,
91 					txq,
92 					HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST,
93 					false);
94 		else if (i == OL_TX_VDEV_DEFAULT_MGMT)
95 			ol_tx_queue_free(pdev,
96 					txq,
97 					HTT_TX_EXT_TID_MGMT,
98 					false);
99 		else
100 			ol_tx_queue_free(pdev,
101 					txq,
102 					(i + OL_TX_NUM_TIDS),
103 					false);
104 	}
105 	/* flush PEER TX queues */
106 	do {
107 		peer_count = 0;
108 		/* select candidate peers */
109 		qdf_spin_lock_bh(&pdev->peer_ref_mutex);
110 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
111 			for (i = 0; i < OL_TX_NUM_TIDS; i++) {
112 				txq = &peer->txqs[i];
113 				if (txq->frms) {
114 					qdf_atomic_inc(&peer->ref_cnt);
115 					QDF_TRACE(QDF_MODULE_ID_TXRX,
116 						 QDF_TRACE_LEVEL_INFO_HIGH,
117 						 "%s: peer %p peer->ref_cnt %d",
118 						  __func__, peer,
119 						  qdf_atomic_read
120 							(&peer->ref_cnt));
121 					peers[peer_count++] = peer;
122 					break;
123 				}
124 			}
125 			if (peer_count >= PEER_ARRAY_COUNT)
126 				break;
127 		}
128 		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
129 		/* flush TX queues of candidate peers */
130 		for (i = 0; i < peer_count; i++) {
131 			for (j = 0; j < OL_TX_NUM_TIDS; j++) {
132 				txq = &peers[i]->txqs[j];
133 				if (txq->frms)
134 					ol_tx_queue_free(pdev, txq, j, true);
135 			}
136 			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
137 				   "%s: Delete Peer %p\n", __func__, peers[i]);
138 			ol_txrx_peer_unref_delete(peers[i]);
139 		}
140 	} while (peer_count >= PEER_ARRAY_COUNT);
141 }
142 
143 /**
144  * ol_tx_queue_flush() - flush the pending frames in all tx queues,
145  *			 whether or not they are queued in the TX scheduler
146  * @pdev: the physical device object
147  *
148  * Return: None
149  */
150 static inline void
151 ol_tx_queue_flush(struct ol_txrx_pdev_t *pdev)
152 {
153 	struct ol_txrx_vdev_t *vdev;
154 
155 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
156 		ol_tx_queue_vdev_flush(pdev, vdev);
157 	}
158 }
159 
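/**
 * ol_tx_queue_discard() - discard queued tx frames to free up tx descriptors
 * @pdev: the physical device object
 * @flush_all: if true, discard all frames queued in the scheduler;
 *	       otherwise discard only enough frames to drop back below
 *	       the low resource threshold
 * @tx_descs: list to which the discarded tx descriptors are appended
 *
 * Return: None
 */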
160 void
161 ol_tx_queue_discard(
162 	struct ol_txrx_pdev_t *pdev,
163 	bool flush_all,
164 	ol_tx_desc_list *tx_descs)
165 {
166 	u_int16_t num;
167 	u_int16_t discarded, actual_discarded = 0;
168 
169 	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
170 
171 	if (flush_all == true)
172 		/* flush all the pending tx queues in the scheduler */
173 		num = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev) -
174 			qdf_atomic_read(&pdev->tx_queue.rsrc_cnt);
175 	else
176 		num = pdev->tx_queue.rsrc_threshold_hi -
177 			pdev->tx_queue.rsrc_threshold_lo;
178 
179 	TX_SCHED_DEBUG_PRINT("+%s : %u\n", __func__,
180 			     qdf_atomic_read(&pdev->tx_queue.rsrc_cnt));
181 	while (num > 0) {
182 		discarded = ol_tx_sched_discard_select(
183 				pdev, (u_int16_t)num, tx_descs, flush_all);
184 		if (discarded == 0)
185 			/*
186 			 * No more packets could be discarded.
187 			 * Probably tx queues are empty.
188 			 */
189 			break;
190 
191 		num -= discarded;
192 		actual_discarded += discarded;
193 	}
194 	qdf_atomic_add(actual_discarded, &pdev->tx_queue.rsrc_cnt);
195 	TX_SCHED_DEBUG_PRINT("-%s\n", __func__);
196 
197 	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
198 
199 	if (flush_all == true && num > 0)
200 		/*
201 		 * try to flush pending frames in the tx queues
202 		 * which are not queued in the TX scheduler.
203 		 */
204 		ol_tx_queue_flush(pdev);
205 }
206 
207 #ifdef CONFIG_PER_VDEV_TX_DESC_POOL
208 
209 /**
210  * is_ol_tx_discard_frames_success() - check whether currently queued tx frames
211  *				       can be discarded or not
212  * @pdev: the physical device object
213  * @tx_desc: tx descriptor ptr
214  *
215  * Return: true if the available tx descriptors are too few
216  */
217 static bool
218 is_ol_tx_discard_frames_success(struct ol_txrx_pdev_t *pdev,
219 				struct ol_tx_desc_t *tx_desc)
220 {
221 	ol_txrx_vdev_handle vdev;
222 	vdev = tx_desc->vdev;
223 	return qdf_atomic_read(&vdev->tx_desc_count) >
224 			((ol_tx_desc_pool_size_hl(pdev->ctrl_pdev) >> 1)
225 			- TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED);
226 }
227 #else
228 
229 static inline bool
230 is_ol_tx_discard_frames_success(struct ol_txrx_pdev_t *pdev,
231 				struct ol_tx_desc_t *tx_desc)
232 {
233 	return qdf_atomic_read(&pdev->tx_queue.rsrc_cnt) <=
234 				pdev->tx_queue.rsrc_threshold_lo;
235 }
236 #endif
237 
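/**
 * ol_tx_enqueue() - add a tx frame to its target tx queue
 * @pdev: the physical device object
 * @txq: which tx queue the frame is added to
 * @tx_desc: tx descriptor of the frame being queued
 * @tx_msdu_info: meta-data for the tx frame
 *
 * If too few tx descriptors are available, some currently-queued frames
 * are discarded first to make room for the new frame.
 *
 * Return: None
 */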
238 void
239 ol_tx_enqueue(
240 	struct ol_txrx_pdev_t *pdev,
241 	struct ol_tx_frms_queue_t *txq,
242 	struct ol_tx_desc_t *tx_desc,
243 	struct ol_txrx_msdu_info_t *tx_msdu_info)
244 {
245 	int bytes;
246 	struct ol_tx_sched_notify_ctx_t notify_ctx;
247 
248 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
249 
250 	/*
251 	 * If too few tx descriptors are available, drop some currently-queued
252 	 * tx frames, to provide enough tx descriptors for new frames, which
253 	 * may be higher priority than the current frames.
254 	 */
255 	if (is_ol_tx_discard_frames_success(pdev, tx_desc)) {
256 		ol_tx_desc_list tx_descs;
257 		TAILQ_INIT(&tx_descs);
258 		ol_tx_queue_discard(pdev, false, &tx_descs);
259 		/*Discard Frames in Discard List*/
260 		ol_tx_desc_frame_list_free(pdev, &tx_descs, 1 /* error */);
261 	}
262 
263 	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
264 	TAILQ_INSERT_TAIL(&txq->head, tx_desc, tx_desc_list_elem);
265 
266 	bytes = qdf_nbuf_len(tx_desc->netbuf);
267 	txq->frms++;
268 	txq->bytes += bytes;
269 	ol_tx_queue_log_enqueue(pdev, tx_msdu_info, 1, bytes);
270 
271 	if (txq->flag != ol_tx_queue_paused) {
272 		notify_ctx.event = OL_TX_ENQUEUE_FRAME;
273 		notify_ctx.frames = 1;
274 		notify_ctx.bytes = qdf_nbuf_len(tx_desc->netbuf);
275 		notify_ctx.txq = txq;
276 		notify_ctx.info.tx_msdu_info = tx_msdu_info;
277 		ol_tx_sched_notify(pdev, &notify_ctx);
278 		txq->flag = ol_tx_queue_active;
279 	}
280 
281 	if (!ETHERTYPE_IS_EAPOL_WAPI(tx_msdu_info->htt.info.ethertype))
282 		OL_TX_QUEUE_ADDBA_CHECK(pdev, txq, tx_msdu_info);
283 
284 	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
285 	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
286 }
287 
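/**
 * ol_tx_dequeue() - remove frames from the head of a tx queue
 * @pdev: the physical device object
 * @txq: which tx queue to remove frames from
 * @head: list to which the dequeued tx descriptors are appended
 * @max_frames: maximum number of frames to dequeue
 * @credit: in: available tx credit; out: credit consumed by the
 *	    dequeued frames
 * @bytes: out: total bytes of the dequeued frames
 *
 * Return: number of frames dequeued
 */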
288 u_int16_t
289 ol_tx_dequeue(
290 	struct ol_txrx_pdev_t *pdev,
291 	struct ol_tx_frms_queue_t *txq,
292 	ol_tx_desc_list *head,
293 	u_int16_t max_frames,
294 	u_int32_t *credit,
295 	int *bytes)
296 {
297 	u_int16_t num_frames;
298 	int bytes_sum;
299 	unsigned credit_sum;
300 
301 	TXRX_ASSERT2(txq->flag != ol_tx_queue_paused);
302 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
303 
304 	if (txq->frms < max_frames)
305 		max_frames = txq->frms;
306 
307 	bytes_sum = 0;
308 	credit_sum = 0;
309 	for (num_frames = 0; num_frames < max_frames; num_frames++) {
310 		unsigned frame_credit;
311 		struct ol_tx_desc_t *tx_desc;
312 		tx_desc = TAILQ_FIRST(&txq->head);
313 
314 		frame_credit = htt_tx_msdu_credit(tx_desc->netbuf);
315 		if (credit_sum + frame_credit > *credit)
316 			break;
317 
318 		credit_sum += frame_credit;
319 		bytes_sum += qdf_nbuf_len(tx_desc->netbuf);
320 		TAILQ_REMOVE(&txq->head, tx_desc, tx_desc_list_elem);
321 		TAILQ_INSERT_TAIL(head, tx_desc, tx_desc_list_elem);
322 	}
323 	txq->frms -= num_frames;
324 	txq->bytes -= bytes_sum;
325 	/* a paused queue remains paused, regardless of whether it has frames */
326 	if (txq->frms == 0 && txq->flag == ol_tx_queue_active)
327 		txq->flag = ol_tx_queue_empty;
328 
329 	ol_tx_queue_log_dequeue(pdev, txq, num_frames, bytes_sum);
330 	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
331 
332 	*bytes = bytes_sum;
333 	*credit = credit_sum;
334 	return num_frames;
335 }
336 
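/**
 * ol_tx_queue_free() - free all tx frames held in a tx queue
 * @pdev: the physical device object
 * @txq: which tx queue to empty
 * @tid: the extended TID that the queue was used for
 * @is_peer_txq: whether the queue belongs to a peer (rather than a vdev)
 *
 * Return: None
 */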
337 void
338 ol_tx_queue_free(
339 	struct ol_txrx_pdev_t *pdev,
340 	struct ol_tx_frms_queue_t *txq,
341 	int tid, bool is_peer_txq)
342 {
343 	int frms = 0, bytes = 0;
344 	struct ol_tx_desc_t *tx_desc;
345 	struct ol_tx_sched_notify_ctx_t notify_ctx;
346 	ol_tx_desc_list tx_tmp_list;
347 
348 	TAILQ_INIT(&tx_tmp_list);
349 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
350 	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
351 
352 	notify_ctx.event = OL_TX_DELETE_QUEUE;
353 	notify_ctx.txq = txq;
354 	notify_ctx.info.ext_tid = tid;
355 	ol_tx_sched_notify(pdev, &notify_ctx);
356 
357 	frms = txq->frms;
358 	tx_desc = TAILQ_FIRST(&txq->head);
359 	while (txq->frms) {
360 		bytes += qdf_nbuf_len(tx_desc->netbuf);
361 		txq->frms--;
362 		tx_desc = TAILQ_NEXT(tx_desc, tx_desc_list_elem);
363 	}
364 	ol_tx_queue_log_free(pdev, txq, tid, frms, bytes, is_peer_txq);
365 	txq->bytes -= bytes;
367 	txq->flag = ol_tx_queue_empty;
368 	/* txq->head gets reset during the TAILQ_CONCAT call */
369 	TAILQ_CONCAT(&tx_tmp_list, &txq->head, tx_desc_list_elem);
370 
371 	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
372 	/* free tx frames without holding tx_queue_spinlock */
373 	qdf_atomic_add(frms, &pdev->tx_queue.rsrc_cnt);
374 	while (frms) {
375 		tx_desc = TAILQ_FIRST(&tx_tmp_list);
376 		TAILQ_REMOVE(&tx_tmp_list, tx_desc, tx_desc_list_elem);
377 		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 0);
378 		frms--;
379 	}
380 	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
381 }
382 
383 
384 /*--- queue pause / unpause functions ---------------------------------------*/
385 
386 /**
387  * ol_txrx_peer_tid_pause_base() - suspend/pause a given peer's txq for a given tid
388  * @pdev: the physical device object
389  * @peer: peer device object
390  * @tid: tid for which queue needs to be paused
391  *
392  * Return: None
393  */
394 static void
395 ol_txrx_peer_tid_pause_base(
396 	struct ol_txrx_pdev_t *pdev,
397 	struct ol_txrx_peer_t *peer,
398 	int tid)
399 {
400 	struct ol_tx_frms_queue_t *txq = &peer->txqs[tid];
401 
402 	if (txq->paused_count.total++ == 0) {
403 		struct ol_tx_sched_notify_ctx_t notify_ctx;
404 
405 		notify_ctx.event = OL_TX_PAUSE_QUEUE;
406 		notify_ctx.txq = txq;
407 		notify_ctx.info.ext_tid = tid;
408 		ol_tx_sched_notify(pdev, &notify_ctx);
409 		txq->flag = ol_tx_queue_paused;
410 	}
411 }
412 #ifdef QCA_BAD_PEER_TX_FLOW_CL
413 
414 /**
415  * ol_txrx_peer_pause_but_no_mgmt_q_base() - suspend/pause all txqs except
416  *					     management queue for a given peer
417  * @pdev: the physical device object
418  * @peer: peer device object
419  *
420  * Return: None
421  */
422 static void
423 ol_txrx_peer_pause_but_no_mgmt_q_base(
424 	struct ol_txrx_pdev_t *pdev,
425 	struct ol_txrx_peer_t *peer)
426 {
427 	int i;
428 	for (i = 0; i < OL_TX_MGMT_TID; i++)
429 		ol_txrx_peer_tid_pause_base(pdev, peer, i);
430 }
431 #endif
432 
433 
434 /**
435  * ol_txrx_peer_pause_base() - suspend/pause all txqs for a given peer
436  * @pdev: the physical device object
437  * @peer: peer device object
438  *
439  * Return: None
440  */
441 static void
442 ol_txrx_peer_pause_base(
443 	struct ol_txrx_pdev_t *pdev,
444 	struct ol_txrx_peer_t *peer)
445 {
446 	int i;
447 	for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++)
448 		ol_txrx_peer_tid_pause_base(pdev, peer, i);
449 }
450 
451 /**
452  * ol_txrx_peer_tid_unpause_base() - unpause a given peer's txq for a given tid
453  * @pdev: the physical device object
454  * @peer: peer device object
455  * @tid: tid for which queue needs to be unpaused
456  *
457  * Return: None
458  */
459 static void
460 ol_txrx_peer_tid_unpause_base(
461 	struct ol_txrx_pdev_t *pdev,
462 	struct ol_txrx_peer_t *peer,
463 	int tid)
464 {
465 	struct ol_tx_frms_queue_t *txq = &peer->txqs[tid];
466 	/*
467 	 * Don't actually unpause the tx queue until all pause requests
468 	 * have been removed.
469 	 */
470 	TXRX_ASSERT2(txq->paused_count.total > 0);
471 	/* return, if not already paused */
472 	if (txq->paused_count.total == 0)
473 		return;
474 
475 	if (--txq->paused_count.total == 0) {
476 		struct ol_tx_sched_notify_ctx_t notify_ctx;
477 
478 		notify_ctx.event = OL_TX_UNPAUSE_QUEUE;
479 		notify_ctx.txq = txq;
480 		notify_ctx.info.ext_tid = tid;
481 		ol_tx_sched_notify(pdev, &notify_ctx);
482 
483 		if (txq->frms == 0) {
484 			txq->flag = ol_tx_queue_empty;
485 		} else {
486 			txq->flag = ol_tx_queue_active;
487 			/*
488 			 * Now that there are new tx frames available to download,
489 			 * invoke the scheduling function, to see if it wants to
490 			 * download the new frames.
491 			 * Since the queue lock is currently held, and since
492 			 * the scheduler function takes the lock, temporarily
493 			 * release the lock.
494 			 */
495 			qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
496 			ol_tx_sched(pdev);
497 			qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
498 		}
499 	}
500 }
501 #ifdef QCA_BAD_PEER_TX_FLOW_CL
502 /**
503  * ol_txrx_peer_unpause_but_no_mgmt_q_base() - unpause all txqs except
504  *					       management queue for a given peer
505  * @pdev: the physical device object
506  * @peer: peer device object
507  *
508  * Return: None
509  */
510 static void
511 ol_txrx_peer_unpause_but_no_mgmt_q_base(
512 	struct ol_txrx_pdev_t *pdev,
513 	struct ol_txrx_peer_t *peer)
514 {
515 	int i;
516 	for (i = 0; i < OL_TX_MGMT_TID; i++)
517 		ol_txrx_peer_tid_unpause_base(pdev, peer, i);
518 }
519 #endif
520 
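/**
 * ol_txrx_peer_tid_unpause() - unpause a peer's tx queue(s)
 * @peer: peer device object
 * @tid: tid to unpause, or -1 to unpause all of the peer's tx queues
 *
 * Return: None
 */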
521 void
522 ol_txrx_peer_tid_unpause(ol_txrx_peer_handle peer, int tid)
523 {
524 	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
525 
526 	/* TO DO: log the queue unpause */
527 
528 	/* acquire the mutex lock, since we'll be modifying the queues */
529 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
530 	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
531 
532 	if (tid == -1) {
533 		int i;
534 		for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++)
535 			ol_txrx_peer_tid_unpause_base(pdev, peer, i);
536 
537 	} else {
538 		ol_txrx_peer_tid_unpause_base(pdev, peer, tid);
539 	}
540 
541 	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
542 	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
543 }
544 
545 void
546 ol_txrx_throttle_pause(ol_txrx_pdev_handle pdev)
547 {
548 #if defined(QCA_SUPPORT_TX_THROTTLE)
549 	qdf_spin_lock_bh(&pdev->tx_throttle.mutex);
550 
551 	if (pdev->tx_throttle.is_paused == true) {
552 		qdf_spin_unlock_bh(&pdev->tx_throttle.mutex);
553 		return;
554 	}
555 
556 	pdev->tx_throttle.is_paused = true;
557 	qdf_spin_unlock_bh(&pdev->tx_throttle.mutex);
558 #endif
559 	ol_txrx_pdev_pause(pdev, 0);
560 }
561 
562 void
563 ol_txrx_throttle_unpause(ol_txrx_pdev_handle pdev)
564 {
565 #if defined(QCA_SUPPORT_TX_THROTTLE)
566 	qdf_spin_lock_bh(&pdev->tx_throttle.mutex);
567 
568 	if (pdev->tx_throttle.is_paused == false) {
569 		qdf_spin_unlock_bh(&pdev->tx_throttle.mutex);
570 		return;
571 	}
572 
573 	pdev->tx_throttle.is_paused = false;
574 	qdf_spin_unlock_bh(&pdev->tx_throttle.mutex);
575 #endif
576 	ol_txrx_pdev_unpause(pdev, 0);
577 }
578 
579 void
580 ol_txrx_vdev_pause(void *pvdev, uint32_t reason)
581 {
582 	ol_txrx_vdev_handle vdev = pvdev;
583 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
584 	struct ol_txrx_peer_t *peer;
585 	/* TO DO: log the queue pause */
586 	/* acquire the mutex lock, since we'll be modifying the queues */
587 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
588 
589 
590 	/* use peer_ref_mutex before accessing peer_list */
591 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
592 	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
593 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
594 		ol_txrx_peer_pause_base(pdev, peer);
595 	}
596 	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
597 	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
598 
599 	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
600 }
601 
602 
603 void ol_txrx_vdev_unpause(void *pvdev, uint32_t reason)
604 {
605 	ol_txrx_vdev_handle vdev = pvdev;
606 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
607 	struct ol_txrx_peer_t *peer;
608 	/* TO DO: log the queue unpause */
609 	/* acquire the mutex lock, since we'll be modifying the queues */
610 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
611 
612 
613 
614 	/* take peer_ref_mutex before accessing peer_list */
615 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
616 	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
617 
618 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
619 		int i;
620 		for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++)
621 			ol_txrx_peer_tid_unpause_base(pdev, peer, i);
622 	}
623 	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
624 	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
625 
626 	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
627 }
628 
629 void ol_txrx_vdev_flush(void *pvdev)
630 {
631 	ol_txrx_vdev_handle vdev = pvdev;
632 
633 	ol_tx_queue_vdev_flush(vdev->pdev, vdev);
634 }
635 
636 #ifdef QCA_BAD_PEER_TX_FLOW_CL
637 
638 /**
639  * ol_txrx_peer_bal_add_limit_peer() - add one peer into limit list
640  * @pdev:		Pointer to PDEV structure.
641  * @peer_id:	Peer Identifier.
642  * @peer_limit:	Peer limit threshold
643  *
644  * Add one peer into the limit list of pdev
645  * Note that the peer limit info will also be updated.
646  * If this is the first limited peer, start the balance timer.
647  *
648  * Return: None
649  */
650 void
651 ol_txrx_peer_bal_add_limit_peer(struct ol_txrx_pdev_t *pdev,
652 				u_int16_t peer_id, u_int16_t peer_limit)
653 {
654 	u_int16_t i, existed = 0;
655 	struct ol_txrx_peer_t *peer = NULL;
656 
657 	for (i = 0; i < pdev->tx_peer_bal.peer_num; i++) {
658 		if (pdev->tx_peer_bal.limit_list[i].peer_id == peer_id) {
659 			existed = 1;
660 			break;
661 		}
662 	}
663 
664 	if (!existed) {
665 		u_int32_t peer_num = pdev->tx_peer_bal.peer_num;
666 		/* Check if peer_num has reached the capacity */
667 		if (peer_num >= MAX_NO_PEERS_IN_LIMIT) {
668 			TX_SCHED_DEBUG_PRINT_ALWAYS(
669 				"reached the maximum peer num %d\n",
670 				peer_num);
671 			return;
672 		}
673 		pdev->tx_peer_bal.limit_list[peer_num].peer_id = peer_id;
674 		pdev->tx_peer_bal.limit_list[peer_num].limit_flag = true;
675 		pdev->tx_peer_bal.limit_list[peer_num].limit = peer_limit;
676 		pdev->tx_peer_bal.peer_num++;
677 
678 		peer = ol_txrx_peer_find_by_id(pdev, peer_id);
679 		if (peer) {
680 			peer->tx_limit_flag = true;
681 			peer->tx_limit = peer_limit;
682 		}
683 
684 		TX_SCHED_DEBUG_PRINT_ALWAYS(
685 			"Add one peer into limit queue, peer_id %d, cur peer num %d\n",
686 			peer_id,
687 			pdev->tx_peer_bal.peer_num);
688 	}
689 
690 	/* Only start the timer once */
691 	if (pdev->tx_peer_bal.peer_bal_timer_state ==
692 					ol_tx_peer_bal_timer_inactive) {
693 		qdf_timer_start(&pdev->tx_peer_bal.peer_bal_timer,
694 					pdev->tx_peer_bal.peer_bal_period_ms);
695 		pdev->tx_peer_bal.peer_bal_timer_state =
696 				ol_tx_peer_bal_timer_active;
697 	}
698 }
699 
700 /**
701  * ol_txrx_peer_bal_remove_limit_peer() - remove one peer from limit list
702  * @pdev:		Pointer to PDEV structure.
703  * @peer_id:	Peer Identifier.
704  *
705  * Remove one peer from the limit list of pdev
706  * Note that the timer is only stopped when no peer is left in limit state
707  *
708  * Return: None
709  */
710 void
711 ol_txrx_peer_bal_remove_limit_peer(struct ol_txrx_pdev_t *pdev,
712 				   u_int16_t peer_id)
713 {
714 	u_int16_t i;
715 	struct ol_txrx_peer_t *peer = NULL;
716 
717 	for (i = 0; i < pdev->tx_peer_bal.peer_num; i++) {
718 		if (pdev->tx_peer_bal.limit_list[i].peer_id == peer_id) {
719 			pdev->tx_peer_bal.limit_list[i] =
720 				pdev->tx_peer_bal.limit_list[
721 					pdev->tx_peer_bal.peer_num - 1];
722 			pdev->tx_peer_bal.peer_num--;
723 
724 			peer = ol_txrx_peer_find_by_id(pdev, peer_id);
725 			if (peer)
726 				peer->tx_limit_flag = false;
727 
728 
729 			TX_SCHED_DEBUG_PRINT(
730 				"Remove one peer from limitq, peer_id %d, cur peer num %d\n",
731 				peer_id,
732 				pdev->tx_peer_bal.peer_num);
733 			break;
734 		}
735 	}
736 
737 	/* Only stop the timer if no peer in limit state */
738 	if (pdev->tx_peer_bal.peer_num == 0) {
739 		qdf_timer_stop(&pdev->tx_peer_bal.peer_bal_timer);
740 		pdev->tx_peer_bal.peer_bal_timer_state =
741 				ol_tx_peer_bal_timer_inactive;
742 	}
743 }
744 
745 void
746 ol_txrx_peer_pause_but_no_mgmt_q(ol_txrx_peer_handle peer)
747 {
748 	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
749 
750 	/* TO DO: log the queue pause */
751 
752 	/* acquire the mutex lock, since we'll be modifying the queues */
753 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
754 	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
755 
756 	ol_txrx_peer_pause_but_no_mgmt_q_base(pdev, peer);
757 
758 	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
759 	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
760 }
761 
762 void
763 ol_txrx_peer_unpause_but_no_mgmt_q(ol_txrx_peer_handle peer)
764 {
765 	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
766 
767 	/* TO DO: log the queue pause */
768 
769 	/* acquire the mutex lock, since we'll be modifying the queues */
770 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
771 	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
772 
773 	ol_txrx_peer_unpause_but_no_mgmt_q_base(pdev, peer);
774 
775 	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
776 	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
777 }
778 
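/**
 * ol_tx_bad_peer_dequeue_check() - limit the dequeue count for a peer
 *				    that is under tx flow control
 * @txq: tx queue being scheduled
 * @max_frames: requested number of frames to dequeue
 * @tx_limit_flag: out: set to 1 if the peer's tx limit applies
 *
 * Return: the peer's tx limit if it is lower than max_frames,
 *	   otherwise max_frames
 */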
779 u_int16_t
780 ol_tx_bad_peer_dequeue_check(struct ol_tx_frms_queue_t *txq,
781 			     u_int16_t max_frames,
782 			     u_int16_t *tx_limit_flag)
783 {
784 	if (txq && (txq->peer) && (txq->peer->tx_limit_flag) &&
785 	    (txq->peer->tx_limit < max_frames)) {
786 		TX_SCHED_DEBUG_PRINT(
787 			"Peer ID %d goes to limit, threshold is %d\n",
788 			txq->peer->peer_ids[0], txq->peer->tx_limit);
789 		*tx_limit_flag = 1;
790 		return txq->peer->tx_limit;
791 	} else {
792 		return max_frames;
793 	}
794 }
795 
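/**
 * ol_tx_bad_peer_update_tx_limit() - decrement a limited peer's remaining
 *				      tx allowance after a dequeue
 * @pdev: the physical device object
 * @txq: tx queue that was scheduled
 * @frames: number of frames that were dequeued
 * @tx_limit_flag: whether the peer's tx limit was applied
 *
 * Return: None
 */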
796 void
797 ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
798 			       struct ol_tx_frms_queue_t *txq,
799 			       u_int16_t frames,
800 			       u_int16_t tx_limit_flag)
801 {
802 	qdf_spin_lock_bh(&pdev->tx_peer_bal.mutex);
803 	if (txq && tx_limit_flag && (txq->peer) &&
804 	    (txq->peer->tx_limit_flag)) {
805 		if (txq->peer->tx_limit < frames)
806 			txq->peer->tx_limit = 0;
807 		else
808 			txq->peer->tx_limit -= frames;
809 
810 		TX_SCHED_DEBUG_PRINT_ALWAYS(
811 				"Peer ID %d in limit, deque %d frms\n",
812 				txq->peer->peer_ids[0], frames);
813 	} else if (txq->peer) {
814 		TX_SCHED_DEBUG_PRINT("Download peer_id %d, num_frames %d\n",
815 				     txq->peer->peer_ids[0], frames);
816 	}
817 	qdf_spin_unlock_bh(&pdev->tx_peer_bal.mutex);
818 }
819 
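/**
 * ol_txrx_bad_peer_txctl_set_setting() - enable/disable bad peer tx flow
 *					  control and set its parameters
 * @ppdev: the physical device object
 * @enable: enable/disable bad peer tx flow control
 * @period: peer balance timer period in ms
 * @txq_limit: tx queue limit
 *
 * Return: None
 */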
820 void
821 ol_txrx_bad_peer_txctl_set_setting(void *ppdev,
822 				   int enable, int period, int txq_limit)
823 {
824 	struct ol_txrx_pdev_t *pdev = ppdev;
825 	if (enable)
826 		pdev->tx_peer_bal.enabled = ol_tx_peer_bal_enable;
827 	else
828 		pdev->tx_peer_bal.enabled = ol_tx_peer_bal_disable;
829 
830 	/* Set the current settings */
831 	pdev->tx_peer_bal.peer_bal_period_ms = period;
832 	pdev->tx_peer_bal.peer_bal_txq_limit = txq_limit;
833 }
834 
835 void
836 ol_txrx_bad_peer_txctl_update_threshold(void *ppdev,
837 					int level, int tput_thresh,
838 					int tx_limit)
839 {
840 	struct ol_txrx_pdev_t *pdev = ppdev;
841 
842 	/* Set the current settings */
843 	pdev->tx_peer_bal.ctl_thresh[level].tput_thresh =
844 		tput_thresh;
845 	pdev->tx_peer_bal.ctl_thresh[level].tx_limit =
846 		tx_limit;
847 }
848 
849 /**
850  * ol_tx_pdev_peer_bal_timer() - timer function
851  * @context: context of timer function
852  *
853  * Return: None
854  */
855 void
856 ol_tx_pdev_peer_bal_timer(void *context)
857 {
858 	int i;
859 	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;
860 
861 	qdf_spin_lock_bh(&pdev->tx_peer_bal.mutex);
862 
863 	for (i = 0; i < pdev->tx_peer_bal.peer_num; i++) {
864 		if (pdev->tx_peer_bal.limit_list[i].limit_flag) {
865 			u_int16_t peer_id =
866 				pdev->tx_peer_bal.limit_list[i].peer_id;
867 			u_int16_t tx_limit =
868 				pdev->tx_peer_bal.limit_list[i].limit;
869 
870 			struct ol_txrx_peer_t *peer = NULL;
871 			peer = ol_txrx_peer_find_by_id(pdev, peer_id);
872 			TX_SCHED_DEBUG_PRINT(
873 				"%s peer_id %d  peer = %p tx limit %d\n",
874 				__func__, peer_id,
875 				peer, tx_limit);
876 
877 			/* It is possible the peer limit is still not 0,
878 			   but that scenario does not need to be handled */
879 			if (peer) {
880 				peer->tx_limit = tx_limit;
881 			} else {
882 				ol_txrx_peer_bal_remove_limit_peer(pdev,
883 								   peer_id);
884 				TX_SCHED_DEBUG_PRINT_ALWAYS(
885 					"No such peer, peer id = %d\n",
886 					peer_id);
887 			}
888 		}
889 	}
890 
891 	qdf_spin_unlock_bh(&pdev->tx_peer_bal.mutex);
892 
893 	if (pdev->tx_peer_bal.peer_num) {
894 		ol_tx_sched(pdev);
895 		qdf_timer_start(&pdev->tx_peer_bal.peer_bal_timer,
896 					pdev->tx_peer_bal.peer_bal_period_ms);
897 	}
898 }
899 
900 void
901 ol_txrx_set_txq_peer(
902 	struct ol_tx_frms_queue_t *txq,
903 	struct ol_txrx_peer_t *peer)
904 {
905 	if (txq)
906 		txq->peer = peer;
907 }
908 
909 void ol_tx_badpeer_flow_cl_init(struct ol_txrx_pdev_t *pdev)
910 {
911 	u_int32_t timer_period;
912 
913 	qdf_spinlock_create(&pdev->tx_peer_bal.mutex);
914 	pdev->tx_peer_bal.peer_num = 0;
915 	pdev->tx_peer_bal.peer_bal_timer_state
916 		= ol_tx_peer_bal_timer_inactive;
917 
918 	timer_period = 2000;
919 	pdev->tx_peer_bal.peer_bal_period_ms = timer_period;
920 
921 	qdf_timer_init(
922 			pdev->osdev,
923 			&pdev->tx_peer_bal.peer_bal_timer,
924 			ol_tx_pdev_peer_bal_timer,
925 			pdev, QDF_TIMER_TYPE_SW);
926 }
927 
928 void ol_tx_badpeer_flow_cl_deinit(struct ol_txrx_pdev_t *pdev)
929 {
930 	qdf_timer_stop(&pdev->tx_peer_bal.peer_bal_timer);
931 	pdev->tx_peer_bal.peer_bal_timer_state =
932 					ol_tx_peer_bal_timer_inactive;
933 	qdf_timer_free(&pdev->tx_peer_bal.peer_bal_timer);
934 	qdf_spinlock_destroy(&pdev->tx_peer_bal.mutex);
935 }
936 
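/**
 * ol_txrx_peer_link_status_handler() - process a peer link status report
 * @pdev: the physical device object
 * @peer_num: number of entries in the report
 * @peer_link_status: array of per-peer rate reports
 *
 * Based on each peer's reported throughput, add the peer to or remove it
 * from the tx limit list, and pause/unpause its non-management tx queues.
 *
 * Return: None
 */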
937 void
938 ol_txrx_peer_link_status_handler(
939 	ol_txrx_pdev_handle pdev,
940 	u_int16_t peer_num,
941 	struct rate_report_t *peer_link_status)
942 {
943 	u_int16_t i = 0;
944 	struct ol_txrx_peer_t *peer = NULL;
945 
946 	if (NULL == pdev) {
947 		TX_SCHED_DEBUG_PRINT_ALWAYS("Error: NULL pdev handle\n");
948 		return;
949 	}
950 
951 	if (NULL == peer_link_status) {
952 		TX_SCHED_DEBUG_PRINT_ALWAYS(
953 			"Error: NULL link report message, peer num %d\n",
954 			peer_num);
955 		return;
956 	}
957 
958 	/* Check if bad peer tx flow CL is enabled */
959 	if (pdev->tx_peer_bal.enabled != ol_tx_peer_bal_enable) {
960 		TX_SCHED_DEBUG_PRINT_ALWAYS(
961 			"Bad peer tx flow CL is not enabled, ignore it\n");
962 		return;
963 	}
964 
965 	/* Check peer_num is reasonable */
966 	if (peer_num > MAX_NO_PEERS_IN_LIMIT) {
967 		TX_SCHED_DEBUG_PRINT_ALWAYS(
968 			"%s: Bad peer_num %d\n", __func__, peer_num);
969 		return;
970 	}
971 
972 	TX_SCHED_DEBUG_PRINT_ALWAYS("%s: peer_num %d\n", __func__, peer_num);
973 
974 	for (i = 0; i < peer_num; i++) {
975 		u_int16_t peer_limit, peer_id;
976 		u_int16_t pause_flag, unpause_flag;
977 		u_int32_t peer_phy, peer_tput;
978 
979 		peer_id = peer_link_status->id;
980 		peer_phy = peer_link_status->phy;
981 		peer_tput = peer_link_status->rate;
982 
983 		TX_SCHED_DEBUG_PRINT("%s: peer id %d tput %d phy %d\n",
984 				     __func__, peer_id, peer_tput, peer_phy);
985 
986 		/* Sanity check for the PHY mode value */
987 		if (peer_phy > TXRX_IEEE11_AC) {
988 			TX_SCHED_DEBUG_PRINT_ALWAYS(
989 				"%s: Illegal PHY value %d for peer_id %d\n",
990 				__func__, peer_link_status->phy, peer_id);
991 			continue;
992 		}
993 		pause_flag   = false;
994 		unpause_flag = false;
995 		peer_limit   = 0;
996 
997 		/* From now on, PHY, PER info should be all fine */
998 		qdf_spin_lock_bh(&pdev->tx_peer_bal.mutex);
999 
1000 		/* Update link status analysis for each peer */
1001 		peer = ol_txrx_peer_find_by_id(pdev, peer_id);
1002 		if (peer) {
1003 			u_int32_t thresh, limit, phy;
1004 			phy = peer_link_status->phy;
1005 			thresh = pdev->tx_peer_bal.ctl_thresh[phy].tput_thresh;
1006 			limit = pdev->tx_peer_bal.ctl_thresh[phy].tx_limit;
1007 
1008 			if (((peer->tx_pause_flag) || (peer->tx_limit_flag)) &&
1009 			    (peer_tput) && (peer_tput < thresh))
1010 				peer_limit = limit;
1011 
1012 			if (peer_limit) {
1013 				ol_txrx_peer_bal_add_limit_peer(pdev, peer_id,
1014 								peer_limit);
1015 			} else if (pdev->tx_peer_bal.peer_num) {
1016 				TX_SCHED_DEBUG_PRINT(
1017 					"%s: Check if peer_id %d exits the limit\n",
1018 					__func__, peer_id);
1019 				ol_txrx_peer_bal_remove_limit_peer(pdev,
1020 								   peer_id);
1021 			}
1022 			if ((peer_tput == 0) &&
1023 			    (peer->tx_pause_flag == false)) {
1024 				peer->tx_pause_flag = true;
1025 				pause_flag = true;
1026 			} else if (peer->tx_pause_flag) {
1027 				unpause_flag = true;
1028 				peer->tx_pause_flag = false;
1029 			}
1030 		} else {
1031 			TX_SCHED_DEBUG_PRINT(
1032 				"%s: Remove peer_id %d from limit list\n",
1033 				__func__, peer_id);
1034 			ol_txrx_peer_bal_remove_limit_peer(pdev, peer_id);
1035 		}
1036 
1037 		peer_link_status++;
1038 		qdf_spin_unlock_bh(&pdev->tx_peer_bal.mutex);
1039 		if (pause_flag)
1040 			ol_txrx_peer_pause_but_no_mgmt_q(peer);
1041 		else if (unpause_flag)
1042 			ol_txrx_peer_unpause_but_no_mgmt_q(peer);
1043 	}
1044 }
1045 #endif /* QCA_BAD_PEER_TX_FLOW_CL */
1046 
1047 /*--- ADDBA triggering functions --------------------------------------------*/
1048 
1049 
1050 /*=== debug functions =======================================================*/
1051 
1052 /*--- queue event log -------------------------------------------------------*/
1053 
1054 #if defined(DEBUG_HL_LOGGING)
1055 
1056 #define negative_sign -1
1057 
1058 /**
1059  * ol_tx_queue_log_entry_type_info() - log queues entry info
1060  * @type: log entry type
1061  * @size: size
1062  * @align: alignment
1063  * @var_size: variable size record
1064  *
1065  * Return: None
1066  */
1067 static void
1068 ol_tx_queue_log_entry_type_info(
1069 	u_int8_t *type, int *size, int *align, int var_size)
1070 {
1071 	switch (*type) {
1072 	case ol_tx_log_entry_type_enqueue:
1073 	case ol_tx_log_entry_type_dequeue:
1074 	case ol_tx_log_entry_type_queue_free:
1075 		*size = sizeof(struct ol_tx_log_queue_add_t);
1076 		*align = 2;
1077 		break;
1078 
1079 	case ol_tx_log_entry_type_queue_state:
1080 		*size = offsetof(struct ol_tx_log_queue_state_var_sz_t, data);
1081 		*align = 4;
1082 		if (var_size) {
1083 			/* read the variable-sized record,
1084 			 * to see how large it is
1085 			 */
1086 			int align_pad;
1087 			struct ol_tx_log_queue_state_var_sz_t *record;
1088 
1089 			align_pad =
1090 				(*align - ((((u_int32_t) *type) + 1)))
1091 							& (*align - 1);
1092 			record = (struct ol_tx_log_queue_state_var_sz_t *)
1093 				(type + 1 + align_pad);
1094 			*size += record->num_cats_active *
1095 				(sizeof(u_int32_t) /* bytes */ +
1096 					sizeof(u_int16_t) /* frms */);
1097 		}
1098 		break;
1099 
1100 	/*case ol_tx_log_entry_type_drop:*/
1101 	default:
1102 		*size = 0;
1103 		*align = 0;
1104 	};
1105 }
1106 
1107 /**
1108  * ol_tx_queue_log_oldest_update() - log oldest record
1109  * @pdev: pointer to txrx handle
1110  * @offset: offset value
1111  *
1112  * Return: None
1113  */
1114 static void
1115 ol_tx_queue_log_oldest_update(struct ol_txrx_pdev_t *pdev, int offset)
1116 {
1117 	int oldest_record_offset;
1118 
1119 	/*
1120 	 * If the offset of the oldest record is between the current and
1121 	 * new values of the offset of the newest record, then the oldest
1122 	 * record has to be dropped from the log to provide room for the
1123 	 * newest record.
1124 	 * Advance the offset of the oldest record until it points to a
1125 	 * record that is beyond the new value of the offset of the newest
1126 	 * record.
1127 	 */
1128 	if (!pdev->txq_log.wrapped)
1129 		/*
1130 		 * The log has not even filled up yet - no need to remove
1131 		 * the oldest record to make room for a new record.
1132 		 */
1133 		return;
1134 
1135 
1136 	if (offset > pdev->txq_log.offset) {
1137 		/*
1138 		 * not wraparound -
1139 		 * The oldest record offset may have already wrapped around,
1140 		 * even if the newest record has not.  In this case, then
1141 		 * the oldest record offset is fine where it is.
1142 		 */
1143 		if (pdev->txq_log.oldest_record_offset == 0)
1144 			return;
1145 
1146 		oldest_record_offset = pdev->txq_log.oldest_record_offset;
1147 	} else
1148 		/* wraparound */
1149 		oldest_record_offset = 0;
1150 
1151 
1152 	while (oldest_record_offset < offset) {
1153 		int size, align, align_pad;
1154 		u_int8_t type;
1155 
1156 		type = pdev->txq_log.data[oldest_record_offset];
1157 		if (type == ol_tx_log_entry_type_wrap) {
1158 			oldest_record_offset = 0;
1159 			break;
1160 		}
1161 		ol_tx_queue_log_entry_type_info(
1162 				&pdev->txq_log.data[oldest_record_offset],
1163 				&size, &align, 1);
1164 		align_pad =
1165 			(align - ((oldest_record_offset + 1/*type*/)))
1166 							& (align - 1);
1167 		/*
1168 		   QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
1169 		   "TXQ LOG old alloc: offset %d, type %d, size %d (%d)\n",
1170 		   oldest_record_offset, type, size, size + 1 + align_pad);
1171 		 */
1172 		oldest_record_offset += size + 1 + align_pad;
1173 	}
1174 	if (oldest_record_offset >= pdev->txq_log.size)
1175 		oldest_record_offset = 0;
1176 
1177 	pdev->txq_log.oldest_record_offset = oldest_record_offset;
1178 }
1179 
1180 /**
1181  * ol_tx_queue_log_alloc() - log data allocation
1182  * @pdev: physical device object
1183  * @type: ol_tx_log_entry_type
1184  * @extra_bytes: extra bytes
1185  *
1186  *
1187  * Return: log element
1188  */
1189 void*
1190 ol_tx_queue_log_alloc(
1191 	struct ol_txrx_pdev_t *pdev,
1192 	u_int8_t type /* ol_tx_log_entry_type */,
1193 	int extra_bytes)
1194 {
1195 	int size, align, align_pad;
1196 	int offset;
1197 
1198 	ol_tx_queue_log_entry_type_info(&type, &size, &align, 0);
1199 	size += extra_bytes;
1200 
1201 	offset = pdev->txq_log.offset;
1202 	align_pad = (align - ((offset + 1/*type*/))) & (align - 1);
1203 
1204 	if (pdev->txq_log.size - offset >= size + 1 + align_pad)
1205 		/* no need to wrap around */
1206 		goto alloc_found;
1207 
1208 	if (!pdev->txq_log.allow_wrap)
1209 		return NULL; /* log is full and can't wrap */
1210 
1211 	/* handle wrap-around */
1212 	pdev->txq_log.wrapped = 1;
1213 	offset = 0;
1214 	align_pad = (align - ((offset + 1/*type*/))) & (align - 1);
1215 	/* sanity check that the log is large enough to hold this entry */
1216 	if (pdev->txq_log.size <= size + 1 + align_pad)
1217 		return NULL;
1218 
1219 
1220 alloc_found:
1221 	ol_tx_queue_log_oldest_update(pdev, offset + size + 1 + align_pad);
1222 	if (offset == 0)
1223 		pdev->txq_log.data[pdev->txq_log.offset] =
1224 						ol_tx_log_entry_type_wrap;
1225 
1226 	/*
1227 	   QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
1228 	   "TXQ LOG new alloc: offset %d, type %d, size %d (%d)\n",
1229 	   offset, type, size, size + 1 + align_pad);
1230 	 */
1231 	pdev->txq_log.data[offset] = type;
1232 	pdev->txq_log.offset = offset + size + 1 + align_pad;
1233 	if (pdev->txq_log.offset >= pdev->txq_log.size) {
1234 		pdev->txq_log.offset = 0;
1235 		pdev->txq_log.wrapped = 1;
1236 	}
1237 	return &pdev->txq_log.data[offset + 1 + align_pad];
1238 }
1239 
1240 /**
1241  * ol_tx_queue_log_record_display() - show log record of tx queue
1242  * @pdev: pointer to txrx handle
1243  * @offset: offset value
1244  *
1245  * Return: size of record
1246  */
1247 static int
1248 ol_tx_queue_log_record_display(struct ol_txrx_pdev_t *pdev, int offset)
1249 {
1250 	int size, align, align_pad;
1251 	u_int8_t type;
1252 	struct ol_txrx_peer_t *peer;
1253 
1254 	qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1255 	type = pdev->txq_log.data[offset];
1256 	ol_tx_queue_log_entry_type_info(
1257 			&pdev->txq_log.data[offset], &size, &align, 1);
1258 	align_pad = (align - ((offset + 1/*type*/))) & (align - 1);
1259 
1260 	switch (type) {
1261 	case ol_tx_log_entry_type_enqueue:
1262 	{
1263 		struct ol_tx_log_queue_add_t record;
1264 		qdf_mem_copy(&record,
1265 			     &pdev->txq_log.data[offset + 1 + align_pad],
1266 			     sizeof(struct ol_tx_log_queue_add_t));
1267 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1268 
1269 		if (record.peer_id != 0xffff) {
1270 			peer = ol_txrx_peer_find_by_id(pdev,
1271 						       record.peer_id);
1272 			if (peer != NULL)
1273 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1274 					  QDF_TRACE_LEVEL_ERROR,
1275 					  "Q: %6d  %5d  %3d  %4d (%02x:%02x:%02x:%02x:%02x:%02x)",
1276 					  record.num_frms, record.num_bytes,
1277 					  record.tid,
1278 					  record.peer_id,
1279 					  peer->mac_addr.raw[0],
1280 					  peer->mac_addr.raw[1],
1281 					  peer->mac_addr.raw[2],
1282 					  peer->mac_addr.raw[3],
1283 					  peer->mac_addr.raw[4],
1284 					  peer->mac_addr.raw[5]);
1285 			else
1286 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1287 					  QDF_TRACE_LEVEL_ERROR,
1288 					  "Q: %6d  %5d  %3d  %4d",
1289 					  record.num_frms, record.num_bytes,
1290 					  record.tid, record.peer_id);
1291 		} else {
1292 			QDF_TRACE(QDF_MODULE_ID_TXRX,
1293 				  QDF_TRACE_LEVEL_INFO,
1294 				  "Q: %6d  %5d  %3d  from vdev",
1295 				  record.num_frms, record.num_bytes,
1296 				   record.tid);
1297 		}
1298 		break;
1299 	}
1300 	case ol_tx_log_entry_type_dequeue:
1301 	{
1302 		struct ol_tx_log_queue_add_t record;
1303 		qdf_mem_copy(&record,
1304 			     &pdev->txq_log.data[offset + 1 + align_pad],
1305 			     sizeof(struct ol_tx_log_queue_add_t));
1306 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1307 
1308 		if (record.peer_id != 0xffff) {
1309 			peer = ol_txrx_peer_find_by_id(pdev, record.peer_id);
1310 			if (peer != NULL)
1311 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1312 					  QDF_TRACE_LEVEL_ERROR,
1313 					  "DQ: %6d  %5d  %3d  %4d (%02x:%02x:%02x:%02x:%02x:%02x)",
1314 					  record.num_frms, record.num_bytes,
1315 					  record.tid,
1316 					  record.peer_id,
1317 					  peer->mac_addr.raw[0],
1318 					  peer->mac_addr.raw[1],
1319 					  peer->mac_addr.raw[2],
1320 					  peer->mac_addr.raw[3],
1321 					  peer->mac_addr.raw[4],
1322 					  peer->mac_addr.raw[5]);
1323 			else
1324 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1325 					  QDF_TRACE_LEVEL_ERROR,
1326 					  "DQ: %6d  %5d  %3d  %4d",
1327 					  record.num_frms, record.num_bytes,
1328 					  record.tid, record.peer_id);
1329 		} else {
1330 			QDF_TRACE(QDF_MODULE_ID_TXRX,
1331 				  QDF_TRACE_LEVEL_INFO,
1332 				  "DQ: %6d  %5d  %3d  from vdev",
1333 				  record.num_frms, record.num_bytes,
1334 				  record.tid);
1335 		}
1336 		break;
1337 	}
1338 	case ol_tx_log_entry_type_queue_free:
1339 	{
1340 		struct ol_tx_log_queue_add_t record;
1341 		qdf_mem_copy(&record,
1342 			     &pdev->txq_log.data[offset + 1 + align_pad],
1343 			     sizeof(struct ol_tx_log_queue_add_t));
1344 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1345 
1346 		if (record.peer_id != 0xffff) {
1347 			peer = ol_txrx_peer_find_by_id(pdev, record.peer_id);
1348 			if (peer != NULL)
1349 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1350 					  QDF_TRACE_LEVEL_ERROR,
1351 					  "F: %6d  %5d  %3d  %4d (%02x:%02x:%02x:%02x:%02x:%02x)",
1352 					  record.num_frms, record.num_bytes,
1353 					  record.tid,
1354 					  record.peer_id,
1355 					  peer->mac_addr.raw[0],
1356 					  peer->mac_addr.raw[1],
1357 					  peer->mac_addr.raw[2],
1358 					  peer->mac_addr.raw[3],
1359 					  peer->mac_addr.raw[4],
1360 					  peer->mac_addr.raw[5]);
1361 			else
1362 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1363 					  QDF_TRACE_LEVEL_ERROR,
1364 					  "F: %6d  %5d  %3d  %4d",
1365 					  record.num_frms, record.num_bytes,
1366 					  record.tid, record.peer_id);
1367 		} else {
1368 			/* shouldn't happen */
1369 			QDF_TRACE(QDF_MODULE_ID_TXRX,
1370 				  QDF_TRACE_LEVEL_INFO,
1371 				  "Unexpected vdev queue removal\n");
1372 		}
1373 		break;
1374 	}
1375 
1376 	case ol_tx_log_entry_type_queue_state:
1377 	{
1378 		int i, j;
1379 		u_int32_t active_bitmap;
1380 		struct ol_tx_log_queue_state_var_sz_t record;
1381 		u_int8_t *data;
1382 
1383 		qdf_mem_copy(&record,
1384 			     &pdev->txq_log.data[offset + 1 + align_pad],
1385 			     sizeof(struct ol_tx_log_queue_state_var_sz_t));
1386 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1387 
1388 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1389 			  "S: bitmap = %#x",
1390 			  record.active_bitmap);
1391 		data = &record.data[0];
1392 		j = 0;
1393 		i = 0;
1394 		active_bitmap = record.active_bitmap;
1395 		while (active_bitmap) {
1396 			if (active_bitmap & 0x1) {
1397 				u_int16_t frms;
1398 				u_int32_t bytes;
1399 
1400 				frms = data[0] | (data[1] << 8);
1401 				bytes = (data[2] <<  0) | (data[3] <<  8) |
1402 					(data[4] << 16) | (data[5] << 24);
1403 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1404 					  QDF_TRACE_LEVEL_ERROR,
1405 					  "cat %2d: %6d  %5d",
1406 					  i, frms, bytes);
1407 				data += 6;
1408 				j++;
1409 			}
1410 			i++;
1411 			active_bitmap >>= 1;
1412 		}
1413 		break;
1414 	}
1415 
1416 	/*case ol_tx_log_entry_type_drop:*/
1417 
1418 	case ol_tx_log_entry_type_wrap:
1419 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1420 		return negative_sign * offset; /* go back to the top */
1421 
1422 	default:
1423 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1424 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1425 			  "*** invalid tx log entry type (%d)\n", type);
1426 		return 0; /* error */
1427 	};
1428 
1429 	return size + 1 + align_pad;
1430 }
1431 
1432 /**
1433  * ol_tx_queue_log_display() - show tx queue log
1434  * @pdev: pointer to txrx handle
1435  *
1436  * Return: None
1437  */
1438 void
1439 ol_tx_queue_log_display(struct ol_txrx_pdev_t *pdev)
1440 {
1441 	int offset;
1442 	int unwrap;
1443 
1444 	qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1445 	offset = pdev->txq_log.oldest_record_offset;
1446 	unwrap = pdev->txq_log.wrapped;
1447 	qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1448 	/*
1449 	 * In theory, this should use mutex to guard against the offset
1450 	 * being changed while in use, but since this is just for debugging,
1451 	 * don't bother.
1452 	 */
1453 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1454 		  "Tx queue log:");
1455 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1456 		  ": Frames  Bytes  TID  PEER");
1457 
1458 	while (unwrap || offset != pdev->txq_log.offset) {
1459 		int delta = ol_tx_queue_log_record_display(pdev, offset);
1460 		if (delta == 0)
1461 			return; /* error */
1462 
1463 		if (delta < 0)
1464 			unwrap = 0;
1465 
1466 		offset += delta;
1467 	}
1468 }
1469 
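/**
 * ol_tx_queue_log_enqueue() - add an enqueue record to the tx queue log
 * @pdev: pointer to txrx handle
 * @msdu_info: meta-data for the enqueued tx frame
 * @frms: number of frames enqueued
 * @bytes: number of bytes enqueued
 *
 * Return: None
 */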
1470 void
1471 ol_tx_queue_log_enqueue(
1472 	struct ol_txrx_pdev_t *pdev,
1473 	struct ol_txrx_msdu_info_t *msdu_info,
1474 	int frms, int bytes)
1475 {
1476 	int tid;
1477 	u_int16_t peer_id = msdu_info->htt.info.peer_id;
1478 	struct ol_tx_log_queue_add_t *log_elem;
1479 	tid = msdu_info->htt.info.ext_tid;
1480 
1481 	qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1482 	log_elem = ol_tx_queue_log_alloc(pdev, ol_tx_log_entry_type_enqueue, 0);
1483 	if (!log_elem) {
1484 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1485 		return;
1486 	}
1487 
1488 	log_elem->num_frms = frms;
1489 	log_elem->num_bytes = bytes;
1490 	log_elem->peer_id = peer_id;
1491 	log_elem->tid = tid;
1492 	qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1493 }
1494 
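/**
 * ol_tx_queue_log_dequeue() - add a dequeue record to the tx queue log
 * @pdev: pointer to txrx handle
 * @txq: which tx queue the frames were dequeued from
 * @frms: number of frames dequeued
 * @bytes: number of bytes dequeued
 *
 * Return: None
 */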
1495 void
1496 ol_tx_queue_log_dequeue(
1497 	struct ol_txrx_pdev_t *pdev,
1498 	struct ol_tx_frms_queue_t *txq,
1499 	int frms, int bytes)
1500 {
1501 	int ext_tid;
1502 	u_int16_t peer_id;
1503 	struct ol_tx_log_queue_add_t *log_elem;
1504 
1505 	ext_tid = txq->ext_tid;
1506 	qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1507 	log_elem = ol_tx_queue_log_alloc(pdev, ol_tx_log_entry_type_dequeue, 0);
1508 	if (!log_elem) {
1509 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1510 		return;
1511 	}
1512 
1513 	if (ext_tid < OL_TX_NUM_TIDS) {
1514 		struct ol_txrx_peer_t *peer;
1515 		struct ol_tx_frms_queue_t *txq_base;
1516 
1517 		txq_base = txq - ext_tid;
1518 		peer = container_of(txq_base, struct ol_txrx_peer_t, txqs[0]);
1519 		peer_id = peer->peer_ids[0];
1520 	} else {
1521 		peer_id = ~0;
1522 	}
1523 
1524 	log_elem->num_frms = frms;
1525 	log_elem->num_bytes = bytes;
1526 	log_elem->peer_id = peer_id;
1527 	log_elem->tid = ext_tid;
1528 	qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1529 }
1530 
1531 void
1532 ol_tx_queue_log_free(
1533 	struct ol_txrx_pdev_t *pdev,
1534 	struct ol_tx_frms_queue_t *txq,
1535 	int tid, int frms, int bytes, bool is_peer_txq)
1536 {
1537 	u_int16_t peer_id;
1538 	struct ol_tx_log_queue_add_t *log_elem;
1539 
1540 	qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1541 	log_elem = ol_tx_queue_log_alloc(pdev, ol_tx_log_entry_type_queue_free,
1542 									0);
1543 	if (!log_elem) {
1544 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1545 		return;
1546 	}
1547 
1548 	if ((tid < OL_TX_NUM_TIDS) && is_peer_txq) {
1549 		struct ol_txrx_peer_t *peer;
1550 		struct ol_tx_frms_queue_t *txq_base;
1551 
1552 		txq_base = txq - tid;
1553 		peer = container_of(txq_base, struct ol_txrx_peer_t, txqs[0]);
1554 		peer_id = peer->peer_ids[0];
1555 	} else {
1556 		peer_id = ~0;
1557 	}
1558 
1559 	log_elem->num_frms = frms;
1560 	log_elem->num_bytes = bytes;
1561 	log_elem->peer_id = peer_id;
1562 	log_elem->tid = tid;
1563 	qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1564 }
1565 
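/**
 * ol_tx_queue_log_sched() - add a scheduler state record to the tx queue log
 * @pdev: pointer to txrx handle
 * @credit: current tx credit
 * @num_cats: in: number of active categories; set to 0 if the log record
 *	      cannot be allocated
 * @active_bitmap: out: pointer to the record's active-category bitmap
 * @data: out: pointer to the record's per-category frame/byte data
 *
 * Return: None
 */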
1566 void
1567 ol_tx_queue_log_sched(
1568 	struct ol_txrx_pdev_t *pdev,
1569 	int credit,
1570 	int *num_cats,
1571 	u_int32_t **active_bitmap,
1572 	u_int8_t  **data)
1573 {
1574 	int data_size;
1575 	struct ol_tx_log_queue_state_var_sz_t *log_elem;
1576 
1577 	data_size = sizeof(u_int32_t) /* bytes */ +
1578 				sizeof(u_int16_t) /* frms */;
1579 	data_size *= *num_cats;
1580 
1581 	qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1582 	log_elem = ol_tx_queue_log_alloc(
1583 			pdev, ol_tx_log_entry_type_queue_state, data_size);
1584 	if (!log_elem) {
1585 		*num_cats = 0;
1586 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1587 		return;
1588 	}
1589 	log_elem->num_cats_active = *num_cats;
1590 	log_elem->active_bitmap = 0;
1591 	log_elem->credit = credit;
1592 
1593 	*active_bitmap = &log_elem->active_bitmap;
1594 	*data = &log_elem->data[0];
1595 	qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1596 }
1597 
1598 /**
1599  * ol_tx_queue_log_clear() - clear tx queue log
1600  * @pdev: pointer to txrx handle
1601  *
1602  * Return: None
1603  */
1604 void
1605 ol_tx_queue_log_clear(struct ol_txrx_pdev_t *pdev)
1606 {
1607 	qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1608 	qdf_mem_zero(&pdev->txq_log, sizeof(pdev->txq_log));
1609 	pdev->txq_log.size = OL_TXQ_LOG_SIZE;
1610 	pdev->txq_log.oldest_record_offset = 0;
1611 	pdev->txq_log.offset = 0;
1612 	pdev->txq_log.allow_wrap = 1;
1613 	pdev->txq_log.wrapped = 0;
1614 	qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1615 }
1616 #endif /* defined(DEBUG_HL_LOGGING) */
1617 
1618 /*--- queue state printouts -------------------------------------------------*/
1619 
1620 #if TXRX_DEBUG_LEVEL > 5
1621 
1622 /**
1623  * ol_tx_queue_display() - show tx queue info
1624  * @txq: pointer to txq frames
1625  * @indent: indent
1626  *
1627  * Return: None
1628  */
1629 void
1630 ol_tx_queue_display(struct ol_tx_frms_queue_t *txq, int indent)
1631 {
1632 	char *state;
1633 
1634 	state = (txq->flag == ol_tx_queue_active) ? "active" : "paused";
1635 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
1636 		  "%*stxq %p (%s): %d frms, %d bytes\n",
1637 		  indent, " ", txq, state, txq->frms, txq->bytes);
1638 }
1639 
1640 void
1641 ol_tx_queues_display(struct ol_txrx_pdev_t *pdev)
1642 {
1643 	struct ol_txrx_vdev_t *vdev;
1644 
1645 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
1646 		  "pdev %p tx queues:\n", pdev);
1647 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
1648 		struct ol_txrx_peer_t *peer;
1649 		int i;
1650 		for (i = 0; i < QDF_ARRAY_SIZE(vdev->txqs); i++) {
1651 			if (vdev->txqs[i].frms == 0)
1652 				continue;
1653 
1654 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
1655 				  "vdev %d (%p), txq %d\n", vdev->vdev_id,
1656 				  vdev, i);
1657 			ol_tx_queue_display(&vdev->txqs[i], 4);
1658 		}
1659 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1660 			for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++) {
1661 				if (peer->txqs[i].frms == 0)
1662 					continue;
1663 
1664 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1665 					  QDF_TRACE_LEVEL_INFO_LOW,
1666 					  "peer %d (%p), txq %d\n",
1667 					  peer->peer_ids[0], peer, i);
1668 				ol_tx_queue_display(&peer->txqs[i], 6);
1669 			}
1670 		}
1671 	}
1672 }
1673 #endif
1674 
1675 #endif /* defined(CONFIG_HL_SUPPORT) */
1676 
1677 #if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
1678 
1679 /**
1680  * ol_txrx_vdev_pause() - Suspend all tx data for the specified virtual device
1681  *
1682  * @pvdev - the virtual device being paused
1683  * @reason - the reason for which vdev queue is getting paused
1684  *
1685  * This function applies primarily to HL systems, but also
1686  * applies to LL systems that use per-vdev tx queues for MCC or
1687  * thermal throttling. As an example, this function could be
1688  * used when a single-channel physical device supports multiple
1689  * channels by jumping back and forth between the channels in a
1690  * time-shared manner.  As the device is switched from channel A
1691  * to channel B, the virtual devices that operate on channel A
1692  * will be paused.
1693  *
1694  */
1695 void ol_txrx_vdev_pause(void *pvdev, uint32_t reason)
1696 {
1697 	ol_txrx_vdev_handle vdev = pvdev;
1698 
1699 	/* TO DO: log the queue pause */
1700 	/* acquire the mutex lock, since we'll be modifying the queues */
1701 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
1702 
1703 	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
1704 	vdev->ll_pause.paused_reason |= reason;
1705 	vdev->ll_pause.q_pause_cnt++;
1706 	vdev->ll_pause.is_q_paused = true;
1707 	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
1708 
1709 	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
1710 }
1711 
1712 /**
1713  * ol_txrx_vdev_unpause - Resume tx for the specified virtual device
1714  *
1715  * @pvdev - the virtual device being unpaused
1716  * @reason - the reason for which vdev queue is getting unpaused
1717  *
1718  * This function applies primarily to HL systems, but also applies to
1719  * LL systems that use per-vdev tx queues for MCC or thermal throttling.
1720  *
1721  */
1722 void ol_txrx_vdev_unpause(void *pvdev, uint32_t reason)
1723 {
1724 	ol_txrx_vdev_handle vdev = pvdev;
1725 	/* TO DO: log the queue unpause */
1726 	/* acquire the mutex lock, since we'll be modifying the queues */
1727 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
1728 
1729 	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
1730 	if (vdev->ll_pause.paused_reason & reason) {
1731 		vdev->ll_pause.paused_reason &= ~reason;
1732 		if (!vdev->ll_pause.paused_reason) {
1733 			vdev->ll_pause.is_q_paused = false;
1734 			vdev->ll_pause.q_unpause_cnt++;
1735 			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
1736 			ol_tx_vdev_ll_pause_queue_send(vdev);
1737 		} else {
1738 			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
1739 		}
1740 	} else {
1741 		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
1742 	}
1743 	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
1744 }
1745 
1746 /**
1747  * ol_txrx_vdev_flush - Drop all tx data for the specified virtual device
1748  *
1749  * @pvdev - the virtual device being flushed
1750  *
1751  *  This function applies primarily to HL systems, but also applies to
1752  *  LL systems that use per-vdev tx queues for MCC or thermal throttling.
1753  *  This function would typically be used by the ctrl SW after it parks
1754  *  a STA vdev and then resumes it, but to a new AP.  In this case, though
1755  *  the same vdev can be used, any old tx frames queued inside it would be
1756  *  stale, and would need to be discarded.
1757  *
1758  */
1759 void ol_txrx_vdev_flush(void *pvdev)
1760 {
1761 	ol_txrx_vdev_handle vdev = pvdev;
1762 	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
1763 	qdf_timer_stop(&vdev->ll_pause.timer);
1764 	vdev->ll_pause.is_q_timer_on = false;
1765 	while (vdev->ll_pause.txq.head) {
1766 		qdf_nbuf_t next =
1767 			qdf_nbuf_next(vdev->ll_pause.txq.head);
1768 		qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
1769 		qdf_nbuf_unmap(vdev->pdev->osdev,
1770 			       vdev->ll_pause.txq.head,
1771 			       QDF_DMA_TO_DEVICE);
1772 		qdf_nbuf_tx_free(vdev->ll_pause.txq.head,
1773 				 QDF_NBUF_PKT_ERROR);
1774 		vdev->ll_pause.txq.head = next;
1775 	}
1776 	vdev->ll_pause.txq.tail = NULL;
1777 	vdev->ll_pause.txq.depth = 0;
1778 	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
1779 }
1780 #endif /* defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) */
1781 
1782 #if (!defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)) && (!defined(CONFIG_HL_SUPPORT))
1783 void ol_txrx_vdev_flush(void *data_vdev)
1784 {
1785 	return;
1786 }
1787 #endif
1788 
1789 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
1790 
1791 /**
1792  * ol_txrx_map_to_netif_reason_type() - map to netif_reason_type
1793  * @reason: reason
1794  *
1795  * Return: netif_reason_type
1796  */
1797 enum netif_reason_type
1798 ol_txrx_map_to_netif_reason_type(uint32_t reason)
1799 {
1800 	switch (reason) {
1801 	case OL_TXQ_PAUSE_REASON_FW:
1802 		return WLAN_FW_PAUSE;
1803 	case OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED:
1804 		return WLAN_PEER_UNAUTHORISED;
1805 	case OL_TXQ_PAUSE_REASON_TX_ABORT:
1806 		return WLAN_TX_ABORT;
1807 	case OL_TXQ_PAUSE_REASON_VDEV_STOP:
1808 		return WLAN_VDEV_STOP;
1809 	case OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION:
1810 		return WLAN_THERMAL_MITIGATION;
1811 	default:
1812 		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
1813 			   "%s: reason not supported %d\n",
1814 			   __func__, reason);
1815 		return WLAN_REASON_TYPE_MAX;
1816 	}
1817 }
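
/*
 * Callers treat WLAN_REASON_TYPE_MAX as the "unmapped" sentinel, as in
 * this illustrative sketch (mirroring the pause path below):
 *
 *	enum netif_reason_type r =
 *		ol_txrx_map_to_netif_reason_type(reason);
 *
 *	if (r == WLAN_REASON_TYPE_MAX)
 *		return;		(unsupported reason: leave the queues alone)
 *	pdev->pause_cb(vdev->vdev_id, WLAN_NETIF_TX_DISABLE, r);
 */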
1818 
1819 #ifndef CONFIG_ICNSS
1820 /**
1821  * ol_txrx_vdev_pause() - pause vdev network queues
1822  * @pvdev: vdev handle
1823  * @reason: reason
1824  *
1825  * Return: none
1826  */
1827 void ol_txrx_vdev_pause(void *pvdev, uint32_t reason)
1828 {
1829 	ol_txrx_vdev_handle vdev = pvdev;
1830 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1831 	enum netif_reason_type netif_reason;
1832 
1833 	if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
1834 		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
1835 				   "%s: invalid pdev\n", __func__);
1836 		return;
1837 	}
1838 
1839 	netif_reason = ol_txrx_map_to_netif_reason_type(reason);
1840 	if (netif_reason == WLAN_REASON_TYPE_MAX)
1841 		return;
1842 
1843 	pdev->pause_cb(vdev->vdev_id, WLAN_NETIF_TX_DISABLE, netif_reason);
1844 }
1845 
1846 /**
1847  * ol_txrx_vdev_unpause() - unpause vdev network queues
1848  * @pvdev: vdev handle
1849  * @reason: reason
1850  *
1851  * Return: none
1852  */
1853 void ol_txrx_vdev_unpause(void *pvdev, uint32_t reason)
1854 {
1855 	ol_txrx_vdev_handle vdev = pvdev;
1856 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1857 	enum netif_reason_type netif_reason;
1858 
1859 	if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
1860 		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
1861 				   "%s: invalid pdev\n", __func__);
1862 		return;
1863 	}
1864 
1865 	netif_reason = ol_txrx_map_to_netif_reason_type(reason);
1866 	if (netif_reason == WLAN_REASON_TYPE_MAX)
1867 		return;
1868 
1869 	pdev->pause_cb(vdev->vdev_id, WLAN_WAKE_ALL_NETIF_QUEUE,
1870 			netif_reason);
1872 }
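
/*
 * With flow control v2 the actual queue manipulation is delegated to the
 * pause callback registered on the pdev by the upper layer.  A hedged
 * sketch of that contract (hdd_example_pause_cb is a hypothetical name;
 * the parameter list simply mirrors how pause_cb is invoked above):
 *
 *	static void hdd_example_pause_cb(uint8_t vdev_id,
 *					 enum netif_action_type action,
 *					 enum netif_reason_type reason)
 *	{
 *		if (action == WLAN_NETIF_TX_DISABLE)
 *			stop the OS netdev tx queues for vdev_id;
 *		else if (action == WLAN_WAKE_ALL_NETIF_QUEUE)
 *			wake them again;
 *	}
 */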
1873 #endif
1874 #endif
1875 
1876 #if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)
1877 
1878 /**
1879  * ol_txrx_pdev_pause() - pause network queues for each vdev
1880  * @pdev: pdev handle
1881  * @reason: reason
1882  *
1883  * Return: none
1884  */
1885 void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
1886 {
1887 	struct ol_txrx_vdev_t *vdev = NULL, *tmp;
1888 
1889 	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
1890 		cdp_fc_vdev_pause(
1891 			cds_get_context(QDF_MODULE_ID_SOC), vdev, reason);
1892 	}
1894 }
1895 
1896 /**
1897  * ol_txrx_pdev_unpause() - unpause network queues for each vdev
1898  * @pdev: pdev handle
1899  * @reason: reason
1900  *
1901  * Return: none
1902  */
1903 void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
1904 {
1905 	struct ol_txrx_vdev_t *vdev = NULL, *tmp;
1906 
1907 	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
1908 		cdp_fc_vdev_unpause(cds_get_context(QDF_MODULE_ID_SOC),
1909 				    vdev, reason);
1910 	}
1912 }
1913 #endif
1914 
1915 /*--- LL tx throttle queue code --------------------------------------------*/
1916 #if defined(QCA_SUPPORT_TX_THROTTLE)
1917 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
1918 /**
1919  * ol_txrx_thermal_pause() - pause due to thermal mitigation
1920  * @pdev: pdev handle
1921  *
1922  * Return: none
1923  */
1924 static inline
1925 void ol_txrx_thermal_pause(struct ol_txrx_pdev_t *pdev)
1926 {
1927 	ol_txrx_pdev_pause(pdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
1929 }
1930 /**
1931  * ol_txrx_thermal_unpause() - unpause due to thermal mitigation
1932  * @pdev: pdev handle
1933  *
1934  * Return: none
1935  */
1936 static inline
1937 void ol_txrx_thermal_unpause(struct ol_txrx_pdev_t *pdev)
1938 {
1939 	ol_txrx_pdev_unpause(pdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
1941 }
1942 #else
1943 /**
1944  * ol_txrx_thermal_pause() - pause due to thermal mitigation
1945  * @pdev: pdev handle
1946  *
1947  * Return: none
1948  */
1949 static inline
1950 void ol_txrx_thermal_pause(struct ol_txrx_pdev_t *pdev)
1951 {
1953 }
1954 
1955 /**
1956  * ol_txrx_thermal_unpause() - unpause due to thermal mitigation
1957  * @pdev: pdev handle
1958  *
1959  * Return: none
1960  */
1961 static inline
1962 void ol_txrx_thermal_unpause(struct ol_txrx_pdev_t *pdev)
1963 {
1964 	ol_tx_pdev_ll_pause_queue_send_all(pdev);
1966 }
1967 #endif
1968 
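/**
 * ol_tx_pdev_throttle_phase_timer() - advance the tx throttle duty cycle
 * @context: the physical device object
 *
 * Each expiry toggles the pdev between the OFF phase (all tx paused) and
 * the ON phase (tx allowed) and, for any throttle level other than
 * THROTTLE_LEVEL_0, re-arms the timer with the per-level phase duration
 * from throttle_time_ms[], producing a periodic OFF/ON duty cycle.
 *
 * Return: None
 */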
1969 void ol_tx_pdev_throttle_phase_timer(void *context)
1970 {
1971 	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;
1972 	int ms;
1973 	enum throttle_level cur_level;
1974 	enum throttle_phase cur_phase;
1975 
1976 	/* update the phase */
1977 	pdev->tx_throttle.current_throttle_phase++;
1978 
1979 	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_MAX)
1980 		pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
1981 
1982 	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF) {
1983 		/* Traffic is stopped */
1984 		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
1985 				   "throttle phase --> OFF\n");
1986 		ol_txrx_throttle_pause(pdev);
1987 		ol_txrx_thermal_pause(pdev);
1988 		cur_level = pdev->tx_throttle.current_throttle_level;
1989 		cur_phase = pdev->tx_throttle.current_throttle_phase;
1990 		ms = pdev->tx_throttle.throttle_time_ms[cur_level][cur_phase];
1991 		if (pdev->tx_throttle.current_throttle_level !=
1992 				THROTTLE_LEVEL_0) {
1993 			TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
1994 					   "start timer %d ms\n", ms);
1995 			qdf_timer_start(&pdev->tx_throttle.
1996 							phase_timer, ms);
1997 		}
1998 	} else {
1999 		/* Traffic can go */
2000 		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
2001 					"throttle phase --> ON\n");
2002 		ol_txrx_throttle_unpause(pdev);
2003 		ol_txrx_thermal_unpause(pdev);
2004 		cur_level = pdev->tx_throttle.current_throttle_level;
2005 		cur_phase = pdev->tx_throttle.current_throttle_phase;
2006 		ms = pdev->tx_throttle.throttle_time_ms[cur_level][cur_phase];
2007 		if (pdev->tx_throttle.current_throttle_level !=
2008 		    THROTTLE_LEVEL_0) {
2009 			TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "start timer %d ms\n",
2010 				   ms);
2011 			qdf_timer_start(&pdev->tx_throttle.phase_timer,
2012 						ms);
2013 		}
2014 	}
2015 }
2016 
2017 #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
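/**
 * ol_tx_pdev_throttle_tx_timer() - send queued frames from the per-vdev
 *				    pause queues when the throttle tx timer
 *				    fires
 * @context: the physical device object
 *
 * Return: None
 */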
2018 void ol_tx_pdev_throttle_tx_timer(void *context)
2019 {
2020 	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;
2021 	ol_tx_pdev_ll_pause_queue_send_all(pdev);
2022 }
2023 #endif
2024 
2025 #ifdef CONFIG_HL_SUPPORT
2026 
2027 /**
2028  * ol_tx_set_throttle_phase_time() - Set the thermal mitigation throttle
2029  *				     phase and time
2030  * @pdev: the physical device object
2031  * @level: throttle level
2032  * @ms: set to the duration of the new phase, in milliseconds
2033  *
2034  * Return: None
2035  */
2036 static void
2037 ol_tx_set_throttle_phase_time(struct ol_txrx_pdev_t *pdev, int level, int *ms)
2038 {
2039 	qdf_timer_stop(&pdev->tx_throttle.phase_timer);
2040 
2041 	/* Set the phase */
2042 	if (level != THROTTLE_LEVEL_0) {
2043 		pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
2044 		*ms = pdev->tx_throttle.throttle_time_ms[level]
2045 						[THROTTLE_PHASE_OFF];
2046 
2047 		/* pause all */
2048 		ol_txrx_throttle_pause(pdev);
2049 	} else {
2050 		pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_ON;
2051 		*ms = pdev->tx_throttle.throttle_time_ms[level]
2052 						[THROTTLE_PHASE_ON];
2053 
2054 		/* unpause all */
2055 		ol_txrx_throttle_unpause(pdev);
2056 	}
2057 }
2058 #else
2059 
2060 static void
2061 ol_tx_set_throttle_phase_time(struct ol_txrx_pdev_t *pdev, int level, int *ms)
2062 {
2063 	/* Reset the phase */
2064 	pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
2065 
2066 	/* Start with the new time */
2067 	*ms = pdev->tx_throttle.
2068 		throttle_time_ms[level][THROTTLE_PHASE_OFF];
2069 
2070 	qdf_timer_stop(&pdev->tx_throttle.phase_timer);
2071 }
2072 #endif
2073 
2074 void ol_tx_throttle_set_level(void *ppdev, int level)
2075 {
2076 	struct ol_txrx_pdev_t *pdev = ppdev;
2077 	int ms = 0;
2078 
2079 	if (level >= THROTTLE_LEVEL_MAX) {
2080 		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
2081 			   "%s invalid throttle level set %d, ignoring\n",
2082 			   __func__, level);
2083 		return;
2084 	}
2085 
2086 	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Setting throttle level %d\n", level);
2087 
2088 	/* Set the current throttle level */
2089 	pdev->tx_throttle.current_throttle_level = (enum throttle_level) level;
2090 
2091 	ol_tx_set_throttle_phase_time(pdev, level, &ms);
2092 
2093 	if (level != THROTTLE_LEVEL_0)
2094 		qdf_timer_start(&pdev->tx_throttle.phase_timer, ms);
2095 }
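
/*
 * Illustrative usage sketch: a thermal mitigation client picks a level and
 * the OFF/ON duty cycling starts automatically for any non-zero level
 * (THROTTLE_LEVEL_1 below is assumed to be one of the intermediate levels
 * defined by enum throttle_level):
 *
 *	ol_tx_throttle_set_level(pdev, THROTTLE_LEVEL_1);
 *	 (... later, once temperatures recover ...)
 *	ol_tx_throttle_set_level(pdev, THROTTLE_LEVEL_0);
 */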
2096 
2097 void ol_tx_throttle_init_period(void *ppdev, int period,
2098 				uint8_t *dutycycle_level)
2099 {
2100 	struct ol_txrx_pdev_t *pdev = ppdev;
2101 	int i;
2102 
2103 	/* Set the current throttle level */
2104 	/* Set the throttle period */
2105 
2106 	TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "level  OFF  ON\n");
2107 	for (i = 0; i < THROTTLE_LEVEL_MAX; i++) {
2108 		pdev->tx_throttle.throttle_time_ms[i][THROTTLE_PHASE_ON] =
2109 			pdev->tx_throttle.throttle_period_ms -
2110 				((dutycycle_level[i] *
2111 				  pdev->tx_throttle.throttle_period_ms)/100);
2112 		pdev->tx_throttle.throttle_time_ms[i][THROTTLE_PHASE_OFF] =
2113 			pdev->tx_throttle.throttle_period_ms -
2114 			pdev->tx_throttle.throttle_time_ms[
2115 				i][THROTTLE_PHASE_ON];
2116 		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "%d      %d    %d\n", i,
2117 			   pdev->tx_throttle.
2118 			   throttle_time_ms[i][THROTTLE_PHASE_OFF],
2119 			   pdev->tx_throttle.
2120 			   throttle_time_ms[i][THROTTLE_PHASE_ON]);
2121 	}
2123 }
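
/*
 * Worked example with hypothetical numbers: with a 100 ms period and
 * dutycycle_level[] = {0, 25, 50, 75}, the table above becomes
 *
 *	level 0:  ON = 100 ms, OFF =  0 ms   (no throttling)
 *	level 1:  ON =  75 ms, OFF = 25 ms
 *	level 2:  ON =  50 ms, OFF = 50 ms
 *	level 3:  ON =  25 ms, OFF = 75 ms
 *
 * i.e. ON = period - (duty * period) / 100 and OFF = period - ON, so each
 * duty-cycle entry is the percentage of every period spent with tx paused.
 */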
2124 
2125 void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev)
2126 {
2127 	uint32_t throttle_period;
2128 	uint8_t dutycycle_level[THROTTLE_LEVEL_MAX];
2129 	int i;
2130 
2131 	pdev->tx_throttle.current_throttle_level = THROTTLE_LEVEL_0;
2132 	pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
2133 	qdf_spinlock_create(&pdev->tx_throttle.mutex);
2134 
2135 	throttle_period = ol_cfg_throttle_period_ms(pdev->ctrl_pdev);
2136 
2137 	for (i = 0; i < THROTTLE_LEVEL_MAX; i++)
2138 		dutycycle_level[i] =
2139 			ol_cfg_throttle_duty_cycle_level(pdev->ctrl_pdev, i);
2140 
2141 	ol_tx_throttle_init_period(pdev, throttle_period, &dutycycle_level[0]);
2142 
2143 	qdf_timer_init(pdev->osdev,
2144 			       &pdev->tx_throttle.phase_timer,
2145 			       ol_tx_pdev_throttle_phase_timer, pdev,
2146 			       QDF_TIMER_TYPE_SW);
2147 
2148 #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
2149 	qdf_timer_init(pdev->osdev,
2150 			       &pdev->tx_throttle.tx_timer,
2151 			       ol_tx_pdev_throttle_tx_timer, pdev,
2152 			       QDF_TIMER_TYPE_SW);
2153 #endif
2154 
2155 	pdev->tx_throttle.tx_threshold = THROTTLE_TX_THRESHOLD;
2156 }
2157 #endif /* QCA_SUPPORT_TX_THROTTLE */
2158 
2159 #ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
2160 
2161 /**
2162  * ol_tx_vdev_has_tx_queue_group() - check whether a vdev belongs to a txq group
2163  * @group: pointer to the tx queue group
2164  * @vdev_id: vdev id
2165  *
2166  * Return: true if the vdev is a member of the txq group
2167  */
2168 static bool
2169 ol_tx_vdev_has_tx_queue_group(
2170 	struct ol_tx_queue_group_t *group,
2171 	u_int8_t vdev_id)
2172 {
2173 	u_int16_t vdev_bitmap;
2174 	vdev_bitmap = OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);
2175 	if (OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(vdev_bitmap, vdev_id))
2176 		return true;
2177 
2178 	return false;
2179 }
2180 
2181 /**
2182  * ol_tx_ac_has_tx_queue_group() - check whether an AC belongs to a txq group
2183  * @group: pointer to the tx queue group
2184  * @ac: access category
2185  *
2186  * Return: true if the access category is a member of the txq group
2187  */
2188 static bool
2189 ol_tx_ac_has_tx_queue_group(
2190 	struct ol_tx_queue_group_t *group,
2191 	u_int8_t ac)
2192 {
2193 	u_int16_t ac_bitmap;
2194 	ac_bitmap = OL_TXQ_GROUP_AC_MASK_GET(group->membership);
2195 	if (OL_TXQ_GROUP_AC_BIT_MASK_GET(ac_bitmap, ac))
2196 		return true;
2197 
2198 	return false;
2199 }
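
/*
 * group->membership packs two bitmaps: which vdev ids and which access
 * categories the group covers.  The helpers above each test one bit in
 * the relevant half, e.g. (illustrative sketch) to ask whether this group
 * governs vdev 1's TID-0 traffic:
 *
 *	if (ol_tx_vdev_has_tx_queue_group(group, 1) &&
 *	    ol_tx_ac_has_tx_queue_group(group, TXRX_TID_TO_WMM_AC(0)))
 *		... the group's credit applies to that traffic ...
 */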
2200 
2201 u_int32_t ol_tx_txq_group_credit_limit(
2202 	struct ol_txrx_pdev_t *pdev,
2203 	struct ol_tx_frms_queue_t *txq,
2204 	u_int32_t credit)
2205 {
2206 	u_int8_t i;
2207 	int updated_credit = credit;
2208 	/*
2209 	 * If this tx queue belongs to a group, check whether the group's
2210 	 * credit limit is more stringent than the global credit limit.
2211 	 */
2212 	for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++) {
2213 		if (txq->group_ptrs[i]) {
2214 			int group_credit;
2215 			group_credit = qdf_atomic_read(
2216 					&txq->group_ptrs[i]->credit);
2217 			updated_credit = QDF_MIN(updated_credit, group_credit);
2218 		}
2219 	}
2220 
2221 	credit = (updated_credit < 0) ? 0 : updated_credit;
2222 
2223 	return credit;
2224 }
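
/*
 * Worked example with hypothetical numbers: if the scheduler could send
 * frames worth 10 credits from this queue but the queue belongs to a group
 * whose remaining credit is 4, the function returns 4; a negative group
 * credit is clamped to 0.  In short:
 *
 *	avail = ol_tx_txq_group_credit_limit(pdev, txq, 10);
 *	 (avail == min(10, per-group credits), never below 0)
 */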
2225 
2226 void ol_tx_txq_group_credit_update(
2227 	struct ol_txrx_pdev_t *pdev,
2228 	struct ol_tx_frms_queue_t *txq,
2229 	int32_t credit,
2230 	u_int8_t absolute)
2231 {
2232 	u_int8_t i;
2233 	/*
2234 	 * If this tx queue belongs to a group then
2235 	 * update group credit
2236 	 */
2237 	for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++) {
2238 		if (txq->group_ptrs[i])
2239 			ol_txrx_update_group_credit(txq->group_ptrs[i],
2240 						    credit, absolute);
2241 	}
2242 	ol_tx_update_group_credit_stats(pdev);
2243 }
2244 
2245 void
2246 ol_tx_set_vdev_group_ptr(
2247 	ol_txrx_pdev_handle pdev,
2248 	u_int8_t vdev_id,
2249 	struct ol_tx_queue_group_t *grp_ptr)
2250 {
2251 	struct ol_txrx_vdev_t *vdev = NULL;
2252 	struct ol_txrx_peer_t *peer = NULL;
2253 
2254 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2255 		if (vdev->vdev_id == vdev_id) {
2256 			u_int8_t i, j;
2257 			/* update vdev queues group pointers */
2258 			for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
2259 				for (j = 0; j < OL_TX_MAX_GROUPS_PER_QUEUE; j++)
2260 					vdev->txqs[i].group_ptrs[j] = grp_ptr;
2261 			}
2262 			qdf_spin_lock_bh(&pdev->peer_ref_mutex);
2263 			/* Update peer queue group pointers */
2264 			TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2265 				for (i = 0; i < OL_TX_NUM_TIDS; i++) {
2266 					for (j = 0;
2267 						j < OL_TX_MAX_GROUPS_PER_QUEUE;
2268 							j++)
2269 						peer->txqs[i].group_ptrs[j] =
2270 							grp_ptr;
2271 				}
2272 			}
2273 			qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2274 			break;
2275 		}
2276 	}
2277 }
2278 
2279 void ol_tx_txq_set_group_ptr(
2280 	struct ol_tx_frms_queue_t *txq,
2281 	struct ol_tx_queue_group_t *grp_ptr)
2282 {
2283 	u_int8_t i;
2284 	for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++)
2285 		txq->group_ptrs[i] = grp_ptr;
2286 }
2287 
2288 void ol_tx_set_peer_group_ptr(
2289 	ol_txrx_pdev_handle pdev,
2290 	struct ol_txrx_peer_t *peer,
2291 	u_int8_t vdev_id,
2292 	u_int8_t tid)
2293 {
2294 	u_int8_t i, j = 0;
2295 	struct ol_tx_queue_group_t *group = NULL;
2296 
2297 	for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++)
2298 		peer->txqs[tid].group_ptrs[i] = NULL;
2299 
2300 	for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
2301 		group = &pdev->txq_grps[i];
2302 		if (ol_tx_vdev_has_tx_queue_group(group, vdev_id)) {
2303 			if (tid < OL_TX_NUM_QOS_TIDS) {
2304 				if (ol_tx_ac_has_tx_queue_group(
2305 						group,
2306 						TXRX_TID_TO_WMM_AC(tid))) {
2307 					peer->txqs[tid].group_ptrs[j] = group;
2308 					j++;
2309 				}
2310 			} else {
2311 				peer->txqs[tid].group_ptrs[j] = group;
2312 				j++;
2313 			}
2314 		}
2315 		if (j >= OL_TX_MAX_GROUPS_PER_QUEUE)
2316 			break;
2317 	}
2318 }
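
/*
 * For QoS TIDs the peer queue is attached only to groups that cover both
 * the peer's vdev and the TID's access category; non-QoS TIDs skip the AC
 * check.  An illustrative (hedged) sketch of how a caller might wire up a
 * newly created peer:
 *
 *	for (tid = 0; tid < OL_TX_NUM_TIDS; tid++)
 *		ol_tx_set_peer_group_ptr(pdev, peer, vdev->vdev_id, tid);
 */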
2319 
2320 u_int32_t ol_tx_get_max_tx_groups_supported(struct ol_txrx_pdev_t *pdev)
2321 {
2322 #ifdef HIF_SDIO
2323 	return OL_TX_MAX_TXQ_GROUPS;
2324 #else
2325 	return 0;
2326 #endif
2327 }
2328 #endif
2329 
2330 /*--- End of LL tx throttle queue code ---------------------------------------*/
2331