1 /*
2 * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
21 #include <qdf_atomic.h> /* qdf_atomic_read, etc. */
22 #include <ol_cfg.h> /* ol_cfg_addba_retry */
23 #include <htt.h> /* HTT_TX_EXT_TID_MGMT */
24 #include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
25 #include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
26 #include <ol_txrx_ctrl_api.h> /* ol_txrx_sync, ol_tx_addba_conf */
27 #include <cdp_txrx_tx_throttle.h>
28 #include <ol_ctrl_txrx_api.h> /* ol_ctrl_addba_req */
29 #include <ol_txrx_internal.h> /* TXRX_ASSERT1, etc. */
30 #include <ol_tx_desc.h> /* ol_tx_desc, ol_tx_desc_frame_list_free */
31 #include <ol_tx.h> /* ol_tx_vdev_ll_pause_queue_send */
32 #include <ol_tx_sched.h> /* ol_tx_sched_notify, etc. */
33 #include <ol_tx_queue.h>
34 #include <ol_txrx.h> /* ol_tx_desc_pool_size_hl */
35 #include <ol_txrx_dbg.h> /* ENABLE_TX_QUEUE_LOG */
36 #include <qdf_types.h> /* bool */
37 #include "cdp_txrx_flow_ctrl_legacy.h"
38 #include <ol_txrx_peer_find.h>
39 #include <cdp_txrx_handle.h>
40 #if defined(CONFIG_HL_SUPPORT)
41
42 #ifndef offsetof
43 #define offsetof(type, field) ((qdf_size_t)(&((type *)0)->field))
44 #endif
45
46 /*--- function prototypes for optional host ADDBA negotiation ---------------*/
47
48 #define OL_TX_QUEUE_ADDBA_CHECK(pdev, txq, tx_msdu_info) /* no-op */
49
50 #ifndef container_of
51 #define container_of(ptr, type, member) ((type *)( \
52 (char *)(ptr) - (char *)(&((type *)0)->member)))
53 #endif
54 /*--- function definitions --------------------------------------------------*/
55
56 /**
57 * ol_tx_queue_vdev_flush() - try to flush pending frames in the tx queues
58 * whether or not they are queued in the TX scheduler
59 * @pdev: the physical device object
60 * @vdev: the virtual device object
61 *
62 * Return: None
63 */
64 static void
65 ol_tx_queue_vdev_flush(struct ol_txrx_pdev_t *pdev, struct ol_txrx_vdev_t *vdev)
66 {
67 #define PEER_ARRAY_COUNT 10
68 struct ol_tx_frms_queue_t *txq;
69 struct ol_txrx_peer_t *peer, *peers[PEER_ARRAY_COUNT];
70 int i, j, peer_count;
71
72 ol_tx_hl_queue_flush_all(vdev);
73
74 /* flush VDEV TX queues */
75 for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
76 txq = &vdev->txqs[i];
77 /*
78 * currently txqs of MCAST_BCAST/DEFAULT_MGMT packet are using
79 * tid HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST/HTT_TX_EXT_TID_MGMT
80 * when inserted into scheduler, so use same tid when we flush
81 * them
82 */
83 if (i == OL_TX_VDEV_MCAST_BCAST)
84 ol_tx_queue_free(pdev,
85 txq,
86 HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST,
87 false);
88 else if (i == OL_TX_VDEV_DEFAULT_MGMT)
89 ol_tx_queue_free(pdev,
90 txq,
91 HTT_TX_EXT_TID_MGMT,
92 false);
93 else
94 ol_tx_queue_free(pdev,
95 txq,
96 (i + OL_TX_NUM_TIDS),
97 false);
98 }
99 /* flush PEER TX queues */
100 do {
101 peer_count = 0;
102 /* select candidate peers */
103 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
104 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
105 for (i = 0; i < OL_TX_NUM_TIDS; i++) {
106 txq = &peer->txqs[i];
107 if (txq->frms) {
108 ol_txrx_peer_get_ref
109 (peer,
110 PEER_DEBUG_ID_OL_TXQ_VDEV_FL);
111 peers[peer_count++] = peer;
112 break;
113 }
114 }
115 if (peer_count >= PEER_ARRAY_COUNT)
116 break;
117 }
118 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
119 /* flush TX queues of candidate peers */
120 for (i = 0; i < peer_count; i++) {
121 for (j = 0; j < OL_TX_NUM_TIDS; j++) {
122 txq = &peers[i]->txqs[j];
123 if (txq->frms)
124 ol_tx_queue_free(pdev, txq, j, true);
125 }
126 ol_txrx_info("Delete Peer %pK", peer);
127 ol_txrx_peer_release_ref(peers[i],
128 PEER_DEBUG_ID_OL_TXQ_VDEV_FL);
129 }
130 } while (peer_count >= PEER_ARRAY_COUNT);
131 }
132
133 /**
134 * ol_tx_queue_flush() - try to flush pending frames in the tx queues
135 * whether or not they are queued in the TX scheduler
136 * @pdev: the physical device object
137 *
138 * Return: None
139 */
140 static inline void
141 ol_tx_queue_flush(struct ol_txrx_pdev_t *pdev)
142 {
143 struct ol_txrx_vdev_t *vdev;
144
145 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
146 ol_tx_queue_vdev_flush(pdev, vdev);
147 }
148 }
149
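/**
 * ol_tx_queue_discard() - discard queued tx frames to reclaim tx descriptors
 * @pdev: the physical device object
 * @flush_all: discard everything queued in the scheduler (true), or only
 *	enough to get back under the resource thresholds (false)
 * @tx_descs: list to which the discarded tx descriptors are appended
 *
 * Return: None
 */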
150 void
151 ol_tx_queue_discard(
152 struct ol_txrx_pdev_t *pdev,
153 bool flush_all,
154 ol_tx_desc_list *tx_descs)
155 {
156 u_int16_t num;
157 u_int16_t discarded, actual_discarded = 0;
158
159 qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
160
161 if (flush_all == true)
162 /* flush all the pending tx queues in the scheduler */
163 num = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev) -
164 qdf_atomic_read(&pdev->tx_queue.rsrc_cnt);
165 else
166 /*TODO: Discard frames for a particular vdev only */
167 num = pdev->tx_queue.rsrc_threshold_hi -
168 pdev->tx_queue.rsrc_threshold_lo;
169
170 TX_SCHED_DEBUG_PRINT("+%u", qdf_atomic_read(&pdev->tx_queue.rsrc_cnt));
171 while (num > 0) {
172 discarded = ol_tx_sched_discard_select(
173 pdev, (u_int16_t)num, tx_descs, flush_all);
174 if (discarded == 0)
175 /*
176 * No more packets could be discarded.
177 * Probably tx queues are empty.
178 */
179 break;
180
181 num -= discarded;
182 actual_discarded += discarded;
183 }
184 qdf_atomic_add(actual_discarded, &pdev->tx_queue.rsrc_cnt);
185 TX_SCHED_DEBUG_PRINT("-");
186
187 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
188
189 if (flush_all == true && num > 0)
190 /*
191 * try to flush pending frames in the tx queues
192 * which are not queued in the TX scheduler.
193 */
194 ol_tx_queue_flush(pdev);
195 }
196
197 #ifdef QCA_HL_NETDEV_FLOW_CONTROL
198
199 /**
200 * is_ol_tx_discard_frames_success() - check whether currently queued tx frames
201 * can be discarded or not
202 * @pdev: the physical device object
203 * @tx_desc: tx descriptor ptr
204 *
205 * Return: true if queued tx frames should be discarded (tx descriptors are too few)
206 */
207 static inline bool
208 is_ol_tx_discard_frames_success(struct ol_txrx_pdev_t *pdev,
209 struct ol_tx_desc_t *tx_desc)
210 {
211 ol_txrx_vdev_handle vdev;
212 bool discard_frames;
213
214 vdev = tx_desc->vdev;
215
216 qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
217 if (vdev->tx_desc_limit == 0) {
218 /* Flow control not enabled */
219 discard_frames = qdf_atomic_read(&pdev->tx_queue.rsrc_cnt) <=
220 pdev->tx_queue.rsrc_threshold_lo;
221 } else {
222 /*
223 * Discard
224 * if netbuf is normal priority and tx_desc_count greater than
225 * queue stop threshold
226 * AND
227 * if netbuf is high priority and tx_desc_count greater than
228 * tx desc limit.
229 */
230 discard_frames = (!ol_tx_desc_is_high_prio(tx_desc->netbuf) &&
231 qdf_atomic_read(&vdev->tx_desc_count) >
232 vdev->queue_stop_th) ||
233 (ol_tx_desc_is_high_prio(tx_desc->netbuf) &&
234 qdf_atomic_read(&vdev->tx_desc_count) >
235 vdev->tx_desc_limit);
236 }
237 qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
238
239 return discard_frames;
240 }
241 #else
242
243 static inline bool
244 is_ol_tx_discard_frames_success(struct ol_txrx_pdev_t *pdev,
245 struct ol_tx_desc_t *tx_desc)
246 {
247 return qdf_atomic_read(&pdev->tx_queue.rsrc_cnt) <=
248 pdev->tx_queue.rsrc_threshold_lo;
249 }
250 #endif /* QCA_HL_NETDEV_FLOW_CONTROL */
251
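/**
 * ol_tx_enqueue() - add a tx frame to its target tx queue
 * @pdev: the physical device object
 * @txq: the tx queue (peer-TID or vdev) that the frame belongs to
 * @tx_desc: tx descriptor of the frame being queued
 * @tx_msdu_info: classification info for the tx frame
 *
 * If tx descriptors are running low, some currently-queued frames are
 * discarded first, to make room for new (possibly higher-priority) frames.
 *
 * Return: None
 */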
252 void
253 ol_tx_enqueue(
254 struct ol_txrx_pdev_t *pdev,
255 struct ol_tx_frms_queue_t *txq,
256 struct ol_tx_desc_t *tx_desc,
257 struct ol_txrx_msdu_info_t *tx_msdu_info)
258 {
259 int bytes;
260 struct ol_tx_sched_notify_ctx_t notify_ctx;
261
262 TX_SCHED_DEBUG_PRINT("Enter");
263
264 /*
265 * If too few tx descriptors are available, drop some currently-queued
266 * tx frames, to provide enough tx descriptors for new frames, which
267 * may be higher priority than the current frames.
268 */
269 if (is_ol_tx_discard_frames_success(pdev, tx_desc)) {
270 ol_tx_desc_list tx_descs;
271
272 TAILQ_INIT(&tx_descs);
273 ol_tx_queue_discard(pdev, false, &tx_descs);
274 /*Discard Frames in Discard List*/
275 ol_tx_desc_frame_list_free(pdev, &tx_descs, 1 /* error */);
276 }
277
278 qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
279 TAILQ_INSERT_TAIL(&txq->head, tx_desc, tx_desc_list_elem);
280
281 bytes = qdf_nbuf_len(tx_desc->netbuf);
282 txq->frms++;
283 txq->bytes += bytes;
284 ol_tx_update_grp_frm_count(txq, 1);
285 ol_tx_queue_log_enqueue(pdev, tx_msdu_info, 1, bytes);
286
287 if (txq->flag != ol_tx_queue_paused) {
288 notify_ctx.event = OL_TX_ENQUEUE_FRAME;
289 notify_ctx.frames = 1;
290 notify_ctx.bytes = qdf_nbuf_len(tx_desc->netbuf);
291 notify_ctx.txq = txq;
292 notify_ctx.info.tx_msdu_info = tx_msdu_info;
293 ol_tx_sched_notify(pdev, &notify_ctx);
294 txq->flag = ol_tx_queue_active;
295 }
296
297 if (!ETHERTYPE_IS_EAPOL_WAPI(tx_msdu_info->htt.info.ethertype))
298 OL_TX_QUEUE_ADDBA_CHECK(pdev, txq, tx_msdu_info);
299
300 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
301 TX_SCHED_DEBUG_PRINT("Leave");
302 }
303
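/**
 * ol_tx_dequeue() - remove frames from the head of a tx queue
 * @pdev: the physical device object
 * @txq: the tx queue to pull frames from
 * @head: list to which the dequeued tx descriptors are appended
 * @max_frames: maximum number of frames to dequeue
 * @credit: in: available tx credit; out: credit consumed by the dequeued frames
 * @bytes: out: total bytes of the dequeued frames
 *
 * Return: number of frames dequeued
 */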
304 u_int16_t
305 ol_tx_dequeue(
306 struct ol_txrx_pdev_t *pdev,
307 struct ol_tx_frms_queue_t *txq,
308 ol_tx_desc_list *head,
309 u_int16_t max_frames,
310 u_int32_t *credit,
311 int *bytes)
312 {
313 u_int16_t num_frames;
314 int bytes_sum;
315 unsigned int credit_sum;
316
317 TXRX_ASSERT2(txq->flag != ol_tx_queue_paused);
318 TX_SCHED_DEBUG_PRINT("Enter");
319
320 if (txq->frms < max_frames)
321 max_frames = txq->frms;
322
323 bytes_sum = 0;
324 credit_sum = 0;
325 for (num_frames = 0; num_frames < max_frames; num_frames++) {
326 unsigned int frame_credit;
327 struct ol_tx_desc_t *tx_desc;
328
329 tx_desc = TAILQ_FIRST(&txq->head);
330
331 frame_credit = htt_tx_msdu_credit(tx_desc->netbuf);
332 if (credit_sum + frame_credit > *credit)
333 break;
334
335 credit_sum += frame_credit;
336 bytes_sum += qdf_nbuf_len(tx_desc->netbuf);
337 TAILQ_REMOVE(&txq->head, tx_desc, tx_desc_list_elem);
338 TAILQ_INSERT_TAIL(head, tx_desc, tx_desc_list_elem);
339 }
340 txq->frms -= num_frames;
341 txq->bytes -= bytes_sum;
342 ol_tx_update_grp_frm_count(txq, -credit_sum);
343
344 /* a paused queue remains paused, regardless of whether it has frames */
345 if (txq->frms == 0 && txq->flag == ol_tx_queue_active)
346 txq->flag = ol_tx_queue_empty;
347
348 ol_tx_queue_log_dequeue(pdev, txq, num_frames, bytes_sum);
349 TX_SCHED_DEBUG_PRINT("Leave");
350
351 *bytes = bytes_sum;
352 *credit = credit_sum;
353 return num_frames;
354 }
355
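/**
 * ol_tx_queue_free() - discard all frames held in a tx queue
 * @pdev: the physical device object
 * @txq: the tx queue to empty
 * @tid: the extended TID that the queue was scheduled under
 * @is_peer_txq: whether this is a per-peer (rather than per-vdev) queue
 *
 * Return: None
 */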
356 void
357 ol_tx_queue_free(
358 struct ol_txrx_pdev_t *pdev,
359 struct ol_tx_frms_queue_t *txq,
360 int tid, bool is_peer_txq)
361 {
362 int frms = 0, bytes = 0;
363 struct ol_tx_desc_t *tx_desc;
364 struct ol_tx_sched_notify_ctx_t notify_ctx;
365 ol_tx_desc_list tx_tmp_list;
366
367 TAILQ_INIT(&tx_tmp_list);
368 TX_SCHED_DEBUG_PRINT("Enter");
369 qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
370
371 notify_ctx.event = OL_TX_DELETE_QUEUE;
372 notify_ctx.txq = txq;
373 notify_ctx.info.ext_tid = tid;
374 ol_tx_sched_notify(pdev, &notify_ctx);
375
376 frms = txq->frms;
377 tx_desc = TAILQ_FIRST(&txq->head);
378 while (txq->frms) {
379 bytes += qdf_nbuf_len(tx_desc->netbuf);
380 txq->frms--;
381 tx_desc = TAILQ_NEXT(tx_desc, tx_desc_list_elem);
382 }
383 ol_tx_queue_log_free(pdev, txq, tid, frms, bytes, is_peer_txq);
384 txq->bytes -= bytes;
385 ol_tx_queue_log_free(pdev, txq, tid, frms, bytes, is_peer_txq);
386 txq->flag = ol_tx_queue_empty;
387 /* txq->head gets reset during the TAILQ_CONCAT call */
388 TAILQ_CONCAT(&tx_tmp_list, &txq->head, tx_desc_list_elem);
389 ol_tx_update_grp_frm_count(txq, -frms);
390 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
391 /* free tx frames without holding tx_queue_spinlock */
392 qdf_atomic_add(frms, &pdev->tx_queue.rsrc_cnt);
393 while (frms) {
394 tx_desc = TAILQ_FIRST(&tx_tmp_list);
395 TAILQ_REMOVE(&tx_tmp_list, tx_desc, tx_desc_list_elem);
396 ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 0);
397 frms--;
398 }
399 TX_SCHED_DEBUG_PRINT("Leave");
400 }
401
402
403 /*--- queue pause / unpause functions ---------------------------------------*/
404
405 /**
406 * ol_txrx_peer_tid_pause_base() - suspend/pause the txq of a given tid for a given peer
407 * @pdev: the physical device object
408 * @peer: peer device object
409 * @tid: tid for which queue needs to be paused
410 *
411 * Return: None
412 */
413 static void
414 ol_txrx_peer_tid_pause_base(
415 struct ol_txrx_pdev_t *pdev,
416 struct ol_txrx_peer_t *peer,
417 int tid)
418 {
419 struct ol_tx_frms_queue_t *txq = &peer->txqs[tid];
420
421 if (txq->paused_count.total++ == 0) {
422 struct ol_tx_sched_notify_ctx_t notify_ctx;
423
424 notify_ctx.event = OL_TX_PAUSE_QUEUE;
425 notify_ctx.txq = txq;
426 notify_ctx.info.ext_tid = tid;
427 ol_tx_sched_notify(pdev, &notify_ctx);
428 txq->flag = ol_tx_queue_paused;
429 }
430 }
431 #ifdef QCA_BAD_PEER_TX_FLOW_CL
432
433 /**
434 * ol_txrx_peer_pause_but_no_mgmt_q_base() - suspend/pause all txqs except
435 * management queue for a given peer
436 * @pdev: the physical device object
437 * @peer: peer device object
438 *
439 * Return: None
440 */
441 static void
442 ol_txrx_peer_pause_but_no_mgmt_q_base(
443 struct ol_txrx_pdev_t *pdev,
444 struct ol_txrx_peer_t *peer)
445 {
446 int i;
447
448 for (i = 0; i < OL_TX_MGMT_TID; i++)
449 ol_txrx_peer_tid_pause_base(pdev, peer, i);
450 }
451 #endif
452
453
454 /**
455 * ol_txrx_peer_pause_base() - suspend/pause all txqs for a given peer
456 * @pdev: the physical device object
457 * @peer: peer device object
458 *
459 * Return: None
460 */
461 static void
462 ol_txrx_peer_pause_base(
463 struct ol_txrx_pdev_t *pdev,
464 struct ol_txrx_peer_t *peer)
465 {
466 int i;
467
468 for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++)
469 ol_txrx_peer_tid_pause_base(pdev, peer, i);
470 }
471
472 /**
473 * ol_txrx_peer_tid_unpause_base() - unpause the txq of a given tid for a given peer
474 * @pdev: the physical device object
475 * @peer: peer device object
476 * @tid: tid for which queue needs to be unpaused
477 *
478 * Return: None
479 */
480 static void
481 ol_txrx_peer_tid_unpause_base(
482 struct ol_txrx_pdev_t *pdev,
483 struct ol_txrx_peer_t *peer,
484 int tid)
485 {
486 struct ol_tx_frms_queue_t *txq = &peer->txqs[tid];
487 /*
488 * Don't actually unpause the tx queue until all pause requests
489 * have been removed.
490 */
491 TXRX_ASSERT2(txq->paused_count.total > 0);
492 /* return, if not already paused */
493 if (txq->paused_count.total == 0)
494 return;
495
496 if (--txq->paused_count.total == 0) {
497 struct ol_tx_sched_notify_ctx_t notify_ctx;
498
499 notify_ctx.event = OL_TX_UNPAUSE_QUEUE;
500 notify_ctx.txq = txq;
501 notify_ctx.info.ext_tid = tid;
502 ol_tx_sched_notify(pdev, &notify_ctx);
503
504 if (txq->frms == 0) {
505 txq->flag = ol_tx_queue_empty;
506 } else {
507 txq->flag = ol_tx_queue_active;
508 /*
509 * Now that there are new tx frames available to download,
510 * invoke the scheduling function, to see if it wants to
511 * download the new frames.
512 * Since the queue lock is currently held, and since
513 * the scheduler function takes the lock, temporarily
514 * release the lock.
515 */
516 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
517 ol_tx_sched(pdev);
518 qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
519 }
520 }
521 }
522
523 /**
524 * ol_txrx_peer_unpause_base() - unpause all txqs for a given peer
525 * @pdev: the physical device object
526 * @peer: peer device object
527 *
528 * Return: None
529 */
530 static void
531 ol_txrx_peer_unpause_base(
532 struct ol_txrx_pdev_t *pdev,
533 struct ol_txrx_peer_t *peer)
534 {
535 int i;
536
537 for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++)
538 ol_txrx_peer_tid_unpause_base(pdev, peer, i);
539 }
540
541 #ifdef QCA_BAD_PEER_TX_FLOW_CL
542 /**
543 * ol_txrx_peer_unpause_but_no_mgmt_q_base() - unpause all txqs except
544 * management queue for a given peer
545 * @pdev: the physical device object
546 * @peer: peer device object
547 *
548 * Return: None
549 */
550 static void
551 ol_txrx_peer_unpause_but_no_mgmt_q_base(
552 struct ol_txrx_pdev_t *pdev,
553 struct ol_txrx_peer_t *peer)
554 {
555 int i;
556
557 for (i = 0; i < OL_TX_MGMT_TID; i++)
558 ol_txrx_peer_tid_unpause_base(pdev, peer, i);
559 }
560 #endif
561
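/**
 * ol_txrx_peer_tid_unpause() - unpause a peer's tx queue(s)
 * @peer: peer device object
 * @tid: tid to unpause, or -1 to unpause all of the peer's tx queues
 *
 * Return: None
 */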
562 void
563 ol_txrx_peer_tid_unpause(ol_txrx_peer_handle peer, int tid)
564 {
565 struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
566
567 /* TO DO: log the queue unpause */
568
569 /* acquire the mutex lock, since we'll be modifying the queues */
570 TX_SCHED_DEBUG_PRINT("Enter");
571 qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
572
573 if (tid == -1) {
574 int i;
575
576 for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++)
577 ol_txrx_peer_tid_unpause_base(pdev, peer, i);
578
579 } else {
580 ol_txrx_peer_tid_unpause_base(pdev, peer, tid);
581 }
582
583 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
584 TX_SCHED_DEBUG_PRINT("Leave");
585 }
586
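/**
 * ol_txrx_vdev_pause() - pause the tx queues of all peers of a vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of the vdev whose peer tx queues should be paused
 * @reason: pause reason (not used by this implementation)
 * @pause_type: PAUSE_TYPE_CHOP, PAUSE_TYPE_CHOP_TDLS_OFFCHAN, or other
 *
 * Return: None
 */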
587 void
588 ol_txrx_vdev_pause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
589 uint32_t reason, uint32_t pause_type)
590 {
591 struct ol_txrx_vdev_t *vdev =
592 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
593 struct ol_txrx_pdev_t *pdev;
594 struct ol_txrx_peer_t *peer;
595 /* TO DO: log the queue pause */
596 /* acquire the mutex lock, since we'll be modifying the queues */
597 TX_SCHED_DEBUG_PRINT("Enter");
598
599 if (qdf_unlikely(!vdev)) {
600 ol_txrx_err("vdev is NULL");
601 return;
602 }
603
604 pdev = vdev->pdev;
605
606 /* use peer_ref_mutex before accessing peer_list */
607 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
608 qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
609 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
610 if (pause_type == PAUSE_TYPE_CHOP) {
611 if (!(peer->is_tdls_peer && peer->tdls_offchan_enabled))
612 ol_txrx_peer_pause_base(pdev, peer);
613 } else if (pause_type == PAUSE_TYPE_CHOP_TDLS_OFFCHAN) {
614 if (peer->is_tdls_peer && peer->tdls_offchan_enabled)
615 ol_txrx_peer_pause_base(pdev, peer);
616 } else {
617 ol_txrx_peer_pause_base(pdev, peer);
618 }
619 }
620 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
621 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
622
623 TX_SCHED_DEBUG_PRINT("Leave");
624 }
625
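/**
 * ol_txrx_vdev_unpause() - unpause the tx queues of all peers of a vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of the vdev whose peer tx queues should be unpaused
 * @reason: unpause reason (not used by this implementation)
 * @pause_type: PAUSE_TYPE_CHOP, PAUSE_TYPE_CHOP_TDLS_OFFCHAN, or other
 *
 * Return: None
 */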
626 void ol_txrx_vdev_unpause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
627 uint32_t reason, uint32_t pause_type)
628 {
629 struct ol_txrx_vdev_t *vdev =
630 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
631 struct ol_txrx_pdev_t *pdev;
632 struct ol_txrx_peer_t *peer;
633
634 /* TO DO: log the queue unpause */
635 /* acquire the mutex lock, since we'll be modifying the queues */
636 TX_SCHED_DEBUG_PRINT("Enter");
637
638 if (qdf_unlikely(!vdev)) {
639 ol_txrx_err("vdev is NULL");
640 return;
641 }
642
643 pdev = vdev->pdev;
644
645 /* take peer_ref_mutex before accessing peer_list */
646 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
647 qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
648
649 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
650 if (pause_type == PAUSE_TYPE_CHOP) {
651 if (!(peer->is_tdls_peer && peer->tdls_offchan_enabled))
652 ol_txrx_peer_unpause_base(pdev, peer);
653 } else if (pause_type == PAUSE_TYPE_CHOP_TDLS_OFFCHAN) {
654 if (peer->is_tdls_peer && peer->tdls_offchan_enabled)
655 ol_txrx_peer_unpause_base(pdev, peer);
656 } else {
657 ol_txrx_peer_unpause_base(pdev, peer);
658 }
659 }
660 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
661 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
662
663 TX_SCHED_DEBUG_PRINT("Leave");
664 }
665
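/**
 * ol_txrx_vdev_flush() - discard all tx frames queued for a vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of the vdev to flush
 *
 * Return: None
 */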
666 void ol_txrx_vdev_flush(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
667 {
668 struct ol_txrx_vdev_t *vdev =
669 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
670
671 if (qdf_unlikely(!vdev)) {
672 ol_txrx_err("vdev is NULL");
673 return;
674 }
675
676 if (!vdev)
677 return;
678
679 ol_tx_queue_vdev_flush(vdev->pdev, vdev);
680 }
681
682 #ifdef QCA_BAD_PEER_TX_FLOW_CL
683
684 /**
685 * ol_txrx_peer_bal_add_limit_peer() - add one peer into limit list
686 * @pdev: Pointer to PDEV structure.
687 * @peer_id: Peer Identifier.
688 * @peer_limit: Peer limit threshold
689 *
690 * Add one peer into the limit list of pdev
691 * Note that the peer limit info will also be updated.
692 * If this is the first peer added to the list, start the timer.
693 *
694 * Return: None
695 */
696 void
697 ol_txrx_peer_bal_add_limit_peer(struct ol_txrx_pdev_t *pdev,
698 u_int16_t peer_id, u_int16_t peer_limit)
699 {
700 u_int16_t i, existed = 0;
701 struct ol_txrx_peer_t *peer = NULL;
702
703 for (i = 0; i < pdev->tx_peer_bal.peer_num; i++) {
704 if (pdev->tx_peer_bal.limit_list[i].peer_id == peer_id) {
705 existed = 1;
706 break;
707 }
708 }
709
710 if (!existed) {
711 u_int32_t peer_num = pdev->tx_peer_bal.peer_num;
712 /* Check if peer_num has reached the capacity */
713 if (peer_num >= MAX_NO_PEERS_IN_LIMIT) {
714 TX_SCHED_DEBUG_PRINT_ALWAYS(
715 "reach the maximum peer num %d", peer_num);
716 return;
717 }
718 pdev->tx_peer_bal.limit_list[peer_num].peer_id = peer_id;
719 pdev->tx_peer_bal.limit_list[peer_num].limit_flag = true;
720 pdev->tx_peer_bal.limit_list[peer_num].limit = peer_limit;
721 pdev->tx_peer_bal.peer_num++;
722
723 peer = ol_txrx_peer_find_by_id(pdev, peer_id);
724 if (peer) {
725 peer->tx_limit_flag = true;
726 peer->tx_limit = peer_limit;
727 }
728
729 TX_SCHED_DEBUG_PRINT_ALWAYS(
730 "Add one peer into limit queue, peer_id %d, cur peer num %d",
731 peer_id,
732 pdev->tx_peer_bal.peer_num);
733 }
734
735 /* Only start the timer once */
736 if (pdev->tx_peer_bal.peer_bal_timer_state ==
737 ol_tx_peer_bal_timer_inactive) {
738 qdf_timer_start(&pdev->tx_peer_bal.peer_bal_timer,
739 pdev->tx_peer_bal.peer_bal_period_ms);
740 pdev->tx_peer_bal.peer_bal_timer_state =
741 ol_tx_peer_bal_timer_active;
742 }
743 }
744
745 /**
746 * ol_txrx_peer_bal_remove_limit_peer() - remove one peer from limit list
747 * @pdev: Pointer to PDEV structure.
748 * @peer_id: Peer Identifier.
749 *
750 * Remove one peer from the limit list of pdev
751 * Note that the timer is only stopped if no peer remains in the limit state
752 *
753 * Return: None
754 */
755 void
756 ol_txrx_peer_bal_remove_limit_peer(struct ol_txrx_pdev_t *pdev,
757 u_int16_t peer_id)
758 {
759 u_int16_t i;
760 struct ol_txrx_peer_t *peer = NULL;
761
762 for (i = 0; i < pdev->tx_peer_bal.peer_num; i++) {
763 if (pdev->tx_peer_bal.limit_list[i].peer_id == peer_id) {
764 pdev->tx_peer_bal.limit_list[i] =
765 pdev->tx_peer_bal.limit_list[
766 pdev->tx_peer_bal.peer_num - 1];
767 pdev->tx_peer_bal.peer_num--;
768
769 peer = ol_txrx_peer_find_by_id(pdev, peer_id);
770 if (peer)
771 peer->tx_limit_flag = false;
772
773
774 TX_SCHED_DEBUG_PRINT(
775 "Remove one peer from limitq, peer_id %d, cur peer num %d",
776 peer_id,
777 pdev->tx_peer_bal.peer_num);
778 break;
779 }
780 }
781
782 /* Only stop the timer if no peer in limit state */
783 if (pdev->tx_peer_bal.peer_num == 0) {
784 qdf_timer_stop(&pdev->tx_peer_bal.peer_bal_timer);
785 pdev->tx_peer_bal.peer_bal_timer_state =
786 ol_tx_peer_bal_timer_inactive;
787 }
788 }
789
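/**
 * ol_txrx_peer_pause_but_no_mgmt_q() - pause all of a peer's tx queues
 * except the management queue
 * @peer: peer device object
 *
 * Return: None
 */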
790 void
791 ol_txrx_peer_pause_but_no_mgmt_q(ol_txrx_peer_handle peer)
792 {
793 struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
794
795 /* TO DO: log the queue pause */
796
797 /* acquire the mutex lock, since we'll be modifying the queues */
798 TX_SCHED_DEBUG_PRINT("Enter");
799 qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
800
801 ol_txrx_peer_pause_but_no_mgmt_q_base(pdev, peer);
802
803 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
804 TX_SCHED_DEBUG_PRINT("Leave");
805 }
806
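/**
 * ol_txrx_peer_unpause_but_no_mgmt_q() - unpause all of a peer's tx queues
 * except the management queue
 * @peer: peer device object
 *
 * Return: None
 */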
807 void
808 ol_txrx_peer_unpause_but_no_mgmt_q(ol_txrx_peer_handle peer)
809 {
810 struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
811
812 /* TO DO: log the queue pause */
813
814 /* acquire the mutex lock, since we'll be modifying the queues */
815 TX_SCHED_DEBUG_PRINT("Enter");
816 qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
817
818 ol_txrx_peer_unpause_but_no_mgmt_q_base(pdev, peer);
819
820 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
821 TX_SCHED_DEBUG_PRINT("Leave");
822 }
823
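/**
 * ol_tx_bad_peer_dequeue_check() - cap the dequeue count for a rate-limited peer
 * @txq: the tx queue being scheduled
 * @max_frames: number of frames the scheduler would like to dequeue
 * @tx_limit_flag: out: set to 1 if the peer's tx limit is applied
 *
 * Return: the peer's remaining tx limit if it is lower than max_frames,
 *	otherwise max_frames
 */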
824 u_int16_t
825 ol_tx_bad_peer_dequeue_check(struct ol_tx_frms_queue_t *txq,
826 u_int16_t max_frames,
827 u_int16_t *tx_limit_flag)
828 {
829 if (txq && (txq->peer) && (txq->peer->tx_limit_flag) &&
830 (txq->peer->tx_limit < max_frames)) {
831 TX_SCHED_DEBUG_PRINT(
832 "Peer ID %d goes to limit, threshold is %d",
833 txq->peer->peer_ids[0], txq->peer->tx_limit);
834 *tx_limit_flag = 1;
835 return txq->peer->tx_limit;
836 } else {
837 return max_frames;
838 }
839 }
840
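/**
 * ol_tx_bad_peer_update_tx_limit() - deduct dequeued frames from a
 * rate-limited peer's tx limit
 * @pdev: the physical device object
 * @txq: the tx queue that was just serviced
 * @frames: number of frames dequeued
 * @tx_limit_flag: whether the peer's tx limit was applied for this dequeue
 *
 * Return: None
 */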
841 void
842 ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
843 struct ol_tx_frms_queue_t *txq,
844 u_int16_t frames,
845 u_int16_t tx_limit_flag)
846 {
847 if (unlikely(!pdev)) {
848 TX_SCHED_DEBUG_PRINT_ALWAYS("Error: NULL pdev handler");
849 return;
850 }
851
852 if (unlikely(!txq)) {
853 TX_SCHED_DEBUG_PRINT_ALWAYS("Error: NULL txq");
854 return;
855 }
856
857 qdf_spin_lock_bh(&pdev->tx_peer_bal.mutex);
858 if (tx_limit_flag && (txq->peer) &&
859 (txq->peer->tx_limit_flag)) {
860 if (txq->peer->tx_limit < frames)
861 txq->peer->tx_limit = 0;
862 else
863 txq->peer->tx_limit -= frames;
864
865 TX_SCHED_DEBUG_PRINT_ALWAYS(
866 "Peer ID %d in limit, deque %d frms",
867 txq->peer->peer_ids[0], frames);
868 } else if (txq->peer) {
869 TX_SCHED_DEBUG_PRINT("Download peer_id %d, num_frames %d",
870 txq->peer->peer_ids[0], frames);
871 }
872 qdf_spin_unlock_bh(&pdev->tx_peer_bal.mutex);
873 }
874
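/**
 * ol_txrx_bad_peer_txctl_set_setting() - enable/disable bad peer tx flow
 * control and set its timer period and txq limit
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the physical device object
 * @enable: non-zero to enable bad peer tx flow control, 0 to disable it
 * @period: peer balance timer period, in ms
 * @txq_limit: tx queue limit used while balancing
 *
 * Return: None
 */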
875 void
876 ol_txrx_bad_peer_txctl_set_setting(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
877 int enable, int period, int txq_limit)
878 {
879 struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
880 ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
881
882 if (enable)
883 pdev->tx_peer_bal.enabled = ol_tx_peer_bal_enable;
884 else
885 pdev->tx_peer_bal.enabled = ol_tx_peer_bal_disable;
886
887 /* Set the current settings */
888 pdev->tx_peer_bal.peer_bal_period_ms = period;
889 pdev->tx_peer_bal.peer_bal_txq_limit = txq_limit;
890 }
891
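/**
 * ol_txrx_bad_peer_txctl_update_threshold() - set the throughput threshold
 * and tx limit for one bad peer control level
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the physical device object
 * @level: index into the ctl_thresh table
 * @tput_thresh: throughput threshold for this level
 * @tx_limit: tx frame limit applied at this level
 *
 * Return: None
 */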
892 void
893 ol_txrx_bad_peer_txctl_update_threshold(struct cdp_soc_t *soc_hdl,
894 uint8_t pdev_id, int level,
895 int tput_thresh, int tx_limit)
896 {
897 struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
898 ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
899
900 /* Set the current settings */
901 pdev->tx_peer_bal.ctl_thresh[level].tput_thresh =
902 tput_thresh;
903 pdev->tx_peer_bal.ctl_thresh[level].tx_limit =
904 tx_limit;
905 }
906
907 /**
908 * ol_tx_pdev_peer_bal_timer() - timer function
909 * @context: context of timer function
910 *
911 * Return: None
912 */
913 static void
914 ol_tx_pdev_peer_bal_timer(void *context)
915 {
916 int i;
917 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;
918
919 qdf_spin_lock_bh(&pdev->tx_peer_bal.mutex);
920
921 for (i = 0; i < pdev->tx_peer_bal.peer_num; i++) {
922 if (pdev->tx_peer_bal.limit_list[i].limit_flag) {
923 u_int16_t peer_id =
924 pdev->tx_peer_bal.limit_list[i].peer_id;
925 u_int16_t tx_limit =
926 pdev->tx_peer_bal.limit_list[i].limit;
927
928 struct ol_txrx_peer_t *peer = NULL;
929
930 peer = ol_txrx_peer_find_by_id(pdev, peer_id);
931 TX_SCHED_DEBUG_PRINT(
932 "peer_id %d peer = 0x%x tx limit %d",
933 peer_id,
934 (int)peer, tx_limit);
935
936 /*
937 * It is possible that the peer limit is still not 0,
938 * but that is a scenario which does not need to be handled here.
939 */
940 if (peer) {
941 peer->tx_limit = tx_limit;
942 } else {
943 ol_txrx_peer_bal_remove_limit_peer(pdev,
944 peer_id);
945 TX_SCHED_DEBUG_PRINT_ALWAYS(
946 "No such a peer, peer id = %d",
947 peer_id);
948 }
949 }
950 }
951
952 qdf_spin_unlock_bh(&pdev->tx_peer_bal.mutex);
953
954 if (pdev->tx_peer_bal.peer_num) {
955 ol_tx_sched(pdev);
956 qdf_timer_start(&pdev->tx_peer_bal.peer_bal_timer,
957 pdev->tx_peer_bal.peer_bal_period_ms);
958 }
959 }
960
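/**
 * ol_txrx_set_txq_peer() - associate a tx queue with its owning peer
 * @txq: the tx queue
 * @peer: the peer that owns the tx queue
 *
 * Return: None
 */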
961 void
962 ol_txrx_set_txq_peer(
963 struct ol_tx_frms_queue_t *txq,
964 struct ol_txrx_peer_t *peer)
965 {
966 if (txq)
967 txq->peer = peer;
968 }
969
970 void ol_tx_badpeer_flow_cl_init(struct ol_txrx_pdev_t *pdev)
971 {
972 u_int32_t timer_period;
973
974 qdf_spinlock_create(&pdev->tx_peer_bal.mutex);
975 pdev->tx_peer_bal.peer_num = 0;
976 pdev->tx_peer_bal.peer_bal_timer_state
977 = ol_tx_peer_bal_timer_inactive;
978
979 timer_period = 2000;
980 pdev->tx_peer_bal.peer_bal_period_ms = timer_period;
981
982 qdf_timer_init(
983 pdev->osdev,
984 &pdev->tx_peer_bal.peer_bal_timer,
985 ol_tx_pdev_peer_bal_timer,
986 pdev, QDF_TIMER_TYPE_SW);
987 }
988
989 void ol_tx_badpeer_flow_cl_deinit(struct ol_txrx_pdev_t *pdev)
990 {
991 qdf_timer_stop(&pdev->tx_peer_bal.peer_bal_timer);
992 pdev->tx_peer_bal.peer_bal_timer_state =
993 ol_tx_peer_bal_timer_inactive;
994 qdf_timer_free(&pdev->tx_peer_bal.peer_bal_timer);
995 qdf_spinlock_destroy(&pdev->tx_peer_bal.mutex);
996 }
997
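/**
 * ol_txrx_peer_link_status_handler() - process peer link status reports
 * @pdev: the physical device object
 * @peer_num: number of entries in the report
 * @peer_link_status: array of per-peer link status (rate/PHY) reports
 *
 * Based on each peer's reported throughput, add the peer to or remove it
 * from the tx limit list, and pause/unpause its non-management tx queues.
 *
 * Return: None
 */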
998 void
999 ol_txrx_peer_link_status_handler(
1000 ol_txrx_pdev_handle pdev,
1001 u_int16_t peer_num,
1002 struct rate_report_t *peer_link_status)
1003 {
1004 u_int16_t i = 0;
1005 struct ol_txrx_peer_t *peer = NULL;
1006
1007 if (!pdev) {
1008 TX_SCHED_DEBUG_PRINT_ALWAYS("Error: NULL pdev handler");
1009 return;
1010 }
1011
1012 if (!peer_link_status) {
1013 TX_SCHED_DEBUG_PRINT_ALWAYS(
1014 "Error:NULL link report message. peer num %d",
1015 peer_num);
1016 return;
1017 }
1018
1019 /* Check if bad peer tx flow CL is enabled */
1020 if (pdev->tx_peer_bal.enabled != ol_tx_peer_bal_enable) {
1021 TX_SCHED_DEBUG_PRINT_ALWAYS(
1022 "Bad peer tx flow CL is not enabled, ignore it");
1023 return;
1024 }
1025
1026 /* Check peer_num is reasonable */
1027 if (peer_num > MAX_NO_PEERS_IN_LIMIT) {
1028 TX_SCHED_DEBUG_PRINT_ALWAYS("Bad peer_num %d", peer_num);
1029 return;
1030 }
1031
1032 TX_SCHED_DEBUG_PRINT_ALWAYS("peer_num %d", peer_num);
1033
1034 for (i = 0; i < peer_num; i++) {
1035 u_int16_t peer_limit, peer_id;
1036 u_int16_t pause_flag, unpause_flag;
1037 u_int32_t peer_phy, peer_tput;
1038
1039 peer_id = peer_link_status->id;
1040 peer_phy = peer_link_status->phy;
1041 peer_tput = peer_link_status->rate;
1042
1043 TX_SCHED_DEBUG_PRINT("peer id %d tput %d phy %d",
1044 peer_id, peer_tput, peer_phy);
1045
1046 /* Sanity check for the PHY mode value */
1047 if (peer_phy > TXRX_IEEE11_AC) {
1048 TX_SCHED_DEBUG_PRINT_ALWAYS(
1049 "PHY value is illegal: %d, and the peer_id %d",
1050 peer_link_status->phy, peer_id);
1051 continue;
1052 }
1053 pause_flag = false;
1054 unpause_flag = false;
1055 peer_limit = 0;
1056
1057 /* From now on, PHY, PER info should be all fine */
1058 qdf_spin_lock_bh(&pdev->tx_peer_bal.mutex);
1059
1060 /* Update link status analysis for each peer */
1061 peer = ol_txrx_peer_find_by_id(pdev, peer_id);
1062 if (peer) {
1063 u_int32_t thresh, limit, phy;
1064
1065 phy = peer_link_status->phy;
1066 thresh = pdev->tx_peer_bal.ctl_thresh[phy].tput_thresh;
1067 limit = pdev->tx_peer_bal.ctl_thresh[phy].tx_limit;
1068
1069 if (((peer->tx_pause_flag) || (peer->tx_limit_flag)) &&
1070 (peer_tput) && (peer_tput < thresh))
1071 peer_limit = limit;
1072
1073 if (peer_limit) {
1074 ol_txrx_peer_bal_add_limit_peer(pdev, peer_id,
1075 peer_limit);
1076 } else if (pdev->tx_peer_bal.peer_num) {
1077 TX_SCHED_DEBUG_PRINT(
1078 "Check if peer_id %d exit limit",
1079 peer_id);
1080 ol_txrx_peer_bal_remove_limit_peer(pdev,
1081 peer_id);
1082 }
1083 if ((peer_tput == 0) &&
1084 (peer->tx_pause_flag == false)) {
1085 peer->tx_pause_flag = true;
1086 pause_flag = true;
1087 } else if (peer->tx_pause_flag) {
1088 unpause_flag = true;
1089 peer->tx_pause_flag = false;
1090 }
1091 } else {
1092 TX_SCHED_DEBUG_PRINT(
1093 "Remove peer_id %d from limit list", peer_id);
1094 ol_txrx_peer_bal_remove_limit_peer(pdev, peer_id);
1095 }
1096
1097 peer_link_status++;
1098 qdf_spin_unlock_bh(&pdev->tx_peer_bal.mutex);
1099 if (pause_flag)
1100 ol_txrx_peer_pause_but_no_mgmt_q(peer);
1101 else if (unpause_flag)
1102 ol_txrx_peer_unpause_but_no_mgmt_q(peer);
1103 }
1104 }
1105 #endif /* QCA_BAD_PEER_TX_FLOW_CL */
1106
1107 /*--- ADDBA triggering functions --------------------------------------------*/
1108
1109
1110 /*=== debug functions =======================================================*/
1111
1112 /*--- queue event log -------------------------------------------------------*/
1113
1114 #if defined(DEBUG_HL_LOGGING)
1115
1116 #define negative_sign -1
1117
1118 /**
1119 * ol_tx_queue_log_entry_type_info() - get the size and alignment of a log entry
1120 * @type: pointer to the log entry type
1121 * @size: output - size of the entry
1122 * @align: output - required alignment of the entry
1123 * @var_size: whether to read the variable-sized portion of the record
1124 *
1125 * Return: None
1126 */
1127 static void
1128 ol_tx_queue_log_entry_type_info(
1129 u_int8_t *type, int *size, int *align, int var_size)
1130 {
1131 switch (*type) {
1132 case ol_tx_log_entry_type_enqueue:
1133 case ol_tx_log_entry_type_dequeue:
1134 case ol_tx_log_entry_type_queue_free:
1135 *size = sizeof(struct ol_tx_log_queue_add_t);
1136 *align = 2;
1137 break;
1138
1139 case ol_tx_log_entry_type_queue_state:
1140 *size = offsetof(struct ol_tx_log_queue_state_var_sz_t, data);
1141 *align = 4;
1142 if (var_size) {
1143 /* read the variable-sized record,
1144 * to see how large it is
1145 */
1146 int align_pad;
1147 struct ol_tx_log_queue_state_var_sz_t *record;
1148
1149 align_pad =
1150 (*align - (uint32_t)(((unsigned long) type) + 1))
1151 & (*align - 1);
1152 record = (struct ol_tx_log_queue_state_var_sz_t *)
1153 (type + 1 + align_pad);
1154 *size += record->num_cats_active *
1155 (sizeof(u_int32_t) /* bytes */ +
1156 sizeof(u_int16_t) /* frms */);
1157 }
1158 break;
1159
1160 /*case ol_tx_log_entry_type_drop:*/
1161 default:
1162 *size = 0;
1163 *align = 0;
1164 };
1165 }
1166
1167 /**
1168 * ol_tx_queue_log_oldest_update() - advance the oldest log record offset
1169 * @pdev: pointer to txrx handle
1170 * @offset: offset value
1171 *
1172 * Return: None
1173 */
1174 static void
1175 ol_tx_queue_log_oldest_update(struct ol_txrx_pdev_t *pdev, int offset)
1176 {
1177 int oldest_record_offset;
1178
1179 /*
1180 * If the offset of the oldest record is between the current and
1181 * new values of the offset of the newest record, then the oldest
1182 * record has to be dropped from the log to provide room for the
1183 * newest record.
1184 * Advance the offset of the oldest record until it points to a
1185 * record that is beyond the new value of the offset of the newest
1186 * record.
1187 */
1188 if (!pdev->txq_log.wrapped)
1189 /*
1190 * The log has not even filled up yet - no need to remove
1191 * the oldest record to make room for a new record.
1192 */
1193 return;
1194
1195
1196 if (offset > pdev->txq_log.offset) {
1197 /*
1198 * not wraparound -
1199 * The oldest record offset may have already wrapped around,
1200 * even if the newest record has not. In this case, then
1201 * the oldest record offset is fine where it is.
1202 */
1203 if (pdev->txq_log.oldest_record_offset == 0)
1204 return;
1205
1206 oldest_record_offset = pdev->txq_log.oldest_record_offset;
1207 } else
1208 /* wraparound */
1209 oldest_record_offset = 0;
1210
1211
1212 while (oldest_record_offset < offset) {
1213 int size, align, align_pad;
1214 u_int8_t type;
1215
1216 type = pdev->txq_log.data[oldest_record_offset];
1217 if (type == ol_tx_log_entry_type_wrap) {
1218 oldest_record_offset = 0;
1219 break;
1220 }
1221 ol_tx_queue_log_entry_type_info(
1222 &pdev->txq_log.data[oldest_record_offset],
1223 &size, &align, 1);
1224 align_pad =
1225 (align - ((oldest_record_offset + 1/*type*/)))
1226 & (align - 1);
1227 /*
1228 * QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
1229 * "TXQ LOG old alloc: offset %d, type %d, size %d (%d)\n",
1230 * oldest_record_offset, type, size, size + 1 + align_pad);
1231 */
1232 oldest_record_offset += size + 1 + align_pad;
1233 }
1234 if (oldest_record_offset >= pdev->txq_log.size)
1235 oldest_record_offset = 0;
1236
1237 pdev->txq_log.oldest_record_offset = oldest_record_offset;
1238 }
1239
1240 /**
1241 * ol_tx_queue_log_alloc() - log data allocation
1242 * @pdev: physical device object
1243 * @type: ol_tx_log_entry_type
1244 * @extra_bytes: extra bytes
1245 *
1246 *
1247 * Return: pointer to the allocated log record, or NULL if the log is full
1248 */
1249 static void *
1250 ol_tx_queue_log_alloc(
1251 struct ol_txrx_pdev_t *pdev,
1252 u_int8_t type /* ol_tx_log_entry_type */,
1253 int extra_bytes)
1254 {
1255 int size, align, align_pad;
1256 int offset;
1257
1258 ol_tx_queue_log_entry_type_info(&type, &size, &align, 0);
1259 size += extra_bytes;
1260
1261 offset = pdev->txq_log.offset;
1262 align_pad = (align - ((offset + 1/*type*/))) & (align - 1);
1263
1264 if (pdev->txq_log.size - offset >= size + 1 + align_pad)
1265 /* no need to wrap around */
1266 goto alloc_found;
1267
1268 if (!pdev->txq_log.allow_wrap)
1269 return NULL; /* log is full and can't wrap */
1270
1271 /* handle wrap-around */
1272 pdev->txq_log.wrapped = 1;
1273 offset = 0;
1274 align_pad = (align - ((offset + 1/*type*/))) & (align - 1);
1275 /* sanity check that the log is large enough to hold this entry */
1276 if (pdev->txq_log.size <= size + 1 + align_pad)
1277 return NULL;
1278
1279
1280 alloc_found:
1281 ol_tx_queue_log_oldest_update(pdev, offset + size + 1 + align_pad);
1282 if (offset == 0)
1283 pdev->txq_log.data[pdev->txq_log.offset] =
1284 ol_tx_log_entry_type_wrap;
1285
1286 /*
1287 * QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
1288 * "TXQ LOG new alloc: offset %d, type %d, size %d (%d)\n",
1289 * offset, type, size, size + 1 + align_pad);
1290 */
1291 pdev->txq_log.data[offset] = type;
1292 pdev->txq_log.offset = offset + size + 1 + align_pad;
1293 if (pdev->txq_log.offset >= pdev->txq_log.size) {
1294 pdev->txq_log.offset = 0;
1295 pdev->txq_log.wrapped = 1;
1296 }
1297 return &pdev->txq_log.data[offset + 1 + align_pad];
1298 }
1299
1300 /**
1301 * ol_tx_queue_log_record_display() - show log record of tx queue
1302 * @pdev: pointer to txrx handle
1303 * @offset: offset value
1304 *
1305 * Return: size of the displayed record, negative offset on wrap, 0 on error
1306 */
1307 static int
1308 ol_tx_queue_log_record_display(struct ol_txrx_pdev_t *pdev, int offset)
1309 {
1310 int size, align, align_pad;
1311 u_int8_t type;
1312 struct ol_txrx_peer_t *peer;
1313
1314 qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1315 type = pdev->txq_log.data[offset];
1316 ol_tx_queue_log_entry_type_info(
1317 &pdev->txq_log.data[offset], &size, &align, 1);
1318 align_pad = (align - ((offset + 1/*type*/))) & (align - 1);
1319
1320 switch (type) {
1321 case ol_tx_log_entry_type_enqueue:
1322 {
1323 struct ol_tx_log_queue_add_t record;
1324
1325 qdf_mem_copy(&record,
1326 &pdev->txq_log.data[offset + 1 + align_pad],
1327 sizeof(struct ol_tx_log_queue_add_t));
1328 qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1329
1330 if (record.peer_id != 0xffff) {
1331 peer = ol_txrx_peer_find_by_id(pdev,
1332 record.peer_id);
1333 if (peer)
1334 QDF_TRACE(QDF_MODULE_ID_TXRX,
1335 QDF_TRACE_LEVEL_ERROR,
1336 "Q: %6d %5d %3d %4d ("QDF_MAC_ADDR_FMT")",
1337 record.num_frms, record.num_bytes,
1338 record.tid,
1339 record.peer_id,
1340 QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1341 else
1342 QDF_TRACE(QDF_MODULE_ID_TXRX,
1343 QDF_TRACE_LEVEL_ERROR,
1344 "Q: %6d %5d %3d %4d",
1345 record.num_frms, record.num_bytes,
1346 record.tid, record.peer_id);
1347 } else {
1348 QDF_TRACE(QDF_MODULE_ID_TXRX,
1349 QDF_TRACE_LEVEL_INFO,
1350 "Q: %6d %5d %3d from vdev",
1351 record.num_frms, record.num_bytes,
1352 record.tid);
1353 }
1354 break;
1355 }
1356 case ol_tx_log_entry_type_dequeue:
1357 {
1358 struct ol_tx_log_queue_add_t record;
1359
1360 qdf_mem_copy(&record,
1361 &pdev->txq_log.data[offset + 1 + align_pad],
1362 sizeof(struct ol_tx_log_queue_add_t));
1363 qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1364
1365 if (record.peer_id != 0xffff) {
1366 peer = ol_txrx_peer_find_by_id(pdev, record.peer_id);
1367 if (peer)
1368 QDF_TRACE(QDF_MODULE_ID_TXRX,
1369 QDF_TRACE_LEVEL_ERROR,
1370 "DQ: %6d %5d %3d %4d ("QDF_MAC_ADDR_FMT")",
1371 record.num_frms, record.num_bytes,
1372 record.tid,
1373 record.peer_id,
1374 QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1375 else
1376 QDF_TRACE(QDF_MODULE_ID_TXRX,
1377 QDF_TRACE_LEVEL_ERROR,
1378 "DQ: %6d %5d %3d %4d",
1379 record.num_frms, record.num_bytes,
1380 record.tid, record.peer_id);
1381 } else {
1382 QDF_TRACE(QDF_MODULE_ID_TXRX,
1383 QDF_TRACE_LEVEL_INFO,
1384 "DQ: %6d %5d %3d from vdev",
1385 record.num_frms, record.num_bytes,
1386 record.tid);
1387 }
1388 break;
1389 }
1390 case ol_tx_log_entry_type_queue_free:
1391 {
1392 struct ol_tx_log_queue_add_t record;
1393
1394 qdf_mem_copy(&record,
1395 &pdev->txq_log.data[offset + 1 + align_pad],
1396 sizeof(struct ol_tx_log_queue_add_t));
1397 qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1398
1399 if (record.peer_id != 0xffff) {
1400 peer = ol_txrx_peer_find_by_id(pdev, record.peer_id);
1401 if (peer)
1402 QDF_TRACE(QDF_MODULE_ID_TXRX,
1403 QDF_TRACE_LEVEL_ERROR,
1404 "F: %6d %5d %3d %4d ("QDF_MAC_ADDR_FMT")",
1405 record.num_frms, record.num_bytes,
1406 record.tid,
1407 record.peer_id,
1408 QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1409 else
1410 QDF_TRACE(QDF_MODULE_ID_TXRX,
1411 QDF_TRACE_LEVEL_ERROR,
1412 "F: %6d %5d %3d %4d",
1413 record.num_frms, record.num_bytes,
1414 record.tid, record.peer_id);
1415 } else {
1416 /* shouldn't happen */
1417 QDF_TRACE(QDF_MODULE_ID_TXRX,
1418 QDF_TRACE_LEVEL_INFO,
1419 "Unexpected vdev queue removal\n");
1420 }
1421 break;
1422 }
1423
1424 case ol_tx_log_entry_type_queue_state:
1425 {
1426 int i, j;
1427 u_int32_t active_bitmap;
1428 struct ol_tx_log_queue_state_var_sz_t record;
1429 u_int8_t *data;
1430
1431 qdf_mem_copy(&record,
1432 &pdev->txq_log.data[offset + 1 + align_pad],
1433 sizeof(struct ol_tx_log_queue_state_var_sz_t));
1434 qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1435
1436 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1437 "S: bitmap = %#x",
1438 record.active_bitmap);
1439 data = &record.data[0];
1440 j = 0;
1441 i = 0;
1442 active_bitmap = record.active_bitmap;
1443 while (active_bitmap) {
1444 if (active_bitmap & 0x1) {
1445 u_int16_t frms;
1446 u_int32_t bytes;
1447
1448 frms = data[0] | (data[1] << 8);
1449 bytes = (data[2] << 0) | (data[3] << 8) |
1450 (data[4] << 16) | (data[5] << 24);
1451 QDF_TRACE(QDF_MODULE_ID_TXRX,
1452 QDF_TRACE_LEVEL_ERROR,
1453 "cat %2d: %6d %5d",
1454 i, frms, bytes);
1455 data += 6;
1456 j++;
1457 }
1458 i++;
1459 active_bitmap >>= 1;
1460 }
1461 break;
1462 }
1463
1464 /*case ol_tx_log_entry_type_drop:*/
1465
1466 case ol_tx_log_entry_type_wrap:
1467 qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1468 return negative_sign * offset; /* go back to the top */
1469
1470 default:
1471 qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1472 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1473 "*** invalid tx log entry type (%d)\n", type);
1474 return 0; /* error */
1475 };
1476
1477 return size + 1 + align_pad;
1478 }
1479
1480 /**
1481 * ol_tx_queue_log_display() - show tx queue log
1482 * @pdev: pointer to txrx handle
1483 *
1484 * Return: None
1485 */
1486 void
1487 ol_tx_queue_log_display(struct ol_txrx_pdev_t *pdev)
1488 {
1489 int offset;
1490 int unwrap;
1491
1492 qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1493 offset = pdev->txq_log.oldest_record_offset;
1494 unwrap = pdev->txq_log.wrapped;
1495 qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1496 /*
1497 * In theory, this should use mutex to guard against the offset
1498 * being changed while in use, but since this is just for debugging,
1499 * don't bother.
1500 */
1501 txrx_nofl_info("Current target credit: %d",
1502 qdf_atomic_read(&pdev->target_tx_credit));
1503 txrx_nofl_info("Tx queue log:");
1504 txrx_nofl_info(": Frames Bytes TID PEER");
1505
1506 while (unwrap || offset != pdev->txq_log.offset) {
1507 int delta = ol_tx_queue_log_record_display(pdev, offset);
1508
1509 if (delta == 0)
1510 return; /* error */
1511
1512 if (delta < 0)
1513 unwrap = 0;
1514
1515 offset += delta;
1516 }
1517 }
1518
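/**
 * ol_tx_queue_log_enqueue() - add an enqueue record to the tx queue log
 * @pdev: the physical device object
 * @msdu_info: tx msdu info, used to identify the peer and TID
 * @frms: number of frames enqueued
 * @bytes: number of bytes enqueued
 *
 * Return: None
 */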
1519 void
1520 ol_tx_queue_log_enqueue(
1521 struct ol_txrx_pdev_t *pdev,
1522 struct ol_txrx_msdu_info_t *msdu_info,
1523 int frms, int bytes)
1524 {
1525 int tid;
1526 u_int16_t peer_id = msdu_info->htt.info.peer_id;
1527 struct ol_tx_log_queue_add_t *log_elem;
1528
1529 tid = msdu_info->htt.info.ext_tid;
1530
1531 qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1532 log_elem = ol_tx_queue_log_alloc(pdev, ol_tx_log_entry_type_enqueue, 0);
1533 if (!log_elem) {
1534 qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1535 return;
1536 }
1537
1538 log_elem->num_frms = frms;
1539 log_elem->num_bytes = bytes;
1540 log_elem->peer_id = peer_id;
1541 log_elem->tid = tid;
1542 qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1543 }
1544
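/**
 * ol_tx_queue_log_dequeue() - add a dequeue record to the tx queue log
 * @pdev: the physical device object
 * @txq: the tx queue that frames were dequeued from
 * @frms: number of frames dequeued
 * @bytes: number of bytes dequeued
 *
 * Return: None
 */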
1545 void
1546 ol_tx_queue_log_dequeue(
1547 struct ol_txrx_pdev_t *pdev,
1548 struct ol_tx_frms_queue_t *txq,
1549 int frms, int bytes)
1550 {
1551 int ext_tid;
1552 u_int16_t peer_id;
1553 struct ol_tx_log_queue_add_t *log_elem;
1554
1555 ext_tid = txq->ext_tid;
1556 qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1557 log_elem = ol_tx_queue_log_alloc(pdev, ol_tx_log_entry_type_dequeue, 0);
1558 if (!log_elem) {
1559 qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1560 return;
1561 }
1562
1563 if (ext_tid < OL_TX_NUM_TIDS) {
1564 struct ol_txrx_peer_t *peer;
1565 struct ol_tx_frms_queue_t *txq_base;
1566
1567 txq_base = txq - ext_tid;
1568 peer = container_of(txq_base, struct ol_txrx_peer_t, txqs[0]);
1569 peer_id = peer->peer_ids[0];
1570 } else {
1571 peer_id = ~0;
1572 }
1573
1574 log_elem->num_frms = frms;
1575 log_elem->num_bytes = bytes;
1576 log_elem->peer_id = peer_id;
1577 log_elem->tid = ext_tid;
1578 qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1579 }
1580
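/**
 * ol_tx_queue_log_free() - add a queue-free record to the tx queue log
 * @pdev: the physical device object
 * @txq: the tx queue that was emptied
 * @tid: the extended TID of the queue
 * @frms: number of frames freed
 * @bytes: number of bytes freed
 * @is_peer_txq: whether this was a per-peer (rather than per-vdev) queue
 *
 * Return: None
 */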
1581 void
1582 ol_tx_queue_log_free(
1583 struct ol_txrx_pdev_t *pdev,
1584 struct ol_tx_frms_queue_t *txq,
1585 int tid, int frms, int bytes, bool is_peer_txq)
1586 {
1587 u_int16_t peer_id;
1588 struct ol_tx_log_queue_add_t *log_elem;
1589
1590 qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1591 log_elem = ol_tx_queue_log_alloc(pdev, ol_tx_log_entry_type_queue_free,
1592 0);
1593 if (!log_elem) {
1594 qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1595 return;
1596 }
1597
1598 if ((tid < OL_TX_NUM_TIDS) && is_peer_txq) {
1599 struct ol_txrx_peer_t *peer;
1600 struct ol_tx_frms_queue_t *txq_base;
1601
1602 txq_base = txq - tid;
1603 peer = container_of(txq_base, struct ol_txrx_peer_t, txqs[0]);
1604 peer_id = peer->peer_ids[0];
1605 } else {
1606 peer_id = ~0;
1607 }
1608
1609 log_elem->num_frms = frms;
1610 log_elem->num_bytes = bytes;
1611 log_elem->peer_id = peer_id;
1612 log_elem->tid = tid;
1613 qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1614 }
1615
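/**
 * ol_tx_queue_log_sched() - reserve a scheduler-state record in the tx queue log
 * @pdev: the physical device object
 * @credit: current tx credit
 * @num_cats: in: number of active categories; set to 0 if no log space is left
 * @active_bitmap: out: pointer to the record's active-category bitmap
 * @data: out: pointer to the record's per-category data area
 *
 * Return: None
 */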
1616 void
1617 ol_tx_queue_log_sched(
1618 struct ol_txrx_pdev_t *pdev,
1619 int credit,
1620 int *num_cats,
1621 u_int32_t **active_bitmap,
1622 u_int8_t **data)
1623 {
1624 int data_size;
1625 struct ol_tx_log_queue_state_var_sz_t *log_elem;
1626
1627 data_size = sizeof(u_int32_t) /* bytes */ +
1628 sizeof(u_int16_t) /* frms */;
1629 data_size *= *num_cats;
1630
1631 qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1632 log_elem = ol_tx_queue_log_alloc(
1633 pdev, ol_tx_log_entry_type_queue_state, data_size);
1634 if (!log_elem) {
1635 *num_cats = 0;
1636 qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1637 return;
1638 }
1639 log_elem->num_cats_active = *num_cats;
1640 log_elem->active_bitmap = 0;
1641 log_elem->credit = credit;
1642
1643 *active_bitmap = &log_elem->active_bitmap;
1644 *data = &log_elem->data[0];
1645 qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1646 }
1647
1648 /**
1649 * ol_tx_queue_log_clear() - clear tx queue log
1650 * @pdev: pointer to txrx handle
1651 *
1652 * Return: None
1653 */
1654 void
1655 ol_tx_queue_log_clear(struct ol_txrx_pdev_t *pdev)
1656 {
1657 qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1658 qdf_mem_zero(&pdev->txq_log, sizeof(pdev->txq_log));
1659 pdev->txq_log.size = OL_TXQ_LOG_SIZE;
1660 pdev->txq_log.oldest_record_offset = 0;
1661 pdev->txq_log.offset = 0;
1662 pdev->txq_log.allow_wrap = 1;
1663 pdev->txq_log.wrapped = 0;
1664 qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1665 }
1666 #endif /* defined(DEBUG_HL_LOGGING) */
1667
1668 /*--- queue state printouts -------------------------------------------------*/
1669
1670 #if TXRX_DEBUG_LEVEL > 5
1671
1672 /**
1673 * ol_tx_queue_display() - show tx queue info
1674 * @txq: pointer to the tx frames queue
1675 * @indent: indent
1676 *
1677 * Return: None
1678 */
1679 static void
1680 ol_tx_queue_display(struct ol_tx_frms_queue_t *txq, int indent)
1681 {
1682 char *state;
1683
1684 state = (txq->flag == ol_tx_queue_active) ? "active" : "paused";
1685 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
1686 "%*stxq %pK (%s): %d frms, %d bytes\n",
1687 indent, " ", txq, state, txq->frms, txq->bytes);
1688 }
1689
1690 void
1691 ol_tx_queues_display(struct ol_txrx_pdev_t *pdev)
1692 {
1693 struct ol_txrx_vdev_t *vdev;
1694
1695 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
1696 "pdev %pK tx queues:\n", pdev);
1697 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
1698 struct ol_txrx_peer_t *peer;
1699 int i;
1700
1701 for (i = 0; i < QDF_ARRAY_SIZE(vdev->txqs); i++) {
1702 if (vdev->txqs[i].frms == 0)
1703 continue;
1704
1705 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
1706 "vdev %d (%pK), txq %d\n", vdev->vdev_id,
1707 vdev, i);
1708 ol_tx_queue_display(&vdev->txqs[i], 4);
1709 }
1710 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1711 for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++) {
1712 if (peer->txqs[i].frms == 0)
1713 continue;
1714
1715 QDF_TRACE(QDF_MODULE_ID_TXRX,
1716 QDF_TRACE_LEVEL_INFO_LOW,
1717 "peer %d (%pK), txq %d\n",
1718 peer->peer_ids[0], vdev, i);
1719 ol_tx_queue_display(&peer->txqs[i], 6);
1720 }
1721 }
1722 }
1723 }
1724 #endif
1725
1726 #endif /* defined(CONFIG_HL_SUPPORT) */
1727
1728 #if defined(CONFIG_HL_SUPPORT)
1729
1730 /**
1731 * ol_txrx_pdev_pause() - pause network queues for each vdev
1732 * @pdev: pdev handle
1733 * @reason: reason
1734 *
1735 * Return: none
1736 */
1737 void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
1738 {
1739 struct ol_txrx_vdev_t *vdev = NULL, *tmp;
1740
1741 TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
1742 cdp_fc_vdev_pause(cds_get_context(QDF_MODULE_ID_SOC),
1743 vdev->vdev_id, reason, 0);
1744 }
1745
1746 }
1747
1748 /**
1749 * ol_txrx_pdev_unpause() - unpause network queues for each vdev
1750 * @pdev: pdev handle
1751 * @reason: reason
1752 *
1753 * Return: none
1754 */
1755 void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
1756 {
1757 struct ol_txrx_vdev_t *vdev = NULL, *tmp;
1758
1759 TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
1760 cdp_fc_vdev_unpause(cds_get_context(QDF_MODULE_ID_SOC),
1761 vdev->vdev_id, reason, 0);
1762 }
1763
1764 }
1765 #endif
1766
1767 #ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
1768
1769 /**
1770 * ol_tx_vdev_has_tx_queue_group() - check for vdev having txq groups
1771 * @group: pointer to the tx queue group
1772 * @vdev_id: vdev id
1773 *
1774 * Return: true if the vdev is a member of the txq group
1775 */
1776 static bool
1777 ol_tx_vdev_has_tx_queue_group(
1778 struct ol_tx_queue_group_t *group,
1779 u_int8_t vdev_id)
1780 {
1781 u_int16_t vdev_bitmap;
1782
1783 vdev_bitmap = OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);
1784 if (OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(vdev_bitmap, vdev_id))
1785 return true;
1786
1787 return false;
1788 }
1789
1790 /**
1791 * ol_tx_ac_has_tx_queue_group() - check for ac having txq groups
1792 * @group: pointer to the tx queue group
1793 * @ac: access category
1794 *
1795 * Return: true if the access category is a member of the txq group
1796 */
1797 static bool
1798 ol_tx_ac_has_tx_queue_group(
1799 struct ol_tx_queue_group_t *group,
1800 u_int8_t ac)
1801 {
1802 u_int16_t ac_bitmap;
1803
1804 ac_bitmap = OL_TXQ_GROUP_AC_MASK_GET(group->membership);
1805 if (OL_TXQ_GROUP_AC_BIT_MASK_GET(ac_bitmap, ac))
1806 return true;
1807
1808 return false;
1809 }
1810
1811 #ifdef FEATURE_HL_DBS_GROUP_CREDIT_SHARING
1812 static inline struct ol_tx_queue_group_t *
1813 ol_tx_txq_find_other_group(struct ol_txrx_pdev_t *pdev,
1814 struct ol_tx_queue_group_t *txq_grp)
1815 {
1816 int i;
1817 struct ol_tx_queue_group_t *other_grp = NULL;
1818
1819 for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
1820 if (&pdev->txq_grps[i] != txq_grp) {
1821 other_grp = &pdev->txq_grps[i];
1822 break;
1823 }
1824 }
1825 return other_grp;
1826 }
1827
1828 u32 ol_tx_txq_group_credit_limit(
1829 struct ol_txrx_pdev_t *pdev,
1830 struct ol_tx_frms_queue_t *txq,
1831 u32 credit)
1832 {
1833 struct ol_tx_queue_group_t *txq_grp = txq->group_ptrs[0];
1834 struct ol_tx_queue_group_t *other_grp;
1835 u32 ask;
1836 u32 updated_credit;
1837 u32 credit_oth_grp;
1838
1839 if (qdf_unlikely(!txq_grp))
1840 return credit;
1841
1842 updated_credit = qdf_atomic_read(&txq_grp->credit);
1843
1844 if (credit <= updated_credit)
1845 /* We have enough credits */
1846 return credit;
1847
1848 ask = credit - updated_credit;
1849 other_grp = ol_tx_txq_find_other_group(pdev, txq_grp);
1850 if (qdf_unlikely(!other_grp))
1851 return credit;
1852
1853 credit_oth_grp = qdf_atomic_read(&other_grp->credit);
1854 if (other_grp->frm_count < credit_oth_grp) {
1855 u32 spare = credit_oth_grp - other_grp->frm_count;
1856
1857 if (pdev->limit_lend) {
1858 if (spare > pdev->min_reserve)
1859 spare -= pdev->min_reserve;
1860 else
1861 spare = 0;
1862 }
1863 updated_credit += min(spare, ask);
1864 }
1865 return updated_credit;
1866 }
1867
1868 u32 ol_tx_txq_update_borrowed_group_credits(struct ol_txrx_pdev_t *pdev,
1869 struct ol_tx_frms_queue_t *txq,
1870 u32 credits_used)
1871 {
1872 struct ol_tx_queue_group_t *txq_grp = txq->group_ptrs[0];
1873 u32 credits_cur_grp;
1874 u32 credits_brwd;
1875
1876 if (qdf_unlikely(!txq_grp))
1877 return credits_used;
1878
1879 credits_cur_grp = qdf_atomic_read(&txq_grp->credit);
1880 if (credits_used > credits_cur_grp) {
1881 struct ol_tx_queue_group_t *other_grp =
1882 ol_tx_txq_find_other_group(pdev, txq_grp);
1883
1884 if (qdf_likely(other_grp)) {
1885 credits_brwd = credits_used - credits_cur_grp;
1886 /*
1887 * All the credits were used from the active txq group.
1888 */
1889 credits_used = credits_cur_grp;
1890 /* Deduct credits borrowed from other group */
1891 ol_txrx_update_group_credit(other_grp, -credits_brwd,
1892 0);
1893 }
1894 }
1895 return credits_used;
1896 }
1897 #else /* FEATURE_HL_DBS_GROUP_CREDIT_SHARING */
1898 u_int32_t ol_tx_txq_group_credit_limit(
1899 struct ol_txrx_pdev_t *pdev,
1900 struct ol_tx_frms_queue_t *txq,
1901 u_int32_t credit)
1902 {
1903 u_int8_t i;
1904 int updated_credit = credit;
1905
1906 /*
1907 * If this tx queue belongs to a group, check whether the group's
1908 * credit limit is more stringent than the global credit limit.
1909 */
1910 for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++) {
1911 if (txq->group_ptrs[i]) {
1912 int group_credit;
1913
1914 group_credit = qdf_atomic_read(
1915 &txq->group_ptrs[i]->credit);
1916 updated_credit = QDF_MIN(updated_credit, group_credit);
1917 }
1918 }
1919
1920 credit = (updated_credit < 0) ? 0 : updated_credit;
1921
1922 return credit;
1923 }
1924 #endif /* FEATURE_HL_DBS_GROUP_CREDIT_SHARING */
1925
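/**
 * ol_tx_txq_group_credit_update() - update the credit of the group(s) that a
 * tx queue belongs to
 * @pdev: the physical device object
 * @txq: the tx queue whose groups should be updated
 * @credit: credit delta, or absolute credit value
 * @absolute: whether @credit is an absolute value rather than a delta
 *
 * Return: None
 */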
1926 void ol_tx_txq_group_credit_update(
1927 struct ol_txrx_pdev_t *pdev,
1928 struct ol_tx_frms_queue_t *txq,
1929 int32_t credit,
1930 u_int8_t absolute)
1931 {
1932 u_int8_t i;
1933 /*
1934 * If this tx queue belongs to a group then
1935 * update group credit
1936 */
1937 for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++) {
1938 if (txq->group_ptrs[i])
1939 ol_txrx_update_group_credit(txq->group_ptrs[i],
1940 credit, absolute);
1941 }
1942 ol_tx_update_group_credit_stats(pdev);
1943 }
1944
1945 void
1946 ol_tx_set_vdev_group_ptr(
1947 ol_txrx_pdev_handle pdev,
1948 u_int8_t vdev_id,
1949 struct ol_tx_queue_group_t *grp_ptr)
1950 {
1951 struct ol_txrx_vdev_t *vdev = NULL;
1952 struct ol_txrx_peer_t *peer = NULL;
1953
1954 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
1955 if (vdev->vdev_id == vdev_id) {
1956 u_int8_t i, j;
1957
1958 /* update vdev queues group pointers */
1959 for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
1960 for (j = 0; j < OL_TX_MAX_GROUPS_PER_QUEUE; j++)
1961 vdev->txqs[i].group_ptrs[j] = grp_ptr;
1962 }
1963 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
1964 /* Update peer queue group pointers */
1965 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1966 for (i = 0; i < OL_TX_NUM_TIDS; i++) {
1967 for (j = 0;
1968 j < OL_TX_MAX_GROUPS_PER_QUEUE;
1969 j++)
1970 peer->txqs[i].group_ptrs[j] =
1971 grp_ptr;
1972 }
1973 }
1974 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
1975 break;
1976 }
1977 }
1978 }
1979
1980 void ol_tx_txq_set_group_ptr(
1981 struct ol_tx_frms_queue_t *txq,
1982 struct ol_tx_queue_group_t *grp_ptr)
1983 {
1984 u_int8_t i;
1985
1986 for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++)
1987 txq->group_ptrs[i] = grp_ptr;
1988 }
1989
1990 void ol_tx_set_peer_group_ptr(
1991 ol_txrx_pdev_handle pdev,
1992 struct ol_txrx_peer_t *peer,
1993 u_int8_t vdev_id,
1994 u_int8_t tid)
1995 {
1996 u_int8_t i, j = 0;
1997 struct ol_tx_queue_group_t *group = NULL;
1998
1999 for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++)
2000 peer->txqs[tid].group_ptrs[i] = NULL;
2001
2002 for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
2003 group = &pdev->txq_grps[i];
2004 if (ol_tx_vdev_has_tx_queue_group(group, vdev_id)) {
2005 if (tid < OL_TX_NUM_QOS_TIDS) {
2006 if (ol_tx_ac_has_tx_queue_group(
2007 group,
2008 TXRX_TID_TO_WMM_AC(tid))) {
2009 peer->txqs[tid].group_ptrs[j] = group;
2010 j++;
2011 }
2012 } else {
2013 peer->txqs[tid].group_ptrs[j] = group;
2014 j++;
2015 }
2016 }
2017 if (j >= OL_TX_MAX_GROUPS_PER_QUEUE)
2018 break;
2019 }
2020 }
2021
2022 u_int32_t ol_tx_get_max_tx_groups_supported(struct ol_txrx_pdev_t *pdev)
2023 {
2024 return OL_TX_MAX_TXQ_GROUPS;
2025 }
2026 #endif /* FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL */
2027
2028 #if defined(FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL) && \
2029 defined(FEATURE_HL_DBS_GROUP_CREDIT_SHARING)
2030 void ol_tx_update_grp_frm_count(struct ol_tx_frms_queue_t *txq, int num_frms)
2031 {
2032 int i;
2033
2034 if (!num_frms || !txq) {
2035 ol_txrx_dbg("Invalid params");
2036 return;
2037 }
2038
2039 for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++) {
2040 if (txq->group_ptrs[i]) {
2041 txq->group_ptrs[i]->frm_count += num_frms;
2042 qdf_assert(txq->group_ptrs[i]->frm_count >= 0);
2043 }
2044 }
2045 }
2046 #endif
2047
2048 /*--- End of LL tx throttle queue code ---------------------------------------*/
2049