1 /*
2 * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
21 #include <htt.h> /* HTT_TX_EXT_TID_MGMT */
22 #include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
23 #include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
24 #include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
25 #include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
26 #include <ol_txrx_types.h> /* pdev stats, etc. */
27 #include <ol_tx_desc.h> /* ol_tx_desc */
28 #include <ol_tx_send.h> /* ol_tx_send */
29 #include <ol_tx_sched.h> /* OL_TX_SCHED, etc. */
30 #include <ol_tx_queue.h>
31 #include <ol_txrx.h>
32 #include <qdf_types.h>
33 #include <qdf_mem.h> /* qdf_os_mem_alloc_consistent et al */
34 #include <cdp_txrx_handle.h>
35 #if defined(CONFIG_HL_SUPPORT)
36
37 #if defined(DEBUG_HL_LOGGING)
38 static void
39 ol_tx_sched_log(struct ol_txrx_pdev_t *pdev);
40
41 #else
42 static void
ol_tx_sched_log(struct ol_txrx_pdev_t *pdev)
44 {
45 }
46 #endif /* defined(DEBUG_HL_LOGGING) */
47
48 #if DEBUG_HTT_CREDIT
49 #define OL_TX_DISPATCH_LOG_CREDIT() \
50 do { \
51 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, \
52 "TX %d bytes\n", qdf_nbuf_len(msdu)); \
53 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, \
54 " <HTT> Decrease credit %d - 1 = %d, len:%d.\n", \
55 qdf_atomic_read(&pdev->target_tx_credit), \
56 qdf_atomic_read(&pdev->target_tx_credit) - 1, \
57 qdf_nbuf_len(msdu)); \
58 } while (0)
59 #else
60 #define OL_TX_DISPATCH_LOG_CREDIT()
61 #endif
62
63 /*--- generic definitions used by the scheduler framework for all algs ---*/
64
struct ol_tx_sched_ctx {
	ol_tx_desc_list head;	/* tx descriptors selected for download */
	int frms;		/* number of frames on the head list */
};
69
70 typedef TAILQ_HEAD(ol_tx_frms_queue_list_s, ol_tx_frms_queue_t)
71 ol_tx_frms_queue_list;
72
73 /*--- scheduler algorithm selection ---*/
74
75 /*--- scheduler options -----------------------------------------------
76 * 1. Round-robin scheduler:
77 * Select the TID that is at the head of the list of active TIDs.
78 * Select the head tx queue for this TID.
79 * Move the tx queue to the back of the list of tx queues for
80 * this TID.
81 * Move the TID to the back of the list of active TIDs.
82 * Send as many frames from the tx queue as credit allows.
83 * 2. Weighted-round-robin advanced scheduler:
84 * Keep an ordered list of which TID gets selected next.
85 * Use a weighted-round-robin scheme to determine when to promote
86 * a TID within this list.
87 * If a TID at the head of the list is inactive, leave it at the
88 * head, but check the next TIDs.
89 * If the credit available is less than the credit threshold for the
90 * next active TID, don't send anything, and leave the TID at the
91 * head of the list.
92 * After a TID is selected, move it to the back of the list.
93 * Select the head tx queue for this TID.
94 * Move the tx queue to the back of the list of tx queues for this
95 * TID.
96 * Send no more frames than the limit specified for the TID.
97 */
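/*
 * Illustration of the WRR-ADV parameters (values are taken from the
 * default configuration further below): a category with
 * wrr_skip_weight = 3 has its wrr_count incremented on every scheduler
 * pass and is skipped until that count reaches 3, so it is serviced on
 * at most one of every three opportunities.  A category with
 * credit_threshold = 12 is not serviced until at least 12 download
 * credits are available, and send_limit then caps how many of its
 * frames are dequeued in a single turn.
 */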
98 #define OL_TX_SCHED_RR 1
99 #define OL_TX_SCHED_WRR_ADV 2
100
101 #ifndef OL_TX_SCHED
102 /*#define OL_TX_SCHED OL_TX_SCHED_RR*/
103 #define OL_TX_SCHED OL_TX_SCHED_WRR_ADV /* default */
104 #endif
105
106
107 #if OL_TX_SCHED == OL_TX_SCHED_RR
108
109 #define ol_tx_sched_rr_t ol_tx_sched_t
110
111 #define OL_TX_SCHED_NUM_CATEGORIES (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES)
112
113 #define ol_tx_sched_init ol_tx_sched_init_rr
114 #define ol_tx_sched_select_init(pdev) /* no-op */
115 #define ol_tx_sched_select_batch ol_tx_sched_select_batch_rr
116 #define ol_tx_sched_txq_enqueue ol_tx_sched_txq_enqueue_rr
117 #define ol_tx_sched_txq_deactivate ol_tx_sched_txq_deactivate_rr
118 #define ol_tx_sched_category_tx_queues ol_tx_sched_category_tx_queues_rr
119 #define ol_tx_sched_txq_discard ol_tx_sched_txq_discard_rr
120 #define ol_tx_sched_category_info ol_tx_sched_category_info_rr
121 #define ol_tx_sched_discard_select_category \
122 ol_tx_sched_discard_select_category_rr
123
124 #elif OL_TX_SCHED == OL_TX_SCHED_WRR_ADV
125
126 #define ol_tx_sched_wrr_adv_t ol_tx_sched_t
127
128 #define OL_TX_SCHED_NUM_CATEGORIES OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES
129
130 #define ol_tx_sched_init ol_tx_sched_init_wrr_adv
131 #define ol_tx_sched_select_init(pdev) \
132 do { \
133 qdf_spin_lock_bh(&pdev->tx_queue_spinlock); \
134 ol_tx_sched_select_init_wrr_adv(pdev); \
135 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock); \
136 } while (0)
137 #define ol_tx_sched_select_batch ol_tx_sched_select_batch_wrr_adv
138 #define ol_tx_sched_txq_enqueue ol_tx_sched_txq_enqueue_wrr_adv
139 #define ol_tx_sched_txq_deactivate ol_tx_sched_txq_deactivate_wrr_adv
140 #define ol_tx_sched_category_tx_queues ol_tx_sched_category_tx_queues_wrr_adv
141 #define ol_tx_sched_txq_discard ol_tx_sched_txq_discard_wrr_adv
142 #define ol_tx_sched_category_info ol_tx_sched_category_info_wrr_adv
143 #define ol_tx_sched_discard_select_category \
144 ol_tx_sched_discard_select_category_wrr_adv
145
146 #else
147
148 #error Unknown OL TX SCHED specification
149
150 #endif /* OL_TX_SCHED */
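/*
 * Note: the scheduler algorithm is fixed at build time.  To use the
 * round-robin scheduler instead of the default WRR-ADV scheduler,
 * OL_TX_SCHED can be predefined to OL_TX_SCHED_RR (for example via a
 * -DOL_TX_SCHED=1 compiler flag).
 */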
151
152 /*--- round-robin scheduler ----------------------------------------*/
153 #if OL_TX_SCHED == OL_TX_SCHED_RR
154
155 /*--- definitions ---*/
156
157 struct ol_tx_active_queues_in_tid_t {
/* list_elem is used to queue this TID entry onto the active-TIDs list */
159 TAILQ_ENTRY(ol_tx_active_queues_in_tid_t) list_elem;
160 u_int32_t frms;
161 u_int32_t bytes;
162 ol_tx_frms_queue_list head;
163 bool active;
164 int tid;
165 };
166
167 struct ol_tx_sched_rr_t {
168 struct ol_tx_active_queues_in_tid_t
169 tx_active_queues_in_tid_array[OL_TX_NUM_TIDS
170 + OL_TX_VDEV_NUM_QUEUES];
171 TAILQ_HEAD(ol_tx_active_tids_s, ol_tx_active_queues_in_tid_t)
172 tx_active_tids_list;
173 u_int8_t discard_weights[OL_TX_NUM_TIDS
174 + OL_TX_VDEV_NUM_QUEUES];
175 };
176
177 #define TX_SCH_MAX_CREDIT_FOR_THIS_TID(tidq) 16
178
179 /*--- functions ---*/
180
/*
 * The scheduler sync spinlock has been acquired outside this function,
 * so there is no need to take locks inside this function.
 */
185 static int
ol_tx_sched_select_batch_rr(
187 struct ol_txrx_pdev_t *pdev,
188 struct ol_tx_sched_ctx *sctx,
189 u_int32_t credit)
190 {
191 struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
192 struct ol_tx_active_queues_in_tid_t *txq_queue;
193 struct ol_tx_frms_queue_t *next_tq;
194 u_int16_t frames, used_credits = 0, tx_limit, tx_limit_flag = 0;
195 int bytes;
196
197 TX_SCHED_DEBUG_PRINT("Enter");
198
199 if (TAILQ_EMPTY(&scheduler->tx_active_tids_list))
200 return used_credits;
201
202 txq_queue = TAILQ_FIRST(&scheduler->tx_active_tids_list);
203
204 TAILQ_REMOVE(&scheduler->tx_active_tids_list, txq_queue, list_elem);
205 txq_queue->active = false;
206
207 next_tq = TAILQ_FIRST(&txq_queue->head);
208 TAILQ_REMOVE(&txq_queue->head, next_tq, list_elem);
209
210 credit = QDF_MIN(credit, TX_SCH_MAX_CREDIT_FOR_THIS_TID(next_tq));
211 frames = next_tq->frms; /* download as many frames as credit allows */
212 tx_limit = ol_tx_bad_peer_dequeue_check(next_tq,
213 frames,
214 &tx_limit_flag);
215 frames = ol_tx_dequeue(
216 pdev, next_tq, &sctx->head, tx_limit, &credit, &bytes);
217 ol_tx_bad_peer_update_tx_limit(pdev, next_tq, frames, tx_limit_flag);
218
219 used_credits = credit;
220 txq_queue->frms -= frames;
221 txq_queue->bytes -= bytes;
222
223 if (next_tq->frms > 0) {
224 TAILQ_INSERT_TAIL(&txq_queue->head, next_tq, list_elem);
225 TAILQ_INSERT_TAIL(
226 &scheduler->tx_active_tids_list,
227 txq_queue, list_elem);
228 txq_queue->active = true;
229 } else if (!TAILQ_EMPTY(&txq_queue->head)) {
230 /*
231 * This tx queue is empty, but there's another tx queue for the
232 * same TID that is not empty.
 * Thus, the TID as a whole is active.
234 */
235 TAILQ_INSERT_TAIL(
236 &scheduler->tx_active_tids_list,
237 txq_queue, list_elem);
238 txq_queue->active = true;
239 }
240 sctx->frms += frames;
241
242 TX_SCHED_DEBUG_PRINT("Leave");
243 return used_credits;
244 }
245
246 static inline void
ol_tx_sched_txq_enqueue_rr(
248 struct ol_txrx_pdev_t *pdev,
249 struct ol_tx_frms_queue_t *txq,
250 int tid,
251 int frms,
252 int bytes)
253 {
254 struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
255 struct ol_tx_active_queues_in_tid_t *txq_queue;
256
257 txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
258 if (txq->flag != ol_tx_queue_active)
259 TAILQ_INSERT_TAIL(&txq_queue->head, txq, list_elem);
260
261 txq_queue->frms += frms;
262 txq_queue->bytes += bytes;
263
264 if (!txq_queue->active) {
265 TAILQ_INSERT_TAIL(
266 &scheduler->tx_active_tids_list,
267 txq_queue, list_elem);
268 txq_queue->active = true;
269 }
270 }
271
272 static inline void
ol_tx_sched_txq_deactivate_rr(
274 struct ol_txrx_pdev_t *pdev,
275 struct ol_tx_frms_queue_t *txq,
276 int tid)
277 {
278 struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
279 struct ol_tx_active_queues_in_tid_t *txq_queue;
280
281 txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
282 txq_queue->frms -= txq->frms;
283 txq_queue->bytes -= txq->bytes;
284
285 TAILQ_REMOVE(&txq_queue->head, txq, list_elem);
286 /*if (txq_queue->frms == 0 && txq_queue->active) {*/
287 if (TAILQ_EMPTY(&txq_queue->head) && txq_queue->active) {
288 TAILQ_REMOVE(&scheduler->tx_active_tids_list, txq_queue,
289 list_elem);
290 txq_queue->active = false;
291 }
292 }
293
294 ol_tx_frms_queue_list *
ol_tx_sched_category_tx_queues_rr(struct ol_txrx_pdev_t *pdev, int tid)
296 {
297 struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
298 struct ol_tx_active_queues_in_tid_t *txq_queue;
299
300 txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
301 return &txq_queue->head;
302 }
303
304 int
ol_tx_sched_discard_select_category_rr(struct ol_txrx_pdev_t *pdev)
306 {
307 struct ol_tx_sched_rr_t *scheduler;
308 u_int8_t i, tid = 0;
309 int max_score = 0;
310
311 scheduler = pdev->tx_sched.scheduler;
312 /*
313 * Choose which TID's tx frames to drop next based on two factors:
314 * 1. Which TID has the most tx frames present
315 * 2. The TID's priority (high-priority TIDs have a low discard_weight)
316 */
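	/*
	 * Example (using the discard weights defined below): a best-effort
	 * TID holding 40 frames scores 40 * 8 = 320, while a voice TID
	 * holding 100 frames scores 100 * 1 = 100, so the best-effort TID
	 * is discarded from first even though it holds fewer frames.
	 */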
317 for (i = 0; i < (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES); i++) {
318 int score;
319
320 score =
321 scheduler->tx_active_queues_in_tid_array[i].frms *
322 scheduler->discard_weights[i];
323 if (max_score == 0 || score > max_score) {
324 max_score = score;
325 tid = i;
326 }
327 }
328 return tid;
329 }
330
331 void
ol_tx_sched_txq_discard_rr(
333 struct ol_txrx_pdev_t *pdev,
334 struct ol_tx_frms_queue_t *txq,
335 int tid, int frames, int bytes)
336 {
337 struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
338 struct ol_tx_active_queues_in_tid_t *txq_queue;
339
340 txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
341
342 if (0 == txq->frms)
343 TAILQ_REMOVE(&txq_queue->head, txq, list_elem);
344
345 txq_queue->frms -= frames;
346 txq_queue->bytes -= bytes;
347 if (txq_queue->active == true && txq_queue->frms == 0) {
348 TAILQ_REMOVE(&scheduler->tx_active_tids_list, txq_queue,
349 list_elem);
350 txq_queue->active = false;
351 }
352 }
353
354 void
ol_tx_sched_category_info_rr(
356 struct ol_txrx_pdev_t *pdev,
357 int cat, int *active,
358 int *frms, int *bytes)
359 {
360 struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
361 struct ol_tx_active_queues_in_tid_t *txq_queue;
362
363 txq_queue = &scheduler->tx_active_queues_in_tid_array[cat];
364
365 *active = txq_queue->active;
366 *frms = txq_queue->frms;
367 *bytes = txq_queue->bytes;
368 }
369
370 enum {
371 ol_tx_sched_discard_weight_voice = 1,
372 ol_tx_sched_discard_weight_video = 4,
373 ol_tx_sched_discard_weight_ucast_default = 8,
374 ol_tx_sched_discard_weight_mgmt_non_qos = 1, /* 0? */
375 ol_tx_sched_discard_weight_mcast = 1, /* 0? also for probe & assoc */
376 };
377
378 void *
ol_tx_sched_init_rr(
380 struct ol_txrx_pdev_t *pdev)
381 {
382 struct ol_tx_sched_rr_t *scheduler;
383 int i;
384
385 scheduler = qdf_mem_malloc(sizeof(struct ol_tx_sched_rr_t));
386 if (!scheduler)
387 return scheduler;
388
389 for (i = 0; i < (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES); i++) {
390 scheduler->tx_active_queues_in_tid_array[i].tid = i;
391 TAILQ_INIT(&scheduler->tx_active_queues_in_tid_array[i].head);
392 scheduler->tx_active_queues_in_tid_array[i].active = 0;
393 scheduler->tx_active_queues_in_tid_array[i].frms = 0;
394 scheduler->tx_active_queues_in_tid_array[i].bytes = 0;
395 }
396 for (i = 0; i < OL_TX_NUM_TIDS; i++) {
397 scheduler->tx_active_queues_in_tid_array[i].tid = i;
398 if (i < OL_TX_NON_QOS_TID) {
399 int ac = TXRX_TID_TO_WMM_AC(i);
400
switch (ac) {
case TXRX_WMM_AC_VO:
	scheduler->discard_weights[i] =
		ol_tx_sched_discard_weight_voice;
	break;
case TXRX_WMM_AC_VI:
	scheduler->discard_weights[i] =
		ol_tx_sched_discard_weight_video;
	break;
default:
	scheduler->discard_weights[i] =
		ol_tx_sched_discard_weight_ucast_default;
	break;
}
412 } else {
413 scheduler->discard_weights[i] =
414 ol_tx_sched_discard_weight_mgmt_non_qos;
415 }
416 }
417 for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
418 int j = i + OL_TX_NUM_TIDS;
419
420 scheduler->tx_active_queues_in_tid_array[j].tid =
421 OL_TX_NUM_TIDS - 1;
422 scheduler->discard_weights[j] =
423 ol_tx_sched_discard_weight_mcast;
424 }
425 TAILQ_INIT(&scheduler->tx_active_tids_list);
426
427 return scheduler;
428 }
429
430 void
ol_txrx_set_wmm_param(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
432 struct ol_tx_wmm_param_t wmm_param)
433 {
434 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
435 "Dummy function when OL_TX_SCHED_RR is enabled\n");
436 }
437
438 /**
439 * ol_tx_sched_stats_display() - tx sched stats display
440 * @pdev: Pointer to the PDEV structure.
441 *
442 * Return: none.
443 */
void ol_tx_sched_stats_display(struct ol_txrx_pdev_t *pdev)
445 {
446 }
447
448 /**
449 * ol_tx_sched_cur_state_display() - tx sched cur stat display
450 * @pdev: Pointer to the PDEV structure.
451 *
452 * Return: none.
453 */
void ol_tx_sched_cur_state_display(struct ol_txrx_pdev_t *pdev)
455 {
456 }
457
458 /**
 * ol_tx_sched_stats_clear() - reset tx sched stats
460 * @pdev: Pointer to the PDEV structure.
461 *
462 * Return: none.
463 */
void ol_tx_sched_stats_clear(struct ol_txrx_pdev_t *pdev)
465 {
466 }
467
468 #endif /* OL_TX_SCHED == OL_TX_SCHED_RR */
469
470 /*--- advanced scheduler ----------------------------------------------------*/
471 #if OL_TX_SCHED == OL_TX_SCHED_WRR_ADV
472
473 /*--- definitions ---*/
474
475 struct ol_tx_sched_wrr_adv_category_info_t {
476 struct {
477 int wrr_skip_weight;
478 u_int32_t credit_threshold;
479 u_int16_t send_limit;
480 int credit_reserve;
481 int discard_weight;
482 } specs;
483 struct {
484 int wrr_count;
485 int frms;
486 int bytes;
487 ol_tx_frms_queue_list head;
488 bool active;
489 } state;
490 #ifdef DEBUG_HL_LOGGING
491 struct {
492 char *cat_name;
493 unsigned int queued;
494 unsigned int dispatched;
495 unsigned int discard;
496 } stat;
497 #endif
498 };
499
500 #define OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(cat, \
501 wrr_skip_weight, \
502 credit_threshold, \
503 send_limit, \
504 credit_reserve, \
505 discard_weights) \
506 enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _WRR_SKIP_WEIGHT = \
507 (wrr_skip_weight) }; \
508 enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _CREDIT_THRESHOLD = \
509 (credit_threshold) }; \
510 enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _SEND_LIMIT = \
511 (send_limit) }; \
512 enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _CREDIT_RESERVE = \
513 (credit_reserve) }; \
514 enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _DISCARD_WEIGHT = \
515 (discard_weights) };
516 /* Rome:
517 * For high-volume traffic flows (VI, BE, BK), use a credit threshold
518 * roughly equal to a large A-MPDU (occupying half the target memory
519 * available for holding tx frames) to download AMPDU-sized batches
520 * of traffic.
521 * For high-priority, low-volume traffic flows (VO and mgmt), use no
522 * credit threshold, to minimize download latency.
523 */
524 /* WRR send
525 * skip credit limit credit disc
526 * wts thresh (frms) reserv wts
527 */
528 #ifdef HIF_SDIO
529 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VO, 1, 17, 24, 0, 1);
530 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VI, 3, 17, 16, 1, 4);
531 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BE, 10, 17, 16, 1, 8);
532 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BK, 12, 6, 6, 1, 8);
OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(NON_QOS_DATA, 10, 17, 16, 1, 8);
534 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(UCAST_MGMT, 1, 1, 4, 0, 1);
535 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_DATA, 10, 17, 4, 1, 4);
536 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_MGMT, 1, 1, 4, 0, 1);
537 #else
538 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VO, 1, 16, 24, 0, 1);
539 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VI, 3, 16, 16, 1, 4);
540 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BE, 10, 12, 12, 1, 8);
541 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BK, 12, 6, 6, 1, 8);
542 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(NON_QOS_DATA, 12, 6, 4, 1, 8);
543 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(UCAST_MGMT, 1, 1, 4, 0, 1);
544 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_DATA, 10, 16, 4, 1, 4);
545 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_MGMT, 1, 1, 4, 0, 1);
546 #endif
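/*
 * Example reading of one row above (BE in the non-SDIO table): the BE
 * category is skipped until its WRR count reaches 10, is only serviced
 * once at least 12 download credits are available, downloads at most 12
 * frames per turn, leaves 1 credit in reserve for other categories, and
 * uses discard weight 8 (so its frames are dropped ahead of VO and VI
 * traffic under congestion).
 */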
547
548 #ifdef DEBUG_HL_LOGGING
549
550 #define OL_TX_SCHED_WRR_ADV_CAT_STAT_INIT(category, scheduler) \
551 do { \
552 scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
553 .stat.queued = 0; \
554 scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
555 .stat.discard = 0; \
556 scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
557 .stat.dispatched = 0; \
558 scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
559 .stat.cat_name = #category; \
560 } while (0)
561 #define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_QUEUED(category, frms) \
562 category->stat.queued += frms;
563 #define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISCARD(category, frms) \
564 category->stat.discard += frms;
565 #define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISPATCHED(category, frms) \
566 category->stat.dispatched += frms;
567 #define OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(scheduler) \
568 ol_tx_sched_wrr_adv_cat_stat_dump(scheduler)
569 #define OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(scheduler) \
570 ol_tx_sched_wrr_adv_cat_cur_state_dump(scheduler)
571 #define OL_TX_SCHED_WRR_ADV_CAT_STAT_CLEAR(scheduler) \
572 ol_tx_sched_wrr_adv_cat_stat_clear(scheduler)
573
574 #else /* DEBUG_HL_LOGGING */
575
576 #define OL_TX_SCHED_WRR_ADV_CAT_STAT_INIT(category, scheduler)
577 #define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_QUEUED(category, frms)
578 #define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISCARD(category, frms)
579 #define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISPATCHED(category, frms)
580 #define OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(scheduler)
581 #define OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(scheduler)
582 #define OL_TX_SCHED_WRR_ADV_CAT_STAT_CLEAR(scheduler)
583
584 #endif /* DEBUG_HL_LOGGING */
585
586 #define OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(category, scheduler) \
587 do { \
588 scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
589 .specs.wrr_skip_weight = \
590 OL_TX_SCHED_WRR_ADV_ ## category ## _WRR_SKIP_WEIGHT; \
591 scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
592 .specs.credit_threshold = \
593 OL_TX_SCHED_WRR_ADV_ ## category ## _CREDIT_THRESHOLD; \
594 scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
595 .specs.send_limit = \
596 OL_TX_SCHED_WRR_ADV_ ## category ## _SEND_LIMIT; \
597 scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
598 .specs.credit_reserve = \
599 OL_TX_SCHED_WRR_ADV_ ## category ## _CREDIT_RESERVE; \
600 scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
601 .specs.discard_weight = \
602 OL_TX_SCHED_WRR_ADV_ ## category ## _DISCARD_WEIGHT; \
603 OL_TX_SCHED_WRR_ADV_CAT_STAT_INIT(category, scheduler); \
604 } while (0)
605
606 struct ol_tx_sched_wrr_adv_t {
607 int order[OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES];
608 int index;
609 struct ol_tx_sched_wrr_adv_category_info_t
610 categories[OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES];
611 };
612
613 #define OL_TX_AIFS_DEFAULT_VO 2
614 #define OL_TX_AIFS_DEFAULT_VI 2
615 #define OL_TX_AIFS_DEFAULT_BE 3
616 #define OL_TX_AIFS_DEFAULT_BK 7
617 #define OL_TX_CW_MIN_DEFAULT_VO 3
618 #define OL_TX_CW_MIN_DEFAULT_VI 7
619 #define OL_TX_CW_MIN_DEFAULT_BE 15
620 #define OL_TX_CW_MIN_DEFAULT_BK 15
621
622 /*--- functions ---*/
623
624 #ifdef DEBUG_HL_LOGGING
static void ol_tx_sched_wrr_adv_cat_stat_dump(
626 struct ol_tx_sched_wrr_adv_t *scheduler)
627 {
628 int i;
629
630 txrx_nofl_info("Scheduler Stats:");
631 txrx_nofl_info("====category(CRR,CRT,WSW): Queued Discard Dequeued frms wrr===");
632 for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; ++i) {
633 txrx_nofl_info("%12s(%2d, %2d, %2d): %6d %7d %8d %4d %3d",
634 scheduler->categories[i].stat.cat_name,
635 scheduler->categories[i].specs.credit_reserve,
636 scheduler->categories[i].specs.
637 credit_threshold,
638 scheduler->categories[i].
639 specs.wrr_skip_weight,
640 scheduler->categories[i].stat.queued,
641 scheduler->categories[i].stat.discard,
642 scheduler->categories[i].stat.dispatched,
643 scheduler->categories[i].state.frms,
644 scheduler->categories[i].state.wrr_count);
645 }
646 }
647
static void ol_tx_sched_wrr_adv_cat_cur_state_dump(
649 struct ol_tx_sched_wrr_adv_t *scheduler)
650 {
651 int i;
652
653 txrx_nofl_info("Scheduler State Snapshot:");
654 txrx_nofl_info("====category(CRR,CRT,WSW): IS_Active Pend_Frames Pend_bytes wrr===");
655 for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; ++i) {
656 txrx_nofl_info("%12s(%2d, %2d, %2d): %9d %11d %10d %3d",
657 scheduler->categories[i].stat.cat_name,
658 scheduler->categories[i].specs.credit_reserve,
659 scheduler->categories[i].specs.
660 credit_threshold,
661 scheduler->categories[i].specs.
662 wrr_skip_weight,
663 scheduler->categories[i].state.active,
664 scheduler->categories[i].state.frms,
665 scheduler->categories[i].state.bytes,
666 scheduler->categories[i].state.wrr_count);
667 }
668 }
669
static void ol_tx_sched_wrr_adv_cat_stat_clear(
671 struct ol_tx_sched_wrr_adv_t *scheduler)
672 {
673 int i;
674
675 for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; ++i) {
676 scheduler->categories[i].stat.queued = 0;
677 scheduler->categories[i].stat.discard = 0;
678 scheduler->categories[i].stat.dispatched = 0;
679 }
680 }
681
682 #endif
683
684 static void
ol_tx_sched_select_init_wrr_adv(struct ol_txrx_pdev_t *pdev)
686 {
687 struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
688 /* start selection from the front of the ordered list */
689 scheduler->index = 0;
690 }
691
692 static void
ol_tx_sched_wrr_adv_rotate_order_list_tail(
694 struct ol_tx_sched_wrr_adv_t *scheduler, int idx)
695 {
696 int value;
697 /* remember the value of the specified element */
698 value = scheduler->order[idx];
699 /* shift all further elements up one space */
700 for (; idx < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES-1; idx++)
701 scheduler->order[idx] = scheduler->order[idx + 1];
702
703 /* put the specified element at the end */
704 scheduler->order[idx] = value;
705 }
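/*
 * Example: with order = {0, 1, 2, 3, ...} and idx = 1, entry 1 is moved
 * to the end and the later entries shift up one slot, giving
 * {0, 2, 3, ..., 1}.
 */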
706
707 static void
ol_tx_sched_wrr_adv_credit_sanity_check(struct ol_txrx_pdev_t *pdev,
709 u_int32_t credit)
710 {
711 struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
712 int i;
713 int okay = 1;
714
715 for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++) {
716 if (scheduler->categories[i].specs.credit_threshold > credit) {
717 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
718 "*** Config error: credit (%d) not enough to support category %d threshold (%d)\n",
719 credit, i,
720 scheduler->categories[i].specs.
721 credit_threshold);
722 okay = 0;
723 }
724 }
725 qdf_assert(okay);
726 }
727
/*
 * The scheduler sync spinlock has been acquired outside this function,
 * so there is no need to take locks inside this function.
 */
732 static int
ol_tx_sched_select_batch_wrr_adv(
734 struct ol_txrx_pdev_t *pdev,
735 struct ol_tx_sched_ctx *sctx,
736 u_int32_t credit)
737 {
738 static int first = 1;
739 int category_index = 0;
740 struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
741 struct ol_tx_frms_queue_t *txq, *first_txq = NULL;
742 int index;
743 struct ol_tx_sched_wrr_adv_category_info_t *category = NULL;
744 int frames, bytes, used_credits = 0, tx_limit;
745 u_int16_t tx_limit_flag;
746 u32 credit_rem = credit;
747
748 /*
749 * Just for good measure, do a sanity check that the initial credit
750 * is enough to cover every category's credit threshold.
751 */
752 if (first) {
753 first = 0;
754 ol_tx_sched_wrr_adv_credit_sanity_check(pdev, credit);
755 }
756
757 /* choose the traffic category from the ordered list */
758 index = scheduler->index;
759 while (index < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES) {
760 category_index = scheduler->order[index];
761 category = &scheduler->categories[category_index];
762 if (!category->state.active) {
763 /* move on to the next category */
764 index++;
765 continue;
766 }
767 if (++category->state.wrr_count <
768 category->specs.wrr_skip_weight) {
769 /* skip this category (move it to the back) */
770 ol_tx_sched_wrr_adv_rotate_order_list_tail(scheduler,
771 index);
772 /*
773 * try again (iterate) on the new element
774 * that was moved up
775 */
776 continue;
777 }
778 /* found the first active category whose WRR turn is present */
779 break;
780 }
781 if (index >= OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES) {
782 /* no categories are active */
783 return 0;
784 }
785
786 /* is there enough credit for the selected category? */
787 if (credit < category->specs.credit_threshold) {
788 /*
789 * Can't send yet - wait until more credit becomes available.
790 * In the meantime, restore the WRR counter (since we didn't
791 * service this category after all).
792 */
793 category->state.wrr_count = category->state.wrr_count - 1;
794 return 0;
795 }
796 /* enough credit is available - go ahead and send some frames */
797 /*
798 * This category was serviced - reset the WRR counter, and move this
799 * category to the back of the order list.
800 */
801 category->state.wrr_count = 0;
802 ol_tx_sched_wrr_adv_rotate_order_list_tail(scheduler, index);
803 /*
804 * With this category moved to the back, if there's still any credit
805 * left, set up the next invocation of this function to start from
806 * where this one left off, by looking at the category that just got
 * shifted forward into the position the serviced category was
808 * occupying.
809 */
810 scheduler->index = index;
811
812 /*
813 * Take the tx queue from the head of the category list.
814 */
815 txq = TAILQ_FIRST(&category->state.head);
816
817 while (txq) {
818 TAILQ_REMOVE(&category->state.head, txq, list_elem);
819 credit = ol_tx_txq_group_credit_limit(pdev, txq, credit);
820 if (credit > category->specs.credit_reserve) {
821 credit -= category->specs.credit_reserve;
822 tx_limit = ol_tx_bad_peer_dequeue_check(txq,
823 category->specs.send_limit,
824 &tx_limit_flag);
825 frames = ol_tx_dequeue(
826 pdev, txq, &sctx->head,
827 tx_limit, &credit, &bytes);
828 ol_tx_bad_peer_update_tx_limit(pdev, txq,
829 frames,
830 tx_limit_flag);
831
832 OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISPATCHED(category,
833 frames);
834 /* Update used global credits */
835 used_credits = credit;
836 credit =
837 ol_tx_txq_update_borrowed_group_credits(pdev, txq,
838 credit);
839 category->state.frms -= frames;
840 category->state.bytes -= bytes;
841 if (txq->frms > 0) {
842 TAILQ_INSERT_TAIL(&category->state.head,
843 txq, list_elem);
844 } else {
845 if (category->state.frms == 0)
846 category->state.active = 0;
847 }
848 sctx->frms += frames;
849 ol_tx_txq_group_credit_update(pdev, txq, -credit, 0);
850 break;
851 } else {
852 /*
 * The current txq belongs to a group which does not have
 * enough credits; iterate to the next txq and see if we can
 * download packets from that queue.
857 */
858 if (ol_tx_if_iterate_next_txq(first_txq, txq)) {
859 credit = credit_rem;
860 if (!first_txq)
861 first_txq = txq;
862
863 TAILQ_INSERT_TAIL(&category->state.head,
864 txq, list_elem);
865
866 txq = TAILQ_FIRST(&category->state.head);
867 } else {
868 TAILQ_INSERT_HEAD(&category->state.head, txq,
869 list_elem);
870 break;
871 }
872 }
873 } /* while(txq) */
874
875 return used_credits;
876 }
877
878 static inline void
ol_tx_sched_txq_enqueue_wrr_adv(
880 struct ol_txrx_pdev_t *pdev,
881 struct ol_tx_frms_queue_t *txq,
882 int tid,
883 int frms,
884 int bytes)
885 {
886 struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
887 struct ol_tx_sched_wrr_adv_category_info_t *category;
888
889 category = &scheduler->categories[pdev->tid_to_ac[tid]];
890 category->state.frms += frms;
891 category->state.bytes += bytes;
892 OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_QUEUED(category, frms);
893 if (txq->flag != ol_tx_queue_active) {
894 TAILQ_INSERT_TAIL(&category->state.head, txq, list_elem);
895 category->state.active = 1; /* may have already been active */
896 }
897 }
898
899 static inline void
ol_tx_sched_txq_deactivate_wrr_adv(
901 struct ol_txrx_pdev_t *pdev,
902 struct ol_tx_frms_queue_t *txq,
903 int tid)
904 {
905 struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
906 struct ol_tx_sched_wrr_adv_category_info_t *category;
907
908 category = &scheduler->categories[pdev->tid_to_ac[tid]];
909 category->state.frms -= txq->frms;
910 category->state.bytes -= txq->bytes;
911
912 TAILQ_REMOVE(&category->state.head, txq, list_elem);
913
914 if (category->state.frms == 0 && category->state.active)
915 category->state.active = 0;
916 }
917
918 static ol_tx_frms_queue_list *
ol_tx_sched_category_tx_queues_wrr_adv(struct ol_txrx_pdev_t *pdev, int cat)
920 {
921 struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
922 struct ol_tx_sched_wrr_adv_category_info_t *category;
923
924 category = &scheduler->categories[cat];
925 return &category->state.head;
926 }
927
928 static int
ol_tx_sched_discard_select_category_wrr_adv(struct ol_txrx_pdev_t *pdev)
930 {
931 struct ol_tx_sched_wrr_adv_t *scheduler;
932 u_int8_t i, cat = 0;
933 int max_score = 0;
934
935 scheduler = pdev->tx_sched.scheduler;
936 /*
937 * Choose which category's tx frames to drop next based on two factors:
938 * 1. Which category has the most tx frames present
939 * 2. The category's priority (high-priority categories have a low
940 * discard_weight)
941 */
942 for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++) {
943 int score;
944
945 score =
946 scheduler->categories[i].state.frms *
947 scheduler->categories[i].specs.discard_weight;
948 if (max_score == 0 || score > max_score) {
949 max_score = score;
950 cat = i;
951 }
952 }
953 return cat;
954 }
955
956 static void
ol_tx_sched_txq_discard_wrr_adv(
958 struct ol_txrx_pdev_t *pdev,
959 struct ol_tx_frms_queue_t *txq,
960 int cat, int frames, int bytes)
961 {
962 struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
963 struct ol_tx_sched_wrr_adv_category_info_t *category;
964
965 category = &scheduler->categories[cat];
966
967 if (0 == txq->frms)
968 TAILQ_REMOVE(&category->state.head, txq, list_elem);
969
970
971 category->state.frms -= frames;
972 category->state.bytes -= bytes;
973 OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISCARD(category, frames);
974 if (category->state.frms == 0)
975 category->state.active = 0;
976 }
977
978 static void
ol_tx_sched_category_info_wrr_adv(
980 struct ol_txrx_pdev_t *pdev,
981 int cat, int *active,
982 int *frms, int *bytes)
983 {
984 struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
985 struct ol_tx_sched_wrr_adv_category_info_t *category;
986
987 category = &scheduler->categories[cat];
988 *active = category->state.active;
989 *frms = category->state.frms;
990 *bytes = category->state.bytes;
991 }
992
993 /**
994 * ol_tx_sched_wrr_param_update() - update the WRR TX sched params
995 * @pdev: Pointer to PDEV structure.
996 * @scheduler: Pointer to tx scheduler.
997 *
 * Update the WRR TX scheduling parameters for each category if they are
 * specified by the user in the INI file.
1000 *
1001 * Return: none
1002 */
static void ol_tx_sched_wrr_param_update(struct ol_txrx_pdev_t *pdev,
1004 struct ol_tx_sched_wrr_adv_t *
1005 scheduler)
1006 {
1007 int i;
1008 static const char * const tx_sched_wrr_name[4] = {
1009 "BE",
1010 "BK",
1011 "VI",
1012 "VO"
1013 };
1014
1015 if (!scheduler)
1016 return;
1017
1018 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1019 "%s: Tuning the TX scheduler wrr parameters by ini file:",
1020 __func__);
1021
1022 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1023 " skip credit limit credit disc");
1024
1025 for (i = OL_TX_SCHED_WRR_ADV_CAT_BE;
1026 i <= OL_TX_SCHED_WRR_ADV_CAT_VO; i++) {
1027 if (ol_cfg_get_wrr_skip_weight(pdev->ctrl_pdev, i)) {
1028 scheduler->categories[i].specs.wrr_skip_weight =
1029 ol_cfg_get_wrr_skip_weight(pdev->ctrl_pdev, i);
1030 scheduler->categories[i].specs.credit_threshold =
1031 ol_cfg_get_credit_threshold(pdev->ctrl_pdev, i);
1032 scheduler->categories[i].specs.send_limit =
1033 ol_cfg_get_send_limit(pdev->ctrl_pdev, i);
1034 scheduler->categories[i].specs.credit_reserve =
1035 ol_cfg_get_credit_reserve(pdev->ctrl_pdev, i);
1036 scheduler->categories[i].specs.discard_weight =
1037 ol_cfg_get_discard_weight(pdev->ctrl_pdev, i);
1038
1039 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1040 "%s-update: %d, %d, %d, %d, %d",
1041 tx_sched_wrr_name[i],
1042 scheduler->categories[i].specs.wrr_skip_weight,
1043 scheduler->categories[i].specs.credit_threshold,
1044 scheduler->categories[i].specs.send_limit,
1045 scheduler->categories[i].specs.credit_reserve,
1046 scheduler->categories[i].specs.discard_weight);
1047 } else {
1048 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1049 "%s-orig: %d, %d, %d, %d, %d",
1050 tx_sched_wrr_name[i],
1051 scheduler->categories[i].specs.wrr_skip_weight,
1052 scheduler->categories[i].specs.credit_threshold,
1053 scheduler->categories[i].specs.send_limit,
1054 scheduler->categories[i].specs.credit_reserve,
1055 scheduler->categories[i].specs.discard_weight);
1056 }
1057 }
1058 }
1059
1060 static void *
ol_tx_sched_init_wrr_adv(
1062 struct ol_txrx_pdev_t *pdev)
1063 {
1064 struct ol_tx_sched_wrr_adv_t *scheduler;
1065 int i;
1066
1067 scheduler = qdf_mem_malloc(
1068 sizeof(struct ol_tx_sched_wrr_adv_t));
1069 if (!scheduler)
1070 return scheduler;
1071
1072 OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VO, scheduler);
1073 OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VI, scheduler);
1074 OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BE, scheduler);
1075 OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BK, scheduler);
1076 OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(NON_QOS_DATA, scheduler);
1077 OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(UCAST_MGMT, scheduler);
1078 OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(MCAST_DATA, scheduler);
1079 OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(MCAST_MGMT, scheduler);
1080
1081 ol_tx_sched_wrr_param_update(pdev, scheduler);
1082
1083 for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++) {
1084 scheduler->categories[i].state.active = 0;
1085 scheduler->categories[i].state.frms = 0;
1086 /*scheduler->categories[i].state.bytes = 0;*/
1087 TAILQ_INIT(&scheduler->categories[i].state.head);
1088 /*
1089 * init categories to not be skipped before
1090 * their initial selection
1091 */
1092 scheduler->categories[i].state.wrr_count =
1093 scheduler->categories[i].specs.wrr_skip_weight - 1;
1094 }
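	/*
	 * Example: with wrr_skip_weight = 3, wrr_count starts at 2, so on
	 * the first scheduler pass ++wrr_count reaches 3 and the category
	 * is eligible immediately instead of being skipped twice first.
	 */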
1095
1096 /*
1097 * Init the order array - the initial ordering doesn't matter, as the
1098 * order array will get reshuffled as data arrives.
1099 */
1100 for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++)
1101 scheduler->order[i] = i;
1102
1103 return scheduler;
1104 }
1105
1106
/* WMM parameters are supposed to be passed when associating with the AP.
 * Based on AIFS + CWMin, this function maps each queue to one of the four
 * default scheduler settings, i.e. VO, VI, BE, or BK.
 */
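/*
 * Worked example (illustrative AP values): suppose the AP advertises
 * aifs = 2 and cwmin = 4 for its video AC, giving a weight of 2 + 4 = 6.
 * The default EDCA sums are VO = 2 + 3 = 5, VI = 2 + 7 = 9,
 * BE = 3 + 15 = 18 and BK = 7 + 15 = 22, so 6 <= 9 selects the VI
 * scheduler defaults for that queue.
 */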
1111 void
ol_txrx_set_wmm_param(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1113 struct ol_tx_wmm_param_t wmm_param)
1114 {
1115 struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1116 ol_txrx_pdev_handle data_pdev =
1117 ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
1118 struct ol_tx_sched_wrr_adv_t def_cfg;
1119 struct ol_tx_sched_wrr_adv_t *scheduler =
1120 data_pdev->tx_sched.scheduler;
1121 u_int32_t i, ac_selected;
1122 u_int32_t weight[QCA_WLAN_AC_ALL], default_edca[QCA_WLAN_AC_ALL];
1123
1124 OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VO, (&def_cfg));
1125 OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VI, (&def_cfg));
1126 OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BE, (&def_cfg));
1127 OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BK, (&def_cfg));
1128
/* default_edca = AIFS + CWMin */
1130 default_edca[OL_TX_SCHED_WRR_ADV_CAT_VO] =
1131 OL_TX_AIFS_DEFAULT_VO + OL_TX_CW_MIN_DEFAULT_VO;
1132 default_edca[OL_TX_SCHED_WRR_ADV_CAT_VI] =
1133 OL_TX_AIFS_DEFAULT_VI + OL_TX_CW_MIN_DEFAULT_VI;
1134 default_edca[OL_TX_SCHED_WRR_ADV_CAT_BE] =
1135 OL_TX_AIFS_DEFAULT_BE + OL_TX_CW_MIN_DEFAULT_BE;
1136 default_edca[OL_TX_SCHED_WRR_ADV_CAT_BK] =
1137 OL_TX_AIFS_DEFAULT_BK + OL_TX_CW_MIN_DEFAULT_BK;
1138
1139 weight[OL_TX_SCHED_WRR_ADV_CAT_VO] =
1140 wmm_param.ac[QCA_WLAN_AC_VO].aifs +
1141 wmm_param.ac[QCA_WLAN_AC_VO].cwmin;
1142 weight[OL_TX_SCHED_WRR_ADV_CAT_VI] =
1143 wmm_param.ac[QCA_WLAN_AC_VI].aifs +
1144 wmm_param.ac[QCA_WLAN_AC_VI].cwmin;
1145 weight[OL_TX_SCHED_WRR_ADV_CAT_BK] =
1146 wmm_param.ac[QCA_WLAN_AC_BK].aifs +
1147 wmm_param.ac[QCA_WLAN_AC_BK].cwmin;
1148 weight[OL_TX_SCHED_WRR_ADV_CAT_BE] =
1149 wmm_param.ac[QCA_WLAN_AC_BE].aifs +
1150 wmm_param.ac[QCA_WLAN_AC_BE].cwmin;
1151
1152 for (i = 0; i < QCA_WLAN_AC_ALL; i++) {
1153 if (default_edca[OL_TX_SCHED_WRR_ADV_CAT_VO] >= weight[i])
1154 ac_selected = OL_TX_SCHED_WRR_ADV_CAT_VO;
1155 else if (default_edca[OL_TX_SCHED_WRR_ADV_CAT_VI] >= weight[i])
1156 ac_selected = OL_TX_SCHED_WRR_ADV_CAT_VI;
1157 else if (default_edca[OL_TX_SCHED_WRR_ADV_CAT_BE] >= weight[i])
1158 ac_selected = OL_TX_SCHED_WRR_ADV_CAT_BE;
1159 else
1160 ac_selected = OL_TX_SCHED_WRR_ADV_CAT_BK;
1161
1162
1163 scheduler->categories[i].specs.wrr_skip_weight =
1164 def_cfg.categories[ac_selected].specs.wrr_skip_weight;
1165 scheduler->categories[i].specs.credit_threshold =
1166 def_cfg.categories[ac_selected].specs.credit_threshold;
1167 scheduler->categories[i].specs.send_limit =
1168 def_cfg.categories[ac_selected].specs.send_limit;
1169 scheduler->categories[i].specs.credit_reserve =
1170 def_cfg.categories[ac_selected].specs.credit_reserve;
1171 scheduler->categories[i].specs.discard_weight =
1172 def_cfg.categories[ac_selected].specs.discard_weight;
1173 }
1174 }
1175
1176 /**
1177 * ol_tx_sched_stats_display() - tx sched stats display
1178 * @pdev: Pointer to the PDEV structure.
1179 *
1180 * Return: none.
1181 */
void ol_tx_sched_stats_display(struct ol_txrx_pdev_t *pdev)
1183 {
1184 OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(pdev->tx_sched.scheduler);
1185 }
1186
1187 /**
1188 * ol_tx_sched_cur_state_display() - tx sched cur stat display
1189 * @pdev: Pointer to the PDEV structure.
1190 *
1191 * Return: none.
1192 */
void ol_tx_sched_cur_state_display(struct ol_txrx_pdev_t *pdev)
1194 {
1195 OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(pdev->tx_sched.scheduler);
1196 }
1197
1198 /**
 * ol_tx_sched_stats_clear() - reset tx sched stats
1200 * @pdev: Pointer to the PDEV structure.
1201 *
1202 * Return: none.
1203 */
void ol_tx_sched_stats_clear(struct ol_txrx_pdev_t *pdev)
1205 {
1206 OL_TX_SCHED_WRR_ADV_CAT_STAT_CLEAR(pdev->tx_sched.scheduler);
1207 }
1208
1209 #endif /* OL_TX_SCHED == OL_TX_SCHED_WRR_ADV */
1210
1211 /*--- congestion control discard --------------------------------------------*/
1212
1213 static struct ol_tx_frms_queue_t *
ol_tx_sched_discard_select_txq(
1215 struct ol_txrx_pdev_t *pdev,
1216 ol_tx_frms_queue_list *tx_queues)
1217 {
1218 struct ol_tx_frms_queue_t *txq;
1219 struct ol_tx_frms_queue_t *selected_txq = NULL;
1220 int max_frms = 0;
1221
1222 /* return the tx queue with the most frames */
1223 TAILQ_FOREACH(txq, tx_queues, list_elem) {
1224 if (txq->frms > max_frms) {
1225 max_frms = txq->frms;
1226 selected_txq = txq;
1227 }
1228 }
1229 return selected_txq;
1230 }
1231
1232 u_int16_t
ol_tx_sched_discard_select(
1234 struct ol_txrx_pdev_t *pdev,
1235 u_int16_t frms,
1236 ol_tx_desc_list *tx_descs,
1237 bool force)
1238 {
1239 int cat;
1240 struct ol_tx_frms_queue_t *txq;
1241 int bytes;
1242 u_int32_t credit;
1243 struct ol_tx_sched_notify_ctx_t notify_ctx;
1244
1245 /*
1246 * first decide what category of traffic (e.g. TID or AC)
1247 * to discard next
1248 */
1249 cat = ol_tx_sched_discard_select_category(pdev);
1250
1251 /* then decide which peer within this category to discard from next */
1252 txq = ol_tx_sched_discard_select_txq(
1253 pdev, ol_tx_sched_category_tx_queues(pdev, cat));
1254 if (!txq)
1255 /* No More pending Tx Packets in Tx Queue. Exit Discard loop */
1256 return 0;
1257
1258
1259 if (force == false) {
1260 /*
1261 * Now decide how many frames to discard from this peer-TID.
1262 * Don't discard more frames than the caller has specified.
1263 * Don't discard more than a fixed quantum of frames at a time.
1264 * Don't discard more than 50% of the queue's frames at a time,
1265 * but if there's only 1 frame left, go ahead and discard it.
1266 */
1267 #define OL_TX_DISCARD_QUANTUM 10
1268 if (OL_TX_DISCARD_QUANTUM < frms)
1269 frms = OL_TX_DISCARD_QUANTUM;
1270
1271
1272 if (txq->frms > 1 && frms >= (txq->frms >> 1))
1273 frms = txq->frms >> 1;
1274 }
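	/*
	 * Example (illustrative): if the caller asks to discard 10 frames
	 * but the selected queue holds only 8, the 50% rule clamps the
	 * discard to 4 frames; with a single queued frame, that one frame
	 * is discarded outright.
	 */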
1275
1276 /*
1277 * Discard from the head of the queue, because:
1278 * 1. Front-dropping gives applications like TCP that include ARQ
1279 * an early notification of congestion.
1280 * 2. For time-sensitive applications like RTP, the newest frames are
1281 * most relevant.
1282 */
1283 credit = 10000; /* no credit limit */
1284 frms = ol_tx_dequeue(pdev, txq, tx_descs, frms, &credit, &bytes);
1285
1286 notify_ctx.event = OL_TX_DISCARD_FRAMES;
1287 notify_ctx.frames = frms;
1288 notify_ctx.bytes = bytes;
1289 notify_ctx.txq = txq;
1290 notify_ctx.info.ext_tid = cat;
ol_tx_sched_notify(pdev, &notify_ctx);
1292
1293 TX_SCHED_DEBUG_PRINT("Tx Drop : %d", frms);
1294 return frms;
1295 }
1296
1297 /*--- scheduler framework ---------------------------------------------------*/
1298
/*
 * The scheduler mutex spinlock has been acquired outside this function,
 * so there is no need to take locks inside this function.
 */
1303 void
ol_tx_sched_notify(
1305 struct ol_txrx_pdev_t *pdev,
1306 struct ol_tx_sched_notify_ctx_t *ctx)
1307 {
1308 struct ol_tx_frms_queue_t *txq = ctx->txq;
1309 int tid;
1310
1311 if (!pdev->tx_sched.scheduler)
1312 return;
1313
1314 switch (ctx->event) {
1315 case OL_TX_ENQUEUE_FRAME:
1316 tid = ctx->info.tx_msdu_info->htt.info.ext_tid;
1317 ol_tx_sched_txq_enqueue(pdev, txq, tid, 1, ctx->bytes);
1318 break;
1319 case OL_TX_DELETE_QUEUE:
1320 tid = ctx->info.ext_tid;
1321 if (txq->flag == ol_tx_queue_active)
1322 ol_tx_sched_txq_deactivate(pdev, txq, tid);
1323
1324 break;
1325 case OL_TX_PAUSE_QUEUE:
1326 tid = ctx->info.ext_tid;
1327 if (txq->flag == ol_tx_queue_active)
1328 ol_tx_sched_txq_deactivate(pdev, txq, tid);
1329
1330 break;
1331 case OL_TX_UNPAUSE_QUEUE:
1332 tid = ctx->info.ext_tid;
1333 if (txq->frms != 0)
1334 ol_tx_sched_txq_enqueue(pdev, txq, tid,
1335 txq->frms, txq->bytes);
1336
1337 break;
1338 case OL_TX_DISCARD_FRAMES:
1339 /* not necessarily TID, could be category */
1340 tid = ctx->info.ext_tid;
1341 ol_tx_sched_txq_discard(pdev, txq, tid,
1342 ctx->frames, ctx->bytes);
1343 break;
1344 default:
1345 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1346 "Error: unknown sched notification (%d)\n",
1347 ctx->event);
1348 qdf_assert(0);
1349 break;
1350 }
1351 }
1352
1353 #define OL_TX_MSDU_ID_STORAGE_ERR(ptr) (!ptr)
1354
1355 static void
ol_tx_sched_dispatch(
1357 struct ol_txrx_pdev_t *pdev,
1358 struct ol_tx_sched_ctx *sctx)
1359 {
1360 qdf_nbuf_t msdu, prev = NULL, head_msdu = NULL;
1361 struct ol_tx_desc_t *tx_desc;
1362 u_int16_t *msdu_id_storage;
1363 u_int16_t msdu_id;
1364 int num_msdus = 0;
1365
1366 TX_SCHED_DEBUG_PRINT("Enter");
1367 while (sctx->frms) {
1368 tx_desc = TAILQ_FIRST(&sctx->head);
1369 if (!tx_desc) {
1370 /* TODO: find its reason */
1371 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1372 "%s: err, no enough tx_desc from stx->head.\n",
1373 __func__);
1374 break;
1375 }
1376 msdu = tx_desc->netbuf;
1377 TAILQ_REMOVE(&sctx->head, tx_desc, tx_desc_list_elem);
1378 if (!head_msdu)
1379 head_msdu = msdu;
1380
1381 if (prev)
1382 qdf_nbuf_set_next(prev, msdu);
1383
1384 prev = msdu;
1385
1386 #ifndef ATH_11AC_TXCOMPACT
1387 /*
1388 * When the tx frame is downloaded to the target, there are two
1389 * outstanding references:
1390 * 1. The host download SW (HTT, HTC, HIF)
1391 * This reference is cleared by the ol_tx_send_done callback
1392 * functions.
1393 * 2. The target FW
1394 * This reference is cleared by the ol_tx_completion_handler
1395 * function.
1396 * It is extremely probable that the download completion is
1397 * processed before the tx completion message. However, under
1398 * exceptional conditions the tx completion may be processed
 * first.  Thus, rather than assuming that reference (1) is done
 * before reference (2), explicit reference tracking is needed.
1402 * Double-increment the ref count to account for both references
1403 * described above.
1404 */
1405 qdf_atomic_init(&tx_desc->ref_cnt);
1406 qdf_atomic_inc(&tx_desc->ref_cnt);
1407 qdf_atomic_inc(&tx_desc->ref_cnt);
1408 #endif
1409
/* store the MSDU ID for each MSDU */
1412 msdu_id = ol_tx_desc_id(pdev, tx_desc);
1413 msdu_id_storage = ol_tx_msdu_id_storage(msdu);
1414 if (OL_TX_MSDU_ID_STORAGE_ERR(msdu_id_storage)) {
1415 /*
1416 * Send the prior frames as a batch,
 * then send this frame on its own,
1418 * then resume handling the remaining frames.
1419 */
1420 if (head_msdu)
1421 ol_tx_send_batch(pdev, head_msdu, num_msdus);
1422
1423 prev = NULL;
1424 head_msdu = prev;
1425 num_msdus = 0;
1426
1427 if (htt_tx_send_std(pdev->htt_pdev, msdu, msdu_id)) {
1428 ol_tx_target_credit_incr(pdev, msdu);
1429 ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
1430 1 /* error */);
1431 }
1432 } else {
1433 *msdu_id_storage = msdu_id;
1434 num_msdus++;
1435 }
1436 sctx->frms--;
1437 }
1438
1439 /*Send Batch Of Frames*/
1440 if (head_msdu)
1441 ol_tx_send_batch(pdev, head_msdu, num_msdus);
1442 TX_SCHED_DEBUG_PRINT("Leave");
1443 }
1444
1445 #ifdef QCA_TX_PADDING_CREDIT_SUPPORT
static void replenish_tx_pad_credit(struct ol_txrx_pdev_t *pdev)
1447 {
1448 int replenish_credit = 0, avail_targ_tx_credit = 0;
1449 int cur_tx_pad_credit = 0, grp_credit = 0, i = 0;
1450 qdf_atomic_t *tx_grp_credit = NULL;
1451
1452 cur_tx_pad_credit = qdf_atomic_read(&pdev->pad_reserve_tx_credit);
1453 if (cur_tx_pad_credit < MIN_TX_PAD_CREDIT_THRESH) {
1454 replenish_credit = MAX_TX_PAD_CREDIT_THRESH - cur_tx_pad_credit;
1455 avail_targ_tx_credit = qdf_atomic_read(&pdev->target_tx_credit);
1456 replenish_credit = (replenish_credit < avail_targ_tx_credit) ?
1457 replenish_credit : avail_targ_tx_credit;
1458 if (replenish_credit < 0) {
1459 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
1460 "Tx Pad Credits = %d Target Tx Credits = %d",
1461 cur_tx_pad_credit,
1462 avail_targ_tx_credit);
1463 qdf_assert(0);
1464 }
1465 qdf_atomic_add(replenish_credit, &pdev->pad_reserve_tx_credit);
1466 qdf_atomic_add(-replenish_credit, &pdev->target_tx_credit);
1467
1468 while (replenish_credit > 0) {
1469 for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
1470 tx_grp_credit = &pdev->txq_grps[i].credit;
1471 grp_credit = qdf_atomic_read(tx_grp_credit);
1472 if (grp_credit) {
1473 qdf_atomic_add(-1, tx_grp_credit);
1474 replenish_credit--;
1475 }
1476 if (!replenish_credit)
1477 break;
1478 }
1479 }
1480 }
1481 }
1482 #else
static void replenish_tx_pad_credit(struct ol_txrx_pdev_t *pdev)
1484 {
1485 }
1486 #endif
1487
1488 void
ol_tx_sched(struct ol_txrx_pdev_t *pdev)
1490 {
1491 struct ol_tx_sched_ctx sctx;
1492 u_int32_t credit;
1493
1494 TX_SCHED_DEBUG_PRINT("Enter");
1495 qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
1496 if (pdev->tx_sched.tx_sched_status != ol_tx_scheduler_idle) {
1497 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
1498 return;
1499 }
1500 pdev->tx_sched.tx_sched_status = ol_tx_scheduler_running;
1501
1502 ol_tx_sched_log(pdev);
1503 /*
1504 *adf_os_print("BEFORE tx sched:\n");
1505 *ol_tx_queues_display(pdev);
1506 */
1507 replenish_tx_pad_credit(pdev);
1508 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
1509
1510 TAILQ_INIT(&sctx.head);
1511 sctx.frms = 0;
1512
1513 ol_tx_sched_select_init(pdev);
1514 while (qdf_atomic_read(&pdev->target_tx_credit) > 0) {
1515 int num_credits;
1516
1517 qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
1518 replenish_tx_pad_credit(pdev);
1519 credit = qdf_atomic_read(&pdev->target_tx_credit);
1520 num_credits = ol_tx_sched_select_batch(pdev, &sctx, credit);
1521 if (num_credits > 0) {
1522 #if DEBUG_HTT_CREDIT
1523 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1524 " <HTT> Decrease credit %d - %d = %d.\n",
1525 qdf_atomic_read(&pdev->target_tx_credit),
1526 num_credits,
1527 qdf_atomic_read(&pdev->target_tx_credit) -
1528 num_credits);
1529 #endif
1530 DPTRACE(qdf_dp_trace_credit_record(QDF_TX_SCHED,
1531 QDF_CREDIT_DEC, num_credits,
1532 qdf_atomic_read(&pdev->target_tx_credit) -
1533 num_credits,
1534 qdf_atomic_read(&pdev->txq_grps[0].credit),
1535 qdf_atomic_read(&pdev->txq_grps[1].credit)));
1536
1537 qdf_atomic_add(-num_credits, &pdev->target_tx_credit);
1538 }
1539 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
1540
1541 if (num_credits == 0)
1542 break;
1543 }
1544 ol_tx_sched_dispatch(pdev, &sctx);
1545
1546 qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
1547 /*
1548 *adf_os_print("AFTER tx sched:\n");
1549 *ol_tx_queues_display(pdev);
1550 */
1551
1552 pdev->tx_sched.tx_sched_status = ol_tx_scheduler_idle;
1553 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
1554 TX_SCHED_DEBUG_PRINT("Leave");
1555 }
1556
1557 void *
ol_tx_sched_attach(
1559 struct ol_txrx_pdev_t *pdev)
1560 {
1561 pdev->tx_sched.tx_sched_status = ol_tx_scheduler_idle;
1562 return ol_tx_sched_init(pdev);
1563 }
1564
1565 void
ol_tx_sched_detach(
1567 struct ol_txrx_pdev_t *pdev)
1568 {
1569 if (pdev->tx_sched.scheduler) {
1570 qdf_mem_free(pdev->tx_sched.scheduler);
1571 pdev->tx_sched.scheduler = NULL;
1572 }
1573 }
1574
1575 /*--- debug functions -------------------------------------------------------*/
1576
1577 #if defined(DEBUG_HL_LOGGING)
1578
1579 static void
ol_tx_sched_log(struct ol_txrx_pdev_t *pdev)
1581 {
1582 u_int8_t *buf;
1583 u_int32_t *active_bitmap;
1584 int i, j, num_cats_active;
1585 int active, frms, bytes;
1586 int credit;
1587
1588 /* don't bother recording state if credit is zero */
1589 credit = qdf_atomic_read(&pdev->target_tx_credit);
1590 if (credit == 0)
1591 return;
1592
1593
1594 /*
1595 * See how many TIDs are active, so queue state can be stored only
1596 * for those TIDs.
1597 * Do an initial iteration through all categories to see if any
1598 * are active. Doing an extra iteration is inefficient, but
1599 * efficiency is not a dominant concern when logging is enabled.
1600 */
1601 num_cats_active = 0;
1602 for (i = 0; i < OL_TX_SCHED_NUM_CATEGORIES; i++) {
1603 ol_tx_sched_category_info(pdev, i, &active, &frms, &bytes);
1604 if (active)
1605 num_cats_active++;
1606 }
1607 /* don't bother recording state if there are no active queues */
1608 if (num_cats_active == 0)
1609 return;
1610
1611
1612 ol_tx_queue_log_sched(pdev, credit, &num_cats_active,
1613 &active_bitmap, &buf);
1614
1615 if (num_cats_active == 0)
1616 return;
1617
1618 *active_bitmap = 0;
1619 for (i = 0, j = 0;
1620 i < OL_TX_SCHED_NUM_CATEGORIES && j < num_cats_active;
1621 i++) {
1622 u_int8_t *p;
1623
1624 ol_tx_sched_category_info(pdev, i, &active, &frms, &bytes);
1625 if (!active)
1626 continue;
1627
1628 p = &buf[j*6];
1629 p[0] = (frms >> 0) & 0xff;
1630 p[1] = (frms >> 8) & 0xff;
1631
1632 p[2] = (bytes >> 0) & 0xff;
1633 p[3] = (bytes >> 8) & 0xff;
1634 p[4] = (bytes >> 16) & 0xff;
1635 p[5] = (bytes >> 24) & 0xff;
1636 j++;
1637 *active_bitmap |= 1 << i;
1638 }
1639 }
1640
1641 #endif /* defined(DEBUG_HL_LOGGING) */
1642
1643 #endif /* defined(CONFIG_HL_SUPPORT) */
1644