xref: /wlan-dirver/qcacld-3.0/core/dp/txrx/ol_tx_sched.c (revision eff16d956b6c25bc860fac91ea57d737c47dd7a7)
1 /*
2  * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_nbuf.h>         /* qdf_nbuf_t, etc. */
20 #include <htt.h>              /* HTT_TX_EXT_TID_MGMT */
21 #include <ol_htt_tx_api.h>    /* htt_tx_desc_tid */
22 #include <ol_txrx_api.h>      /* ol_txrx_vdev_handle */
23 #include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
24 #include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
25 #include <ol_txrx_types.h>    /* pdev stats, etc. */
26 #include <ol_tx_desc.h>       /* ol_tx_desc */
27 #include <ol_tx_send.h>       /* ol_tx_send */
28 #include <ol_tx_sched.h>      /* OL_TX_SCHED, etc. */
29 #include <ol_tx_queue.h>
30 #include <ol_txrx.h>
31 #include <qdf_types.h>
32 #include <qdf_mem.h>         /* qdf_os_mem_alloc_consistent et al */
33 #include <cdp_txrx_handle.h>
34 #if defined(CONFIG_HL_SUPPORT)
35 
/*
 * ol_tx_sched_log() - log tx scheduler state.
 * With DEBUG_HL_LOGGING this is only a forward declaration (the real
 * implementation is expected elsewhere in this file); without it, the
 * call sites compile down to this empty static stub.
 */
#if defined(DEBUG_HL_LOGGING)
static void
ol_tx_sched_log(struct ol_txrx_pdev_t *pdev);

#else
static void
ol_tx_sched_log(struct ol_txrx_pdev_t *pdev)
{
}
#endif /* defined(DEBUG_HL_LOGGING) */
46 
/*
 * OL_TX_DISPATCH_LOG_CREDIT() - trace the HTT tx-credit decrement for the
 * frame currently being dispatched.
 * Relies on local variables 'msdu' and 'pdev' being in scope at the
 * expansion site; expands to nothing unless DEBUG_HTT_CREDIT is set.
 */
#if DEBUG_HTT_CREDIT
#define OL_TX_DISPATCH_LOG_CREDIT()                                           \
	do {								      \
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,	\
			"TX %d bytes\n", qdf_nbuf_len(msdu));	\
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,	\
			" <HTT> Decrease credit %d - 1 = %d, len:%d.\n",  \
			qdf_atomic_read(&pdev->target_tx_credit),	\
			qdf_atomic_read(&pdev->target_tx_credit) - 1,	\
			qdf_nbuf_len(msdu));				\
	} while (0)
#else
#define OL_TX_DISPATCH_LOG_CREDIT()
#endif
61 
62 /*--- generic definitions used by the scheduler framework for all algs ---*/
63 
/* per-pass scheduler context: accumulates the tx descriptors selected
 * for download during one invocation of the scheduler
 */
struct ol_tx_sched_ctx {
	ol_tx_desc_list head;	/* descriptors chosen for download */
	int frms;		/* how many frames are on 'head' */
};

/* list-head type for chaining ol_tx_frms_queue_t tx queues */
typedef TAILQ_HEAD(ol_tx_frms_queue_list_s, ol_tx_frms_queue_t)
	ol_tx_frms_queue_list;

/* local min/max helpers; arguments are evaluated more than once, so
 * avoid side effects in the operands
 */
#define OL_A_MAX(_x, _y) ((_x) > (_y) ? (_x) : (_y))

#define OL_A_MIN(_x, _y) ((_x) < (_y) ? (_x) : (_y))
75 
76 	/*--- scheduler algorithm selection ---*/
77 
78 	/*--- scheduler options -----------------------------------------------
79 	 * 1. Round-robin scheduler:
80 	 *    Select the TID that is at the head of the list of active TIDs.
81 	 *    Select the head tx queue for this TID.
82 	 *    Move the tx queue to the back of the list of tx queues for
83 	 *    this TID.
84 	 *    Move the TID to the back of the list of active TIDs.
85 	 *    Send as many frames from the tx queue as credit allows.
86 	 * 2. Weighted-round-robin advanced scheduler:
87 	 *    Keep an ordered list of which TID gets selected next.
88 	 *    Use a weighted-round-robin scheme to determine when to promote
89 	 *    a TID within this list.
90 	 *    If a TID at the head of the list is inactive, leave it at the
91 	 *    head, but check the next TIDs.
92 	 *    If the credit available is less than the credit threshold for the
93 	 *    next active TID, don't send anything, and leave the TID at the
94 	 *    head of the list.
95 	 *    After a TID is selected, move it to the back of the list.
96 	 *    Select the head tx queue for this TID.
97 	 *    Move the tx queue to the back of the list of tx queues for this
98 	 *    TID.
99 	 *    Send no more frames than the limit specified for the TID.
100 	 */
/* scheduler algorithm identifiers; OL_TX_SCHED selects which one is built */
#define OL_TX_SCHED_RR  1
#define OL_TX_SCHED_WRR_ADV 2

#ifndef OL_TX_SCHED
	/*#define OL_TX_SCHED OL_TX_SCHED_RR*/
#define OL_TX_SCHED OL_TX_SCHED_WRR_ADV /* default */
#endif
108 
109 
#if OL_TX_SCHED == OL_TX_SCHED_RR

/* map the generic scheduler entry points onto the round-robin variants */
#define ol_tx_sched_rr_t ol_tx_sched_t

#define OL_TX_SCHED_NUM_CATEGORIES (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES)

#define ol_tx_sched_init                ol_tx_sched_init_rr
#define ol_tx_sched_select_init(pdev)   /* no-op */
#define ol_tx_sched_select_batch        ol_tx_sched_select_batch_rr
#define ol_tx_sched_txq_enqueue         ol_tx_sched_txq_enqueue_rr
#define ol_tx_sched_txq_deactivate      ol_tx_sched_txq_deactivate_rr
#define ol_tx_sched_category_tx_queues  ol_tx_sched_category_tx_queues_rr
#define ol_tx_sched_txq_discard         ol_tx_sched_txq_discard_rr
#define ol_tx_sched_category_info       ol_tx_sched_category_info_rr
#define ol_tx_sched_discard_select_category \
		ol_tx_sched_discard_select_category_rr

#elif OL_TX_SCHED == OL_TX_SCHED_WRR_ADV

/* map the generic scheduler entry points onto the weighted-RR variants */
#define ol_tx_sched_wrr_adv_t ol_tx_sched_t

#define OL_TX_SCHED_NUM_CATEGORIES OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES

#define ol_tx_sched_init                ol_tx_sched_init_wrr_adv
/* the WRR-adv select-init needs the tx queue spinlock held around it */
#define ol_tx_sched_select_init(pdev) \
		do { \
			qdf_spin_lock_bh(&pdev->tx_queue_spinlock); \
			ol_tx_sched_select_init_wrr_adv(pdev); \
			qdf_spin_unlock_bh(&pdev->tx_queue_spinlock); \
		} while (0)
#define ol_tx_sched_select_batch        ol_tx_sched_select_batch_wrr_adv
#define ol_tx_sched_txq_enqueue         ol_tx_sched_txq_enqueue_wrr_adv
#define ol_tx_sched_txq_deactivate      ol_tx_sched_txq_deactivate_wrr_adv
#define ol_tx_sched_category_tx_queues  ol_tx_sched_category_tx_queues_wrr_adv
#define ol_tx_sched_txq_discard         ol_tx_sched_txq_discard_wrr_adv
#define ol_tx_sched_category_info       ol_tx_sched_category_info_wrr_adv
#define ol_tx_sched_discard_select_category \
		ol_tx_sched_discard_select_category_wrr_adv

#else

#error Unknown OL TX SCHED specification

#endif /* OL_TX_SCHED */
154 
155 	/*--- round-robin scheduler ----------------------------------------*/
156 #if OL_TX_SCHED == OL_TX_SCHED_RR
157 
158 	/*--- definitions ---*/
159 
	/* per-TID bookkeeping: total frames/bytes pending across all of the
	 * TID's tx queues, plus the list of those tx queues
	 */
	struct ol_tx_active_queues_in_tid_t {
		/* list_elem is used to queue up into up level queues*/
		TAILQ_ENTRY(ol_tx_active_queues_in_tid_t) list_elem;
		u_int32_t frms;		/* pending frames in this TID */
		u_int32_t bytes;	/* pending bytes in this TID */
		ol_tx_frms_queue_list head; /* the TID's active tx queues */
		bool    active;		/* on the scheduler's active list? */
		int tid;
	};

	/* round-robin scheduler object: one bookkeeping slot per TID plus
	 * per-vdev queues, the round-robin list of active TIDs, and the
	 * per-TID discard weights
	 */
	struct ol_tx_sched_rr_t {
		struct ol_tx_active_queues_in_tid_t
			tx_active_queues_in_tid_array[OL_TX_NUM_TIDS
						+ OL_TX_VDEV_NUM_QUEUES];
	TAILQ_HEAD(ol_tx_active_tids_s, ol_tx_active_queues_in_tid_t)
							tx_active_tids_list;
		u_int8_t discard_weights[OL_TX_NUM_TIDS
					+ OL_TX_VDEV_NUM_QUEUES];
	};

/* cap on the credit one TID may consume in a single scheduler pass */
#define TX_SCH_MAX_CREDIT_FOR_THIS_TID(tidq) 16
181 
182 /*--- functions ---*/
183 
/*
 * The scheduler sync spinlock has been acquired outside this function,
 * so there is no need to worry about mutex within this function.
 */
/*
 * ol_tx_sched_select_batch_rr() - service the TID at the head of the
 * round-robin active-TIDs list and dequeue one batch of its frames.
 * @pdev: physical device owning the scheduler and tx queues
 * @sctx: scheduler pass context; dequeued frames are appended to sctx->head
 * @credit: tx credit currently available
 *
 * Return: the credits consumed by the dequeued batch (0 if no TID active).
 */
static int
ol_tx_sched_select_batch_rr(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_sched_ctx *sctx,
	u_int32_t credit)
{
	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
	struct ol_tx_active_queues_in_tid_t *txq_queue;
	struct ol_tx_frms_queue_t *next_tq;
	u_int16_t frames, used_credits = 0, tx_limit, tx_limit_flag = 0;
	int bytes;

	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);

	if (TAILQ_EMPTY(&scheduler->tx_active_tids_list))
		return used_credits;

	/* round-robin: take the TID at the head of the active list */
	txq_queue = TAILQ_FIRST(&scheduler->tx_active_tids_list);

	TAILQ_REMOVE(&scheduler->tx_active_tids_list, txq_queue, list_elem);
	txq_queue->active = false;

	/* likewise round-robin among the TID's own tx queues */
	next_tq = TAILQ_FIRST(&txq_queue->head);
	TAILQ_REMOVE(&txq_queue->head, next_tq, list_elem);

	/* cap the credit spent on one TID in a single pass */
	credit = OL_A_MIN(credit, TX_SCH_MAX_CREDIT_FOR_THIS_TID(next_tq));
	frames = next_tq->frms; /* download as many frames as credit allows */
	tx_limit = ol_tx_bad_peer_dequeue_check(next_tq,
					frames,
					&tx_limit_flag);
	frames = ol_tx_dequeue(
			pdev, next_tq, &sctx->head, tx_limit, &credit, &bytes);
	ol_tx_bad_peer_update_tx_limit(pdev, next_tq, frames, tx_limit_flag);

	/*
	 * NOTE(review): 'credit' is an in/out parameter of ol_tx_dequeue;
	 * after the call it is treated as the credits actually consumed -
	 * confirm against ol_tx_dequeue's contract.
	 */
	used_credits = credit;
	txq_queue->frms -= frames;
	txq_queue->bytes -= bytes;

	if (next_tq->frms > 0) {
		/* the serviced tx queue is still backlogged: re-queue both it
		 * and its TID at the back of their round-robin lists
		 */
		TAILQ_INSERT_TAIL(&txq_queue->head, next_tq, list_elem);
		TAILQ_INSERT_TAIL(
				&scheduler->tx_active_tids_list,
						txq_queue, list_elem);
		txq_queue->active = true;
	} else if (!TAILQ_EMPTY(&txq_queue->head)) {
		/*
		 * This tx queue is empty, but there's another tx queue for the
		 * same TID that is not empty.
		 * Thus, the TID as a whole is active.
		 */
		TAILQ_INSERT_TAIL(
				&scheduler->tx_active_tids_list,
						txq_queue, list_elem);
		txq_queue->active = true;
	}
	sctx->frms += frames;

	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
	return used_credits;
}
248 
249 static inline void
250 ol_tx_sched_txq_enqueue_rr(
251 	struct ol_txrx_pdev_t *pdev,
252 	struct ol_tx_frms_queue_t *txq,
253 	int tid,
254 	int frms,
255 	int bytes)
256 {
257 	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
258 	struct ol_tx_active_queues_in_tid_t *txq_queue;
259 
260 	txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
261 	if (txq->flag != ol_tx_queue_active)
262 		TAILQ_INSERT_TAIL(&txq_queue->head, txq, list_elem);
263 
264 	txq_queue->frms += frms;
265 	txq_queue->bytes += bytes;
266 
267 	if (!txq_queue->active) {
268 		TAILQ_INSERT_TAIL(
269 				&scheduler->tx_active_tids_list,
270 				txq_queue, list_elem);
271 		txq_queue->active = true;
272 	}
273 }
274 
275 static inline void
276 ol_tx_sched_txq_deactivate_rr(
277 	struct ol_txrx_pdev_t *pdev,
278 	struct ol_tx_frms_queue_t *txq,
279 	int tid)
280 {
281 	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
282 	struct ol_tx_active_queues_in_tid_t *txq_queue;
283 
284 	txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
285 	txq_queue->frms -= txq->frms;
286 	txq_queue->bytes -= txq->bytes;
287 
288 	TAILQ_REMOVE(&txq_queue->head, txq, list_elem);
289 	/*if (txq_queue->frms == 0 && txq_queue->active) {*/
290 	if (TAILQ_EMPTY(&txq_queue->head) && txq_queue->active) {
291 		TAILQ_REMOVE(&scheduler->tx_active_tids_list, txq_queue,
292 			     list_elem);
293 		txq_queue->active = false;
294 	}
295 }
296 
297 ol_tx_frms_queue_list *
298 ol_tx_sched_category_tx_queues_rr(struct ol_txrx_pdev_t *pdev, int tid)
299 {
300 	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
301 	struct ol_tx_active_queues_in_tid_t *txq_queue;
302 
303 	txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
304 	return &txq_queue->head;
305 }
306 
307 int
308 ol_tx_sched_discard_select_category_rr(struct ol_txrx_pdev_t *pdev)
309 {
310 	struct ol_tx_sched_rr_t *scheduler;
311 	u_int8_t i, tid = 0;
312 	int max_score = 0;
313 
314 	scheduler = pdev->tx_sched.scheduler;
315 	/*
316 	 * Choose which TID's tx frames to drop next based on two factors:
317 	 * 1.  Which TID has the most tx frames present
318 	 * 2.  The TID's priority (high-priority TIDs have a low discard_weight)
319 	 */
320 	for (i = 0; i < (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES); i++) {
321 		int score;
322 
323 		score =
324 			scheduler->tx_active_queues_in_tid_array[i].frms *
325 			scheduler->discard_weights[i];
326 		if (max_score == 0 || score > max_score) {
327 			max_score = score;
328 			tid = i;
329 		}
330 	}
331 	return tid;
332 }
333 
334 void
335 ol_tx_sched_txq_discard_rr(
336 	struct ol_txrx_pdev_t *pdev,
337 	struct ol_tx_frms_queue_t *txq,
338 	int tid, int frames, int bytes)
339 {
340 	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
341 	struct ol_tx_active_queues_in_tid_t *txq_queue;
342 
343 	txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
344 
345 	if (0 == txq->frms)
346 		TAILQ_REMOVE(&txq_queue->head, txq, list_elem);
347 
348 	txq_queue->frms -= frames;
349 	txq_queue->bytes -= bytes;
350 	if (txq_queue->active == true && txq_queue->frms == 0) {
351 		TAILQ_REMOVE(&scheduler->tx_active_tids_list, txq_queue,
352 			     list_elem);
353 		txq_queue->active = false;
354 	}
355 }
356 
357 void
358 ol_tx_sched_category_info_rr(
359 	struct ol_txrx_pdev_t *pdev,
360 	int cat, int *active,
361 	int *frms, int *bytes)
362 {
363 	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
364 	struct ol_tx_active_queues_in_tid_t *txq_queue;
365 
366 	txq_queue = &scheduler->tx_active_queues_in_tid_array[cat];
367 
368 	*active = txq_queue->active;
369 	*frms = txq_queue->frms;
370 	*bytes = txq_queue->bytes;
371 }
372 
/* relative discard weights per traffic class: a higher weight makes the
 * class's frames more likely to be chosen for discard, so high-priority
 * traffic (voice, mgmt) gets a low weight
 */
enum {
	ol_tx_sched_discard_weight_voice = 1,
	ol_tx_sched_discard_weight_video = 4,
	ol_tx_sched_discard_weight_ucast_default = 8,
	ol_tx_sched_discard_weight_mgmt_non_qos = 1, /* 0? */
	ol_tx_sched_discard_weight_mcast = 1, /* 0? also for probe & assoc */
};
380 
381 void *
382 ol_tx_sched_init_rr(
383 	struct ol_txrx_pdev_t *pdev)
384 {
385 	struct ol_tx_sched_rr_t *scheduler;
386 	int i;
387 
388 	scheduler = qdf_mem_malloc(sizeof(struct ol_tx_sched_rr_t));
389 	if (scheduler == NULL)
390 		return scheduler;
391 
392 	for (i = 0; i < (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES); i++) {
393 		scheduler->tx_active_queues_in_tid_array[i].tid = i;
394 		TAILQ_INIT(&scheduler->tx_active_queues_in_tid_array[i].head);
395 		scheduler->tx_active_queues_in_tid_array[i].active = 0;
396 		scheduler->tx_active_queues_in_tid_array[i].frms = 0;
397 		scheduler->tx_active_queues_in_tid_array[i].bytes = 0;
398 	}
399 	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
400 		scheduler->tx_active_queues_in_tid_array[i].tid = i;
401 		if (i < OL_TX_NON_QOS_TID) {
402 			int ac = TXRX_TID_TO_WMM_AC(i);
403 
404 			switch (ac) {
405 			case TXRX_WMM_AC_VO:
406 				scheduler->discard_weights[i] =
407 					ol_tx_sched_discard_weight_voice;
408 			case TXRX_WMM_AC_VI:
409 				scheduler->discard_weights[i] =
410 					ol_tx_sched_discard_weight_video;
411 			default:
412 				scheduler->discard_weights[i] =
413 				ol_tx_sched_discard_weight_ucast_default;
414 			};
415 		} else {
416 			scheduler->discard_weights[i] =
417 				ol_tx_sched_discard_weight_mgmt_non_qos;
418 		}
419 	}
420 	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
421 		int j = i + OL_TX_NUM_TIDS;
422 
423 		scheduler->tx_active_queues_in_tid_array[j].tid =
424 							OL_TX_NUM_TIDS - 1;
425 		scheduler->discard_weights[j] =
426 					ol_tx_sched_discard_weight_mcast;
427 	}
428 	TAILQ_INIT(&scheduler->tx_active_tids_list);
429 
430 	return scheduler;
431 }
432 
/**
 * ol_txrx_set_wmm_param() - dummy WMM configuration hook for the RR build
 * @data_pdev: physical device being configured (unused)
 * @wmm_param: WMM parameters to apply (ignored)
 *
 * The round-robin scheduler does not consume per-AC WMM parameters, so
 * this implementation only logs that it was called.
 */
void
ol_txrx_set_wmm_param(struct cdp_pdev *data_pdev,
		      struct ol_tx_wmm_param_t wmm_param)
{
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "Dummy function when OL_TX_SCHED_RR is enabled\n");
}
440 
/**
 * ol_tx_sched_stats_display() - tx sched stats display
 * @pdev: Pointer to the PDEV structure.
 *
 * No-op in the RR build: the round-robin scheduler keeps no stats.
 *
 * Return: none.
 */
void ol_tx_sched_stats_display(struct ol_txrx_pdev_t *pdev)
{
}
450 
/**
 * ol_tx_sched_cur_state_display() - tx sched cur stat display
 * @pdev: Pointer to the PDEV structure.
 *
 * No-op in the RR build: the round-robin scheduler keeps no stats.
 *
 * Return: none.
 */
void ol_tx_sched_cur_state_display(struct ol_txrx_pdev_t *pdev)
{
}
460 
/**
 * ol_tx_sched_stats_clear() - reset tx sched stats
 * @pdev: Pointer to the PDEV structure.
 *
 * No-op in the RR build: the round-robin scheduler keeps no stats.
 * (The header previously named the wrong function.)
 *
 * Return: none.
 */
void ol_tx_sched_stats_clear(struct ol_txrx_pdev_t *pdev)
{
}
470 
471 #endif /* OL_TX_SCHED == OL_TX_SCHED_RR */
472 
473 /*--- advanced scheduler ----------------------------------------------------*/
474 #if OL_TX_SCHED == OL_TX_SCHED_WRR_ADV
475 
476 /*--- definitions ---*/
477 
/* one WRR-adv traffic category: static configuration, live scheduling
 * state, and (optionally) debug counters
 */
struct ol_tx_sched_wrr_adv_category_info_t {
	/* static per-category configuration, filled from the CFG_SPEC enums */
	struct {
		int wrr_skip_weight;	/* turns to skip before servicing */
		u_int32_t credit_threshold; /* min credit before sending */
		u_int16_t send_limit;	/* max frames dequeued per turn */
		int credit_reserve;	/* credit held back from this cat */
		int discard_weight;	/* bias for discard selection */
	} specs;
	/* live scheduling state */
	struct {
		int wrr_count;		/* WRR turns accumulated so far */
		int frms;		/* pending frames in the category */
		int bytes;		/* pending bytes in the category */
		ol_tx_frms_queue_list head; /* active tx queues */
		bool active;		/* any tx queue currently active? */
	} state;
#ifdef DEBUG_HL_LOGGING
	/* debug-only counters, dumped by the *_STAT_DUMP helpers */
	struct {
		char *cat_name;
		unsigned int queued;
		unsigned int dispatched;
		unsigned int discard;
	} stat;
#endif
};
502 
/*
 * OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC() - declare the five per-category
 * tuning constants (as enums) for one traffic category; they are later
 * copied into the scheduler by OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE().
 */
#define OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(cat, \
		wrr_skip_weight, \
		credit_threshold, \
		send_limit, \
		credit_reserve, \
		discard_weights) \
		enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _WRR_SKIP_WEIGHT = \
			(wrr_skip_weight) }; \
		enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _CREDIT_THRESHOLD = \
			(credit_threshold) }; \
		enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _SEND_LIMIT = \
			(send_limit) }; \
		enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _CREDIT_RESERVE = \
			(credit_reserve) }; \
		enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _DISCARD_WEIGHT = \
			(discard_weights) };
519 /* Rome:
520  * For high-volume traffic flows (VI, BE, BK), use a credit threshold
521  * roughly equal to a large A-MPDU (occupying half the target memory
522  * available for holding tx frames) to download AMPDU-sized batches
523  * of traffic.
524  * For high-priority, low-volume traffic flows (VO and mgmt), use no
525  * credit threshold, to minimize download latency.
526  */
527 /*                                            WRR           send
528  *                                           skip  credit  limit credit disc
529  *                                            wts  thresh (frms) reserv  wts
530  */
531 #ifdef HIF_SDIO
532 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VO,           1,     17,    24,     0,  1);
533 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VI,           3,     17,    16,     1,  4);
534 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BE,          10,     17,    16,     1,  8);
535 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BK,          12,      6,     6,     1,  8);
536 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(NON_QOS_DATA,10,     17,    16,     1,  8);
537 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(UCAST_MGMT,   1,      1,     4,     0,  1);
538 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_DATA,  10,     17,     4,     1,  4);
539 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_MGMT,   1,      1,     4,     0,  1);
540 #else
541 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VO,           1,     16,    24,     0,  1);
542 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VI,           3,     16,    16,     1,  4);
543 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BE,          10,     12,    12,     1,  8);
544 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BK,          12,      6,     6,     1,  8);
545 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(NON_QOS_DATA, 12,      6,     4,     1,  8);
546 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(UCAST_MGMT,   1,      1,     4,     0,  1);
547 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_DATA,  10,     16,     4,     1,  4);
548 OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_MGMT,   1,      1,     4,     0,  1);
549 #endif
550 
#ifdef DEBUG_HL_LOGGING

#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INIT(category, scheduler)               \
	do {                                                                 \
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category]  \
		.stat.queued = 0;					\
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category]  \
		.stat.discard = 0;					\
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category]  \
		.stat.dispatched = 0;					\
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category]  \
		.stat.cat_name = #category;				\
	} while (0)
/*
 * Wrap the single-statement counter macros in do/while (0) so each one
 * behaves as exactly one statement at the call site.  The previous forms
 * carried their own trailing ';', which produced a second (empty)
 * statement when callers added their own semicolon and would break an
 * unbraced if/else (CERT PRE10-C).  Arguments are parenthesized against
 * operator-precedence surprises.
 */
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_QUEUED(category, frms)             \
	do { (category)->stat.queued += (frms); } while (0)
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISCARD(category, frms)           \
	do { (category)->stat.discard += (frms); } while (0)
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISPATCHED(category, frms)         \
	do { (category)->stat.dispatched += (frms); } while (0)
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(scheduler)                        \
	ol_tx_sched_wrr_adv_cat_stat_dump(scheduler)
#define OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(scheduler)                   \
	ol_tx_sched_wrr_adv_cat_cur_state_dump(scheduler)
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_CLEAR(scheduler)                       \
	ol_tx_sched_wrr_adv_cat_stat_clear(scheduler)

#else   /* DEBUG_HL_LOGGING */

#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INIT(category, scheduler)
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_QUEUED(category, frms)
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISCARD(category, frms)
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISPATCHED(category, frms)
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(scheduler)
#define OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(scheduler)
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_CLEAR(scheduler)

#endif  /* DEBUG_HL_LOGGING */
588 
/*
 * OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE() - copy the CFG_SPEC enum constants
 * for one category into the scheduler's specs, and initialize that
 * category's debug counters.
 */
#define OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(category, scheduler) \
	do { \
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
		.specs.wrr_skip_weight = \
		OL_TX_SCHED_WRR_ADV_ ## category ## _WRR_SKIP_WEIGHT; \
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
		.specs.credit_threshold = \
		OL_TX_SCHED_WRR_ADV_ ## category ## _CREDIT_THRESHOLD; \
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
		.specs.send_limit = \
		OL_TX_SCHED_WRR_ADV_ ## category ## _SEND_LIMIT; \
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
		.specs.credit_reserve = \
		OL_TX_SCHED_WRR_ADV_ ## category ## _CREDIT_RESERVE; \
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
		.specs.discard_weight = \
		OL_TX_SCHED_WRR_ADV_ ## category ## _DISCARD_WEIGHT; \
		OL_TX_SCHED_WRR_ADV_CAT_STAT_INIT(category, scheduler); \
	} while (0)
608 
/* weighted-round-robin scheduler object */
struct ol_tx_sched_wrr_adv_t {
	/* category indices in current service order (rotated as served) */
	int order[OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES];
	/* position in 'order' where the next selection pass starts */
	int index;
	struct ol_tx_sched_wrr_adv_category_info_t
		categories[OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES];
};

/* default 802.11 EDCA parameters per access category */
#define OL_TX_AIFS_DEFAULT_VO   2
#define OL_TX_AIFS_DEFAULT_VI   2
#define OL_TX_AIFS_DEFAULT_BE   3
#define OL_TX_AIFS_DEFAULT_BK   7
#define OL_TX_CW_MIN_DEFAULT_VO   3
#define OL_TX_CW_MIN_DEFAULT_VI   7
#define OL_TX_CW_MIN_DEFAULT_BE   15
#define OL_TX_CW_MIN_DEFAULT_BK   15
624 
625 /*--- functions ---*/
626 
627 #ifdef DEBUG_HL_LOGGING
628 static void ol_tx_sched_wrr_adv_cat_stat_dump(
629 	struct ol_tx_sched_wrr_adv_t *scheduler)
630 {
631 	int i;
632 
633 	txrx_nofl_info("Scheduler Stats:");
634 	txrx_nofl_info("====category(CRR,CRT,WSW): Queued  Discard  Dequeued  frms  wrr===");
635 	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; ++i) {
636 		txrx_nofl_info("%12s(%2d, %2d, %2d):  %6d  %7d  %8d  %4d  %3d",
637 			       scheduler->categories[i].stat.cat_name,
638 			       scheduler->categories[i].specs.credit_reserve,
639 			       scheduler->categories[i].specs.
640 					credit_threshold,
641 			       scheduler->categories[i].
642 					specs.wrr_skip_weight,
643 			       scheduler->categories[i].stat.queued,
644 			       scheduler->categories[i].stat.discard,
645 			       scheduler->categories[i].stat.dispatched,
646 			       scheduler->categories[i].state.frms,
647 			       scheduler->categories[i].state.wrr_count);
648 	}
649 }
650 
651 static void ol_tx_sched_wrr_adv_cat_cur_state_dump(
652 	struct ol_tx_sched_wrr_adv_t *scheduler)
653 {
654 	int i;
655 
656 	txrx_nofl_info("Scheduler State Snapshot:");
657 	txrx_nofl_info("====category(CRR,CRT,WSW): IS_Active  Pend_Frames  Pend_bytes  wrr===");
658 	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; ++i) {
659 		txrx_nofl_info("%12s(%2d, %2d, %2d):  %9d  %11d  %10d  %3d",
660 			       scheduler->categories[i].stat.cat_name,
661 			       scheduler->categories[i].specs.credit_reserve,
662 			       scheduler->categories[i].specs.
663 					credit_threshold,
664 			       scheduler->categories[i].specs.
665 					wrr_skip_weight,
666 			       scheduler->categories[i].state.active,
667 			       scheduler->categories[i].state.frms,
668 			       scheduler->categories[i].state.bytes,
669 			       scheduler->categories[i].state.wrr_count);
670 	}
671 }
672 
673 static void ol_tx_sched_wrr_adv_cat_stat_clear(
674 	struct ol_tx_sched_wrr_adv_t *scheduler)
675 {
676 	int i;
677 
678 	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; ++i) {
679 		scheduler->categories[i].stat.queued = 0;
680 		scheduler->categories[i].stat.discard = 0;
681 		scheduler->categories[i].stat.dispatched = 0;
682 	}
683 }
684 
685 #endif
686 
687 static void
688 ol_tx_sched_select_init_wrr_adv(struct ol_txrx_pdev_t *pdev)
689 {
690 	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
691 	/* start selection from the front of the ordered list */
692 	scheduler->index = 0;
693 	pdev->tx_sched.last_used_txq = NULL;
694 }
695 
696 static void
697 ol_tx_sched_wrr_adv_rotate_order_list_tail(
698 		struct ol_tx_sched_wrr_adv_t *scheduler, int idx)
699 {
700 	int value;
701 	/* remember the value of the specified element */
702 	value = scheduler->order[idx];
703 	/* shift all further elements up one space */
704 	for (; idx < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES-1; idx++)
705 		scheduler->order[idx] = scheduler->order[idx + 1];
706 
707 	/* put the specified element at the end */
708 	scheduler->order[idx] = value;
709 }
710 
711 static void
712 ol_tx_sched_wrr_adv_credit_sanity_check(struct ol_txrx_pdev_t *pdev,
713 					u_int32_t credit)
714 {
715 	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
716 	int i;
717 	int okay = 1;
718 
719 	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++) {
720 		if (scheduler->categories[i].specs.credit_threshold > credit) {
721 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
722 				  "*** Config error: credit (%d) not enough to support category %d threshold (%d)\n",
723 				  credit, i,
724 				  scheduler->categories[i].specs.
725 						credit_threshold);
726 			okay = 0;
727 		}
728 	}
729 	qdf_assert(okay);
730 }
731 
/*
 * The scheduler sync spinlock has been acquired outside this function,
 * so there is no need to worry about mutex within this function
 * (this also makes the function-local 'static int first' safe).
 */
/*
 * ol_tx_sched_select_batch_wrr_adv() - select the next WRR category and
 * dequeue one batch of its frames for download.
 * @pdev: physical device owning the scheduler and tx queues
 * @sctx: scheduler pass context; dequeued frames are appended to sctx->head
 * @credit: tx credit currently available
 *
 * Return: the credits consumed by the dequeued batch; 0 means nothing
 * could be sent and the caller's scheduling loop should stop.
 */
static int
ol_tx_sched_select_batch_wrr_adv(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_sched_ctx *sctx,
	u_int32_t credit)
{
	static int first = 1;
	int category_index = 0;
	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
	struct ol_tx_frms_queue_t *txq;
	int index;
	struct ol_tx_sched_wrr_adv_category_info_t *category = NULL;
	int frames, bytes, used_credits = 0, tx_limit;
	u_int16_t tx_limit_flag;

	/*
	 * Just for good measure, do a sanity check that the initial credit
	 * is enough to cover every category's credit threshold.
	 */
	if (first) {
		first = 0;
		ol_tx_sched_wrr_adv_credit_sanity_check(pdev, credit);
	}

	/* choose the traffic category from the ordered list */
	index = scheduler->index;
	while (index < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES) {
		category_index = scheduler->order[index];
		category = &scheduler->categories[category_index];
		if (!category->state.active) {
			/* move on to the next category */
			index++;
			continue;
		}
		if (++category->state.wrr_count <
					category->specs.wrr_skip_weight) {
			/* skip this category (move it to the back) */
			ol_tx_sched_wrr_adv_rotate_order_list_tail(scheduler,
								   index);
			/*
			 * try again (iterate) on the new element
			 * that was moved up
			 */
			continue;
		}
		/* found the first active category whose WRR turn is present */
		break;
	}
	if (index >= OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES) {
		/* no categories are active */
		return 0;
	}

	/* is there enough credit for the selected category? */
	if (credit < category->specs.credit_threshold) {
		/*
		 * Can't send yet - wait until more credit becomes available.
		 * In the meantime, restore the WRR counter (since we didn't
		 * service this category after all).
		 */
		category->state.wrr_count = category->state.wrr_count - 1;
		return 0;
	}
	/* enough credit is available - go ahead and send some frames */
	/*
	 * This category was serviced - reset the WRR counter, and move this
	 * category to the back of the order list.
	 */
	category->state.wrr_count = 0;
	ol_tx_sched_wrr_adv_rotate_order_list_tail(scheduler, index);
	/*
	 * With this category moved to the back, if there's still any credit
	 * left, set up the next invocation of this function to start from
	 * where this one left off, by looking at the category that just got
	 * shifted forward into the position the service category was
	 * occupying.
	 */
	scheduler->index = index;

	/*
	 * Take the tx queue from the head of the category list.
	 */
	txq = TAILQ_FIRST(&category->state.head);

	if (txq) {
		TAILQ_REMOVE(&category->state.head, txq, list_elem);
		/* clamp to what the txq's credit group allows */
		credit = ol_tx_txq_group_credit_limit(pdev, txq, credit);
		if (credit > category->specs.credit_reserve) {
			credit -= category->specs.credit_reserve;
			/*
			 * this tx queue will download some frames,
			 * so update last_used_txq
			 */
			pdev->tx_sched.last_used_txq = txq;

			tx_limit = ol_tx_bad_peer_dequeue_check(txq,
					category->specs.send_limit,
					&tx_limit_flag);
			frames = ol_tx_dequeue(
					pdev, txq, &sctx->head,
					tx_limit, &credit, &bytes);
			ol_tx_bad_peer_update_tx_limit(pdev, txq,
						       frames,
						       tx_limit_flag);

			OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISPATCHED(category,
								    frames);
			/* Update used global credits */
			/*
			 * NOTE(review): 'credit' is an in/out parameter of
			 * ol_tx_dequeue and is treated below as the credits
			 * actually consumed - confirm against its contract.
			 */
			used_credits = credit;
			credit =
			ol_tx_txq_update_borrowed_group_credits(pdev, txq,
								credit);
			category->state.frms -= frames;
			category->state.bytes -= bytes;
			if (txq->frms > 0) {
				/* txq still backlogged: keep it queued */
				TAILQ_INSERT_TAIL(&category->state.head,
						  txq, list_elem);
			} else {
				if (category->state.frms == 0)
					category->state.active = 0;
			}
			sctx->frms += frames;
			ol_tx_txq_group_credit_update(pdev, txq, -credit, 0);
		} else {
			if (ol_tx_is_txq_last_serviced_queue(pdev, txq)) {
				/*
				 * The scheduler has looked at all the active
				 * tx queues but none were able to download any
				 * of their tx frames.
				 * Nothing is changed, so if none were able
				 * to download before,
				 * they wont be able to download now.
				 * Return that no credit has been used, which
				 * will cause the scheduler to stop.
				 */
				TAILQ_INSERT_HEAD(&category->state.head, txq,
						  list_elem);
				return 0;
			}
			TAILQ_INSERT_TAIL(&category->state.head, txq,
					  list_elem);
			if (!pdev->tx_sched.last_used_txq)
				pdev->tx_sched.last_used_txq = txq;
		}
		TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
	} else {
		used_credits = 0;
		/* TODO: find its reason */
		ol_txrx_err("Error, no TXQ can be popped");
	}
	return used_credits;
}
888 
889 static inline void
890 ol_tx_sched_txq_enqueue_wrr_adv(
891 	struct ol_txrx_pdev_t *pdev,
892 	struct ol_tx_frms_queue_t *txq,
893 	int tid,
894 	int frms,
895 	int bytes)
896 {
897 	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
898 	struct ol_tx_sched_wrr_adv_category_info_t *category;
899 
900 	category = &scheduler->categories[pdev->tid_to_ac[tid]];
901 	category->state.frms += frms;
902 	category->state.bytes += bytes;
903 	OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_QUEUED(category, frms);
904 	if (txq->flag != ol_tx_queue_active) {
905 		TAILQ_INSERT_TAIL(&category->state.head, txq, list_elem);
906 		category->state.active = 1; /* may have already been active */
907 	}
908 }
909 
910 static inline void
911 ol_tx_sched_txq_deactivate_wrr_adv(
912 	struct ol_txrx_pdev_t *pdev,
913 	struct ol_tx_frms_queue_t *txq,
914 	int tid)
915 {
916 	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
917 	struct ol_tx_sched_wrr_adv_category_info_t *category;
918 
919 	category = &scheduler->categories[pdev->tid_to_ac[tid]];
920 	category->state.frms -= txq->frms;
921 	category->state.bytes -= txq->bytes;
922 
923 	TAILQ_REMOVE(&category->state.head, txq, list_elem);
924 
925 	if (category->state.frms == 0 && category->state.active)
926 		category->state.active = 0;
927 }
928 
929 static ol_tx_frms_queue_list *
930 ol_tx_sched_category_tx_queues_wrr_adv(struct ol_txrx_pdev_t *pdev, int cat)
931 {
932 	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
933 	struct ol_tx_sched_wrr_adv_category_info_t *category;
934 
935 	category = &scheduler->categories[cat];
936 	return &category->state.head;
937 }
938 
939 static int
940 ol_tx_sched_discard_select_category_wrr_adv(struct ol_txrx_pdev_t *pdev)
941 {
942 	struct ol_tx_sched_wrr_adv_t *scheduler;
943 	u_int8_t i, cat = 0;
944 	int max_score = 0;
945 
946 	scheduler = pdev->tx_sched.scheduler;
947 	/*
948 	 * Choose which category's tx frames to drop next based on two factors:
949 	 * 1.  Which category has the most tx frames present
950 	 * 2.  The category's priority (high-priority categories have a low
951 	 *     discard_weight)
952 	 */
953 	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++) {
954 		int score;
955 
956 		score =
957 			scheduler->categories[i].state.frms *
958 			scheduler->categories[i].specs.discard_weight;
959 		if (max_score == 0 || score > max_score) {
960 			max_score = score;
961 			cat = i;
962 		}
963 	}
964 	return cat;
965 }
966 
967 static void
968 ol_tx_sched_txq_discard_wrr_adv(
969 	struct ol_txrx_pdev_t *pdev,
970 	struct ol_tx_frms_queue_t *txq,
971 	int cat, int frames, int bytes)
972 {
973 	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
974 	struct ol_tx_sched_wrr_adv_category_info_t *category;
975 
976 	category = &scheduler->categories[cat];
977 
978 	if (0 == txq->frms)
979 		TAILQ_REMOVE(&category->state.head, txq, list_elem);
980 
981 
982 	category->state.frms -= frames;
983 	category->state.bytes -= bytes;
984 	OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISCARD(category, frames);
985 	if (category->state.frms == 0)
986 		category->state.active = 0;
987 }
988 
989 static void
990 ol_tx_sched_category_info_wrr_adv(
991 	struct ol_txrx_pdev_t *pdev,
992 	int cat, int *active,
993 	int *frms, int *bytes)
994 {
995 	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
996 	struct ol_tx_sched_wrr_adv_category_info_t *category;
997 
998 	category = &scheduler->categories[cat];
999 	*active = category->state.active;
1000 	*frms = category->state.frms;
1001 	*bytes = category->state.bytes;
1002 }
1003 
1004 /**
1005  * ol_tx_sched_wrr_param_update() - update the WRR TX sched params
1006  * @pdev: Pointer to PDEV structure.
1007  * @scheduler: Pointer to tx scheduler.
1008  *
1009  * Update the WRR TX schedule parameters for each category if it is
1010  * specified in the ini file by user.
1011  *
1012  * Return: none
1013  */
1014 static void ol_tx_sched_wrr_param_update(struct ol_txrx_pdev_t *pdev,
1015 					 struct ol_tx_sched_wrr_adv_t *
1016 					 scheduler)
1017 {
1018 	int i;
1019 	static const char * const tx_sched_wrr_name[4] = {
1020 		"BE",
1021 		"BK",
1022 		"VI",
1023 		"VO"
1024 	};
1025 
1026 	if (NULL == scheduler)
1027 		return;
1028 
1029 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1030 		"%s: Tuning the TX scheduler wrr parameters by ini file:",
1031 		__func__);
1032 
1033 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1034 		"         skip credit limit credit disc");
1035 
1036 	for (i = OL_TX_SCHED_WRR_ADV_CAT_BE;
1037 		i <= OL_TX_SCHED_WRR_ADV_CAT_VO; i++) {
1038 		if (ol_cfg_get_wrr_skip_weight(pdev->ctrl_pdev, i)) {
1039 			scheduler->categories[i].specs.wrr_skip_weight =
1040 				ol_cfg_get_wrr_skip_weight(pdev->ctrl_pdev, i);
1041 			scheduler->categories[i].specs.credit_threshold =
1042 				ol_cfg_get_credit_threshold(pdev->ctrl_pdev, i);
1043 			scheduler->categories[i].specs.send_limit =
1044 				ol_cfg_get_send_limit(pdev->ctrl_pdev, i);
1045 			scheduler->categories[i].specs.credit_reserve =
1046 				ol_cfg_get_credit_reserve(pdev->ctrl_pdev, i);
1047 			scheduler->categories[i].specs.discard_weight =
1048 				ol_cfg_get_discard_weight(pdev->ctrl_pdev, i);
1049 
1050 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1051 				"%s-update: %d,  %d,    %d,   %d,    %d",
1052 				tx_sched_wrr_name[i],
1053 				scheduler->categories[i].specs.wrr_skip_weight,
1054 				scheduler->categories[i].specs.credit_threshold,
1055 				scheduler->categories[i].specs.send_limit,
1056 				scheduler->categories[i].specs.credit_reserve,
1057 				scheduler->categories[i].specs.discard_weight);
1058 		} else {
1059 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1060 				"%s-orig: %d,  %d,    %d,   %d,    %d",
1061 				tx_sched_wrr_name[i],
1062 				scheduler->categories[i].specs.wrr_skip_weight,
1063 				scheduler->categories[i].specs.credit_threshold,
1064 				scheduler->categories[i].specs.send_limit,
1065 				scheduler->categories[i].specs.credit_reserve,
1066 				scheduler->categories[i].specs.discard_weight);
1067 		}
1068 	}
1069 }
1070 
1071 static void *
1072 ol_tx_sched_init_wrr_adv(
1073 		struct ol_txrx_pdev_t *pdev)
1074 {
1075 	struct ol_tx_sched_wrr_adv_t *scheduler;
1076 	int i;
1077 
1078 	scheduler = qdf_mem_malloc(
1079 			sizeof(struct ol_tx_sched_wrr_adv_t));
1080 	if (scheduler == NULL)
1081 		return scheduler;
1082 
1083 	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VO, scheduler);
1084 	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VI, scheduler);
1085 	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BE, scheduler);
1086 	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BK, scheduler);
1087 	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(NON_QOS_DATA, scheduler);
1088 	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(UCAST_MGMT, scheduler);
1089 	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(MCAST_DATA, scheduler);
1090 	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(MCAST_MGMT, scheduler);
1091 
1092 	ol_tx_sched_wrr_param_update(pdev, scheduler);
1093 
1094 	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++) {
1095 		scheduler->categories[i].state.active = 0;
1096 		scheduler->categories[i].state.frms = 0;
1097 		/*scheduler->categories[i].state.bytes = 0;*/
1098 		TAILQ_INIT(&scheduler->categories[i].state.head);
1099 		/*
1100 		 * init categories to not be skipped before
1101 		 * their initial selection
1102 		 */
1103 		scheduler->categories[i].state.wrr_count =
1104 			scheduler->categories[i].specs.wrr_skip_weight - 1;
1105 	}
1106 
1107 	/*
1108 	 * Init the order array - the initial ordering doesn't matter, as the
1109 	 * order array will get reshuffled as data arrives.
1110 	 */
1111 	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++)
1112 		scheduler->order[i] = i;
1113 
1114 	return scheduler;
1115 }
1116 
1117 
/* WMM parameters are supposed to be passed when associating with the AP.
 * Based on each AC's AIFS+CWMin, this function maps each WMM access
 * category to one of the four default scheduler settings, i.e. VO, VI,
 * BE, or BK, and copies that default's WRR spec into the live scheduler.
 */
void
ol_txrx_set_wmm_param(struct cdp_pdev *pdev,
		      struct ol_tx_wmm_param_t wmm_param)
{
	struct ol_txrx_pdev_t *data_pdev = (struct ol_txrx_pdev_t *)pdev;
	struct ol_tx_sched_wrr_adv_t def_cfg;
	struct ol_tx_sched_wrr_adv_t *scheduler =
					data_pdev->tx_sched.scheduler;
	u_int32_t i, ac_selected;
	u_int32_t  weight[OL_TX_NUM_WMM_AC], default_edca[OL_TX_NUM_WMM_AC];

	/*
	 * Capture the compile-time defaults for the four QoS categories in
	 * a local scratch struct.  Only these four entries of def_cfg are
	 * initialized; the remaining fields are uninitialized stack memory
	 * and must not be read.
	 */
	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VO, (&def_cfg));
	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VI, (&def_cfg));
	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BE, (&def_cfg));
	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BK, (&def_cfg));

	/* default_edca = AIFS + CWMin (compile-time reference per category) */
	default_edca[OL_TX_SCHED_WRR_ADV_CAT_VO] =
		OL_TX_AIFS_DEFAULT_VO + OL_TX_CW_MIN_DEFAULT_VO;
	default_edca[OL_TX_SCHED_WRR_ADV_CAT_VI] =
		OL_TX_AIFS_DEFAULT_VI + OL_TX_CW_MIN_DEFAULT_VI;
	default_edca[OL_TX_SCHED_WRR_ADV_CAT_BE] =
		OL_TX_AIFS_DEFAULT_BE + OL_TX_CW_MIN_DEFAULT_BE;
	default_edca[OL_TX_SCHED_WRR_ADV_CAT_BK] =
		OL_TX_AIFS_DEFAULT_BK + OL_TX_CW_MIN_DEFAULT_BK;

	/* weight = AIFS + CWMin taken from the AP-supplied WMM parameters */
	weight[OL_TX_SCHED_WRR_ADV_CAT_VO] =
		wmm_param.ac[OL_TX_WMM_AC_VO].aifs +
				wmm_param.ac[OL_TX_WMM_AC_VO].cwmin;
	weight[OL_TX_SCHED_WRR_ADV_CAT_VI] =
		wmm_param.ac[OL_TX_WMM_AC_VI].aifs +
				wmm_param.ac[OL_TX_WMM_AC_VI].cwmin;
	weight[OL_TX_SCHED_WRR_ADV_CAT_BK] =
		wmm_param.ac[OL_TX_WMM_AC_BK].aifs +
				wmm_param.ac[OL_TX_WMM_AC_BK].cwmin;
	weight[OL_TX_SCHED_WRR_ADV_CAT_BE] =
		wmm_param.ac[OL_TX_WMM_AC_BE].aifs +
				wmm_param.ac[OL_TX_WMM_AC_BE].cwmin;

	/*
	 * NOTE(review): 'i' is used both as a WMM AC index (for weight[])
	 * and as a scheduler category index (for scheduler->categories[]);
	 * this assumes the two enumerations align - confirm against the
	 * OL_TX_WMM_AC_* and OL_TX_SCHED_WRR_ADV_CAT_* definitions.
	 */
	for (i = 0; i < OL_TX_NUM_WMM_AC; i++) {
		/*
		 * Select the highest-priority default category whose default
		 * EDCA value still covers this AC's weight (a smaller
		 * AIFS+CWMin means a higher-priority AC).
		 */
		if (default_edca[OL_TX_SCHED_WRR_ADV_CAT_VO] >= weight[i])
			ac_selected = OL_TX_SCHED_WRR_ADV_CAT_VO;
		else if (default_edca[OL_TX_SCHED_WRR_ADV_CAT_VI] >= weight[i])
			ac_selected = OL_TX_SCHED_WRR_ADV_CAT_VI;
		else if (default_edca[OL_TX_SCHED_WRR_ADV_CAT_BE] >= weight[i])
			ac_selected = OL_TX_SCHED_WRR_ADV_CAT_BE;
		else
			ac_selected = OL_TX_SCHED_WRR_ADV_CAT_BK;


		/* Copy the selected default's WRR spec into the scheduler. */
		scheduler->categories[i].specs.wrr_skip_weight =
			def_cfg.categories[ac_selected].specs.wrr_skip_weight;
		scheduler->categories[i].specs.credit_threshold =
			def_cfg.categories[ac_selected].specs.credit_threshold;
		scheduler->categories[i].specs.send_limit =
			def_cfg.categories[ac_selected].specs.send_limit;
		scheduler->categories[i].specs.credit_reserve =
			def_cfg.categories[ac_selected].specs.credit_reserve;
		scheduler->categories[i].specs.discard_weight =
			def_cfg.categories[ac_selected].specs.discard_weight;
	}
}
1184 
1185 /**
1186  * ol_tx_sched_stats_display() - tx sched stats display
1187  * @pdev: Pointer to the PDEV structure.
1188  *
1189  * Return: none.
1190  */
1191 void ol_tx_sched_stats_display(struct ol_txrx_pdev_t *pdev)
1192 {
1193 	OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(pdev->tx_sched.scheduler);
1194 }
1195 
1196 /**
1197  * ol_tx_sched_cur_state_display() - tx sched cur stat display
1198  * @pdev: Pointer to the PDEV structure.
1199  *
1200  * Return: none.
1201  */
1202 void ol_tx_sched_cur_state_display(struct ol_txrx_pdev_t *pdev)
1203 {
1204 	OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(pdev->tx_sched.scheduler);
1205 }
1206 
1207 /**
1208  * ol_tx_sched_cur_state_display() - reset tx sched stats
1209  * @pdev: Pointer to the PDEV structure.
1210  *
1211  * Return: none.
1212  */
1213 void ol_tx_sched_stats_clear(struct ol_txrx_pdev_t *pdev)
1214 {
1215 	OL_TX_SCHED_WRR_ADV_CAT_STAT_CLEAR(pdev->tx_sched.scheduler);
1216 }
1217 
1218 #endif /* OL_TX_SCHED == OL_TX_SCHED_WRR_ADV */
1219 
1220 /*--- congestion control discard --------------------------------------------*/
1221 
1222 static struct ol_tx_frms_queue_t *
1223 ol_tx_sched_discard_select_txq(
1224 		struct ol_txrx_pdev_t *pdev,
1225 		ol_tx_frms_queue_list *tx_queues)
1226 {
1227 	struct ol_tx_frms_queue_t *txq;
1228 	struct ol_tx_frms_queue_t *selected_txq = NULL;
1229 	int max_frms = 0;
1230 
1231 	/* return the tx queue with the most frames */
1232 	TAILQ_FOREACH(txq, tx_queues, list_elem) {
1233 		if (txq->frms > max_frms) {
1234 			max_frms = txq->frms;
1235 			selected_txq = txq;
1236 		}
1237 	}
1238 	return selected_txq;
1239 }
1240 
/**
 * ol_tx_sched_discard_select() - discard queued tx frames to relieve
 * congestion
 * @pdev: the physical device doing the discard
 * @frms: upper bound on the number of frames to discard
 * @tx_descs: list that receives the tx descriptors of discarded frames
 * @force: if true, skip the rate-limiting heuristics and discard up to
 *         @frms frames outright
 *
 * Return: the number of frames actually discarded.
 */
u_int16_t
ol_tx_sched_discard_select(
		struct ol_txrx_pdev_t *pdev,
		u_int16_t frms,
		ol_tx_desc_list *tx_descs,
		bool force)
{
	int cat;
	struct ol_tx_frms_queue_t *txq;
	int bytes;
	u_int32_t credit;
	struct ol_tx_sched_notify_ctx_t notify_ctx;

	/*
	 * first decide what category of traffic (e.g. TID or AC)
	 * to discard next
	 */
	cat = ol_tx_sched_discard_select_category(pdev);

	/* then decide which peer within this category to discard from next */
	txq = ol_tx_sched_discard_select_txq(
			pdev, ol_tx_sched_category_tx_queues(pdev, cat));
	if (NULL == txq)
		/* No More pending Tx Packets in Tx Queue. Exit Discard loop */
		return 0;


	if (force == false) {
		/*
		 * Now decide how many frames to discard from this peer-TID.
		 * Don't discard more frames than the caller has specified.
		 * Don't discard more than a fixed quantum of frames at a time.
		 * Don't discard more than 50% of the queue's frames at a time,
		 * but if there's only 1 frame left, go ahead and discard it.
		 */
#define OL_TX_DISCARD_QUANTUM 10
		if (OL_TX_DISCARD_QUANTUM < frms)
			frms = OL_TX_DISCARD_QUANTUM;


		if (txq->frms > 1 && frms >= (txq->frms >> 1))
			frms = txq->frms >> 1;
	}

	/*
	 * Discard from the head of the queue, because:
	 * 1.  Front-dropping gives applications like TCP that include ARQ
	 *     an early notification of congestion.
	 * 2.  For time-sensitive applications like RTP, the newest frames are
	 *     most relevant.
	 */
	credit = 10000; /* no credit limit */
	frms = ol_tx_dequeue(pdev, txq, tx_descs, frms, &credit, &bytes);

	/* Tell the scheduler bookkeeping about the frames just removed. */
	notify_ctx.event = OL_TX_DISCARD_FRAMES;
	notify_ctx.frames = frms;
	notify_ctx.bytes = bytes;
	notify_ctx.txq = txq;
	notify_ctx.info.ext_tid = cat;
	ol_tx_sched_notify(pdev, &notify_ctx);

	TX_SCHED_DEBUG_PRINT("%s Tx Drop : %d\n", __func__, frms);
	return frms;
}
1305 
1306 /*--- scheduler framework ---------------------------------------------------*/
1307 
1308 /*
1309  * The scheduler mutex spinlock has been acquired outside this function,
 * so there is no need to take locks inside this function.
1311  */
1312 void
1313 ol_tx_sched_notify(
1314 		struct ol_txrx_pdev_t *pdev,
1315 		struct ol_tx_sched_notify_ctx_t *ctx)
1316 {
1317 	struct ol_tx_frms_queue_t *txq = ctx->txq;
1318 	int tid;
1319 
1320 	if (!pdev->tx_sched.scheduler)
1321 		return;
1322 
1323 	switch (ctx->event) {
1324 	case OL_TX_ENQUEUE_FRAME:
1325 		tid = ctx->info.tx_msdu_info->htt.info.ext_tid;
1326 		ol_tx_sched_txq_enqueue(pdev, txq, tid, 1, ctx->bytes);
1327 		break;
1328 	case OL_TX_DELETE_QUEUE:
1329 		tid = ctx->info.ext_tid;
1330 		if (txq->flag == ol_tx_queue_active)
1331 			ol_tx_sched_txq_deactivate(pdev, txq, tid);
1332 
1333 		break;
1334 	case OL_TX_PAUSE_QUEUE:
1335 		tid = ctx->info.ext_tid;
1336 		if (txq->flag == ol_tx_queue_active)
1337 			ol_tx_sched_txq_deactivate(pdev, txq, tid);
1338 
1339 		break;
1340 	case OL_TX_UNPAUSE_QUEUE:
1341 		tid = ctx->info.ext_tid;
1342 		if (txq->frms != 0)
1343 			ol_tx_sched_txq_enqueue(pdev, txq, tid,
1344 						txq->frms, txq->bytes);
1345 
1346 		break;
1347 	case OL_TX_DISCARD_FRAMES:
1348 		/* not necessarily TID, could be category */
1349 		tid = ctx->info.ext_tid;
1350 		ol_tx_sched_txq_discard(pdev, txq, tid,
1351 					ctx->frames, ctx->bytes);
1352 		break;
1353 	default:
1354 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1355 			  "Error: unknown sched notification (%d)\n",
1356 			  ctx->event);
1357 		qdf_assert(0);
1358 		break;
1359 	}
1360 }
1361 
1362 #define OL_TX_MSDU_ID_STORAGE_ERR(ptr) (NULL == ptr)
1363 
/*
 * Pop up to sctx->frms tx descriptors collected by the scheduler and hand
 * the corresponding netbufs to the target: frames whose MSDU-ID storage is
 * available are chained and sent as a batch; any frame without storage is
 * sent individually via htt_tx_send_std, flushing the batch built so far.
 */
static void
ol_tx_sched_dispatch(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_sched_ctx *sctx)
{
	qdf_nbuf_t msdu, prev = NULL, head_msdu = NULL;
	struct ol_tx_desc_t *tx_desc;
	u_int16_t *msdu_id_storage;
	u_int16_t msdu_id;
	int num_msdus = 0;

	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
	while (sctx->frms) {
		tx_desc = TAILQ_FIRST(&sctx->head);
		if (tx_desc == NULL) {
			/* TODO: find its reason */
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "%s: err, no enough tx_desc from stx->head.\n",
				  __func__);
			break;
		}
		msdu = tx_desc->netbuf;
		TAILQ_REMOVE(&sctx->head, tx_desc, tx_desc_list_elem);
		/* chain this msdu onto the batch being accumulated */
		if (NULL == head_msdu)
			head_msdu = msdu;

		if (prev)
			qdf_nbuf_set_next(prev, msdu);

		prev = msdu;

#ifndef ATH_11AC_TXCOMPACT
		/*
		 * When the tx frame is downloaded to the target, there are
		 * two outstanding references:
		 * 1.  The host download SW (HTT, HTC, HIF) - cleared by the
		 *     ol_tx_send_done callback functions.
		 * 2.  The target FW - cleared by the ol_tx_completion_handler
		 *     function.
		 * It is extremely probable that the download completion is
		 * processed before the tx completion message.  However, under
		 * exceptional conditions the tx completion may be processed
		 * first.  Thus, rather than assuming that reference (1) is
		 * done before reference (2), explicit reference tracking is
		 * needed.  Double-increment the ref count to account for both
		 * references described above.
		 */
		qdf_atomic_init(&tx_desc->ref_cnt);
		qdf_atomic_inc(&tx_desc->ref_cnt);
		qdf_atomic_inc(&tx_desc->ref_cnt);
#endif

		/* store this MSDU's ID in the netbuf's ID storage area */
		msdu_id = ol_tx_desc_id(pdev, tx_desc);
		msdu_id_storage = ol_tx_msdu_id_storage(msdu);
		if (OL_TX_MSDU_ID_STORAGE_ERR(msdu_id_storage)) {
			/*
			 * No ID storage available: send the prior frames as
			 * a batch, then send this one as a single frame, then
			 * resume handling the remaining frames.
			 */
			if (head_msdu)
				ol_tx_send_batch(pdev, head_msdu, num_msdus);

			prev = NULL;
			head_msdu = prev;
			num_msdus = 0;

			/* on send failure, refund credit and free the frame */
			if (htt_tx_send_std(pdev->htt_pdev, msdu, msdu_id)) {
				ol_tx_target_credit_incr(pdev, msdu);
				ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
							     1 /* error */);
			}
		} else {
			*msdu_id_storage = msdu_id;
			num_msdus++;
		}
		sctx->frms--;
	}

	/* send the final accumulated batch of frames */
	if (head_msdu)
		ol_tx_send_batch(pdev, head_msdu, num_msdus);
	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
}
1453 
1454 	void
1455 ol_tx_sched(struct ol_txrx_pdev_t *pdev)
1456 {
1457 	struct ol_tx_sched_ctx sctx;
1458 	u_int32_t credit;
1459 
1460 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
1461 	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
1462 	if (pdev->tx_sched.tx_sched_status != ol_tx_scheduler_idle) {
1463 		qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
1464 		return;
1465 	}
1466 	pdev->tx_sched.tx_sched_status = ol_tx_scheduler_running;
1467 
1468 	ol_tx_sched_log(pdev);
1469 	/*
1470 	 *adf_os_print("BEFORE tx sched:\n");
1471 	 *ol_tx_queues_display(pdev);
1472 	 */
1473 	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
1474 
1475 	TAILQ_INIT(&sctx.head);
1476 	sctx.frms = 0;
1477 
1478 	ol_tx_sched_select_init(pdev);
1479 	while (qdf_atomic_read(&pdev->target_tx_credit) > 0) {
1480 		int num_credits;
1481 
1482 		qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
1483 		credit = qdf_atomic_read(&pdev->target_tx_credit);
1484 		num_credits = ol_tx_sched_select_batch(pdev, &sctx, credit);
1485 		if (num_credits > 0) {
1486 #if DEBUG_HTT_CREDIT
1487 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1488 				  " <HTT> Decrease credit %d - %d = %d.\n",
1489 				  qdf_atomic_read(&pdev->target_tx_credit),
1490 				  num_credits,
1491 				  qdf_atomic_read(&pdev->target_tx_credit) -
1492 				  num_credits);
1493 #endif
1494 			qdf_atomic_add(-num_credits, &pdev->target_tx_credit);
1495 		}
1496 		qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
1497 
1498 		if (num_credits == 0)
1499 			break;
1500 	}
1501 	ol_tx_sched_dispatch(pdev, &sctx);
1502 
1503 	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
1504 	/*
1505 	 *adf_os_print("AFTER tx sched:\n");
1506 	 *ol_tx_queues_display(pdev);
1507 	 */
1508 
1509 	pdev->tx_sched.tx_sched_status = ol_tx_scheduler_idle;
1510 	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
1511 	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
1512 }
1513 
/*
 * Attach the tx scheduler to the pdev: mark the scheduler idle (checked
 * by ol_tx_sched() to prevent re-entry) and build the scheduler context.
 * Returns the scheduler context, or NULL on allocation failure.
 */
void *
ol_tx_sched_attach(
	struct ol_txrx_pdev_t *pdev)
{
	pdev->tx_sched.tx_sched_status = ol_tx_scheduler_idle;
	return ol_tx_sched_init(pdev);
}
1521 
1522 void
1523 ol_tx_sched_detach(
1524 	struct ol_txrx_pdev_t *pdev)
1525 {
1526 	if (pdev->tx_sched.scheduler) {
1527 		qdf_mem_free(pdev->tx_sched.scheduler);
1528 		pdev->tx_sched.scheduler = NULL;
1529 	}
1530 }
1531 
1532 /*--- debug functions -------------------------------------------------------*/
1533 
1534 #if defined(DEBUG_HL_LOGGING)
1535 
/*
 * Record a snapshot of the scheduler's per-category queue state into the
 * tx queue log.  For each active category, 6 bytes are stored: a 16-bit
 * frame count followed by a 32-bit byte count, both little-endian, with a
 * bitmap marking which categories the entries belong to.
 */
static void
ol_tx_sched_log(struct ol_txrx_pdev_t *pdev)
{
	u_int8_t  *buf;
	u_int32_t *active_bitmap;
	int i, j, num_cats_active;
	int active, frms, bytes;
	int credit;

	/* don't bother recording state if credit is zero */
	credit = qdf_atomic_read(&pdev->target_tx_credit);
	if (credit == 0)
		return;


	/*
	 * See how many TIDs are active, so queue state can be stored only
	 * for those TIDs.
	 * Do an initial iteration through all categories to see if any
	 * are active.  Doing an extra iteration is inefficient, but
	 * efficiency is not a dominant concern when logging is enabled.
	 */
	num_cats_active = 0;
	for (i = 0; i < OL_TX_SCHED_NUM_CATEGORIES; i++) {
		ol_tx_sched_category_info(pdev, i, &active, &frms, &bytes);
		if (active)
			num_cats_active++;
	}
	/* don't bother recording state if there are no active queues */
	if (num_cats_active == 0)
		return;


	/*
	 * Reserve log space; presumably active_bitmap and buf point into
	 * the log record and num_cats_active is zeroed on failure - TODO
	 * confirm against ol_tx_queue_log_sched().
	 */
	ol_tx_queue_log_sched(pdev, credit, &num_cats_active,
			      &active_bitmap, &buf);

	if (num_cats_active == 0)
		return;

	*active_bitmap = 0;
	for (i = 0, j = 0;
			i < OL_TX_SCHED_NUM_CATEGORIES && j < num_cats_active;
			i++) {
		u_int8_t *p;

		ol_tx_sched_category_info(pdev, i, &active, &frms, &bytes);
		if (!active)
			continue;

		/* pack 6 bytes per active category: frms (16b) + bytes (32b) */
		p = &buf[j*6];
		p[0]   = (frms >> 0) & 0xff;
		p[1] = (frms >> 8) & 0xff;

		p[2] = (bytes >> 0) & 0xff;
		p[3] = (bytes >> 8) & 0xff;
		p[4] = (bytes >> 16) & 0xff;
		p[5] = (bytes >> 24) & 0xff;
		j++;
		*active_bitmap |= 1 << i;
	}
}
1597 
1598 #endif /* defined(DEBUG_HL_LOGGING) */
1599 
1600 #endif /* defined(CONFIG_HL_SUPPORT) */
1601