xref: /wlan-dirver/qca-wifi-host-cmn/scheduler/src/scheduler_core.c (revision 8b3dca18206e1a0461492f082fa6e270b092c035)
1cd395495SRajeev Kumar /*
2f28396d0SVivek  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3*8b3dca18SJeff Johnson  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
4cd395495SRajeev Kumar  *
5cd395495SRajeev Kumar  * Permission to use, copy, modify, and/or distribute this software for
6cd395495SRajeev Kumar  * any purpose with or without fee is hereby granted, provided that the
7cd395495SRajeev Kumar  * above copyright notice and this permission notice appear in all
8cd395495SRajeev Kumar  * copies.
9cd395495SRajeev Kumar  *
10cd395495SRajeev Kumar  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11cd395495SRajeev Kumar  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12cd395495SRajeev Kumar  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13cd395495SRajeev Kumar  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14cd395495SRajeev Kumar  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15cd395495SRajeev Kumar  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16cd395495SRajeev Kumar  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17cd395495SRajeev Kumar  * PERFORMANCE OF THIS SOFTWARE.
18cd395495SRajeev Kumar  */
19cd395495SRajeev Kumar 
20cd395495SRajeev Kumar #include <scheduler_core.h>
21ef615e76SHouston Hoffman #include <qdf_atomic.h>
22c7ee85c4SDustin Brown #include "qdf_flex_mem.h"
23cd395495SRajeev Kumar 
/* the singleton scheduler context; published to callers via gp_sched_ctx */
static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;

/* preallocated pool backing duplicated scheduler messages;
 * WLAN_SCHED_REDUCTION_LIMIT presumably governs pool shrinkage — see
 * qdf_flex_mem.h to confirm
 */
DEFINE_QDF_FLEX_MEM_POOL(sched_pool, sizeof(struct scheduler_msg),
			 WLAN_SCHED_REDUCTION_LIMIT);
29c7ee85c4SDustin Brown 
3039bb395eSDustin Brown #ifdef WLAN_SCHED_HISTORY_SIZE
3139bb395eSDustin Brown 
/* column headings and separator rule used by sched_history_print() */
#define SCHEDULER_HISTORY_HEADER "|Callback                               "\
				 "|Message Type"			   \
				 "|Queue Duration(us)|Queue Depth"	   \
				 "|Run Duration(us)|"

#define SCHEDULER_HISTORY_LINE "--------------------------------------" \
			       "--------------------------------------" \
			       "--------------------------------------"
40f28396d0SVivek 
/**
 * struct sched_history_item - metrics for a scheduler message
 * @callback: the message's execution callback
 * @type_id: the message's type_id
 * @queue_id: Id of the queue the message was added to
 * @queue_start_us: timestamp when the message was queued in microseconds
 * @queue_duration_us: duration the message was queued in microseconds
 * @queue_depth: depth of the queue when the message was queued
 * @run_start_us: timestamp when the message started execution in microseconds
 * @run_duration_us: duration the message was executed in microseconds
 */
struct sched_history_item {
	void *callback;
	uint32_t type_id;
	QDF_MODULE_ID queue_id;
	uint64_t queue_start_us;
	uint32_t queue_duration_us;
	uint32_t queue_depth;
	uint64_t run_start_us;
	uint32_t run_duration_us;
};
6239bb395eSDustin Brown 
/* circular buffer of recently processed messages, plus its write cursor */
static struct sched_history_item sched_history[WLAN_SCHED_HISTORY_SIZE];
static uint32_t sched_history_index;
6539bb395eSDustin Brown 
66dae10a5fSDustin Brown static void sched_history_queue(struct scheduler_mq_type *queue,
67dae10a5fSDustin Brown 				struct scheduler_msg *msg)
68dae10a5fSDustin Brown {
69dae10a5fSDustin Brown 	msg->queue_id = queue->qid;
70dae10a5fSDustin Brown 	msg->queue_depth = qdf_list_size(&queue->mq_list);
71dae10a5fSDustin Brown 	msg->queued_at_us = qdf_get_log_timestamp_usecs();
72dae10a5fSDustin Brown }
73dae10a5fSDustin Brown 
7439bb395eSDustin Brown static void sched_history_start(struct scheduler_msg *msg)
7539bb395eSDustin Brown {
76dae10a5fSDustin Brown 	uint64_t started_at_us = qdf_get_log_timestamp_usecs();
7739bb395eSDustin Brown 	struct sched_history_item hist = {
7839bb395eSDustin Brown 		.callback = msg->callback,
7939bb395eSDustin Brown 		.type_id = msg->type,
80dae10a5fSDustin Brown 		.queue_start_us = msg->queued_at_us,
81dae10a5fSDustin Brown 		.queue_duration_us = started_at_us - msg->queued_at_us,
8227d56464SDustin Brown 		.queue_depth = msg->queue_depth,
83dae10a5fSDustin Brown 		.run_start_us = started_at_us,
8439bb395eSDustin Brown 	};
8539bb395eSDustin Brown 
8639bb395eSDustin Brown 	sched_history[sched_history_index] = hist;
8739bb395eSDustin Brown }
8839bb395eSDustin Brown 
8939bb395eSDustin Brown static void sched_history_stop(void)
9039bb395eSDustin Brown {
9139bb395eSDustin Brown 	struct sched_history_item *hist = &sched_history[sched_history_index];
92dae10a5fSDustin Brown 	uint64_t stopped_at_us = qdf_get_log_timestamp_usecs();
9339bb395eSDustin Brown 
94dae10a5fSDustin Brown 	hist->run_duration_us = stopped_at_us - hist->run_start_us;
9539bb395eSDustin Brown 
9639bb395eSDustin Brown 	sched_history_index++;
9739bb395eSDustin Brown 	sched_history_index %= WLAN_SCHED_HISTORY_SIZE;
9839bb395eSDustin Brown }
9939bb395eSDustin Brown 
100f28396d0SVivek void sched_history_print(void)
101f28396d0SVivek {
102f28396d0SVivek 	struct sched_history_item *history, *item;
103f28396d0SVivek 	uint32_t history_idx;
104f28396d0SVivek 	uint32_t idx, index;
105f28396d0SVivek 
106f28396d0SVivek 	history = qdf_mem_malloc(sizeof(*history) * WLAN_SCHED_HISTORY_SIZE);
107f28396d0SVivek 
108f28396d0SVivek 	if (!history) {
109f28396d0SVivek 		sched_err("Mem alloc failed");
110f28396d0SVivek 		return;
111f28396d0SVivek 	}
112f28396d0SVivek 
113f28396d0SVivek 	qdf_mem_copy(history, &sched_history,
114f28396d0SVivek 		     (sizeof(*history) * WLAN_SCHED_HISTORY_SIZE));
115f28396d0SVivek 	history_idx = sched_history_index;
116f28396d0SVivek 
117f28396d0SVivek 	sched_nofl_fatal(SCHEDULER_HISTORY_LINE);
118f28396d0SVivek 	sched_nofl_fatal(SCHEDULER_HISTORY_HEADER);
119f28396d0SVivek 	sched_nofl_fatal(SCHEDULER_HISTORY_LINE);
120f28396d0SVivek 
121f28396d0SVivek 	for (idx = 0; idx < WLAN_SCHED_HISTORY_SIZE; idx++) {
122f28396d0SVivek 		index = (history_idx + idx) % WLAN_SCHED_HISTORY_SIZE;
123f28396d0SVivek 		item = history + index;
124f28396d0SVivek 
125f28396d0SVivek 		if (!item->callback)
126f28396d0SVivek 			continue;
127f28396d0SVivek 
128f28396d0SVivek 		sched_nofl_fatal("%40pF|%12d|%18d|%11d|%16d|",
129f28396d0SVivek 				 item->callback, item->type_id,
130f28396d0SVivek 				 item->queue_duration_us,
131f28396d0SVivek 				 item->queue_depth,
132f28396d0SVivek 				 item->run_duration_us);
133f28396d0SVivek 	}
134f28396d0SVivek 
135f28396d0SVivek 	sched_nofl_fatal(SCHEDULER_HISTORY_LINE);
136f28396d0SVivek 
137f28396d0SVivek 	qdf_mem_free(history);
138f28396d0SVivek }
13939bb395eSDustin Brown #else /* WLAN_SCHED_HISTORY_SIZE */
14039bb395eSDustin Brown 
/* no-op stubs used when scheduler history tracking is compiled out */
static inline void sched_history_queue(struct scheduler_mq_type *queue,
				       struct scheduler_msg *msg) { }
static inline void sched_history_start(struct scheduler_msg *msg) { }
static inline void sched_history_stop(void) { }
void sched_history_print(void) { }
14639bb395eSDustin Brown 
14739bb395eSDustin Brown #endif /* WLAN_SCHED_HISTORY_SIZE */
14839bb395eSDustin Brown 
/**
 * scheduler_create_ctx() - create the global scheduler context
 *
 * Initializes the message pool before publishing the context pointer.
 *
 * Return: QDF_STATUS_SUCCESS (cannot currently fail)
 */
QDF_STATUS scheduler_create_ctx(void)
{
	qdf_flex_mem_init(&sched_pool);
	gp_sched_ctx = &g_sched_ctx;

	return QDF_STATUS_SUCCESS;
}
156cd395495SRajeev Kumar 
/**
 * scheduler_destroy_ctx() - destroy the global scheduler context
 *
 * Unpublishes the context pointer before tearing down the message pool
 * (mirror image of scheduler_create_ctx()).
 *
 * Return: QDF_STATUS_SUCCESS (cannot currently fail)
 */
QDF_STATUS scheduler_destroy_ctx(void)
{
	gp_sched_ctx = NULL;
	qdf_flex_mem_deinit(&sched_pool);

	return QDF_STATUS_SUCCESS;
}
164cd395495SRajeev Kumar 
/**
 * scheduler_get_context() - return the global scheduler context
 *
 * Return: the global scheduler context; QDF_BUG() fires (debug assert) if
 *	   scheduler_create_ctx() has not been called, and NULL is returned
 */
struct scheduler_ctx *scheduler_get_context(void)
{
	QDF_BUG(gp_sched_ctx);

	return gp_sched_ctx;
}
171cd395495SRajeev Kumar 
/**
 * scheduler_mq_init() - initialize a single scheduler message queue
 * @msg_q: the queue to initialize
 *
 * Creates the queue's spinlock and its bounded backing list.
 *
 * Return: QDF_STATUS_SUCCESS (cannot currently fail)
 */
static QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_spinlock_create(&msg_q->mq_lock);
	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);

	sched_exit();

	return QDF_STATUS_SUCCESS;
}
183c7ee85c4SDustin Brown 
/**
 * scheduler_mq_deinit() - de-initialize a single scheduler message queue
 * @msg_q: the queue to de-initialize
 *
 * Destroys the backing list before the spinlock (reverse of init order).
 * Callers are expected to have drained the queue first.
 */
static void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_list_destroy(&msg_q->mq_list);
	qdf_spinlock_destroy(&msg_q->mq_lock);

	sched_exit();
}
193c7ee85c4SDustin Brown 
/* count of messages currently held across all scheduler queues; incremented
 * in scheduler_core_msg_dup(), decremented in scheduler_core_msg_free()
 */
static qdf_atomic_t __sched_queue_depth;
/* consecutive dup failures due to a full scheduler; reset on each success */
static qdf_atomic_t __sched_dup_fail_count;
196cd395495SRajeev Kumar 
1973149adf5SDustin Brown static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
198cd395495SRajeev Kumar {
1993149adf5SDustin Brown 	QDF_STATUS status;
200cd395495SRajeev Kumar 	int i;
201cd395495SRajeev Kumar 
2023149adf5SDustin Brown 	sched_enter();
2033149adf5SDustin Brown 
2048afde5a8SDustin Brown 	QDF_BUG(sched_ctx);
2058afde5a8SDustin Brown 	if (!sched_ctx)
206cd395495SRajeev Kumar 		return QDF_STATUS_E_FAILURE;
207cd395495SRajeev Kumar 
208c7ee85c4SDustin Brown 	qdf_atomic_set(&__sched_queue_depth, 0);
209cd395495SRajeev Kumar 
210cd395495SRajeev Kumar 	/* Initialize all message queues */
211cd395495SRajeev Kumar 	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
2123149adf5SDustin Brown 		status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]);
213cd395495SRajeev Kumar 		if (QDF_STATUS_SUCCESS != status)
214cd395495SRajeev Kumar 			return status;
215cd395495SRajeev Kumar 	}
2163149adf5SDustin Brown 
217ba5d80caSKrunal Soni 	/* Initialize all qid to qidx mapping to invalid values */
218ba5d80caSKrunal Soni 	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
219ba5d80caSKrunal Soni 		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
220ba5d80caSKrunal Soni 					SCHEDULER_NUMBER_OF_MSG_QUEUE;
221cd395495SRajeev Kumar 
2223149adf5SDustin Brown 	sched_exit();
223cd395495SRajeev Kumar 
224cd395495SRajeev Kumar 	return status;
225cd395495SRajeev Kumar }
226cd395495SRajeev Kumar 
/**
 * scheduler_all_queues_deinit() - de-initialize all scheduler message queues
 * @sched_ctx: the global scheduler context
 *
 * Tears down every message queue and resets the qid->qidx map back to the
 * invalid sentinel (SCHEDULER_NUMBER_OF_MSG_QUEUE).
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_FAILURE on a null
 *	   context
 */
static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* De-Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);

	/* Initialize all qid to qidx mapping to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}
250cd395495SRajeev Kumar 
/**
 * scheduler_mq_put() - append a message to the tail of a scheduler queue
 * @msg_q: the queue to append to
 * @msg: the message to append (ownership passes to the queue)
 */
void scheduler_mq_put(struct scheduler_mq_type *msg_q,
		      struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	/* stamp history metadata under the lock, with the pre-insert depth */
	sched_history_queue(msg_q, msg);
	qdf_list_insert_back(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}
259cd395495SRajeev Kumar 
/**
 * scheduler_mq_put_front() - prepend a message to the head of a scheduler
 *	queue (high-priority insert)
 * @msg_q: the queue to prepend to
 * @msg: the message to prepend (ownership passes to the queue)
 */
void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
			    struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	/* stamp history metadata under the lock, with the pre-insert depth */
	sched_history_queue(msg_q, msg);
	qdf_list_insert_front(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}
268cd395495SRajeev Kumar 
269c7ee85c4SDustin Brown struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q)
270cd395495SRajeev Kumar {
271c7ee85c4SDustin Brown 	QDF_STATUS status;
272c7ee85c4SDustin Brown 	qdf_list_node_t *node;
273cd395495SRajeev Kumar 
274c7ee85c4SDustin Brown 	qdf_spin_lock_irqsave(&msg_q->mq_lock);
275c7ee85c4SDustin Brown 	status = qdf_list_remove_front(&msg_q->mq_list, &node);
276c7ee85c4SDustin Brown 	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
277c7ee85c4SDustin Brown 
278c7ee85c4SDustin Brown 	if (QDF_IS_STATUS_ERROR(status))
279cd395495SRajeev Kumar 		return NULL;
280cd395495SRajeev Kumar 
281c7ee85c4SDustin Brown 	return qdf_container_of(node, struct scheduler_msg, node);
282cd395495SRajeev Kumar }
283cd395495SRajeev Kumar 
/**
 * scheduler_queues_deinit() - tear down all scheduler message queues
 * @sched_ctx: the global scheduler context
 *
 * Return: status from scheduler_all_queues_deinit()
 */
QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}
288cd395495SRajeev Kumar 
/**
 * scheduler_queues_init() - initialize all scheduler message queues
 * @sched_ctx: the global scheduler context
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_FAILURE on a null
 *	   context, or the failing status from scheduler_all_queues_init()
 */
QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	status = scheduler_all_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		/* best-effort teardown of whatever partially initialized */
		scheduler_all_queues_deinit(sched_ctx);
		sched_err("Failed to initialize the msg queues");
		return status;
	}

	sched_debug("Queue init passed");

	sched_exit();

	return QDF_STATUS_SUCCESS;
}
312cd395495SRajeev Kumar 
313c7ee85c4SDustin Brown struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg)
314cd395495SRajeev Kumar {
315c7ee85c4SDustin Brown 	struct scheduler_msg *dup;
316c7ee85c4SDustin Brown 
317c7ee85c4SDustin Brown 	if (qdf_atomic_inc_return(&__sched_queue_depth) >
318c7ee85c4SDustin Brown 	    SCHEDULER_CORE_MAX_MESSAGES)
319c7ee85c4SDustin Brown 		goto buffer_full;
320c7ee85c4SDustin Brown 
321c7ee85c4SDustin Brown 	dup = qdf_flex_mem_alloc(&sched_pool);
322c7ee85c4SDustin Brown 	if (!dup) {
323c7ee85c4SDustin Brown 		sched_err("out of memory");
324c7ee85c4SDustin Brown 		goto dec_queue_count;
325c7ee85c4SDustin Brown 	}
326c7ee85c4SDustin Brown 
327c7ee85c4SDustin Brown 	qdf_mem_copy(dup, msg, sizeof(*dup));
328c7ee85c4SDustin Brown 
3290626a4daSVivek 	qdf_atomic_set(&__sched_dup_fail_count, 0);
3300626a4daSVivek 
331c7ee85c4SDustin Brown 	return dup;
332c7ee85c4SDustin Brown 
333c7ee85c4SDustin Brown buffer_full:
3340626a4daSVivek 	if (qdf_atomic_inc_return(&__sched_dup_fail_count) >
3350626a4daSVivek 	    SCHEDULER_WRAPPER_MAX_FAIL_COUNT)
336fe41df9cSDustin Brown 		QDF_DEBUG_PANIC("Scheduler buffer is full");
337c7ee85c4SDustin Brown 
3380626a4daSVivek 
339c7ee85c4SDustin Brown dec_queue_count:
340c7ee85c4SDustin Brown 	qdf_atomic_dec(&__sched_queue_depth);
341c7ee85c4SDustin Brown 
342c7ee85c4SDustin Brown 	return NULL;
343cd395495SRajeev Kumar }
344cd395495SRajeev Kumar 
/**
 * scheduler_core_msg_free() - release a message obtained from
 *	scheduler_core_msg_dup()
 * @msg: the message to free
 */
void scheduler_core_msg_free(struct scheduler_msg *msg)
{
	qdf_flex_mem_free(&sched_pool, msg);
	/* release the queue-depth slot reserved by scheduler_core_msg_dup() */
	qdf_atomic_dec(&__sched_queue_depth);
}
350cd395495SRajeev Kumar 
351cd395495SRajeev Kumar static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
352cd395495SRajeev Kumar 					    bool *shutdown)
353cd395495SRajeev Kumar {
354cd395495SRajeev Kumar 	int i;
3553149adf5SDustin Brown 	QDF_STATUS status;
356c7ee85c4SDustin Brown 	struct scheduler_msg *msg;
357cd395495SRajeev Kumar 
358cd395495SRajeev Kumar 	if (!sch_ctx) {
359fe41df9cSDustin Brown 		QDF_DEBUG_PANIC("sch_ctx is null");
360cd395495SRajeev Kumar 		return;
361cd395495SRajeev Kumar 	}
362cd395495SRajeev Kumar 
363cd395495SRajeev Kumar 	/* start with highest priority queue : timer queue at index 0 */
364cd395495SRajeev Kumar 	i = 0;
365cd395495SRajeev Kumar 	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
366cd395495SRajeev Kumar 		/* Check if MC needs to shutdown */
367ef615e76SHouston Hoffman 		if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK,
368cd395495SRajeev Kumar 					&sch_ctx->sch_event_flag)) {
3698afde5a8SDustin Brown 			sched_debug("scheduler thread signaled to shutdown");
370cd395495SRajeev Kumar 			*shutdown = true;
3713149adf5SDustin Brown 
372cd395495SRajeev Kumar 			/* Check for any Suspend Indication */
373ef615e76SHouston Hoffman 			if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
374cd395495SRajeev Kumar 						&sch_ctx->sch_event_flag)) {
375cd395495SRajeev Kumar 				/* Unblock anyone waiting on suspend */
376cd395495SRajeev Kumar 				if (gp_sched_ctx->hdd_callback)
377cd395495SRajeev Kumar 					gp_sched_ctx->hdd_callback();
378cd395495SRajeev Kumar 			}
3793149adf5SDustin Brown 
380cd395495SRajeev Kumar 			break;
381cd395495SRajeev Kumar 		}
3823149adf5SDustin Brown 
383c7ee85c4SDustin Brown 		msg = scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
384c7ee85c4SDustin Brown 		if (!msg) {
385cd395495SRajeev Kumar 			/* check next queue */
386cd395495SRajeev Kumar 			i++;
387cd395495SRajeev Kumar 			continue;
388cd395495SRajeev Kumar 		}
3893149adf5SDustin Brown 
390cd395495SRajeev Kumar 		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
391e226cebdSDustin Brown 			sch_ctx->watchdog_msg_type = msg->type;
392e226cebdSDustin Brown 			sch_ctx->watchdog_callback = msg->callback;
39339bb395eSDustin Brown 
39439bb395eSDustin Brown 			sched_history_start(msg);
395e226cebdSDustin Brown 			qdf_timer_start(&sch_ctx->watchdog_timer,
39697f44cd3SVivek 					sch_ctx->timeout);
3973149adf5SDustin Brown 			status = sch_ctx->queue_ctx.
398e226cebdSDustin Brown 					scheduler_msg_process_fn[i](msg);
399e226cebdSDustin Brown 			qdf_timer_stop(&sch_ctx->watchdog_timer);
40039bb395eSDustin Brown 			sched_history_stop();
401e226cebdSDustin Brown 
4023149adf5SDustin Brown 			if (QDF_IS_STATUS_ERROR(status))
4033149adf5SDustin Brown 				sched_err("Failed processing Qid[%d] message",
404cd395495SRajeev Kumar 					  sch_ctx->queue_ctx.sch_msg_q[i].qid);
4053149adf5SDustin Brown 
406c7ee85c4SDustin Brown 			scheduler_core_msg_free(msg);
407cd395495SRajeev Kumar 		}
408cd395495SRajeev Kumar 
409cd395495SRajeev Kumar 		/* start again with highest priority queue at index 0 */
410cd395495SRajeev Kumar 		i = 0;
411cd395495SRajeev Kumar 	}
4123149adf5SDustin Brown 
413cd395495SRajeev Kumar 	/* Check for any Suspend Indication */
414ef615e76SHouston Hoffman 	if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
415cd395495SRajeev Kumar 			&sch_ctx->sch_event_flag)) {
416cd395495SRajeev Kumar 		qdf_spin_lock(&sch_ctx->sch_thread_lock);
417cd395495SRajeev Kumar 		qdf_event_reset(&sch_ctx->resume_sch_event);
418cd395495SRajeev Kumar 		/* controller thread suspend completion callback */
419cd395495SRajeev Kumar 		if (gp_sched_ctx->hdd_callback)
420cd395495SRajeev Kumar 			gp_sched_ctx->hdd_callback();
421cd395495SRajeev Kumar 		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
422cd395495SRajeev Kumar 		/* Wait for resume indication */
423cd395495SRajeev Kumar 		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
424cd395495SRajeev Kumar 	}
425cd395495SRajeev Kumar 
426cd395495SRajeev Kumar 	return;  /* Nothing to process wait on wait queue */
427cd395495SRajeev Kumar }
428cd395495SRajeev Kumar 
/**
 * scheduler_thread() - the scheduler thread's main loop
 * @arg: the global scheduler context, passed at thread creation
 *
 * Signals sch_start_event to the creator, then sleeps on the wait queue
 * until a post or suspend event is flagged, draining the queues on each
 * wakeup until a shutdown event sets @shutdown. Signals sch_shutdown on
 * exit.
 *
 * Return: 0 always (thread exit code)
 */
int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int retWaitStatus = 0;
	bool shutdown = false;

	if (!arg) {
		QDF_DEBUG_PANIC("arg is null");
		return 0;
	}
	/* slightly elevated priority for timely message dispatch */
	qdf_set_user_nice(current, -2);

	/* Ack back to the context from which the main controller thread
	 * has been created
	 */
	qdf_event_set(&sch_ctx->sch_start_event);
	sched_debug("scheduler thread %d (%s) starting up",
		    current->pid, current->comm);

	while (!shutdown) {
		/* This implements the execution model algorithm */
		retWaitStatus = qdf_wait_queue_interruptible(
					sch_ctx->sch_wait_queue,
					qdf_atomic_test_bit(MC_POST_EVENT_MASK,
						&sch_ctx->sch_event_flag) ||
					qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag));

		/* an interrupted wait means a signal was delivered, which
		 * the scheduler thread does not expect
		 */
		if (retWaitStatus == -ERESTARTSYS)
			QDF_DEBUG_PANIC("Scheduler received -ERESTARTSYS");

		/* consume the post event before draining, so a message
		 * posted during the drain re-arms the wait condition
		 */
		qdf_atomic_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* If we get here the scheduler thread must exit */
	sched_debug("Scheduler thread exiting");
	qdf_event_set(&sch_ctx->sch_shutdown);

	return 0;
}
470cd395495SRajeev Kumar 
471c7ee85c4SDustin Brown static void scheduler_flush_single_queue(struct scheduler_mq_type *mq)
472cd395495SRajeev Kumar {
473c7ee85c4SDustin Brown 	struct scheduler_msg *msg;
474c7ee85c4SDustin Brown 	QDF_STATUS (*flush_cb)(struct scheduler_msg *);
475cd395495SRajeev Kumar 
476c7ee85c4SDustin Brown 	while ((msg = scheduler_mq_get(mq))) {
477c7ee85c4SDustin Brown 		if (msg->flush_callback) {
4788afde5a8SDustin Brown 			sched_debug("Calling flush callback; type: %x",
479c7ee85c4SDustin Brown 				    msg->type);
480c7ee85c4SDustin Brown 			flush_cb = msg->flush_callback;
481c7ee85c4SDustin Brown 			flush_cb(msg);
482c7ee85c4SDustin Brown 		} else if (msg->bodyptr) {
4838afde5a8SDustin Brown 			sched_debug("Freeing scheduler msg bodyptr; type: %x",
484c7ee85c4SDustin Brown 				    msg->type);
485c7ee85c4SDustin Brown 			qdf_mem_free(msg->bodyptr);
486cd395495SRajeev Kumar 		}
487cd395495SRajeev Kumar 
488c7ee85c4SDustin Brown 		scheduler_core_msg_free(msg);
4891c6bb033SZhu Jianmin 	}
4901880b6f8SKrunal Soni }
4913149adf5SDustin Brown 
492c7ee85c4SDustin Brown void scheduler_queues_flush(struct scheduler_ctx *sched_ctx)
493c7ee85c4SDustin Brown {
494c7ee85c4SDustin Brown 	struct scheduler_mq_type *mq;
495c7ee85c4SDustin Brown 	int i;
496c7ee85c4SDustin Brown 
4978afde5a8SDustin Brown 	sched_debug("Flushing scheduler message queues");
498c7ee85c4SDustin Brown 
499c7ee85c4SDustin Brown 	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
500c7ee85c4SDustin Brown 		mq = &sched_ctx->queue_ctx.sch_msg_q[i];
501c7ee85c4SDustin Brown 		scheduler_flush_single_queue(mq);
502cd395495SRajeev Kumar 	}
503cd395495SRajeev Kumar }
504c7ee85c4SDustin Brown 
505