xref: /wlan-dirver/qca-wifi-host-cmn/scheduler/src/scheduler_core.c (revision f28396d060cff5c6519f883cb28ae0116ce479f1)
1cd395495SRajeev Kumar /*
2*f28396d0SVivek  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3cd395495SRajeev Kumar  *
4cd395495SRajeev Kumar  * Permission to use, copy, modify, and/or distribute this software for
5cd395495SRajeev Kumar  * any purpose with or without fee is hereby granted, provided that the
6cd395495SRajeev Kumar  * above copyright notice and this permission notice appear in all
7cd395495SRajeev Kumar  * copies.
8cd395495SRajeev Kumar  *
9cd395495SRajeev Kumar  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10cd395495SRajeev Kumar  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11cd395495SRajeev Kumar  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12cd395495SRajeev Kumar  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13cd395495SRajeev Kumar  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14cd395495SRajeev Kumar  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15cd395495SRajeev Kumar  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16cd395495SRajeev Kumar  * PERFORMANCE OF THIS SOFTWARE.
17cd395495SRajeev Kumar  */
18cd395495SRajeev Kumar 
19cd395495SRajeev Kumar #include <scheduler_core.h>
20ef615e76SHouston Hoffman #include <qdf_atomic.h>
21c7ee85c4SDustin Brown #include "qdf_flex_mem.h"
22cd395495SRajeev Kumar 
/* the scheduler context singleton and the pointer through which it is
 * published; gp_sched_ctx is NULL until scheduler_create_ctx() runs
 */
static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;
25cd395495SRajeev Kumar 
/* backing memory pool for duplicated scheduler messages; the limit
 * presumably controls when the pool shrinks back down — see qdf_flex_mem
 */
DEFINE_QDF_FLEX_MEM_POOL(sched_pool, sizeof(struct scheduler_msg),
			 WLAN_SCHED_REDUCTION_LIMIT);
28c7ee85c4SDustin Brown 
2939bb395eSDustin Brown #ifdef WLAN_SCHED_HISTORY_SIZE
3039bb395eSDustin Brown 
31*f28396d0SVivek #define SCHEDULER_HISTORY_HEADER "|Callback                               "\
32*f28396d0SVivek 				 "|Message Type"			   \
33*f28396d0SVivek 				 "|Queue Duration(us)|Queue Depth"	   \
34*f28396d0SVivek 				 "|Run Duration(us)|"
35*f28396d0SVivek 
36*f28396d0SVivek #define SCHEDULER_HISTORY_LINE "--------------------------------------" \
37*f28396d0SVivek 			       "--------------------------------------" \
38*f28396d0SVivek 			       "--------------------------------------"
39*f28396d0SVivek 
/**
 * struct sched_history_item - metrics for a scheduler message
 * @callback: the message's execution callback
 * @type_id: the message's type_id
 * @queue_id: Id of the queue the message was added to
 * @queue_start_us: timestamp when the message was queued in microseconds
 * @queue_duration_us: duration the message was queued in microseconds
 * @queue_depth: depth of the queue when the message was queued
 * @run_start_us: timestamp when the message started execution in microseconds
 * @run_duration_us: duration the message was executed in microseconds
 */
struct sched_history_item {
	void *callback;
	uint32_t type_id;
	QDF_MODULE_ID queue_id;
	uint64_t queue_start_us;
	uint32_t queue_duration_us;
	uint32_t queue_depth;
	uint64_t run_start_us;
	uint32_t run_duration_us;
};
6139bb395eSDustin Brown 
/* ring buffer of per-message metrics; sched_history_index is the next
 * slot to overwrite (advanced in sched_history_stop())
 */
static struct sched_history_item sched_history[WLAN_SCHED_HISTORY_SIZE];
static uint32_t sched_history_index;
6439bb395eSDustin Brown 
65dae10a5fSDustin Brown static void sched_history_queue(struct scheduler_mq_type *queue,
66dae10a5fSDustin Brown 				struct scheduler_msg *msg)
67dae10a5fSDustin Brown {
68dae10a5fSDustin Brown 	msg->queue_id = queue->qid;
69dae10a5fSDustin Brown 	msg->queue_depth = qdf_list_size(&queue->mq_list);
70dae10a5fSDustin Brown 	msg->queued_at_us = qdf_get_log_timestamp_usecs();
71dae10a5fSDustin Brown }
72dae10a5fSDustin Brown 
7339bb395eSDustin Brown static void sched_history_start(struct scheduler_msg *msg)
7439bb395eSDustin Brown {
75dae10a5fSDustin Brown 	uint64_t started_at_us = qdf_get_log_timestamp_usecs();
7639bb395eSDustin Brown 	struct sched_history_item hist = {
7739bb395eSDustin Brown 		.callback = msg->callback,
7839bb395eSDustin Brown 		.type_id = msg->type,
79dae10a5fSDustin Brown 		.queue_start_us = msg->queued_at_us,
80dae10a5fSDustin Brown 		.queue_duration_us = started_at_us - msg->queued_at_us,
8127d56464SDustin Brown 		.queue_depth = msg->queue_depth,
82dae10a5fSDustin Brown 		.run_start_us = started_at_us,
8339bb395eSDustin Brown 	};
8439bb395eSDustin Brown 
8539bb395eSDustin Brown 	sched_history[sched_history_index] = hist;
8639bb395eSDustin Brown }
8739bb395eSDustin Brown 
8839bb395eSDustin Brown static void sched_history_stop(void)
8939bb395eSDustin Brown {
9039bb395eSDustin Brown 	struct sched_history_item *hist = &sched_history[sched_history_index];
91dae10a5fSDustin Brown 	uint64_t stopped_at_us = qdf_get_log_timestamp_usecs();
9239bb395eSDustin Brown 
93dae10a5fSDustin Brown 	hist->run_duration_us = stopped_at_us - hist->run_start_us;
9439bb395eSDustin Brown 
9539bb395eSDustin Brown 	sched_history_index++;
9639bb395eSDustin Brown 	sched_history_index %= WLAN_SCHED_HISTORY_SIZE;
9739bb395eSDustin Brown }
9839bb395eSDustin Brown 
99*f28396d0SVivek void sched_history_print(void)
100*f28396d0SVivek {
101*f28396d0SVivek 	struct sched_history_item *history, *item;
102*f28396d0SVivek 	uint32_t history_idx;
103*f28396d0SVivek 	uint32_t idx, index;
104*f28396d0SVivek 
105*f28396d0SVivek 	history = qdf_mem_malloc(sizeof(*history) * WLAN_SCHED_HISTORY_SIZE);
106*f28396d0SVivek 
107*f28396d0SVivek 	if (!history) {
108*f28396d0SVivek 		sched_err("Mem alloc failed");
109*f28396d0SVivek 		return;
110*f28396d0SVivek 	}
111*f28396d0SVivek 
112*f28396d0SVivek 	qdf_mem_copy(history, &sched_history,
113*f28396d0SVivek 		     (sizeof(*history) * WLAN_SCHED_HISTORY_SIZE));
114*f28396d0SVivek 	history_idx = sched_history_index;
115*f28396d0SVivek 
116*f28396d0SVivek 	sched_nofl_fatal(SCHEDULER_HISTORY_LINE);
117*f28396d0SVivek 	sched_nofl_fatal(SCHEDULER_HISTORY_HEADER);
118*f28396d0SVivek 	sched_nofl_fatal(SCHEDULER_HISTORY_LINE);
119*f28396d0SVivek 
120*f28396d0SVivek 	for (idx = 0; idx < WLAN_SCHED_HISTORY_SIZE; idx++) {
121*f28396d0SVivek 		index = (history_idx + idx) % WLAN_SCHED_HISTORY_SIZE;
122*f28396d0SVivek 		item = history + index;
123*f28396d0SVivek 
124*f28396d0SVivek 		if (!item->callback)
125*f28396d0SVivek 			continue;
126*f28396d0SVivek 
127*f28396d0SVivek 		sched_nofl_fatal("%40pF|%12d|%18d|%11d|%16d|",
128*f28396d0SVivek 				 item->callback, item->type_id,
129*f28396d0SVivek 				 item->queue_duration_us,
130*f28396d0SVivek 				 item->queue_depth,
131*f28396d0SVivek 				 item->run_duration_us);
132*f28396d0SVivek 	}
133*f28396d0SVivek 
134*f28396d0SVivek 	sched_nofl_fatal(SCHEDULER_HISTORY_LINE);
135*f28396d0SVivek 
136*f28396d0SVivek 	qdf_mem_free(history);
137*f28396d0SVivek }
13839bb395eSDustin Brown #else /* WLAN_SCHED_HISTORY_SIZE */
13939bb395eSDustin Brown 
/* scheduler history is compiled out; provide no-op stubs so callers
 * need no #ifdef guards
 */
static inline void sched_history_queue(struct scheduler_mq_type *queue,
				       struct scheduler_msg *msg) { }
static inline void sched_history_start(struct scheduler_msg *msg) { }
static inline void sched_history_stop(void) { }
void sched_history_print(void) { }
14539bb395eSDustin Brown 
14639bb395eSDustin Brown #endif /* WLAN_SCHED_HISTORY_SIZE */
14739bb395eSDustin Brown 
148cd395495SRajeev Kumar QDF_STATUS scheduler_create_ctx(void)
149cd395495SRajeev Kumar {
150c7ee85c4SDustin Brown 	qdf_flex_mem_init(&sched_pool);
15199a10d07SRajeev Kumar 	gp_sched_ctx = &g_sched_ctx;
152cd395495SRajeev Kumar 
153cd395495SRajeev Kumar 	return QDF_STATUS_SUCCESS;
154cd395495SRajeev Kumar }
155cd395495SRajeev Kumar 
156cd395495SRajeev Kumar QDF_STATUS scheduler_destroy_ctx(void)
157cd395495SRajeev Kumar {
158cd395495SRajeev Kumar 	gp_sched_ctx = NULL;
159c7ee85c4SDustin Brown 	qdf_flex_mem_deinit(&sched_pool);
16099a10d07SRajeev Kumar 
161cd395495SRajeev Kumar 	return QDF_STATUS_SUCCESS;
162cd395495SRajeev Kumar }
163cd395495SRajeev Kumar 
164cd395495SRajeev Kumar struct scheduler_ctx *scheduler_get_context(void)
165cd395495SRajeev Kumar {
166c7ee85c4SDustin Brown 	QDF_BUG(gp_sched_ctx);
167c7ee85c4SDustin Brown 
168cd395495SRajeev Kumar 	return gp_sched_ctx;
169cd395495SRajeev Kumar }
170cd395495SRajeev Kumar 
171c7ee85c4SDustin Brown static QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
172c7ee85c4SDustin Brown {
173c7ee85c4SDustin Brown 	sched_enter();
174c7ee85c4SDustin Brown 
175c7ee85c4SDustin Brown 	qdf_spinlock_create(&msg_q->mq_lock);
176c7ee85c4SDustin Brown 	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);
177c7ee85c4SDustin Brown 
178c7ee85c4SDustin Brown 	sched_exit();
179c7ee85c4SDustin Brown 
180c7ee85c4SDustin Brown 	return QDF_STATUS_SUCCESS;
181c7ee85c4SDustin Brown }
182c7ee85c4SDustin Brown 
183c7ee85c4SDustin Brown static void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
184c7ee85c4SDustin Brown {
185c7ee85c4SDustin Brown 	sched_enter();
186c7ee85c4SDustin Brown 
187c7ee85c4SDustin Brown 	qdf_list_destroy(&msg_q->mq_list);
188c7ee85c4SDustin Brown 	qdf_spinlock_destroy(&msg_q->mq_lock);
189c7ee85c4SDustin Brown 
190c7ee85c4SDustin Brown 	sched_exit();
191c7ee85c4SDustin Brown }
192c7ee85c4SDustin Brown 
/* number of messages currently duplicated and queued across all queues */
static qdf_atomic_t __sched_queue_depth;
/* consecutive scheduler_core_msg_dup() failures while the queue is full;
 * reset to zero on every successful dup
 */
static qdf_atomic_t __sched_dup_fail_count;
195cd395495SRajeev Kumar 
1963149adf5SDustin Brown static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
197cd395495SRajeev Kumar {
1983149adf5SDustin Brown 	QDF_STATUS status;
199cd395495SRajeev Kumar 	int i;
200cd395495SRajeev Kumar 
2013149adf5SDustin Brown 	sched_enter();
2023149adf5SDustin Brown 
2038afde5a8SDustin Brown 	QDF_BUG(sched_ctx);
2048afde5a8SDustin Brown 	if (!sched_ctx)
205cd395495SRajeev Kumar 		return QDF_STATUS_E_FAILURE;
206cd395495SRajeev Kumar 
207c7ee85c4SDustin Brown 	qdf_atomic_set(&__sched_queue_depth, 0);
208cd395495SRajeev Kumar 
209cd395495SRajeev Kumar 	/* Initialize all message queues */
210cd395495SRajeev Kumar 	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
2113149adf5SDustin Brown 		status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]);
212cd395495SRajeev Kumar 		if (QDF_STATUS_SUCCESS != status)
213cd395495SRajeev Kumar 			return status;
214cd395495SRajeev Kumar 	}
2153149adf5SDustin Brown 
216ba5d80caSKrunal Soni 	/* Initialize all qid to qidx mapping to invalid values */
217ba5d80caSKrunal Soni 	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
218ba5d80caSKrunal Soni 		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
219ba5d80caSKrunal Soni 					SCHEDULER_NUMBER_OF_MSG_QUEUE;
220cd395495SRajeev Kumar 
2213149adf5SDustin Brown 	sched_exit();
222cd395495SRajeev Kumar 
223cd395495SRajeev Kumar 	return status;
224cd395495SRajeev Kumar }
225cd395495SRajeev Kumar 
2263149adf5SDustin Brown static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
227cd395495SRajeev Kumar {
228cd395495SRajeev Kumar 	int i;
229cd395495SRajeev Kumar 
2303149adf5SDustin Brown 	sched_enter();
2313149adf5SDustin Brown 
2328afde5a8SDustin Brown 	QDF_BUG(sched_ctx);
2338afde5a8SDustin Brown 	if (!sched_ctx)
234cd395495SRajeev Kumar 		return QDF_STATUS_E_FAILURE;
235cd395495SRajeev Kumar 
236cd395495SRajeev Kumar 	/* De-Initialize all message queues */
237cd395495SRajeev Kumar 	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
238cd395495SRajeev Kumar 		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);
239cd395495SRajeev Kumar 
240ba5d80caSKrunal Soni 	/* Initialize all qid to qidx mapping to invalid values */
241ba5d80caSKrunal Soni 	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
242ba5d80caSKrunal Soni 		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
243ba5d80caSKrunal Soni 					SCHEDULER_NUMBER_OF_MSG_QUEUE;
244ba5d80caSKrunal Soni 
2453149adf5SDustin Brown 	sched_exit();
2463149adf5SDustin Brown 
2473149adf5SDustin Brown 	return QDF_STATUS_SUCCESS;
248cd395495SRajeev Kumar }
249cd395495SRajeev Kumar 
250cd395495SRajeev Kumar void scheduler_mq_put(struct scheduler_mq_type *msg_q,
251c7ee85c4SDustin Brown 		      struct scheduler_msg *msg)
252cd395495SRajeev Kumar {
253cd395495SRajeev Kumar 	qdf_spin_lock_irqsave(&msg_q->mq_lock);
254dae10a5fSDustin Brown 	sched_history_queue(msg_q, msg);
255c7ee85c4SDustin Brown 	qdf_list_insert_back(&msg_q->mq_list, &msg->node);
256cd395495SRajeev Kumar 	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
257cd395495SRajeev Kumar }
258cd395495SRajeev Kumar 
259cd395495SRajeev Kumar void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
260c7ee85c4SDustin Brown 			    struct scheduler_msg *msg)
261cd395495SRajeev Kumar {
262cd395495SRajeev Kumar 	qdf_spin_lock_irqsave(&msg_q->mq_lock);
263dae10a5fSDustin Brown 	sched_history_queue(msg_q, msg);
264c7ee85c4SDustin Brown 	qdf_list_insert_front(&msg_q->mq_list, &msg->node);
265cd395495SRajeev Kumar 	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
266cd395495SRajeev Kumar }
267cd395495SRajeev Kumar 
268c7ee85c4SDustin Brown struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q)
269cd395495SRajeev Kumar {
270c7ee85c4SDustin Brown 	QDF_STATUS status;
271c7ee85c4SDustin Brown 	qdf_list_node_t *node;
272cd395495SRajeev Kumar 
273c7ee85c4SDustin Brown 	qdf_spin_lock_irqsave(&msg_q->mq_lock);
274c7ee85c4SDustin Brown 	status = qdf_list_remove_front(&msg_q->mq_list, &node);
275c7ee85c4SDustin Brown 	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
276c7ee85c4SDustin Brown 
277c7ee85c4SDustin Brown 	if (QDF_IS_STATUS_ERROR(status))
278cd395495SRajeev Kumar 		return NULL;
279cd395495SRajeev Kumar 
280c7ee85c4SDustin Brown 	return qdf_container_of(node, struct scheduler_msg, node);
281cd395495SRajeev Kumar }
282cd395495SRajeev Kumar 
283cd395495SRajeev Kumar QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
284cd395495SRajeev Kumar {
285cd395495SRajeev Kumar 	return scheduler_all_queues_deinit(sched_ctx);
286cd395495SRajeev Kumar }
287cd395495SRajeev Kumar 
288cd395495SRajeev Kumar QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
289cd395495SRajeev Kumar {
2903149adf5SDustin Brown 	QDF_STATUS status;
291cd395495SRajeev Kumar 
2923149adf5SDustin Brown 	sched_enter();
2933149adf5SDustin Brown 
2948afde5a8SDustin Brown 	QDF_BUG(sched_ctx);
2958afde5a8SDustin Brown 	if (!sched_ctx)
296cd395495SRajeev Kumar 		return QDF_STATUS_E_FAILURE;
2973149adf5SDustin Brown 
298cd395495SRajeev Kumar 	status = scheduler_all_queues_init(sched_ctx);
299c7ee85c4SDustin Brown 	if (QDF_IS_STATUS_ERROR(status)) {
300cd395495SRajeev Kumar 		scheduler_all_queues_deinit(sched_ctx);
3013149adf5SDustin Brown 		sched_err("Failed to initialize the msg queues");
302cd395495SRajeev Kumar 		return status;
303cd395495SRajeev Kumar 	}
3043149adf5SDustin Brown 
3053149adf5SDustin Brown 	sched_debug("Queue init passed");
306cd395495SRajeev Kumar 
3073149adf5SDustin Brown 	sched_exit();
3083149adf5SDustin Brown 
3093149adf5SDustin Brown 	return QDF_STATUS_SUCCESS;
310cd395495SRajeev Kumar }
311cd395495SRajeev Kumar 
312c7ee85c4SDustin Brown struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg)
313cd395495SRajeev Kumar {
314c7ee85c4SDustin Brown 	struct scheduler_msg *dup;
315c7ee85c4SDustin Brown 
316c7ee85c4SDustin Brown 	if (qdf_atomic_inc_return(&__sched_queue_depth) >
317c7ee85c4SDustin Brown 	    SCHEDULER_CORE_MAX_MESSAGES)
318c7ee85c4SDustin Brown 		goto buffer_full;
319c7ee85c4SDustin Brown 
320c7ee85c4SDustin Brown 	dup = qdf_flex_mem_alloc(&sched_pool);
321c7ee85c4SDustin Brown 	if (!dup) {
322c7ee85c4SDustin Brown 		sched_err("out of memory");
323c7ee85c4SDustin Brown 		goto dec_queue_count;
324c7ee85c4SDustin Brown 	}
325c7ee85c4SDustin Brown 
326c7ee85c4SDustin Brown 	qdf_mem_copy(dup, msg, sizeof(*dup));
327c7ee85c4SDustin Brown 
3280626a4daSVivek 	qdf_atomic_set(&__sched_dup_fail_count, 0);
3290626a4daSVivek 
330c7ee85c4SDustin Brown 	return dup;
331c7ee85c4SDustin Brown 
332c7ee85c4SDustin Brown buffer_full:
3330626a4daSVivek 	if (qdf_atomic_inc_return(&__sched_dup_fail_count) >
3340626a4daSVivek 	    SCHEDULER_WRAPPER_MAX_FAIL_COUNT)
335fe41df9cSDustin Brown 		QDF_DEBUG_PANIC("Scheduler buffer is full");
336c7ee85c4SDustin Brown 
3370626a4daSVivek 
338c7ee85c4SDustin Brown dec_queue_count:
339c7ee85c4SDustin Brown 	qdf_atomic_dec(&__sched_queue_depth);
340c7ee85c4SDustin Brown 
341c7ee85c4SDustin Brown 	return NULL;
342cd395495SRajeev Kumar }
343cd395495SRajeev Kumar 
344c7ee85c4SDustin Brown void scheduler_core_msg_free(struct scheduler_msg *msg)
345c7ee85c4SDustin Brown {
346c7ee85c4SDustin Brown 	qdf_flex_mem_free(&sched_pool, msg);
347c7ee85c4SDustin Brown 	qdf_atomic_dec(&__sched_queue_depth);
348cd395495SRajeev Kumar }
349cd395495SRajeev Kumar 
/**
 * scheduler_thread_process_queues() - drain all scheduler message queues
 * @sch_ctx: the global scheduler context
 * @shutdown: output; set to true when a shutdown request is observed
 *
 * Drains queues strictly in priority order: after every processed
 * message the scan restarts from queue index 0, so a lower-priority
 * queue is only serviced when all higher-priority queues are empty.
 * Checks for shutdown before each message and handles suspend requests
 * both on the shutdown path and after all queues drain.
 */
static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
					    bool *shutdown)
{
	int i;
	QDF_STATUS status;
	struct scheduler_msg *msg;

	if (!sch_ctx) {
		QDF_DEBUG_PANIC("sch_ctx is null");
		return;
	}

	/* start with highest priority queue : timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* Check if MC needs to shutdown */
		if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK,
					&sch_ctx->sch_event_flag)) {
			sched_debug("scheduler thread signaled to shutdown");
			*shutdown = true;

			/* Check for any Suspend Indication */
			if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag)) {
				/* Unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}

			break;
		}

		msg = scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (!msg) {
			/* check next queue */
			i++;
			continue;
		}

		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			/* record what is running so the watchdog can report
			 * it if the handler stalls
			 */
			sch_ctx->watchdog_msg_type = msg->type;
			sch_ctx->watchdog_callback = msg->callback;

			sched_history_start(msg);
			qdf_timer_start(&sch_ctx->watchdog_timer,
					SCHEDULER_WATCHDOG_TIMEOUT);
			status = sch_ctx->queue_ctx.
					scheduler_msg_process_fn[i](msg);
			qdf_timer_stop(&sch_ctx->watchdog_timer);
			sched_history_stop();

			if (QDF_IS_STATUS_ERROR(status))
				sched_err("Failed processing Qid[%d] message",
					  sch_ctx->queue_ctx.sch_msg_q[i].qid);

			scheduler_core_msg_free(msg);
		}
		/* NOTE(review): if scheduler_msg_process_fn[i] is NULL the
		 * dequeued msg is never freed — possible leak; confirm a
		 * registered handler is guaranteed for every active queue
		 */

		/* start again with highest priority queue at index 0 */
		i = 0;
	}

	/* Check for any Suspend Indication */
	if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
			&sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
		/* Wait for resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}

	return;  /* Nothing to process wait on wait queue */
}
427cd395495SRajeev Kumar 
/**
 * scheduler_thread() - scheduler thread entry point and main loop
 * @arg: the global scheduler context, passed as an opaque pointer
 *
 * Signals sch_start_event once running, then repeatedly sleeps on
 * sch_wait_queue until a post or suspend event is flagged and drains
 * the queues. Exits when scheduler_thread_process_queues() reports a
 * shutdown request, signalling sch_shutdown on the way out.
 *
 * Return: 0 (thread exit status)
 */
int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int retWaitStatus = 0;
	bool shutdown = false;

	if (!arg) {
		QDF_DEBUG_PANIC("arg is null");
		return 0;
	}
	qdf_set_user_nice(current, -2);

	/* Ack back to the context from which the main controller thread
	 * has been created
	 */
	qdf_event_set(&sch_ctx->sch_start_event);
	sched_debug("scheduler thread %d (%s) starting up",
		    current->pid, current->comm);

	while (!shutdown) {
		/* This implements the execution model algorithm */
		retWaitStatus = qdf_wait_queue_interruptible(
					sch_ctx->sch_wait_queue,
					qdf_atomic_test_bit(MC_POST_EVENT_MASK,
						&sch_ctx->sch_event_flag) ||
					qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag));

		/* -ERESTARTSYS means the wait was interrupted by a signal,
		 * which this thread is not expected to receive
		 */
		if (retWaitStatus == -ERESTARTSYS)
			QDF_DEBUG_PANIC("Scheduler received -ERESTARTSYS");

		/* clear the post flag before draining; presumably so a
		 * message posted mid-drain re-wakes the thread
		 */
		qdf_atomic_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* If we get here the scheduler thread must exit */
	sched_debug("Scheduler thread exiting");
	qdf_event_set(&sch_ctx->sch_shutdown);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}
470cd395495SRajeev Kumar 
471c7ee85c4SDustin Brown static void scheduler_flush_single_queue(struct scheduler_mq_type *mq)
472cd395495SRajeev Kumar {
473c7ee85c4SDustin Brown 	struct scheduler_msg *msg;
474c7ee85c4SDustin Brown 	QDF_STATUS (*flush_cb)(struct scheduler_msg *);
475cd395495SRajeev Kumar 
476c7ee85c4SDustin Brown 	while ((msg = scheduler_mq_get(mq))) {
477c7ee85c4SDustin Brown 		if (msg->flush_callback) {
4788afde5a8SDustin Brown 			sched_debug("Calling flush callback; type: %x",
479c7ee85c4SDustin Brown 				    msg->type);
480c7ee85c4SDustin Brown 			flush_cb = msg->flush_callback;
481c7ee85c4SDustin Brown 			flush_cb(msg);
482c7ee85c4SDustin Brown 		} else if (msg->bodyptr) {
4838afde5a8SDustin Brown 			sched_debug("Freeing scheduler msg bodyptr; type: %x",
484c7ee85c4SDustin Brown 				    msg->type);
485c7ee85c4SDustin Brown 			qdf_mem_free(msg->bodyptr);
486cd395495SRajeev Kumar 		}
487cd395495SRajeev Kumar 
488c7ee85c4SDustin Brown 		scheduler_core_msg_free(msg);
4891c6bb033SZhu Jianmin 	}
4901880b6f8SKrunal Soni }
4913149adf5SDustin Brown 
492c7ee85c4SDustin Brown void scheduler_queues_flush(struct scheduler_ctx *sched_ctx)
493c7ee85c4SDustin Brown {
494c7ee85c4SDustin Brown 	struct scheduler_mq_type *mq;
495c7ee85c4SDustin Brown 	int i;
496c7ee85c4SDustin Brown 
4978afde5a8SDustin Brown 	sched_debug("Flushing scheduler message queues");
498c7ee85c4SDustin Brown 
499c7ee85c4SDustin Brown 	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
500c7ee85c4SDustin Brown 		mq = &sched_ctx->queue_ctx.sch_msg_q[i];
501c7ee85c4SDustin Brown 		scheduler_flush_single_queue(mq);
502cd395495SRajeev Kumar 	}
503cd395495SRajeev Kumar }
504c7ee85c4SDustin Brown 
505