xref: /wlan-dirver/qca-wifi-host-cmn/scheduler/src/scheduler_api.c (revision bea437e2293c3d4fb1b5704fcf633aedac996962)
1cd395495SRajeev Kumar /*
2*bea437e2SVivek  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3cd395495SRajeev Kumar  *
4cd395495SRajeev Kumar  * Permission to use, copy, modify, and/or distribute this software for
5cd395495SRajeev Kumar  * any purpose with or without fee is hereby granted, provided that the
6cd395495SRajeev Kumar  * above copyright notice and this permission notice appear in all
7cd395495SRajeev Kumar  * copies.
8cd395495SRajeev Kumar  *
9cd395495SRajeev Kumar  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10cd395495SRajeev Kumar  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11cd395495SRajeev Kumar  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12cd395495SRajeev Kumar  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13cd395495SRajeev Kumar  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14cd395495SRajeev Kumar  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15cd395495SRajeev Kumar  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16cd395495SRajeev Kumar  * PERFORMANCE OF THIS SOFTWARE.
17cd395495SRajeev Kumar  */
18cd395495SRajeev Kumar 
19cd395495SRajeev Kumar #include <scheduler_api.h>
20cd395495SRajeev Kumar #include <scheduler_core.h>
2173c05a80SRajeev Kumar #include <qdf_atomic.h>
2211f5a63aSNaga #include <qdf_module.h>
2373c05a80SRajeev Kumar 
24e0c9f669SDustin Brown QDF_STATUS scheduler_disable(void)
25cd395495SRajeev Kumar {
26e0c9f669SDustin Brown 	struct scheduler_ctx *sched_ctx;
2791abaccbSDustin Brown 
288afde5a8SDustin Brown 	sched_debug("Disabling Scheduler");
29e0c9f669SDustin Brown 
30e0c9f669SDustin Brown 	sched_ctx = scheduler_get_context();
31f4c76f93SDustin Brown 	QDF_BUG(sched_ctx);
328afde5a8SDustin Brown 	if (!sched_ctx)
33f4c76f93SDustin Brown 		return QDF_STATUS_E_INVAL;
34bac753d9SDustin Brown 
35f4c76f93SDustin Brown 	/* send shutdown signal to scheduler thread */
36ef615e76SHouston Hoffman 	qdf_atomic_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
37ef615e76SHouston Hoffman 	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
38cd395495SRajeev Kumar 	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
39bac753d9SDustin Brown 
40f4c76f93SDustin Brown 	/* wait for the scheduler thread to shut down */
41cd395495SRajeev Kumar 	qdf_wait_single_event(&sched_ctx->sch_shutdown, 0);
427b329469SDustin Brown 	sched_ctx->sch_thread = NULL;
43cd395495SRajeev Kumar 
44f4c76f93SDustin Brown 	/* flush any unprocessed scheduler messages */
45c7ee85c4SDustin Brown 	scheduler_queues_flush(sched_ctx);
46cd395495SRajeev Kumar 
47cd395495SRajeev Kumar 	return QDF_STATUS_SUCCESS;
48cd395495SRajeev Kumar }
49cd395495SRajeev Kumar 
50e226cebdSDustin Brown static inline void scheduler_watchdog_notify(struct scheduler_ctx *sched)
51e226cebdSDustin Brown {
52bac753d9SDustin Brown 	char symbol[QDF_SYMBOL_LEN];
53e226cebdSDustin Brown 
54e226cebdSDustin Brown 	if (sched->watchdog_callback)
55e226cebdSDustin Brown 		qdf_sprint_symbol(symbol, sched->watchdog_callback);
56e226cebdSDustin Brown 
57*bea437e2SVivek 	sched_fatal("Callback %s (type 0x%x) exceeded its allotted time of %ds",
58bac753d9SDustin Brown 		    sched->watchdog_callback ? symbol : "<null>",
59*bea437e2SVivek 		    sched->watchdog_msg_type,
60*bea437e2SVivek 		    SCHEDULER_WATCHDOG_TIMEOUT / 1000);
61e226cebdSDustin Brown }
62e226cebdSDustin Brown 
63bac753d9SDustin Brown static void scheduler_watchdog_timeout(void *arg)
64e226cebdSDustin Brown {
65bac753d9SDustin Brown 	struct scheduler_ctx *sched = arg;
66bac753d9SDustin Brown 
67bac753d9SDustin Brown 	scheduler_watchdog_notify(sched);
687b329469SDustin Brown 	if (sched->sch_thread)
697b329469SDustin Brown 		qdf_print_thread_trace(sched->sch_thread);
70bac753d9SDustin Brown 
71bac753d9SDustin Brown 	/* avoid crashing during shutdown */
72ef615e76SHouston Hoffman 	if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK, &sched->sch_event_flag))
73bac753d9SDustin Brown 		return;
74bac753d9SDustin Brown 
758afde5a8SDustin Brown 	QDF_DEBUG_PANIC("Going down for Scheduler Watchdog Bite!");
76e226cebdSDustin Brown }
77e226cebdSDustin Brown 
78e0c9f669SDustin Brown QDF_STATUS scheduler_enable(void)
79cd395495SRajeev Kumar {
80e0c9f669SDustin Brown 	struct scheduler_ctx *sched_ctx;
81f4c76f93SDustin Brown 
828afde5a8SDustin Brown 	sched_debug("Enabling Scheduler");
8391abaccbSDustin Brown 
84e0c9f669SDustin Brown 	sched_ctx = scheduler_get_context();
85f4c76f93SDustin Brown 	QDF_BUG(sched_ctx);
868afde5a8SDustin Brown 	if (!sched_ctx)
87f4c76f93SDustin Brown 		return QDF_STATUS_E_INVAL;
8891abaccbSDustin Brown 
89e0c9f669SDustin Brown 	qdf_atomic_clear_bit(MC_SHUTDOWN_EVENT_MASK,
90e0c9f669SDustin Brown 			     &sched_ctx->sch_event_flag);
91e0c9f669SDustin Brown 	qdf_atomic_clear_bit(MC_POST_EVENT_MASK,
92e0c9f669SDustin Brown 			     &sched_ctx->sch_event_flag);
93e0c9f669SDustin Brown 
94e0c9f669SDustin Brown 	/* create the scheduler thread */
95e0c9f669SDustin Brown 	sched_ctx->sch_thread = qdf_create_thread(scheduler_thread, sched_ctx,
96e0c9f669SDustin Brown 						  "scheduler_thread");
971f55ed1aSChaitanya Kiran Godavarthi 	if (!sched_ctx->sch_thread) {
98*bea437e2SVivek 		sched_fatal("Failed to create scheduler thread");
99e0c9f669SDustin Brown 		return QDF_STATUS_E_RESOURCES;
100e0c9f669SDustin Brown 	}
101e0c9f669SDustin Brown 
1028afde5a8SDustin Brown 	sched_debug("Scheduler thread created");
103e0c9f669SDustin Brown 
104e0c9f669SDustin Brown 	/* wait for the scheduler thread to start up */
105e0c9f669SDustin Brown 	qdf_wake_up_process(sched_ctx->sch_thread);
106e0c9f669SDustin Brown 	qdf_wait_single_event(&sched_ctx->sch_start_event, 0);
107e0c9f669SDustin Brown 
1088afde5a8SDustin Brown 	sched_debug("Scheduler thread started");
109e0c9f669SDustin Brown 
110e0c9f669SDustin Brown 	return QDF_STATUS_SUCCESS;
111e0c9f669SDustin Brown }
112e0c9f669SDustin Brown 
113e0c9f669SDustin Brown QDF_STATUS scheduler_init(void)
114e0c9f669SDustin Brown {
115e0c9f669SDustin Brown 	QDF_STATUS status;
116e0c9f669SDustin Brown 	struct scheduler_ctx *sched_ctx;
117e0c9f669SDustin Brown 
1188afde5a8SDustin Brown 	sched_debug("Initializing Scheduler");
119e0c9f669SDustin Brown 
120e0c9f669SDustin Brown 	status = scheduler_create_ctx();
121e0c9f669SDustin Brown 	if (QDF_IS_STATUS_ERROR(status)) {
122*bea437e2SVivek 		sched_fatal("Failed to create context; status:%d", status);
123e0c9f669SDustin Brown 		return status;
124e0c9f669SDustin Brown 	}
125e0c9f669SDustin Brown 
126e0c9f669SDustin Brown 	sched_ctx = scheduler_get_context();
127e0c9f669SDustin Brown 	QDF_BUG(sched_ctx);
128e0c9f669SDustin Brown 	if (!sched_ctx) {
129e0c9f669SDustin Brown 		status = QDF_STATUS_E_FAILURE;
130e0c9f669SDustin Brown 		goto ctx_destroy;
131e0c9f669SDustin Brown 	}
132e0c9f669SDustin Brown 
133e0c9f669SDustin Brown 	status = scheduler_queues_init(sched_ctx);
134e0c9f669SDustin Brown 	if (QDF_IS_STATUS_ERROR(status)) {
135*bea437e2SVivek 		sched_fatal("Failed to init queues; status:%d", status);
136e0c9f669SDustin Brown 		goto ctx_destroy;
137e0c9f669SDustin Brown 	}
138e0c9f669SDustin Brown 
139f4c76f93SDustin Brown 	status = qdf_event_create(&sched_ctx->sch_start_event);
140f4c76f93SDustin Brown 	if (QDF_IS_STATUS_ERROR(status)) {
141*bea437e2SVivek 		sched_fatal("Failed to create start event; status:%d", status);
142e0c9f669SDustin Brown 		goto queues_deinit;
143f4c76f93SDustin Brown 	}
144f4c76f93SDustin Brown 
145f4c76f93SDustin Brown 	status = qdf_event_create(&sched_ctx->sch_shutdown);
146f4c76f93SDustin Brown 	if (QDF_IS_STATUS_ERROR(status)) {
147*bea437e2SVivek 		sched_fatal("Failed to create shutdown event; status:%d",
148*bea437e2SVivek 			    status);
149f4c76f93SDustin Brown 		goto start_event_destroy;
150f4c76f93SDustin Brown 	}
151f4c76f93SDustin Brown 
152f4c76f93SDustin Brown 	status = qdf_event_create(&sched_ctx->resume_sch_event);
153f4c76f93SDustin Brown 	if (QDF_IS_STATUS_ERROR(status)) {
154*bea437e2SVivek 		sched_fatal("Failed to create resume event; status:%d", status);
155f4c76f93SDustin Brown 		goto shutdown_event_destroy;
156f4c76f93SDustin Brown 	}
157f4c76f93SDustin Brown 
158cd395495SRajeev Kumar 	qdf_spinlock_create(&sched_ctx->sch_thread_lock);
159cd395495SRajeev Kumar 	qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue);
160cd395495SRajeev Kumar 	sched_ctx->sch_event_flag = 0;
161e226cebdSDustin Brown 	qdf_timer_init(NULL,
162e226cebdSDustin Brown 		       &sched_ctx->watchdog_timer,
163bac753d9SDustin Brown 		       &scheduler_watchdog_timeout,
164e226cebdSDustin Brown 		       sched_ctx,
165e226cebdSDustin Brown 		       QDF_TIMER_TYPE_SW);
166e226cebdSDustin Brown 
167e0c9f669SDustin Brown 	qdf_register_mc_timer_callback(scheduler_mc_timer_callback);
16891abaccbSDustin Brown 
169cd395495SRajeev Kumar 	return QDF_STATUS_SUCCESS;
170f4c76f93SDustin Brown 
171f4c76f93SDustin Brown shutdown_event_destroy:
172f4c76f93SDustin Brown 	qdf_event_destroy(&sched_ctx->sch_shutdown);
173f4c76f93SDustin Brown 
174f4c76f93SDustin Brown start_event_destroy:
175f4c76f93SDustin Brown 	qdf_event_destroy(&sched_ctx->sch_start_event);
176f4c76f93SDustin Brown 
177f4c76f93SDustin Brown queues_deinit:
178f4c76f93SDustin Brown 	scheduler_queues_deinit(sched_ctx);
179f4c76f93SDustin Brown 
180f4c76f93SDustin Brown ctx_destroy:
181f4c76f93SDustin Brown 	scheduler_destroy_ctx();
182f4c76f93SDustin Brown 
183f4c76f93SDustin Brown 	return status;
184cd395495SRajeev Kumar }
185cd395495SRajeev Kumar 
186cd395495SRajeev Kumar QDF_STATUS scheduler_deinit(void)
187cd395495SRajeev Kumar {
188e0c9f669SDustin Brown 	QDF_STATUS status;
189e0c9f669SDustin Brown 	struct scheduler_ctx *sched_ctx;
190cd395495SRajeev Kumar 
1918afde5a8SDustin Brown 	sched_debug("Deinitializing Scheduler");
19291abaccbSDustin Brown 
193e0c9f669SDustin Brown 	sched_ctx = scheduler_get_context();
194e0c9f669SDustin Brown 	QDF_BUG(sched_ctx);
1958afde5a8SDustin Brown 	if (!sched_ctx)
196e0c9f669SDustin Brown 		return QDF_STATUS_E_INVAL;
19791abaccbSDustin Brown 
198e0c9f669SDustin Brown 	qdf_timer_free(&sched_ctx->watchdog_timer);
199e0c9f669SDustin Brown 	qdf_spinlock_destroy(&sched_ctx->sch_thread_lock);
200e0c9f669SDustin Brown 	qdf_event_destroy(&sched_ctx->resume_sch_event);
201e0c9f669SDustin Brown 	qdf_event_destroy(&sched_ctx->sch_shutdown);
202e0c9f669SDustin Brown 	qdf_event_destroy(&sched_ctx->sch_start_event);
203f4c76f93SDustin Brown 
204e0c9f669SDustin Brown 	status = scheduler_queues_deinit(sched_ctx);
205e0c9f669SDustin Brown 	if (QDF_IS_STATUS_ERROR(status))
206e0c9f669SDustin Brown 		sched_err("Failed to deinit queues; status:%d", status);
207e0c9f669SDustin Brown 
208e0c9f669SDustin Brown 	status = scheduler_destroy_ctx();
209e0c9f669SDustin Brown 	if (QDF_IS_STATUS_ERROR(status))
210e0c9f669SDustin Brown 		sched_err("Failed to destroy context; status:%d", status);
211e0c9f669SDustin Brown 
212e0c9f669SDustin Brown 	return QDF_STATUS_SUCCESS;
213cd395495SRajeev Kumar }
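
/*
 * Usage sketch (illustrative only; not part of the original file): the
 * expected bring-up/tear-down ordering of the APIs above, assuming the
 * usual driver init and deinit call sites.
 *
 *	if (QDF_IS_STATUS_ERROR(scheduler_init()))
 *		return;
 *	scheduler_enable();
 *	...post and process scheduler messages...
 *	scheduler_disable();
 *	scheduler_deinit();
 *
 * scheduler_enable() spawns and wakes "scheduler_thread"; scheduler_disable()
 * signals shutdown, waits for the thread to exit and flushes any unprocessed
 * messages; scheduler_deinit() then destroys the events, the watchdog timer
 * and the scheduler context.
 */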
214cd395495SRajeev Kumar 
2156e4b9c54Sgaurank kathpalia QDF_STATUS scheduler_post_msg_by_priority(uint32_t qid,
216c7ee85c4SDustin Brown 					  struct scheduler_msg *msg,
217c7ee85c4SDustin Brown 					  bool is_high_priority)
218cd395495SRajeev Kumar {
219cd395495SRajeev Kumar 	uint8_t qidx;
220c7ee85c4SDustin Brown 	struct scheduler_mq_type *target_mq;
221c7ee85c4SDustin Brown 	struct scheduler_msg *queue_msg;
222c7ee85c4SDustin Brown 	struct scheduler_ctx *sched_ctx;
2236e4b9c54Sgaurank kathpalia 	uint16_t src_id;
2246e4b9c54Sgaurank kathpalia 	uint16_t dest_id;
225302a1d97Sgaurank kathpalia 	uint16_t que_id;
226cd395495SRajeev Kumar 
2278afde5a8SDustin Brown 	QDF_BUG(msg);
2288afde5a8SDustin Brown 	if (!msg)
229d2cd9eabSDustin Brown 		return QDF_STATUS_E_INVAL;
230d2cd9eabSDustin Brown 
231c7ee85c4SDustin Brown 	sched_ctx = scheduler_get_context();
2328afde5a8SDustin Brown 	QDF_BUG(sched_ctx);
2338afde5a8SDustin Brown 	if (!sched_ctx)
234d2cd9eabSDustin Brown 		return QDF_STATUS_E_INVAL;
235d2cd9eabSDustin Brown 
236d2cd9eabSDustin Brown 	if (!sched_ctx->sch_thread) {
237d2cd9eabSDustin Brown 		sched_err("Cannot post message; scheduler thread is stopped");
238cd395495SRajeev Kumar 		return QDF_STATUS_E_FAILURE;
239cd395495SRajeev Kumar 	}
240cd395495SRajeev Kumar 
241c7ee85c4SDustin Brown 	if (msg->reserved != 0 && msg->reserved != SYS_MSG_COOKIE) {
242fe41df9cSDustin Brown 		QDF_DEBUG_PANIC("Scheduler messages must be initialized");
243afc63bc8SRajeev Kumar 		return QDF_STATUS_E_FAILURE;
244afc63bc8SRajeev Kumar 	}
245afc63bc8SRajeev Kumar 
2466e4b9c54Sgaurank kathpalia 	dest_id = scheduler_get_dest_id(qid);
2476e4b9c54Sgaurank kathpalia 	src_id = scheduler_get_src_id(qid);
248302a1d97Sgaurank kathpalia 	que_id = scheduler_get_que_id(qid);
2496e4b9c54Sgaurank kathpalia 
250302a1d97Sgaurank kathpalia 	if (que_id >= QDF_MODULE_ID_MAX || src_id >= QDF_MODULE_ID_MAX ||
251302a1d97Sgaurank kathpalia 	    dest_id >= QDF_MODULE_ID_MAX) {
2526e4b9c54Sgaurank kathpalia 		sched_err("Invalid src/dest/queue id; cannot post message");
2536e4b9c54Sgaurank kathpalia 		return QDF_STATUS_E_FAILURE;
2546e4b9c54Sgaurank kathpalia 	}
255cd395495SRajeev Kumar 	/* Target_If is a special message queue in phase 3 convergence because
256cd395495SRajeev Kumar 	 * it is used both by legacy WMA and by new UMAC components, which
257cd395495SRajeev Kumar 	 * populate callback handlers directly in the message body.
258cd395495SRajeev Kumar 	 * 1) Legacy WMA messages must not carry a callback
259cd395495SRajeev Kumar 	 * 2) New target_if messages must carry a valid callback
260cd395495SRajeev Kumar 	 * Clear the callback handler for legacy WMA messages so that a
261cd395495SRajeev Kumar 	 * legacy WMA message posted from the stack with an uninitialized
262cd395495SRajeev Kumar 	 * callback is still handled properly. Also remap the legacy WMA
263cd395495SRajeev Kumar 	 * queue id to the target_if queue so that messages are always
264cd395495SRajeev Kumar 	 * handled in the right order.
265cd395495SRajeev Kumar 	 */
266302a1d97Sgaurank kathpalia 	if (QDF_MODULE_ID_WMA == que_id) {
267c7ee85c4SDustin Brown 		msg->callback = NULL;
268cd395495SRajeev Kumar 		/* change legacy WMA message id to new target_if mq id */
269302a1d97Sgaurank kathpalia 		que_id = QDF_MODULE_ID_TARGET_IF;
270cd395495SRajeev Kumar 	}
2715e652ebbSgaurank kathpalia 	qdf_mtrace(src_id, dest_id, msg->type, 0xFF, 0);
272cd395495SRajeev Kumar 
273302a1d97Sgaurank kathpalia 	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[que_id];
274cd395495SRajeev Kumar 	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
27591abaccbSDustin Brown 		sched_err("Scheduler is deinitialized; ignoring msg");
276cd395495SRajeev Kumar 		return QDF_STATUS_E_FAILURE;
277cd395495SRajeev Kumar 	}
27891abaccbSDustin Brown 
279cd395495SRajeev Kumar 	if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) {
280302a1d97Sgaurank kathpalia 		QDF_DEBUG_PANIC("callback not registered for qid[%d]", que_id);
281cd395495SRajeev Kumar 		return QDF_STATUS_E_FAILURE;
282cd395495SRajeev Kumar 	}
28391abaccbSDustin Brown 
284cd395495SRajeev Kumar 	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);
285cd395495SRajeev Kumar 
286c7ee85c4SDustin Brown 	queue_msg = scheduler_core_msg_dup(msg);
287c7ee85c4SDustin Brown 	if (!queue_msg)
288c7ee85c4SDustin Brown 		return QDF_STATUS_E_NOMEM;
289cd395495SRajeev Kumar 
290cd395495SRajeev Kumar 	if (is_high_priority)
291c7ee85c4SDustin Brown 		scheduler_mq_put_front(target_mq, queue_msg);
292cd395495SRajeev Kumar 	else
293c7ee85c4SDustin Brown 		scheduler_mq_put(target_mq, queue_msg);
294cd395495SRajeev Kumar 
295ef615e76SHouston Hoffman 	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
296cd395495SRajeev Kumar 	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
297cd395495SRajeev Kumar 
298cd395495SRajeev Kumar 	return QDF_STATUS_SUCCESS;
299cd395495SRajeev Kumar }
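
/*
 * Example (illustrative sketch): posting a pre-initialized message to the
 * SYS queue, mirroring what scheduler_mc_timer_callback() does below. The
 * handler and context names (my_handler, my_ctx, my_flush_cb) are
 * hypothetical; the field names match struct scheduler_msg as used in
 * this file.
 *
 *	struct scheduler_msg msg = {0};
 *	QDF_STATUS status;
 *
 *	msg.type = SYS_MSG_ID_MC_TIMER;
 *	msg.reserved = SYS_MSG_COOKIE;
 *	msg.callback = my_handler;
 *	msg.bodyptr = my_ctx;
 *	msg.bodyval = 0;
 *	msg.flush_callback = my_flush_cb;
 *
 *	status = scheduler_post_message(QDF_MODULE_ID_SCHEDULER,
 *					QDF_MODULE_ID_SCHEDULER,
 *					QDF_MODULE_ID_SYS, &msg);
 *
 * msg.reserved must be 0 or SYS_MSG_COOKIE, otherwise the post triggers the
 * QDF_DEBUG_PANIC() above; the flush callback should not free bodyptr when
 * the pointer is not owned by the scheduler (see scheduler_msg_flush_noop()).
 */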
300cd395495SRajeev Kumar 
301cd395495SRajeev Kumar QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid,
302cd395495SRajeev Kumar 				     scheduler_msg_process_fn_t callback)
303cd395495SRajeev Kumar {
304cd395495SRajeev Kumar 	struct scheduler_mq_ctx *ctx;
305cd395495SRajeev Kumar 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
306cd395495SRajeev Kumar 
30791abaccbSDustin Brown 	sched_enter();
30891abaccbSDustin Brown 
3098afde5a8SDustin Brown 	QDF_BUG(sched_ctx);
3108afde5a8SDustin Brown 	if (!sched_ctx)
311cd395495SRajeev Kumar 		return QDF_STATUS_E_FAILURE;
312cd395495SRajeev Kumar 
313cd395495SRajeev Kumar 	if (sched_ctx->sch_last_qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
31491abaccbSDustin Brown 		sched_err("Already registered the maximum of %d message queues",
315cd395495SRajeev Kumar 			  SCHEDULER_NUMBER_OF_MSG_QUEUE);
316cd395495SRajeev Kumar 		return QDF_STATUS_E_FAILURE;
317cd395495SRajeev Kumar 	}
318cd395495SRajeev Kumar 
319cd395495SRajeev Kumar 	ctx = &sched_ctx->queue_ctx;
320cd395495SRajeev Kumar 	ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx;
321cd395495SRajeev Kumar 	ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid;
322cd395495SRajeev Kumar 	ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback;
323cd395495SRajeev Kumar 	sched_ctx->sch_last_qidx++;
32491abaccbSDustin Brown 
32591abaccbSDustin Brown 	sched_exit();
32691abaccbSDustin Brown 
327cd395495SRajeev Kumar 	return QDF_STATUS_SUCCESS;
328cd395495SRajeev Kumar }
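
/*
 * Example (sketch): registering a message-queue handler for a component.
 * The handler must match scheduler_msg_process_fn_t, i.e. take a
 * struct scheduler_msg * and return a QDF_STATUS; the module id and
 * handler name below are illustrative.
 *
 *	static QDF_STATUS my_component_mq_handler(struct scheduler_msg *msg)
 *	{
 *		...dispatch on msg->type...
 *		return QDF_STATUS_SUCCESS;
 *	}
 *
 *	scheduler_register_module(QDF_MODULE_ID_OS_IF,
 *				  my_component_mq_handler);
 *
 * scheduler_deregister_module() undoes the registration on teardown.
 */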
329cd395495SRajeev Kumar 
330cd395495SRajeev Kumar QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid)
331cd395495SRajeev Kumar {
332cd395495SRajeev Kumar 	struct scheduler_mq_ctx *ctx;
333cd395495SRajeev Kumar 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
334cd395495SRajeev Kumar 	uint8_t qidx;
335cd395495SRajeev Kumar 
33691abaccbSDustin Brown 	sched_enter();
33791abaccbSDustin Brown 
3388afde5a8SDustin Brown 	QDF_BUG(sched_ctx);
3398afde5a8SDustin Brown 	if (!sched_ctx)
340cd395495SRajeev Kumar 		return QDF_STATUS_E_FAILURE;
34191abaccbSDustin Brown 
342cd395495SRajeev Kumar 	ctx = &sched_ctx->queue_ctx;
343cd395495SRajeev Kumar 	qidx = ctx->scheduler_msg_qid_to_qidx[qid];
344cd395495SRajeev Kumar 	ctx->scheduler_msg_process_fn[qidx] = NULL;
34558cac671SYuanyuan Liu 	sched_ctx->sch_last_qidx--;
346cd395495SRajeev Kumar 	ctx->scheduler_msg_qid_to_qidx[qidx] = SCHEDULER_NUMBER_OF_MSG_QUEUE;
34791abaccbSDustin Brown 
34891abaccbSDustin Brown 	sched_exit();
34991abaccbSDustin Brown 
350cd395495SRajeev Kumar 	return QDF_STATUS_SUCCESS;
351cd395495SRajeev Kumar }
352cd395495SRajeev Kumar 
353cd395495SRajeev Kumar void scheduler_resume(void)
354cd395495SRajeev Kumar {
355cd395495SRajeev Kumar 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
356cd395495SRajeev Kumar 
357cd395495SRajeev Kumar 	if (sched_ctx)
358cd395495SRajeev Kumar 		qdf_event_set(&sched_ctx->resume_sch_event);
359cd395495SRajeev Kumar }
360cd395495SRajeev Kumar 
361cd395495SRajeev Kumar void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback)
362cd395495SRajeev Kumar {
363cd395495SRajeev Kumar 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
364cd395495SRajeev Kumar 
365cd395495SRajeev Kumar 	if (sched_ctx)
366cd395495SRajeev Kumar 		sched_ctx->hdd_callback = callback;
367cd395495SRajeev Kumar }
368cd395495SRajeev Kumar void scheduler_wake_up_controller_thread(void)
369cd395495SRajeev Kumar {
370cd395495SRajeev Kumar 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
371cd395495SRajeev Kumar 
372cd395495SRajeev Kumar 	if (sched_ctx)
373cd395495SRajeev Kumar 		qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
374cd395495SRajeev Kumar }
375cd395495SRajeev Kumar void scheduler_set_event_mask(uint32_t event_mask)
376cd395495SRajeev Kumar {
377cd395495SRajeev Kumar 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
378cd395495SRajeev Kumar 
379cd395495SRajeev Kumar 	if (sched_ctx)
380ef615e76SHouston Hoffman 		qdf_atomic_set_bit(event_mask, &sched_ctx->sch_event_flag);
381cd395495SRajeev Kumar }
382cd395495SRajeev Kumar 
383cd395495SRajeev Kumar void scheduler_clear_event_mask(uint32_t event_mask)
384cd395495SRajeev Kumar {
385cd395495SRajeev Kumar 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
386cd395495SRajeev Kumar 
387cd395495SRajeev Kumar 	if (sched_ctx)
388ef615e76SHouston Hoffman 		qdf_atomic_clear_bit(event_mask, &sched_ctx->sch_event_flag);
389cd395495SRajeev Kumar }
390cd395495SRajeev Kumar 
391cd395495SRajeev Kumar QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg)
392cd395495SRajeev Kumar {
393cd395495SRajeev Kumar 	QDF_STATUS status;
394cd395495SRajeev Kumar 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
395cd395495SRajeev Kumar 	QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *);
396cd395495SRajeev Kumar 
3978afde5a8SDustin Brown 	QDF_BUG(msg);
3988afde5a8SDustin Brown 	if (!msg)
399cd395495SRajeev Kumar 		return QDF_STATUS_E_FAILURE;
4008afde5a8SDustin Brown 
4018afde5a8SDustin Brown 	QDF_BUG(sched_ctx);
4028afde5a8SDustin Brown 	if (!sched_ctx)
4038afde5a8SDustin Brown 		return QDF_STATUS_E_FAILURE;
404cd395495SRajeev Kumar 
405cd395495SRajeev Kumar 	target_if_msg_handler = msg->callback;
406cd395495SRajeev Kumar 
407cd395495SRajeev Kumar 	/* Target_If is a special message queue in phase 3 convergence because
408cd395495SRajeev Kumar 	 * it is used both by legacy WMA and by new UMAC components. New UMAC
409cd395495SRajeev Kumar 	 * components pass their message handlers directly as the callback in
410cd395495SRajeev Kumar 	 * the message body.
411cd395495SRajeev Kumar 	 * 1) Legacy WMA messages carry no callback, so invoke the registered
412cd395495SRajeev Kumar 	 *    legacy WMA handler. The scheduler message posting APIs ensure
413cd395495SRajeev Kumar 	 *    that legacy WMA messages do not have callbacks.
414cd395495SRajeev Kumar 	 * 2) For new messages that carry a valid callback, invoke that
415cd395495SRajeev Kumar 	 *    callback directly.
416cd395495SRajeev Kumar 	 */
4178afde5a8SDustin Brown 	if (!target_if_msg_handler)
418cd395495SRajeev Kumar 		status = sched_ctx->legacy_wma_handler(msg);
419cd395495SRajeev Kumar 	else
420cd395495SRajeev Kumar 		status = target_if_msg_handler(msg);
421cd395495SRajeev Kumar 
422cd395495SRajeev Kumar 	return status;
423cd395495SRajeev Kumar }
424cd395495SRajeev Kumar 
425cd395495SRajeev Kumar QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg)
426cd395495SRajeev Kumar {
427cd395495SRajeev Kumar 	QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *);
428cd395495SRajeev Kumar 
4298afde5a8SDustin Brown 	QDF_BUG(msg);
4308afde5a8SDustin Brown 	if (!msg)
431cd395495SRajeev Kumar 		return QDF_STATUS_E_FAILURE;
432cd395495SRajeev Kumar 
433cd395495SRajeev Kumar 	os_if_msg_handler = msg->callback;
434cd395495SRajeev Kumar 
4358afde5a8SDustin Brown 	QDF_BUG(os_if_msg_handler);
4368afde5a8SDustin Brown 	if (!os_if_msg_handler)
437cd395495SRajeev Kumar 		return QDF_STATUS_E_FAILURE;
4388afde5a8SDustin Brown 
439cd395495SRajeev Kumar 	os_if_msg_handler(msg);
440cd395495SRajeev Kumar 
441cd395495SRajeev Kumar 	return QDF_STATUS_SUCCESS;
442cd395495SRajeev Kumar }
443cd395495SRajeev Kumar 
444cd395495SRajeev Kumar QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg)
445cd395495SRajeev Kumar {
446cd395495SRajeev Kumar 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
4478afde5a8SDustin Brown 	qdf_mc_timer_callback_t timer_callback;
448cd395495SRajeev Kumar 
4498afde5a8SDustin Brown 	QDF_BUG(msg);
4508afde5a8SDustin Brown 	if (!msg)
451cd395495SRajeev Kumar 		return QDF_STATUS_E_FAILURE;
452cd395495SRajeev Kumar 
4538afde5a8SDustin Brown 	QDF_BUG(sched_ctx);
4548afde5a8SDustin Brown 	if (!sched_ctx)
4558afde5a8SDustin Brown 		return QDF_STATUS_E_FAILURE;
456cd395495SRajeev Kumar 
4578afde5a8SDustin Brown 	/* legacy sys message handler? */
4588afde5a8SDustin Brown 	if (msg->reserved != SYS_MSG_COOKIE || msg->type != SYS_MSG_ID_MC_TIMER)
4598afde5a8SDustin Brown 		return sched_ctx->legacy_sys_handler(msg);
46091abaccbSDustin Brown 
4618afde5a8SDustin Brown 	timer_callback = msg->callback;
4628afde5a8SDustin Brown 	QDF_BUG(timer_callback);
4638afde5a8SDustin Brown 	if (!timer_callback)
4648afde5a8SDustin Brown 		return QDF_STATUS_E_FAILURE;
46591abaccbSDustin Brown 
4668afde5a8SDustin Brown 	timer_callback(msg->bodyptr);
4678afde5a8SDustin Brown 
4688afde5a8SDustin Brown 	return QDF_STATUS_SUCCESS;
469cd395495SRajeev Kumar }
470cd395495SRajeev Kumar 
4716e2fed8fSSantosh Anbu QDF_STATUS scheduler_mlme_mq_handler(struct scheduler_msg *msg)
4726e2fed8fSSantosh Anbu {
4736e2fed8fSSantosh Anbu 	scheduler_msg_process_fn_t mlme_msg_handler;
4746e2fed8fSSantosh Anbu 
4756e2fed8fSSantosh Anbu 	QDF_BUG(msg);
4766e2fed8fSSantosh Anbu 	if (!msg)
4776e2fed8fSSantosh Anbu 		return QDF_STATUS_E_FAILURE;
4786e2fed8fSSantosh Anbu 
4796e2fed8fSSantosh Anbu 	mlme_msg_handler = msg->callback;
4806e2fed8fSSantosh Anbu 
4816e2fed8fSSantosh Anbu 	QDF_BUG(mlme_msg_handler);
4826e2fed8fSSantosh Anbu 	if (!mlme_msg_handler)
4836e2fed8fSSantosh Anbu 		return QDF_STATUS_E_FAILURE;
4846e2fed8fSSantosh Anbu 
4856e2fed8fSSantosh Anbu 	mlme_msg_handler(msg);
4866e2fed8fSSantosh Anbu 
4876e2fed8fSSantosh Anbu 	return QDF_STATUS_SUCCESS;
4886e2fed8fSSantosh Anbu }
4896e2fed8fSSantosh Anbu 
4906ecd284eSVignesh Viswanathan QDF_STATUS scheduler_scan_mq_handler(struct scheduler_msg *msg)
4916ecd284eSVignesh Viswanathan {
4926ecd284eSVignesh Viswanathan 	QDF_STATUS (*scan_q_msg_handler)(struct scheduler_msg *);
4936ecd284eSVignesh Viswanathan 
4948afde5a8SDustin Brown 	QDF_BUG(msg);
4958afde5a8SDustin Brown 	if (!msg)
4966ecd284eSVignesh Viswanathan 		return QDF_STATUS_E_FAILURE;
4976ecd284eSVignesh Viswanathan 
4986ecd284eSVignesh Viswanathan 	scan_q_msg_handler = msg->callback;
4996ecd284eSVignesh Viswanathan 
5008afde5a8SDustin Brown 	QDF_BUG(scan_q_msg_handler);
5018afde5a8SDustin Brown 	if (!scan_q_msg_handler)
5026ecd284eSVignesh Viswanathan 		return QDF_STATUS_E_FAILURE;
5038afde5a8SDustin Brown 
5046ecd284eSVignesh Viswanathan 	scan_q_msg_handler(msg);
5056ecd284eSVignesh Viswanathan 
5066ecd284eSVignesh Viswanathan 	return QDF_STATUS_SUCCESS;
5076ecd284eSVignesh Viswanathan }
5086ecd284eSVignesh Viswanathan 
509cd395495SRajeev Kumar QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
510cd395495SRajeev Kumar 						wma_callback)
511cd395495SRajeev Kumar {
512cd395495SRajeev Kumar 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
513cd395495SRajeev Kumar 
5148afde5a8SDustin Brown 	QDF_BUG(sched_ctx);
5158afde5a8SDustin Brown 	if (!sched_ctx)
516cd395495SRajeev Kumar 		return QDF_STATUS_E_FAILURE;
517cd395495SRajeev Kumar 
518cd395495SRajeev Kumar 	sched_ctx->legacy_wma_handler = wma_callback;
519cd395495SRajeev Kumar 
520cd395495SRajeev Kumar 	return QDF_STATUS_SUCCESS;
521cd395495SRajeev Kumar }
522cd395495SRajeev Kumar 
523cd395495SRajeev Kumar QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
524cd395495SRajeev Kumar 						sys_callback)
525cd395495SRajeev Kumar {
526cd395495SRajeev Kumar 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
527cd395495SRajeev Kumar 
5288afde5a8SDustin Brown 	QDF_BUG(sched_ctx);
5298afde5a8SDustin Brown 	if (!sched_ctx)
530cd395495SRajeev Kumar 		return QDF_STATUS_E_FAILURE;
531cd395495SRajeev Kumar 
532cd395495SRajeev Kumar 	sched_ctx->legacy_sys_handler = sys_callback;
533cd395495SRajeev Kumar 
534cd395495SRajeev Kumar 	return QDF_STATUS_SUCCESS;
535cd395495SRajeev Kumar }
536cd395495SRajeev Kumar 
537dce49ecfSKrunal Soni QDF_STATUS scheduler_deregister_wma_legacy_handler(void)
538dce49ecfSKrunal Soni {
539dce49ecfSKrunal Soni 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
540dce49ecfSKrunal Soni 
5418afde5a8SDustin Brown 	QDF_BUG(sched_ctx);
5428afde5a8SDustin Brown 	if (!sched_ctx)
543dce49ecfSKrunal Soni 		return QDF_STATUS_E_FAILURE;
544dce49ecfSKrunal Soni 
545dce49ecfSKrunal Soni 	sched_ctx->legacy_wma_handler = NULL;
546dce49ecfSKrunal Soni 
547dce49ecfSKrunal Soni 	return QDF_STATUS_SUCCESS;
548dce49ecfSKrunal Soni }
549dce49ecfSKrunal Soni 
550dce49ecfSKrunal Soni QDF_STATUS scheduler_deregister_sys_legacy_handler(void)
551dce49ecfSKrunal Soni {
552dce49ecfSKrunal Soni 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
553dce49ecfSKrunal Soni 
5548afde5a8SDustin Brown 	QDF_BUG(sched_ctx);
5558afde5a8SDustin Brown 	if (!sched_ctx)
556dce49ecfSKrunal Soni 		return QDF_STATUS_E_FAILURE;
557dce49ecfSKrunal Soni 
558dce49ecfSKrunal Soni 	sched_ctx->legacy_sys_handler = NULL;
559dce49ecfSKrunal Soni 
560dce49ecfSKrunal Soni 	return QDF_STATUS_SUCCESS;
561dce49ecfSKrunal Soni }
562dce49ecfSKrunal Soni 
563c7ee85c4SDustin Brown static QDF_STATUS scheduler_msg_flush_noop(struct scheduler_msg *msg)
564c7ee85c4SDustin Brown {
565c7ee85c4SDustin Brown 	return QDF_STATUS_SUCCESS;
566c7ee85c4SDustin Brown }
567c7ee85c4SDustin Brown 
568ad85c389SAshish Kumar Dhanotiya void scheduler_mc_timer_callback(qdf_mc_timer_t *timer)
569cd395495SRajeev Kumar {
570afc63bc8SRajeev Kumar 	struct scheduler_msg msg = {0};
571cd395495SRajeev Kumar 	QDF_STATUS status;
572cd395495SRajeev Kumar 
573cd395495SRajeev Kumar 	qdf_mc_timer_callback_t callback = NULL;
574cd395495SRajeev Kumar 	void *user_data = NULL;
575cd395495SRajeev Kumar 	QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW;
576cd395495SRajeev Kumar 
5778afde5a8SDustin Brown 	QDF_BUG(timer);
5788afde5a8SDustin Brown 	if (!timer)
579cd395495SRajeev Kumar 		return;
580cd395495SRajeev Kumar 
581cd395495SRajeev Kumar 	qdf_spin_lock_irqsave(&timer->platform_info.spinlock);
582cd395495SRajeev Kumar 
583cd395495SRajeev Kumar 	switch (timer->state) {
584cd395495SRajeev Kumar 	case QDF_TIMER_STATE_STARTING:
585cd395495SRajeev Kumar 		/* we are in this state because someone just started the timer;
586cd395495SRajeev Kumar 		 * the MC timer fired and expired, but the timer content has
587cd395495SRajeev Kumar 		 * not been updated yet. This is a rare race condition!
588cd395495SRajeev Kumar 		 */
589cd395495SRajeev Kumar 		timer->state = QDF_TIMER_STATE_STOPPED;
590cd395495SRajeev Kumar 		status = QDF_STATUS_E_ALREADY;
591cd395495SRajeev Kumar 		break;
592cd395495SRajeev Kumar 
593cd395495SRajeev Kumar 	case QDF_TIMER_STATE_STOPPED:
594cd395495SRajeev Kumar 		status = QDF_STATUS_E_ALREADY;
595cd395495SRajeev Kumar 		break;
596cd395495SRajeev Kumar 
597cd395495SRajeev Kumar 	case QDF_TIMER_STATE_UNUSED:
598cd395495SRajeev Kumar 		status = QDF_STATUS_E_EXISTS;
599cd395495SRajeev Kumar 		break;
600cd395495SRajeev Kumar 
601cd395495SRajeev Kumar 	case QDF_TIMER_STATE_RUNNING:
602cd395495SRajeev Kumar 		/* need to go to the stopped state here because the callback
603cd395495SRajeev Kumar 		 * may restart the timer (to emulate a periodic timer)
604cd395495SRajeev Kumar 		 */
605cd395495SRajeev Kumar 		timer->state = QDF_TIMER_STATE_STOPPED;
606cd395495SRajeev Kumar 		/* copy the relevant timer information to local variables;
607cd395495SRajeev Kumar 		 * once we exit this critical section, the timer content
608cd395495SRajeev Kumar 		 * may be modified by other tasks
609cd395495SRajeev Kumar 		 */
610cd395495SRajeev Kumar 		callback = timer->callback;
611cd395495SRajeev Kumar 		user_data = timer->user_data;
612cd395495SRajeev Kumar 		type = timer->type;
613cd395495SRajeev Kumar 		status = QDF_STATUS_SUCCESS;
614cd395495SRajeev Kumar 		break;
615cd395495SRajeev Kumar 
616cd395495SRajeev Kumar 	default:
617cd395495SRajeev Kumar 		QDF_ASSERT(0);
618cd395495SRajeev Kumar 		status = QDF_STATUS_E_FAULT;
619cd395495SRajeev Kumar 		break;
620cd395495SRajeev Kumar 	}
621cd395495SRajeev Kumar 
622cd395495SRajeev Kumar 	qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);
623cd395495SRajeev Kumar 
6248afde5a8SDustin Brown 	if (QDF_IS_STATUS_ERROR(status)) {
6258afde5a8SDustin Brown 		sched_debug("MC timer fired but is not running; skip callback");
626cd395495SRajeev Kumar 		return;
627cd395495SRajeev Kumar 	}
628cd395495SRajeev Kumar 
629cd395495SRajeev Kumar 	qdf_try_allowing_sleep(type);
630cd395495SRajeev Kumar 
6318afde5a8SDustin Brown 	QDF_BUG(callback);
6328afde5a8SDustin Brown 	if (!callback)
633cd395495SRajeev Kumar 		return;
634cd395495SRajeev Kumar 
635cd395495SRajeev Kumar 	/* serialize to scheduler controller thread */
636cd395495SRajeev Kumar 	msg.type = SYS_MSG_ID_MC_TIMER;
637cd395495SRajeev Kumar 	msg.reserved = SYS_MSG_COOKIE;
638cd395495SRajeev Kumar 	msg.callback = callback;
639cd395495SRajeev Kumar 	msg.bodyptr = user_data;
640cd395495SRajeev Kumar 	msg.bodyval = 0;
641cd395495SRajeev Kumar 
642c7ee85c4SDustin Brown 	/* bodyptr points to user data, do not free it during msg flush */
643c7ee85c4SDustin Brown 	msg.flush_callback = scheduler_msg_flush_noop;
644c7ee85c4SDustin Brown 
6458b7e2ee3Sgaurank kathpalia 	status = scheduler_post_message(QDF_MODULE_ID_SCHEDULER,
6468b7e2ee3Sgaurank kathpalia 					QDF_MODULE_ID_SCHEDULER,
6478b7e2ee3Sgaurank kathpalia 					QDF_MODULE_ID_SYS, &msg);
6488afde5a8SDustin Brown 	if (QDF_IS_STATUS_ERROR(status))
64991abaccbSDustin Brown 		sched_err("Could not enqueue timer to timer queue");
650cd395495SRajeev Kumar }
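
/*
 * Sketch of the path that ends in the callback above (the qdf_mc_timer API
 * names and signatures are assumed from qdf_mc_timer.h; the callback and
 * context names are hypothetical):
 *
 *	qdf_mc_timer_init(&my_timer, QDF_TIMER_TYPE_SW, my_timer_cb, my_ctx);
 *	qdf_mc_timer_start(&my_timer, timeout_ms);
 *
 * When the timer expires, scheduler_mc_timer_callback() runs in timer
 * context, captures my_timer_cb/my_ctx under the timer spinlock, wraps them
 * in a SYS_MSG_ID_MC_TIMER message and posts it to the SYS queue, so that
 * my_timer_cb(my_ctx) ultimately executes on the scheduler thread instead
 * of in timer context.
 */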
65187a8e445SVignesh Viswanathan 
65287a8e445SVignesh Viswanathan QDF_STATUS scheduler_get_queue_size(QDF_MODULE_ID qid, uint32_t *size)
65387a8e445SVignesh Viswanathan {
65487a8e445SVignesh Viswanathan 	uint8_t qidx;
65587a8e445SVignesh Viswanathan 	struct scheduler_mq_type *target_mq;
65687a8e445SVignesh Viswanathan 	struct scheduler_ctx *sched_ctx;
65787a8e445SVignesh Viswanathan 
65887a8e445SVignesh Viswanathan 	sched_ctx = scheduler_get_context();
65987a8e445SVignesh Viswanathan 	if (!sched_ctx)
66087a8e445SVignesh Viswanathan 		return QDF_STATUS_E_INVAL;
66187a8e445SVignesh Viswanathan 
66287a8e445SVignesh Viswanathan 	/* WMA also uses the target_if queue, so replace the QID */
66387a8e445SVignesh Viswanathan 	if (QDF_MODULE_ID_WMA == qid)
66487a8e445SVignesh Viswanathan 		qid = QDF_MODULE_ID_TARGET_IF;
66587a8e445SVignesh Viswanathan 
66687a8e445SVignesh Viswanathan 	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid];
66787a8e445SVignesh Viswanathan 	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
66887a8e445SVignesh Viswanathan 		sched_err("Scheduler is deinitialized");
66987a8e445SVignesh Viswanathan 		return QDF_STATUS_E_FAILURE;
67087a8e445SVignesh Viswanathan 	}
67187a8e445SVignesh Viswanathan 
67287a8e445SVignesh Viswanathan 	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);
67387a8e445SVignesh Viswanathan 
67487a8e445SVignesh Viswanathan 	*size = qdf_list_size(&target_mq->mq_list);
67587a8e445SVignesh Viswanathan 
67687a8e445SVignesh Viswanathan 	return QDF_STATUS_SUCCESS;
67787a8e445SVignesh Viswanathan }
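
/*
 * Example (sketch): sampling the depth of the WMA/target_if queue, e.g. when
 * debugging a message backlog. Variable names are illustrative.
 *
 *	uint32_t depth = 0;
 *
 *	if (QDF_IS_STATUS_SUCCESS(scheduler_get_queue_size(QDF_MODULE_ID_WMA,
 *							   &depth)))
 *		sched_debug("pending scheduler messages: %d", depth);
 */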
6781397a33fSMadhvapathi Sriram 
6791397a33fSMadhvapathi Sriram QDF_STATUS scheduler_post_message_debug(QDF_MODULE_ID src_id,
6801397a33fSMadhvapathi Sriram 					QDF_MODULE_ID dest_id,
6811397a33fSMadhvapathi Sriram 					QDF_MODULE_ID que_id,
6821397a33fSMadhvapathi Sriram 					struct scheduler_msg *msg,
6831397a33fSMadhvapathi Sriram 					int line,
6841397a33fSMadhvapathi Sriram 					const char *func)
6851397a33fSMadhvapathi Sriram {
6861397a33fSMadhvapathi Sriram 	QDF_STATUS status;
6871397a33fSMadhvapathi Sriram 
6881397a33fSMadhvapathi Sriram 	status = scheduler_post_msg(scheduler_get_qid(src_id, dest_id, que_id),
6891397a33fSMadhvapathi Sriram 				    msg);
6901397a33fSMadhvapathi Sriram 
6911397a33fSMadhvapathi Sriram 	if (QDF_IS_STATUS_ERROR(status))
6921397a33fSMadhvapathi Sriram 		sched_err("couldn't post from %d to %d - called from %d, %s",
6931397a33fSMadhvapathi Sriram 			  src_id, dest_id, line, func);
6941397a33fSMadhvapathi Sriram 
6951397a33fSMadhvapathi Sriram 	return status;
6961397a33fSMadhvapathi Sriram }
69711f5a63aSNaga 
69811f5a63aSNaga qdf_export_symbol(scheduler_post_message_debug);
699