/*
 * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_api.h>
#include <scheduler_core.h>
#include <qdf_atomic.h>
#include <qdf_module.h>
#include <qdf_platform.h>

QDF_STATUS scheduler_disable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_debug("Disabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	if (!sched_ctx->sch_thread) {
		sched_debug("Scheduler already disabled");
		return QDF_STATUS_SUCCESS;
	}

	/* send shutdown signal to scheduler thread */
	qdf_atomic_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	/* wait for the scheduler thread to shut down */
	qdf_wait_single_event(&sched_ctx->sch_shutdown, 0);
	sched_ctx->sch_thread = NULL;

	/* flush any unprocessed scheduler messages */
	scheduler_queues_flush(sched_ctx);

	return QDF_STATUS_SUCCESS;
}

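/*
 * scheduler_watchdog_notify() - log the callback that tripped the watchdog
 * @sched: scheduler context whose watchdog timer fired
 *
 * Resolves the offending callback to a symbol name, when one was recorded,
 * and logs it along with the message type and the configured timeout.
 */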
static inline void scheduler_watchdog_notify(struct scheduler_ctx *sched)
{
	char symbol[QDF_SYMBOL_LEN];

	if (sched->watchdog_callback)
		qdf_sprint_symbol(symbol, sched->watchdog_callback);

	sched_fatal("Callback %s (type 0x%x) exceeded its allotted time of %ds",
		    sched->watchdog_callback ? symbol : "<null>",
		    sched->watchdog_msg_type,
		    SCHEDULER_WATCHDOG_TIMEOUT / 1000);
}

static void scheduler_watchdog_timeout(void *arg)
{
	struct scheduler_ctx *sched = arg;

	if (qdf_is_recovering()) {
		sched_debug("Recovery is in progress; ignoring timeout");
		return;
	}

	scheduler_watchdog_notify(sched);
	if (sched->sch_thread)
		qdf_print_thread_trace(sched->sch_thread);

	/* avoid crashing during shutdown */
	if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK, &sched->sch_event_flag))
		return;

	QDF_DEBUG_PANIC("Going down for Scheduler Watchdog Bite!");
}

QDF_STATUS scheduler_enable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_debug("Enabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	qdf_atomic_clear_bit(MC_SHUTDOWN_EVENT_MASK,
			     &sched_ctx->sch_event_flag);
	qdf_atomic_clear_bit(MC_POST_EVENT_MASK,
			     &sched_ctx->sch_event_flag);

	/* create the scheduler thread */
	sched_ctx->sch_thread = qdf_create_thread(scheduler_thread, sched_ctx,
						  "scheduler_thread");
	if (!sched_ctx->sch_thread) {
		sched_fatal("Failed to create scheduler thread");
		return QDF_STATUS_E_RESOURCES;
	}

	sched_debug("Scheduler thread created");

	/* wait for the scheduler thread to start up */
	qdf_wake_up_process(sched_ctx->sch_thread);
	qdf_wait_single_event(&sched_ctx->sch_start_event, 0);

	sched_debug("Scheduler thread started");

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_init(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_debug("Initializing Scheduler");

	status = scheduler_create_ctx();
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to create context; status:%d", status);
		return status;
	}

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		status = QDF_STATUS_E_FAILURE;
		goto ctx_destroy;
	}

	status = scheduler_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to init queues; status:%d", status);
		goto ctx_destroy;
	}

	status = qdf_event_create(&sched_ctx->sch_start_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to create start event; status:%d", status);
		goto queues_deinit;
	}

	status = qdf_event_create(&sched_ctx->sch_shutdown);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to create shutdown event; status:%d",
			    status);
		goto start_event_destroy;
	}

	status = qdf_event_create(&sched_ctx->resume_sch_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to create resume event; status:%d", status);
		goto shutdown_event_destroy;
	}

	qdf_spinlock_create(&sched_ctx->sch_thread_lock);
	qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue);
	sched_ctx->sch_event_flag = 0;
	qdf_timer_init(NULL,
		       &sched_ctx->watchdog_timer,
		       &scheduler_watchdog_timeout,
		       sched_ctx,
		       QDF_TIMER_TYPE_SW);

	qdf_register_mc_timer_callback(scheduler_mc_timer_callback);

	return QDF_STATUS_SUCCESS;

shutdown_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_shutdown);

start_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_start_event);

queues_deinit:
	scheduler_queues_deinit(sched_ctx);

ctx_destroy:
	scheduler_destroy_ctx();

	return status;
}
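
/*
 * Illustrative lifecycle (a sketch, not code in this file): a driver
 * typically brings the scheduler up and down in the following order,
 * using the functions defined above and below. Error handling is elided.
 *
 *	status = scheduler_init();	// allocate context, queues, events
 *	status = scheduler_enable();	// spawn the scheduler thread
 *	...
 *	status = scheduler_disable();	// stop the thread, flush queues
 *	status = scheduler_deinit();	// tear down events and context
 */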

QDF_STATUS scheduler_deinit(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_debug("Deinitializing Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	qdf_timer_free(&sched_ctx->watchdog_timer);
	qdf_spinlock_destroy(&sched_ctx->sch_thread_lock);
	qdf_event_destroy(&sched_ctx->resume_sch_event);
	qdf_event_destroy(&sched_ctx->sch_shutdown);
	qdf_event_destroy(&sched_ctx->sch_start_event);

	status = scheduler_queues_deinit(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to deinit queues; status:%d", status);

	status = scheduler_destroy_ctx();
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to destroy context; status:%d", status);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_post_msg_by_priority(uint32_t qid,
					  struct scheduler_msg *msg,
					  bool is_high_priority)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_msg *queue_msg;
	struct scheduler_ctx *sched_ctx;
	uint16_t src_id;
	uint16_t dest_id;
	uint16_t que_id;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_INVAL;

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	if (!sched_ctx->sch_thread) {
		sched_err("Cannot post message; scheduler thread is stopped");
		return QDF_STATUS_E_FAILURE;
	}

	if (msg->reserved != 0 && msg->reserved != SYS_MSG_COOKIE) {
		QDF_DEBUG_PANIC("Scheduler messages must be initialized");
		return QDF_STATUS_E_FAILURE;
	}

	dest_id = scheduler_get_dest_id(qid);
	src_id = scheduler_get_src_id(qid);
	que_id = scheduler_get_que_id(qid);

	if (que_id >= QDF_MODULE_ID_MAX || src_id >= QDF_MODULE_ID_MAX ||
	    dest_id >= QDF_MODULE_ID_MAX) {
		sched_err("Invalid src/dest/queue id; cannot post message");
		return QDF_STATUS_E_FAILURE;
	}

	/* Target_If is a special message queue in phase 3 convergence because
	 * it is used both by legacy WMA and by new UMAC components, which
	 * directly populate callback handlers in the message body:
	 * 1) legacy WMA messages must not have a callback
	 * 2) new target_if messages must have a valid callback
	 * Clear the callback handler for legacy WMA messages so that a legacy
	 * WMA message posted from the stack with an uninitialized callback is
	 * still handled properly. Also remap the legacy WMA message queue id
	 * to the target_if queue so the messages are always handled in the
	 * right order.
	 */
	if (QDF_MODULE_ID_WMA == que_id) {
		msg->callback = NULL;
		/* change legacy WMA message id to new target_if mq id */
		que_id = QDF_MODULE_ID_TARGET_IF;
	}
	qdf_mtrace(src_id, dest_id, msg->type, 0xFF, 0);

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[que_id];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized; ignoring message");
		return QDF_STATUS_E_FAILURE;
	}

	if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) {
		QDF_DEBUG_PANIC("callback not registered for qid[%d]", que_id);
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);

	queue_msg = scheduler_core_msg_dup(msg);
	if (!queue_msg)
		return QDF_STATUS_E_NOMEM;

	if (is_high_priority)
		scheduler_mq_put_front(target_mq, queue_msg);
	else
		scheduler_mq_put(target_mq, queue_msg);

	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	return QDF_STATUS_SUCCESS;
}
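
/*
 * Illustrative usage (a sketch; MY_MSG_TYPE, my_context and my_msg_handler
 * are hypothetical): a component posts a message through the
 * scheduler_post_message() wrapper, which builds the qid from the
 * src/dest/queue module ids and lands in the function above.
 *
 *	struct scheduler_msg msg = {0};
 *
 *	msg.type = MY_MSG_TYPE;			// hypothetical message type
 *	msg.bodyptr = my_context;		// hypothetical payload
 *	msg.callback = my_msg_handler;		// hypothetical handler
 *	status = scheduler_post_message(QDF_MODULE_ID_OS_IF,
 *					QDF_MODULE_ID_OS_IF,
 *					QDF_MODULE_ID_OS_IF, &msg);
 */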

QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid,
				     scheduler_msg_process_fn_t callback)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	if (sched_ctx->sch_last_qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Already registered the maximum of %d message queues",
			  SCHEDULER_NUMBER_OF_MSG_QUEUE);
		return QDF_STATUS_E_FAILURE;
	}

	ctx = &sched_ctx->queue_ctx;
	ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx;
	ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid;
	ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback;
	sched_ctx->sch_last_qidx++;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}
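
/*
 * Illustrative usage (a sketch; my_mq_handler is hypothetical): a component
 * registers its queue and message handler once, before any messages are
 * posted to that queue.
 *
 *	static QDF_STATUS my_mq_handler(struct scheduler_msg *msg)
 *	{
 *		// dispatch on msg->type, consume msg->bodyptr/bodyval
 *		return QDF_STATUS_SUCCESS;
 *	}
 *
 *	status = scheduler_register_module(QDF_MODULE_ID_OS_IF,
 *					   &my_mq_handler);
 */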

QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	uint8_t qidx;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	ctx = &sched_ctx->queue_ctx;
	qidx = ctx->scheduler_msg_qid_to_qidx[qid];
	ctx->scheduler_msg_process_fn[qidx] = NULL;
	sched_ctx->sch_last_qidx--;
	/* invalidate the qid -> qidx mapping for this queue id */
	ctx->scheduler_msg_qid_to_qidx[qid] = SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

void scheduler_resume(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_event_set(&sched_ctx->resume_sch_event);
}

void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		sched_ctx->hdd_callback = callback;
}

void scheduler_wake_up_controller_thread(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
}

void scheduler_set_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_set_bit(event_mask, &sched_ctx->sch_event_flag);
}

void scheduler_clear_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_clear_bit(event_mask, &sched_ctx->sch_event_flag);
}

QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	target_if_msg_handler = msg->callback;

	/* Target_If is a special message queue in phase 3 convergence because
	 * it is used both by legacy WMA and by new UMAC components. New UMAC
	 * components pass their message handlers directly as the callback in
	 * the message body:
	 * 1) Legacy WMA messages do not carry a callback, so invoke the
	 *    registered legacy WMA handler. The scheduler message posting
	 *    APIs ensure legacy WMA messages never have callbacks.
	 * 2) New messages carry a valid callback; invoke it directly.
	 */
	if (!target_if_msg_handler)
		status = sched_ctx->legacy_wma_handler(msg);
	else
		status = target_if_msg_handler(msg);

	return status;
}

QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	os_if_msg_handler = msg->callback;

	QDF_BUG(os_if_msg_handler);
	if (!os_if_msg_handler)
		return QDF_STATUS_E_FAILURE;

	os_if_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	qdf_mc_timer_callback_t timer_callback;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* legacy sys message handler? */
	if (msg->reserved != SYS_MSG_COOKIE || msg->type != SYS_MSG_ID_MC_TIMER)
		return sched_ctx->legacy_sys_handler(msg);

	/* scheduler_msg_process_fn_t and qdf_mc_timer_callback_t have
	 * different parameter and return types, hence the explicit cast
	 */
	timer_callback = (qdf_mc_timer_callback_t)msg->callback;
	QDF_BUG(timer_callback);
	if (!timer_callback)
		return QDF_STATUS_E_FAILURE;

	timer_callback(msg->bodyptr);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_mlme_mq_handler(struct scheduler_msg *msg)
{
	scheduler_msg_process_fn_t mlme_msg_handler;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	mlme_msg_handler = msg->callback;

	QDF_BUG(mlme_msg_handler);
	if (!mlme_msg_handler)
		return QDF_STATUS_E_FAILURE;

	mlme_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_scan_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*scan_q_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	scan_q_msg_handler = msg->callback;

	QDF_BUG(scan_q_msg_handler);
	if (!scan_q_msg_handler)
		return QDF_STATUS_E_FAILURE;

	scan_q_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
						wma_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = wma_callback;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
						sys_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = sys_callback;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_wma_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_sys_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

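/*
 * scheduler_msg_flush_noop() - flush callback that intentionally does nothing
 * @msg: message being flushed from a scheduler queue
 *
 * Used for messages whose bodyptr is owned by someone else (e.g. MC timer
 * user data), so that flushing the queue does not attempt to free it.
 */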
static QDF_STATUS scheduler_msg_flush_noop(struct scheduler_msg *msg)
{
	return QDF_STATUS_SUCCESS;
}

void scheduler_mc_timer_callback(qdf_mc_timer_t *timer)
{
	struct scheduler_msg msg = {0};
	QDF_STATUS status;

	qdf_mc_timer_callback_t callback = NULL;
	void *user_data = NULL;
	QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW;

	QDF_BUG(timer);
	if (!timer)
		return;

	qdf_spin_lock_irqsave(&timer->platform_info.spinlock);

	switch (timer->state) {
	case QDF_TIMER_STATE_STARTING:
		/* we are in this state because someone just started the timer;
		 * the MC timer was armed and expired before the timer content
		 * was updated. This is a rare race condition!
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_STOPPED:
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_UNUSED:
		status = QDF_STATUS_E_EXISTS;
		break;

	case QDF_TIMER_STATE_RUNNING:
		/* need to go to the stopped state here because the callback
		 * function may restart the timer (to emulate a periodic timer)
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		/* copy the relevant timer information to local variables;
		 * once we exit this critical section, the timer content may
		 * be modified by other tasks
		 */
		callback = timer->callback;
		user_data = timer->user_data;
		type = timer->type;
		status = QDF_STATUS_SUCCESS;
		break;

	default:
		QDF_ASSERT(0);
		status = QDF_STATUS_E_FAULT;
		break;
	}

	qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);

	if (QDF_IS_STATUS_ERROR(status)) {
		sched_debug("MC timer fired but is not running; skip callback");
		return;
	}

	qdf_try_allowing_sleep(type);

	QDF_BUG(callback);
	if (!callback)
		return;

	/* serialize to the scheduler controller thread */
	msg.type = SYS_MSG_ID_MC_TIMER;
	msg.reserved = SYS_MSG_COOKIE;
	msg.callback = (scheduler_msg_process_fn_t)callback;
	msg.bodyptr = user_data;
	msg.bodyval = 0;

	/* bodyptr points to user data; do not free it during msg flush */
	msg.flush_callback = scheduler_msg_flush_noop;

	status = scheduler_post_message(QDF_MODULE_ID_SCHEDULER,
					QDF_MODULE_ID_SCHEDULER,
					QDF_MODULE_ID_SYS, &msg);
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Could not enqueue timer to timer queue");
}
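
/*
 * Flow summary (descriptive note, not code): an expiring MC timer runs in
 * timer context, so rather than invoking the user callback there, the
 * function above stamps the callback and user data into a
 * SYS_MSG_ID_MC_TIMER message and posts it to the SYS queue. The scheduler
 * thread later dispatches it via scheduler_timer_q_mq_handler(), which
 * casts msg->callback back to qdf_mc_timer_callback_t and calls it with
 * msg->bodyptr.
 */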

QDF_STATUS scheduler_get_queue_size(QDF_MODULE_ID qid, uint32_t *size)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_ctx *sched_ctx;

	sched_ctx = scheduler_get_context();
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	/* WMA also uses the target_if queue, so replace the QID */
	if (QDF_MODULE_ID_WMA == qid)
		qid = QDF_MODULE_ID_TARGET_IF;

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized");
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);

	*size = qdf_list_size(&target_mq->mq_list);

	return QDF_STATUS_SUCCESS;
}
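
/*
 * Illustrative usage (a sketch): querying the depth of the target_if queue.
 *
 *	uint32_t size;
 *
 *	status = scheduler_get_queue_size(QDF_MODULE_ID_TARGET_IF, &size);
 *	if (QDF_IS_STATUS_SUCCESS(status))
 *		sched_debug("target_if queue depth: %u", size);
 */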

QDF_STATUS scheduler_post_message_debug(QDF_MODULE_ID src_id,
					QDF_MODULE_ID dest_id,
					QDF_MODULE_ID que_id,
					struct scheduler_msg *msg,
					int line,
					const char *func)
{
	QDF_STATUS status;

	status = scheduler_post_msg(scheduler_get_qid(src_id, dest_id, que_id),
				    msg);

	if (QDF_IS_STATUS_ERROR(status))
		sched_err("couldn't post from %d to %d - called from %d, %s",
			  src_id, dest_id, line, func);

	return status;
}

qdf_export_symbol(scheduler_post_message_debug);