/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_api.h>
#include <scheduler_core.h>
#include <qdf_atomic.h>

/* Debug variable to detect if controller thread is stuck */
static qdf_atomic_t scheduler_msg_post_fail_count;

static void scheduler_flush_mqs(struct scheduler_ctx *sched_ctx)
{
	int i;

	/* Each of the MC thread's message queues is drained and its wrappers
	 * returned to the core. Before a wrapper is returned, the scheduler
	 * message it carries is freed.
	 */
	sched_info("Flushing scheduler message queues");

	QDF_ASSERT(sched_ctx);
	if (!sched_ctx) {
		sched_err("sched_ctx is NULL");
		return;
	}

	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_cleanup_queues(sched_ctx, i);
}

QDF_STATUS scheduler_disable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_info("Disabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		sched_err("sched_ctx is NULL");
		return QDF_STATUS_E_INVAL;
	}

	/* send the shutdown signal to the scheduler thread */
	qdf_atomic_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	/* wait for the scheduler thread to shut down */
	qdf_wait_single_event(&sched_ctx->sch_shutdown, 0);
	sched_ctx->sch_thread = NULL;

	/* flush any unprocessed scheduler messages */
	scheduler_flush_mqs(sched_ctx);

	return QDF_STATUS_SUCCESS;
}

static inline void scheduler_watchdog_notify(struct scheduler_ctx *sched)
{
	char symbol[QDF_SYMBOL_LEN];

	if (sched->watchdog_callback)
		qdf_sprint_symbol(symbol, sched->watchdog_callback);

	sched_err("WLAN_BUG_RCA: Callback %s (type 0x%x) exceeded its allotted time of %ds",
		  sched->watchdog_callback ? symbol : "<null>",
		  sched->watchdog_msg_type, SCHEDULER_WATCHDOG_TIMEOUT / 1000);
}

#ifdef CONFIG_SLUB_DEBUG_ON
static void scheduler_watchdog_timeout(void *arg)
{
	struct scheduler_ctx *sched = arg;

	scheduler_watchdog_notify(sched);
	if (sched->sch_thread)
		qdf_print_thread_trace(sched->sch_thread);

	/* avoid crashing during shutdown */
	if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK, &sched->sch_event_flag))
		return;

	sched_fatal("Going down for Scheduler Watchdog Bite!");
	QDF_BUG(0);
}
#else
static void scheduler_watchdog_timeout(void *arg)
{
	scheduler_watchdog_notify((struct scheduler_ctx *)arg);
}
#endif
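
/*
 * Note (illustrative; the arming logic lives in the scheduler thread in
 * scheduler_core.c, not in this file): the thread is expected to start
 * watchdog_timer before invoking each message callback and stop it again
 * afterwards, so any callback that runs longer than
 * SCHEDULER_WATCHDOG_TIMEOUT fires scheduler_watchdog_timeout() above.
 */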

QDF_STATUS scheduler_enable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_info("Enabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		return QDF_STATUS_E_INVAL;
	}

	qdf_atomic_clear_bit(MC_SHUTDOWN_EVENT_MASK,
			     &sched_ctx->sch_event_flag);
	qdf_atomic_clear_bit(MC_POST_EVENT_MASK,
			     &sched_ctx->sch_event_flag);

	/* create the scheduler thread */
	sched_ctx->sch_thread = qdf_create_thread(scheduler_thread, sched_ctx,
						  "scheduler_thread");
	if (IS_ERR(sched_ctx->sch_thread)) {
		sched_err("Failed to create scheduler thread");
		return QDF_STATUS_E_RESOURCES;
	}

	sched_info("Scheduler thread created");

	/* wait for the scheduler thread to start up */
	qdf_wake_up_process(sched_ctx->sch_thread);
	qdf_wait_single_event(&sched_ctx->sch_start_event, 0);

	sched_info("Scheduler thread started");

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_init(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_info("Initializing Scheduler");

	status = scheduler_create_ctx();
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create context; status:%d", status);
		return status;
	}

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		status = QDF_STATUS_E_FAILURE;
		goto ctx_destroy;
	}

	status = scheduler_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to init queues; status:%d", status);
		goto ctx_destroy;
	}

	status = qdf_event_create(&sched_ctx->sch_start_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create start event; status:%d", status);
		goto queues_deinit;
	}

	status = qdf_event_create(&sched_ctx->sch_shutdown);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create shutdown event; status:%d", status);
		goto start_event_destroy;
	}

	status = qdf_event_create(&sched_ctx->resume_sch_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create resume event; status:%d", status);
		goto shutdown_event_destroy;
	}

	qdf_spinlock_create(&sched_ctx->sch_thread_lock);
	qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue);
	sched_ctx->sch_event_flag = 0;
	qdf_timer_init(NULL,
		       &sched_ctx->watchdog_timer,
		       &scheduler_watchdog_timeout,
		       sched_ctx,
		       QDF_TIMER_TYPE_SW);

	qdf_register_mc_timer_callback(scheduler_mc_timer_callback);

	return QDF_STATUS_SUCCESS;

shutdown_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_shutdown);

start_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_start_event);

queues_deinit:
	scheduler_queues_deinit(sched_ctx);

ctx_destroy:
	scheduler_destroy_ctx();

	return status;
}

QDF_STATUS scheduler_deinit(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_info("Deinitializing Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		return QDF_STATUS_E_INVAL;
	}

	qdf_timer_free(&sched_ctx->watchdog_timer);
	qdf_spinlock_destroy(&sched_ctx->sch_thread_lock);
	qdf_event_destroy(&sched_ctx->resume_sch_event);
	qdf_event_destroy(&sched_ctx->sch_shutdown);
	qdf_event_destroy(&sched_ctx->sch_start_event);

	status = scheduler_queues_deinit(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to deinit queues; status:%d", status);

	status = scheduler_destroy_ctx();
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to destroy context; status:%d", status);

	return QDF_STATUS_SUCCESS;
}
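
/*
 * Illustrative lifecycle sketch (inferred from the APIs above, not a
 * verbatim caller):
 *
 *	scheduler_init();	// allocate context, queues, events, timer
 *	scheduler_enable();	// spawn and start the scheduler thread
 *	...			// register modules, post messages
 *	scheduler_disable();	// stop the thread and flush the queues
 *	scheduler_deinit();	// destroy events, queues and context
 */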

QDF_STATUS scheduler_post_msg_by_priority(QDF_MODULE_ID qid,
		struct scheduler_msg *pMsg, bool is_high_priority)
{
	uint8_t qidx;
	uint32_t msg_wrapper_fail_count;
	struct scheduler_mq_type *target_mq = NULL;
	struct scheduler_msg_wrapper *msg_wrapper = NULL;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (!pMsg) {
		sched_err("pMsg is null");
		return QDF_STATUS_E_INVAL;
	}

	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		return QDF_STATUS_E_INVAL;
	}

	if (!sched_ctx->sch_thread) {
		sched_err("Cannot post message; scheduler thread is stopped");
		return QDF_STATUS_E_FAILURE;
	}

	if ((0 != pMsg->reserved) && (SYS_MSG_COOKIE != pMsg->reserved)) {
		sched_err("Uninitialized scheduler message; initialize it before posting");
		QDF_BUG(0);
		return QDF_STATUS_E_FAILURE;
	}
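
	/*
	 * Illustrative sketch of a well-formed post (hypothetical message
	 * type and payload; scheduler_mc_timer_callback() at the bottom of
	 * this file builds its message the same way). Zero-initialization
	 * leaves 'reserved' at 0, which satisfies the check above:
	 *
	 *	struct scheduler_msg msg = {0};
	 *
	 *	msg.type = MY_MSG_TYPE;
	 *	msg.bodyptr = my_ctx;
	 *	status = scheduler_post_msg(QDF_MODULE_ID_OS_IF, &msg);
	 */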

	/* Target_If is a special message queue in phase 3 convergence because
	 * it is used both by legacy WMA and by new UMAC components, which
	 * directly populate callback handlers in the message body.
	 * 1) Legacy WMA messages must not carry a callback
	 * 2) New target_if messages must carry a valid callback
	 * Clear the callback handler for legacy WMA messages so that a legacy
	 * WMA message posted from the stack with an uninitialized callback is
	 * still handled properly. Also remap the legacy WMA message queue id
	 * to the target_if queue so that messages are always handled in the
	 * right order.
	 */
	if (QDF_MODULE_ID_WMA == qid) {
		pMsg->callback = NULL;
		/* remap the legacy WMA message id to the target_if mq id */
		qid = QDF_MODULE_ID_TARGET_IF;
	}

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized; dropping message");
		return QDF_STATUS_E_FAILURE;
	}

	if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) {
		sched_err("callback not registered for qid[%d]", qid);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);
	QDF_ASSERT(target_mq);
	if (target_mq == NULL) {
		sched_err("target_mq == NULL");
		return QDF_STATUS_E_FAILURE;
	}

	/* try to get a free message wrapper */
	msg_wrapper = scheduler_mq_get(&sched_ctx->queue_ctx.free_msg_q);
	if (NULL == msg_wrapper) {
		msg_wrapper_fail_count =
			qdf_atomic_inc_return(&scheduler_msg_post_fail_count);
		/* log only the 1st failure to avoid overrunning the log buffer */
		if (msg_wrapper_fail_count == 1)
			sched_err("Scheduler message wrapper pool empty");

		if (SCHEDULER_WRAPPER_MAX_FAIL_COUNT == msg_wrapper_fail_count)
			QDF_BUG(0);

		return QDF_STATUS_E_RESOURCES;
	}
	qdf_atomic_set(&scheduler_msg_post_fail_count, 0);

	/* Copy the message now */
	qdf_mem_copy((void *)msg_wrapper->msg_buf,
			(void *)pMsg, sizeof(struct scheduler_msg));

	if (is_high_priority)
		scheduler_mq_put_front(target_mq, msg_wrapper);
	else
		scheduler_mq_put(target_mq, msg_wrapper);

	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid,
		scheduler_msg_process_fn_t callback)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	sched_enter();

	if (!sched_ctx) {
		QDF_ASSERT(0);
		sched_err("sched_ctx is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	if (sched_ctx->sch_last_qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Already registered the maximum of %d message queues",
			  SCHEDULER_NUMBER_OF_MSG_QUEUE);
		return QDF_STATUS_E_FAILURE;
	}

	ctx = &sched_ctx->queue_ctx;
	ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx;
	ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid;
	ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback;
	sched_ctx->sch_last_qidx++;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}
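
/*
 * Illustrative usage (hypothetical handler name, not part of this file):
 * each component registers one handler for its message queue during
 * bring-up, before any messages are posted to that queue:
 *
 *	static QDF_STATUS my_component_mq_handler(struct scheduler_msg *msg);
 *	...
 *	status = scheduler_register_module(QDF_MODULE_ID_OS_IF,
 *					   my_component_mq_handler);
 */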

QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	uint8_t qidx;

	sched_enter();

	if (!sched_ctx) {
		QDF_ASSERT(0);
		sched_err("sched_ctx is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	ctx = &sched_ctx->queue_ctx;
	qidx = ctx->scheduler_msg_qid_to_qidx[qid];
	ctx->scheduler_msg_process_fn[qidx] = NULL;
	sched_ctx->sch_last_qidx--;
	ctx->scheduler_msg_qid_to_qidx[qid] = SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

void scheduler_resume(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_event_set(&sched_ctx->resume_sch_event);
}

void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		sched_ctx->hdd_callback = callback;
}

void scheduler_wake_up_controller_thread(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
}

void scheduler_set_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_set_bit(event_mask, &sched_ctx->sch_event_flag);
}

void scheduler_clear_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_clear_bit(event_mask, &sched_ctx->sch_event_flag);
}

QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *);

	if (NULL == msg || NULL == sched_ctx) {
		sched_err("msg %pK sch %pK", msg, sched_ctx);
		return QDF_STATUS_E_FAILURE;
	}

	target_if_msg_handler = msg->callback;

	/* Target_If is a special message queue in phase 3 convergence because
	 * it is used both by legacy WMA and by new UMAC components. New UMAC
	 * components directly pass their message handlers as the callback in
	 * the message body.
	 * 1) Legacy WMA messages do not contain a message callback, so invoke
	 *    the registered legacy WMA handler. The scheduler message posting
	 *    APIs make sure legacy WMA messages do not have callbacks.
	 * 2) For new messages which have valid callbacks, invoke the callback
	 *    directly.
	 */
	if (NULL == target_if_msg_handler)
		status = sched_ctx->legacy_wma_handler(msg);
	else
		status = target_if_msg_handler(msg);

	return status;
}

QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *);

	if (NULL == msg) {
		sched_err("Msg is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	os_if_msg_handler = msg->callback;

	if (NULL == os_if_msg_handler) {
		sched_err("Msg callback is NULL");
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}
	os_if_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	qdf_mc_timer_callback_t timer_q_msg_handler;

	if (NULL == msg || NULL == sched_ctx) {
		sched_err("msg %pK sch %pK", msg, sched_ctx);
		return QDF_STATUS_E_FAILURE;
	}

	timer_q_msg_handler = msg->callback;

	/* Timer message handler */
	if (SYS_MSG_COOKIE == msg->reserved &&
	    SYS_MSG_ID_MC_TIMER == msg->type) {
		if (timer_q_msg_handler) {
			status = QDF_STATUS_SUCCESS;
			timer_q_msg_handler(msg->bodyptr);
		} else {
			sched_err("Timer cb is null");
			status = QDF_STATUS_E_FAILURE;
		}

		return status;
	}

	/* Legacy sys message handler */
	return sched_ctx->legacy_sys_handler(msg);
}

QDF_STATUS scheduler_scan_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*scan_q_msg_handler)(struct scheduler_msg *);

	if (NULL == msg) {
		sched_err("Msg is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	scan_q_msg_handler = msg->callback;

	if (NULL == scan_q_msg_handler) {
		sched_err("Msg callback is NULL");
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}
	scan_q_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
						wma_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (NULL == sched_ctx) {
		sched_err("scheduler context is null");
		return QDF_STATUS_E_FAILURE;
	}

	sched_ctx->legacy_wma_handler = wma_callback;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
						sys_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (NULL == sched_ctx) {
		sched_err("scheduler context is null");
		return QDF_STATUS_E_FAILURE;
	}

	sched_ctx->legacy_sys_handler = sys_callback;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_wma_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (NULL == sched_ctx) {
		sched_err("scheduler context is null");
		return QDF_STATUS_E_FAILURE;
	}

	sched_ctx->legacy_wma_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_sys_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (NULL == sched_ctx) {
		sched_err("scheduler context is null");
		return QDF_STATUS_E_FAILURE;
	}

	sched_ctx->legacy_sys_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

void scheduler_mc_timer_callback(unsigned long data)
{
	qdf_mc_timer_t *timer = (qdf_mc_timer_t *)data;
	struct scheduler_msg msg = {0};
	QDF_STATUS status;

	qdf_mc_timer_callback_t callback = NULL;
	void *user_data = NULL;
	QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW;

	QDF_ASSERT(timer);

	if (timer == NULL) {
		sched_err("Null pointer passed in!");
		return;
	}

	qdf_spin_lock_irqsave(&timer->platform_info.spinlock);

	switch (timer->state) {
	case QDF_TIMER_STATE_STARTING:
		/* we are in this state because someone just started the timer:
		 * the OS timer fired and expired before the timer content was
		 * updated. This is a rare race condition!
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_STOPPED:
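		/* the timer was stopped before this callback could run */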
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_UNUSED:
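		/* the timer was never initialized or was already destroyed */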
		status = QDF_STATUS_E_EXISTS;
		break;

	case QDF_TIMER_STATE_RUNNING:
		/* need to go to stop state here because the call-back function
		 * may restart the timer (to emulate a periodic timer)
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		/* copy the relevant timer information to local variables;
		 * once we exit this critical section, the timer content may
		 * be modified by other tasks
		 */
		callback = timer->callback;
		user_data = timer->user_data;
		type = timer->type;
		status = QDF_STATUS_SUCCESS;
		break;

	default:
		QDF_ASSERT(0);
		status = QDF_STATUS_E_FAULT;
		break;
	}

	qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);

	if (QDF_STATUS_SUCCESS != status) {
		sched_err("Timer callback called in the wrong state=%d",
			  timer->state);
		return;
	}

	qdf_try_allowing_sleep(type);

	if (callback == NULL) {
		sched_err("No timer callback; cannot enqueue the timer message");
		QDF_ASSERT(0);
		return;
	}

	/* serialize to the scheduler controller thread */
	msg.type = SYS_MSG_ID_MC_TIMER;
	msg.reserved = SYS_MSG_COOKIE;
	msg.callback = callback;
	msg.bodyptr = user_data;
	msg.bodyval = 0;

	if (scheduler_post_msg(QDF_MODULE_ID_SYS, &msg) == QDF_STATUS_SUCCESS)
		return;
	sched_err("Could not enqueue timer message to the timer queue");
}
689