xref: /wlan-dirver/qca-wifi-host-cmn/scheduler/src/scheduler_api.c (revision c3b7a68d5402abab41c142a05f3c036ad802181a)
1 /*
2  * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 #include <scheduler_api.h>
29 #include <scheduler_core.h>
30 #include <qdf_atomic.h>
31 
/* Debug variable to detect if controller thread is stuck: counts message
 * posts that failed because the free wrapper pool was empty. Reset to 0 on
 * every successful wrapper allocation; scheduler_post_msg_by_priority()
 * triggers QDF_BUG() once it reaches SCHEDULER_WRAPPER_MAX_FAIL_COUNT.
 */
static qdf_atomic_t scheduler_msg_post_fail_count;
34 
35 static void scheduler_flush_mqs(struct scheduler_ctx *sched_ctx)
36 {
37 	int i;
38 
39 	/* Here each of the MC thread MQ shall be drained and returned to the
40 	 * Core. Before returning a wrapper to the Core, the Scheduler message
41 	 * shall be freed first
42 	 */
43 	sched_info("Flushing scheduler message queue");
44 
45 	QDF_ASSERT(sched_ctx);
46 	if (!sched_ctx) {
47 		sched_err("sched_ctx is NULL");
48 		return;
49 	}
50 	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
51 		scheduler_cleanup_queues(sched_ctx, i);
52 }
53 
/**
 * scheduler_close() - stop the scheduler thread and tear down its resources
 * @sched_ctx: the global scheduler context to close
 *
 * Signals the scheduler thread to shut down, waits for it to exit,
 * flushes any unprocessed messages, then destroys the timer, lock and
 * events created by scheduler_open().
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL on null ctx
 */
static QDF_STATUS scheduler_close(struct scheduler_ctx *sched_ctx)
{
	sched_info("Closing Scheduler");

	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		sched_err("sched_ctx is NULL");
		return QDF_STATUS_E_INVAL;
	}

	/* send shutdown signal to scheduler thread */
	qdf_atomic_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	/* wait for scheduler thread to shutdown; timeout 0 presumably means
	 * wait indefinitely — TODO confirm qdf_wait_single_event() semantics
	 */
	qdf_wait_single_event(&sched_ctx->sch_shutdown, 0);
	sched_ctx->sch_thread = NULL;

	/* flush any unprocessed scheduler messages */
	scheduler_flush_mqs(sched_ctx);

	/* destroy resources in reverse order of creation in scheduler_open() */
	qdf_timer_free(&sched_ctx->watchdog_timer);
	qdf_spinlock_destroy(&sched_ctx->sch_thread_lock);
	qdf_event_destroy(&sched_ctx->resume_sch_event);
	qdf_event_destroy(&sched_ctx->sch_shutdown);
	qdf_event_destroy(&sched_ctx->sch_start_event);

	return QDF_STATUS_SUCCESS;
}
84 
85 static inline void scheduler_watchdog_notify(struct scheduler_ctx *sched)
86 {
87 	char symbol[QDF_SYMBOL_LEN];
88 
89 	if (sched->watchdog_callback)
90 		qdf_sprint_symbol(symbol, sched->watchdog_callback);
91 
92 	sched_err("Callback %s (type 0x%x) exceeded its allotted time of %ds",
93 		  sched->watchdog_callback ? symbol : "<null>",
94 		  sched->watchdog_msg_type, SCHEDULER_WATCHDOG_TIMEOUT / 1000);
95 }
96 
#ifdef CONFIG_SLUB_DEBUG_ON
/**
 * scheduler_watchdog_timeout() - watchdog timer expiry handler (debug builds)
 * @arg: the scheduler context registered with the watchdog timer
 *
 * Logs the offending callback, dumps the scheduler thread's stack trace,
 * and intentionally crashes (QDF_BUG) unless a shutdown is in progress.
 */
static void scheduler_watchdog_timeout(void *arg)
{
	struct scheduler_ctx *sched = arg;

	scheduler_watchdog_notify(sched);
	if (sched->sch_thread)
		qdf_print_thread_trace(sched->sch_thread);

	/* avoid crashing during shutdown */
	if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK, &sched->sch_event_flag))
		return;

	sched_fatal("Going down for Scheduler Watchdog Bite!");
	QDF_BUG(0);
}
#else
/**
 * scheduler_watchdog_timeout() - watchdog timer expiry handler (production)
 * @arg: the scheduler context registered with the watchdog timer
 *
 * Production builds only log the offending callback; no crash.
 */
static void scheduler_watchdog_timeout(void *arg)
{
	scheduler_watchdog_notify((struct scheduler_ctx *)arg);
}
#endif
119 
/**
 * scheduler_open() - create the scheduler thread and its support objects
 * @sched_ctx: the global scheduler context to populate
 *
 * Creates the start/shutdown/resume events, the thread lock, the wait
 * queue and the watchdog timer, then spawns the scheduler thread and
 * blocks until it signals sch_start_event.
 *
 * Return: QDF_STATUS_SUCCESS on success, an error code on failure with
 *         all partially created resources released.
 */
static QDF_STATUS scheduler_open(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;

	sched_info("Opening Scheduler");

	/* Sanity checks */
	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		return QDF_STATUS_E_INVAL;
	}

	status = qdf_event_create(&sched_ctx->sch_start_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create start event; status:%d", status);
		return status;
	}

	status = qdf_event_create(&sched_ctx->sch_shutdown);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create shutdown event; status:%d", status);
		goto start_event_destroy;
	}

	status = qdf_event_create(&sched_ctx->resume_sch_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create resume event; status:%d", status);
		goto shutdown_event_destroy;
	}

	qdf_spinlock_create(&sched_ctx->sch_thread_lock);
	qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue);
	sched_ctx->sch_event_flag = 0;
	qdf_timer_init(NULL,
		       &sched_ctx->watchdog_timer,
		       &scheduler_watchdog_timeout,
		       sched_ctx,
		       QDF_TIMER_TYPE_SW);

	/* create the scheduler thread */
	sched_ctx->sch_thread = qdf_create_thread(scheduler_thread,
					sched_ctx, "scheduler_thread");
	/* NOTE(review): IS_ERR() does not catch a NULL return — confirm
	 * qdf_create_thread() never returns NULL on failure
	 */
	if (IS_ERR(sched_ctx->sch_thread)) {
		sched_err("Failed to create scheduler thread");
		status = QDF_STATUS_E_RESOURCES;
		goto wd_timer_destroy;
	}

	sched_info("Scheduler thread created");

	/* wait for the scheduler thread to startup; timeout 0 presumably
	 * means wait indefinitely — TODO confirm
	 */
	qdf_wake_up_process(sched_ctx->sch_thread);
	qdf_wait_single_event(&sched_ctx->sch_start_event, 0);

	sched_info("Scheduler thread started");

	return QDF_STATUS_SUCCESS;

	/* error unwind: release resources in reverse order of creation */
wd_timer_destroy:
	qdf_timer_free(&sched_ctx->watchdog_timer);
	qdf_spinlock_destroy(&sched_ctx->sch_thread_lock);
	qdf_event_destroy(&sched_ctx->resume_sch_event);

shutdown_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_shutdown);

start_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_start_event);

	return status;
}
192 
193 QDF_STATUS scheduler_init(void)
194 {
195 	QDF_STATUS status = QDF_STATUS_SUCCESS;
196 	struct scheduler_ctx *sched_ctx;
197 
198 	sched_info("Initializing Scheduler");
199 
200 	status = scheduler_create_ctx();
201 	if (QDF_STATUS_SUCCESS != status) {
202 		sched_err("can't create scheduler ctx");
203 		return status;
204 	}
205 
206 	sched_ctx = scheduler_get_context();
207 	QDF_BUG(sched_ctx);
208 	if (!sched_ctx) {
209 		sched_err("sched_ctx is null");
210 		status = QDF_STATUS_E_FAILURE;
211 		goto ctx_destroy;
212 	}
213 
214 	status = scheduler_queues_init(sched_ctx);
215 	if (QDF_IS_STATUS_ERROR(status)) {
216 		sched_err("Queue init failed");
217 		goto ctx_destroy;
218 	}
219 
220 	status = scheduler_open(sched_ctx);
221 	if (QDF_IS_STATUS_ERROR(status)) {
222 		sched_err("Failed to open QDF Scheduler");
223 		goto queues_deinit;
224 	}
225 
226 	qdf_register_mc_timer_callback(scheduler_mc_timer_callback);
227 
228 	return QDF_STATUS_SUCCESS;
229 
230 queues_deinit:
231 	scheduler_queues_deinit(sched_ctx);
232 
233 ctx_destroy:
234 	scheduler_destroy_ctx();
235 
236 	return status;
237 }
238 
239 QDF_STATUS scheduler_deinit(void)
240 {
241 	QDF_STATUS status = QDF_STATUS_SUCCESS;
242 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
243 
244 	sched_info("Deinitializing Scheduler");
245 
246 	status = scheduler_close(sched_ctx);
247 	if (QDF_STATUS_SUCCESS != status) {
248 		sched_err("Scheduler close failed");
249 		return status;
250 	}
251 
252 	scheduler_queues_deinit(sched_ctx);
253 
254 	return scheduler_destroy_ctx();
255 }
256 
257 
258 QDF_STATUS scheduler_post_msg_by_priority(QDF_MODULE_ID qid,
259 		struct scheduler_msg *pMsg, bool is_high_priority)
260 {
261 	uint8_t qidx;
262 	uint32_t msg_wrapper_fail_count;
263 	struct scheduler_mq_type *target_mq = NULL;
264 	struct scheduler_msg_wrapper *msg_wrapper = NULL;
265 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
266 
267 	if (!pMsg) {
268 		sched_err("pMsg is null");
269 		return QDF_STATUS_E_INVAL;
270 	}
271 
272 	if (!sched_ctx) {
273 		sched_err("sched_ctx is null");
274 		return QDF_STATUS_E_INVAL;
275 	}
276 
277 	if (!sched_ctx->sch_thread) {
278 		sched_err("Cannot post message; scheduler thread is stopped");
279 		return QDF_STATUS_E_FAILURE;
280 	}
281 
282 	if ((0 != pMsg->reserved) && (SYS_MSG_COOKIE != pMsg->reserved)) {
283 		sched_err("Un-initialized message pointer.. please initialize it");
284 		QDF_BUG(0);
285 		return QDF_STATUS_E_FAILURE;
286 	}
287 
288 	/* Target_If is a special message queue in phase 3 convergence beacause
289 	 * its used by both legacy WMA and as well as new UMAC components which
290 	 * directly populate callback handlers in message body.
291 	 * 1) WMA legacy messages should not have callback
292 	 * 2) New target_if message needs to have valid callback
293 	 * Clear callback handler for legacy WMA messages such that in case
294 	 * if someone is sending legacy WMA message from stack which has
295 	 * uninitialized callback then its handled properly. Also change
296 	 * legacy WMA message queue id to target_if queue such that its  always
297 	 * handled in right order.
298 	 */
299 	if (QDF_MODULE_ID_WMA == qid) {
300 		pMsg->callback = NULL;
301 		/* change legacy WMA message id to new target_if mq id */
302 		qid = QDF_MODULE_ID_TARGET_IF;
303 	}
304 
305 	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid];
306 	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
307 		sched_err("Scheduler is deinitialized ignore msg");
308 		return QDF_STATUS_E_FAILURE;
309 	}
310 
311 	if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) {
312 		sched_err("callback not registered for qid[%d]", qid);
313 		QDF_ASSERT(0);
314 		return QDF_STATUS_E_FAILURE;
315 	}
316 
317 	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);
318 	QDF_ASSERT(target_mq);
319 	if (target_mq == NULL) {
320 		sched_err("target_mq == NULL");
321 		return QDF_STATUS_E_FAILURE;
322 	}
323 
324 	/* Try and get a free Msg wrapper */
325 	msg_wrapper = scheduler_mq_get(&sched_ctx->queue_ctx.free_msg_q);
326 	if (NULL == msg_wrapper) {
327 		msg_wrapper_fail_count =
328 			qdf_atomic_inc_return(&scheduler_msg_post_fail_count);
329 		/* log only 1st failure to avoid over running log buffer */
330 		if (msg_wrapper_fail_count == 1)
331 			sched_err("Scheduler message wrapper empty");
332 
333 		if (SCHEDULER_WRAPPER_MAX_FAIL_COUNT == msg_wrapper_fail_count)
334 			QDF_BUG(0);
335 
336 		return QDF_STATUS_E_RESOURCES;
337 	}
338 	qdf_atomic_set(&scheduler_msg_post_fail_count, 0);
339 
340 	/* Copy the message now */
341 	qdf_mem_copy((void *)msg_wrapper->msg_buf,
342 			(void *)pMsg, sizeof(struct scheduler_msg));
343 
344 	if (is_high_priority)
345 		scheduler_mq_put_front(target_mq, msg_wrapper);
346 	else
347 		scheduler_mq_put(target_mq, msg_wrapper);
348 
349 	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
350 	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
351 
352 	return QDF_STATUS_SUCCESS;
353 }
354 
355 QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid,
356 		scheduler_msg_process_fn_t callback)
357 {
358 	struct scheduler_mq_ctx *ctx;
359 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
360 
361 	sched_enter();
362 
363 	if (!sched_ctx) {
364 		QDF_ASSERT(0);
365 		sched_err("sched_ctx is NULL");
366 		return QDF_STATUS_E_FAILURE;
367 	}
368 
369 	if (sched_ctx->sch_last_qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
370 		sched_err("Already registered max %d no of message queues",
371 			  SCHEDULER_NUMBER_OF_MSG_QUEUE);
372 		return QDF_STATUS_E_FAILURE;
373 	}
374 
375 	ctx = &sched_ctx->queue_ctx;
376 	ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx;
377 	ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid;
378 	ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback;
379 	sched_ctx->sch_last_qidx++;
380 
381 	sched_exit();
382 
383 	return QDF_STATUS_SUCCESS;
384 }
385 
386 QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid)
387 {
388 	struct scheduler_mq_ctx *ctx;
389 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
390 	uint8_t qidx;
391 
392 	sched_enter();
393 
394 	if (!sched_ctx) {
395 		QDF_ASSERT(0);
396 		sched_err("sched_ctx is NULL");
397 		return QDF_STATUS_E_FAILURE;
398 	}
399 
400 	ctx = &sched_ctx->queue_ctx;
401 	qidx = ctx->scheduler_msg_qid_to_qidx[qid];
402 	ctx->scheduler_msg_process_fn[qidx] = NULL;
403 	sched_ctx->sch_last_qidx--;
404 	ctx->scheduler_msg_qid_to_qidx[qidx] = SCHEDULER_NUMBER_OF_MSG_QUEUE;
405 
406 	sched_exit();
407 
408 	return QDF_STATUS_SUCCESS;
409 }
410 
411 void scheduler_resume(void)
412 {
413 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
414 
415 	if (sched_ctx)
416 		qdf_event_set(&sched_ctx->resume_sch_event);
417 }
418 
419 void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback)
420 {
421 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
422 
423 	if (sched_ctx)
424 		sched_ctx->hdd_callback = callback;
425 }
426 void scheduler_wake_up_controller_thread(void)
427 {
428 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
429 
430 	if (sched_ctx)
431 		qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
432 }
433 void scheduler_set_event_mask(uint32_t event_mask)
434 {
435 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
436 
437 	if (sched_ctx)
438 		qdf_atomic_set_bit(event_mask, &sched_ctx->sch_event_flag);
439 }
440 
441 void scheduler_clear_event_mask(uint32_t event_mask)
442 {
443 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
444 
445 	if (sched_ctx)
446 		qdf_atomic_clear_bit(event_mask, &sched_ctx->sch_event_flag);
447 }
448 
449 QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg)
450 {
451 	QDF_STATUS status;
452 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
453 	QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *);
454 
455 	if (NULL == msg || NULL == sched_ctx) {
456 		sched_err("msg %pK sch %pK", msg, sched_ctx);
457 		return QDF_STATUS_E_FAILURE;
458 	}
459 
460 	target_if_msg_handler = msg->callback;
461 
462 	/* Target_If is a special message queue in phase 3 convergence beacause
463 	 * its used by both legacy WMA and as well as new UMAC components. New
464 	 * UMAC components directly pass their message handlers as callback in
465 	 * message body.
466 	 * 1) All Legacy WMA messages do not contain message callback so invoke
467 	 *    registered legacy WMA handler. Scheduler message posting APIs
468 	 *    makes sure legacy WMA messages do not have callbacks.
469 	 * 2) For new messages which have valid callbacks invoke their callbacks
470 	 *    directly.
471 	 */
472 	if (NULL == target_if_msg_handler)
473 		status = sched_ctx->legacy_wma_handler(msg);
474 	else
475 		status = target_if_msg_handler(msg);
476 
477 	return status;
478 }
479 
480 QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg)
481 {
482 	QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *);
483 
484 	if (NULL == msg) {
485 		sched_err("Msg is NULL");
486 		return QDF_STATUS_E_FAILURE;
487 	}
488 
489 	os_if_msg_handler = msg->callback;
490 
491 	if (NULL == os_if_msg_handler) {
492 		sched_err("Msg callback is NULL");
493 		QDF_ASSERT(0);
494 		return QDF_STATUS_E_FAILURE;
495 	}
496 	os_if_msg_handler(msg);
497 
498 	return QDF_STATUS_SUCCESS;
499 }
500 
501 QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg)
502 {
503 	QDF_STATUS status;
504 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
505 	qdf_mc_timer_callback_t timer_q_msg_handler;
506 
507 	if (NULL == msg || NULL == sched_ctx) {
508 		sched_err("msg %pK sch %pK", msg, sched_ctx);
509 		return QDF_STATUS_E_FAILURE;
510 	}
511 
512 	timer_q_msg_handler = msg->callback;
513 
514 	/* Timer message handler */
515 	if (SYS_MSG_COOKIE == msg->reserved &&
516 		SYS_MSG_ID_MC_TIMER == msg->type) {
517 		if (timer_q_msg_handler) {
518 			status = QDF_STATUS_SUCCESS;
519 			timer_q_msg_handler(msg->bodyptr);
520 		} else {
521 			sched_err("Timer cb is null");
522 			status = QDF_STATUS_E_FAILURE;
523 		}
524 
525 		return status;
526 	} else {
527 		/* Legacy sys message handler */
528 		status = sched_ctx->legacy_sys_handler(msg);
529 
530 		return status;
531 	}
532 }
533 
534 QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
535 						wma_callback)
536 {
537 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
538 
539 	if (NULL == sched_ctx) {
540 		sched_err("scheduler context is null");
541 		return QDF_STATUS_E_FAILURE;
542 	}
543 
544 	sched_ctx->legacy_wma_handler = wma_callback;
545 
546 	return QDF_STATUS_SUCCESS;
547 }
548 
549 QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
550 						sys_callback)
551 {
552 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
553 
554 	if (NULL == sched_ctx) {
555 		sched_err("scheduler context is null");
556 		return QDF_STATUS_E_FAILURE;
557 	}
558 
559 	sched_ctx->legacy_sys_handler = sys_callback;
560 
561 	return QDF_STATUS_SUCCESS;
562 }
563 
564 QDF_STATUS scheduler_deregister_wma_legacy_handler(void)
565 {
566 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
567 
568 	if (NULL == sched_ctx) {
569 		sched_err("scheduler context is null");
570 		return QDF_STATUS_E_FAILURE;
571 	}
572 
573 	sched_ctx->legacy_wma_handler = NULL;
574 
575 	return QDF_STATUS_SUCCESS;
576 }
577 
578 QDF_STATUS scheduler_deregister_sys_legacy_handler(void)
579 {
580 	struct scheduler_ctx *sched_ctx = scheduler_get_context();
581 
582 	if (NULL == sched_ctx) {
583 		sched_err("scheduler context is null");
584 		return QDF_STATUS_E_FAILURE;
585 	}
586 
587 	sched_ctx->legacy_sys_handler = NULL;
588 
589 	return QDF_STATUS_SUCCESS;
590 }
591 
/**
 * scheduler_mc_timer_callback() - OS timer expiry shim for MC timers
 * @data: the expired qdf_mc_timer_t, cast to unsigned long
 *
 * Validates the timer's state machine under its spinlock, snapshots the
 * callback and user data, then serializes the expiry to the scheduler
 * controller thread by posting a SYS_MSG_ID_MC_TIMER message.
 */
void scheduler_mc_timer_callback(unsigned long data)
{
	qdf_mc_timer_t *timer = (qdf_mc_timer_t *)data;
	struct scheduler_msg msg = {0};
	QDF_STATUS status;

	/* snapshot of the timer fields, taken under the spinlock below */
	qdf_mc_timer_callback_t callback = NULL;
	void *user_data = NULL;
	QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW;

	QDF_ASSERT(timer);

	if (timer == NULL) {
		sched_err("Null pointer passed in!");
		return;
	}

	qdf_spin_lock_irqsave(&timer->platform_info.spinlock);

	switch (timer->state) {
	case QDF_TIMER_STATE_STARTING:
		/* we are in this state because someone just started the timer,
		 * MC timer got started and expired, but the time content have
		 * not been updated this is a rare race condition!
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_STOPPED:
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_UNUSED:
		status = QDF_STATUS_E_EXISTS;
		break;

	case QDF_TIMER_STATE_RUNNING:
		/* need to go to stop state here because the call-back function
		 * may restart timer (to emulate periodic timer)
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		/* copy the relevant timer information to local variables;
		 * once we exits from this critical section, the timer content
		 * may be modified by other tasks
		 */
		callback = timer->callback;
		user_data = timer->user_data;
		type = timer->type;
		status = QDF_STATUS_SUCCESS;
		break;

	default:
		QDF_ASSERT(0);
		status = QDF_STATUS_E_FAULT;
		break;
	}

	qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);

	if (QDF_STATUS_SUCCESS != status) {
		/* NOTE(review): timer->state is read here after the lock was
		 * released, so the logged value may be stale or racing with
		 * another task; consider capturing it under the lock above
		 */
		sched_err("TIMER callback called in a wrong state=%d",
			  timer->state);
		return;
	}

	qdf_try_allowing_sleep(type);

	if (callback == NULL) {
		sched_err("No TIMER callback, Couldn't enqueue timer to any queue");
		QDF_ASSERT(0);
		return;
	}

	/* serialize to scheduler controller thread */
	msg.type = SYS_MSG_ID_MC_TIMER;
	msg.reserved = SYS_MSG_COOKIE;
	msg.callback = callback;
	msg.bodyptr = user_data;
	msg.bodyval = 0;

	if (scheduler_post_msg(QDF_MODULE_ID_SYS, &msg) == QDF_STATUS_SUCCESS)
		return;
	sched_err("Could not enqueue timer to timer queue");
}
677