1cd395495SRajeev Kumar /* 22f4b444fSVivek * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved. 3*8cfe6b10SAsutosh Mohapatra * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4cd395495SRajeev Kumar * 5cd395495SRajeev Kumar * Permission to use, copy, modify, and/or distribute this software for 6cd395495SRajeev Kumar * any purpose with or without fee is hereby granted, provided that the 7cd395495SRajeev Kumar * above copyright notice and this permission notice appear in all 8cd395495SRajeev Kumar * copies. 9cd395495SRajeev Kumar * 10cd395495SRajeev Kumar * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11cd395495SRajeev Kumar * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12cd395495SRajeev Kumar * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13cd395495SRajeev Kumar * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14cd395495SRajeev Kumar * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15cd395495SRajeev Kumar * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16cd395495SRajeev Kumar * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17cd395495SRajeev Kumar * PERFORMANCE OF THIS SOFTWARE. 
18cd395495SRajeev Kumar */ 19cd395495SRajeev Kumar 20cd395495SRajeev Kumar #include <scheduler_api.h> 21cd395495SRajeev Kumar #include <scheduler_core.h> 2273c05a80SRajeev Kumar #include <qdf_atomic.h> 2311f5a63aSNaga #include <qdf_module.h> 246d768494SArun Kumar Khandavalli #include <qdf_platform.h> 2573c05a80SRajeev Kumar 26901120c0Ssandhu struct sched_qdf_mc_timer_cb_wrapper { 27901120c0Ssandhu qdf_mc_timer_callback_t timer_callback; 28901120c0Ssandhu void *data; 29901120c0Ssandhu }; 30901120c0Ssandhu 31e0c9f669SDustin Brown QDF_STATUS scheduler_disable(void) 32cd395495SRajeev Kumar { 33e0c9f669SDustin Brown struct scheduler_ctx *sched_ctx; 3491abaccbSDustin Brown 358afde5a8SDustin Brown sched_debug("Disabling Scheduler"); 36e0c9f669SDustin Brown 37e0c9f669SDustin Brown sched_ctx = scheduler_get_context(); 38f4c76f93SDustin Brown QDF_BUG(sched_ctx); 398afde5a8SDustin Brown if (!sched_ctx) 40f4c76f93SDustin Brown return QDF_STATUS_E_INVAL; 41bac753d9SDustin Brown 4245a38684SArun Kumar Khandavalli if (!sched_ctx->sch_thread) { 4345a38684SArun Kumar Khandavalli sched_debug("Scheduler already disabled"); 4445a38684SArun Kumar Khandavalli return QDF_STATUS_SUCCESS; 4545a38684SArun Kumar Khandavalli } 4645a38684SArun Kumar Khandavalli 47f4c76f93SDustin Brown /* send shutdown signal to scheduler thread */ 48ef615e76SHouston Hoffman qdf_atomic_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag); 49ef615e76SHouston Hoffman qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag); 50cd395495SRajeev Kumar qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue); 51bac753d9SDustin Brown 52f4c76f93SDustin Brown /* wait for scheduler thread to shutdown */ 53cd395495SRajeev Kumar qdf_wait_single_event(&sched_ctx->sch_shutdown, 0); 547b329469SDustin Brown sched_ctx->sch_thread = NULL; 55cd395495SRajeev Kumar 56f4c76f93SDustin Brown /* flush any unprocessed scheduler messages */ 57c7ee85c4SDustin Brown scheduler_queues_flush(sched_ctx); 58cd395495SRajeev Kumar 
59cd395495SRajeev Kumar return QDF_STATUS_SUCCESS; 60cd395495SRajeev Kumar } 61cd395495SRajeev Kumar 62e226cebdSDustin Brown static inline void scheduler_watchdog_notify(struct scheduler_ctx *sched) 63e226cebdSDustin Brown { 64bac753d9SDustin Brown char symbol[QDF_SYMBOL_LEN]; 65e226cebdSDustin Brown 66e226cebdSDustin Brown if (sched->watchdog_callback) 67e226cebdSDustin Brown qdf_sprint_symbol(symbol, sched->watchdog_callback); 68e226cebdSDustin Brown 69bea437e2SVivek sched_fatal("Callback %s (type 0x%x) exceeded its allotted time of %ds", 70bac753d9SDustin Brown sched->watchdog_callback ? symbol : "<null>", 71bea437e2SVivek sched->watchdog_msg_type, 722f4b444fSVivek sched->timeout / 1000); 73e226cebdSDustin Brown } 74e226cebdSDustin Brown 75bac753d9SDustin Brown static void scheduler_watchdog_timeout(void *arg) 76e226cebdSDustin Brown { 77bac753d9SDustin Brown struct scheduler_ctx *sched = arg; 78bac753d9SDustin Brown 796d768494SArun Kumar Khandavalli if (qdf_is_recovering()) { 806d768494SArun Kumar Khandavalli sched_debug("Recovery is in progress ignore timeout"); 816d768494SArun Kumar Khandavalli return; 826d768494SArun Kumar Khandavalli } 836d768494SArun Kumar Khandavalli 84bac753d9SDustin Brown scheduler_watchdog_notify(sched); 857b329469SDustin Brown if (sched->sch_thread) 867b329469SDustin Brown qdf_print_thread_trace(sched->sch_thread); 87bac753d9SDustin Brown 88bac753d9SDustin Brown /* avoid crashing during shutdown */ 89ef615e76SHouston Hoffman if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK, &sched->sch_event_flag)) 90bac753d9SDustin Brown return; 91bac753d9SDustin Brown 92*8cfe6b10SAsutosh Mohapatra sched_err("Triggering self recovery on sheduler timeout"); 93*8cfe6b10SAsutosh Mohapatra qdf_trigger_self_recovery(NULL, QDF_SCHED_TIMEOUT); 94e226cebdSDustin Brown } 95e226cebdSDustin Brown 96e0c9f669SDustin Brown QDF_STATUS scheduler_enable(void) 97cd395495SRajeev Kumar { 98e0c9f669SDustin Brown struct scheduler_ctx *sched_ctx; 99f4c76f93SDustin Brown 
1008afde5a8SDustin Brown sched_debug("Enabling Scheduler"); 10191abaccbSDustin Brown 102e0c9f669SDustin Brown sched_ctx = scheduler_get_context(); 103f4c76f93SDustin Brown QDF_BUG(sched_ctx); 1048afde5a8SDustin Brown if (!sched_ctx) 105f4c76f93SDustin Brown return QDF_STATUS_E_INVAL; 10691abaccbSDustin Brown 107e0c9f669SDustin Brown qdf_atomic_clear_bit(MC_SHUTDOWN_EVENT_MASK, 108e0c9f669SDustin Brown &sched_ctx->sch_event_flag); 109e0c9f669SDustin Brown qdf_atomic_clear_bit(MC_POST_EVENT_MASK, 110e0c9f669SDustin Brown &sched_ctx->sch_event_flag); 111e0c9f669SDustin Brown 112e0c9f669SDustin Brown /* create the scheduler thread */ 113e0c9f669SDustin Brown sched_ctx->sch_thread = qdf_create_thread(scheduler_thread, sched_ctx, 114e0c9f669SDustin Brown "scheduler_thread"); 1151f55ed1aSChaitanya Kiran Godavarthi if (!sched_ctx->sch_thread) { 116bea437e2SVivek sched_fatal("Failed to create scheduler thread"); 117e0c9f669SDustin Brown return QDF_STATUS_E_RESOURCES; 118e0c9f669SDustin Brown } 119e0c9f669SDustin Brown 1208afde5a8SDustin Brown sched_debug("Scheduler thread created"); 121e0c9f669SDustin Brown 122e0c9f669SDustin Brown /* wait for the scheduler thread to startup */ 123e0c9f669SDustin Brown qdf_wake_up_process(sched_ctx->sch_thread); 124e0c9f669SDustin Brown qdf_wait_single_event(&sched_ctx->sch_start_event, 0); 125e0c9f669SDustin Brown 1268afde5a8SDustin Brown sched_debug("Scheduler thread started"); 127e0c9f669SDustin Brown 128e0c9f669SDustin Brown return QDF_STATUS_SUCCESS; 129e0c9f669SDustin Brown } 130e0c9f669SDustin Brown 131e0c9f669SDustin Brown QDF_STATUS scheduler_init(void) 132e0c9f669SDustin Brown { 133e0c9f669SDustin Brown QDF_STATUS status; 134e0c9f669SDustin Brown struct scheduler_ctx *sched_ctx; 135e0c9f669SDustin Brown 1368afde5a8SDustin Brown sched_debug("Initializing Scheduler"); 137e0c9f669SDustin Brown 138e0c9f669SDustin Brown status = scheduler_create_ctx(); 139e0c9f669SDustin Brown if (QDF_IS_STATUS_ERROR(status)) { 140bea437e2SVivek 
sched_fatal("Failed to create context; status:%d", status); 141e0c9f669SDustin Brown return status; 142e0c9f669SDustin Brown } 143e0c9f669SDustin Brown 144e0c9f669SDustin Brown sched_ctx = scheduler_get_context(); 145e0c9f669SDustin Brown QDF_BUG(sched_ctx); 146e0c9f669SDustin Brown if (!sched_ctx) { 147e0c9f669SDustin Brown status = QDF_STATUS_E_FAILURE; 148e0c9f669SDustin Brown goto ctx_destroy; 149e0c9f669SDustin Brown } 150e0c9f669SDustin Brown 151e0c9f669SDustin Brown status = scheduler_queues_init(sched_ctx); 152e0c9f669SDustin Brown if (QDF_IS_STATUS_ERROR(status)) { 153bea437e2SVivek sched_fatal("Failed to init queues; status:%d", status); 154e0c9f669SDustin Brown goto ctx_destroy; 155e0c9f669SDustin Brown } 156e0c9f669SDustin Brown 157f4c76f93SDustin Brown status = qdf_event_create(&sched_ctx->sch_start_event); 158f4c76f93SDustin Brown if (QDF_IS_STATUS_ERROR(status)) { 159bea437e2SVivek sched_fatal("Failed to create start event; status:%d", status); 160e0c9f669SDustin Brown goto queues_deinit; 161f4c76f93SDustin Brown } 162f4c76f93SDustin Brown 163f4c76f93SDustin Brown status = qdf_event_create(&sched_ctx->sch_shutdown); 164f4c76f93SDustin Brown if (QDF_IS_STATUS_ERROR(status)) { 165bea437e2SVivek sched_fatal("Failed to create shutdown event; status:%d", 166bea437e2SVivek status); 167f4c76f93SDustin Brown goto start_event_destroy; 168f4c76f93SDustin Brown } 169f4c76f93SDustin Brown 170f4c76f93SDustin Brown status = qdf_event_create(&sched_ctx->resume_sch_event); 171f4c76f93SDustin Brown if (QDF_IS_STATUS_ERROR(status)) { 172bea437e2SVivek sched_fatal("Failed to create resume event; status:%d", status); 173f4c76f93SDustin Brown goto shutdown_event_destroy; 174f4c76f93SDustin Brown } 175f4c76f93SDustin Brown 176cd395495SRajeev Kumar qdf_spinlock_create(&sched_ctx->sch_thread_lock); 177cd395495SRajeev Kumar qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue); 178cd395495SRajeev Kumar sched_ctx->sch_event_flag = 0; 17997f44cd3SVivek sched_ctx->timeout = 
SCHEDULER_WATCHDOG_TIMEOUT; 180e226cebdSDustin Brown qdf_timer_init(NULL, 181e226cebdSDustin Brown &sched_ctx->watchdog_timer, 182bac753d9SDustin Brown &scheduler_watchdog_timeout, 183e226cebdSDustin Brown sched_ctx, 184e226cebdSDustin Brown QDF_TIMER_TYPE_SW); 185e226cebdSDustin Brown 186e0c9f669SDustin Brown qdf_register_mc_timer_callback(scheduler_mc_timer_callback); 18791abaccbSDustin Brown 188cd395495SRajeev Kumar return QDF_STATUS_SUCCESS; 189f4c76f93SDustin Brown 190f4c76f93SDustin Brown shutdown_event_destroy: 191f4c76f93SDustin Brown qdf_event_destroy(&sched_ctx->sch_shutdown); 192f4c76f93SDustin Brown 193f4c76f93SDustin Brown start_event_destroy: 194f4c76f93SDustin Brown qdf_event_destroy(&sched_ctx->sch_start_event); 195f4c76f93SDustin Brown 196f4c76f93SDustin Brown queues_deinit: 197f4c76f93SDustin Brown scheduler_queues_deinit(sched_ctx); 198f4c76f93SDustin Brown 199f4c76f93SDustin Brown ctx_destroy: 200f4c76f93SDustin Brown scheduler_destroy_ctx(); 201f4c76f93SDustin Brown 202f4c76f93SDustin Brown return status; 203cd395495SRajeev Kumar } 204cd395495SRajeev Kumar 205cd395495SRajeev Kumar QDF_STATUS scheduler_deinit(void) 206cd395495SRajeev Kumar { 207e0c9f669SDustin Brown QDF_STATUS status; 208e0c9f669SDustin Brown struct scheduler_ctx *sched_ctx; 209cd395495SRajeev Kumar 2108afde5a8SDustin Brown sched_debug("Deinitializing Scheduler"); 21191abaccbSDustin Brown 212e0c9f669SDustin Brown sched_ctx = scheduler_get_context(); 213e0c9f669SDustin Brown QDF_BUG(sched_ctx); 2148afde5a8SDustin Brown if (!sched_ctx) 215e0c9f669SDustin Brown return QDF_STATUS_E_INVAL; 21691abaccbSDustin Brown 217e0c9f669SDustin Brown qdf_timer_free(&sched_ctx->watchdog_timer); 218e0c9f669SDustin Brown qdf_spinlock_destroy(&sched_ctx->sch_thread_lock); 219e0c9f669SDustin Brown qdf_event_destroy(&sched_ctx->resume_sch_event); 220e0c9f669SDustin Brown qdf_event_destroy(&sched_ctx->sch_shutdown); 221e0c9f669SDustin Brown qdf_event_destroy(&sched_ctx->sch_start_event); 
222f4c76f93SDustin Brown 223e0c9f669SDustin Brown status = scheduler_queues_deinit(sched_ctx); 224e0c9f669SDustin Brown if (QDF_IS_STATUS_ERROR(status)) 225e0c9f669SDustin Brown sched_err("Failed to deinit queues; status:%d", status); 226e0c9f669SDustin Brown 227e0c9f669SDustin Brown status = scheduler_destroy_ctx(); 228e0c9f669SDustin Brown if (QDF_IS_STATUS_ERROR(status)) 229e0c9f669SDustin Brown sched_err("Failed to destroy context; status:%d", status); 230e0c9f669SDustin Brown 231e0c9f669SDustin Brown return QDF_STATUS_SUCCESS; 232cd395495SRajeev Kumar } 233cd395495SRajeev Kumar 2346e4b9c54Sgaurank kathpalia QDF_STATUS scheduler_post_msg_by_priority(uint32_t qid, 235c7ee85c4SDustin Brown struct scheduler_msg *msg, 236c7ee85c4SDustin Brown bool is_high_priority) 237cd395495SRajeev Kumar { 238cd395495SRajeev Kumar uint8_t qidx; 239c7ee85c4SDustin Brown struct scheduler_mq_type *target_mq; 240c7ee85c4SDustin Brown struct scheduler_msg *queue_msg; 241c7ee85c4SDustin Brown struct scheduler_ctx *sched_ctx; 2426e4b9c54Sgaurank kathpalia uint16_t src_id; 2436e4b9c54Sgaurank kathpalia uint16_t dest_id; 244302a1d97Sgaurank kathpalia uint16_t que_id; 245cd395495SRajeev Kumar 2468afde5a8SDustin Brown QDF_BUG(msg); 2478afde5a8SDustin Brown if (!msg) 248d2cd9eabSDustin Brown return QDF_STATUS_E_INVAL; 249d2cd9eabSDustin Brown 250c7ee85c4SDustin Brown sched_ctx = scheduler_get_context(); 2518afde5a8SDustin Brown QDF_BUG(sched_ctx); 2528afde5a8SDustin Brown if (!sched_ctx) 253d2cd9eabSDustin Brown return QDF_STATUS_E_INVAL; 254d2cd9eabSDustin Brown 255d2cd9eabSDustin Brown if (!sched_ctx->sch_thread) { 256d2cd9eabSDustin Brown sched_err("Cannot post message; scheduler thread is stopped"); 257cd395495SRajeev Kumar return QDF_STATUS_E_FAILURE; 258cd395495SRajeev Kumar } 259cd395495SRajeev Kumar 260c7ee85c4SDustin Brown if (msg->reserved != 0 && msg->reserved != SYS_MSG_COOKIE) { 261fe41df9cSDustin Brown QDF_DEBUG_PANIC("Scheduler messages must be initialized"); 
262afc63bc8SRajeev Kumar return QDF_STATUS_E_FAILURE; 263afc63bc8SRajeev Kumar } 264afc63bc8SRajeev Kumar 2656e4b9c54Sgaurank kathpalia dest_id = scheduler_get_dest_id(qid); 2666e4b9c54Sgaurank kathpalia src_id = scheduler_get_src_id(qid); 267302a1d97Sgaurank kathpalia que_id = scheduler_get_que_id(qid); 2686e4b9c54Sgaurank kathpalia 269302a1d97Sgaurank kathpalia if (que_id >= QDF_MODULE_ID_MAX || src_id >= QDF_MODULE_ID_MAX || 270302a1d97Sgaurank kathpalia dest_id >= QDF_MODULE_ID_MAX) { 2716e4b9c54Sgaurank kathpalia sched_err("Src_id/Dest_id invalid, cannot post message"); 2726e4b9c54Sgaurank kathpalia return QDF_STATUS_E_FAILURE; 2736e4b9c54Sgaurank kathpalia } 27445c28558SJeff Johnson /* Target_If is a special message queue in phase 3 convergence because 275cd395495SRajeev Kumar * its used by both legacy WMA and as well as new UMAC components which 276cd395495SRajeev Kumar * directly populate callback handlers in message body. 277cd395495SRajeev Kumar * 1) WMA legacy messages should not have callback 278cd395495SRajeev Kumar * 2) New target_if message needs to have valid callback 279cd395495SRajeev Kumar * Clear callback handler for legacy WMA messages such that in case 280cd395495SRajeev Kumar * if someone is sending legacy WMA message from stack which has 281cd395495SRajeev Kumar * uninitialized callback then its handled properly. Also change 282cd395495SRajeev Kumar * legacy WMA message queue id to target_if queue such that its always 283cd395495SRajeev Kumar * handled in right order. 
284cd395495SRajeev Kumar */ 285302a1d97Sgaurank kathpalia if (QDF_MODULE_ID_WMA == que_id) { 286c7ee85c4SDustin Brown msg->callback = NULL; 287cd395495SRajeev Kumar /* change legacy WMA message id to new target_if mq id */ 288302a1d97Sgaurank kathpalia que_id = QDF_MODULE_ID_TARGET_IF; 289cd395495SRajeev Kumar } 2905e652ebbSgaurank kathpalia qdf_mtrace(src_id, dest_id, msg->type, 0xFF, 0); 291cd395495SRajeev Kumar 292302a1d97Sgaurank kathpalia qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[que_id]; 293cd395495SRajeev Kumar if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) { 29491abaccbSDustin Brown sched_err("Scheduler is deinitialized ignore msg"); 295cd395495SRajeev Kumar return QDF_STATUS_E_FAILURE; 296cd395495SRajeev Kumar } 29791abaccbSDustin Brown 298cd395495SRajeev Kumar if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) { 299d0c05845SAsutosh Mohapatra sched_err("callback not registered for qid[%d]", que_id); 300cd395495SRajeev Kumar return QDF_STATUS_E_FAILURE; 301cd395495SRajeev Kumar } 30291abaccbSDustin Brown 303cd395495SRajeev Kumar target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]); 304cd395495SRajeev Kumar 305c7ee85c4SDustin Brown queue_msg = scheduler_core_msg_dup(msg); 306c7ee85c4SDustin Brown if (!queue_msg) 307c7ee85c4SDustin Brown return QDF_STATUS_E_NOMEM; 308cd395495SRajeev Kumar 309cd395495SRajeev Kumar if (is_high_priority) 310c7ee85c4SDustin Brown scheduler_mq_put_front(target_mq, queue_msg); 311cd395495SRajeev Kumar else 312c7ee85c4SDustin Brown scheduler_mq_put(target_mq, queue_msg); 313cd395495SRajeev Kumar 314ef615e76SHouston Hoffman qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag); 315cd395495SRajeev Kumar qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue); 316cd395495SRajeev Kumar 317cd395495SRajeev Kumar return QDF_STATUS_SUCCESS; 318cd395495SRajeev Kumar } 319cd395495SRajeev Kumar 320cd395495SRajeev Kumar QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid, 321cd395495SRajeev Kumar 
scheduler_msg_process_fn_t callback) 322cd395495SRajeev Kumar { 323cd395495SRajeev Kumar struct scheduler_mq_ctx *ctx; 324cd395495SRajeev Kumar struct scheduler_ctx *sched_ctx = scheduler_get_context(); 325cd395495SRajeev Kumar 32691abaccbSDustin Brown sched_enter(); 32791abaccbSDustin Brown 3288afde5a8SDustin Brown QDF_BUG(sched_ctx); 3298afde5a8SDustin Brown if (!sched_ctx) 330cd395495SRajeev Kumar return QDF_STATUS_E_FAILURE; 331cd395495SRajeev Kumar 332cd395495SRajeev Kumar if (sched_ctx->sch_last_qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) { 33391abaccbSDustin Brown sched_err("Already registered max %d no of message queues", 334cd395495SRajeev Kumar SCHEDULER_NUMBER_OF_MSG_QUEUE); 335cd395495SRajeev Kumar return QDF_STATUS_E_FAILURE; 336cd395495SRajeev Kumar } 337cd395495SRajeev Kumar 338cd395495SRajeev Kumar ctx = &sched_ctx->queue_ctx; 339cd395495SRajeev Kumar ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx; 340cd395495SRajeev Kumar ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid; 341cd395495SRajeev Kumar ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback; 342cd395495SRajeev Kumar sched_ctx->sch_last_qidx++; 34391abaccbSDustin Brown 34491abaccbSDustin Brown sched_exit(); 34591abaccbSDustin Brown 346cd395495SRajeev Kumar return QDF_STATUS_SUCCESS; 347cd395495SRajeev Kumar } 348cd395495SRajeev Kumar 349cd395495SRajeev Kumar QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid) 350cd395495SRajeev Kumar { 351cd395495SRajeev Kumar struct scheduler_mq_ctx *ctx; 352cd395495SRajeev Kumar struct scheduler_ctx *sched_ctx = scheduler_get_context(); 353cd395495SRajeev Kumar uint8_t qidx; 354cd395495SRajeev Kumar 35591abaccbSDustin Brown sched_enter(); 35691abaccbSDustin Brown 3578afde5a8SDustin Brown QDF_BUG(sched_ctx); 3588afde5a8SDustin Brown if (!sched_ctx) 359cd395495SRajeev Kumar return QDF_STATUS_E_FAILURE; 36091abaccbSDustin Brown 361cd395495SRajeev Kumar ctx = &sched_ctx->queue_ctx; 362cd395495SRajeev Kumar qidx = 
ctx->scheduler_msg_qid_to_qidx[qid]; 363cd395495SRajeev Kumar ctx->scheduler_msg_process_fn[qidx] = NULL; 36458cac671SYuanyuan Liu sched_ctx->sch_last_qidx--; 365cd395495SRajeev Kumar ctx->scheduler_msg_qid_to_qidx[qidx] = SCHEDULER_NUMBER_OF_MSG_QUEUE; 36691abaccbSDustin Brown 36791abaccbSDustin Brown sched_exit(); 36891abaccbSDustin Brown 369cd395495SRajeev Kumar return QDF_STATUS_SUCCESS; 370cd395495SRajeev Kumar } 371cd395495SRajeev Kumar 372cd395495SRajeev Kumar void scheduler_resume(void) 373cd395495SRajeev Kumar { 374cd395495SRajeev Kumar struct scheduler_ctx *sched_ctx = scheduler_get_context(); 375cd395495SRajeev Kumar 376cd395495SRajeev Kumar if (sched_ctx) 377cd395495SRajeev Kumar qdf_event_set(&sched_ctx->resume_sch_event); 378cd395495SRajeev Kumar } 379cd395495SRajeev Kumar 380cd395495SRajeev Kumar void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback) 381cd395495SRajeev Kumar { 382cd395495SRajeev Kumar struct scheduler_ctx *sched_ctx = scheduler_get_context(); 383cd395495SRajeev Kumar 384cd395495SRajeev Kumar if (sched_ctx) 385cd395495SRajeev Kumar sched_ctx->hdd_callback = callback; 386cd395495SRajeev Kumar } 387cd395495SRajeev Kumar void scheduler_wake_up_controller_thread(void) 388cd395495SRajeev Kumar { 389cd395495SRajeev Kumar struct scheduler_ctx *sched_ctx = scheduler_get_context(); 390cd395495SRajeev Kumar 391cd395495SRajeev Kumar if (sched_ctx) 392cd395495SRajeev Kumar qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue); 393cd395495SRajeev Kumar } 394cd395495SRajeev Kumar void scheduler_set_event_mask(uint32_t event_mask) 395cd395495SRajeev Kumar { 396cd395495SRajeev Kumar struct scheduler_ctx *sched_ctx = scheduler_get_context(); 397cd395495SRajeev Kumar 398cd395495SRajeev Kumar if (sched_ctx) 399ef615e76SHouston Hoffman qdf_atomic_set_bit(event_mask, &sched_ctx->sch_event_flag); 400cd395495SRajeev Kumar } 401cd395495SRajeev Kumar 402cd395495SRajeev Kumar void scheduler_clear_event_mask(uint32_t event_mask) 
403cd395495SRajeev Kumar { 404cd395495SRajeev Kumar struct scheduler_ctx *sched_ctx = scheduler_get_context(); 405cd395495SRajeev Kumar 406cd395495SRajeev Kumar if (sched_ctx) 407ef615e76SHouston Hoffman qdf_atomic_clear_bit(event_mask, &sched_ctx->sch_event_flag); 408cd395495SRajeev Kumar } 409cd395495SRajeev Kumar 410cd395495SRajeev Kumar QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg) 411cd395495SRajeev Kumar { 412cd395495SRajeev Kumar QDF_STATUS status; 413cd395495SRajeev Kumar struct scheduler_ctx *sched_ctx = scheduler_get_context(); 414cd395495SRajeev Kumar QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *); 415cd395495SRajeev Kumar 4168afde5a8SDustin Brown QDF_BUG(msg); 4178afde5a8SDustin Brown if (!msg) 418cd395495SRajeev Kumar return QDF_STATUS_E_FAILURE; 4198afde5a8SDustin Brown 4208afde5a8SDustin Brown QDF_BUG(sched_ctx); 4218afde5a8SDustin Brown if (!sched_ctx) 4228afde5a8SDustin Brown return QDF_STATUS_E_FAILURE; 423cd395495SRajeev Kumar 424cd395495SRajeev Kumar target_if_msg_handler = msg->callback; 425cd395495SRajeev Kumar 42645c28558SJeff Johnson /* Target_If is a special message queue in phase 3 convergence because 427cd395495SRajeev Kumar * its used by both legacy WMA and as well as new UMAC components. New 428cd395495SRajeev Kumar * UMAC components directly pass their message handlers as callback in 429cd395495SRajeev Kumar * message body. 430cd395495SRajeev Kumar * 1) All Legacy WMA messages do not contain message callback so invoke 431cd395495SRajeev Kumar * registered legacy WMA handler. Scheduler message posting APIs 432cd395495SRajeev Kumar * makes sure legacy WMA messages do not have callbacks. 433cd395495SRajeev Kumar * 2) For new messages which have valid callbacks invoke their callbacks 434cd395495SRajeev Kumar * directly. 
435cd395495SRajeev Kumar */ 4368afde5a8SDustin Brown if (!target_if_msg_handler) 437cd395495SRajeev Kumar status = sched_ctx->legacy_wma_handler(msg); 438cd395495SRajeev Kumar else 439cd395495SRajeev Kumar status = target_if_msg_handler(msg); 440cd395495SRajeev Kumar 441cd395495SRajeev Kumar return status; 442cd395495SRajeev Kumar } 443cd395495SRajeev Kumar 444cd395495SRajeev Kumar QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg) 445cd395495SRajeev Kumar { 446cd395495SRajeev Kumar QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *); 447cd395495SRajeev Kumar 4488afde5a8SDustin Brown QDF_BUG(msg); 4498afde5a8SDustin Brown if (!msg) 450cd395495SRajeev Kumar return QDF_STATUS_E_FAILURE; 451cd395495SRajeev Kumar 452cd395495SRajeev Kumar os_if_msg_handler = msg->callback; 453cd395495SRajeev Kumar 4548afde5a8SDustin Brown QDF_BUG(os_if_msg_handler); 4558afde5a8SDustin Brown if (!os_if_msg_handler) 456cd395495SRajeev Kumar return QDF_STATUS_E_FAILURE; 4578afde5a8SDustin Brown 458cd395495SRajeev Kumar os_if_msg_handler(msg); 459cd395495SRajeev Kumar 460cd395495SRajeev Kumar return QDF_STATUS_SUCCESS; 461cd395495SRajeev Kumar } 462cd395495SRajeev Kumar 463901120c0Ssandhu struct sched_qdf_mc_timer_cb_wrapper *scheduler_qdf_mc_timer_init( 464901120c0Ssandhu qdf_mc_timer_callback_t timer_callback, 465901120c0Ssandhu void *data) 466901120c0Ssandhu { 467901120c0Ssandhu struct sched_qdf_mc_timer_cb_wrapper *wrapper_ptr; 468901120c0Ssandhu 469901120c0Ssandhu wrapper_ptr = qdf_mem_malloc(sizeof(*wrapper_ptr)); 470901120c0Ssandhu if (!wrapper_ptr) 471901120c0Ssandhu return NULL; 472901120c0Ssandhu 473901120c0Ssandhu wrapper_ptr->timer_callback = timer_callback; 474901120c0Ssandhu wrapper_ptr->data = data; 475901120c0Ssandhu return wrapper_ptr; 476901120c0Ssandhu } 477901120c0Ssandhu 478901120c0Ssandhu void *scheduler_qdf_mc_timer_deinit_return_data_ptr( 479901120c0Ssandhu struct sched_qdf_mc_timer_cb_wrapper *wrapper_ptr) 480901120c0Ssandhu { 481901120c0Ssandhu 
void *data_ptr; 482901120c0Ssandhu 483901120c0Ssandhu if (!wrapper_ptr) { 484901120c0Ssandhu sched_err("pointer to wrapper ptr is NULL"); 485901120c0Ssandhu return NULL; 486901120c0Ssandhu } 487901120c0Ssandhu 488901120c0Ssandhu data_ptr = wrapper_ptr->data; 489901120c0Ssandhu qdf_mem_free(wrapper_ptr); 490901120c0Ssandhu return data_ptr; 491901120c0Ssandhu } 492901120c0Ssandhu 493901120c0Ssandhu QDF_STATUS scheduler_qdf_mc_timer_callback_t_wrapper(struct scheduler_msg *msg) 494901120c0Ssandhu { 495901120c0Ssandhu struct sched_qdf_mc_timer_cb_wrapper *mc_timer_wrapper; 496901120c0Ssandhu qdf_mc_timer_callback_t timer_cb; 497901120c0Ssandhu 498901120c0Ssandhu mc_timer_wrapper = msg->bodyptr; 499901120c0Ssandhu if (!mc_timer_wrapper) { 500901120c0Ssandhu sched_err("NULL mc_timer_wrapper from msg body"); 501901120c0Ssandhu return QDF_STATUS_E_FAILURE; 502901120c0Ssandhu } 503901120c0Ssandhu 504901120c0Ssandhu timer_cb = mc_timer_wrapper->timer_callback; 505901120c0Ssandhu 506901120c0Ssandhu QDF_BUG(timer_cb); 507901120c0Ssandhu if (!timer_cb) 508901120c0Ssandhu goto sched_qdf_mc_timer_err; 509901120c0Ssandhu 510901120c0Ssandhu timer_cb(mc_timer_wrapper->data); 511901120c0Ssandhu 512901120c0Ssandhu qdf_mem_free(mc_timer_wrapper); 513901120c0Ssandhu return QDF_STATUS_SUCCESS; 514901120c0Ssandhu 515901120c0Ssandhu sched_qdf_mc_timer_err: 516901120c0Ssandhu sched_err("failed to get timer cb is NULL"); 517901120c0Ssandhu qdf_mem_free(mc_timer_wrapper); 518901120c0Ssandhu return QDF_STATUS_E_FAILURE; 519901120c0Ssandhu } 520901120c0Ssandhu 521cd395495SRajeev Kumar QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg) 522cd395495SRajeev Kumar { 523cd395495SRajeev Kumar struct scheduler_ctx *sched_ctx = scheduler_get_context(); 524901120c0Ssandhu scheduler_msg_process_fn_t sched_mc_timer_callback; 525cd395495SRajeev Kumar 5268afde5a8SDustin Brown QDF_BUG(msg); 5278afde5a8SDustin Brown if (!msg) 528cd395495SRajeev Kumar return QDF_STATUS_E_FAILURE; 
529cd395495SRajeev Kumar 5308afde5a8SDustin Brown QDF_BUG(sched_ctx); 5318afde5a8SDustin Brown if (!sched_ctx) 5328afde5a8SDustin Brown return QDF_STATUS_E_FAILURE; 533cd395495SRajeev Kumar 5348afde5a8SDustin Brown /* legacy sys message handler? */ 5358afde5a8SDustin Brown if (msg->reserved != SYS_MSG_COOKIE || msg->type != SYS_MSG_ID_MC_TIMER) 5368afde5a8SDustin Brown return sched_ctx->legacy_sys_handler(msg); 53791abaccbSDustin Brown 538901120c0Ssandhu sched_mc_timer_callback = msg->callback; 539901120c0Ssandhu QDF_BUG(sched_mc_timer_callback); 540901120c0Ssandhu if (!sched_mc_timer_callback) 5418afde5a8SDustin Brown return QDF_STATUS_E_FAILURE; 54291abaccbSDustin Brown 543901120c0Ssandhu return sched_mc_timer_callback(msg); 544cd395495SRajeev Kumar } 545cd395495SRajeev Kumar 5466e2fed8fSSantosh Anbu QDF_STATUS scheduler_mlme_mq_handler(struct scheduler_msg *msg) 5476e2fed8fSSantosh Anbu { 5486e2fed8fSSantosh Anbu scheduler_msg_process_fn_t mlme_msg_handler; 5496e2fed8fSSantosh Anbu 5506e2fed8fSSantosh Anbu QDF_BUG(msg); 5516e2fed8fSSantosh Anbu if (!msg) 5526e2fed8fSSantosh Anbu return QDF_STATUS_E_FAILURE; 5536e2fed8fSSantosh Anbu 5546e2fed8fSSantosh Anbu mlme_msg_handler = msg->callback; 5556e2fed8fSSantosh Anbu 5566e2fed8fSSantosh Anbu QDF_BUG(mlme_msg_handler); 5576e2fed8fSSantosh Anbu if (!mlme_msg_handler) 5586e2fed8fSSantosh Anbu return QDF_STATUS_E_FAILURE; 5596e2fed8fSSantosh Anbu 5606e2fed8fSSantosh Anbu mlme_msg_handler(msg); 5616e2fed8fSSantosh Anbu 5626e2fed8fSSantosh Anbu return QDF_STATUS_SUCCESS; 5636e2fed8fSSantosh Anbu } 5646e2fed8fSSantosh Anbu 5656ecd284eSVignesh Viswanathan QDF_STATUS scheduler_scan_mq_handler(struct scheduler_msg *msg) 5666ecd284eSVignesh Viswanathan { 5676ecd284eSVignesh Viswanathan QDF_STATUS (*scan_q_msg_handler)(struct scheduler_msg *); 5686ecd284eSVignesh Viswanathan 5698afde5a8SDustin Brown QDF_BUG(msg); 5708afde5a8SDustin Brown if (!msg) 5716ecd284eSVignesh Viswanathan return QDF_STATUS_E_FAILURE; 5726ecd284eSVignesh 
/*
 * NOTE(review): the statements below are the tail of a function whose
 * beginning lies above this view (it dispatches a scan-queue message to
 * the handler carried in the message itself). Left logically untouched.
 */
	scan_q_msg_handler = msg->callback;

	/* a scan-queue message without a callback is a programming error */
	QDF_BUG(scan_q_msg_handler);
	if (!scan_q_msg_handler)
		return QDF_STATUS_E_FAILURE;

	scan_q_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_set_watchdog_timeout() - set the scheduler watchdog timeout
 * @timeout: timeout value to store in the scheduler context
 *           (units not visible here — presumably milliseconds; TODO confirm
 *           against the consumer of sched_ctx->timeout)
 *
 * Silently returns if the scheduler context is unavailable (QDF_BUG asserts
 * in debug builds).
 */
void scheduler_set_watchdog_timeout(uint32_t timeout)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return;

	sched_ctx->timeout = timeout;
}

/**
 * scheduler_register_wma_legacy_handler() - register the legacy WMA handler
 * @wma_callback: handler invoked for legacy WMA-bound messages
 *
 * Stores the callback in the scheduler context; does not take ownership of
 * any resources.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if the scheduler
 *         context is unavailable
 */
QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
						 wma_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = wma_callback;

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_register_sys_legacy_handler() - register the legacy SYS handler
 * @sys_callback: handler invoked for legacy SYS-bound messages
 *
 * Counterpart of scheduler_register_wma_legacy_handler() for the SYS queue.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if the scheduler
 *         context is unavailable
 */
QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
						 sys_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = sys_callback;

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_deregister_wma_legacy_handler() - clear the legacy WMA handler
 *
 * Resets the handler slot to NULL; safe to call even if no handler was
 * registered.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if the scheduler
 *         context is unavailable
 */
QDF_STATUS scheduler_deregister_wma_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_deregister_sys_legacy_handler() - clear the legacy SYS handler
 *
 * Counterpart of scheduler_deregister_wma_legacy_handler() for the SYS queue.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if the scheduler
 *         context is unavailable
 */
QDF_STATUS scheduler_deregister_sys_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_msg_flush_mc() - flush callback for queued MC-timer messages
 * @msg: scheduler message carrying a sched_qdf_mc_timer_cb_wrapper in bodyptr
 *
 * Invoked when an unprocessed SYS_MSG_ID_MC_TIMER message is flushed from
 * the scheduler queue; releases the wrapper allocated in
 * scheduler_mc_timer_callback() via the deinit helper so it is not leaked.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static QDF_STATUS scheduler_msg_flush_mc(struct scheduler_msg *msg)
{
	scheduler_qdf_mc_timer_deinit_return_data_ptr(msg->bodyptr);
	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_mc_timer_callback() - OS-timer expiry entry for QDF MC timers
 * @timer: the QDF MC timer that fired
 *
 * Runs in timer (interrupt/softirq-like) context. Under the timer's
 * spinlock it inspects and advances the timer state machine; only a timer
 * found in QDF_TIMER_STATE_RUNNING has its callback/user_data/type copied
 * out and delivered. Delivery is not direct: the callback is wrapped in a
 * heap-allocated sched_qdf_mc_timer_cb_wrapper and posted as a
 * SYS_MSG_ID_MC_TIMER message so it executes serialized on the scheduler
 * (MC) thread. The wrapper is freed here if the post fails, and by
 * scheduler_msg_flush_mc() if the queued message is flushed before
 * processing.
 */
void scheduler_mc_timer_callback(qdf_mc_timer_t *timer)
{
	struct scheduler_msg msg = {0};
	QDF_STATUS status;
	struct sched_qdf_mc_timer_cb_wrapper *mc_timer_wrapper;
	qdf_mc_timer_callback_t callback = NULL;
	void *user_data = NULL;
	QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW;

	QDF_BUG(timer);
	if (!timer)
		return;

	/*
	 * Save the jiffies value in a per-timer context in qdf_mc_timer_t.
	 * It will help the debugger to know the exact time at which the host
	 * stops/expiry of the QDF timer.
	 */
	timer->timer_end_jiffies = jiffies;

	qdf_spin_lock_irqsave(&timer->platform_info.spinlock);

	switch (timer->state) {
	case QDF_TIMER_STATE_STARTING:
		/* we are in this state because someone just started the timer,
		 * MC timer got started and expired, but the time content have
		 * not been updated this is a rare race condition!
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_STOPPED:
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_UNUSED:
		status = QDF_STATUS_E_EXISTS;
		break;

	case QDF_TIMER_STATE_RUNNING:
		/* need to go to stop state here because the call-back function
		 * may restart timer (to emulate periodic timer)
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		/* copy the relevant timer information to local variables;
		 * once we exits from this critical section, the timer content
		 * may be modified by other tasks
		 */
		callback = timer->callback;
		user_data = timer->user_data;
		type = timer->type;
		status = QDF_STATUS_SUCCESS;
		break;

	default:
		/* unknown timer state indicates corruption; assert in debug */
		QDF_ASSERT(0);
		status = QDF_STATUS_E_FAULT;
		break;
	}

	qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);

	/* only the RUNNING case produces SUCCESS; everything else is a no-op */
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_debug("MC timer fired but is not running; skip callback");
		return;
	}

	qdf_try_allowing_sleep(type);

	/* a RUNNING timer with no callback is a programming error */
	QDF_BUG(callback);
	if (!callback)
		return;

	mc_timer_wrapper = scheduler_qdf_mc_timer_init(callback, user_data);
	if (!mc_timer_wrapper) {
		sched_err("failed to allocate sched_qdf_mc_timer_cb_wrapper");
		return;
	}

	/* serialize to scheduler controller thread */
	msg.type = SYS_MSG_ID_MC_TIMER;
	msg.reserved = SYS_MSG_COOKIE;
	msg.callback = scheduler_qdf_mc_timer_callback_t_wrapper;
	msg.bodyptr = mc_timer_wrapper;
	msg.bodyval = 0;
	/* flush_callback frees the wrapper if the message is never processed */
	msg.flush_callback = scheduler_msg_flush_mc;

	status = scheduler_post_message(QDF_MODULE_ID_SCHEDULER,
					QDF_MODULE_ID_SCHEDULER,
					QDF_MODULE_ID_SYS, &msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Could not enqueue timer to timer queue");
		/* post failed, so the flush callback will never run; free here */
		qdf_mem_free(mc_timer_wrapper);
	}
}

/**
 * scheduler_get_queue_size() - get the current depth of a scheduler queue
 * @qid: module ID identifying the queue (WMA is aliased to TARGET_IF)
 * @size: out parameter receiving the number of pending messages
 *
 * NOTE(review): @qid is used to index scheduler_msg_qid_to_qidx[] without a
 * visible range check — callers are presumably trusted to pass a valid
 * QDF_MODULE_ID; confirm the array is sized for the full enum range.
 *
 * Return: QDF_STATUS_SUCCESS with *size populated; QDF_STATUS_E_INVAL if the
 *         scheduler context is unavailable; QDF_STATUS_E_FAILURE if the
 *         queue index is out of range (scheduler deinitialized)
 */
QDF_STATUS scheduler_get_queue_size(QDF_MODULE_ID qid, uint32_t *size)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_ctx *sched_ctx;

	sched_ctx = scheduler_get_context();
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	/* WMA also uses the target_if queue, so replace the QID */
	if (QDF_MODULE_ID_WMA == qid)
		qid = QDF_MODULE_ID_TARGET_IF;

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized");
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);

	*size = qdf_list_size(&target_mq->mq_list);

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_post_message_debug() - post a message, logging caller on failure
 * @src_id: module ID of the sender
 * @dest_id: module ID of the intended receiver
 * @que_id: module ID selecting the target queue
 * @msg: message to post (queued by value per scheduler_post_msg semantics —
 *       TODO confirm; posting internals are outside this view)
 * @line: caller's source line, for the failure log
 * @func: caller's function name, for the failure log
 *
 * Thin wrapper over scheduler_post_msg() that records where a failed post
 * originated. Typically invoked via the scheduler_post_message() macro.
 *
 * Return: the status returned by scheduler_post_msg()
 */
QDF_STATUS scheduler_post_message_debug(QDF_MODULE_ID src_id,
					QDF_MODULE_ID dest_id,
					QDF_MODULE_ID que_id,
					struct scheduler_msg *msg,
					int line,
					const char *func)
{
	QDF_STATUS status;

	status = scheduler_post_msg(scheduler_get_qid(src_id, dest_id, que_id),
				    msg);

	if (QDF_IS_STATUS_ERROR(status))
		sched_err("couldn't post from %d to %d - called from %d, %s",
			  src_id, dest_id, line, func);

	return status;
}

qdf_export_symbol(scheduler_post_message_debug);