/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_api.h>
#include <scheduler_core.h>
#include <qdf_atomic.h>
#include <qdf_module.h>
#include <qdf_platform.h>

struct sched_qdf_mc_timer_cb_wrapper {
	qdf_mc_timer_callback_t timer_callback;
	void *data;
};

QDF_STATUS scheduler_disable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_debug("Disabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	if (!sched_ctx->sch_thread) {
		sched_debug("Scheduler already disabled");
		return QDF_STATUS_SUCCESS;
	}

	/* send shutdown signal to scheduler thread */
	qdf_atomic_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	/* wait for scheduler thread to shutdown */
	qdf_wait_single_event(&sched_ctx->sch_shutdown, 0);
	sched_ctx->sch_thread = NULL;

	/* flush any unprocessed scheduler messages */
	scheduler_queues_flush(sched_ctx);

	return QDF_STATUS_SUCCESS;
}

static inline void scheduler_watchdog_notify(struct scheduler_ctx *sched)
{
	char symbol[QDF_SYMBOL_LEN];

	if (sched->watchdog_callback)
		qdf_sprint_symbol(symbol, sched->watchdog_callback);

	sched_fatal("Callback %s (type 0x%x) exceeded its allotted time of %ds",
		    sched->watchdog_callback ? symbol : "<null>",
		    sched->watchdog_msg_type,
		    sched->timeout / 1000);
}

static void scheduler_watchdog_timeout(void *arg)
{
	struct scheduler_ctx *sched = arg;

	if (qdf_is_recovering()) {
		sched_debug("Recovery is in progress; ignoring timeout");
		return;
	}

	scheduler_watchdog_notify(sched);
	if (sched->sch_thread)
		qdf_print_thread_trace(sched->sch_thread);

	/* avoid crashing during shutdown */
	if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK, &sched->sch_event_flag))
		return;

	sched_err("Triggering self recovery on scheduler timeout");
	qdf_trigger_self_recovery(NULL, QDF_SCHED_TIMEOUT);
}
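/*
 * A minimal sketch of how the watchdog relates to message processing. The
 * arm/disarm calls live in the scheduler thread loop (scheduler_core.c; this
 * is an assumption about the companion file, not code from this one): the
 * timer is started before each message callback and stopped when it returns,
 * so scheduler_watchdog_timeout() only fires when a single callback overruns
 * sched_ctx->timeout milliseconds:
 *
 *	sched->watchdog_msg_type = msg->type;
 *	sched->watchdog_callback = msg->callback;
 *	qdf_timer_start(&sched->watchdog_timer, sched->timeout);
 *	status = sched->queue_ctx.scheduler_msg_process_fn[qidx](msg);
 *	qdf_timer_stop(&sched->watchdog_timer);
 */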
QDF_STATUS scheduler_enable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_debug("Enabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	qdf_atomic_clear_bit(MC_SHUTDOWN_EVENT_MASK,
			     &sched_ctx->sch_event_flag);
	qdf_atomic_clear_bit(MC_POST_EVENT_MASK,
			     &sched_ctx->sch_event_flag);

	/* create the scheduler thread */
	sched_ctx->sch_thread = qdf_create_thread(scheduler_thread, sched_ctx,
						  "scheduler_thread");
	if (!sched_ctx->sch_thread) {
		sched_fatal("Failed to create scheduler thread");
		return QDF_STATUS_E_RESOURCES;
	}

	sched_debug("Scheduler thread created");

	/* wait for the scheduler thread to startup */
	qdf_wake_up_process(sched_ctx->sch_thread);
	qdf_wait_single_event(&sched_ctx->sch_start_event, 0);

	sched_debug("Scheduler thread started");

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_init(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_debug("Initializing Scheduler");

	status = scheduler_create_ctx();
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to create context; status:%d", status);
		return status;
	}

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		status = QDF_STATUS_E_FAILURE;
		goto ctx_destroy;
	}

	status = scheduler_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to init queues; status:%d", status);
		goto ctx_destroy;
	}

	status = qdf_event_create(&sched_ctx->sch_start_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to create start event; status:%d", status);
		goto queues_deinit;
	}

	status = qdf_event_create(&sched_ctx->sch_shutdown);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to create shutdown event; status:%d",
			    status);
		goto start_event_destroy;
	}

	status = qdf_event_create(&sched_ctx->resume_sch_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to create resume event; status:%d", status);
		goto shutdown_event_destroy;
	}

	qdf_spinlock_create(&sched_ctx->sch_thread_lock);
	qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue);
	sched_ctx->sch_event_flag = 0;
	sched_ctx->timeout = SCHEDULER_WATCHDOG_TIMEOUT;
	qdf_timer_init(NULL,
		       &sched_ctx->watchdog_timer,
		       &scheduler_watchdog_timeout,
		       sched_ctx,
		       QDF_TIMER_TYPE_SW);

	qdf_register_mc_timer_callback(scheduler_mc_timer_callback);

	return QDF_STATUS_SUCCESS;

shutdown_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_shutdown);

start_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_start_event);

queues_deinit:
	scheduler_queues_deinit(sched_ctx);

ctx_destroy:
	scheduler_destroy_ctx();

	return status;
}
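/*
 * Usage sketch (illustrative only, not taken from this file): a typical
 * driver bring-up pairs scheduler_init() with scheduler_enable(), and the
 * teardown path reverses the order with scheduler_disable() and
 * scheduler_deinit():
 *
 *	status = scheduler_init();       // context, queues, events, watchdog
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 *
 *	status = scheduler_enable();     // spawns the scheduler thread
 *	if (QDF_IS_STATUS_ERROR(status)) {
 *		scheduler_deinit();
 *		return status;
 *	}
 *	...
 *	scheduler_disable();             // stops the thread, flushes queues
 *	scheduler_deinit();              // frees events, lock, and context
 */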
QDF_STATUS scheduler_deinit(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_debug("Deinitializing Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	qdf_timer_free(&sched_ctx->watchdog_timer);
	qdf_spinlock_destroy(&sched_ctx->sch_thread_lock);
	qdf_event_destroy(&sched_ctx->resume_sch_event);
	qdf_event_destroy(&sched_ctx->sch_shutdown);
	qdf_event_destroy(&sched_ctx->sch_start_event);

	status = scheduler_queues_deinit(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to deinit queues; status:%d", status);

	status = scheduler_destroy_ctx();
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to destroy context; status:%d", status);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_post_msg_by_priority(uint32_t qid,
					  struct scheduler_msg *msg,
					  bool is_high_priority)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_msg *queue_msg;
	struct scheduler_ctx *sched_ctx;
	uint16_t src_id;
	uint16_t dest_id;
	uint16_t que_id;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_INVAL;

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	if (!sched_ctx->sch_thread) {
		sched_err("Cannot post message; scheduler thread is stopped");
		return QDF_STATUS_E_FAILURE;
	}

	if (msg->reserved != 0 && msg->reserved != SYS_MSG_COOKIE) {
		QDF_DEBUG_PANIC("Scheduler messages must be initialized");
		return QDF_STATUS_E_FAILURE;
	}

	dest_id = scheduler_get_dest_id(qid);
	src_id = scheduler_get_src_id(qid);
	que_id = scheduler_get_que_id(qid);

	if (que_id >= QDF_MODULE_ID_MAX || src_id >= QDF_MODULE_ID_MAX ||
	    dest_id >= QDF_MODULE_ID_MAX) {
		sched_err("Invalid src_id/dest_id/que_id; cannot post message");
		return QDF_STATUS_E_FAILURE;
	}

	/* Target_If is a special message queue in phase 3 convergence because
	 * it is used both by legacy WMA and by new UMAC components, which
	 * directly populate callback handlers in the message body.
	 * 1) Legacy WMA messages must not carry a callback.
	 * 2) New target_if messages must carry a valid callback.
	 * Clear the callback handler for legacy WMA messages so that a legacy
	 * WMA message posted from the stack with an uninitialized callback is
	 * still handled properly. Also remap the legacy WMA message queue id
	 * to the target_if queue so the messages are always handled in the
	 * right order.
	 */
	if (QDF_MODULE_ID_WMA == que_id) {
		msg->callback = NULL;
		/* change legacy WMA message id to new target_if mq id */
		que_id = QDF_MODULE_ID_TARGET_IF;
	}

	qdf_mtrace(src_id, dest_id, msg->type, 0xFF, 0);

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[que_id];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized; dropping msg");
		return QDF_STATUS_E_FAILURE;
	}

	if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) {
		sched_err("callback not registered for qid[%d]", que_id);
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);

	queue_msg = scheduler_core_msg_dup(msg);
	if (!queue_msg)
		return QDF_STATUS_E_NOMEM;

	if (is_high_priority)
		scheduler_mq_put_front(target_mq, queue_msg);
	else
		scheduler_mq_put(target_mq, queue_msg);

	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	return QDF_STATUS_SUCCESS;
}
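/*
 * Posting sketch (illustrative only): callers normally fill a stack-resident
 * scheduler_msg and post it through one of the wrappers. The message is
 * duplicated by scheduler_core_msg_dup() before it is queued, so the caller's
 * copy may go out of scope immediately. The type, payload, and handler names
 * below are hypothetical:
 *
 *	struct scheduler_msg msg = {0};
 *
 *	msg.type = MY_MSG_TYPE;          // hypothetical message type
 *	msg.bodyptr = my_payload;        // hypothetical payload
 *	msg.callback = my_msg_handler;   // hypothetical handler
 *	status = scheduler_post_message(QDF_MODULE_ID_OS_IF,
 *					QDF_MODULE_ID_OS_IF,
 *					QDF_MODULE_ID_OS_IF, &msg);
 */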
QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid,
				     scheduler_msg_process_fn_t callback)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	if (sched_ctx->sch_last_qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Already registered the maximum of %d message queues",
			  SCHEDULER_NUMBER_OF_MSG_QUEUE);
		return QDF_STATUS_E_FAILURE;
	}

	ctx = &sched_ctx->queue_ctx;
	ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx;
	ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid;
	ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback;
	sched_ctx->sch_last_qidx++;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	uint8_t qidx;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	ctx = &sched_ctx->queue_ctx;
	qidx = ctx->scheduler_msg_qid_to_qidx[qid];
	ctx->scheduler_msg_process_fn[qidx] = NULL;
	sched_ctx->sch_last_qidx--;
	/* invalidate this module's qid -> qidx mapping; the map is indexed
	 * by qid, so marking it out-of-range makes later lookups fail the
	 * SCHEDULER_NUMBER_OF_MSG_QUEUE bounds check
	 */
	ctx->scheduler_msg_qid_to_qidx[qid] = SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}
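/*
 * Registration sketch (illustrative only): components register one handler
 * per module queue during attach and deregister it on detach; the pairing
 * with QDF_MODULE_ID_OS_IF below is an assumption for illustration:
 *
 *	status = scheduler_register_module(QDF_MODULE_ID_OS_IF,
 *					   scheduler_os_if_mq_handler);
 *	...
 *	scheduler_deregister_module(QDF_MODULE_ID_OS_IF);
 */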
void scheduler_resume(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_event_set(&sched_ctx->resume_sch_event);
}

void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		sched_ctx->hdd_callback = callback;
}

void scheduler_wake_up_controller_thread(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
}

void scheduler_set_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_set_bit(event_mask, &sched_ctx->sch_event_flag);
}

void scheduler_clear_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_clear_bit(event_mask, &sched_ctx->sch_event_flag);
}

QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	target_if_msg_handler = msg->callback;

	/* Target_If is a special message queue in phase 3 convergence because
	 * it is used both by legacy WMA and by new UMAC components. New UMAC
	 * components pass their message handlers directly as the callback in
	 * the message body.
	 * 1) Legacy WMA messages carry no message callback, so invoke the
	 *    registered legacy WMA handler. The scheduler message posting
	 *    APIs ensure legacy WMA messages do not have callbacks.
	 * 2) For new messages that carry valid callbacks, invoke the
	 *    callbacks directly.
	 */
	if (!target_if_msg_handler)
		status = sched_ctx->legacy_wma_handler(msg);
	else
		status = target_if_msg_handler(msg);

	return status;
}

QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	os_if_msg_handler = msg->callback;

	QDF_BUG(os_if_msg_handler);
	if (!os_if_msg_handler)
		return QDF_STATUS_E_FAILURE;

	os_if_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

struct sched_qdf_mc_timer_cb_wrapper *scheduler_qdf_mc_timer_init(
		qdf_mc_timer_callback_t timer_callback,
		void *data)
{
	struct sched_qdf_mc_timer_cb_wrapper *wrapper_ptr;

	wrapper_ptr = qdf_mem_malloc(sizeof(*wrapper_ptr));
	if (!wrapper_ptr)
		return NULL;

	wrapper_ptr->timer_callback = timer_callback;
	wrapper_ptr->data = data;
	return wrapper_ptr;
}

void *scheduler_qdf_mc_timer_deinit_return_data_ptr(
		struct sched_qdf_mc_timer_cb_wrapper *wrapper_ptr)
{
	void *data_ptr;

	if (!wrapper_ptr) {
		sched_err("wrapper_ptr is NULL");
		return NULL;
	}

	data_ptr = wrapper_ptr->data;
	qdf_mem_free(wrapper_ptr);
	return data_ptr;
}

QDF_STATUS scheduler_qdf_mc_timer_callback_t_wrapper(struct scheduler_msg *msg)
{
	struct sched_qdf_mc_timer_cb_wrapper *mc_timer_wrapper;
	qdf_mc_timer_callback_t timer_cb;

	mc_timer_wrapper = msg->bodyptr;
	if (!mc_timer_wrapper) {
		sched_err("NULL mc_timer_wrapper from msg body");
		return QDF_STATUS_E_FAILURE;
	}

	timer_cb = mc_timer_wrapper->timer_callback;

	QDF_BUG(timer_cb);
	if (!timer_cb)
		goto sched_qdf_mc_timer_err;

	timer_cb(mc_timer_wrapper->data);

	qdf_mem_free(mc_timer_wrapper);
	return QDF_STATUS_SUCCESS;

sched_qdf_mc_timer_err:
	sched_err("timer callback is NULL");
	qdf_mem_free(mc_timer_wrapper);
	return QDF_STATUS_E_FAILURE;
}
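/*
 * End-to-end MC timer flow, pieced together from the functions in this file
 * (a summary of existing behavior, not new behavior):
 *
 *	timer expires
 *	  -> scheduler_mc_timer_callback()          (timer context)
 *	       allocates a sched_qdf_mc_timer_cb_wrapper carrying the user
 *	       callback + data and posts a SYS_MSG_ID_MC_TIMER message
 *	  -> scheduler_timer_q_mq_handler()         (scheduler thread)
 *	  -> scheduler_qdf_mc_timer_callback_t_wrapper()
 *	       invokes the user callback, then frees the wrapper
 *
 * If the message is flushed before delivery, scheduler_msg_flush_mc() frees
 * the wrapper instead, so the allocation is released on every path.
 */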
QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	scheduler_msg_process_fn_t sched_mc_timer_callback;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* legacy sys message handler? */
	if (msg->reserved != SYS_MSG_COOKIE || msg->type != SYS_MSG_ID_MC_TIMER)
		return sched_ctx->legacy_sys_handler(msg);

	sched_mc_timer_callback = msg->callback;
	QDF_BUG(sched_mc_timer_callback);
	if (!sched_mc_timer_callback)
		return QDF_STATUS_E_FAILURE;

	return sched_mc_timer_callback(msg);
}

QDF_STATUS scheduler_mlme_mq_handler(struct scheduler_msg *msg)
{
	scheduler_msg_process_fn_t mlme_msg_handler;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	mlme_msg_handler = msg->callback;

	QDF_BUG(mlme_msg_handler);
	if (!mlme_msg_handler)
		return QDF_STATUS_E_FAILURE;

	mlme_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_scan_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*scan_q_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	scan_q_msg_handler = msg->callback;

	QDF_BUG(scan_q_msg_handler);
	if (!scan_q_msg_handler)
		return QDF_STATUS_E_FAILURE;

	scan_q_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

void scheduler_set_watchdog_timeout(uint32_t timeout)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return;

	sched_ctx->timeout = timeout;
}

QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
						 wma_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = wma_callback;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
						 sys_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = sys_callback;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_wma_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_sys_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = NULL;

	return QDF_STATUS_SUCCESS;
}
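/*
 * Legacy handler note: the WMA and SYS handlers registered above are
 * fallbacks for messages that predate per-message callbacks. Presumably WMA
 * calls scheduler_register_wma_legacy_handler() during its open/attach path
 * so that scheduler_target_if_mq_handler() has a valid legacy_wma_handler to
 * invoke for callback-less messages; scheduler_timer_q_mq_handler() relies
 * on legacy_sys_handler in the same way for non-MC-timer sys messages.
 */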
static QDF_STATUS scheduler_msg_flush_mc(struct scheduler_msg *msg)
{
	scheduler_qdf_mc_timer_deinit_return_data_ptr(msg->bodyptr);
	return QDF_STATUS_SUCCESS;
}

void scheduler_mc_timer_callback(qdf_mc_timer_t *timer)
{
	struct scheduler_msg msg = {0};
	QDF_STATUS status;
	struct sched_qdf_mc_timer_cb_wrapper *mc_timer_wrapper;
	qdf_mc_timer_callback_t callback = NULL;
	void *user_data = NULL;
	QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW;

	QDF_BUG(timer);
	if (!timer)
		return;

	/*
	 * Save the jiffies value in the per-timer context in qdf_mc_timer_t.
	 * It lets a debugger see the exact time at which the host stopped
	 * the QDF timer, or at which the timer expired.
	 */
	timer->timer_end_jiffies = jiffies;

	qdf_spin_lock_irqsave(&timer->platform_info.spinlock);

	switch (timer->state) {
	case QDF_TIMER_STATE_STARTING:
		/* we are in this state because someone just started the
		 * timer; the MC timer got started and expired, but the timer
		 * content has not been updated yet. This is a rare race
		 * condition!
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_STOPPED:
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_UNUSED:
		status = QDF_STATUS_E_EXISTS;
		break;

	case QDF_TIMER_STATE_RUNNING:
		/* need to go to stop state here because the callback function
		 * may restart the timer (to emulate a periodic timer)
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		/* copy the relevant timer information to local variables;
		 * once we exit this critical section, the timer content may
		 * be modified by other tasks
		 */
		callback = timer->callback;
		user_data = timer->user_data;
		type = timer->type;
		status = QDF_STATUS_SUCCESS;
		break;

	default:
		QDF_ASSERT(0);
		status = QDF_STATUS_E_FAULT;
		break;
	}

	qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);

	if (QDF_IS_STATUS_ERROR(status)) {
		sched_debug("MC timer fired but is not running; skip callback");
		return;
	}

	qdf_try_allowing_sleep(type);

	QDF_BUG(callback);
	if (!callback)
		return;

	mc_timer_wrapper = scheduler_qdf_mc_timer_init(callback, user_data);
	if (!mc_timer_wrapper) {
		sched_err("failed to allocate sched_qdf_mc_timer_cb_wrapper");
		return;
	}

	/* serialize to scheduler controller thread */
	msg.type = SYS_MSG_ID_MC_TIMER;
	msg.reserved = SYS_MSG_COOKIE;
	msg.callback = scheduler_qdf_mc_timer_callback_t_wrapper;
	msg.bodyptr = mc_timer_wrapper;
	msg.bodyval = 0;
	msg.flush_callback = scheduler_msg_flush_mc;

	status = scheduler_post_message(QDF_MODULE_ID_SCHEDULER,
					QDF_MODULE_ID_SCHEDULER,
					QDF_MODULE_ID_SYS, &msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Could not enqueue timer to timer queue");
		qdf_mem_free(mc_timer_wrapper);
	}
}

QDF_STATUS scheduler_get_queue_size(QDF_MODULE_ID qid, uint32_t *size)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_ctx *sched_ctx;

	sched_ctx = scheduler_get_context();
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	/* WMA also uses the target_if queue, so replace the QID */
	if (QDF_MODULE_ID_WMA == qid)
		qid = QDF_MODULE_ID_TARGET_IF;

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized");
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);

	*size = qdf_list_size(&target_mq->mq_list);

	return QDF_STATUS_SUCCESS;
}
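/*
 * Illustrative only: a caller can use scheduler_get_queue_size() to probe
 * backlog before posting, e.g. to shed low-priority work when the target_if
 * queue is congested (the threshold name is hypothetical):
 *
 *	uint32_t qsize;
 *
 *	if (QDF_IS_STATUS_SUCCESS(
 *			scheduler_get_queue_size(QDF_MODULE_ID_TARGET_IF,
 *						 &qsize)) &&
 *	    qsize > MAX_PENDING_MSGS)    // MAX_PENDING_MSGS: hypothetical
 *		return QDF_STATUS_E_BUSY;
 */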
QDF_STATUS scheduler_post_message_debug(QDF_MODULE_ID src_id,
					QDF_MODULE_ID dest_id,
					QDF_MODULE_ID que_id,
					struct scheduler_msg *msg,
					int line,
					const char *func)
{
	QDF_STATUS status;

	status = scheduler_post_msg(scheduler_get_qid(src_id, dest_id, que_id),
				    msg);

	if (QDF_IS_STATUS_ERROR(status))
		sched_err("couldn't post from %d to %d - called from %d, %s",
			  src_id, dest_id, line, func);

	return status;
}

qdf_export_symbol(scheduler_post_message_debug);
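/*
 * Caller-site note: scheduler_post_message() is presumably a convenience
 * macro in scheduler_api.h that expands to scheduler_post_message_debug()
 * with the caller's __LINE__ and __func__, which is what makes the error
 * log above point back at the posting call site:
 *
 *	#define scheduler_post_message(src_id, dest_id, que_id, msg) \
 *		scheduler_post_message_debug(src_id, dest_id, que_id, msg, \
 *					     __LINE__, __func__)
 */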