/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_api.h>
#include <scheduler_core.h>
#include <qdf_atomic.h>
#include <qdf_module.h>
#include <qdf_platform.h>

/**
 * struct sched_qdf_mc_timer_cb_wrapper - pairs an MC timer callback with its
 *	user data so both can travel through a single scheduler message body
 * @timer_callback: the original qdf_mc_timer callback to invoke on delivery
 * @data: opaque user data to pass to @timer_callback
 *
 * Allocated by scheduler_qdf_mc_timer_init() and freed either by
 * scheduler_qdf_mc_timer_callback_t_wrapper() after the callback runs, or by
 * the message flush path if the message is dropped.
 */
struct sched_qdf_mc_timer_cb_wrapper {
	qdf_mc_timer_callback_t timer_callback;
	void *data;
};

/**
 * scheduler_disable() - stop the scheduler thread and flush pending messages
 *
 * Signals the scheduler thread to shut down, waits for it to exit, then
 * flushes any messages left in the queues. Safe to call when the scheduler
 * is already disabled (returns success without side effects).
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL if there is no
 *	scheduler context
 */
QDF_STATUS scheduler_disable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_debug("Disabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	/* a NULL thread pointer means a prior disable already completed */
	if (!sched_ctx->sch_thread) {
		sched_debug("Scheduler already disabled");
		return QDF_STATUS_SUCCESS;
	}

	/* send shutdown signal to scheduler thread */
	qdf_atomic_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	/* wait for scheduler thread to shutdown */
	/* NOTE(review): timeout argument is 0 — presumably "wait forever";
	 * confirm against the qdf_wait_single_event() contract
	 */
	qdf_wait_single_event(&sched_ctx->sch_shutdown, 0);
	sched_ctx->sch_thread = NULL;

	/* flush any unprocessed scheduler messages */
	scheduler_queues_flush(sched_ctx);

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_watchdog_notify() - log which callback tripped the watchdog
 * @sched: scheduler context holding the offending callback and message type
 *
 * Resolves the stuck callback pointer to a symbol name (when non-NULL) and
 * logs it at fatal level along with the message type and the configured
 * timeout (stored in ms, reported in seconds).
 */
static inline void scheduler_watchdog_notify(struct scheduler_ctx *sched)
{
	char symbol[QDF_SYMBOL_LEN];

	if (sched->watchdog_callback)
		qdf_sprint_symbol(symbol, sched->watchdog_callback);

	/* symbol is only read when watchdog_callback is set, so the
	 * conditional fill above cannot leak uninitialized data here
	 */
	sched_fatal("Callback %s (type 0x%x) exceeded its allotted time of %ds",
		    sched->watchdog_callback ? symbol : "<null>",
		    sched->watchdog_msg_type,
		    sched->timeout / 1000);
}

/**
 * scheduler_watchdog_timeout() - watchdog timer expiry handler
 * @arg: opaque pointer to the scheduler context
 *
 * Logs the stuck callback and the scheduler thread's stack trace, then
 * panics — unless the driver is recovering or shutting down, in which
 * case a bite is expected and the panic is skipped.
 */
static void scheduler_watchdog_timeout(void *arg)
{
	struct scheduler_ctx *sched = arg;

	if (qdf_is_recovering()) {
		sched_debug("Recovery is in progress ignore timeout");
		return;
	}

	scheduler_watchdog_notify(sched);
	if (sched->sch_thread)
		qdf_print_thread_trace(sched->sch_thread);

	/* avoid crashing during shutdown */
	if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK, &sched->sch_event_flag))
		return;

	SCHED_DEBUG_PANIC("Going down for Scheduler Watchdog Bite!");
}

/**
 * scheduler_enable() - create and start the scheduler thread
 *
 * Clears any stale shutdown/post flags left over from a previous disable,
 * creates the scheduler thread, wakes it, and blocks until the thread
 * signals sch_start_event to confirm it is running.
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_INVAL if there is no
 *	scheduler context; QDF_STATUS_E_RESOURCES if thread creation fails
 */
QDF_STATUS scheduler_enable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_debug("Enabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	/* clear flags a previous scheduler_disable() may have left set */
	qdf_atomic_clear_bit(MC_SHUTDOWN_EVENT_MASK,
			     &sched_ctx->sch_event_flag);
	qdf_atomic_clear_bit(MC_POST_EVENT_MASK,
			     &sched_ctx->sch_event_flag);

	/* create the scheduler thread */
	sched_ctx->sch_thread = qdf_create_thread(scheduler_thread, sched_ctx,
						  "scheduler_thread");
	if (!sched_ctx->sch_thread) {
		sched_fatal("Failed to create scheduler thread");
		return QDF_STATUS_E_RESOURCES;
	}

	sched_debug("Scheduler thread created");

	/* wait for the scheduler thread to startup */
	qdf_wake_up_process(sched_ctx->sch_thread);
	qdf_wait_single_event(&sched_ctx->sch_start_event, 0);

	sched_debug("Scheduler thread started");

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_init() - allocate and initialize all scheduler state
 *
 * Creates the scheduler context, its message queues, the start/shutdown/
 * resume events, the thread lock, the wait queue, and the (not yet started)
 * watchdog timer, then registers the scheduler's MC timer callback. Uses
 * goto-based unwind so every resource acquired before a failure is released
 * in reverse order. Does NOT start the scheduler thread — that is
 * scheduler_enable()'s job.
 *
 * Return: QDF_STATUS_SUCCESS, or the failing step's error status
 */
QDF_STATUS scheduler_init(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_debug("Initializing Scheduler");

	status = scheduler_create_ctx();
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to create context; status:%d", status);
		return status;
	}

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		status = QDF_STATUS_E_FAILURE;
		goto ctx_destroy;
	}

	status = scheduler_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to init queues; status:%d", status);
		goto ctx_destroy;
	}

	status = qdf_event_create(&sched_ctx->sch_start_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to create start event; status:%d", status);
		goto queues_deinit;
	}

	status = qdf_event_create(&sched_ctx->sch_shutdown);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to create shutdown event; status:%d",
			    status);
		goto start_event_destroy;
	}

	status = qdf_event_create(&sched_ctx->resume_sch_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to create resume event; status:%d", status);
		goto shutdown_event_destroy;
	}

	qdf_spinlock_create(&sched_ctx->sch_thread_lock);
	qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue);
	sched_ctx->sch_event_flag = 0;
	sched_ctx->timeout = SCHEDULER_WATCHDOG_TIMEOUT;
	qdf_timer_init(NULL,
		       &sched_ctx->watchdog_timer,
		       &scheduler_watchdog_timeout,
		       sched_ctx,
		       QDF_TIMER_TYPE_SW);

	qdf_register_mc_timer_callback(scheduler_mc_timer_callback);

	return QDF_STATUS_SUCCESS;

shutdown_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_shutdown);

start_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_start_event);

queues_deinit:
	scheduler_queues_deinit(sched_ctx);

ctx_destroy:
	scheduler_destroy_ctx();

	return status;
}
/**
 * scheduler_deinit() - tear down all scheduler state created by
 *	scheduler_init()
 *
 * Frees the watchdog timer, the thread lock, and the three events, then
 * deinitializes the queues and destroys the context. Failures in the last
 * two steps are logged but deliberately not propagated — teardown always
 * reports success so callers can continue their own shutdown.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL if there is no
 *	scheduler context
 */
QDF_STATUS scheduler_deinit(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_debug("Deinitializing Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	qdf_timer_free(&sched_ctx->watchdog_timer);
	qdf_spinlock_destroy(&sched_ctx->sch_thread_lock);
	qdf_event_destroy(&sched_ctx->resume_sch_event);
	qdf_event_destroy(&sched_ctx->sch_shutdown);
	qdf_event_destroy(&sched_ctx->sch_start_event);

	status = scheduler_queues_deinit(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to deinit queues; status:%d", status);

	status = scheduler_destroy_ctx();
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to destroy context; status:%d", status);

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_post_msg_by_priority() - duplicate a message onto a module's
 *	queue and wake the scheduler thread
 * @qid: packed queue id encoding source module, destination module and
 *	target queue (unpacked via scheduler_get_src_id/dest_id/que_id)
 * @msg: message to post; duplicated internally, so the caller's copy may
 *	live on the stack
 * @is_high_priority: true to enqueue at the front of the queue
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_INVAL on bad
 *	arguments/context; QDF_STATUS_E_FAILURE on validation or routing
 *	failures; QDF_STATUS_E_NOMEM if the message copy cannot be allocated
 */
QDF_STATUS scheduler_post_msg_by_priority(uint32_t qid,
					  struct scheduler_msg *msg,
					  bool is_high_priority)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_msg *queue_msg;
	struct scheduler_ctx *sched_ctx;
	uint16_t src_id;
	uint16_t dest_id;
	uint16_t que_id;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_INVAL;

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	if (!sched_ctx->sch_thread) {
		sched_err("Cannot post message; scheduler thread is stopped");
		return QDF_STATUS_E_FAILURE;
	}

	/* reserved must be zero-initialized or carry the SYS cookie;
	 * anything else means the caller posted an uninitialized struct
	 */
	if (msg->reserved != 0 && msg->reserved != SYS_MSG_COOKIE) {
		QDF_DEBUG_PANIC("Scheduler messages must be initialized");
		return QDF_STATUS_E_FAILURE;
	}

	dest_id = scheduler_get_dest_id(qid);
	src_id = scheduler_get_src_id(qid);
	que_id = scheduler_get_que_id(qid);

	if (que_id >= QDF_MODULE_ID_MAX || src_id >= QDF_MODULE_ID_MAX ||
	    dest_id >= QDF_MODULE_ID_MAX) {
		sched_err("Src_id/Dest_id invalid, cannot post message");
		return QDF_STATUS_E_FAILURE;
	}
	/* Target_If is a special message queue in phase 3 convergence because
	 * its used by both legacy WMA and as well as new UMAC components which
	 * directly populate callback handlers in message body.
	 * 1) WMA legacy messages should not have callback
	 * 2) New target_if message needs to have valid callback
	 * Clear callback handler for legacy WMA messages such that in case
	 * if someone is sending legacy WMA message from stack which has
	 * uninitialized callback then its handled properly. Also change
	 * legacy WMA message queue id to target_if queue such that its always
	 * handled in right order.
	 */
	if (QDF_MODULE_ID_WMA == que_id) {
		msg->callback = NULL;
		/* change legacy WMA message id to new target_if mq id */
		que_id = QDF_MODULE_ID_TARGET_IF;
	}
	qdf_mtrace(src_id, dest_id, msg->type, 0xFF, 0);

	/* qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE is the "unregistered"
	 * sentinel written by init/deregister
	 */
	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[que_id];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized ignore msg");
		return QDF_STATUS_E_FAILURE;
	}

	if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) {
		sched_err("callback not registered for qid[%d]", que_id);
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);

	/* enqueue a private copy; ownership of the copy passes to the queue */
	queue_msg = scheduler_core_msg_dup(msg);
	if (!queue_msg)
		return QDF_STATUS_E_NOMEM;

	if (is_high_priority)
		scheduler_mq_put_front(target_mq, queue_msg);
	else
		scheduler_mq_put(target_mq, queue_msg);

	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_register_module() - bind a module id to the next free message
 *	queue slot and install its message handler
 * @qid: module id to register
 * @callback: handler the scheduler thread invokes for this module's messages
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if the context is
 *	missing or all SCHEDULER_NUMBER_OF_MSG_QUEUE slots are taken
 */
QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid,
				     scheduler_msg_process_fn_t callback)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	if (sched_ctx->sch_last_qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Already registered max %d no of message queues",
			  SCHEDULER_NUMBER_OF_MSG_QUEUE);
		return QDF_STATUS_E_FAILURE;
	}

	ctx = &sched_ctx->queue_ctx;
	ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx;
	ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid;
	ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback;
	sched_ctx->sch_last_qidx++;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_deregister_module() - remove a module's handler and invalidate
 *	its qid-to-qidx mapping
 * @qid: module id to deregister
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if there is no
 *	scheduler context
 */
QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	uint8_t qidx;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	ctx = &sched_ctx->queue_ctx;
	qidx = ctx->scheduler_msg_qid_to_qidx[qid];
	ctx->scheduler_msg_process_fn[qidx] = NULL;
	sched_ctx->sch_last_qidx--;
	/* NOTE(review): this invalidates the mapping indexed by qidx, but
	 * the map is written as scheduler_msg_qid_to_qidx[qid] = qidx at
	 * registration time — indexing by qid looks intended here; confirm
	 * whether [qidx] is a latent bug that can corrupt another module's
	 * mapping
	 */
	ctx->scheduler_msg_qid_to_qidx[qidx] = SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_resume() - signal the scheduler thread to resume after suspend
 */
void scheduler_resume(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_event_set(&sched_ctx->resume_sch_event);
}

/**
 * scheduler_register_hdd_suspend_callback() - install the HDD callback
 *	invoked by the scheduler thread during suspend handling
 * @callback: HDD suspend callback to store in the scheduler context
 */
void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		sched_ctx->hdd_callback = callback;
}

/**
 * scheduler_wake_up_controller_thread() - wake the scheduler thread without
 *	posting a message or setting an event flag
 */
void scheduler_wake_up_controller_thread(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
}

/**
 * scheduler_set_event_mask() - atomically set an event bit in the
 *	scheduler's event flag word
 * @event_mask: bit index to set (e.g. MC_POST_EVENT_MASK)
 */
void scheduler_set_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_set_bit(event_mask, &sched_ctx->sch_event_flag);
}

/**
 * scheduler_clear_event_mask() - atomically clear an event bit in the
 *	scheduler's event flag word
 * @event_mask: bit index to clear
 */
void scheduler_clear_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_clear_bit(event_mask, &sched_ctx->sch_event_flag);
}

/**
 * scheduler_target_if_mq_handler() - dispatch a message from the target_if
 *	queue to either its embedded callback or the legacy WMA handler
 * @msg: message to dispatch; a NULL msg->callback marks a legacy WMA message
 *
 * Return: status from the invoked handler, or QDF_STATUS_E_FAILURE on bad
 *	arguments
 */
QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	target_if_msg_handler = msg->callback;

	/* Target_If is a special message queue in phase 3 convergence because
	 * its used by both legacy WMA and as well as new UMAC components. New
	 * UMAC components directly pass their message handlers as callback in
	 * message body.
	 * 1) All Legacy WMA messages do not contain message callback so invoke
	 *    registered legacy WMA handler. Scheduler message posting APIs
	 *    makes sure legacy WMA messages do not have callbacks.
	 * 2) For new messages which have valid callbacks invoke their callbacks
	 *    directly.
	 */
	/* NOTE(review): legacy_wma_handler is not NULL-checked here; it is
	 * presumed registered via scheduler_register_wma_legacy_handler()
	 * before any legacy message can arrive — verify against callers
	 */
	if (!target_if_msg_handler)
		status = sched_ctx->legacy_wma_handler(msg);
	else
		status = target_if_msg_handler(msg);

	return status;
}

/**
 * scheduler_os_if_mq_handler() - dispatch an os_if queue message to the
 *	callback embedded in its body
 * @msg: message carrying its own handler in msg->callback
 *
 * Note: the handler's return status is intentionally discarded.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE on NULL msg/callback
 */
QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	os_if_msg_handler = msg->callback;

	QDF_BUG(os_if_msg_handler);
	if (!os_if_msg_handler)
		return QDF_STATUS_E_FAILURE;

	os_if_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_qdf_mc_timer_init() - allocate a wrapper pairing an MC timer
 *	callback with its user data
 * @timer_callback: callback to run when the timer message is delivered
 * @data: user data for @timer_callback
 *
 * Ownership of the returned wrapper transfers to the scheduler message it is
 * attached to; it is freed after callback delivery or on message flush.
 *
 * Return: newly allocated wrapper, or NULL on allocation failure
 */
struct sched_qdf_mc_timer_cb_wrapper *scheduler_qdf_mc_timer_init(
		qdf_mc_timer_callback_t timer_callback,
		void *data)
{
	struct sched_qdf_mc_timer_cb_wrapper *wrapper_ptr;

	wrapper_ptr = qdf_mem_malloc(sizeof(*wrapper_ptr));
	if (!wrapper_ptr)
		return NULL;

	wrapper_ptr->timer_callback = timer_callback;
	wrapper_ptr->data = data;
	return wrapper_ptr;
}

/**
 * scheduler_qdf_mc_timer_deinit_return_data_ptr() - free a timer-callback
 *	wrapper and hand back its user data pointer
 * @wrapper_ptr: wrapper previously created by scheduler_qdf_mc_timer_init()
 *
 * Return: the wrapper's user data pointer, or NULL if @wrapper_ptr is NULL
 */
void *scheduler_qdf_mc_timer_deinit_return_data_ptr(
		struct sched_qdf_mc_timer_cb_wrapper *wrapper_ptr)
{
	void *data_ptr;

	if (!wrapper_ptr) {
		sched_err("pointer to wrapper ptr is NULL");
		return NULL;
	}

	data_ptr = wrapper_ptr->data;
	qdf_mem_free(wrapper_ptr);
	return data_ptr;
}

/**
 * scheduler_qdf_mc_timer_callback_t_wrapper() - unwrap a timer message and
 *	invoke the original MC timer callback
 * @msg: scheduler message whose bodyptr holds the callback wrapper
 *
 * Frees the wrapper on every path after the bodyptr NULL check.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE on NULL wrapper or
 *	NULL callback
 */
QDF_STATUS scheduler_qdf_mc_timer_callback_t_wrapper(struct scheduler_msg *msg)
{
	struct sched_qdf_mc_timer_cb_wrapper *mc_timer_wrapper;
	qdf_mc_timer_callback_t timer_cb;

	mc_timer_wrapper = msg->bodyptr;
	if (!mc_timer_wrapper) {
		sched_err("NULL mc_timer_wrapper from msg body");
		return QDF_STATUS_E_FAILURE;
	}

	timer_cb = mc_timer_wrapper->timer_callback;

	QDF_BUG(timer_cb);
	if (!timer_cb)
		goto sched_qdf_mc_timer_err;

	timer_cb(mc_timer_wrapper->data);

	qdf_mem_free(mc_timer_wrapper);
	return QDF_STATUS_SUCCESS;

sched_qdf_mc_timer_err:
	sched_err("failed to get timer cb is NULL");
	qdf_mem_free(mc_timer_wrapper);
	return QDF_STATUS_E_FAILURE;
}

/**
 * scheduler_timer_q_mq_handler() - dispatch a timer-queue message
 * @msg: message to dispatch
 *
 * Messages not stamped with the SYS cookie and SYS_MSG_ID_MC_TIMER type go
 * to the registered legacy sys handler; MC timer messages invoke the
 * callback carried in the message body.
 *
 * Return: status from the invoked handler, or QDF_STATUS_E_FAILURE on bad
 *	arguments or a missing callback
 */
QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	scheduler_msg_process_fn_t sched_mc_timer_callback;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* legacy sys message handler? */
	if (msg->reserved != SYS_MSG_COOKIE || msg->type != SYS_MSG_ID_MC_TIMER)
		return sched_ctx->legacy_sys_handler(msg);

	sched_mc_timer_callback = msg->callback;
	QDF_BUG(sched_mc_timer_callback);
	if (!sched_mc_timer_callback)
		return QDF_STATUS_E_FAILURE;

	return sched_mc_timer_callback(msg);
}

/**
 * scheduler_mlme_mq_handler() - dispatch an MLME queue message to the
 *	callback embedded in its body
 * @msg: message carrying its own handler in msg->callback
 *
 * Note: the handler's return status is intentionally discarded.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE on NULL msg/callback
 */
QDF_STATUS scheduler_mlme_mq_handler(struct scheduler_msg *msg)
{
	scheduler_msg_process_fn_t mlme_msg_handler;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	mlme_msg_handler = msg->callback;

	QDF_BUG(mlme_msg_handler);
	if (!mlme_msg_handler)
		return QDF_STATUS_E_FAILURE;

	mlme_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_scan_mq_handler() - dispatch a scan queue message to the
 *	callback embedded in its body
 * @msg: message carrying its own handler in msg->callback
 *
 * Note: the handler's return status is intentionally discarded.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE on NULL msg/callback
 */
QDF_STATUS scheduler_scan_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*scan_q_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	scan_q_msg_handler = msg->callback;

	QDF_BUG(scan_q_msg_handler);
	if (!scan_q_msg_handler)
		return QDF_STATUS_E_FAILURE;

	scan_q_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_set_watchdog_timeout() - override the watchdog expiry period
 * @timeout: new timeout; stored as-is (reported as ms elsewhere in this
 *	file — scheduler_watchdog_notify() divides it by 1000 for display)
 */
void scheduler_set_watchdog_timeout(uint32_t timeout)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return;

	sched_ctx->timeout = timeout;
}

/**
 * scheduler_register_wma_legacy_handler() - install the handler for legacy
 *	WMA messages routed through the target_if queue
 * @wma_callback: legacy WMA message handler
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if there is no
 *	scheduler context
 */
QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
						 wma_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = wma_callback;

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_register_sys_legacy_handler() - install the handler for legacy
 *	SYS messages on the timer queue
 * @sys_callback: legacy SYS message handler
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if there is no
 *	scheduler context
 */
QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
						 sys_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = sys_callback;

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_deregister_wma_legacy_handler() - clear the legacy WMA handler
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if there is no
 *	scheduler context
 */
QDF_STATUS scheduler_deregister_wma_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_deregister_sys_legacy_handler() - clear the legacy SYS handler
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if there is no
 *	scheduler context
 */
QDF_STATUS scheduler_deregister_sys_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_msg_flush_mc() - flush callback for undelivered MC timer
 *	messages; frees the callback wrapper without invoking the callback
 * @msg: dropped message whose bodyptr holds the wrapper
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS scheduler_msg_flush_mc(struct scheduler_msg *msg)
{
	scheduler_qdf_mc_timer_deinit_return_data_ptr(msg->bodyptr);
	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_mc_timer_callback() - MC timer expiry entry point; serializes
 *	the user callback onto the scheduler thread
 * @timer: the expired qdf_mc_timer
 *
 * Under the timer's spinlock, transitions the timer state machine and
 * snapshots callback/user_data/type; only a RUNNING timer proceeds (the
 * state goes to STOPPED first so the callback may legally restart it).
 * The callback is then wrapped and posted as a SYS_MSG_ID_MC_TIMER message
 * so it executes on the scheduler thread, not in timer context. The wrapper
 * is freed here if the post fails, otherwise by the message's delivery or
 * flush path.
 */
void scheduler_mc_timer_callback(qdf_mc_timer_t *timer)
{
	struct scheduler_msg msg = {0};
	QDF_STATUS status;
	struct sched_qdf_mc_timer_cb_wrapper *mc_timer_wrapper;
	qdf_mc_timer_callback_t callback = NULL;
	void *user_data = NULL;
	QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW;

	QDF_BUG(timer);
	if (!timer)
		return;

	/*
	 * Save the jiffies value in a per-timer context in qdf_mc_timer_t.
	 * It will help the debugger to know the exact time at which the host
	 * stops/expiry of the QDF timer.
	 */
	timer->timer_end_jiffies = jiffies;

	qdf_spin_lock_irqsave(&timer->platform_info.spinlock);

	switch (timer->state) {
	case QDF_TIMER_STATE_STARTING:
		/* we are in this state because someone just started the timer,
		 * MC timer got started and expired, but the time content have
		 * not been updated this is a rare race condition!
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_STOPPED:
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_UNUSED:
		status = QDF_STATUS_E_EXISTS;
		break;

	case QDF_TIMER_STATE_RUNNING:
		/* need to go to stop state here because the call-back function
		 * may restart timer (to emulate periodic timer)
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		/* copy the relevant timer information to local variables;
		 * once we exits from this critical section, the timer content
		 * may be modified by other tasks
		 */
		callback = timer->callback;
		user_data = timer->user_data;
		type = timer->type;
		status = QDF_STATUS_SUCCESS;
		break;

	default:
		QDF_ASSERT(0);
		status = QDF_STATUS_E_FAULT;
		break;
	}

	qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);

	if (QDF_IS_STATUS_ERROR(status)) {
		sched_debug("MC timer fired but is not running; skip callback");
		return;
	}

	qdf_try_allowing_sleep(type);

	QDF_BUG(callback);
	if (!callback)
		return;

	mc_timer_wrapper = scheduler_qdf_mc_timer_init(callback, user_data);
	if (!mc_timer_wrapper) {
		sched_err("failed to allocate sched_qdf_mc_timer_cb_wrapper");
		return;
	}

	/* serialize to scheduler controller thread */
	msg.type = SYS_MSG_ID_MC_TIMER;
	msg.reserved = SYS_MSG_COOKIE;
	msg.callback = scheduler_qdf_mc_timer_callback_t_wrapper;
	msg.bodyptr = mc_timer_wrapper;
	msg.bodyval = 0;
	msg.flush_callback = scheduler_msg_flush_mc;

	status = scheduler_post_message(QDF_MODULE_ID_SCHEDULER,
					QDF_MODULE_ID_SCHEDULER,
					QDF_MODULE_ID_SYS, &msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Could not enqueue timer to timer queue");
		/* post failed, so ownership did not transfer; free here */
		qdf_mem_free(mc_timer_wrapper);
	}
}

/**
 * scheduler_get_queue_size() - read the current depth of a module's
 *	message queue
 * @qid: module id (WMA is transparently remapped to target_if)
 * @size: out parameter receiving the queue length
 *
 * Return: QDF_STATUS_SUCCESS; QDF_STATUS_E_INVAL if there is no context;
 *	QDF_STATUS_E_FAILURE if the module's queue is not registered
 */
QDF_STATUS scheduler_get_queue_size(QDF_MODULE_ID qid, uint32_t *size)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_ctx *sched_ctx;

	sched_ctx = scheduler_get_context();
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	/* WMA also uses the target_if queue, so replace the QID */
	if (QDF_MODULE_ID_WMA == qid)
		qid = QDF_MODULE_ID_TARGET_IF;

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized");
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);

	*size = qdf_list_size(&target_mq->mq_list);

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_post_message_debug() - post a message and log the call site on
 *	failure
 * @src_id: source module id
 * @dest_id: destination module id
 * @que_id: target queue id
 * @msg: message to post
 * @line: caller's line number, for the failure log
 * @func: caller's function name, for the failure log
 *
 * Return: status from scheduler_post_msg()
 */
QDF_STATUS scheduler_post_message_debug(QDF_MODULE_ID src_id,
					QDF_MODULE_ID dest_id,
					QDF_MODULE_ID que_id,
					struct scheduler_msg *msg,
					int line,
					const char *func)
{
	QDF_STATUS status;

	status = scheduler_post_msg(scheduler_get_qid(src_id, dest_id, que_id),
				    msg);

	if (QDF_IS_STATUS_ERROR(status))
		sched_err("couldn't post from %d to %d - called from %d, %s",
			  src_id, dest_id, line, func);

	return status;
}

qdf_export_symbol(scheduler_post_message_debug);