/*
 * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_api.h>
#include <scheduler_core.h>
#include <qdf_atomic.h>
#include <qdf_module.h>

QDF_STATUS scheduler_disable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_debug("Disabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	/* send shutdown signal to scheduler thread */
	qdf_atomic_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	/* wait for the scheduler thread to shut down */
	qdf_wait_single_event(&sched_ctx->sch_shutdown, 0);
	sched_ctx->sch_thread = NULL;

	/* flush any unprocessed scheduler messages */
	scheduler_queues_flush(sched_ctx);

	return QDF_STATUS_SUCCESS;
}

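/*
 * Watchdog helpers: scheduler_watchdog_timeout() fires when a message
 * callback runs longer than SCHEDULER_WATCHDOG_TIMEOUT. It logs the
 * offending callback and message type and, unless a shutdown is in
 * progress, deliberately panics the system.
 */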
static inline void scheduler_watchdog_notify(struct scheduler_ctx *sched)
{
	char symbol[QDF_SYMBOL_LEN];

	if (sched->watchdog_callback)
		qdf_sprint_symbol(symbol, sched->watchdog_callback);

	sched_fatal("Callback %s (type 0x%x) exceeded its allotted time of %ds",
		    sched->watchdog_callback ? symbol : "<null>",
		    sched->watchdog_msg_type,
		    SCHEDULER_WATCHDOG_TIMEOUT / 1000);
}

static void scheduler_watchdog_timeout(void *arg)
{
	struct scheduler_ctx *sched = arg;

	scheduler_watchdog_notify(sched);
	if (sched->sch_thread)
		qdf_print_thread_trace(sched->sch_thread);

	/* avoid crashing during shutdown */
	if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK, &sched->sch_event_flag))
		return;

	QDF_DEBUG_PANIC("Going down for Scheduler Watchdog Bite!");
}

QDF_STATUS scheduler_enable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_debug("Enabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	qdf_atomic_clear_bit(MC_SHUTDOWN_EVENT_MASK,
			     &sched_ctx->sch_event_flag);
	qdf_atomic_clear_bit(MC_POST_EVENT_MASK,
			     &sched_ctx->sch_event_flag);

	/* create the scheduler thread */
	sched_ctx->sch_thread = qdf_create_thread(scheduler_thread, sched_ctx,
						  "scheduler_thread");
	if (!sched_ctx->sch_thread) {
		sched_fatal("Failed to create scheduler thread");
		return QDF_STATUS_E_RESOURCES;
	}

	sched_debug("Scheduler thread created");

	/* wait for the scheduler thread to start up */
	qdf_wake_up_process(sched_ctx->sch_thread);
	qdf_wait_single_event(&sched_ctx->sch_start_event, 0);

	sched_debug("Scheduler thread started");

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_init(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_debug("Initializing Scheduler");

	status = scheduler_create_ctx();
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to create context; status:%d", status);
		return status;
	}

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		status = QDF_STATUS_E_FAILURE;
		goto ctx_destroy;
	}

	status = scheduler_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to init queues; status:%d", status);
		goto ctx_destroy;
	}

	status = qdf_event_create(&sched_ctx->sch_start_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to create start event; status:%d", status);
		goto queues_deinit;
	}

	status = qdf_event_create(&sched_ctx->sch_shutdown);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to create shutdown event; status:%d",
			    status);
		goto start_event_destroy;
	}

	status = qdf_event_create(&sched_ctx->resume_sch_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_fatal("Failed to create resume event; status:%d", status);
		goto shutdown_event_destroy;
	}

	qdf_spinlock_create(&sched_ctx->sch_thread_lock);
	qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue);
	sched_ctx->sch_event_flag = 0;
	qdf_timer_init(NULL,
		       &sched_ctx->watchdog_timer,
		       &scheduler_watchdog_timeout,
		       sched_ctx,
		       QDF_TIMER_TYPE_SW);

	qdf_register_mc_timer_callback(scheduler_mc_timer_callback);

	return QDF_STATUS_SUCCESS;

shutdown_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_shutdown);

start_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_start_event);

queues_deinit:
	scheduler_queues_deinit(sched_ctx);

ctx_destroy:
	scheduler_destroy_ctx();

	return status;
}

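/*
 * Tears down what scheduler_init() created, in roughly reverse order:
 * watchdog timer, thread lock, events, message queues, and finally the
 * scheduler context. Expected lifecycle (sketch): scheduler_init() ->
 * scheduler_enable() -> ... -> scheduler_disable() -> scheduler_deinit().
 */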
QDF_STATUS scheduler_deinit(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_debug("Deinitializing Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	qdf_timer_free(&sched_ctx->watchdog_timer);
	qdf_spinlock_destroy(&sched_ctx->sch_thread_lock);
	qdf_event_destroy(&sched_ctx->resume_sch_event);
	qdf_event_destroy(&sched_ctx->sch_shutdown);
	qdf_event_destroy(&sched_ctx->sch_start_event);

	status = scheduler_queues_deinit(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to deinit queues; status:%d", status);

	status = scheduler_destroy_ctx();
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to destroy context; status:%d", status);

	return QDF_STATUS_SUCCESS;
}

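/*
 * Core message posting path. Callers normally go through the
 * scheduler_post_message()/scheduler_post_msg() wrappers rather than
 * calling this directly. Illustrative sketch only (field values mirror
 * scheduler_mc_timer_callback() below; the handler and context names
 * are hypothetical):
 *
 *	struct scheduler_msg msg = {0};
 *
 *	msg.type = SYS_MSG_ID_MC_TIMER;
 *	msg.reserved = SYS_MSG_COOKIE;
 *	msg.callback = my_callback;	// hypothetical handler
 *	msg.bodyptr = my_context;	// hypothetical user data
 *	status = scheduler_post_message(QDF_MODULE_ID_SCHEDULER,
 *					QDF_MODULE_ID_SCHEDULER,
 *					QDF_MODULE_ID_SYS, &msg);
 */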
QDF_STATUS scheduler_post_msg_by_priority(uint32_t qid,
					  struct scheduler_msg *msg,
					  bool is_high_priority)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_msg *queue_msg;
	struct scheduler_ctx *sched_ctx;
	uint16_t src_id;
	uint16_t dest_id;
	uint16_t que_id;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_INVAL;

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	if (!sched_ctx->sch_thread) {
		sched_err("Cannot post message; scheduler thread is stopped");
		return QDF_STATUS_E_FAILURE;
	}

	if (msg->reserved != 0 && msg->reserved != SYS_MSG_COOKIE) {
		QDF_DEBUG_PANIC("Scheduler messages must be initialized");
		return QDF_STATUS_E_FAILURE;
	}

	dest_id = scheduler_get_dest_id(qid);
	src_id = scheduler_get_src_id(qid);
	que_id = scheduler_get_que_id(qid);

	if (que_id >= QDF_MODULE_ID_MAX || src_id >= QDF_MODULE_ID_MAX ||
	    dest_id >= QDF_MODULE_ID_MAX) {
		sched_err("Src_id/Dest_id invalid, cannot post message");
		return QDF_STATUS_E_FAILURE;
	}

	/* Target_If is a special message queue in phase 3 convergence because
	 * it is used by both legacy WMA and new UMAC components, which
	 * directly populate callback handlers in the message body.
	 * 1) Legacy WMA messages must not have a callback
	 * 2) New target_if messages must have a valid callback
	 * Clear the callback handler for legacy WMA messages so that a legacy
	 * WMA message posted from the stack with an uninitialized callback is
	 * still handled properly. Also remap the legacy WMA message queue id
	 * to the target_if queue so the messages are always handled in the
	 * right order.
	 */
	if (QDF_MODULE_ID_WMA == que_id) {
		msg->callback = NULL;
		/* change legacy WMA message id to new target_if mq id */
		que_id = QDF_MODULE_ID_TARGET_IF;
	}

	qdf_mtrace(src_id, dest_id, msg->type, 0xFF, 0);

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[que_id];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized ignore msg");
		return QDF_STATUS_E_FAILURE;
	}

	if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) {
		QDF_DEBUG_PANIC("callback not registered for qid[%d]", que_id);
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);

	queue_msg = scheduler_core_msg_dup(msg);
	if (!queue_msg)
		return QDF_STATUS_E_NOMEM;

	if (is_high_priority)
		scheduler_mq_put_front(target_mq, queue_msg);
	else
		scheduler_mq_put(target_mq, queue_msg);

	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid,
				     scheduler_msg_process_fn_t callback)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	if (sched_ctx->sch_last_qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Already registered max %d no of message queues",
			  SCHEDULER_NUMBER_OF_MSG_QUEUE);
		return QDF_STATUS_E_FAILURE;
	}

	ctx = &sched_ctx->queue_ctx;
	ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx;
	ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid;
	ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback;
	sched_ctx->sch_last_qidx++;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	uint8_t qidx;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	ctx = &sched_ctx->queue_ctx;
	qidx = ctx->scheduler_msg_qid_to_qidx[qid];
	ctx->scheduler_msg_process_fn[qidx] = NULL;
	sched_ctx->sch_last_qidx--;
	ctx->scheduler_msg_qid_to_qidx[qidx] = SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

void scheduler_resume(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_event_set(&sched_ctx->resume_sch_event);
}

void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		sched_ctx->hdd_callback = callback;
}

void scheduler_wake_up_controller_thread(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
}

void scheduler_set_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_set_bit(event_mask, &sched_ctx->sch_event_flag);
}

void scheduler_clear_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_clear_bit(event_mask, &sched_ctx->sch_event_flag);
}

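/*
 * Queue-specific message handlers. scheduler_target_if_mq_handler()
 * serves the converged target_if queue, which carries both legacy WMA
 * messages (no callback; routed to the registered legacy WMA handler)
 * and new UMAC messages (handler embedded in the message callback).
 */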
QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	target_if_msg_handler = msg->callback;

	/* Target_If is a special message queue in phase 3 convergence because
	 * it is used by both legacy WMA and new UMAC components. New UMAC
	 * components pass their message handlers directly as the callback in
	 * the message body.
	 * 1) Legacy WMA messages do not contain a message callback, so invoke
	 *    the registered legacy WMA handler. The scheduler message posting
	 *    APIs ensure legacy WMA messages do not have callbacks.
	 * 2) For new messages, which have valid callbacks, invoke the
	 *    callback directly.
	 */
	if (!target_if_msg_handler)
		status = sched_ctx->legacy_wma_handler(msg);
	else
		status = target_if_msg_handler(msg);

	return status;
}

QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	os_if_msg_handler = msg->callback;

	QDF_BUG(os_if_msg_handler);
	if (!os_if_msg_handler)
		return QDF_STATUS_E_FAILURE;

	os_if_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

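/*
 * Handler for the SYS/timer queue: MC timer expirations arrive here as
 * SYS_MSG_ID_MC_TIMER messages (see scheduler_mc_timer_callback()
 * below); anything else on this queue is passed to the registered
 * legacy sys handler.
 */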
QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	qdf_mc_timer_callback_t timer_callback;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* legacy sys message handler? */
	if (msg->reserved != SYS_MSG_COOKIE || msg->type != SYS_MSG_ID_MC_TIMER)
		return sched_ctx->legacy_sys_handler(msg);

	timer_callback = msg->callback;
	QDF_BUG(timer_callback);
	if (!timer_callback)
		return QDF_STATUS_E_FAILURE;

	timer_callback(msg->bodyptr);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_mlme_mq_handler(struct scheduler_msg *msg)
{
	scheduler_msg_process_fn_t mlme_msg_handler;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	mlme_msg_handler = msg->callback;

	QDF_BUG(mlme_msg_handler);
	if (!mlme_msg_handler)
		return QDF_STATUS_E_FAILURE;

	mlme_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_scan_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*scan_q_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	scan_q_msg_handler = msg->callback;

	QDF_BUG(scan_q_msg_handler);
	if (!scan_q_msg_handler)
		return QDF_STATUS_E_FAILURE;

	scan_q_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
						 wma_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = wma_callback;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
						 sys_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = sys_callback;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_wma_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_sys_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS scheduler_msg_flush_noop(struct scheduler_msg *msg)
{
	return QDF_STATUS_SUCCESS;
}

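/*
 * Called when an MC timer expires. Snapshots the timer state under the
 * platform spinlock and, if the timer was actually running, posts a
 * SYS_MSG_ID_MC_TIMER message so the user's callback runs serialized on
 * the scheduler thread rather than in timer context.
 */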
void scheduler_mc_timer_callback(qdf_mc_timer_t *timer)
{
	struct scheduler_msg msg = {0};
	QDF_STATUS status;

	qdf_mc_timer_callback_t callback = NULL;
	void *user_data = NULL;
	QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW;

	QDF_BUG(timer);
	if (!timer)
		return;

	qdf_spin_lock_irqsave(&timer->platform_info.spinlock);

	switch (timer->state) {
	case QDF_TIMER_STATE_STARTING:
		/* we are in this state because someone just started the
		 * timer; the MC timer was started and expired, but the timer
		 * content has not been updated yet. This is a rare race
		 * condition!
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_STOPPED:
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_UNUSED:
		status = QDF_STATUS_E_EXISTS;
		break;

	case QDF_TIMER_STATE_RUNNING:
		/* need to go to the stopped state here because the callback
		 * function may restart the timer (to emulate a periodic timer)
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		/* copy the relevant timer information to local variables;
		 * once we exit this critical section, the timer content may
		 * be modified by other tasks
		 */
		callback = timer->callback;
		user_data = timer->user_data;
		type = timer->type;
		status = QDF_STATUS_SUCCESS;
		break;

	default:
		QDF_ASSERT(0);
		status = QDF_STATUS_E_FAULT;
		break;
	}

	qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);

	if (QDF_IS_STATUS_ERROR(status)) {
		sched_debug("MC timer fired but is not running; skip callback");
		return;
	}

	qdf_try_allowing_sleep(type);

	QDF_BUG(callback);
	if (!callback)
		return;

	/* serialize to scheduler controller thread */
	msg.type = SYS_MSG_ID_MC_TIMER;
	msg.reserved = SYS_MSG_COOKIE;
	msg.callback = callback;
	msg.bodyptr = user_data;
	msg.bodyval = 0;

	/* bodyptr points to user data, do not free it during msg flush */
	msg.flush_callback = scheduler_msg_flush_noop;

	status = scheduler_post_message(QDF_MODULE_ID_SCHEDULER,
					QDF_MODULE_ID_SCHEDULER,
					QDF_MODULE_ID_SYS, &msg);
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Could not enqueue timer to timer queue");
}

QDF_STATUS scheduler_get_queue_size(QDF_MODULE_ID qid, uint32_t *size)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_ctx *sched_ctx;

	sched_ctx = scheduler_get_context();
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	/* WMA also uses the target_if queue, so replace the QID */
	if (QDF_MODULE_ID_WMA == qid)
		qid = QDF_MODULE_ID_TARGET_IF;

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized");
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);

	*size = qdf_list_size(&target_mq->mq_list);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_post_message_debug(QDF_MODULE_ID src_id,
					QDF_MODULE_ID dest_id,
					QDF_MODULE_ID que_id,
					struct scheduler_msg *msg,
					int line,
					const char *func)
{
	QDF_STATUS status;

	status = scheduler_post_msg(scheduler_get_qid(src_id, dest_id, que_id),
				    msg);

	if (QDF_IS_STATUS_ERROR(status))
		sched_err("couldn't post from %d to %d - called from %d, %s",
			  src_id, dest_id, line, func);

	return status;
}

qdf_export_symbol(scheduler_post_message_debug);