/*
 * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_api.h>
#include <scheduler_core.h>
#include <qdf_atomic.h>

QDF_STATUS scheduler_disable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_debug("Disabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	/* send shutdown signal to scheduler thread */
	qdf_atomic_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	/* wait for scheduler thread to shutdown */
	qdf_wait_single_event(&sched_ctx->sch_shutdown, 0);
	sched_ctx->sch_thread = NULL;

	/* flush any unprocessed scheduler messages */
	scheduler_queues_flush(sched_ctx);

	return QDF_STATUS_SUCCESS;
}

static inline void scheduler_watchdog_notify(struct scheduler_ctx *sched)
{
	char symbol[QDF_SYMBOL_LEN];

	if (sched->watchdog_callback)
		qdf_sprint_symbol(symbol, sched->watchdog_callback);

	sched_err("WLAN_BUG_RCA: Callback %s (type 0x%x) exceeded its allotted time of %ds",
		  sched->watchdog_callback ? symbol : "<null>",
		  sched->watchdog_msg_type,
		  SCHEDULER_WATCHDOG_TIMEOUT / 1000);
}
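
/*
 * The watchdog fields consumed above (watchdog_callback, watchdog_msg_type,
 * watchdog_timer) are expected to be maintained by the scheduler thread in
 * scheduler_core.c around each message dispatch. A rough sketch, assuming
 * that implementation rather than quoting it:
 *
 *	sched->watchdog_msg_type = msg->type;
 *	sched->watchdog_callback = msg->callback;
 *	qdf_timer_start(&sched->watchdog_timer, SCHEDULER_WATCHDOG_TIMEOUT);
 *	status = sched->queue_ctx.scheduler_msg_process_fn[qidx](msg);
 *	qdf_timer_stop(&sched->watchdog_timer);
 */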

#ifdef CONFIG_SLUB_DEBUG_ON
static void scheduler_watchdog_timeout(void *arg)
{
	struct scheduler_ctx *sched = arg;

	scheduler_watchdog_notify(sched);
	if (sched->sch_thread)
		qdf_print_thread_trace(sched->sch_thread);

	/* avoid crashing during shutdown */
	if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK, &sched->sch_event_flag))
		return;

	QDF_DEBUG_PANIC("Going down for Scheduler Watchdog Bite!");
}
#else
static void scheduler_watchdog_timeout(void *arg)
{
	scheduler_watchdog_notify((struct scheduler_ctx *)arg);
}
#endif

QDF_STATUS scheduler_enable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_debug("Enabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	qdf_atomic_clear_bit(MC_SHUTDOWN_EVENT_MASK,
			     &sched_ctx->sch_event_flag);
	qdf_atomic_clear_bit(MC_POST_EVENT_MASK,
			     &sched_ctx->sch_event_flag);

	/* create the scheduler thread */
	sched_ctx->sch_thread = qdf_create_thread(scheduler_thread, sched_ctx,
						  "scheduler_thread");
	if (!sched_ctx->sch_thread) {
		sched_err("Failed to create scheduler thread");
		return QDF_STATUS_E_RESOURCES;
	}

	sched_debug("Scheduler thread created");

	/* wait for the scheduler thread to startup */
	qdf_wake_up_process(sched_ctx->sch_thread);
	qdf_wait_single_event(&sched_ctx->sch_start_event, 0);

	sched_debug("Scheduler thread started");

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_init(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_debug("Initializing Scheduler");

	status = scheduler_create_ctx();
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create context; status:%d", status);
		return status;
	}

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		status = QDF_STATUS_E_FAILURE;
		goto ctx_destroy;
	}

	status = scheduler_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to init queues; status:%d", status);
		goto ctx_destroy;
	}

	status = qdf_event_create(&sched_ctx->sch_start_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create start event; status:%d", status);
		goto queues_deinit;
	}

	status = qdf_event_create(&sched_ctx->sch_shutdown);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create shutdown event; status:%d", status);
		goto start_event_destroy;
	}

	status = qdf_event_create(&sched_ctx->resume_sch_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create resume event; status:%d", status);
		goto shutdown_event_destroy;
	}

	qdf_spinlock_create(&sched_ctx->sch_thread_lock);
	qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue);
	sched_ctx->sch_event_flag = 0;
	qdf_timer_init(NULL,
		       &sched_ctx->watchdog_timer,
		       &scheduler_watchdog_timeout,
		       sched_ctx,
		       QDF_TIMER_TYPE_SW);

	qdf_register_mc_timer_callback(scheduler_mc_timer_callback);

	return QDF_STATUS_SUCCESS;

shutdown_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_shutdown);

start_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_start_event);

queues_deinit:
	scheduler_queues_deinit(sched_ctx);

ctx_destroy:
	scheduler_destroy_ctx();

	return status;
}
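
/*
 * Typical lifecycle pairing of the entry points above and below (a sketch;
 * the actual call sites live in the driver's start/stop paths, not here):
 *
 *	scheduler_init();     // create context, queues, events, watchdog
 *	scheduler_enable();   // spawn and wake the scheduler thread
 *	...
 *	scheduler_disable();  // signal shutdown, wait for the thread, flush
 *	scheduler_deinit();   // destroy events, queues, and the context
 */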
QDF_STATUS scheduler_deinit(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_debug("Deinitializing Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	qdf_timer_free(&sched_ctx->watchdog_timer);
	qdf_spinlock_destroy(&sched_ctx->sch_thread_lock);
	qdf_event_destroy(&sched_ctx->resume_sch_event);
	qdf_event_destroy(&sched_ctx->sch_shutdown);
	qdf_event_destroy(&sched_ctx->sch_start_event);

	status = scheduler_queues_deinit(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to deinit queues; status:%d", status);

	status = scheduler_destroy_ctx();
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to destroy context; status:%d", status);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_post_msg_by_priority(uint32_t qid,
					  struct scheduler_msg *msg,
					  bool is_high_priority)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_msg *queue_msg;
	struct scheduler_ctx *sched_ctx;
	uint16_t src_id;
	uint16_t dest_id;
	uint16_t que_id;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_INVAL;

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	if (!sched_ctx->sch_thread) {
		sched_err("Cannot post message; scheduler thread is stopped");
		return QDF_STATUS_E_FAILURE;
	}

	if (msg->reserved != 0 && msg->reserved != SYS_MSG_COOKIE) {
		QDF_DEBUG_PANIC("Scheduler messages must be initialized");
		return QDF_STATUS_E_FAILURE;
	}

	dest_id = scheduler_get_dest_id(qid);
	src_id = scheduler_get_src_id(qid);
	que_id = scheduler_get_que_id(qid);

	if (que_id >= QDF_MODULE_ID_MAX || src_id >= QDF_MODULE_ID_MAX ||
	    dest_id >= QDF_MODULE_ID_MAX) {
		sched_err("Src_id/Dest_id invalid, cannot post message");
		return QDF_STATUS_E_FAILURE;
	}

	/* Target_If is a special message queue in phase 3 convergence because
	 * it is used both by legacy WMA and by new UMAC components, which
	 * directly populate callback handlers in the message body.
	 * 1) Legacy WMA messages must not have a callback.
	 * 2) New target_if messages need a valid callback.
	 * Clear the callback handler for legacy WMA messages so that a legacy
	 * WMA message posted from the stack with an uninitialized callback is
	 * still handled properly. Also change the legacy WMA message queue id
	 * to the target_if queue id so such messages are always handled in
	 * the right order.
	 */
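	/* An illustrative sketch of the two flavors this comment describes
	 * (the message types and handler below are hypothetical placeholders,
	 * not part of this file):
	 *
	 *	legacy.type = WMA_EXAMPLE_MSG;		// no callback; queue
	 *	scheduler_post_msg(QDF_MODULE_ID_WMA, &legacy); // id remapped
	 *
	 *	fresh.type = EXAMPLE_TARGET_IF_MSG;
	 *	fresh.callback = example_target_if_handler;
	 *	scheduler_post_msg(QDF_MODULE_ID_TARGET_IF, &fresh);
	 */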
	if (QDF_MODULE_ID_WMA == que_id) {
		msg->callback = NULL;
		/* change legacy WMA message id to new target_if mq id */
		que_id = QDF_MODULE_ID_TARGET_IF;
	}

	qdf_mtrace(src_id, dest_id, msg->type, 0xFF, 0);

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[que_id];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized ignore msg");
		return QDF_STATUS_E_FAILURE;
	}

	if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) {
		QDF_DEBUG_PANIC("callback not registered for qid[%d]", que_id);
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);

	queue_msg = scheduler_core_msg_dup(msg);
	if (!queue_msg)
		return QDF_STATUS_E_NOMEM;

	if (is_high_priority)
		scheduler_mq_put_front(target_mq, queue_msg);
	else
		scheduler_mq_put(target_mq, queue_msg);

	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid,
				     scheduler_msg_process_fn_t callback)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	if (sched_ctx->sch_last_qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Already registered max %d message queues",
			  SCHEDULER_NUMBER_OF_MSG_QUEUE);
		return QDF_STATUS_E_FAILURE;
	}

	ctx = &sched_ctx->queue_ctx;
	ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx;
	ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid;
	ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback;
	sched_ctx->sch_last_qidx++;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	uint8_t qidx;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	ctx = &sched_ctx->queue_ctx;
	qidx = ctx->scheduler_msg_qid_to_qidx[qid];
	ctx->scheduler_msg_process_fn[qidx] = NULL;
	sched_ctx->sch_last_qidx--;
	/* invalidate the qid->qidx mapping for this module id; indexing the
	 * map by qidx here would corrupt an unrelated module's mapping
	 */
	ctx->scheduler_msg_qid_to_qidx[qid] = SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

void scheduler_resume(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_event_set(&sched_ctx->resume_sch_event);
}

void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		sched_ctx->hdd_callback = callback;
}

void scheduler_wake_up_controller_thread(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
}

void scheduler_set_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_set_bit(event_mask, &sched_ctx->sch_event_flag);
}

void scheduler_clear_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_clear_bit(event_mask, &sched_ctx->sch_event_flag);
}
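
/*
 * Illustrative sketch of module registration at component init time (the
 * handler below is a hypothetical placeholder; the API calls are the real
 * ones defined above):
 *
 *	static QDF_STATUS example_mq_handler(struct scheduler_msg *msg)
 *	{
 *		// dispatch on msg->type here
 *		return QDF_STATUS_SUCCESS;
 *	}
 *
 *	status = scheduler_register_module(QDF_MODULE_ID_OS_IF,
 *					   example_mq_handler);
 *	...
 *	scheduler_deregister_module(QDF_MODULE_ID_OS_IF);
 */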

QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	target_if_msg_handler = msg->callback;

	/* Target_If is a special message queue in phase 3 convergence because
	 * it is used both by legacy WMA and by new UMAC components. New UMAC
	 * components directly pass their message handlers as the callback in
	 * the message body.
	 * 1) Legacy WMA messages do not contain a message callback, so invoke
	 *    the registered legacy WMA handler. The scheduler message posting
	 *    APIs make sure legacy WMA messages do not have callbacks.
	 * 2) For new messages, which have valid callbacks, invoke their
	 *    callbacks directly.
	 */
	if (!target_if_msg_handler)
		status = sched_ctx->legacy_wma_handler(msg);
	else
		status = target_if_msg_handler(msg);

	return status;
}

QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	os_if_msg_handler = msg->callback;

	QDF_BUG(os_if_msg_handler);
	if (!os_if_msg_handler)
		return QDF_STATUS_E_FAILURE;

	os_if_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	qdf_mc_timer_callback_t timer_callback;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* legacy sys message handler? */
	if (msg->reserved != SYS_MSG_COOKIE || msg->type != SYS_MSG_ID_MC_TIMER)
		return sched_ctx->legacy_sys_handler(msg);

	timer_callback = msg->callback;
	QDF_BUG(timer_callback);
	if (!timer_callback)
		return QDF_STATUS_E_FAILURE;

	timer_callback(msg->bodyptr);

	return QDF_STATUS_SUCCESS;
}
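
/*
 * The mq handlers above and below only dispatch; the real handler travels
 * inside the message. A sketch of posting to the OS_IF queue (the message
 * type and handler are hypothetical placeholders):
 *
 *	struct scheduler_msg msg = {0};
 *
 *	msg.type = EXAMPLE_OS_IF_MSG;
 *	msg.callback = example_os_if_handler;	// required; see the
 *						// QDF_BUG above
 *	status = scheduler_post_message(QDF_MODULE_ID_OS_IF,
 *					QDF_MODULE_ID_OS_IF,
 *					QDF_MODULE_ID_OS_IF, &msg);
 */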

QDF_STATUS scheduler_mlme_mq_handler(struct scheduler_msg *msg)
{
	scheduler_msg_process_fn_t mlme_msg_handler;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	mlme_msg_handler = msg->callback;

	QDF_BUG(mlme_msg_handler);
	if (!mlme_msg_handler)
		return QDF_STATUS_E_FAILURE;

	mlme_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_scan_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*scan_q_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	scan_q_msg_handler = msg->callback;

	QDF_BUG(scan_q_msg_handler);
	if (!scan_q_msg_handler)
		return QDF_STATUS_E_FAILURE;

	scan_q_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
						 wma_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = wma_callback;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
						 sys_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = sys_callback;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_wma_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_sys_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS scheduler_msg_flush_noop(struct scheduler_msg *msg)
{
	return QDF_STATUS_SUCCESS;
}
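
/*
 * A flush callback is the message owner's chance to release msg->bodyptr
 * when an undelivered message is dropped (e.g. via scheduler_queues_flush()
 * during scheduler_disable()). The no-op above is used for MC timer
 * messages because their bodyptr borrows the timer user's data. A sketch
 * of a flush callback for a message that owns its body (hypothetical, not
 * part of this file):
 *
 *	static QDF_STATUS example_msg_flush_cb(struct scheduler_msg *msg)
 *	{
 *		qdf_mem_free(msg->bodyptr);
 *		return QDF_STATUS_SUCCESS;
 *	}
 */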
592 */ 593 timer->state = QDF_TIMER_STATE_STOPPED; 594 status = QDF_STATUS_E_ALREADY; 595 break; 596 597 case QDF_TIMER_STATE_STOPPED: 598 status = QDF_STATUS_E_ALREADY; 599 break; 600 601 case QDF_TIMER_STATE_UNUSED: 602 status = QDF_STATUS_E_EXISTS; 603 break; 604 605 case QDF_TIMER_STATE_RUNNING: 606 /* need to go to stop state here because the call-back function 607 * may restart timer (to emulate periodic timer) 608 */ 609 timer->state = QDF_TIMER_STATE_STOPPED; 610 /* copy the relevant timer information to local variables; 611 * once we exits from this critical section, the timer content 612 * may be modified by other tasks 613 */ 614 callback = timer->callback; 615 user_data = timer->user_data; 616 type = timer->type; 617 status = QDF_STATUS_SUCCESS; 618 break; 619 620 default: 621 QDF_ASSERT(0); 622 status = QDF_STATUS_E_FAULT; 623 break; 624 } 625 626 qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); 627 628 if (QDF_IS_STATUS_ERROR(status)) { 629 sched_debug("MC timer fired but is not running; skip callback"); 630 return; 631 } 632 633 qdf_try_allowing_sleep(type); 634 635 QDF_BUG(callback); 636 if (!callback) 637 return; 638 639 /* serialize to scheduler controller thread */ 640 msg.type = SYS_MSG_ID_MC_TIMER; 641 msg.reserved = SYS_MSG_COOKIE; 642 msg.callback = callback; 643 msg.bodyptr = user_data; 644 msg.bodyval = 0; 645 646 /* bodyptr points to user data, do not free it during msg flush */ 647 msg.flush_callback = scheduler_msg_flush_noop; 648 649 status = scheduler_post_message(QDF_MODULE_ID_SCHEDULER, 650 QDF_MODULE_ID_SCHEDULER, 651 QDF_MODULE_ID_SYS, &msg); 652 if (QDF_IS_STATUS_ERROR(status)) 653 sched_err("Could not enqueue timer to timer queue"); 654 } 655 656 QDF_STATUS scheduler_get_queue_size(QDF_MODULE_ID qid, uint32_t *size) 657 { 658 uint8_t qidx; 659 struct scheduler_mq_type *target_mq; 660 struct scheduler_ctx *sched_ctx; 661 662 sched_ctx = scheduler_get_context(); 663 if (!sched_ctx) 664 return QDF_STATUS_E_INVAL; 665 666 /* WMA also uses the target_if queue, so replace the QID */ 667 if (QDF_MODULE_ID_WMA == qid) 668 qid = QDF_MODULE_ID_TARGET_IF; 669 670 qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid]; 671 if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) { 672 sched_err("Scheduler is deinitialized"); 673 return QDF_STATUS_E_FAILURE; 674 } 675 676 target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]); 677 678 *size = qdf_list_size(&target_mq->mq_list); 679 680 return QDF_STATUS_SUCCESS; 681 } 682 683 QDF_STATUS scheduler_post_message_debug(QDF_MODULE_ID src_id, 684 QDF_MODULE_ID dest_id, 685 QDF_MODULE_ID que_id, 686 struct scheduler_msg *msg, 687 int line, 688 const char *func) 689 { 690 QDF_STATUS status; 691 692 status = scheduler_post_msg(scheduler_get_qid(src_id, dest_id, que_id), 693 msg); 694 695 if (QDF_IS_STATUS_ERROR(status)) 696 sched_err("couldn't post from %d to %d - called from %d, %s", 697 src_id, dest_id, line, func); 698 699 return status; 700 } 701