/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_api.h>
#include <scheduler_core.h>
#include <qdf_atomic.h>

QDF_STATUS scheduler_disable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_debug("Disabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	/* send shutdown signal to scheduler thread */
	qdf_atomic_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	/* wait for scheduler thread to shutdown */
	qdf_wait_single_event(&sched_ctx->sch_shutdown, 0);
	sched_ctx->sch_thread = NULL;

	/* flush any unprocessed scheduler messages */
	scheduler_queues_flush(sched_ctx);

	return QDF_STATUS_SUCCESS;
}

static inline void scheduler_watchdog_notify(struct scheduler_ctx *sched)
{
	char symbol[QDF_SYMBOL_LEN];

	if (sched->watchdog_callback)
		qdf_sprint_symbol(symbol, sched->watchdog_callback);

	sched_err("WLAN_BUG_RCA: Callback %s (type 0x%x) exceeded its allotted time of %ds",
		  sched->watchdog_callback ? symbol : "<null>",
		  sched->watchdog_msg_type,
		  SCHEDULER_WATCHDOG_TIMEOUT / 1000);
}

#ifdef CONFIG_SLUB_DEBUG_ON
static void scheduler_watchdog_timeout(void *arg)
{
	struct scheduler_ctx *sched = arg;

	scheduler_watchdog_notify(sched);
	if (sched->sch_thread)
		qdf_print_thread_trace(sched->sch_thread);

	/* avoid crashing during shutdown */
	if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK, &sched->sch_event_flag))
		return;

	QDF_DEBUG_PANIC("Going down for Scheduler Watchdog Bite!");
}
#else
static void scheduler_watchdog_timeout(void *arg)
{
	scheduler_watchdog_notify((struct scheduler_ctx *)arg);
}
#endif

QDF_STATUS scheduler_enable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_debug("Enabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	qdf_atomic_clear_bit(MC_SHUTDOWN_EVENT_MASK,
			     &sched_ctx->sch_event_flag);
	qdf_atomic_clear_bit(MC_POST_EVENT_MASK,
			     &sched_ctx->sch_event_flag);

	/* create the scheduler thread */
	sched_ctx->sch_thread = qdf_create_thread(scheduler_thread, sched_ctx,
						  "scheduler_thread");
	if (IS_ERR(sched_ctx->sch_thread)) {
		sched_err("Failed to create scheduler thread");
		return QDF_STATUS_E_RESOURCES;
	}

	sched_debug("Scheduler thread created");

	/* wait for the scheduler thread to start up */
	qdf_wake_up_process(sched_ctx->sch_thread);
	qdf_wait_single_event(&sched_ctx->sch_start_event, 0);

	sched_debug("Scheduler thread started");

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_init(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_debug("Initializing Scheduler");

	status = scheduler_create_ctx();
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create context; status:%d", status);
		return status;
	}

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		status = QDF_STATUS_E_FAILURE;
		goto ctx_destroy;
	}

	status = scheduler_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to init queues; status:%d", status);
		goto ctx_destroy;
	}

	status = qdf_event_create(&sched_ctx->sch_start_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create start event; status:%d", status);
		goto queues_deinit;
	}

	status = qdf_event_create(&sched_ctx->sch_shutdown);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create shutdown event; status:%d", status);
		goto start_event_destroy;
	}

	status = qdf_event_create(&sched_ctx->resume_sch_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create resume event; status:%d", status);
		goto shutdown_event_destroy;
	}

	qdf_spinlock_create(&sched_ctx->sch_thread_lock);
	qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue);
	sched_ctx->sch_event_flag = 0;
	qdf_timer_init(NULL,
		       &sched_ctx->watchdog_timer,
		       &scheduler_watchdog_timeout,
		       sched_ctx,
		       QDF_TIMER_TYPE_SW);

	qdf_register_mc_timer_callback(scheduler_mc_timer_callback);

	return QDF_STATUS_SUCCESS;

shutdown_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_shutdown);

start_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_start_event);

queues_deinit:
	scheduler_queues_deinit(sched_ctx);

ctx_destroy:
	scheduler_destroy_ctx();

	return status;
}

QDF_STATUS scheduler_deinit(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_debug("Deinitializing Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	qdf_timer_free(&sched_ctx->watchdog_timer);
	qdf_spinlock_destroy(&sched_ctx->sch_thread_lock);
	qdf_event_destroy(&sched_ctx->resume_sch_event);
	qdf_event_destroy(&sched_ctx->sch_shutdown);
	qdf_event_destroy(&sched_ctx->sch_start_event);

	status = scheduler_queues_deinit(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to deinit queues; status:%d", status);

	status = scheduler_destroy_ctx();
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to destroy context; status:%d", status);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_post_msg_by_priority(uint32_t qid,
					  struct scheduler_msg *msg,
					  bool is_high_priority)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_msg *queue_msg;
	struct scheduler_ctx *sched_ctx;
	uint16_t src_id;
	uint16_t dest_id;
	uint16_t que_id;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_INVAL;

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	if (!sched_ctx->sch_thread) {
		sched_err("Cannot post message; scheduler thread is stopped");
		return QDF_STATUS_E_FAILURE;
	}

	if (msg->reserved != 0 && msg->reserved != SYS_MSG_COOKIE) {
		QDF_DEBUG_PANIC("Scheduler messages must be initialized");
		return QDF_STATUS_E_FAILURE;
	}

	dest_id = scheduler_get_dest_id(qid);
	src_id = scheduler_get_src_id(qid);
	que_id = scheduler_get_que_id(qid);

	if (que_id >= QDF_MODULE_ID_MAX || src_id >= QDF_MODULE_ID_MAX ||
	    dest_id >= QDF_MODULE_ID_MAX) {
		sched_err("Invalid src_id/dest_id/que_id; cannot post message");
		return QDF_STATUS_E_FAILURE;
	}

	/* Target_If is a special message queue in phase 3 convergence because
	 * it is used both by legacy WMA and by new UMAC components, which
	 * directly populate callback handlers in the message body.
	 * 1) Legacy WMA messages must not carry a callback.
	 * 2) New target_if messages must carry a valid callback.
	 * Clear the callback handler for legacy WMA messages so that a legacy
	 * WMA message posted from the stack with an uninitialized callback is
	 * still handled properly. Also remap the legacy WMA queue id to the
	 * target_if queue so the messages are always handled in the right
	 * order.
	 */
	if (QDF_MODULE_ID_WMA == que_id) {
		msg->callback = NULL;
		/* change legacy WMA message id to new target_if mq id */
		que_id = QDF_MODULE_ID_TARGET_IF;
	}

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[que_id];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized; dropping msg");
		return QDF_STATUS_E_FAILURE;
	}

	if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) {
		QDF_DEBUG_PANIC("callback not registered for qid[%d]", que_id);
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);

	queue_msg = scheduler_core_msg_dup(msg);
	if (!queue_msg)
		return QDF_STATUS_E_NOMEM;

	if (is_high_priority)
		scheduler_mq_put_front(target_mq, queue_msg);
	else
		scheduler_mq_put(target_mq, queue_msg);

	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid,
				     scheduler_msg_process_fn_t callback)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	if (sched_ctx->sch_last_qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Already registered the maximum of %d message queues",
			  SCHEDULER_NUMBER_OF_MSG_QUEUE);
		return QDF_STATUS_E_FAILURE;
	}

	ctx = &sched_ctx->queue_ctx;
	ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx;
	ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid;
	ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback;
	sched_ctx->sch_last_qidx++;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	uint8_t qidx;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	ctx = &sched_ctx->queue_ctx;
	qidx = ctx->scheduler_msg_qid_to_qidx[qid];
	ctx->scheduler_msg_process_fn[qidx] = NULL;
	sched_ctx->sch_last_qidx--;
	/* invalidate the qid -> qidx mapping for this module */
	ctx->scheduler_msg_qid_to_qidx[qid] = SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

void scheduler_resume(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_event_set(&sched_ctx->resume_sch_event);
}

void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		sched_ctx->hdd_callback = callback;
}

void scheduler_wake_up_controller_thread(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
}

void scheduler_set_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_set_bit(event_mask, &sched_ctx->sch_event_flag);
}

void scheduler_clear_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_clear_bit(event_mask, &sched_ctx->sch_event_flag);
}
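
/*
 * Illustrative sketch (not part of the driver sources): how the WMA/target_if
 * redirection in scheduler_post_msg_by_priority() above looks from a caller's
 * point of view. A legacy component posts to QDF_MODULE_ID_WMA with no
 * callback; the post path clears msg.callback and requeues the message on the
 * target_if queue, so scheduler_target_if_mq_handler() below falls back to the
 * registered legacy WMA handler. A converged component instead supplies its
 * own handler in the message body (MY_MSG_TYPE, my_ctx and my_msg_handler are
 * hypothetical names used only for this example):
 *
 *	struct scheduler_msg msg = {0};
 *
 *	msg.type = MY_MSG_TYPE;
 *	msg.bodyptr = my_ctx;
 *	msg.callback = my_msg_handler;	// invoked on the scheduler thread
 *	status = scheduler_post_msg(QDF_MODULE_ID_TARGET_IF, &msg);
 */
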
QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	target_if_msg_handler = msg->callback;

	/* Target_If is a special message queue in phase 3 convergence because
	 * it is used both by legacy WMA and by new UMAC components. New UMAC
	 * components pass their message handlers directly as the callback in
	 * the message body.
	 * 1) Legacy WMA messages carry no callback, so invoke the registered
	 *    legacy WMA handler. The scheduler message posting APIs make sure
	 *    legacy WMA messages do not have callbacks.
	 * 2) New messages carry a valid callback, so invoke it directly.
	 */
	if (!target_if_msg_handler)
		status = sched_ctx->legacy_wma_handler(msg);
	else
		status = target_if_msg_handler(msg);

	return status;
}

QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	os_if_msg_handler = msg->callback;

	QDF_BUG(os_if_msg_handler);
	if (!os_if_msg_handler)
		return QDF_STATUS_E_FAILURE;

	os_if_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	qdf_mc_timer_callback_t timer_callback;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* legacy sys message handler? */
	if (msg->reserved != SYS_MSG_COOKIE || msg->type != SYS_MSG_ID_MC_TIMER)
		return sched_ctx->legacy_sys_handler(msg);

	timer_callback = msg->callback;
	QDF_BUG(timer_callback);
	if (!timer_callback)
		return QDF_STATUS_E_FAILURE;

	timer_callback(msg->bodyptr);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_scan_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*scan_q_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	scan_q_msg_handler = msg->callback;

	QDF_BUG(scan_q_msg_handler);
	if (!scan_q_msg_handler)
		return QDF_STATUS_E_FAILURE;

	scan_q_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
						 wma_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = wma_callback;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
						 sys_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = sys_callback;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_wma_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_sys_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS scheduler_msg_flush_noop(struct scheduler_msg *msg)
{
	return QDF_STATUS_SUCCESS;
}

void scheduler_mc_timer_callback(unsigned long data)
{
	qdf_mc_timer_t *timer = (qdf_mc_timer_t *)data;
	struct scheduler_msg msg = {0};
	QDF_STATUS status;

	qdf_mc_timer_callback_t callback = NULL;
	void *user_data = NULL;
	QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW;

	QDF_BUG(timer);
	if (!timer)
		return;

	qdf_spin_lock_irqsave(&timer->platform_info.spinlock);

	switch (timer->state) {
	case QDF_TIMER_STATE_STARTING:
		/* we are in this state because someone just started the timer;
		 * the MC timer got started and expired, but the timer content
		 * has not yet been updated. This is a rare race condition!
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_STOPPED:
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_UNUSED:
		status = QDF_STATUS_E_EXISTS;
		break;

	case QDF_TIMER_STATE_RUNNING:
		/* need to go to stop state here because the callback function
		 * may restart the timer (to emulate a periodic timer)
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		/* copy the relevant timer information to local variables;
		 * once we exit this critical section, the timer content may
		 * be modified by other tasks
		 */
		callback = timer->callback;
		user_data = timer->user_data;
		type = timer->type;
		status = QDF_STATUS_SUCCESS;
		break;

	default:
		QDF_ASSERT(0);
		status = QDF_STATUS_E_FAULT;
		break;
	}

	qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);

	if (QDF_IS_STATUS_ERROR(status)) {
		sched_debug("MC timer fired but is not running; skip callback");
		return;
	}

	qdf_try_allowing_sleep(type);

	QDF_BUG(callback);
	if (!callback)
		return;

	/* serialize to the scheduler controller thread */
	msg.type = SYS_MSG_ID_MC_TIMER;
	msg.reserved = SYS_MSG_COOKIE;
	msg.callback = callback;
	msg.bodyptr = user_data;
	msg.bodyval = 0;

	/* bodyptr points to user data; do not free it during msg flush */
	msg.flush_callback = scheduler_msg_flush_noop;

	status = scheduler_post_msg(QDF_MODULE_ID_SYS, &msg);
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Could not enqueue timer to timer queue");
}

QDF_STATUS scheduler_get_queue_size(QDF_MODULE_ID qid, uint32_t *size)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_ctx *sched_ctx;

	sched_ctx = scheduler_get_context();
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	/* WMA also uses the target_if queue, so replace the QID */
	if (QDF_MODULE_ID_WMA == qid)
		qid = QDF_MODULE_ID_TARGET_IF;

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized");
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);

	*size = qdf_list_size(&target_mq->mq_list);

	return QDF_STATUS_SUCCESS;
}
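
/*
 * Illustrative sketch (an assumption about the caller, not code in this file):
 * the expected ordering of the lifecycle APIs implemented above, as typically
 * driven from the driver's start/stop paths.
 *
 *	scheduler_init();	// create context, queues, events and watchdog
 *	scheduler_enable();	// spawn and wake the scheduler thread
 *	...			// modules register handlers and post messages
 *	scheduler_disable();	// signal shutdown and flush pending messages
 *	scheduler_deinit();	// destroy events, queues and the context
 */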