/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_api.h>
#include <scheduler_core.h>
#include <qdf_atomic.h>

QDF_STATUS scheduler_disable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_debug("Disabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	/* send shutdown signal to scheduler thread */
	qdf_atomic_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	/* wait for scheduler thread to shut down */
	qdf_wait_single_event(&sched_ctx->sch_shutdown, 0);
	sched_ctx->sch_thread = NULL;

	/* flush any unprocessed scheduler messages */
	scheduler_queues_flush(sched_ctx);

	return QDF_STATUS_SUCCESS;
}
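
/*
 * A minimal teardown sketch (illustrative only, not part of this API's
 * contract): scheduler_disable() stops the scheduler thread and flushes
 * pending messages, and is assumed to run before scheduler_deinit()
 * during driver unload:
 *
 *	scheduler_disable();	// stop thread, flush queued messages
 *	scheduler_deinit();	// destroy events, locks, and context
 */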
symbol : "<null>", 58 sched->watchdog_msg_type, SCHEDULER_WATCHDOG_TIMEOUT / 1000); 59 } 60 61 #ifdef CONFIG_SLUB_DEBUG_ON 62 static void scheduler_watchdog_timeout(void *arg) 63 { 64 struct scheduler_ctx *sched = arg; 65 66 scheduler_watchdog_notify(sched); 67 if (sched->sch_thread) 68 qdf_print_thread_trace(sched->sch_thread); 69 70 /* avoid crashing during shutdown */ 71 if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK, &sched->sch_event_flag)) 72 return; 73 74 QDF_DEBUG_PANIC("Going down for Scheduler Watchdog Bite!"); 75 } 76 #else 77 static void scheduler_watchdog_timeout(void *arg) 78 { 79 scheduler_watchdog_notify((struct scheduler_ctx *)arg); 80 } 81 #endif 82 83 QDF_STATUS scheduler_enable(void) 84 { 85 struct scheduler_ctx *sched_ctx; 86 87 sched_debug("Enabling Scheduler"); 88 89 sched_ctx = scheduler_get_context(); 90 QDF_BUG(sched_ctx); 91 if (!sched_ctx) 92 return QDF_STATUS_E_INVAL; 93 94 qdf_atomic_clear_bit(MC_SHUTDOWN_EVENT_MASK, 95 &sched_ctx->sch_event_flag); 96 qdf_atomic_clear_bit(MC_POST_EVENT_MASK, 97 &sched_ctx->sch_event_flag); 98 99 /* create the scheduler thread */ 100 sched_ctx->sch_thread = qdf_create_thread(scheduler_thread, sched_ctx, 101 "scheduler_thread"); 102 if (IS_ERR(sched_ctx->sch_thread)) { 103 sched_err("Failed to create scheduler thread"); 104 return QDF_STATUS_E_RESOURCES; 105 } 106 107 sched_debug("Scheduler thread created"); 108 109 /* wait for the scheduler thread to startup */ 110 qdf_wake_up_process(sched_ctx->sch_thread); 111 qdf_wait_single_event(&sched_ctx->sch_start_event, 0); 112 113 sched_debug("Scheduler thread started"); 114 115 return QDF_STATUS_SUCCESS; 116 } 117 118 QDF_STATUS scheduler_init(void) 119 { 120 QDF_STATUS status; 121 struct scheduler_ctx *sched_ctx; 122 123 sched_debug("Initializing Scheduler"); 124 125 status = scheduler_create_ctx(); 126 if (QDF_IS_STATUS_ERROR(status)) { 127 sched_err("Failed to create context; status:%d", status); 128 return status; 129 } 130 131 sched_ctx = scheduler_get_context(); 132 QDF_BUG(sched_ctx); 133 if (!sched_ctx) { 134 status = QDF_STATUS_E_FAILURE; 135 goto ctx_destroy; 136 } 137 138 status = scheduler_queues_init(sched_ctx); 139 if (QDF_IS_STATUS_ERROR(status)) { 140 sched_err("Failed to init queues; status:%d", status); 141 goto ctx_destroy; 142 } 143 144 status = qdf_event_create(&sched_ctx->sch_start_event); 145 if (QDF_IS_STATUS_ERROR(status)) { 146 sched_err("Failed to create start event; status:%d", status); 147 goto queues_deinit; 148 } 149 150 status = qdf_event_create(&sched_ctx->sch_shutdown); 151 if (QDF_IS_STATUS_ERROR(status)) { 152 sched_err("Failed to create shutdown event; status:%d", status); 153 goto start_event_destroy; 154 } 155 156 status = qdf_event_create(&sched_ctx->resume_sch_event); 157 if (QDF_IS_STATUS_ERROR(status)) { 158 sched_err("Failed to create resume event; status:%d", status); 159 goto shutdown_event_destroy; 160 } 161 162 qdf_spinlock_create(&sched_ctx->sch_thread_lock); 163 qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue); 164 sched_ctx->sch_event_flag = 0; 165 qdf_timer_init(NULL, 166 &sched_ctx->watchdog_timer, 167 &scheduler_watchdog_timeout, 168 sched_ctx, 169 QDF_TIMER_TYPE_SW); 170 171 qdf_register_mc_timer_callback(scheduler_mc_timer_callback); 172 173 return QDF_STATUS_SUCCESS; 174 175 shutdown_event_destroy: 176 qdf_event_destroy(&sched_ctx->sch_shutdown); 177 178 start_event_destroy: 179 qdf_event_destroy(&sched_ctx->sch_start_event); 180 181 queues_deinit: 182 scheduler_queues_deinit(sched_ctx); 183 184 ctx_destroy: 185 

QDF_STATUS scheduler_deinit(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_debug("Deinitializing Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	qdf_timer_free(&sched_ctx->watchdog_timer);
	qdf_spinlock_destroy(&sched_ctx->sch_thread_lock);
	qdf_event_destroy(&sched_ctx->resume_sch_event);
	qdf_event_destroy(&sched_ctx->sch_shutdown);
	qdf_event_destroy(&sched_ctx->sch_start_event);

	status = scheduler_queues_deinit(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to deinit queues; status:%d", status);

	status = scheduler_destroy_ctx();
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to destroy context; status:%d", status);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_post_msg_by_priority(uint32_t qid,
					  struct scheduler_msg *msg,
					  bool is_high_priority)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_msg *queue_msg;
	struct scheduler_ctx *sched_ctx;
	uint16_t src_id;
	uint16_t dest_id;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_INVAL;

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	if (!sched_ctx->sch_thread) {
		sched_err("Cannot post message; scheduler thread is stopped");
		return QDF_STATUS_E_FAILURE;
	}

	if (msg->reserved != 0 && msg->reserved != SYS_MSG_COOKIE) {
		QDF_DEBUG_PANIC("Scheduler messages must be initialized");
		return QDF_STATUS_E_FAILURE;
	}

	dest_id = scheduler_get_dest_id(qid);
	src_id = scheduler_get_src_id(qid);

	if (dest_id >= QDF_MODULE_ID_MAX || src_id >= QDF_MODULE_ID_MAX) {
		sched_err("Invalid src_id/dest_id; cannot post message");
		return QDF_STATUS_E_FAILURE;
	}

	/* Target_If is a special message queue in phase 3 convergence
	 * because it is used both by legacy WMA and by new UMAC components,
	 * which populate callback handlers directly in the message body.
	 * 1) Legacy WMA messages must not have a callback
	 * 2) New target_if messages need a valid callback
	 * Clear the callback handler for legacy WMA messages so that a
	 * legacy WMA message posted with an uninitialized callback is still
	 * handled properly. Also remap the legacy WMA message queue id to
	 * the target_if queue so messages are always handled in the right
	 * order.
	 */
	if (QDF_MODULE_ID_WMA == dest_id) {
		msg->callback = NULL;
		/* change legacy WMA message id to new target_if mq id */
		dest_id = QDF_MODULE_ID_TARGET_IF;
	}

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[dest_id];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized; dropping message");
		return QDF_STATUS_E_FAILURE;
	}

	if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) {
		QDF_DEBUG_PANIC("callback not registered for qid[%d]", dest_id);
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);

	queue_msg = scheduler_core_msg_dup(msg);
	if (!queue_msg)
		return QDF_STATUS_E_NOMEM;

	if (is_high_priority)
		scheduler_mq_put_front(target_mq, queue_msg);
	else
		scheduler_mq_put(target_mq, queue_msg);

	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	return QDF_STATUS_SUCCESS;
}
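
/*
 * A posting sketch (illustrative only; the message type, payload, and
 * handler names are placeholders). The message is deep-copied by
 * scheduler_core_msg_dup(), so a stack-allocated scheduler_msg is safe:
 *
 *	struct scheduler_msg msg = {0};
 *
 *	msg.type = MY_MSG_TYPE;		// hypothetical message type
 *	msg.bodyptr = my_payload;	// hypothetical payload pointer
 *	msg.callback = my_msg_handler;	// hypothetical handler
 *	status = scheduler_post_msg(QDF_MODULE_ID_OS_IF, &msg);
 */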

QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid,
				     scheduler_msg_process_fn_t callback)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	if (sched_ctx->sch_last_qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Cannot register more than %d message queues",
			  SCHEDULER_NUMBER_OF_MSG_QUEUE);
		return QDF_STATUS_E_FAILURE;
	}

	ctx = &sched_ctx->queue_ctx;
	ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx;
	ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid;
	ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback;
	sched_ctx->sch_last_qidx++;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	uint8_t qidx;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	ctx = &sched_ctx->queue_ctx;
	qidx = ctx->scheduler_msg_qid_to_qidx[qid];
	ctx->scheduler_msg_process_fn[qidx] = NULL;
	sched_ctx->sch_last_qidx--;
	/* invalidate the qid->qidx mapping for the deregistered module */
	ctx->scheduler_msg_qid_to_qidx[qid] = SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

void scheduler_resume(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_event_set(&sched_ctx->resume_sch_event);
}

void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		sched_ctx->hdd_callback = callback;
}

void scheduler_wake_up_controller_thread(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
}

void scheduler_set_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_set_bit(event_mask, &sched_ctx->sch_event_flag);
}

void scheduler_clear_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_clear_bit(event_mask, &sched_ctx->sch_event_flag);
}
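
/*
 * Queue lookup sketch (illustrative only): posting resolves a module id
 * to a queue index through the qid->qidx table built at registration
 * time. Assuming two modules were registered in this order, the tables
 * would look like:
 *
 *	scheduler_msg_qid_to_qidx[QDF_MODULE_ID_TARGET_IF] == 0
 *	scheduler_msg_qid_to_qidx[QDF_MODULE_ID_OS_IF]     == 1
 *	sch_msg_q[0].qid == QDF_MODULE_ID_TARGET_IF
 *	sch_msg_q[1].qid == QDF_MODULE_ID_OS_IF
 *
 * Any qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE marks an unregistered qid.
 */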

QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	target_if_msg_handler = msg->callback;

	/* Target_If is a special message queue in phase 3 convergence
	 * because it is used both by legacy WMA and by new UMAC components.
	 * New UMAC components pass their message handlers directly as the
	 * callback in the message body.
	 * 1) Legacy WMA messages carry no callback, so invoke the
	 *    registered legacy WMA handler. The scheduler message posting
	 *    APIs ensure legacy WMA messages have no callbacks.
	 * 2) New messages with valid callbacks are invoked directly.
	 */
	if (!target_if_msg_handler)
		status = sched_ctx->legacy_wma_handler(msg);
	else
		status = target_if_msg_handler(msg);

	return status;
}

QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	os_if_msg_handler = msg->callback;

	QDF_BUG(os_if_msg_handler);
	if (!os_if_msg_handler)
		return QDF_STATUS_E_FAILURE;

	os_if_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}
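
/*
 * Self-routing message sketch (illustrative only): queues such as os_if
 * dispatch on msg->callback, so each message carries its own handler.
 * my_msg_handler is a placeholder, not an API of this module:
 *
 *	static QDF_STATUS my_msg_handler(struct scheduler_msg *msg)
 *	{
 *		// consume msg->bodyptr / msg->bodyval here
 *		return QDF_STATUS_SUCCESS;
 *	}
 *
 *	msg.callback = my_msg_handler;
 *	scheduler_post_msg(QDF_MODULE_ID_OS_IF, &msg);
 */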

QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	qdf_mc_timer_callback_t timer_callback;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* legacy sys message handler? */
	if (msg->reserved != SYS_MSG_COOKIE || msg->type != SYS_MSG_ID_MC_TIMER)
		return sched_ctx->legacy_sys_handler(msg);

	timer_callback = msg->callback;
	QDF_BUG(timer_callback);
	if (!timer_callback)
		return QDF_STATUS_E_FAILURE;

	timer_callback(msg->bodyptr);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_scan_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*scan_q_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	scan_q_msg_handler = msg->callback;

	QDF_BUG(scan_q_msg_handler);
	if (!scan_q_msg_handler)
		return QDF_STATUS_E_FAILURE;

	scan_q_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
						 wma_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = wma_callback;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
						 sys_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = sys_callback;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_wma_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_sys_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS scheduler_msg_flush_noop(struct scheduler_msg *msg)
{
	return QDF_STATUS_SUCCESS;
}
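
/*
 * Flush-callback sketch (illustrative only): when the queues are
 * flushed at shutdown, each message's flush_callback decides how to
 * release its payload. The noop above is used when bodyptr is not owned
 * by the scheduler (e.g. MC timer user data). A message that owns a
 * heap-allocated payload could instead free it; my_msg_flush is a
 * placeholder:
 *
 *	static QDF_STATUS my_msg_flush(struct scheduler_msg *msg)
 *	{
 *		qdf_mem_free(msg->bodyptr);
 *		return QDF_STATUS_SUCCESS;
 *	}
 */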

void scheduler_mc_timer_callback(unsigned long data)
{
	qdf_mc_timer_t *timer = (qdf_mc_timer_t *)data;
	struct scheduler_msg msg = {0};
	QDF_STATUS status;

	qdf_mc_timer_callback_t callback = NULL;
	void *user_data = NULL;
	QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW;

	QDF_BUG(timer);
	if (!timer)
		return;

	qdf_spin_lock_irqsave(&timer->platform_info.spinlock);

	switch (timer->state) {
	case QDF_TIMER_STATE_STARTING:
		/* we are in this state because someone just started the
		 * timer; the MC timer was started and expired before the
		 * timer content was updated. This is a rare race condition!
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_STOPPED:
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_UNUSED:
		status = QDF_STATUS_E_EXISTS;
		break;

	case QDF_TIMER_STATE_RUNNING:
		/* need to go to the stopped state here because the callback
		 * may restart the timer (to emulate a periodic timer)
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		/* copy the relevant timer information to local variables;
		 * once we exit this critical section, the timer content
		 * may be modified by other tasks
		 */
		callback = timer->callback;
		user_data = timer->user_data;
		type = timer->type;
		status = QDF_STATUS_SUCCESS;
		break;

	default:
		QDF_ASSERT(0);
		status = QDF_STATUS_E_FAULT;
		break;
	}

	qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);

	if (QDF_IS_STATUS_ERROR(status)) {
		sched_debug("MC timer fired but is not running; skip callback");
		return;
	}

	qdf_try_allowing_sleep(type);

	QDF_BUG(callback);
	if (!callback)
		return;

	/* serialize to the scheduler controller thread */
	msg.type = SYS_MSG_ID_MC_TIMER;
	msg.reserved = SYS_MSG_COOKIE;
	msg.callback = callback;
	msg.bodyptr = user_data;
	msg.bodyval = 0;

	/* bodyptr points to user data; do not free it during message flush */
	msg.flush_callback = scheduler_msg_flush_noop;

	status = scheduler_post_msg(QDF_MODULE_ID_SYS, &msg);
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Could not enqueue timer to timer queue");
}

QDF_STATUS scheduler_get_queue_size(QDF_MODULE_ID qid, uint32_t *size)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_ctx *sched_ctx;

	sched_ctx = scheduler_get_context();
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	/* WMA also uses the target_if queue, so replace the QID */
	if (QDF_MODULE_ID_WMA == qid)
		qid = QDF_MODULE_ID_TARGET_IF;

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized");
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);

	*size = qdf_list_size(&target_mq->mq_list);

	return QDF_STATUS_SUCCESS;
}
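
/*
 * A queue-depth sketch (illustrative only): callers can poll the
 * pending-message count for a module, e.g. to throttle posting. The
 * threshold name is a placeholder:
 *
 *	uint32_t depth;
 *
 *	if (QDF_IS_STATUS_SUCCESS(
 *			scheduler_get_queue_size(QDF_MODULE_ID_WMA, &depth)) &&
 *	    depth > MY_QUEUE_DEPTH_LIMIT)	// hypothetical threshold
 *		return QDF_STATUS_E_BUSY;	// back off and retry later
 */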