/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_api.h>
#include <scheduler_core.h>
#include <qdf_atomic.h>

QDF_STATUS scheduler_disable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_info("Disabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		sched_err("sched_ctx is NULL");
		return QDF_STATUS_E_INVAL;
	}

	/* send shutdown signal to scheduler thread */
	qdf_atomic_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	/* wait for scheduler thread to shutdown */
	qdf_wait_single_event(&sched_ctx->sch_shutdown, 0);
	sched_ctx->sch_thread = NULL;

	/* flush any unprocessed scheduler messages */
	scheduler_queues_flush(sched_ctx);

	return QDF_STATUS_SUCCESS;
}
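/* Watchdog handlers: scheduler_watchdog_notify() resolves the symbol of the
 * callback recorded in sched->watchdog_callback (recorded while a message is
 * being dispatched) and reports that it exceeded its allotted
 * SCHEDULER_WATCHDOG_TIMEOUT. On CONFIG_SLUB_DEBUG_ON builds the timeout
 * also dumps the scheduler thread's stack and panics, unless a shutdown is
 * already in progress.
 */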
static inline void scheduler_watchdog_notify(struct scheduler_ctx *sched)
{
	char symbol[QDF_SYMBOL_LEN];

	if (sched->watchdog_callback)
		qdf_sprint_symbol(symbol, sched->watchdog_callback);

	sched_err("WLAN_BUG_RCA: Callback %s (type 0x%x) exceeded its allotted time of %ds",
		  sched->watchdog_callback ? symbol : "<null>",
		  sched->watchdog_msg_type, SCHEDULER_WATCHDOG_TIMEOUT / 1000);
}

#ifdef CONFIG_SLUB_DEBUG_ON
static void scheduler_watchdog_timeout(void *arg)
{
	struct scheduler_ctx *sched = arg;

	scheduler_watchdog_notify(sched);
	if (sched->sch_thread)
		qdf_print_thread_trace(sched->sch_thread);

	/* avoid crashing during shutdown */
	if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK, &sched->sch_event_flag))
		return;

	sched_fatal("Going down for Scheduler Watchdog Bite!");
	QDF_BUG(0);
}
#else
static void scheduler_watchdog_timeout(void *arg)
{
	scheduler_watchdog_notify((struct scheduler_ctx *)arg);
}
#endif

QDF_STATUS scheduler_enable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_info("Enabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		return QDF_STATUS_E_INVAL;
	}

	qdf_atomic_clear_bit(MC_SHUTDOWN_EVENT_MASK,
			     &sched_ctx->sch_event_flag);
	qdf_atomic_clear_bit(MC_POST_EVENT_MASK,
			     &sched_ctx->sch_event_flag);

	/* create the scheduler thread */
	sched_ctx->sch_thread = qdf_create_thread(scheduler_thread, sched_ctx,
						  "scheduler_thread");
	if (IS_ERR(sched_ctx->sch_thread)) {
		sched_err("Failed to create scheduler thread");
		return QDF_STATUS_E_RESOURCES;
	}

	sched_info("Scheduler thread created");

	/* wait for the scheduler thread to startup */
	qdf_wake_up_process(sched_ctx->sch_thread);
	qdf_wait_single_event(&sched_ctx->sch_start_event, 0);

	sched_info("Scheduler thread started");

	return QDF_STATUS_SUCCESS;
}
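/* Lifecycle: scheduler_init()/scheduler_deinit() create and destroy the
 * context, queues, events, lock, and watchdog timer, while
 * scheduler_enable()/scheduler_disable() (above) start and stop the
 * scheduler thread itself. scheduler_init() unwinds partial initialization
 * through the labels at the bottom of the function, in reverse order of
 * setup.
 */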
QDF_STATUS scheduler_init(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_info("Initializing Scheduler");

	status = scheduler_create_ctx();
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create context; status:%d", status);
		return status;
	}

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		status = QDF_STATUS_E_FAILURE;
		goto ctx_destroy;
	}

	status = scheduler_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to init queues; status:%d", status);
		goto ctx_destroy;
	}

	status = qdf_event_create(&sched_ctx->sch_start_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create start event; status:%d", status);
		goto queues_deinit;
	}

	status = qdf_event_create(&sched_ctx->sch_shutdown);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create shutdown event; status:%d", status);
		goto start_event_destroy;
	}

	status = qdf_event_create(&sched_ctx->resume_sch_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create resume event; status:%d", status);
		goto shutdown_event_destroy;
	}

	qdf_spinlock_create(&sched_ctx->sch_thread_lock);
	qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue);
	sched_ctx->sch_event_flag = 0;
	qdf_timer_init(NULL,
		       &sched_ctx->watchdog_timer,
		       &scheduler_watchdog_timeout,
		       sched_ctx,
		       QDF_TIMER_TYPE_SW);

	qdf_register_mc_timer_callback(scheduler_mc_timer_callback);

	return QDF_STATUS_SUCCESS;

shutdown_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_shutdown);

start_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_start_event);

queues_deinit:
	scheduler_queues_deinit(sched_ctx);

ctx_destroy:
	scheduler_destroy_ctx();

	return status;
}

QDF_STATUS scheduler_deinit(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_info("Deinitializing Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		return QDF_STATUS_E_INVAL;
	}

	qdf_timer_free(&sched_ctx->watchdog_timer);
	qdf_spinlock_destroy(&sched_ctx->sch_thread_lock);
	qdf_event_destroy(&sched_ctx->resume_sch_event);
	qdf_event_destroy(&sched_ctx->sch_shutdown);
	qdf_event_destroy(&sched_ctx->sch_start_event);

	status = scheduler_queues_deinit(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to deinit queues; status:%d", status);

	status = scheduler_destroy_ctx();
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to destroy context; status:%d", status);

	return QDF_STATUS_SUCCESS;
}
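/* Message posting: the caller's message is duplicated via
 * scheduler_core_msg_dup() before it is queued, so callers may safely post
 * stack-allocated messages. High-priority messages go to the front of the
 * target queue; normal messages go to the back. After queueing, the
 * scheduler thread is woken via MC_POST_EVENT_MASK and the wait queue.
 */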
QDF_STATUS scheduler_post_msg_by_priority(QDF_MODULE_ID qid,
					  struct scheduler_msg *msg,
					  bool is_high_priority)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_msg *queue_msg;
	struct scheduler_ctx *sched_ctx;

	if (!msg) {
		sched_err("msg is null");
		return QDF_STATUS_E_INVAL;
	}

	sched_ctx = scheduler_get_context();
	if (!sched_ctx) {
		sched_err("sched_ctx is null");
		return QDF_STATUS_E_INVAL;
	}

	if (!sched_ctx->sch_thread) {
		sched_err("Cannot post message; scheduler thread is stopped");
		return QDF_STATUS_E_FAILURE;
	}

	if (msg->reserved != 0 && msg->reserved != SYS_MSG_COOKIE) {
		sched_err("Uninitialized scheduler message. Please initialize it");
		QDF_DEBUG_PANIC();
		return QDF_STATUS_E_FAILURE;
	}

	/* Target_If is a special message queue in phase 3 convergence because
	 * it is used by both legacy WMA and new UMAC components, which
	 * directly populate callback handlers in the message body.
	 * 1) Legacy WMA messages should not have a callback.
	 * 2) New target_if messages must have a valid callback.
	 * Clear the callback handler for legacy WMA messages so that a legacy
	 * WMA message posted from the stack with an uninitialized callback is
	 * handled properly. Also map the legacy WMA message queue id to the
	 * target_if queue id so messages are always handled in the right
	 * order.
	 */
	if (QDF_MODULE_ID_WMA == qid) {
		msg->callback = NULL;
		/* change legacy WMA message id to new target_if mq id */
		qid = QDF_MODULE_ID_TARGET_IF;
	}

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized; ignoring message");
		return QDF_STATUS_E_FAILURE;
	}

	if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) {
		sched_err("callback not registered for qid[%d]", qid);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &sched_ctx->queue_ctx.sch_msg_q[qidx];

	queue_msg = scheduler_core_msg_dup(msg);
	if (!queue_msg)
		return QDF_STATUS_E_NOMEM;

	if (is_high_priority)
		scheduler_mq_put_front(target_mq, queue_msg);
	else
		scheduler_mq_put(target_mq, queue_msg);

	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid,
				     scheduler_msg_process_fn_t callback)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	sched_enter();

	if (!sched_ctx) {
		QDF_ASSERT(0);
		sched_err("sched_ctx is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	if (sched_ctx->sch_last_qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Already registered the maximum of %d message queues",
			  SCHEDULER_NUMBER_OF_MSG_QUEUE);
		return QDF_STATUS_E_FAILURE;
	}

	ctx = &sched_ctx->queue_ctx;
	ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx;
	ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid;
	ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback;
	sched_ctx->sch_last_qidx++;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	uint8_t qidx;

	sched_enter();

	if (!sched_ctx) {
		QDF_ASSERT(0);
		sched_err("sched_ctx is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	ctx = &sched_ctx->queue_ctx;
	qidx = ctx->scheduler_msg_qid_to_qidx[qid];
	ctx->scheduler_msg_process_fn[qidx] = NULL;
	sched_ctx->sch_last_qidx--;
	/* invalidate the qid -> qidx mapping; the map is indexed by qid */
	ctx->scheduler_msg_qid_to_qidx[qid] = SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}
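/* The helpers below are thin accessors on the global scheduler context;
 * each silently does nothing if the context has not been created yet.
 */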
void scheduler_resume(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_event_set(&sched_ctx->resume_sch_event);
}

void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		sched_ctx->hdd_callback = callback;
}

void scheduler_wake_up_controller_thread(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
}

void scheduler_set_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_set_bit(event_mask, &sched_ctx->sch_event_flag);
}

void scheduler_clear_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_atomic_clear_bit(event_mask, &sched_ctx->sch_event_flag);
}

QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *);

	if (!msg || !sched_ctx) {
		sched_err("msg %pK sch %pK", msg, sched_ctx);
		return QDF_STATUS_E_FAILURE;
	}

	target_if_msg_handler = msg->callback;

	/* Target_If is a special message queue in phase 3 convergence because
	 * it is used by both legacy WMA and new UMAC components. New UMAC
	 * components pass their message handlers directly as the callback in
	 * the message body.
	 * 1) Legacy WMA messages do not contain a message callback, so invoke
	 *    the registered legacy WMA handler. The scheduler message posting
	 *    APIs ensure legacy WMA messages do not have callbacks.
	 * 2) For new messages with valid callbacks, invoke the callback
	 *    directly.
	 */
	if (!target_if_msg_handler)
		status = sched_ctx->legacy_wma_handler(msg);
	else
		status = target_if_msg_handler(msg);

	return status;
}

QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *);

	if (!msg) {
		sched_err("Msg is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	os_if_msg_handler = msg->callback;

	if (!os_if_msg_handler) {
		sched_err("Msg callback is NULL");
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	os_if_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	qdf_mc_timer_callback_t timer_q_msg_handler;

	if (!msg || !sched_ctx) {
		sched_err("msg %pK sch %pK", msg, sched_ctx);
		return QDF_STATUS_E_FAILURE;
	}

	timer_q_msg_handler = msg->callback;

	/* Timer message handler */
	if (SYS_MSG_COOKIE == msg->reserved &&
	    SYS_MSG_ID_MC_TIMER == msg->type) {
		if (timer_q_msg_handler) {
			status = QDF_STATUS_SUCCESS;
			timer_q_msg_handler(msg->bodyptr);
		} else {
			sched_err("Timer cb is null");
			status = QDF_STATUS_E_FAILURE;
		}

		return status;
	}

	/* Legacy sys message handler */
	return sched_ctx->legacy_sys_handler(msg);
}

QDF_STATUS scheduler_scan_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*scan_q_msg_handler)(struct scheduler_msg *);

	if (!msg) {
		sched_err("Msg is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	scan_q_msg_handler = msg->callback;

	if (!scan_q_msg_handler) {
		sched_err("Msg callback is NULL");
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	scan_q_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}
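/* Legacy handler registration: WMA and SYS still deliver messages without a
 * per-message callback. The handlers registered below are the fallbacks
 * invoked by scheduler_target_if_mq_handler() and
 * scheduler_timer_q_mq_handler() above.
 */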
QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
						 wma_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (!sched_ctx) {
		sched_err("scheduler context is null");
		return QDF_STATUS_E_FAILURE;
	}

	sched_ctx->legacy_wma_handler = wma_callback;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
						 sys_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (!sched_ctx) {
		sched_err("scheduler context is null");
		return QDF_STATUS_E_FAILURE;
	}

	sched_ctx->legacy_sys_handler = sys_callback;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_wma_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (!sched_ctx) {
		sched_err("scheduler context is null");
		return QDF_STATUS_E_FAILURE;
	}

	sched_ctx->legacy_wma_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_deregister_sys_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (!sched_ctx) {
		sched_err("scheduler context is null");
		return QDF_STATUS_E_FAILURE;
	}

	sched_ctx->legacy_sys_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS scheduler_msg_flush_noop(struct scheduler_msg *msg)
{
	return QDF_STATUS_SUCCESS;
}

void scheduler_mc_timer_callback(unsigned long data)
{
	qdf_mc_timer_t *timer = (qdf_mc_timer_t *)data;
	struct scheduler_msg msg = {0};
	QDF_STATUS status;

	qdf_mc_timer_callback_t callback = NULL;
	void *user_data = NULL;
	QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW;

	QDF_ASSERT(timer);
	if (!timer) {
		sched_err("Null pointer passed in!");
		return;
	}

	qdf_spin_lock_irqsave(&timer->platform_info.spinlock);

	switch (timer->state) {
	case QDF_TIMER_STATE_STARTING:
		/* we are in this state because someone just started the
		 * timer; the OS timer fired and expired before the timer
		 * state was updated. This is a rare race condition!
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_STOPPED:
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_UNUSED:
		status = QDF_STATUS_E_EXISTS;
		break;

	case QDF_TIMER_STATE_RUNNING:
		/* need to go to the stopped state here because the callback
		 * may restart the timer (to emulate a periodic timer)
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		/* copy the relevant timer information to local variables;
		 * once we exit this critical section, the timer content may
		 * be modified by other tasks
		 */
		callback = timer->callback;
		user_data = timer->user_data;
		type = timer->type;
		status = QDF_STATUS_SUCCESS;
		break;

	default:
		QDF_ASSERT(0);
		status = QDF_STATUS_E_FAULT;
		break;
	}

	qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);

	if (QDF_STATUS_SUCCESS != status) {
		sched_err("TIMER callback called in a wrong state=%d",
			  timer->state);
		return;
	}

	qdf_try_allowing_sleep(type);

	if (!callback) {
		sched_err("No TIMER callback, Couldn't enqueue timer to any queue");
		QDF_ASSERT(0);
		return;
	}

	/* serialize to the scheduler controller thread */
	msg.type = SYS_MSG_ID_MC_TIMER;
	msg.reserved = SYS_MSG_COOKIE;
	msg.callback = callback;
	msg.bodyptr = user_data;
	msg.bodyval = 0;

	/* bodyptr points to user data; do not free it during msg flush */
	msg.flush_callback = scheduler_msg_flush_noop;

	if (scheduler_post_msg(QDF_MODULE_ID_SYS, &msg) == QDF_STATUS_SUCCESS)
		return;

	sched_err("Could not enqueue timer to timer queue");
}