/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_core.h>
#include <qdf_atomic.h>
#include "qdf_flex_mem.h"

static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;

#ifndef WLAN_SCHED_REDUCTION_LIMIT
#define WLAN_SCHED_REDUCTION_LIMIT 0
#endif

DEFINE_QDF_FLEX_MEM_POOL(sched_pool, sizeof(struct scheduler_msg),
			 WLAN_SCHED_REDUCTION_LIMIT);

#ifdef WLAN_SCHED_HISTORY_SIZE

struct sched_history_item {
	void *callback;
	uint32_t type_id;
	uint64_t start_us;
	uint64_t duration_us;
};

static struct sched_history_item sched_history[WLAN_SCHED_HISTORY_SIZE];
static uint32_t sched_history_index;

static void sched_history_start(struct scheduler_msg *msg)
{
	struct sched_history_item hist = {
		.callback = msg->callback,
		.type_id = msg->type,
		.start_us = qdf_get_log_timestamp_usecs(),
	};

	sched_history[sched_history_index] = hist;
}

static void sched_history_stop(void)
{
	struct sched_history_item *hist = &sched_history[sched_history_index];

	hist->duration_us = qdf_get_log_timestamp_usecs() - hist->start_us;

	sched_history_index++;
	sched_history_index %= WLAN_SCHED_HISTORY_SIZE;
}

#else /* WLAN_SCHED_HISTORY_SIZE */

static inline void sched_history_start(struct scheduler_msg *msg) { }
static inline void sched_history_stop(void) { }

#endif /* WLAN_SCHED_HISTORY_SIZE */

QDF_STATUS scheduler_create_ctx(void)
{
	qdf_flex_mem_init(&sched_pool);
	gp_sched_ctx = &g_sched_ctx;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_destroy_ctx(void)
{
	gp_sched_ctx = NULL;
	qdf_flex_mem_deinit(&sched_pool);

	return QDF_STATUS_SUCCESS;
}

struct scheduler_ctx *scheduler_get_context(void)
{
	QDF_BUG(gp_sched_ctx);

	return gp_sched_ctx;
}
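
/*
 * Illustrative lifecycle sketch (hypothetical caller, not part of this
 * file): the scheduler context above is created once at startup and torn
 * down at unload:
 *
 *	struct scheduler_ctx *sched_ctx;
 *
 *	if (QDF_IS_STATUS_ERROR(scheduler_create_ctx()))
 *		return QDF_STATUS_E_FAILURE;
 *	sched_ctx = scheduler_get_context();
 *	...use sched_ctx...
 *	scheduler_destroy_ctx();
 */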

static QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_spinlock_create(&msg_q->mq_lock);
	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

static void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_list_destroy(&msg_q->mq_list);
	qdf_spinlock_destroy(&msg_q->mq_lock);

	sched_exit();
}

/* current number of messages in flight across all scheduler queues */
static qdf_atomic_t __sched_queue_depth;

static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	qdf_atomic_set(&__sched_queue_depth, 0);

	/* Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]);
		if (QDF_STATUS_SUCCESS != status)
			return status;
	}

	/* Initialize all qid to qidx mapping to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return status;
}

static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* De-initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);

	/* Reset all qid to qidx mapping to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

void scheduler_mq_put(struct scheduler_mq_type *msg_q,
		      struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_back(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
			    struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_front(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q)
{
	QDF_STATUS status;
	qdf_list_node_t *node;

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	status = qdf_list_remove_front(&msg_q->mq_list, &node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	if (QDF_IS_STATUS_ERROR(status))
		return NULL;

	return qdf_container_of(node, struct scheduler_msg, node);
}

QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}

QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	status = scheduler_all_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		scheduler_all_queues_deinit(sched_ctx);
		sched_err("Failed to initialize the msg queues");
		return status;
	}

	sched_debug("Queue init passed");

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg)
{
	struct scheduler_msg *dup;

	if (qdf_atomic_inc_return(&__sched_queue_depth) >
	    SCHEDULER_CORE_MAX_MESSAGES)
		goto buffer_full;

	dup = qdf_flex_mem_alloc(&sched_pool);
	if (!dup) {
		sched_err("out of memory");
		goto dec_queue_count;
	}

	qdf_mem_copy(dup, msg, sizeof(*dup));

	return dup;

buffer_full:
	QDF_DEBUG_PANIC("Scheduler buffer is full");

dec_queue_count:
	qdf_atomic_dec(&__sched_queue_depth);

	return NULL;
}

void scheduler_core_msg_free(struct scheduler_msg *msg)
{
	qdf_flex_mem_free(&sched_pool, msg);
	qdf_atomic_dec(&__sched_queue_depth);
}
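
/*
 * Producer-side sketch (hypothetical caller; 'sched_ctx', 'qidx',
 * 'msg_type', 'body' and 'cb' are illustrative names): a typical caller
 * duplicates a stack-allocated message into the flex-mem pool and
 * enqueues the copy; the scheduler thread frees it after processing:
 *
 *	struct scheduler_msg msg = {
 *		.type = msg_type,
 *		.bodyptr = body,
 *		.callback = cb,
 *	};
 *	struct scheduler_msg *dup = scheduler_core_msg_dup(&msg);
 *
 *	if (dup)
 *		scheduler_mq_put(&sched_ctx->queue_ctx.sch_msg_q[qidx], dup);
 */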

static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
					    bool *shutdown)
{
	int i;
	QDF_STATUS status;
	struct scheduler_msg *msg;

	if (!sch_ctx) {
		QDF_DEBUG_PANIC("sch_ctx is null");
		return;
	}

	/* start with the highest priority queue: timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* Check if MC needs to shutdown */
		if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK,
					&sch_ctx->sch_event_flag)) {
			sched_debug("scheduler thread signaled to shutdown");
			*shutdown = true;

			/* Check for any Suspend Indication */
			if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag)) {
				/* Unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}

			break;
		}

		msg = scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (!msg) {
			/* check next queue */
			i++;
			continue;
		}

		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			sch_ctx->watchdog_msg_type = msg->type;
			sch_ctx->watchdog_callback = msg->callback;

			sched_history_start(msg);
			qdf_timer_start(&sch_ctx->watchdog_timer,
					SCHEDULER_WATCHDOG_TIMEOUT);
			status = sch_ctx->queue_ctx.
					scheduler_msg_process_fn[i](msg);
			qdf_timer_stop(&sch_ctx->watchdog_timer);
			sched_history_stop();

			if (QDF_IS_STATUS_ERROR(status))
				sched_err("Failed processing Qid[%d] message",
					  sch_ctx->queue_ctx.sch_msg_q[i].qid);

			scheduler_core_msg_free(msg);
		}

		/* start again with the highest priority queue at index 0 */
		i = 0;
	}

	/* Check for any Suspend Indication */
	if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
					  &sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);

		/* Wait for resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}

	return; /* Nothing to process; wait on the wait queue */
}

int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int retWaitStatus = 0;
	bool shutdown = false;

	if (!arg) {
		QDF_DEBUG_PANIC("arg is null");
		return 0;
	}
	qdf_set_user_nice(current, -2);

	/* Ack back to the context from which the main controller thread
	 * has been created
	 */
	qdf_event_set(&sch_ctx->sch_start_event);
	sched_debug("scheduler thread %d (%s) starting up",
		    current->pid, current->comm);

	while (!shutdown) {
		/* This implements the execution model algorithm */
		retWaitStatus = qdf_wait_queue_interruptible(
				sch_ctx->sch_wait_queue,
				qdf_atomic_test_bit(MC_POST_EVENT_MASK,
						    &sch_ctx->sch_event_flag) ||
				qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK,
						    &sch_ctx->sch_event_flag));

		if (retWaitStatus == -ERESTARTSYS)
			QDF_DEBUG_PANIC("Scheduler received -ERESTARTSYS");

		qdf_atomic_clear_bit(MC_POST_EVENT_MASK,
				     &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* If we get here the scheduler thread must exit */
	sched_debug("Scheduler thread exiting");
	qdf_event_set(&sch_ctx->sch_shutdown);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}
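
/*
 * Flush sketch: messages still queued at shutdown are drained below. A
 * message that owns resources beyond a simple bodyptr should supply a
 * flush_callback so it can clean up after itself; a minimal hypothetical
 * callback might be:
 *
 *	static QDF_STATUS my_flush_cb(struct scheduler_msg *msg)
 *	{
 *		qdf_mem_free(msg->bodyptr);
 *		return QDF_STATUS_SUCCESS;
 *	}
 */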

static void scheduler_flush_single_queue(struct scheduler_mq_type *mq)
{
	struct scheduler_msg *msg;
	QDF_STATUS (*flush_cb)(struct scheduler_msg *);

	while ((msg = scheduler_mq_get(mq))) {
		if (msg->flush_callback) {
			sched_debug("Calling flush callback; type: %x",
				    msg->type);
			flush_cb = msg->flush_callback;
			flush_cb(msg);
		} else if (msg->bodyptr) {
			sched_debug("Freeing scheduler msg bodyptr; type: %x",
				    msg->type);
			qdf_mem_free(msg->bodyptr);
		}

		scheduler_core_msg_free(msg);
	}
}

void scheduler_queues_flush(struct scheduler_ctx *sched_ctx)
{
	struct scheduler_mq_type *mq;
	int i;

	sched_debug("Flushing scheduler message queues");

	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		mq = &sched_ctx->queue_ctx.sch_msg_q[i];
		scheduler_flush_single_queue(mq);
	}

	qdf_flex_mem_release(&sched_pool);
}
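
/*
 * Teardown-order sketch (hypothetical unload path): flushing before
 * deinit ensures queued messages are returned to the pool rather than
 * leaked:
 *
 *	scheduler_queues_flush(sched_ctx);
 *	scheduler_queues_deinit(sched_ctx);
 *	scheduler_destroy_ctx();
 */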