1 /* 2 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <hif_exec.h> 20 #include <ce_main.h> 21 #include <hif_irq_affinity.h> 22 #include "qdf_module.h" 23 #include "qdf_net_if.h" 24 /* mapping NAPI budget 0 to internal budget 0 25 * NAPI budget 1 to internal budget [1,scaler -1] 26 * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], etc 27 */ 28 #define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \ 29 (((n) << (s)) - 1) 30 #define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \ 31 (((n) + 1) >> (s)) 32 33 static struct hif_exec_context *hif_exec_tasklet_create(void); 34 35 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY 36 struct hif_event_history hif_event_desc_history[HIF_NUM_INT_CONTEXTS]; 37 38 static inline int hif_get_next_record_index(qdf_atomic_t *table_index, 39 int array_size) 40 { 41 int record_index = qdf_atomic_inc_return(table_index); 42 43 return record_index & (array_size - 1); 44 } 45 46 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx, 47 struct hif_event_record *event, uint8_t intr_grp_id) 48 { 49 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 50 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 51 struct hif_exec_context *hif_ext_group; 52 struct hif_event_history *hist_ev; 53 struct hif_event_record *record; 54 int record_index; 55 56 if (!hif_state->hif_num_extgroup) 57 return; 58 59 if (scn->event_disable_mask & BIT(event->type)) 60 return; 61 62 if (intr_grp_id >= HIF_NUM_INT_CONTEXTS) { 63 hif_err("Invalid interrupt group id %d", intr_grp_id); 64 return; 65 } 66 67 hif_ext_group = hif_state->hif_ext_group[intr_grp_id]; 68 hist_ev = hif_ext_group->evt_hist; 69 70 record_index = hif_get_next_record_index( 71 &hist_ev->index, HIF_EVENT_HIST_MAX); 72 73 record = &hist_ev->event[record_index]; 74 75 record->hal_ring_id = event->hal_ring_id; 76 record->hp = event->hp; 77 record->tp = event->tp; 78 record->cpu_id = qdf_get_cpu(); 79 record->timestamp = qdf_get_log_timestamp(); 80 record->type = event->type; 81 } 82 83 static void hif_event_history_init(struct hif_exec_context *hif_ext_grp) 84 { 85 hif_ext_grp->evt_hist = &hif_event_desc_history[hif_ext_grp->grp_id]; 86 qdf_atomic_set(&hif_ext_grp->evt_hist->index, -1); 87 } 88 #else 89 static inline void hif_event_history_init(struct hif_exec_context *hif_ext_grp) 90 { 91 } 92 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ 93 94 /** 95 * hif_print_napi_latency_stats() - print NAPI scheduling latency stats 96 * @hif_state: hif context 97 * 98 * return: void 99 */ 100 #ifdef HIF_LATENCY_PROFILE_ENABLE 101 static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state) 102 { 103 struct hif_exec_context *hif_ext_group; 104 int i, j; 105 int64_t cur_tstamp; 106 107 const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] = { 108 "0-2 ms", 109 "3-10 ms", 110 "11-20 ms", 111 "21-50 ms", 112 "51-100 ms", 113 "101-250 ms", 114 "251-500 ms", 115 "> 500 ms" 116 }; 117 118 cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get()); 119 120 QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL, 121 "Current timestamp: %lld", cur_tstamp); 122 123 for (i = 0; i < hif_state->hif_num_extgroup; i++) { 124 if (hif_state->hif_ext_group[i]) { 125 hif_ext_group = hif_state->hif_ext_group[i]; 126 127 QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL, 128 "Interrupts in the HIF Group"); 129 130 for (j = 0; j < hif_ext_group->numirq; j++) { 131 QDF_TRACE(QDF_MODULE_ID_HIF, 132 QDF_TRACE_LEVEL_FATAL, 133 " %s", 134 hif_ext_group->irq_name 135 (hif_ext_group->irq[j])); 136 } 137 138 QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL, 139 "Last serviced timestamp: %lld", 140 hif_ext_group->tstamp); 141 142 QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL, 143 "Latency Bucket | Time elapsed"); 144 145 for (j = 0; j < HIF_SCHED_LATENCY_BUCKETS; j++) { 146 QDF_TRACE(QDF_MODULE_ID_HIF, 147 QDF_TRACE_LEVEL_FATAL, 148 "%s | %lld", time_str[j], 149 hif_ext_group-> 150 sched_latency_stats[j]); 151 } 152 } 153 } 154 } 155 #else 156 static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state) 157 { 158 } 159 #endif 160 161 /** 162 * hif_clear_napi_stats() - reset NAPI stats 163 * @hif_ctx: hif context 164 * 165 * return: void 166 */ 167 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx) 168 { 169 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 170 struct hif_exec_context *hif_ext_group; 171 size_t i; 172 173 for (i = 0; i < hif_state->hif_num_extgroup; i++) { 174 hif_ext_group = hif_state->hif_ext_group[i]; 175 176 if (!hif_ext_group) 177 return; 178 179 qdf_mem_set(hif_ext_group->sched_latency_stats, 180 sizeof(hif_ext_group->sched_latency_stats), 181 0x0); 182 } 183 } 184 185 qdf_export_symbol(hif_clear_napi_stats); 186 187 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT 188 /** 189 * hif_get_poll_times_hist_str() - Get HIF poll times histogram string 190 * @stats: NAPI stats to get poll time buckets 191 * @buf: buffer to fill histogram string 192 * @buf_len: length of the buffer 193 * 194 * Return: void 195 */ 196 static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf, 197 uint8_t buf_len) 198 { 199 int i; 200 int str_index = 0; 201 202 for (i = 0; i < QCA_NAPI_NUM_BUCKETS; i++) 203 str_index += qdf_scnprintf(buf + str_index, buf_len - str_index, 204 "%u|", stats->poll_time_buckets[i]); 205 } 206 207 /** 208 * hif_exec_fill_poll_time_histogram() - fills poll time histogram for a NAPI 209 * @hif_ext_group: hif_ext_group of type NAPI 210 * 211 * The function is called at the end of a NAPI poll to calculate poll time 212 * buckets. 213 * 214 * Return: void 215 */ 216 static 217 void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group) 218 { 219 struct qca_napi_stat *napi_stat; 220 unsigned long long poll_time_ns; 221 uint32_t poll_time_us; 222 uint32_t bucket_size_us = 500; 223 uint32_t bucket; 224 uint32_t cpu_id = qdf_get_cpu(); 225 226 poll_time_ns = sched_clock() - hif_ext_group->poll_start_time; 227 poll_time_us = qdf_do_div(poll_time_ns, 1000); 228 229 napi_stat = &hif_ext_group->stats[cpu_id]; 230 if (poll_time_ns > hif_ext_group->stats[cpu_id].napi_max_poll_time) 231 hif_ext_group->stats[cpu_id].napi_max_poll_time = poll_time_ns; 232 233 bucket = poll_time_us / bucket_size_us; 234 if (bucket >= QCA_NAPI_NUM_BUCKETS) 235 bucket = QCA_NAPI_NUM_BUCKETS - 1; 236 ++napi_stat->poll_time_buckets[bucket]; 237 } 238 239 /** 240 * hif_exec_poll_should_yield() - Local function deciding if NAPI should yield 241 * @hif_ext_group: hif_ext_group of type NAPI 242 * 243 * Return: true if NAPI needs to yield, else false 244 */ 245 static bool hif_exec_poll_should_yield(struct hif_exec_context *hif_ext_group) 246 { 247 bool time_limit_reached = false; 248 unsigned long long poll_time_ns; 249 int cpu_id = qdf_get_cpu(); 250 struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif); 251 struct hif_config_info *cfg = &scn->hif_config; 252 253 poll_time_ns = sched_clock() - hif_ext_group->poll_start_time; 254 time_limit_reached = 255 poll_time_ns > cfg->rx_softirq_max_yield_duration_ns ? 1 : 0; 256 257 if (time_limit_reached) { 258 hif_ext_group->stats[cpu_id].time_limit_reached++; 259 hif_ext_group->force_break = true; 260 } 261 262 return time_limit_reached; 263 } 264 265 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id) 266 { 267 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 268 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 269 struct hif_exec_context *hif_ext_group; 270 bool ret_val = false; 271 272 if (!(grp_id < hif_state->hif_num_extgroup) || 273 !(grp_id < HIF_MAX_GROUP)) 274 return false; 275 276 hif_ext_group = hif_state->hif_ext_group[grp_id]; 277 278 if (hif_ext_group->type == HIF_EXEC_NAPI_TYPE) 279 ret_val = hif_exec_poll_should_yield(hif_ext_group); 280 281 return ret_val; 282 } 283 284 /** 285 * hif_exec_update_service_start_time() - Update NAPI poll start time 286 * @hif_ext_group: hif_ext_group of type NAPI 287 * 288 * The function is called at the beginning of a NAPI poll to record the poll 289 * start time. 290 * 291 * Return: None 292 */ 293 static inline 294 void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group) 295 { 296 hif_ext_group->poll_start_time = sched_clock(); 297 } 298 299 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx) 300 { 301 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 302 struct hif_exec_context *hif_ext_group; 303 struct qca_napi_stat *napi_stats; 304 int i, j; 305 306 /* 307 * Max value of uint_32 (poll_time_bucket) = 4294967295 308 * Thus we need 10 chars + 1 space =11 chars for each bucket value. 309 * +1 space for '\0'. 310 */ 311 char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'}; 312 313 QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR, 314 "NAPI[#]CPU[#] |scheds |polls |comps |dones |t-lim |max(us)|hist(500us buckets)"); 315 316 for (i = 0; 317 (i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]); 318 i++) { 319 hif_ext_group = hif_state->hif_ext_group[i]; 320 for (j = 0; j < num_possible_cpus(); j++) { 321 napi_stats = &hif_ext_group->stats[j]; 322 if (!napi_stats->napi_schedules) 323 continue; 324 325 hif_get_poll_times_hist_str(napi_stats, 326 hist_str, 327 sizeof(hist_str)); 328 QDF_TRACE(QDF_MODULE_ID_HIF, 329 QDF_TRACE_LEVEL_ERROR, 330 "NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s", 331 i, j, 332 napi_stats->napi_schedules, 333 napi_stats->napi_polls, 334 napi_stats->napi_completes, 335 napi_stats->napi_workdone, 336 napi_stats->time_limit_reached, 337 qdf_do_div(napi_stats->napi_max_poll_time, 338 1000), 339 hist_str); 340 } 341 } 342 343 hif_print_napi_latency_stats(hif_state); 344 } 345 346 qdf_export_symbol(hif_print_napi_stats); 347 348 #else 349 350 static inline 351 void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf, 352 uint8_t buf_len) 353 { 354 } 355 356 static inline 357 void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group) 358 { 359 } 360 361 static inline 362 void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group) 363 { 364 } 365 366 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx) 367 { 368 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 369 struct hif_exec_context *hif_ext_group; 370 struct qca_napi_stat *napi_stats; 371 int i, j; 372 373 QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL, 374 "NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone"); 375 376 for (i = 0; i < hif_state->hif_num_extgroup; i++) { 377 if (hif_state->hif_ext_group[i]) { 378 hif_ext_group = hif_state->hif_ext_group[i]; 379 for (j = 0; j < num_possible_cpus(); j++) { 380 napi_stats = &(hif_ext_group->stats[j]); 381 if (napi_stats->napi_schedules != 0) 382 QDF_TRACE(QDF_MODULE_ID_HIF, 383 QDF_TRACE_LEVEL_FATAL, 384 "NAPI[%2d]CPU[%d]: " 385 "%7d %7d %7d %7d ", 386 i, j, 387 napi_stats->napi_schedules, 388 napi_stats->napi_polls, 389 napi_stats->napi_completes, 390 napi_stats->napi_workdone); 391 } 392 } 393 } 394 395 hif_print_napi_latency_stats(hif_state); 396 } 397 qdf_export_symbol(hif_print_napi_stats); 398 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */ 399 400 static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx) 401 { 402 struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx); 403 404 tasklet_schedule(&t_ctx->tasklet); 405 } 406 407 /** 408 * hif_exec_tasklet() - grp tasklet 409 * data: context 410 * 411 * return: void 412 */ 413 static void hif_exec_tasklet_fn(unsigned long data) 414 { 415 struct hif_exec_context *hif_ext_group = 416 (struct hif_exec_context *)data; 417 struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif); 418 unsigned int work_done; 419 420 work_done = 421 hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET); 422 423 if (hif_ext_group->work_complete(hif_ext_group, work_done)) { 424 qdf_atomic_dec(&(scn->active_grp_tasklet_cnt)); 425 hif_ext_group->irq_enable(hif_ext_group); 426 } else { 427 hif_exec_tasklet_schedule(hif_ext_group); 428 } 429 } 430 431 /** 432 * hif_latency_profile_measure() - calculate latency and update histogram 433 * hif_ext_group: hif exec context 434 * 435 * return: None 436 */ 437 #ifdef HIF_LATENCY_PROFILE_ENABLE 438 static void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group) 439 { 440 int64_t cur_tstamp; 441 int64_t time_elapsed; 442 443 cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get()); 444 445 if (cur_tstamp > hif_ext_group->tstamp) 446 time_elapsed = (cur_tstamp - hif_ext_group->tstamp); 447 else 448 time_elapsed = ~0x0 - (hif_ext_group->tstamp - cur_tstamp); 449 450 hif_ext_group->tstamp = cur_tstamp; 451 452 if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_0_2) 453 hif_ext_group->sched_latency_stats[0]++; 454 else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_3_10) 455 hif_ext_group->sched_latency_stats[1]++; 456 else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_11_20) 457 hif_ext_group->sched_latency_stats[2]++; 458 else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_21_50) 459 hif_ext_group->sched_latency_stats[3]++; 460 else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_51_100) 461 hif_ext_group->sched_latency_stats[4]++; 462 else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_101_250) 463 hif_ext_group->sched_latency_stats[5]++; 464 else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_251_500) 465 hif_ext_group->sched_latency_stats[6]++; 466 else 467 hif_ext_group->sched_latency_stats[7]++; 468 } 469 #else 470 static inline 471 void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group) 472 { 473 } 474 #endif 475 476 /** 477 * hif_latency_profile_start() - Update the start timestamp for HIF ext group 478 * hif_ext_group: hif exec context 479 * 480 * return: None 481 */ 482 #ifdef HIF_LATENCY_PROFILE_ENABLE 483 static void hif_latency_profile_start(struct hif_exec_context *hif_ext_group) 484 { 485 hif_ext_group->tstamp = qdf_ktime_to_ms(qdf_ktime_get()); 486 } 487 #else 488 static inline 489 void hif_latency_profile_start(struct hif_exec_context *hif_ext_group) 490 { 491 } 492 #endif 493 494 #ifdef FEATURE_NAPI 495 /** 496 * hif_exec_poll() - napi poll 497 * napi: napi struct 498 * budget: budget for napi 499 * 500 * Return: mapping of internal budget to napi 501 */ 502 static int hif_exec_poll(struct napi_struct *napi, int budget) 503 { 504 struct hif_napi_exec_context *napi_exec_ctx = 505 qdf_container_of(napi, struct hif_napi_exec_context, napi); 506 struct hif_exec_context *hif_ext_group = &napi_exec_ctx->exec_ctx; 507 struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif); 508 int work_done; 509 int normalized_budget = 0; 510 int actual_dones; 511 int shift = hif_ext_group->scale_bin_shift; 512 int cpu = smp_processor_id(); 513 514 hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id, 515 0, 0, 0, HIF_EVENT_BH_SCHED); 516 517 hif_ext_group->force_break = false; 518 hif_exec_update_service_start_time(hif_ext_group); 519 520 if (budget) 521 normalized_budget = NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift); 522 523 hif_latency_profile_measure(hif_ext_group); 524 525 work_done = hif_ext_group->handler(hif_ext_group->context, 526 normalized_budget); 527 528 actual_dones = work_done; 529 530 if (!hif_ext_group->force_break && work_done < normalized_budget) { 531 napi_complete(napi); 532 qdf_atomic_dec(&scn->active_grp_tasklet_cnt); 533 hif_ext_group->irq_enable(hif_ext_group); 534 hif_ext_group->stats[cpu].napi_completes++; 535 } else { 536 /* if the ext_group supports time based yield, claim full work 537 * done anyways */ 538 work_done = normalized_budget; 539 } 540 541 hif_ext_group->stats[cpu].napi_polls++; 542 hif_ext_group->stats[cpu].napi_workdone += actual_dones; 543 544 /* map internal budget to NAPI budget */ 545 if (work_done) 546 work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift); 547 548 hif_exec_fill_poll_time_histogram(hif_ext_group); 549 550 return work_done; 551 } 552 553 /** 554 * hif_exec_napi_schedule() - schedule the napi exec instance 555 * @ctx: a hif_exec_context known to be of napi type 556 */ 557 static void hif_exec_napi_schedule(struct hif_exec_context *ctx) 558 { 559 struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx); 560 ctx->stats[smp_processor_id()].napi_schedules++; 561 562 napi_schedule(&n_ctx->napi); 563 } 564 565 /** 566 * hif_exec_napi_kill() - stop a napi exec context from being rescheduled 567 * @ctx: a hif_exec_context known to be of napi type 568 */ 569 static void hif_exec_napi_kill(struct hif_exec_context *ctx) 570 { 571 struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx); 572 int irq_ind; 573 574 if (ctx->inited) { 575 napi_disable(&n_ctx->napi); 576 ctx->inited = 0; 577 } 578 579 for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++) 580 hif_irq_affinity_remove(ctx->os_irq[irq_ind]); 581 582 netif_napi_del(&(n_ctx->napi)); 583 } 584 585 struct hif_execution_ops napi_sched_ops = { 586 .schedule = &hif_exec_napi_schedule, 587 .kill = &hif_exec_napi_kill, 588 }; 589 590 /** 591 * hif_exec_napi_create() - allocate and initialize a napi exec context 592 * @scale: a binary shift factor to map NAPI budget from\to internal 593 * budget 594 */ 595 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale) 596 { 597 struct hif_napi_exec_context *ctx; 598 599 ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context)); 600 if (!ctx) 601 return NULL; 602 603 ctx->exec_ctx.sched_ops = &napi_sched_ops; 604 ctx->exec_ctx.inited = true; 605 ctx->exec_ctx.scale_bin_shift = scale; 606 qdf_net_if_create_dummy_if((struct qdf_net_if *)&ctx->netdev); 607 netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll, 608 QCA_NAPI_BUDGET); 609 napi_enable(&ctx->napi); 610 611 return &ctx->exec_ctx; 612 } 613 #else 614 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale) 615 { 616 HIF_WARN("%s: FEATURE_NAPI not defined, making tasklet", __func__); 617 return hif_exec_tasklet_create(); 618 } 619 #endif 620 621 622 /** 623 * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled 624 * @ctx: a hif_exec_context known to be of tasklet type 625 */ 626 static void hif_exec_tasklet_kill(struct hif_exec_context *ctx) 627 { 628 struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx); 629 int irq_ind; 630 631 if (ctx->inited) { 632 tasklet_disable(&t_ctx->tasklet); 633 tasklet_kill(&t_ctx->tasklet); 634 } 635 ctx->inited = false; 636 637 for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++) 638 hif_irq_affinity_remove(ctx->os_irq[irq_ind]); 639 } 640 641 struct hif_execution_ops tasklet_sched_ops = { 642 .schedule = &hif_exec_tasklet_schedule, 643 .kill = &hif_exec_tasklet_kill, 644 }; 645 646 /** 647 * hif_exec_tasklet_schedule() - allocate and initialize a tasklet exec context 648 */ 649 static struct hif_exec_context *hif_exec_tasklet_create(void) 650 { 651 struct hif_tasklet_exec_context *ctx; 652 653 ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context)); 654 if (!ctx) 655 return NULL; 656 657 ctx->exec_ctx.sched_ops = &tasklet_sched_ops; 658 tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn, 659 (unsigned long)ctx); 660 661 ctx->exec_ctx.inited = true; 662 663 return &ctx->exec_ctx; 664 } 665 666 /** 667 * hif_exec_get_ctx() - retrieve an exec context based on an id 668 * @softc: the hif context owning the exec context 669 * @id: the id of the exec context 670 * 671 * mostly added to make it easier to rename or move the context array 672 */ 673 struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc, 674 uint8_t id) 675 { 676 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc); 677 678 if (id < hif_state->hif_num_extgroup) 679 return hif_state->hif_ext_group[id]; 680 681 return NULL; 682 } 683 684 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc, 685 uint8_t id) 686 { 687 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc); 688 689 if (id < hif_state->hif_num_extgroup) 690 return hif_state->hif_ext_group[id]->os_irq[0]; 691 return -EINVAL; 692 } 693 694 qdf_export_symbol(hif_get_int_ctx_irq_num); 695 696 uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx) 697 { 698 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 699 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 700 struct hif_exec_context *hif_ext_group; 701 int i, status; 702 703 if (scn->ext_grp_irq_configured) { 704 HIF_ERROR("%s Called after ext grp irq configured\n", __func__); 705 return QDF_STATUS_E_FAILURE; 706 } 707 708 for (i = 0; i < hif_state->hif_num_extgroup; i++) { 709 hif_ext_group = hif_state->hif_ext_group[i]; 710 status = 0; 711 qdf_spinlock_create(&hif_ext_group->irq_lock); 712 if (hif_ext_group->configured && 713 hif_ext_group->irq_requested == false) { 714 hif_ext_group->irq_enabled = true; 715 status = hif_grp_irq_configure(scn, hif_ext_group); 716 } 717 if (status != 0) { 718 HIF_ERROR("%s: failed for group %d", __func__, i); 719 hif_ext_group->irq_enabled = false; 720 } 721 } 722 723 scn->ext_grp_irq_configured = true; 724 725 return QDF_STATUS_SUCCESS; 726 } 727 728 qdf_export_symbol(hif_configure_ext_group_interrupts); 729 730 #ifdef WLAN_SUSPEND_RESUME_TEST 731 /** 732 * hif_check_and_trigger_ut_resume() - check if unit-test command was used to 733 * to trigger fake-suspend command, if yes 734 * then issue resume procedure. 735 * @scn: opaque HIF software context 736 * 737 * This API checks if unit-test command was used to trigger fake-suspend command 738 * and if answer is yes then it would trigger resume procedure. 739 * 740 * Make this API inline to save API-switch overhead and do branch-prediction to 741 * optimize performance impact. 742 * 743 * Return: void 744 */ 745 static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn) 746 { 747 if (qdf_unlikely(hif_irq_trigger_ut_resume(scn))) 748 hif_ut_fw_resume(scn); 749 } 750 #else 751 static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn) 752 { 753 } 754 #endif 755 756 /** 757 * hif_ext_group_interrupt_handler() - handler for related interrupts 758 * @irq: irq number of the interrupt 759 * @context: the associated hif_exec_group context 760 * 761 * This callback function takes care of dissabling the associated interrupts 762 * and scheduling the expected bottom half for the exec_context. 763 * This callback function also helps keep track of the count running contexts. 764 */ 765 irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context) 766 { 767 struct hif_exec_context *hif_ext_group = context; 768 struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif); 769 770 if (hif_ext_group->irq_requested) { 771 hif_latency_profile_start(hif_ext_group); 772 773 hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id, 774 0, 0, 0, HIF_EVENT_IRQ_TRIGGER); 775 776 hif_ext_group->irq_disable(hif_ext_group); 777 /* 778 * if private ioctl has issued fake suspend command to put 779 * FW in D0-WOW state then here is our chance to bring FW out 780 * of WOW mode. 781 * 782 * The reason why you need to explicitly wake-up the FW is here: 783 * APSS should have been in fully awake through-out when 784 * fake APSS suspend command was issued (to put FW in WOW mode) 785 * hence organic way of waking-up the FW 786 * (as part-of APSS-host wake-up) won't happen because 787 * in reality APSS didn't really suspend. 788 */ 789 hif_check_and_trigger_ut_resume(scn); 790 qdf_atomic_inc(&scn->active_grp_tasklet_cnt); 791 792 hif_ext_group->sched_ops->schedule(hif_ext_group); 793 } 794 795 return IRQ_HANDLED; 796 } 797 798 /** 799 * hif_exec_kill() - grp tasklet kill 800 * scn: hif_softc 801 * 802 * return: void 803 */ 804 void hif_exec_kill(struct hif_opaque_softc *hif_ctx) 805 { 806 int i; 807 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 808 809 for (i = 0; i < hif_state->hif_num_extgroup; i++) 810 hif_state->hif_ext_group[i]->sched_ops->kill( 811 hif_state->hif_ext_group[i]); 812 813 qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0); 814 } 815 816 /** 817 * hif_register_ext_group() - API to register external group 818 * interrupt handler. 819 * @hif_ctx : HIF Context 820 * @numirq: number of irq's in the group 821 * @irq: array of irq values 822 * @handler: callback interrupt handler function 823 * @cb_ctx: context to passed in callback 824 * @type: napi vs tasklet 825 * 826 * Return: status 827 */ 828 uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx, 829 uint32_t numirq, uint32_t irq[], ext_intr_handler handler, 830 void *cb_ctx, const char *context_name, 831 enum hif_exec_type type, uint32_t scale) 832 { 833 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 834 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 835 struct hif_exec_context *hif_ext_group; 836 837 if (scn->ext_grp_irq_configured) { 838 HIF_ERROR("%s Called after ext grp irq configured\n", __func__); 839 return QDF_STATUS_E_FAILURE; 840 } 841 842 if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) { 843 HIF_ERROR("%s Max groups reached\n", __func__); 844 return QDF_STATUS_E_FAILURE; 845 } 846 847 if (numirq >= HIF_MAX_GRP_IRQ) { 848 HIF_ERROR("%s invalid numirq\n", __func__); 849 return QDF_STATUS_E_FAILURE; 850 } 851 852 hif_ext_group = hif_exec_create(type, scale); 853 if (!hif_ext_group) 854 return QDF_STATUS_E_FAILURE; 855 856 hif_state->hif_ext_group[hif_state->hif_num_extgroup] = 857 hif_ext_group; 858 859 hif_ext_group->numirq = numirq; 860 qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0])); 861 hif_ext_group->context = cb_ctx; 862 hif_ext_group->handler = handler; 863 hif_ext_group->configured = true; 864 hif_ext_group->grp_id = hif_state->hif_num_extgroup; 865 hif_ext_group->hif = hif_ctx; 866 hif_ext_group->context_name = context_name; 867 hif_ext_group->type = type; 868 hif_event_history_init(hif_ext_group); 869 870 hif_state->hif_num_extgroup++; 871 return QDF_STATUS_SUCCESS; 872 } 873 qdf_export_symbol(hif_register_ext_group); 874 875 /** 876 * hif_exec_create() - create an execution context 877 * @type: the type of execution context to create 878 */ 879 struct hif_exec_context *hif_exec_create(enum hif_exec_type type, 880 uint32_t scale) 881 { 882 hif_debug("%s: create exec_type %d budget %d\n", 883 __func__, type, QCA_NAPI_BUDGET * scale); 884 885 switch (type) { 886 case HIF_EXEC_NAPI_TYPE: 887 return hif_exec_napi_create(scale); 888 889 case HIF_EXEC_TASKLET_TYPE: 890 return hif_exec_tasklet_create(); 891 default: 892 return NULL; 893 } 894 } 895 896 /** 897 * hif_exec_destroy() - free the hif_exec context 898 * @ctx: context to free 899 * 900 * please kill the context before freeing it to avoid a use after free. 901 */ 902 void hif_exec_destroy(struct hif_exec_context *ctx) 903 { 904 qdf_spinlock_destroy(&ctx->irq_lock); 905 qdf_mem_free(ctx); 906 } 907 908 /** 909 * hif_deregister_exec_group() - API to free the exec contexts 910 * @hif_ctx: HIF context 911 * @context_name: name of the module whose contexts need to be deregistered 912 * 913 * This function deregisters the contexts of the requestor identified 914 * based on the context_name & frees the memory. 915 * 916 * Return: void 917 */ 918 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx, 919 const char *context_name) 920 { 921 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 922 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 923 struct hif_exec_context *hif_ext_group; 924 int i; 925 926 for (i = 0; i < HIF_MAX_GROUP; i++) { 927 hif_ext_group = hif_state->hif_ext_group[i]; 928 929 if (!hif_ext_group) 930 continue; 931 932 hif_debug("%s: Deregistering grp id %d name %s\n", 933 __func__, 934 hif_ext_group->grp_id, 935 hif_ext_group->context_name); 936 937 if (strcmp(hif_ext_group->context_name, context_name) == 0) { 938 hif_ext_group->sched_ops->kill(hif_ext_group); 939 hif_state->hif_ext_group[i] = NULL; 940 hif_exec_destroy(hif_ext_group); 941 hif_state->hif_num_extgroup--; 942 } 943 944 } 945 } 946 qdf_export_symbol(hif_deregister_exec_group); 947