/*
 * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <hif_exec.h>
#include <ce_main.h>
#include "qdf_module.h"
#include "qdf_net_if.h"
#include <pld_common.h>
#ifdef DP_UMAC_HW_RESET_SUPPORT
#include "if_pci.h"
#endif

/* mapping NAPI budget 0 to internal budget 0
 * NAPI budget 1 to internal budget [1, scaler - 1]
 * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], etc.
 */
#define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \
	(((n) << (s)) - 1)
#define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \
	(((n) + 1) >> (s))
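
/*
 * Worked example of the two macros above (illustrative only, assuming a
 * shift s = 1, i.e. scaler = 2):
 *
 *   NAPI_BUDGET_TO_INTERNAL_BUDGET(64, 1) = (64 << 1) - 1 = 127
 *   INTERNAL_BUDGET_TO_NAPI_BUDGET(127, 1) = (127 + 1) >> 1 = 64
 *
 * so a fully consumed internal budget maps back to the full NAPI budget,
 * which tells the kernel to keep polling (see hif_exec_poll() below).
 */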

static struct hif_exec_context *hif_exec_tasklet_create(void);

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
struct hif_event_history hif_event_desc_history[HIF_NUM_INT_CONTEXTS];

static inline
int hif_get_next_record_index(qdf_atomic_t *table_index,
			      int array_size)
{
	int record_index = qdf_atomic_inc_return(table_index);

	return record_index & (array_size - 1);
}
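
/*
 * Illustrative note (not from the original source): the masking above
 * implements a power-of-two ring, so array_size must be a power of two
 * (HIF_EVENT_HIST_MAX is used that way below). For example, with
 * array_size = 8, a monotonically increasing table_index of 6, 7, 8, 9
 * yields record indices 6, 7, 0, 1.
 */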

/**
 * hif_hist_is_prev_record() - Check if index is the immediate
 *  previous record wrt curr_index
 * @curr_index: curr index in the event history
 * @index: index to be checked
 * @hist_size: history size
 *
 * Return: true if index is immediately behind curr_index else false
 */
static inline
bool hif_hist_is_prev_record(int32_t curr_index, int32_t index,
			     uint32_t hist_size)
{
	return (((index + 1) & (hist_size - 1)) == curr_index) ?
			true : false;
}

/**
 * hif_hist_skip_event_record() - Check if current event needs to be
 *  recorded or not
 * @hist_ev: HIF event history
 * @event: DP event entry
 *
 * Return: true if current event needs to be skipped else false
 */
static bool
hif_hist_skip_event_record(struct hif_event_history *hist_ev,
			   struct hif_event_record *event)
{
	struct hif_event_record *rec;
	struct hif_event_record *last_irq_rec;
	int32_t index;

	index = qdf_atomic_read(&hist_ev->index);
	if (index < 0)
		return false;

	index &= (HIF_EVENT_HIST_MAX - 1);
	rec = &hist_ev->event[index];

	switch (event->type) {
	case HIF_EVENT_IRQ_TRIGGER:
		/*
		 * The prev record check is to prevent skipping the IRQ event
		 * record in case where BH got re-scheduled due to force_break
		 * but there are no entries to be reaped in the rings.
		 */
		if (rec->type == HIF_EVENT_BH_SCHED &&
		    hif_hist_is_prev_record(index,
					    hist_ev->misc.last_irq_index,
					    HIF_EVENT_HIST_MAX)) {
			last_irq_rec =
				&hist_ev->event[hist_ev->misc.last_irq_index];
			last_irq_rec->timestamp = hif_get_log_timestamp();
			last_irq_rec->cpu_id = qdf_get_cpu();
			last_irq_rec->hp++;
			last_irq_rec->tp = last_irq_rec->timestamp -
						hist_ev->misc.last_irq_ts;
			return true;
		}
		break;
	case HIF_EVENT_BH_SCHED:
		if (rec->type == HIF_EVENT_BH_SCHED) {
			rec->timestamp = hif_get_log_timestamp();
			rec->cpu_id = qdf_get_cpu();
			return true;
		}
		break;
	case HIF_EVENT_SRNG_ACCESS_START:
		if (event->hp == event->tp)
			return true;
		break;
	case HIF_EVENT_SRNG_ACCESS_END:
		if (rec->type != HIF_EVENT_SRNG_ACCESS_START)
			return true;
		break;
	case HIF_EVENT_BH_COMPLETE:
	case HIF_EVENT_BH_FORCE_BREAK:
		if (rec->type != HIF_EVENT_SRNG_ACCESS_END)
			return true;
		break;
	default:
		break;
	}

	return false;
}

void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
			   struct hif_event_record *event, uint8_t intr_grp_id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_event_history *hist_ev;
	struct hif_event_record *record;
	int record_index;

	if (!(scn->event_enable_mask & BIT(event->type)))
		return;

	if (qdf_unlikely(intr_grp_id >= HIF_NUM_INT_CONTEXTS)) {
		hif_err("Invalid interrupt group id %d", intr_grp_id);
		return;
	}

	hist_ev = scn->evt_hist[intr_grp_id];
	if (qdf_unlikely(!hist_ev))
		return;

	if (hif_hist_skip_event_record(hist_ev, event))
		return;

	record_index = hif_get_next_record_index(
			&hist_ev->index, HIF_EVENT_HIST_MAX);

	record = &hist_ev->event[record_index];

	if (event->type == HIF_EVENT_IRQ_TRIGGER) {
		hist_ev->misc.last_irq_index = record_index;
		hist_ev->misc.last_irq_ts = hif_get_log_timestamp();
	}

	record->hal_ring_id = event->hal_ring_id;
	record->hp = event->hp;
	record->tp = event->tp;
	record->cpu_id = qdf_get_cpu();
	record->timestamp = hif_get_log_timestamp();
	record->type = event->type;
}

void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->evt_hist[id] = &hif_event_desc_history[id];
	qdf_atomic_set(&scn->evt_hist[id]->index, -1);

	hif_info("SRNG events history initialized for group: %d", id);
}

void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->evt_hist[id] = NULL;
	hif_info("SRNG events history de-initialized for group: %d", id);
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
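
/*
 * Usage sketch for the history API above (illustrative only; ring_id, hp,
 * tp and intr_grp_id are placeholder values a caller would obtain from its
 * SRNG context):
 *
 *   struct hif_event_record ev = {0};
 *
 *   ev.type = HIF_EVENT_SRNG_ACCESS_START;
 *   ev.hal_ring_id = ring_id;
 *   ev.hp = hp;
 *   ev.tp = tp;
 *   hif_hist_record_event(hif_ctx, &ev, intr_grp_id);
 *
 * Note that hif_hist_skip_event_record() drops this particular event type
 * when hp == tp, i.e. when the ring access found nothing to service.
 */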

/**
 * hif_print_napi_latency_stats() - print NAPI scheduling latency stats
 * @hif_state: hif context
 *
 * return: void
 */
#ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
{
	struct hif_exec_context *hif_ext_group;
	int i, j;
	int64_t cur_tstamp;

	const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] = {
		"0-2 ms",
		"3-10 ms",
		"11-20 ms",
		"21-50 ms",
		"51-100 ms",
		"101-250 ms",
		"251-500 ms",
		"> 500 ms"
	};

	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
		  "Current timestamp: %lld", cur_tstamp);

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		if (hif_state->hif_ext_group[i]) {
			hif_ext_group = hif_state->hif_ext_group[i];

			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
				  "ext grp %d Last serviced timestamp: %lld",
				  i, hif_ext_group->tstamp);

			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
				  "Latency Bucket | Time elapsed");

			for (j = 0; j < HIF_SCHED_LATENCY_BUCKETS; j++) {
				if (hif_ext_group->sched_latency_stats[j])
					QDF_TRACE(QDF_MODULE_ID_HIF,
						  QDF_TRACE_LEVEL_INFO_HIGH,
						  "%s | %lld",
						  time_str[j],
						  hif_ext_group->
						  sched_latency_stats[j]);
			}
		}
	}
}
#else
static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
{
}
#endif

/**
 * hif_clear_napi_stats() - reset NAPI stats
 * @hif_ctx: hif context
 *
 * return: void
 */
void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	size_t i;

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];

		if (!hif_ext_group)
			return;

		qdf_mem_set(hif_ext_group->sched_latency_stats,
			    sizeof(hif_ext_group->sched_latency_stats),
			    0x0);
	}
}

qdf_export_symbol(hif_clear_napi_stats);

#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * hif_get_poll_times_hist_str() - Get HIF poll times histogram string
 * @stats: NAPI stats to get poll time buckets
 * @buf: buffer to fill histogram string
 * @buf_len: length of the buffer
 *
 * Return: void
 */
static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
					uint8_t buf_len)
{
	int i;
	int str_index = 0;

	for (i = 0; i < QCA_NAPI_NUM_BUCKETS; i++)
		str_index += qdf_scnprintf(buf + str_index, buf_len - str_index,
					   "%u|", stats->poll_time_buckets[i]);
}

/**
 * hif_exec_fill_poll_time_histogram() - fills poll time histogram for a NAPI
 * @hif_ext_group: hif_ext_group of type NAPI
 *
 * The function is called at the end of a NAPI poll to calculate poll time
 * buckets.
 *
 * Return: void
 */
static
void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
{
	struct qca_napi_stat *napi_stat;
	unsigned long long poll_time_ns;
	uint32_t poll_time_us;
	uint32_t bucket_size_us = 500;
	uint32_t bucket;
	uint32_t cpu_id = qdf_get_cpu();

	poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
	poll_time_us = qdf_do_div(poll_time_ns, 1000);

	napi_stat = &hif_ext_group->stats[cpu_id];
	if (poll_time_ns > hif_ext_group->stats[cpu_id].napi_max_poll_time)
		hif_ext_group->stats[cpu_id].napi_max_poll_time = poll_time_ns;

	bucket = poll_time_us / bucket_size_us;
	if (bucket >= QCA_NAPI_NUM_BUCKETS)
		bucket = QCA_NAPI_NUM_BUCKETS - 1;
	++napi_stat->poll_time_buckets[bucket];
}
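
/*
 * Worked example for the bucketing above (illustrative only): a poll that
 * ran for 1,234,567 ns is first converted to 1234 us, and
 * 1234 / 500 = bucket 2, i.e. the 1000-1499 us slot. Any poll of
 * 500 us * QCA_NAPI_NUM_BUCKETS or longer is clamped into the last bucket.
 */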

/**
 * hif_exec_poll_should_yield() - Local function deciding if NAPI should yield
 * @hif_ext_group: hif_ext_group of type NAPI
 *
 * Return: true if NAPI needs to yield, else false
 */
static bool hif_exec_poll_should_yield(struct hif_exec_context *hif_ext_group)
{
	bool time_limit_reached = false;
	unsigned long long poll_time_ns;
	int cpu_id = qdf_get_cpu();
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	struct hif_config_info *cfg = &scn->hif_config;

	poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
	time_limit_reached =
		poll_time_ns > cfg->rx_softirq_max_yield_duration_ns ? 1 : 0;

	if (time_limit_reached) {
		hif_ext_group->stats[cpu_id].time_limit_reached++;
		hif_ext_group->force_break = true;
	}

	return time_limit_reached;
}

bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	bool ret_val = false;

	if (!(grp_id < hif_state->hif_num_extgroup) ||
	    !(grp_id < HIF_MAX_GROUP))
		return false;

	hif_ext_group = hif_state->hif_ext_group[grp_id];

	if (hif_ext_group->type == HIF_EXEC_NAPI_TYPE)
		ret_val = hif_exec_poll_should_yield(hif_ext_group);

	return ret_val;
}

/**
 * hif_exec_update_service_start_time() - Update NAPI poll start time
 * @hif_ext_group: hif_ext_group of type NAPI
 *
 * The function is called at the beginning of a NAPI poll to record the poll
 * start time.
 *
 * Return: None
 */
static inline
void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
{
	hif_ext_group->poll_start_time = qdf_time_sched_clock();
}

void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	struct qca_napi_stat *napi_stats;
	int i, j;

	/*
	 * Max value of uint32_t (poll_time_bucket) = 4294967295,
	 * so we need 10 chars + 1 separator = 11 chars for each bucket
	 * value, plus 1 for the terminating '\0'.
	 */
	char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
		  "NAPI[#]CPU[#] |scheds |polls |comps |dones |t-lim |max(us)|hist(500us buckets)");

	for (i = 0;
	     (i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]);
	     i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		for (j = 0; j < num_possible_cpus(); j++) {
			napi_stats = &hif_ext_group->stats[j];
			if (!napi_stats->napi_schedules)
				continue;

			hif_get_poll_times_hist_str(napi_stats,
						    hist_str,
						    sizeof(hist_str));
			QDF_TRACE(QDF_MODULE_ID_HIF,
				  QDF_TRACE_LEVEL_INFO_HIGH,
				  "NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
				  i, j,
				  napi_stats->napi_schedules,
				  napi_stats->napi_polls,
				  napi_stats->napi_completes,
				  napi_stats->napi_workdone,
				  napi_stats->time_limit_reached,
				  qdf_do_div(napi_stats->napi_max_poll_time,
					     1000),
				  hist_str);
		}
	}

	hif_print_napi_latency_stats(hif_state);
}

qdf_export_symbol(hif_print_napi_stats);

#else

static inline
void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
				 uint8_t buf_len)
{
}

static inline
void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
{
}

static inline
void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
{
}

void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	struct qca_napi_stat *napi_stats;
	int i, j;

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
		  "NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		if (hif_state->hif_ext_group[i]) {
			hif_ext_group = hif_state->hif_ext_group[i];
			for (j = 0; j < num_possible_cpus(); j++) {
				napi_stats = &(hif_ext_group->stats[j]);
				if (napi_stats->napi_schedules != 0)
					QDF_TRACE(QDF_MODULE_ID_HIF,
						  QDF_TRACE_LEVEL_FATAL,
						  "NAPI[%2d]CPU[%d]: %7d %7d %7d %7d ",
						  i, j,
						  napi_stats->napi_schedules,
						  napi_stats->napi_polls,
						  napi_stats->napi_completes,
						  napi_stats->napi_workdone);
			}
		}
	}

	hif_print_napi_latency_stats(hif_state);
}
qdf_export_symbol(hif_print_napi_stats);
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
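
/*
 * Sample of the histogram string built by hif_get_poll_times_hist_str()
 * (illustrative only): with poll_time_buckets = {12, 3, 0, 1, ...} the
 * buffer holds "12|3|0|1|...", one "%u|" field per bucket, which is what
 * appears in the hist(500us buckets) column above.
 */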

static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
{
	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);

	tasklet_schedule(&t_ctx->tasklet);
}

/**
 * hif_exec_tasklet_fn() - grp tasklet
 * @data: context
 *
 * Return: void
 */
static void hif_exec_tasklet_fn(unsigned long data)
{
	struct hif_exec_context *hif_ext_group =
			(struct hif_exec_context *)data;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	unsigned int work_done;
	int cpu = smp_processor_id();

	work_done =
		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET,
				       cpu);

	if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
		qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
		hif_ext_group->irq_enable(hif_ext_group);
	} else {
		hif_exec_tasklet_schedule(hif_ext_group);
	}
}

/**
 * hif_latency_profile_measure() - calculate latency and update histogram
 * @hif_ext_group: hif exec context
 *
 * Return: None
 */
#ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
{
	int64_t cur_tstamp;
	int64_t time_elapsed;

	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());

	if (cur_tstamp > hif_ext_group->tstamp)
		time_elapsed = (cur_tstamp - hif_ext_group->tstamp);
	else
		time_elapsed = ~0x0 - (hif_ext_group->tstamp - cur_tstamp);

	hif_ext_group->tstamp = cur_tstamp;

	if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_0_2)
		hif_ext_group->sched_latency_stats[0]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_3_10)
		hif_ext_group->sched_latency_stats[1]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_11_20)
		hif_ext_group->sched_latency_stats[2]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_21_50)
		hif_ext_group->sched_latency_stats[3]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_51_100)
		hif_ext_group->sched_latency_stats[4]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_101_250)
		hif_ext_group->sched_latency_stats[5]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_251_500)
		hif_ext_group->sched_latency_stats[6]++;
	else
		hif_ext_group->sched_latency_stats[7]++;
}
#else
static inline
void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
{
}
#endif
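
/*
 * Example of the classification above (illustrative only, assuming the
 * HIF_SCHED_LATENCY_BUCKET_* thresholds match their names): a bottom half
 * that runs 15 ms after hif_latency_profile_start() falls through the
 * 0-2 ms and 3-10 ms checks and lands in sched_latency_stats[2], printed
 * as the "11-20 ms" bucket by hif_print_napi_latency_stats().
 */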

/**
 * hif_latency_profile_start() - Update the start timestamp for HIF ext group
 * @hif_ext_group: hif exec context
 *
 * Return: None
 */
#ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
{
	hif_ext_group->tstamp = qdf_ktime_to_ms(qdf_ktime_get());
}
#else
static inline
void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
{
}
#endif

#ifdef FEATURE_NAPI
#ifdef FEATURE_IRQ_AFFINITY
static inline int32_t
hif_is_force_napi_complete_required(struct hif_exec_context *hif_ext_group)
{
	return qdf_atomic_inc_not_zero(&hif_ext_group->force_napi_complete);
}
#else
static inline int32_t
hif_is_force_napi_complete_required(struct hif_exec_context *hif_ext_group)
{
	return 0;
}
#endif

/**
 * hif_irq_disabled_time_limit_reached() - determine if irq disabled limit
 *  reached for single MSI
 * @hif_ext_group: hif exec context
 *
 * Return: true if reached, else false.
 */
static bool
hif_irq_disabled_time_limit_reached(struct hif_exec_context *hif_ext_group)
{
	unsigned long long irq_disabled_duration_ns;

	if (hif_ext_group->type != HIF_EXEC_NAPI_TYPE)
		return false;

	irq_disabled_duration_ns = qdf_time_sched_clock() -
					hif_ext_group->irq_disabled_start_time;
	if (irq_disabled_duration_ns >= IRQ_DISABLED_MAX_DURATION_NS) {
		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_IRQ_DISABLE_EXPIRED);
		return true;
	}

	return false;
}

/**
 * hif_exec_poll() - napi poll
 * @napi: napi struct
 * @budget: budget for napi
 *
 * Return: mapping of internal budget to napi
 */
static int hif_exec_poll(struct napi_struct *napi, int budget)
{
	struct hif_napi_exec_context *napi_exec_ctx =
		qdf_container_of(napi, struct hif_napi_exec_context, napi);
	struct hif_exec_context *hif_ext_group = &napi_exec_ctx->exec_ctx;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	int work_done;
	int normalized_budget = 0;
	int actual_dones;
	int shift = hif_ext_group->scale_bin_shift;
	int cpu = smp_processor_id();
	bool force_complete = false;

	hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
			 0, 0, 0, HIF_EVENT_BH_SCHED);

	hif_ext_group->force_break = false;
	hif_exec_update_service_start_time(hif_ext_group);

	if (budget)
		normalized_budget = NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift);

	hif_latency_profile_measure(hif_ext_group);

	work_done = hif_ext_group->handler(hif_ext_group->context,
					   normalized_budget, cpu);

	actual_dones = work_done;

	if (hif_is_force_napi_complete_required(hif_ext_group)) {
		force_complete = true;
		if (work_done >= normalized_budget)
			work_done = normalized_budget - 1;
	}

	if (qdf_unlikely(force_complete) ||
	    (!hif_ext_group->force_break && work_done < normalized_budget) ||
	    ((pld_is_one_msi(scn->qdf_dev->dev) &&
	      hif_irq_disabled_time_limit_reached(hif_ext_group)))) {
		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_BH_COMPLETE);
		napi_complete(napi);
		qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
		hif_ext_group->irq_enable(hif_ext_group);
		hif_ext_group->stats[cpu].napi_completes++;
	} else {
		/*
		 * If the ext_group supports time based yield, claim full
		 * work done anyway.
		 */
		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_BH_FORCE_BREAK);
		work_done = normalized_budget;
	}

	hif_ext_group->stats[cpu].napi_polls++;
	hif_ext_group->stats[cpu].napi_workdone += actual_dones;

	/* map internal budget to NAPI budget */
	if (work_done)
		work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift);

	hif_exec_fill_poll_time_histogram(hif_ext_group);

	return work_done;
}
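
/*
 * Budget walkthrough for hif_exec_poll() (illustrative only, shift = 1):
 *
 *   budget = 64           -> normalized_budget = (64 << 1) - 1 = 127
 *   handler consumes 127  -> work_done == normalized_budget, no complete;
 *                            (127 + 1) >> 1 = 64 is returned, so the
 *                            kernel schedules another poll.
 *   handler consumes 40   -> work_done < normalized_budget and no
 *                            force_break: napi_complete() is called, IRQs
 *                            are re-enabled, and (40 + 1) >> 1 = 20 is
 *                            returned to the kernel.
 */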

/**
 * hif_exec_napi_schedule() - schedule the napi exec instance
 * @ctx: a hif_exec_context known to be of napi type
 */
static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
{
	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);

	ctx->stats[smp_processor_id()].napi_schedules++;

	napi_schedule(&n_ctx->napi);
}

/**
 * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
 * @ctx: a hif_exec_context known to be of napi type
 */
static void hif_exec_napi_kill(struct hif_exec_context *ctx)
{
	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
	int irq_ind;

	if (ctx->inited) {
		qdf_napi_disable(&n_ctx->napi);
		ctx->inited = 0;
	}

	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);

	hif_core_ctl_set_boost(false);
	qdf_netif_napi_del(&(n_ctx->napi));
}

struct hif_execution_ops napi_sched_ops = {
	.schedule = &hif_exec_napi_schedule,
	.kill = &hif_exec_napi_kill,
};

/**
 * hif_exec_napi_create() - allocate and initialize a napi exec context
 * @scale: a binary shift factor to map NAPI budget from/to internal
 *  budget
 */
static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
{
	struct hif_napi_exec_context *ctx;

	ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context));
	if (!ctx)
		return NULL;

	ctx->exec_ctx.sched_ops = &napi_sched_ops;
	ctx->exec_ctx.inited = true;
	ctx->exec_ctx.scale_bin_shift = scale;
	qdf_net_if_create_dummy_if((struct qdf_net_if *)&ctx->netdev);
	qdf_netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll,
			   QCA_NAPI_BUDGET);
	qdf_napi_enable(&ctx->napi);

	return &ctx->exec_ctx;
}
#else
static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
{
	hif_warn("FEATURE_NAPI not defined, making tasklet");
	return hif_exec_tasklet_create();
}
#endif

/**
 * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
 * @ctx: a hif_exec_context known to be of tasklet type
 */
static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
{
	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
	int irq_ind;

	if (ctx->inited) {
		tasklet_disable(&t_ctx->tasklet);
		tasklet_kill(&t_ctx->tasklet);
	}
	ctx->inited = false;

	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
}

struct hif_execution_ops tasklet_sched_ops = {
	.schedule = &hif_exec_tasklet_schedule,
	.kill = &hif_exec_tasklet_kill,
};

/**
 * hif_exec_tasklet_create() - allocate and initialize a tasklet exec context
 */
static struct hif_exec_context *hif_exec_tasklet_create(void)
{
	struct hif_tasklet_exec_context *ctx;

	ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
	if (!ctx)
		return NULL;

	ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
	tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
		     (unsigned long)ctx);

	ctx->exec_ctx.inited = true;

	return &ctx->exec_ctx;
}
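
/*
 * Both context types are driven through the same ops table, so callers
 * never branch on NAPI vs tasklet. Caller-side sketch (illustrative only):
 *
 *   struct hif_exec_context *ctx = hif_exec_create(type, scale);
 *
 *   ctx->sched_ops->schedule(ctx);  // napi_schedule() or tasklet_schedule()
 *   ...
 *   ctx->sched_ops->kill(ctx);      // stop rescheduling first
 *   hif_exec_destroy(ctx);          // then free, as noted below
 */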

/**
 * hif_exec_get_ctx() - retrieve an exec context based on an id
 * @softc: the hif context owning the exec context
 * @id: the id of the exec context
 *
 * mostly added to make it easier to rename or move the context array
 */
struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
					  uint8_t id)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);

	if (id < hif_state->hif_num_extgroup)
		return hif_state->hif_ext_group[id];

	return NULL;
}

int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
				uint8_t id)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);

	if (id < hif_state->hif_num_extgroup)
		return hif_state->hif_ext_group[id]->os_irq[0];
	return -EINVAL;
}

qdf_export_symbol(hif_get_int_ctx_irq_num);

#ifdef HIF_CPU_PERF_AFFINE_MASK
void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_config_irq_affinity(scn);
}

qdf_export_symbol(hif_config_irq_set_perf_affinity_hint);
#endif

QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	int i, status;

	if (scn->ext_grp_irq_configured) {
		hif_err("Called after ext grp irq configured");
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		status = 0;
		qdf_spinlock_create(&hif_ext_group->irq_lock);
		if (hif_ext_group->configured &&
		    hif_ext_group->irq_requested == false) {
			hif_ext_group->irq_enabled = true;
			status = hif_grp_irq_configure(scn, hif_ext_group);
		}
		if (status != 0) {
			hif_err("Failed for group %d", i);
			hif_ext_group->irq_enabled = false;
		}
	}

	scn->ext_grp_irq_configured = true;

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hif_configure_ext_group_interrupts);

void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn || !scn->ext_grp_irq_configured) {
		hif_err("scn(%pK) is NULL or grp irq not configured", scn);
		return;
	}

	hif_grp_irq_deconfigure(scn);
	scn->ext_grp_irq_configured = false;
}

qdf_export_symbol(hif_deconfigure_ext_group_interrupts);

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_check_and_trigger_ut_resume() - check if a unit-test command was used
 *  to trigger the fake-suspend command and, if so, issue the resume
 *  procedure.
 * @scn: opaque HIF software context
 *
 * This API checks if a unit-test command was used to trigger the
 * fake-suspend command and, if so, triggers the resume procedure.
 *
 * Make this API inline to save API-switch overhead and do branch-prediction
 * to optimize performance impact.
 *
 * Return: void
 */
static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
{
	if (qdf_unlikely(hif_irq_trigger_ut_resume(scn)))
		hif_ut_fw_resume(scn);
}
#else
static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
{
}
#endif

/**
 * hif_check_and_trigger_sys_resume() - Check for bus suspend and
 *  trigger system resume
 * @scn: hif context
 * @irq: irq number
 *
 * Return: None
 */
static inline void
hif_check_and_trigger_sys_resume(struct hif_softc *scn, int irq)
{
	if (scn->bus_suspended && scn->linkstate_vote) {
		hif_info_rl("interrupt rcvd:%d trigger sys resume", irq);
		qdf_pm_system_wakeup();
	}
}

/**
 * hif_ext_group_interrupt_handler() - handler for related interrupts
 * @irq: irq number of the interrupt
 * @context: the associated hif_exec_group context
 *
 * This callback function takes care of disabling the associated interrupts
 * and scheduling the expected bottom half for the exec_context.
 * This callback function also helps keep track of the count of running
 * contexts.
 */
irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
{
	struct hif_exec_context *hif_ext_group = context;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);

	if (hif_ext_group->irq_requested) {
		hif_latency_profile_start(hif_ext_group);

		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_IRQ_TRIGGER);

		hif_ext_group->irq_disable(hif_ext_group);

		if (pld_is_one_msi(scn->qdf_dev->dev))
			hif_ext_group->irq_disabled_start_time =
						qdf_time_sched_clock();
		/*
		 * If a private ioctl has issued a fake suspend command to put
		 * the FW in the D0-WOW state, then here is our chance to bring
		 * the FW out of WOW mode.
		 *
		 * The FW needs to be woken up explicitly because the APSS was
		 * fully awake throughout the time the fake APSS suspend
		 * command was issued (to put the FW in WOW mode), so the
		 * organic way of waking up the FW (as part of the APSS-host
		 * wake-up) will not happen: in reality the APSS never really
		 * suspended.
		 */
		hif_check_and_trigger_ut_resume(scn);

		hif_check_and_trigger_sys_resume(scn, irq);

		qdf_atomic_inc(&scn->active_grp_tasklet_cnt);

		hif_ext_group->sched_ops->schedule(hif_ext_group);
	}

	return IRQ_HANDLED;
}

/**
 * hif_exec_kill() - grp tasklet kill
 * @hif_ctx: hif_softc
 *
 * return: void
 */
void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
{
	int i;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	for (i = 0; i < hif_state->hif_num_extgroup; i++)
		hif_state->hif_ext_group[i]->sched_ops->kill(
			hif_state->hif_ext_group[i]);

	qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
}

#ifdef FEATURE_IRQ_AFFINITY
static inline void
hif_init_force_napi_complete(struct hif_exec_context *hif_ext_group)
{
	qdf_atomic_init(&hif_ext_group->force_napi_complete);
}
#else
static inline void
hif_init_force_napi_complete(struct hif_exec_context *hif_ext_group)
{
}
#endif

/**
 * hif_register_ext_group() - API to register external group
 *  interrupt handler.
 * @hif_ctx: HIF Context
 * @numirq: number of irqs in the group
 * @irq: array of irq values
 * @handler: callback interrupt handler function
 * @cb_ctx: context to be passed to the callback
 * @context_name: context name
 * @type: napi vs tasklet
 * @scale: binary shift factor to map NAPI budget from/to internal budget
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
				  uint32_t numirq, uint32_t irq[],
				  ext_intr_handler handler,
				  void *cb_ctx, const char *context_name,
				  enum hif_exec_type type, uint32_t scale)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;

	if (scn->ext_grp_irq_configured) {
		hif_err("Called after ext grp irq configured");
		return QDF_STATUS_E_FAILURE;
	}

	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
		hif_err("Max groups: %d reached", hif_state->hif_num_extgroup);
		return QDF_STATUS_E_FAILURE;
	}

	if (numirq >= HIF_MAX_GRP_IRQ) {
		hif_err("Invalid numirq: %d", numirq);
		return QDF_STATUS_E_FAILURE;
	}

	hif_ext_group = hif_exec_create(type, scale);
	if (!hif_ext_group)
		return QDF_STATUS_E_FAILURE;

	hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
		hif_ext_group;

	hif_ext_group->numirq = numirq;
	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
	hif_ext_group->context = cb_ctx;
	hif_ext_group->handler = handler;
	hif_ext_group->configured = true;
	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
	hif_ext_group->hif = hif_ctx;
	hif_ext_group->context_name = context_name;
	hif_ext_group->type = type;
	hif_init_force_napi_complete(hif_ext_group);

	hif_state->hif_num_extgroup++;
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hif_register_ext_group);
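
/*
 * Registration sketch (illustrative only; the irq numbers, dp_srng_handler
 * and dp_ctx are placeholders, not symbols from this driver):
 *
 *   uint32_t irqs[] = { 100, 101 };
 *   QDF_STATUS status;
 *
 *   status = hif_register_ext_group(hif_ctx, 2, irqs,
 *                                   dp_srng_handler, dp_ctx,
 *                                   "dp_intr", HIF_EXEC_NAPI_TYPE, 0);
 *
 * Registration must happen before hif_configure_ext_group_interrupts(),
 * which performs the actual IRQ setup for every configured group.
 */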

/**
 * hif_exec_create() - create an execution context
 * @type: the type of execution context to create
 * @scale: binary shift factor to map NAPI budget from/to internal budget
 */
struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
					 uint32_t scale)
{
	hif_debug("%s: create exec_type %d budget %d",
		  __func__, type, QCA_NAPI_BUDGET * scale);

	switch (type) {
	case HIF_EXEC_NAPI_TYPE:
		return hif_exec_napi_create(scale);

	case HIF_EXEC_TASKLET_TYPE:
		return hif_exec_tasklet_create();
	default:
		return NULL;
	}
}

/**
 * hif_exec_destroy() - free the hif_exec context
 * @ctx: context to free
 *
 * please kill the context before freeing it to avoid a use after free.
 */
void hif_exec_destroy(struct hif_exec_context *ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(ctx->hif);

	if (scn->ext_grp_irq_configured)
		qdf_spinlock_destroy(&ctx->irq_lock);
	qdf_mem_free(ctx);
}

/**
 * hif_deregister_exec_group() - API to free the exec contexts
 * @hif_ctx: HIF context
 * @context_name: name of the module whose contexts need to be deregistered
 *
 * This function deregisters the contexts of the requestor identified
 * based on the context_name and frees the memory.
 *
 * Return: void
 */
void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
			       const char *context_name)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	int i;

	for (i = 0; i < HIF_MAX_GROUP; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];

		if (!hif_ext_group)
			continue;

		hif_debug("%s: Deregistering grp id %d name %s",
			  __func__,
			  hif_ext_group->grp_id,
			  hif_ext_group->context_name);

		if (strcmp(hif_ext_group->context_name, context_name) == 0) {
			hif_ext_group->sched_ops->kill(hif_ext_group);
			hif_state->hif_ext_group[i] = NULL;
			hif_exec_destroy(hif_ext_group);
			hif_state->hif_num_extgroup--;
		}
	}
}
qdf_export_symbol(hif_deregister_exec_group);

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * hif_umac_reset_handler_tasklet() - Tasklet for UMAC HW reset interrupt
 * @data: UMAC HW reset HIF context
 *
 * return: void
 */
static void hif_umac_reset_handler_tasklet(unsigned long data)
{
	struct hif_umac_reset_ctx *umac_reset_ctx =
		(struct hif_umac_reset_ctx *)data;

	/* call the callback handler */
	umac_reset_ctx->cb_handler(umac_reset_ctx->cb_ctx);
}

/**
 * hif_umac_reset_irq_handler() - Interrupt service routine of UMAC HW reset
 * @irq: irq coming from kernel
 * @ctx: UMAC HW reset HIF context
 *
 * return: IRQ_HANDLED if success, else IRQ_NONE
 */
static irqreturn_t hif_umac_reset_irq_handler(int irq, void *ctx)
{
	struct hif_umac_reset_ctx *umac_reset_ctx = ctx;

	/* Schedule the tasklet if it is umac reset interrupt and exit */
	if (umac_reset_ctx->irq_handler(umac_reset_ctx->cb_ctx))
		tasklet_hi_schedule(&umac_reset_ctx->intr_tq);

	return IRQ_HANDLED;
}

QDF_STATUS hif_get_umac_reset_irq(struct hif_opaque_softc *hif_scn,
				  int *umac_reset_irq)
{
	int ret;
	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;

	ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
			   "umac_reset", 0, umac_reset_irq);

	if (ret) {
		hif_err("umac reset get irq failed ret %d", ret);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hif_get_umac_reset_irq);

QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
					   bool (*irq_handler)(void *cb_ctx),
					   int (*tl_handler)(void *cb_ctx),
					   void *cb_ctx, int irq)
{
	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
	struct hif_umac_reset_ctx *umac_reset_ctx;
	int ret;

	if (!hif_sc) {
		hif_err("scn is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	umac_reset_ctx = &hif_sc->umac_reset_ctx;

	umac_reset_ctx->irq_handler = irq_handler;
	umac_reset_ctx->cb_handler = tl_handler;
	umac_reset_ctx->cb_ctx = cb_ctx;
	umac_reset_ctx->os_irq = irq;

	/* Init the tasklet */
	tasklet_init(&umac_reset_ctx->intr_tq,
		     hif_umac_reset_handler_tasklet,
		     (unsigned long)umac_reset_ctx);

	/* Register the interrupt handler */
	ret = pfrm_request_irq(hif_sc->qdf_dev->dev, irq,
			       hif_umac_reset_irq_handler,
			       IRQF_NO_SUSPEND,
			       "umac_hw_reset_irq",
			       umac_reset_ctx);
	if (ret) {
		hif_err("request_irq failed: %d", ret);
		return qdf_status_from_os_return(ret);
	}

	umac_reset_ctx->irq_configured = true;

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hif_register_umac_reset_handler);
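
/*
 * Call-flow sketch for the UMAC reset IRQ plumbing above (illustrative
 * only; my_irq_check, my_reset_work and my_ctx are placeholder callbacks):
 *
 *   int irq;
 *
 *   if (hif_get_umac_reset_irq(hif_scn, &irq) == QDF_STATUS_SUCCESS)
 *           hif_register_umac_reset_handler(hif_scn, my_irq_check,
 *                                           my_reset_work, my_ctx, irq);
 *
 * my_irq_check() runs in hard-irq context and returns true to schedule the
 * tasklet; my_reset_work() then runs in tasklet context. On teardown,
 * hif_unregister_umac_reset_handler() below frees the irq and kills the
 * tasklet.
 */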

QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
{
	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
	struct hif_umac_reset_ctx *umac_reset_ctx;
	int ret;

	if (!hif_sc) {
		hif_err("scn is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	umac_reset_ctx = &hif_sc->umac_reset_ctx;
	if (!umac_reset_ctx->irq_configured) {
		hif_err("unregister called without a prior IRQ configuration");
		return QDF_STATUS_E_FAILURE;
	}

	ret = pfrm_free_irq(hif_sc->qdf_dev->dev,
			    umac_reset_ctx->os_irq,
			    umac_reset_ctx);
	if (ret) {
		hif_err("free_irq failed: %d", ret);
		return qdf_status_from_os_return(ret);
	}
	umac_reset_ctx->irq_configured = false;

	tasklet_disable(&umac_reset_ctx->intr_tq);
	tasklet_kill(&umac_reset_ctx->intr_tq);

	umac_reset_ctx->cb_handler = NULL;
	umac_reset_ctx->cb_ctx = NULL;

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hif_unregister_umac_reset_handler);
#endif