/*
 * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <hif_exec.h>
#include <ce_main.h>
#include "qdf_module.h"
#include "qdf_net_if.h"
#include <pld_common.h>
#ifdef DP_UMAC_HW_RESET_SUPPORT
#include "if_pci.h"
#endif

/* mapping NAPI budget 0 to internal budget 0
 * NAPI budget 1 to internal budget [1, scaler - 1]
 * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], etc.
 */
#define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \
	(((n) << (s)) - 1)
#define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \
	(((n) + 1) >> (s))
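
/*
 * Illustrative note (not in the original driver): with a scale shift
 * s = 1 (scaler = 2), the two macros round-trip a NAPI budget of 64 as
 *
 *	NAPI_BUDGET_TO_INTERNAL_BUDGET(64, 1) = (64 << 1) - 1 = 127
 *	INTERNAL_BUDGET_TO_NAPI_BUDGET(127, 1) = (127 + 1) >> 1 = 64
 *
 * so an exhausted internal budget maps back to an exhausted NAPI budget,
 * while any internal work_done below 127 maps back below 64. A NAPI
 * budget of 0 would map to -1, which is why hif_exec_poll() applies the
 * macro only when budget is non-zero.
 */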

static struct hif_exec_context *hif_exec_tasklet_create(void);

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
struct hif_event_history hif_event_desc_history[HIF_NUM_INT_CONTEXTS];

static inline
int hif_get_next_record_index(qdf_atomic_t *table_index,
			      int array_size)
{
	int record_index = qdf_atomic_inc_return(table_index);

	return record_index & (array_size - 1);
}
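
/*
 * Illustrative note (not in the original driver): the masking above
 * relies on array_size being a power of two. If HIF_EVENT_HIST_MAX were
 * 512, an atomic counter value of 515 would map to slot 515 & 511 = 3,
 * so concurrent writers wrap around the history buffer without a modulo
 * operation or a lock.
 */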

/**
 * hif_hist_is_prev_record() - Check if index is the immediate
 *  previous record wrt curr_index
 * @curr_index: curr index in the event history
 * @index: index to be checked
 * @hist_size: history size
 *
 * Return: true if index is immediately behind curr_index else false
 */
static inline
bool hif_hist_is_prev_record(int32_t curr_index, int32_t index,
			     uint32_t hist_size)
{
	return (((index + 1) & (hist_size - 1)) == curr_index) ?
			true : false;
}

/**
 * hif_hist_skip_event_record() - Check if current event needs to be
 *  recorded or not
 * @hist_ev: HIF event history
 * @event: DP event entry
 *
 * Return: true if current event needs to be skipped else false
 */
static bool
hif_hist_skip_event_record(struct hif_event_history *hist_ev,
			   struct hif_event_record *event)
{
	struct hif_event_record *rec;
	struct hif_event_record *last_irq_rec;
	int32_t index;

	index = qdf_atomic_read(&hist_ev->index);
	if (index < 0)
		return false;

	index &= (HIF_EVENT_HIST_MAX - 1);
	rec = &hist_ev->event[index];

	switch (event->type) {
	case HIF_EVENT_IRQ_TRIGGER:
		/*
		 * The prev record check is to prevent skipping the IRQ event
		 * record in case where BH got re-scheduled due to force_break
		 * but there are no entries to be reaped in the rings.
		 */
		if (rec->type == HIF_EVENT_BH_SCHED &&
		    hif_hist_is_prev_record(index,
					    hist_ev->misc.last_irq_index,
					    HIF_EVENT_HIST_MAX)) {
			last_irq_rec =
				&hist_ev->event[hist_ev->misc.last_irq_index];
			last_irq_rec->timestamp = hif_get_log_timestamp();
			last_irq_rec->cpu_id = qdf_get_cpu();
			last_irq_rec->hp++;
			last_irq_rec->tp = last_irq_rec->timestamp -
						hist_ev->misc.last_irq_ts;
			return true;
		}
		break;
	case HIF_EVENT_BH_SCHED:
		if (rec->type == HIF_EVENT_BH_SCHED) {
			rec->timestamp = hif_get_log_timestamp();
			rec->cpu_id = qdf_get_cpu();
			return true;
		}
		break;
	case HIF_EVENT_SRNG_ACCESS_START:
		if (event->hp == event->tp)
			return true;
		break;
	case HIF_EVENT_SRNG_ACCESS_END:
		if (rec->type != HIF_EVENT_SRNG_ACCESS_START)
			return true;
		break;
	case HIF_EVENT_BH_COMPLETE:
	case HIF_EVENT_BH_FORCE_BREAK:
		if (rec->type != HIF_EVENT_SRNG_ACCESS_END)
			return true;
		break;
	default:
		break;
	}

	return false;
}

void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
			   struct hif_event_record *event, uint8_t intr_grp_id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_event_history *hist_ev;
	struct hif_event_record *record;
	int record_index;

	if (!(scn->event_enable_mask & BIT(event->type)))
		return;

	if (qdf_unlikely(intr_grp_id >= HIF_NUM_INT_CONTEXTS)) {
		hif_err("Invalid interrupt group id %d", intr_grp_id);
		return;
	}

	hist_ev = scn->evt_hist[intr_grp_id];
	if (qdf_unlikely(!hist_ev))
		return;

	if (hif_hist_skip_event_record(hist_ev, event))
		return;

	record_index = hif_get_next_record_index(
			&hist_ev->index, HIF_EVENT_HIST_MAX);

	record = &hist_ev->event[record_index];

	if (event->type == HIF_EVENT_IRQ_TRIGGER) {
		hist_ev->misc.last_irq_index = record_index;
		hist_ev->misc.last_irq_ts = hif_get_log_timestamp();
	}

	record->hal_ring_id = event->hal_ring_id;
	record->hp = event->hp;
	record->tp = event->tp;
	record->cpu_id = qdf_get_cpu();
	record->timestamp = hif_get_log_timestamp();
	record->type = event->type;
}

void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->evt_hist[id] = &hif_event_desc_history[id];
	qdf_atomic_set(&scn->evt_hist[id]->index, -1);

	hif_info("SRNG events history initialized for group: %d", id);
}

void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->evt_hist[id] = NULL;
	hif_info("SRNG events history de-initialized for group: %d", id);
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
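
/*
 * Illustrative note (not in the original driver): when a new IRQ_TRIGGER
 * record would land right after a BH_SCHED entry that immediately
 * follows the last IRQ record, hif_hist_skip_event_record() folds it
 * into that previous IRQ record instead of consuming a new slot: hp is
 * reused as a repeat counter and tp as the delta from the last IRQ
 * timestamp. Three such coalesced IRQs thus appear as one record with
 * hp incremented twice, rather than three near-identical entries.
 */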

#ifndef QCA_WIFI_WCN6450
/**
 * hif_print_napi_latency_stats() - print NAPI scheduling latency stats
 * @hif_state: hif context
 *
 * Return: void
 */
#ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
{
	struct hif_exec_context *hif_ext_group;
	int i, j;
	int64_t cur_tstamp;

	const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] = {
		"0-2 ms",
		"3-10 ms",
		"11-20 ms",
		"21-50 ms",
		"51-100 ms",
		"101-250 ms",
		"251-500 ms",
		"> 500 ms"
	};

	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
		  "Current timestamp: %lld", cur_tstamp);

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		if (hif_state->hif_ext_group[i]) {
			hif_ext_group = hif_state->hif_ext_group[i];

			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
				  "ext grp %d Last serviced timestamp: %lld",
				  i, hif_ext_group->tstamp);

			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
				  "Latency Bucket | Time elapsed");

			for (j = 0; j < HIF_SCHED_LATENCY_BUCKETS; j++) {
				if (hif_ext_group->sched_latency_stats[j])
					QDF_TRACE(QDF_MODULE_ID_HIF,
						  QDF_TRACE_LEVEL_INFO_HIGH,
						  "%s | %lld",
						  time_str[j],
						  hif_ext_group->
						  sched_latency_stats[j]);
			}
		}
	}
}
#else
static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
{
}
#endif

/**
 * hif_clear_napi_stats() - reset NAPI stats
 * @hif_ctx: hif context
 *
 * Return: void
 */
void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	size_t i;

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];

		if (!hif_ext_group)
			return;

		qdf_mem_set(hif_ext_group->sched_latency_stats,
			    sizeof(hif_ext_group->sched_latency_stats),
			    0x0);
	}
}

qdf_export_symbol(hif_clear_napi_stats);

#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * hif_get_poll_times_hist_str() - Get HIF poll times histogram string
 * @stats: NAPI stats to get poll time buckets
 * @buf: buffer to fill histogram string
 * @buf_len: length of the buffer
 *
 * Return: void
 */
static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
					uint8_t buf_len)
{
	int i;
	int str_index = 0;

	for (i = 0; i < QCA_NAPI_NUM_BUCKETS; i++)
		str_index += qdf_scnprintf(buf + str_index, buf_len - str_index,
					   "%u|", stats->poll_time_buckets[i]);
}
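
/*
 * Illustrative note (not in the original driver): for poll_time_buckets
 * of {12, 3, 0, 1, ...} the helper above renders "12|3|0|1|...", one
 * "%u|" field per 500 us bucket, and qdf_scnprintf() keeps the string
 * within buf_len even when the counters grow large.
 */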

void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	struct qca_napi_stat *napi_stats;
	int i, j;

	/*
	 * Max value of uint32_t (poll_time_bucket) = 4294967295
	 * Thus we need 10 chars + 1 separator = 11 chars for each bucket
	 * value. +1 for the terminating '\0'.
	 */
	char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
		  "NAPI[#]CPU[#] |scheds |polls |comps |dones |t-lim |max(us)|hist(500us buckets)");

	for (i = 0;
	     (i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]);
	     i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		for (j = 0; j < num_possible_cpus(); j++) {
			napi_stats = &hif_ext_group->stats[j];
			if (!napi_stats->napi_schedules)
				continue;

			hif_get_poll_times_hist_str(napi_stats,
						    hist_str,
						    sizeof(hist_str));
			QDF_TRACE(QDF_MODULE_ID_HIF,
				  QDF_TRACE_LEVEL_INFO_HIGH,
				  "NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
				  i, j,
				  napi_stats->napi_schedules,
				  napi_stats->napi_polls,
				  napi_stats->napi_completes,
				  napi_stats->napi_workdone,
				  napi_stats->time_limit_reached,
				  qdf_do_div(napi_stats->napi_max_poll_time,
					     1000),
				  hist_str);
		}
	}

	hif_print_napi_latency_stats(hif_state);
}

qdf_export_symbol(hif_print_napi_stats);
#else
static inline
void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
				 uint8_t buf_len)
{
}

void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	struct qca_napi_stat *napi_stats;
	int i, j;

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
		  "NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		if (hif_state->hif_ext_group[i]) {
			hif_ext_group = hif_state->hif_ext_group[i];
			for (j = 0; j < num_possible_cpus(); j++) {
				napi_stats = &(hif_ext_group->stats[j]);
				if (napi_stats->napi_schedules != 0)
					QDF_TRACE(QDF_MODULE_ID_HIF,
						  QDF_TRACE_LEVEL_FATAL,
						  "NAPI[%2d]CPU[%d]: "
						  "%7d %7d %7d %7d ",
						  i, j,
						  napi_stats->napi_schedules,
						  napi_stats->napi_polls,
						  napi_stats->napi_completes,
						  napi_stats->napi_workdone);
			}
		}
	}

	hif_print_napi_latency_stats(hif_state);
}
qdf_export_symbol(hif_print_napi_stats);
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
#endif /* QCA_WIFI_WCN6450 */

#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * hif_exec_fill_poll_time_histogram() - fills poll time histogram for a NAPI
 * @hif_ext_group: hif_ext_group of type NAPI
 *
 * The function is called at the end of a NAPI poll to calculate poll time
 * buckets.
 *
 * Return: void
 */
static
void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
{
	struct qca_napi_stat *napi_stat;
	unsigned long long poll_time_ns;
	uint32_t poll_time_us;
	uint32_t bucket_size_us = 500;
	uint32_t bucket;
	uint32_t cpu_id = qdf_get_cpu();

	poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
	poll_time_us = qdf_do_div(poll_time_ns, 1000);

	napi_stat = &hif_ext_group->stats[cpu_id];
	if (poll_time_ns > hif_ext_group->stats[cpu_id].napi_max_poll_time)
		hif_ext_group->stats[cpu_id].napi_max_poll_time = poll_time_ns;

	bucket = poll_time_us / bucket_size_us;
	if (bucket >= QCA_NAPI_NUM_BUCKETS)
		bucket = QCA_NAPI_NUM_BUCKETS - 1;
	++napi_stat->poll_time_buckets[bucket];
}
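
/*
 * Illustrative note (not in the original driver): with 500 us buckets,
 * a poll that ran for 2.3 ms (2300 us) increments bucket 2300 / 500 = 4,
 * while any poll at or beyond QCA_NAPI_NUM_BUCKETS * 500 us is clamped
 * into the last bucket, so outliers never index past the array.
 */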

/**
 * hif_exec_poll_should_yield() - Local function deciding if NAPI should yield
 * @hif_ext_group: hif_ext_group of type NAPI
 *
 * Return: true if NAPI needs to yield, else false
 */
static bool hif_exec_poll_should_yield(struct hif_exec_context *hif_ext_group)
{
	bool time_limit_reached = false;
	unsigned long long poll_time_ns;
	int cpu_id = qdf_get_cpu();
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	struct hif_config_info *cfg = &scn->hif_config;

	poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
	time_limit_reached =
		poll_time_ns > cfg->rx_softirq_max_yield_duration_ns ? 1 : 0;

	if (time_limit_reached) {
		hif_ext_group->stats[cpu_id].time_limit_reached++;
		hif_ext_group->force_break = true;
	}

	return time_limit_reached;
}

bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	bool ret_val = false;

	if (!(grp_id < hif_state->hif_num_extgroup) ||
	    !(grp_id < HIF_MAX_GROUP))
		return false;

	hif_ext_group = hif_state->hif_ext_group[grp_id];

	if (hif_ext_group->type == HIF_EXEC_NAPI_TYPE)
		ret_val = hif_exec_poll_should_yield(hif_ext_group);

	return ret_val;
}

/**
 * hif_exec_update_service_start_time() - Update NAPI poll start time
 * @hif_ext_group: hif_ext_group of type NAPI
 *
 * The function is called at the beginning of a NAPI poll to record the poll
 * start time.
 *
 * Return: None
 */
static inline
void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
{
	hif_ext_group->poll_start_time = qdf_time_sched_clock();
}

#else
static inline
void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
{
}

static inline
void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
{
}
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
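
/*
 * Illustrative usage sketch (not in the original driver): data-path
 * handlers would typically consult hif_exec_should_yield() from their
 * ring-reap loop, e.g. something like
 *
 *	while (work_done < budget) {
 *		work_done += reap_one_entry(...);
 *		if (hif_exec_should_yield(hif_ctx, grp_id))
 *			break;
 *	}
 *
 * where reap_one_entry() is a hypothetical stand-in for the caller's
 * ring processing. Once the configured poll duration is exceeded,
 * force_break makes hif_exec_poll() report the budget as exhausted.
 */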

static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
{
	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);

	tasklet_schedule(&t_ctx->tasklet);
}

/**
 * hif_exec_tasklet_fn() - grp tasklet
 * @data: context
 *
 * Return: void
 */
static void hif_exec_tasklet_fn(unsigned long data)
{
	struct hif_exec_context *hif_ext_group =
			(struct hif_exec_context *)data;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	unsigned int work_done;
	int cpu = smp_processor_id();

	work_done =
		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET,
				       cpu);

	if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
		qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
		hif_ext_group->irq_enable(hif_ext_group);
	} else {
		hif_exec_tasklet_schedule(hif_ext_group);
	}
}
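
/*
 * Illustrative note (not in the original driver): unlike the NAPI path,
 * the tasklet variant has no kernel-managed budget. It invokes the
 * handler with HIF_MAX_BUDGET and relies on the group's work_complete()
 * callback to decide whether to re-enable the group's IRQs or to
 * reschedule itself for another pass.
 */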

/**
 * hif_latency_profile_measure() - calculate latency and update histogram
 * @hif_ext_group: hif exec context
 *
 * Return: None
 */
#ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
{
	int64_t cur_tstamp;
	int64_t time_elapsed;

	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());

	if (cur_tstamp > hif_ext_group->tstamp)
		time_elapsed = (cur_tstamp - hif_ext_group->tstamp);
	else
		time_elapsed = ~0x0 - (hif_ext_group->tstamp - cur_tstamp);

	hif_ext_group->tstamp = cur_tstamp;

	if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_0_2)
		hif_ext_group->sched_latency_stats[0]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_3_10)
		hif_ext_group->sched_latency_stats[1]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_11_20)
		hif_ext_group->sched_latency_stats[2]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_21_50)
		hif_ext_group->sched_latency_stats[3]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_51_100)
		hif_ext_group->sched_latency_stats[4]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_101_250)
		hif_ext_group->sched_latency_stats[5]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_251_500)
		hif_ext_group->sched_latency_stats[6]++;
	else
		hif_ext_group->sched_latency_stats[7]++;
}
#else
static inline
void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
{
}
#endif

/**
 * hif_latency_profile_start() - Update the start timestamp for HIF ext group
 * @hif_ext_group: hif exec context
 *
 * Return: None
 */
#ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
{
	hif_ext_group->tstamp = qdf_ktime_to_ms(qdf_ktime_get());
}
#else
static inline
void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
{
}
#endif
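
/*
 * Illustrative note (not in the original driver): tstamp is set by
 * hif_latency_profile_start() in the IRQ handler and read by
 * hif_latency_profile_measure() when the bottom half finally runs, so
 * time_elapsed is the IRQ-to-BH scheduling latency. Assuming the
 * HIF_SCHED_LATENCY_BUCKET_* thresholds match their names, a BH that
 * runs 35 ms after its interrupt lands in sched_latency_stats[3], the
 * "21-50 ms" bucket printed by hif_print_napi_latency_stats().
 */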

#ifdef FEATURE_NAPI
#ifdef FEATURE_IRQ_AFFINITY
static inline int32_t
hif_is_force_napi_complete_required(struct hif_exec_context *hif_ext_group)
{
	return qdf_atomic_inc_not_zero(&hif_ext_group->force_napi_complete);
}
#else
static inline int32_t
hif_is_force_napi_complete_required(struct hif_exec_context *hif_ext_group)
{
	return 0;
}
#endif

/**
 * hif_irq_disabled_time_limit_reached() - determine if irq disabled limit
 *  reached for single MSI
 * @hif_ext_group: hif exec context
 *
 * Return: true if reached, else false.
 */
static bool
hif_irq_disabled_time_limit_reached(struct hif_exec_context *hif_ext_group)
{
	unsigned long long irq_disabled_duration_ns;

	if (hif_ext_group->type != HIF_EXEC_NAPI_TYPE)
		return false;

	irq_disabled_duration_ns = qdf_time_sched_clock() -
					hif_ext_group->irq_disabled_start_time;
	if (irq_disabled_duration_ns >= IRQ_DISABLED_MAX_DURATION_NS) {
		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_IRQ_DISABLE_EXPIRED);
		return true;
	}

	return false;
}

/**
 * hif_exec_poll() - napi poll
 * @napi: napi struct
 * @budget: budget for napi
 *
 * Return: mapping of internal budget to napi
 */
static int hif_exec_poll(struct napi_struct *napi, int budget)
{
	struct hif_napi_exec_context *napi_exec_ctx =
		    qdf_container_of(napi, struct hif_napi_exec_context, napi);
	struct hif_exec_context *hif_ext_group = &napi_exec_ctx->exec_ctx;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	int work_done;
	int normalized_budget = 0;
	int actual_dones;
	int shift = hif_ext_group->scale_bin_shift;
	int cpu = smp_processor_id();
	bool force_complete = false;

	hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
			 0, 0, 0, HIF_EVENT_BH_SCHED);

	hif_ext_group->force_break = false;
	hif_exec_update_service_start_time(hif_ext_group);

	if (budget)
		normalized_budget = NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift);

	hif_latency_profile_measure(hif_ext_group);

	work_done = hif_ext_group->handler(hif_ext_group->context,
					   normalized_budget, cpu);

	actual_dones = work_done;

	if (hif_is_force_napi_complete_required(hif_ext_group)) {
		force_complete = true;
		if (work_done >= normalized_budget)
			work_done = normalized_budget - 1;
	}

	if (qdf_unlikely(force_complete) ||
	    (!hif_ext_group->force_break && work_done < normalized_budget) ||
	    ((pld_is_one_msi(scn->qdf_dev->dev) &&
	      hif_irq_disabled_time_limit_reached(hif_ext_group)))) {
		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_BH_COMPLETE);
		napi_complete(napi);
		qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
		hif_ext_group->irq_enable(hif_ext_group);
		hif_ext_group->stats[cpu].napi_completes++;
	} else {
		/*
		 * if the ext_group supports time based yield, claim full
		 * work done anyway
		 */
		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_BH_FORCE_BREAK);
		work_done = normalized_budget;
	}

	hif_ext_group->stats[cpu].napi_polls++;
	hif_ext_group->stats[cpu].napi_workdone += actual_dones;

	/* map internal budget to NAPI budget */
	if (work_done)
		work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift);

	hif_exec_fill_poll_time_histogram(hif_ext_group);

	return work_done;
}
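
/*
 * Illustrative note (not in the original driver): NAPI only treats a
 * poll as complete when the poll callback returns less than its budget,
 * which is why a forced completion rewrites work_done to
 * normalized_budget - 1 above. With shift = 1 and a NAPI budget of 64
 * (internal budget 127), a forced work_done of 126 maps back to
 * (126 + 1) >> 1 = 63 < 64, so the core honors the napi_complete()
 * already issued instead of re-polling.
 */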

/**
 * hif_exec_napi_schedule() - schedule the napi exec instance
 * @ctx: a hif_exec_context known to be of napi type
 */
static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
{
	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);

	ctx->stats[smp_processor_id()].napi_schedules++;

	napi_schedule(&n_ctx->napi);
}

/**
 * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
 * @ctx: a hif_exec_context known to be of napi type
 */
static void hif_exec_napi_kill(struct hif_exec_context *ctx)
{
	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
	int irq_ind;

	if (ctx->inited) {
		qdf_napi_disable(&n_ctx->napi);
		ctx->inited = 0;
	}

	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);

	hif_core_ctl_set_boost(false);
	qdf_netif_napi_del(&(n_ctx->napi));
}

struct hif_execution_ops napi_sched_ops = {
	.schedule = &hif_exec_napi_schedule,
	.kill = &hif_exec_napi_kill,
};

/**
 * hif_exec_napi_create() - allocate and initialize a napi exec context
 * @scale: a binary shift factor to map NAPI budget from/to internal
 *	   budget
 */
static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
{
	struct hif_napi_exec_context *ctx;

	ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context));
	if (!ctx)
		return NULL;

	ctx->exec_ctx.sched_ops = &napi_sched_ops;
	ctx->exec_ctx.inited = true;
	ctx->exec_ctx.scale_bin_shift = scale;
	qdf_net_if_create_dummy_if((struct qdf_net_if *)&ctx->netdev);
	qdf_netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll,
			   QCA_NAPI_BUDGET);
	qdf_napi_enable(&ctx->napi);

	return &ctx->exec_ctx;
}
#else
static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
{
	hif_warn("FEATURE_NAPI not defined, making tasklet");
	return hif_exec_tasklet_create();
}
#endif


/**
 * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
 * @ctx: a hif_exec_context known to be of tasklet type
 */
static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
{
	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
	int irq_ind;

	if (ctx->inited) {
		tasklet_disable(&t_ctx->tasklet);
		tasklet_kill(&t_ctx->tasklet);
	}
	ctx->inited = false;

	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
}

struct hif_execution_ops tasklet_sched_ops = {
	.schedule = &hif_exec_tasklet_schedule,
	.kill = &hif_exec_tasklet_kill,
};

/**
 * hif_exec_tasklet_create() - allocate and initialize a tasklet exec context
 */
static struct hif_exec_context *hif_exec_tasklet_create(void)
{
	struct hif_tasklet_exec_context *ctx;

	ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
	if (!ctx)
		return NULL;

	ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
	tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
		     (unsigned long)ctx);

	ctx->exec_ctx.inited = true;

	return &ctx->exec_ctx;
}

/**
 * hif_exec_get_ctx() - retrieve an exec context based on an id
 * @softc: the hif context owning the exec context
 * @id: the id of the exec context
 *
 * mostly added to make it easier to rename or move the context array
 */
struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
					  uint8_t id)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);

	if (id < hif_state->hif_num_extgroup)
		return hif_state->hif_ext_group[id];

	return NULL;
}

int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
				uint8_t id)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);

	if (id < hif_state->hif_num_extgroup)
		return hif_state->hif_ext_group[id]->os_irq[0];
	return -EINVAL;
}

qdf_export_symbol(hif_get_int_ctx_irq_num);

#ifdef HIF_CPU_PERF_AFFINE_MASK
void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_config_irq_affinity(scn);
}

qdf_export_symbol(hif_config_irq_set_perf_affinity_hint);
#endif

QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	int i, status;

	if (scn->ext_grp_irq_configured) {
		hif_err("Called after ext grp irq configured");
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		status = 0;
		qdf_spinlock_create(&hif_ext_group->irq_lock);
		if (hif_ext_group->configured &&
		    hif_ext_group->irq_requested == false) {
			hif_ext_group->irq_enabled = true;
			status = hif_grp_irq_configure(scn, hif_ext_group);
		}
		if (status != 0) {
			hif_err("Failed for group %d", i);
			hif_ext_group->irq_enabled = false;
		}
	}

	scn->ext_grp_irq_configured = true;

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hif_configure_ext_group_interrupts);

void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn || !scn->ext_grp_irq_configured) {
		hif_err("scn(%pK) is NULL or grp irq not configured", scn);
		return;
	}

	hif_grp_irq_deconfigure(scn);
	scn->ext_grp_irq_configured = false;
}

qdf_export_symbol(hif_deconfigure_ext_group_interrupts);

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_check_and_trigger_ut_resume() - check if a unit-test command was used
 *  to trigger the fake-suspend command; if so, issue the resume procedure.
 * @scn: opaque HIF software context
 *
 * This API checks if a unit-test command was used to trigger the fake-suspend
 * command, and if so it triggers the resume procedure.
 *
 * Make this API inline to save API-switch overhead and do branch-prediction to
 * optimize performance impact.
 *
 * Return: void
 */
static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
{
	if (qdf_unlikely(hif_irq_trigger_ut_resume(scn)))
		hif_ut_fw_resume(scn);
}
#else
static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
{
}
#endif

/**
 * hif_check_and_trigger_sys_resume() - Check for bus suspend and
 *  trigger system resume
 * @scn: hif context
 * @irq: irq number
 *
 * Return: None
 */
static inline void
hif_check_and_trigger_sys_resume(struct hif_softc *scn, int irq)
{
	if (scn->bus_suspended && scn->linkstate_vote) {
		hif_info_rl("interrupt rcvd:%d trigger sys resume", irq);
		qdf_pm_system_wakeup();
	}
}

/**
 * hif_ext_group_interrupt_handler() - handler for related interrupts
 * @irq: irq number of the interrupt
 * @context: the associated hif_exec_group context
 *
 * This callback function takes care of disabling the associated interrupts
 * and scheduling the expected bottom half for the exec_context.
 * This callback function also helps keep track of the count of running
 * contexts.
 */
irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
{
	struct hif_exec_context *hif_ext_group = context;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);

	if (hif_ext_group->irq_requested) {
		hif_latency_profile_start(hif_ext_group);

		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_IRQ_TRIGGER);

		hif_ext_group->irq_disable(hif_ext_group);

		if (pld_is_one_msi(scn->qdf_dev->dev))
			hif_ext_group->irq_disabled_start_time =
						qdf_time_sched_clock();
		/*
		 * If a private ioctl has issued a fake suspend command to put
		 * the FW in D0-WOW state, then here is our chance to bring
		 * the FW out of WOW mode.
		 *
		 * The reason the FW must be woken up explicitly: APSS stayed
		 * fully awake throughout, since the fake suspend command only
		 * put the FW in WOW mode, so the organic way of waking up the
		 * FW (as part of APSS-host wake-up) won't happen because in
		 * reality APSS didn't really suspend.
		 */
		hif_check_and_trigger_ut_resume(scn);

		hif_check_and_trigger_sys_resume(scn, irq);

		qdf_atomic_inc(&scn->active_grp_tasklet_cnt);

		hif_ext_group->sched_ops->schedule(hif_ext_group);
	}

	return IRQ_HANDLED;
}

/**
 * hif_exec_kill() - grp tasklet kill
 * @hif_ctx: hif_softc
 *
 * Return: void
 */
void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
{
	int i;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	for (i = 0; i < hif_state->hif_num_extgroup; i++)
		hif_state->hif_ext_group[i]->sched_ops->kill(
			hif_state->hif_ext_group[i]);

	qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
}

#ifdef FEATURE_IRQ_AFFINITY
static inline void
hif_init_force_napi_complete(struct hif_exec_context *hif_ext_group)
{
	qdf_atomic_init(&hif_ext_group->force_napi_complete);
}
#else
static inline void
hif_init_force_napi_complete(struct hif_exec_context *hif_ext_group)
{
}
#endif

/**
 * hif_register_ext_group() - API to register external group
 *  interrupt handler.
 * @hif_ctx: HIF Context
 * @numirq: number of irqs in the group
 * @irq: array of irq values
 * @handler: callback interrupt handler function
 * @cb_ctx: context to be passed to the callback
 * @context_name: context name
 * @type: napi vs tasklet
 * @scale: binary shift factor to map NAPI budget from/to internal budget
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
				  uint32_t numirq, uint32_t irq[],
				  ext_intr_handler handler,
				  void *cb_ctx, const char *context_name,
				  enum hif_exec_type type, uint32_t scale)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;

	if (scn->ext_grp_irq_configured) {
		hif_err("Called after ext grp irq configured");
		return QDF_STATUS_E_FAILURE;
	}

	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
		hif_err("Max groups: %d reached", hif_state->hif_num_extgroup);
		return QDF_STATUS_E_FAILURE;
	}

	if (numirq >= HIF_MAX_GRP_IRQ) {
		hif_err("Invalid numirq: %d", numirq);
		return QDF_STATUS_E_FAILURE;
	}

	hif_ext_group = hif_exec_create(type, scale);
	if (!hif_ext_group)
		return QDF_STATUS_E_FAILURE;

	hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
		hif_ext_group;

	hif_ext_group->numirq = numirq;
	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
	hif_ext_group->context = cb_ctx;
	hif_ext_group->handler = handler;
	hif_ext_group->configured = true;
	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
	hif_ext_group->hif = hif_ctx;
	hif_ext_group->context_name = context_name;
	hif_ext_group->type = type;
	hif_init_force_napi_complete(hif_ext_group);

	hif_state->hif_num_extgroup++;
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hif_register_ext_group);
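
/*
 * Illustrative usage sketch (not in the original driver): a data-path
 * module would typically register its ring interrupts as one NAPI-type
 * group before the IRQs are configured, e.g.
 *
 *	uint32_t irqs[] = { rx_irq0, rx_irq1 };	// hypothetical IRQ numbers
 *
 *	status = hif_register_ext_group(hif_ctx, QDF_ARRAY_SIZE(irqs), irqs,
 *					my_rx_handler, my_ctx, "my_rx_grp",
 *					HIF_EXEC_NAPI_TYPE, 0);
 *
 * followed by hif_configure_ext_group_interrupts(hif_ctx) once all
 * groups are registered. my_rx_handler, my_ctx and the IRQ numbers are
 * placeholders for the caller's own handler and context.
 */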

/**
 * hif_exec_create() - create an execution context
 * @type: the type of execution context to create
 * @scale: binary shift factor to map NAPI budget from/to internal budget
 */
struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
					 uint32_t scale)
{
	hif_debug("%s: create exec_type %d budget %d",
		  __func__, type, QCA_NAPI_BUDGET * scale);

	switch (type) {
	case HIF_EXEC_NAPI_TYPE:
		return hif_exec_napi_create(scale);

	case HIF_EXEC_TASKLET_TYPE:
		return hif_exec_tasklet_create();
	default:
		return NULL;
	}
}

/**
 * hif_exec_destroy() - free the hif_exec context
 * @ctx: context to free
 *
 * Please kill the context before freeing it to avoid a use-after-free.
 */
void hif_exec_destroy(struct hif_exec_context *ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(ctx->hif);

	if (scn->ext_grp_irq_configured)
		qdf_spinlock_destroy(&ctx->irq_lock);
	qdf_mem_free(ctx);
}

/**
 * hif_deregister_exec_group() - API to free the exec contexts
 * @hif_ctx: HIF context
 * @context_name: name of the module whose contexts need to be deregistered
 *
 * This function deregisters the contexts of the requestor identified
 * based on the context_name and frees the memory.
 *
 * Return: void
 */
void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
			       const char *context_name)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	int i;

	for (i = 0; i < HIF_MAX_GROUP; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];

		if (!hif_ext_group)
			continue;

		hif_debug("%s: Deregistering grp id %d name %s",
			  __func__,
			  hif_ext_group->grp_id,
			  hif_ext_group->context_name);

		if (strcmp(hif_ext_group->context_name, context_name) == 0) {
			hif_ext_group->sched_ops->kill(hif_ext_group);
			hif_state->hif_ext_group[i] = NULL;
			hif_exec_destroy(hif_ext_group);
			hif_state->hif_num_extgroup--;
		}
	}
}
qdf_export_symbol(hif_deregister_exec_group);

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * hif_umac_reset_handler_tasklet() - Tasklet for UMAC HW reset interrupt
 * @data: UMAC HW reset HIF context
 *
 * Return: void
 */
static void hif_umac_reset_handler_tasklet(unsigned long data)
{
	struct hif_umac_reset_ctx *umac_reset_ctx =
		(struct hif_umac_reset_ctx *)data;

	/* call the callback handler */
	umac_reset_ctx->cb_handler(umac_reset_ctx->cb_ctx);
}

/**
 * hif_umac_reset_irq_handler() - Interrupt service routine of UMAC HW reset
 * @irq: irq coming from kernel
 * @ctx: UMAC HW reset HIF context
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t hif_umac_reset_irq_handler(int irq, void *ctx)
{
	struct hif_umac_reset_ctx *umac_reset_ctx = ctx;

	/* Schedule the tasklet if it is umac reset interrupt and exit */
	if (umac_reset_ctx->irq_handler(umac_reset_ctx->cb_ctx))
		tasklet_hi_schedule(&umac_reset_ctx->intr_tq);

	return IRQ_HANDLED;
}

QDF_STATUS hif_get_umac_reset_irq(struct hif_opaque_softc *hif_scn,
				  int *umac_reset_irq)
{
	int ret;
	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;

	ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
			   "umac_reset", 0, umac_reset_irq);

	if (ret) {
		hif_err("umac reset get irq failed ret %d", ret);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hif_get_umac_reset_irq);

QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
					   bool (*irq_handler)(void *cb_ctx),
					   int (*tl_handler)(void *cb_ctx),
					   void *cb_ctx, int irq)
{
	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
	struct hif_umac_reset_ctx *umac_reset_ctx;
	int ret;

	if (!hif_sc) {
		hif_err("scn is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	umac_reset_ctx = &hif_sc->umac_reset_ctx;

	umac_reset_ctx->irq_handler = irq_handler;
	umac_reset_ctx->cb_handler = tl_handler;
	umac_reset_ctx->cb_ctx = cb_ctx;
	umac_reset_ctx->os_irq = irq;

	/* Init the tasklet */
	tasklet_init(&umac_reset_ctx->intr_tq,
		     hif_umac_reset_handler_tasklet,
		     (unsigned long)umac_reset_ctx);

	/* Register the interrupt handler */
	ret = pfrm_request_irq(hif_sc->qdf_dev->dev, irq,
			       hif_umac_reset_irq_handler,
			       IRQF_NO_SUSPEND,
			       "umac_hw_reset_irq",
			       umac_reset_ctx);
	if (ret) {
		hif_err("request_irq failed: %d", ret);
		return qdf_status_from_os_return(ret);
	}

	umac_reset_ctx->irq_configured = true;

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hif_register_umac_reset_handler);

QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
{
	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
	struct hif_umac_reset_ctx *umac_reset_ctx;
	int ret;

	if (!hif_sc) {
		hif_err("scn is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	umac_reset_ctx = &hif_sc->umac_reset_ctx;
	if (!umac_reset_ctx->irq_configured) {
		hif_err("unregister called without a prior IRQ configuration");
		return QDF_STATUS_E_FAILURE;
	}

	ret = pfrm_free_irq(hif_sc->qdf_dev->dev,
			    umac_reset_ctx->os_irq,
			    umac_reset_ctx);
	if (ret) {
		hif_err("free_irq failed: %d", ret);
		return qdf_status_from_os_return(ret);
	}
	umac_reset_ctx->irq_configured = false;

	tasklet_disable(&umac_reset_ctx->intr_tq);
	tasklet_kill(&umac_reset_ctx->intr_tq);

	umac_reset_ctx->cb_handler = NULL;
	umac_reset_ctx->cb_ctx = NULL;

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hif_unregister_umac_reset_handler);
#endif