1 /* 2 * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <linux/pci.h> 20 #include <linux/slab.h> 21 #include <linux/interrupt.h> 22 #include <linux/if_arp.h> 23 #include "qdf_lock.h" 24 #include "qdf_types.h" 25 #include "qdf_status.h" 26 #include "regtable.h" 27 #include "hif.h" 28 #include "hif_io32.h" 29 #include "ce_main.h" 30 #include "ce_api.h" 31 #include "ce_reg.h" 32 #include "ce_internal.h" 33 #include "ce_tasklet.h" 34 #include "pld_common.h" 35 #include "hif_debug.h" 36 #include "hif_napi.h" 37 38 /** 39 * struct tasklet_work 40 * 41 * @id: ce_id 42 * @work: work 43 */ 44 struct tasklet_work { 45 enum ce_id_type id; 46 void *data; 47 struct work_struct work; 48 }; 49 50 51 /** 52 * reschedule_ce_tasklet_work_handler() - reschedule work 53 * @work: struct work_struct 54 * 55 * Return: N/A 56 */ 57 static void reschedule_ce_tasklet_work_handler(struct work_struct *work) 58 { 59 struct tasklet_work *ce_work = container_of(work, struct tasklet_work, 60 work); 61 struct hif_softc *scn = ce_work->data; 62 struct HIF_CE_state *hif_ce_state; 63 64 if (!scn) { 65 HIF_ERROR("%s: tasklet scn is null", __func__); 66 return; 67 } 68 69 hif_ce_state = HIF_GET_CE_STATE(scn); 70 71 if (scn->hif_init_done == false) { 72 HIF_ERROR("%s: wlan driver is unloaded", __func__); 73 return; 74 } 75 if (hif_ce_state->tasklets[ce_work->id].inited) 76 tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq); 77 } 78 79 static struct tasklet_work tasklet_workers[CE_ID_MAX]; 80 static bool work_initialized; 81 82 /** 83 * init_tasklet_work() - init_tasklet_work 84 * @work: struct work_struct 85 * @work_handler: work_handler 86 * 87 * Return: N/A 88 */ 89 static void init_tasklet_work(struct work_struct *work, 90 work_func_t work_handler) 91 { 92 INIT_WORK(work, work_handler); 93 } 94 95 /** 96 * init_tasklet_workers() - init_tasklet_workers 97 * @scn: HIF Context 98 * 99 * Return: N/A 100 */ 101 void init_tasklet_workers(struct hif_opaque_softc *scn) 102 { 103 uint32_t id; 104 105 for (id = 0; id < CE_ID_MAX; id++) { 106 tasklet_workers[id].id = id; 107 tasklet_workers[id].data = scn; 108 init_tasklet_work(&tasklet_workers[id].work, 109 reschedule_ce_tasklet_work_handler); 110 } 111 work_initialized = true; 112 } 113 114 /** 115 * deinit_tasklet_workers() - deinit_tasklet_workers 116 * @scn: HIF Context 117 * 118 * Return: N/A 119 */ 120 void deinit_tasklet_workers(struct hif_opaque_softc *scn) 121 { 122 u32 id; 123 124 for (id = 0; id < CE_ID_MAX; id++) 125 cancel_work_sync(&tasklet_workers[id].work); 126 127 work_initialized = false; 128 } 129 130 /** 131 * ce_schedule_tasklet() - schedule ce tasklet 132 * @tasklet_entry: struct ce_tasklet_entry 133 * 134 * Return: N/A 135 */ 136 static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry) 137 { 138 tasklet_schedule(&tasklet_entry->intr_tq); 139 } 140 141 #ifdef CE_TASKLET_DEBUG_ENABLE 142 /** 143 * hif_record_tasklet_exec_entry_ts() - Record ce tasklet execution 144 * entry time 145 * @scn: hif_softc 146 * @ce_id: ce_id 147 * 148 * Return: None 149 */ 150 static inline void 151 hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id) 152 { 153 struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn); 154 155 hif_ce_state->stats.tasklet_exec_entry_ts[ce_id] = 156 qdf_get_log_timestamp_usecs(); 157 } 158 159 /** 160 * hif_record_tasklet_sched_entry_ts() - Record ce tasklet scheduled 161 * entry time 162 * @scn: hif_softc 163 * @ce_id: ce_id 164 * 165 * Return: None 166 */ 167 static inline void 168 hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id) 169 { 170 struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn); 171 172 hif_ce_state->stats.tasklet_sched_entry_ts[ce_id] = 173 qdf_get_log_timestamp_usecs(); 174 } 175 176 /** 177 * hif_ce_latency_stats() - Display ce latency information 178 * @hif_ctx: hif_softc struct 179 * 180 * Return: None 181 */ 182 static void 183 hif_ce_latency_stats(struct hif_softc *hif_ctx) 184 { 185 uint8_t i, j; 186 uint32_t index, start_index; 187 static const char * const buck_str[] = {"0 - 0.5", "0.5 - 1", "1 - 2", 188 "2 - 5", "5 - 10", " > 10"}; 189 struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx); 190 struct ce_stats *stats = &hif_ce_state->stats; 191 192 hif_err("\tCE TASKLET ARRIVAL AND EXECUTION STATS"); 193 for (i = 0; i < CE_COUNT_MAX; i++) { 194 hif_nofl_err("\n\t\tCE Ring %d Tasklet Execution Bucket", i); 195 for (j = 0; j < CE_BUCKET_MAX; j++) { 196 hif_nofl_err("\t Bucket %sms :%llu\t last update:%llu", 197 buck_str[j], 198 stats->ce_tasklet_exec_bucket[i][j], 199 stats->ce_tasklet_exec_last_update[i][j]); 200 } 201 202 hif_nofl_err("\n\t\tCE Ring %d Tasklet Scheduled Bucket", i); 203 for (j = 0; j < CE_BUCKET_MAX; j++) { 204 hif_nofl_err("\t Bucket %sms :%llu\t last update :%lld", 205 buck_str[j], 206 stats->ce_tasklet_sched_bucket[i][j], 207 stats-> 208 ce_tasklet_sched_last_update[i][j]); 209 } 210 211 hif_nofl_err("\n\t\t CE RING %d Last %d time records", 212 i, HIF_REQUESTED_EVENTS); 213 index = stats->record_index[i]; 214 start_index = stats->record_index[i]; 215 216 for (j = 0; j < HIF_REQUESTED_EVENTS; j++) { 217 hif_nofl_err("\t Execuiton time: %luus Total Scheduled time: %luus", 218 stats->tasklet_exec_time_record[i][index], 219 stats-> 220 tasklet_sched_time_record[i][index]); 221 index = (index - 1) % HIF_REQUESTED_EVENTS; 222 if (index == start_index) 223 break; 224 } 225 } 226 } 227 228 /** 229 * ce_tasklet_update_bucket() - update ce execution and scehduled time latency 230 * in corresponding time buckets 231 * @stats: struct ce_stats 232 * @ce_id: ce_id_type 233 * @entry_us: timestamp when tasklet is started to execute 234 * @exit_us: timestamp when tasklet is completed execution 235 * 236 * Return: N/A 237 */ 238 static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state, 239 uint8_t ce_id) 240 { 241 uint32_t index; 242 uint64_t exec_time, exec_ms; 243 uint64_t sched_time, sched_ms; 244 uint64_t curr_time = qdf_get_log_timestamp_usecs(); 245 struct ce_stats *stats = &hif_ce_state->stats; 246 247 exec_time = curr_time - (stats->tasklet_exec_entry_ts[ce_id]); 248 sched_time = (stats->tasklet_exec_entry_ts[ce_id]) - 249 (stats->tasklet_sched_entry_ts[ce_id]); 250 251 index = stats->record_index[ce_id]; 252 index = (index + 1) % HIF_REQUESTED_EVENTS; 253 254 stats->tasklet_exec_time_record[ce_id][index] = exec_time; 255 stats->tasklet_sched_time_record[ce_id][index] = sched_time; 256 stats->record_index[ce_id] = index; 257 258 exec_ms = qdf_do_div(exec_time, 1000); 259 sched_ms = qdf_do_div(sched_time, 1000); 260 261 if (exec_ms > 10) { 262 stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_BEYOND]++; 263 stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_BEYOND] 264 = curr_time; 265 } else if (exec_ms > 5) { 266 stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_10_MS]++; 267 stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_10_MS] 268 = curr_time; 269 } else if (exec_ms > 2) { 270 stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_5_MS]++; 271 stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_5_MS] 272 = curr_time; 273 } else if (exec_ms > 1) { 274 stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_2_MS]++; 275 stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_2_MS] 276 = curr_time; 277 } else if (exec_time > 500) { 278 stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_1_MS]++; 279 stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_1_MS] 280 = curr_time; 281 } else { 282 stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_500_US]++; 283 stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_500_US] 284 = curr_time; 285 } 286 287 if (sched_ms > 10) { 288 stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_BEYOND]++; 289 stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_BEYOND] 290 = curr_time; 291 } else if (sched_ms > 5) { 292 stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_10_MS]++; 293 stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_10_MS] 294 = curr_time; 295 } else if (sched_ms > 2) { 296 stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_5_MS]++; 297 stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_5_MS] 298 = curr_time; 299 } else if (sched_ms > 1) { 300 stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_2_MS]++; 301 stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_2_MS] 302 = curr_time; 303 } else if (sched_time > 500) { 304 stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_1_MS]++; 305 stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_1_MS] 306 = curr_time; 307 } else { 308 stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_500_US]++; 309 stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_500_US] 310 = curr_time; 311 } 312 } 313 #else 314 static inline void 315 hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id) 316 { 317 } 318 319 static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state, 320 uint8_t ce_id) 321 { 322 } 323 324 static inline void 325 hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id) 326 { 327 } 328 329 static void 330 hif_ce_latency_stats(struct hif_softc *hif_ctx) 331 { 332 } 333 #endif /*CE_TASKLET_DEBUG_ENABLE*/ 334 335 /** 336 * ce_tasklet() - ce_tasklet 337 * @data: data 338 * 339 * Return: N/A 340 */ 341 static void ce_tasklet(unsigned long data) 342 { 343 struct ce_tasklet_entry *tasklet_entry = 344 (struct ce_tasklet_entry *)data; 345 struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state; 346 struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state); 347 struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id]; 348 349 if (scn->ce_latency_stats) 350 hif_record_tasklet_exec_entry_ts(scn, tasklet_entry->ce_id); 351 352 hif_record_ce_desc_event(scn, tasklet_entry->ce_id, 353 HIF_CE_TASKLET_ENTRY, NULL, NULL, -1, 0); 354 355 if (qdf_atomic_read(&scn->link_suspended)) { 356 HIF_ERROR("%s: ce %d tasklet fired after link suspend.", 357 __func__, tasklet_entry->ce_id); 358 QDF_BUG(0); 359 } 360 361 ce_per_engine_service(scn, tasklet_entry->ce_id); 362 363 if (ce_check_rx_pending(CE_state) && tasklet_entry->inited) { 364 /* 365 * There are frames pending, schedule tasklet to process them. 366 * Enable the interrupt only when there is no pending frames in 367 * any of the Copy Engine pipes. 368 */ 369 hif_record_ce_desc_event(scn, tasklet_entry->ce_id, 370 HIF_CE_TASKLET_RESCHEDULE, NULL, NULL, -1, 0); 371 372 ce_schedule_tasklet(tasklet_entry); 373 return; 374 } 375 376 if (scn->target_status != TARGET_STATUS_RESET) 377 hif_irq_enable(scn, tasklet_entry->ce_id); 378 379 hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT, 380 NULL, NULL, -1, 0); 381 382 if (scn->ce_latency_stats) 383 ce_tasklet_update_bucket(hif_ce_state, tasklet_entry->ce_id); 384 385 qdf_atomic_dec(&scn->active_tasklet_cnt); 386 } 387 388 /** 389 * ce_tasklet_init() - ce_tasklet_init 390 * @hif_ce_state: hif_ce_state 391 * @mask: mask 392 * 393 * Return: N/A 394 */ 395 void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask) 396 { 397 int i; 398 399 for (i = 0; i < CE_COUNT_MAX; i++) { 400 if (mask & (1 << i)) { 401 hif_ce_state->tasklets[i].ce_id = i; 402 hif_ce_state->tasklets[i].inited = true; 403 hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state; 404 tasklet_init(&hif_ce_state->tasklets[i].intr_tq, 405 ce_tasklet, 406 (unsigned long)&hif_ce_state->tasklets[i]); 407 } 408 } 409 } 410 /** 411 * ce_tasklet_kill() - ce_tasklet_kill 412 * @hif_ce_state: hif_ce_state 413 * 414 * Context: Non-Atomic context 415 * Return: N/A 416 */ 417 void ce_tasklet_kill(struct hif_softc *scn) 418 { 419 int i; 420 struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn); 421 422 work_initialized = false; 423 424 for (i = 0; i < CE_COUNT_MAX; i++) { 425 if (hif_ce_state->tasklets[i].inited) { 426 hif_ce_state->tasklets[i].inited = false; 427 /* 428 * Cancel the tasklet work before tasklet_disable 429 * to avoid race between tasklet_schedule and 430 * tasklet_kill. Here cancel_work_sync() won't 431 * return before reschedule_ce_tasklet_work_handler() 432 * completes. Even if tasklet_schedule() happens 433 * tasklet_disable() will take care of that. 434 */ 435 cancel_work_sync(&tasklet_workers[i].work); 436 tasklet_kill(&hif_ce_state->tasklets[i].intr_tq); 437 } 438 } 439 qdf_atomic_set(&scn->active_tasklet_cnt, 0); 440 } 441 442 #define HIF_CE_DRAIN_WAIT_CNT 20 443 /** 444 * hif_drain_tasklets(): wait until no tasklet is pending 445 * @scn: hif context 446 * 447 * Let running tasklets clear pending trafic. 448 * 449 * Return: 0 if no bottom half is in progress when it returns. 450 * -EFAULT if it times out. 451 */ 452 int hif_drain_tasklets(struct hif_softc *scn) 453 { 454 uint32_t ce_drain_wait_cnt = 0; 455 int32_t tasklet_cnt; 456 457 while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) { 458 if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) { 459 HIF_ERROR("%s: CE still not done with access: %d", 460 __func__, tasklet_cnt); 461 462 return -EFAULT; 463 } 464 HIF_INFO("%s: Waiting for CE to finish access", __func__); 465 msleep(10); 466 } 467 return 0; 468 } 469 470 #ifdef WLAN_SUSPEND_RESUME_TEST 471 /** 472 * hif_interrupt_is_ut_resume(): Tests if an irq on the given copy engine should 473 * trigger a unit-test resume. 474 * @scn: The HIF context to operate on 475 * @ce_id: The copy engine Id from the originating interrupt 476 * 477 * Return: true if the raised irq should trigger a unit-test resume 478 */ 479 static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id) 480 { 481 int errno; 482 uint8_t wake_ce_id; 483 484 if (!hif_is_ut_suspended(scn)) 485 return false; 486 487 /* ensure passed ce_id matches wake ce_id */ 488 errno = hif_get_wake_ce_id(scn, &wake_ce_id); 489 if (errno) { 490 HIF_ERROR("%s: failed to get wake CE Id: %d", __func__, errno); 491 return false; 492 } 493 494 return ce_id == wake_ce_id; 495 } 496 #else 497 static inline bool 498 hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id) 499 { 500 return false; 501 } 502 #endif /* WLAN_SUSPEND_RESUME_TEST */ 503 504 /** 505 * hif_snoc_interrupt_handler() - hif_snoc_interrupt_handler 506 * @irq: irq coming from kernel 507 * @context: context 508 * 509 * Return: N/A 510 */ 511 static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context) 512 { 513 struct ce_tasklet_entry *tasklet_entry = context; 514 struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state); 515 516 return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq), 517 tasklet_entry); 518 } 519 520 /** 521 * hif_ce_increment_interrupt_count() - update ce stats 522 * @hif_ce_state: ce state 523 * @ce_id: ce id 524 * 525 * Return: none 526 */ 527 static inline void 528 hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id) 529 { 530 int cpu_id = qdf_get_cpu(); 531 532 hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++; 533 } 534 535 /** 536 * hif_display_ce_stats() - display ce stats 537 * @hif_ce_state: ce state 538 * 539 * Return: none 540 */ 541 void hif_display_ce_stats(struct hif_softc *hif_ctx) 542 { 543 #define STR_SIZE 128 544 uint8_t i, j, pos; 545 char str_buffer[STR_SIZE]; 546 int size, ret; 547 struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx); 548 549 qdf_debug("CE interrupt statistics:"); 550 for (i = 0; i < CE_COUNT_MAX; i++) { 551 size = STR_SIZE; 552 pos = 0; 553 for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) { 554 ret = snprintf(str_buffer + pos, size, "[%d]:%d ", 555 j, hif_ce_state->stats.ce_per_cpu[i][j]); 556 if (ret <= 0 || ret >= size) 557 break; 558 size -= ret; 559 pos += ret; 560 } 561 qdf_debug("CE id[%2d] - %s", i, str_buffer); 562 } 563 564 if (hif_ctx->ce_latency_stats) 565 hif_ce_latency_stats(hif_ctx); 566 #undef STR_SIZE 567 } 568 569 /** 570 * hif_clear_ce_stats() - clear ce stats 571 * @hif_ce_state: ce state 572 * 573 * Return: none 574 */ 575 void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state) 576 { 577 qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats)); 578 } 579 580 /** 581 * hif_tasklet_schedule() - schedule tasklet 582 * @hif_ctx: hif context 583 * @tasklet_entry: ce tasklet entry 584 * 585 * Return: false if tasklet already scheduled, otherwise true 586 */ 587 static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx, 588 struct ce_tasklet_entry *tasklet_entry) 589 { 590 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 591 592 if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state)) { 593 HIF_DBG("tasklet scheduled, return"); 594 qdf_atomic_dec(&scn->active_tasklet_cnt); 595 return false; 596 } 597 598 tasklet_schedule(&tasklet_entry->intr_tq); 599 if (scn->ce_latency_stats) 600 hif_record_tasklet_sched_entry_ts(scn, tasklet_entry->ce_id); 601 602 return true; 603 } 604 605 /** 606 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context 607 * @ce_id: ce_id 608 * @tasklet_entry: context 609 * 610 * Return: N/A 611 */ 612 irqreturn_t ce_dispatch_interrupt(int ce_id, 613 struct ce_tasklet_entry *tasklet_entry) 614 { 615 struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state; 616 struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state); 617 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 618 619 if (tasklet_entry->ce_id != ce_id) { 620 HIF_ERROR("%s: ce_id (expect %d, received %d) does not match", 621 __func__, tasklet_entry->ce_id, ce_id); 622 return IRQ_NONE; 623 } 624 if (unlikely(ce_id >= CE_COUNT_MAX)) { 625 HIF_ERROR("%s: ce_id=%d > CE_COUNT_MAX=%d", 626 __func__, tasklet_entry->ce_id, CE_COUNT_MAX); 627 return IRQ_NONE; 628 } 629 630 hif_irq_disable(scn, ce_id); 631 632 if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) 633 return IRQ_HANDLED; 634 635 hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT, 636 NULL, NULL, 0, 0); 637 hif_ce_increment_interrupt_count(hif_ce_state, ce_id); 638 639 if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) { 640 hif_ut_fw_resume(scn); 641 hif_irq_enable(scn, ce_id); 642 return IRQ_HANDLED; 643 } 644 645 qdf_atomic_inc(&scn->active_tasklet_cnt); 646 647 if (hif_napi_enabled(hif_hdl, ce_id)) 648 hif_napi_schedule(hif_hdl, ce_id); 649 else 650 hif_tasklet_schedule(hif_hdl, tasklet_entry); 651 652 return IRQ_HANDLED; 653 } 654 655 /** 656 * const char *ce_name 657 * 658 * @ce_name: ce_name 659 */ 660 const char *ce_name[] = { 661 "WLAN_CE_0", 662 "WLAN_CE_1", 663 "WLAN_CE_2", 664 "WLAN_CE_3", 665 "WLAN_CE_4", 666 "WLAN_CE_5", 667 "WLAN_CE_6", 668 "WLAN_CE_7", 669 "WLAN_CE_8", 670 "WLAN_CE_9", 671 "WLAN_CE_10", 672 "WLAN_CE_11", 673 }; 674 /** 675 * ce_unregister_irq() - ce_unregister_irq 676 * @hif_ce_state: hif_ce_state copy engine device handle 677 * @mask: which coppy engines to unregister for. 678 * 679 * Unregisters copy engine irqs matching mask. If a 1 is set at bit x, 680 * unregister for copy engine x. 681 * 682 * Return: QDF_STATUS 683 */ 684 QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask) 685 { 686 int id; 687 int ce_count; 688 int ret; 689 struct hif_softc *scn; 690 691 if (!hif_ce_state) { 692 HIF_WARN("%s: hif_ce_state = NULL", __func__); 693 return QDF_STATUS_SUCCESS; 694 } 695 696 scn = HIF_GET_SOFTC(hif_ce_state); 697 ce_count = scn->ce_count; 698 /* we are removing interrupts, so better stop NAPI */ 699 ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn), 700 NAPI_EVT_INT_STATE, (void *)0); 701 if (ret != 0) 702 HIF_ERROR("%s: napi_event INT_STATE returned %d", 703 __func__, ret); 704 /* this is not fatal, continue */ 705 706 /* filter mask to free only for ce's with irq registered */ 707 mask &= hif_ce_state->ce_register_irq_done; 708 for (id = 0; id < ce_count; id++) { 709 if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) { 710 ret = pld_ce_free_irq(scn->qdf_dev->dev, id, 711 &hif_ce_state->tasklets[id]); 712 if (ret < 0) 713 HIF_ERROR( 714 "%s: pld_unregister_irq error - ce_id = %d, ret = %d", 715 __func__, id, ret); 716 } 717 ce_disable_polling(scn->ce_id_to_state[id]); 718 } 719 hif_ce_state->ce_register_irq_done &= ~mask; 720 721 return QDF_STATUS_SUCCESS; 722 } 723 /** 724 * ce_register_irq() - ce_register_irq 725 * @hif_ce_state: hif_ce_state 726 * @mask: which coppy engines to unregister for. 727 * 728 * Registers copy engine irqs matching mask. If a 1 is set at bit x, 729 * Register for copy engine x. 730 * 731 * Return: QDF_STATUS 732 */ 733 QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask) 734 { 735 int id; 736 int ce_count; 737 int ret; 738 unsigned long irqflags = IRQF_TRIGGER_RISING; 739 uint32_t done_mask = 0; 740 struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state); 741 742 ce_count = scn->ce_count; 743 744 for (id = 0; id < ce_count; id++) { 745 if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) { 746 ret = pld_ce_request_irq(scn->qdf_dev->dev, id, 747 hif_snoc_interrupt_handler, 748 irqflags, ce_name[id], 749 &hif_ce_state->tasklets[id]); 750 if (ret) { 751 HIF_ERROR( 752 "%s: cannot register CE %d irq handler, ret = %d", 753 __func__, id, ret); 754 ce_unregister_irq(hif_ce_state, done_mask); 755 return QDF_STATUS_E_FAULT; 756 } 757 done_mask |= 1 << id; 758 } 759 } 760 hif_ce_state->ce_register_irq_done |= done_mask; 761 762 return QDF_STATUS_SUCCESS; 763 } 764