/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#include "ce_tasklet.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "hif_napi.h"

/**
 * struct tasklet_work - deferred work used to reschedule a CE tasklet
 * @id: copy engine ID serviced by this work item
 * @data: opaque HIF context (struct hif_softc *)
 * @reg_work: the registered work item
 */
struct tasklet_work {
        enum ce_id_type id;
        void *data;
        qdf_work_t reg_work;
};

/**
 * ce_tasklet_schedule() - schedule CE tasklet
 * @tasklet_entry: ce tasklet entry
 *
 * Schedules the tasklet at high priority if the CE was configured with
 * CE_ATTR_HI_TASKLET, at normal priority otherwise.
 *
 * Return: None
 */
static inline void ce_tasklet_schedule(struct ce_tasklet_entry *tasklet_entry)
{
        if (tasklet_entry->hi_tasklet_ce)
                tasklet_hi_schedule(&tasklet_entry->intr_tq);
        else
                tasklet_schedule(&tasklet_entry->intr_tq);
}

/**
 * reschedule_ce_tasklet_work_handler() - reschedule work
 * @work: struct work_struct
 *
 * Return: N/A
 */
static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
{
        qdf_work_t *reg_work = qdf_container_of(work, qdf_work_t, work);
        struct tasklet_work *ce_work = qdf_container_of(reg_work,
                                                        struct tasklet_work,
                                                        reg_work);
        struct hif_softc *scn = ce_work->data;
        struct HIF_CE_state *hif_ce_state;

        if (!scn) {
                hif_err("tasklet scn is null");
                return;
        }

        hif_ce_state = HIF_GET_CE_STATE(scn);

        if (!scn->hif_init_done) {
                hif_err("wlan driver is unloaded");
                return;
        }
        if (hif_ce_state->tasklets[ce_work->id].inited)
                ce_tasklet_schedule(&hif_ce_state->tasklets[ce_work->id]);
}

static struct tasklet_work tasklet_workers[CE_ID_MAX];

/**
 * init_tasklet_work() - initialize a tasklet work item
 * @work: struct work_struct
 * @work_handler: handler to run when the work is scheduled
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
                              work_func_t work_handler)
{
        INIT_WORK(work, work_handler);
}

/**
 * init_tasklet_worker_by_ceid() - initialize the tasklet worker for a CE
 * @scn: HIF Context
 * @ce_id: copy engine ID
 *
 * Return: N/A
 */
void init_tasklet_worker_by_ceid(struct hif_opaque_softc *scn, int ce_id)
{
        tasklet_workers[ce_id].id = ce_id;
        tasklet_workers[ce_id].data = scn;
        init_tasklet_work(&tasklet_workers[ce_id].reg_work.work,
                          reschedule_ce_tasklet_work_handler);
}
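/*
 * Lifecycle note: tasklet_workers[] entries are initialized per CE via
 * init_tasklet_worker_by_ceid() and canceled in deinit_tasklet_workers()
 * below. The work handler runs in process context, which makes it a safe
 * place to re-schedule a CE tasklet after driver state has been re-checked.
 */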
/**
 * deinit_tasklet_workers() - cancel all CE tasklet works
 * @scn: HIF Context
 *
 * Return: N/A
 */
void deinit_tasklet_workers(struct hif_opaque_softc *scn)
{
        u32 id;

        for (id = 0; id < CE_ID_MAX; id++)
                qdf_cancel_work(&tasklet_workers[id].reg_work);
}

#ifdef CE_TASKLET_DEBUG_ENABLE
/**
 * hif_record_tasklet_exec_entry_ts() - record CE tasklet execution
 * entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
        struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

        hif_ce_state->stats.tasklet_exec_entry_ts[ce_id] =
                                        qdf_get_log_timestamp_usecs();
}

/**
 * hif_record_tasklet_sched_entry_ts() - record CE tasklet schedule
 * entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
        struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

        hif_ce_state->stats.tasklet_sched_entry_ts[ce_id] =
                                        qdf_get_log_timestamp_usecs();
}

/**
 * hif_ce_latency_stats() - display CE latency information
 * @hif_ctx: hif_softc struct
 *
 * Return: None
 */
static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
        uint8_t i, j;
        uint32_t index, start_index;
        uint64_t secs, usecs;
        static const char * const buck_str[] = {"0 - 0.5", "0.5 - 1", "1 - 2",
                                                "2 - 5", "5 - 10", " > 10"};
        struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);
        struct ce_stats *stats = &hif_ce_state->stats;

        hif_err("\tCE TASKLET ARRIVAL AND EXECUTION STATS");
        for (i = 0; i < CE_COUNT_MAX; i++) {
                hif_nofl_err("\n\t\tCE Ring %d Tasklet Execution Bucket", i);
                for (j = 0; j < CE_BUCKET_MAX; j++) {
                        qdf_log_timestamp_to_secs(
                                stats->ce_tasklet_exec_last_update[i][j],
                                &secs, &usecs);
                        hif_nofl_err("\t Bucket %sms :%llu\t last update:% 8lld.%06lld",
                                     buck_str[j],
                                     stats->ce_tasklet_exec_bucket[i][j],
                                     secs, usecs);
                }

                hif_nofl_err("\n\t\tCE Ring %d Tasklet Scheduled Bucket", i);
                for (j = 0; j < CE_BUCKET_MAX; j++) {
                        qdf_log_timestamp_to_secs(
                                stats->ce_tasklet_sched_last_update[i][j],
                                &secs, &usecs);
                        hif_nofl_err("\t Bucket %sms :%llu\t last update :% 8lld.%06lld",
                                     buck_str[j],
                                     stats->ce_tasklet_sched_bucket[i][j],
                                     secs, usecs);
                }

                hif_nofl_err("\n\t\t CE RING %d Last %d time records",
                             i, HIF_REQUESTED_EVENTS);
                index = stats->record_index[i];
                start_index = stats->record_index[i];

                for (j = 0; j < HIF_REQUESTED_EVENTS; j++) {
                        hif_nofl_err("\tExecution time: %lluus Total Scheduled time: %lluus",
                                     stats->tasklet_exec_time_record[i][index],
                                     stats->tasklet_sched_time_record[i][index]);
                        if (index)
                                index = (index - 1) % HIF_REQUESTED_EVENTS;
                        else
                                index = HIF_REQUESTED_EVENTS - 1;
                        if (index == start_index)
                                break;
                }
        }
}
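/*
 * Latency bucket boundaries used below (for both execution time and
 * schedule-to-execution delay), matching buck_str above:
 *   CE_BUCKET_500_US: 0 - 0.5 ms     CE_BUCKET_1_MS:   0.5 - 1 ms
 *   CE_BUCKET_2_MS:   1 - 2 ms       CE_BUCKET_5_MS:   2 - 5 ms
 *   CE_BUCKET_10_MS:  5 - 10 ms      CE_BUCKET_BEYOND: > 10 ms
 * The sub-millisecond split compares raw microseconds (exec_time/sched_time)
 * because exec_ms/sched_ms are integer milliseconds.
 */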
/**
 * ce_tasklet_update_bucket() - update CE execution and scheduled time latency
 *                              in corresponding time buckets
 * @hif_ce_state: HIF CE state
 * @ce_id: ce_id_type
 *
 * Return: N/A
 */
static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
                                     uint8_t ce_id)
{
        uint32_t index;
        uint64_t exec_time, exec_ms;
        uint64_t sched_time, sched_ms;
        uint64_t curr_time = qdf_get_log_timestamp_usecs();
        struct ce_stats *stats = &hif_ce_state->stats;

        exec_time = curr_time - (stats->tasklet_exec_entry_ts[ce_id]);
        sched_time = (stats->tasklet_exec_entry_ts[ce_id]) -
                     (stats->tasklet_sched_entry_ts[ce_id]);

        index = stats->record_index[ce_id];
        index = (index + 1) % HIF_REQUESTED_EVENTS;

        stats->tasklet_exec_time_record[ce_id][index] = exec_time;
        stats->tasklet_sched_time_record[ce_id][index] = sched_time;
        stats->record_index[ce_id] = index;

        exec_ms = qdf_do_div(exec_time, 1000);
        sched_ms = qdf_do_div(sched_time, 1000);

        if (exec_ms > 10) {
                stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_BEYOND]++;
                stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_BEYOND]
                        = curr_time;
        } else if (exec_ms > 5) {
                stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_10_MS]++;
                stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_10_MS]
                        = curr_time;
        } else if (exec_ms > 2) {
                stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_5_MS]++;
                stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_5_MS]
                        = curr_time;
        } else if (exec_ms > 1) {
                stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_2_MS]++;
                stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_2_MS]
                        = curr_time;
        } else if (exec_time > 500) {
                stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_1_MS]++;
                stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_1_MS]
                        = curr_time;
        } else {
                stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_500_US]++;
                stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_500_US]
                        = curr_time;
        }

        if (sched_ms > 10) {
                stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_BEYOND]++;
                stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_BEYOND]
                        = curr_time;
        } else if (sched_ms > 5) {
                stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_10_MS]++;
                stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_10_MS]
                        = curr_time;
        } else if (sched_ms > 2) {
                stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_5_MS]++;
                stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_5_MS]
                        = curr_time;
        } else if (sched_ms > 1) {
                stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_2_MS]++;
                stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_2_MS]
                        = curr_time;
        } else if (sched_time > 500) {
                stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_1_MS]++;
                stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_1_MS]
                        = curr_time;
        } else {
                stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_500_US]++;
                stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_500_US]
                        = curr_time;
        }
}
#else
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
                                     uint8_t ce_id)
{
}

static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
}
#endif /* CE_TASKLET_DEBUG_ENABLE */

#if defined(CE_TASKLET_DEBUG_ENABLE) && defined(CE_TASKLET_SCHEDULE_ON_FULL)
/**
 * hif_reset_ce_full_count() - reset the CE-ring-full count
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_reset_ce_full_count(struct hif_softc *scn, uint8_t ce_id)
{
        struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

        hif_ce_state->stats.ce_ring_full_count[ce_id] = 0;
}
#else
static inline void
hif_reset_ce_full_count(struct hif_softc *scn, uint8_t ce_id)
{
}
#endif
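/*
 * CUSTOM_CB_SCHEDULER_SUPPORT lets another module queue a callback to run
 * from the CE tasklet. The pending state is an atomic counter rather than
 * a flag so that back-to-back scheduling requests are not lost: the drain
 * loop below runs the callback once per successful decrement.
 */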
#ifdef CUSTOM_CB_SCHEDULER_SUPPORT
/**
 * ce_get_custom_cb_pending() - Helper API to check whether the custom
 *                              callback is pending
 * @CE_state: Pointer to CE state
 *
 * Return: bool
 */
static bool
ce_get_custom_cb_pending(struct CE_state *CE_state)
{
        return (qdf_atomic_dec_if_positive(&CE_state->custom_cb_pending) >= 0);
}

/**
 * ce_execute_custom_cb() - Helper API to execute custom callback
 * @CE_state: Pointer to CE state
 *
 * Return: void
 */
static void
ce_execute_custom_cb(struct CE_state *CE_state)
{
        while (ce_get_custom_cb_pending(CE_state) && CE_state->custom_cb &&
               CE_state->custom_cb_context)
                CE_state->custom_cb(CE_state->custom_cb_context);
}
#else
/**
 * ce_execute_custom_cb() - Helper API to execute custom callback
 * @CE_state: Pointer to CE state
 *
 * Return: void
 */
static void
ce_execute_custom_cb(struct CE_state *CE_state)
{
}
#endif /* CUSTOM_CB_SCHEDULER_SUPPORT */
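/*
 * CE tasklet flow: record entry timestamps, run any queued custom callback,
 * service the copy engine, then either reschedule itself (if RX frames are
 * still pending) or re-enable the CE interrupt and drop the active tasklet
 * count.
 */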
/**
 * ce_tasklet() - bottom half executed on behalf of a copy engine interrupt
 * @data: opaque pointer to the struct ce_tasklet_entry being serviced
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
        struct ce_tasklet_entry *tasklet_entry =
                (struct ce_tasklet_entry *)data;
        struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
        struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

        hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
                                 HIF_CE_TASKLET_ENTRY, NULL, NULL, -1, 0);

        if (scn->ce_latency_stats)
                hif_record_tasklet_exec_entry_ts(scn, tasklet_entry->ce_id);

        hif_tasklet_latency_record_exec(scn, tasklet_entry->ce_id);

        if (qdf_atomic_read(&scn->link_suspended)) {
                hif_err("ce %d tasklet fired after link suspend",
                        tasklet_entry->ce_id);
                QDF_BUG(0);
        }

        ce_execute_custom_cb(CE_state);

        ce_per_engine_service(scn, tasklet_entry->ce_id);

        if (ce_check_rx_pending(CE_state) && tasklet_entry->inited) {
                /*
                 * There are frames pending; schedule the tasklet to process
                 * them. The interrupt is re-enabled only when no frames are
                 * pending in any of the copy engine pipes.
                 */
                if (test_bit(TASKLET_STATE_SCHED,
                             &tasklet_entry->intr_tq.state)) {
                        hif_info("ce_id%d tasklet was scheduled, return",
                                 tasklet_entry->ce_id);
                        qdf_atomic_dec(&scn->active_tasklet_cnt);
                        return;
                }

                hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
                                         HIF_CE_TASKLET_RESCHEDULE,
                                         NULL, NULL, -1, 0);

                ce_tasklet_schedule(tasklet_entry);
                hif_tasklet_latency_record_sched(scn, tasklet_entry->ce_id);

                hif_reset_ce_full_count(scn, tasklet_entry->ce_id);
                if (scn->ce_latency_stats) {
                        ce_tasklet_update_bucket(hif_ce_state,
                                                 tasklet_entry->ce_id);
                        hif_record_tasklet_sched_entry_ts(scn,
                                                          tasklet_entry->ce_id);
                }
                return;
        }

        hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
                                 NULL, NULL, -1, 0);

        if (scn->ce_latency_stats)
                ce_tasklet_update_bucket(hif_ce_state, tasklet_entry->ce_id);

        if ((scn->target_status != TARGET_STATUS_RESET) &&
            !scn->free_irq_done)
                hif_irq_enable(scn, tasklet_entry->ce_id);

        qdf_atomic_dec(&scn->active_tasklet_cnt);
}

/**
 * ce_tasklet_init() - initialize tasklets for the copy engines in @mask
 * @hif_ce_state: hif_ce_state
 * @mask: bitmask of CE ids; a 1 at bit x initializes the tasklet for CE x
 *
 * Return: N/A
 */
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
        int i;
        struct CE_attr *attr;

        for (i = 0; i < CE_COUNT_MAX; i++) {
                if (mask & (1 << i)) {
                        hif_ce_state->tasklets[i].ce_id = i;
                        hif_ce_state->tasklets[i].inited = true;
                        hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;

                        attr = &hif_ce_state->host_ce_config[i];
                        if (attr->flags & CE_ATTR_HI_TASKLET)
                                hif_ce_state->tasklets[i].hi_tasklet_ce = true;
                        else
                                hif_ce_state->tasklets[i].hi_tasklet_ce = false;

                        tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
                                     ce_tasklet,
                                     (unsigned long)&hif_ce_state->tasklets[i]);
                }
        }
}
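/*
 * Example (illustrative only): to initialize tasklets for CE 0 and CE 2,
 * a caller would pass a mask with those bits set:
 *
 *      ce_tasklet_init(hif_ce_state, (1 << 0) | (1 << 2));
 *
 * ce_tasklet_kill() below tears down every initialized tasklet regardless
 * of mask.
 */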
/**
 * ce_tasklet_kill() - destroy all CE tasklets and cancel pending reschedules
 * @scn: HIF context
 *
 * Context: Non-Atomic context
 * Return: N/A
 */
void ce_tasklet_kill(struct hif_softc *scn)
{
        int i;
        struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

        for (i = 0; i < CE_COUNT_MAX; i++) {
                if (hif_ce_state->tasklets[i].inited) {
                        hif_ce_state->tasklets[i].inited = false;
                        /*
                         * Cancel the tasklet work before killing the tasklet
                         * to avoid a race between tasklet_schedule() and
                         * tasklet_kill(). qdf_cancel_work() does not return
                         * before reschedule_ce_tasklet_work_handler()
                         * completes, and even if tasklet_schedule() still
                         * happens, tasklet_kill() takes care of it.
                         */
                        qdf_cancel_work(&tasklet_workers[i].reg_work);
                        tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
                }
        }
        qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}

/**
 * ce_tasklet_entry_dump() - dump tasklet entries info
 * @hif_ce_state: ce state
 *
 * This function will dump all tasklet entries info
 *
 * Return: None
 */
static void ce_tasklet_entry_dump(struct HIF_CE_state *hif_ce_state)
{
        struct ce_tasklet_entry *tasklet_entry;
        int i;

        if (hif_ce_state) {
                for (i = 0; i < CE_COUNT_MAX; i++) {
                        tasklet_entry = &hif_ce_state->tasklets[i];

                        hif_info("%02d: ce_id=%d, inited=%d, hi_tasklet_ce=%d hif_ce_state=%pK",
                                 i,
                                 tasklet_entry->ce_id,
                                 tasklet_entry->inited,
                                 tasklet_entry->hi_tasklet_ce,
                                 tasklet_entry->hif_ce_state);
                }
        }
}

#define HIF_CE_DRAIN_WAIT_CNT 20
/**
 * hif_drain_tasklets() - wait until no tasklet is pending
 * @scn: hif context
 *
 * Let running tasklets clear pending traffic.
 *
 * Return: 0 if no bottom half is in progress when it returns,
 *         -EFAULT if it times out.
 */
int hif_drain_tasklets(struct hif_softc *scn)
{
        uint32_t ce_drain_wait_cnt = 0;
        int32_t tasklet_cnt;

        while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
                if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
                        hif_err("CE still not done with access: %d",
                                tasklet_cnt);

                        return -EFAULT;
                }
                hif_info("Waiting for CE to finish access");
                msleep(10);
        }
        return 0;
}

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_interrupt_is_ut_resume() - tests if an irq on the given copy engine
 *                                should trigger a unit-test resume
 * @scn: The HIF context to operate on
 * @ce_id: The copy engine Id from the originating interrupt
 *
 * Return: true if the raised irq should trigger a unit-test resume
 */
static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
        int errno;
        uint8_t wake_ce_id;

        if (!hif_is_ut_suspended(scn))
                return false;

        /* ensure the passed ce_id matches the wake ce_id */
        errno = hif_get_wake_ce_id(scn, &wake_ce_id);
        if (errno) {
                hif_err("Failed to get wake CE Id: %d", errno);
                return false;
        }

        return ce_id == wake_ce_id;
}
#else
static inline bool
hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
        return false;
}
#endif /* WLAN_SUSPEND_RESUME_TEST */

/**
 * hif_snoc_interrupt_handler() - top-half handler for SNOC CE interrupts
 * @irq: irq number from the kernel
 * @context: the ce_tasklet_entry registered for this irq
 *
 * Return: IRQ_HANDLED if the interrupt was dispatched, IRQ_NONE otherwise
 */
static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
{
        struct ce_tasklet_entry *tasklet_entry = context;
        struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);

        return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
                                     tasklet_entry);
}

/**
 * hif_ce_increment_interrupt_count() - update ce stats
 * @hif_ce_state: ce state
 * @ce_id: ce id
 *
 * Return: none
 */
static inline void
hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
{
        int cpu_id = qdf_get_cpu();

        hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
}
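/*
 * stats.ce_per_cpu[][] is a CE x CPU matrix of interrupt counts; the dump
 * below prints one row per copy engine so that interrupt affinity problems
 * (e.g. every CE landing on CPU0) are visible at a glance.
 */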
/**
 * hif_display_ce_stats() - display ce stats
 * @hif_ctx: HIF context
 *
 * Return: none
 */
void hif_display_ce_stats(struct hif_softc *hif_ctx)
{
#define STR_SIZE 128
        uint8_t i, j, pos;
        char str_buffer[STR_SIZE];
        int size, ret;
        struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);

        qdf_debug("CE interrupt statistics:");
        for (i = 0; i < CE_COUNT_MAX; i++) {
                size = STR_SIZE;
                pos = 0;
                for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
                        ret = snprintf(str_buffer + pos, size, "[%d]:%d ",
                                       j, hif_ce_state->stats.ce_per_cpu[i][j]);
                        if (ret <= 0 || ret >= size)
                                break;
                        size -= ret;
                        pos += ret;
                }
                qdf_debug("CE id[%2d] - %s", i, str_buffer);
        }

        if (hif_ctx->ce_latency_stats)
                hif_ce_latency_stats(hif_ctx);
#undef STR_SIZE
}

/**
 * hif_clear_ce_stats() - clear ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
{
        qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
}

#ifdef WLAN_TRACEPOINTS
/**
 * hif_set_ce_tasklet_sched_time() - set the tasklet schedule time for
 *                                   the CE with matching ce_id
 * @scn: hif context
 * @ce_id: CE id
 *
 * Return: None
 */
static inline
void hif_set_ce_tasklet_sched_time(struct hif_softc *scn, uint8_t ce_id)
{
        struct CE_state *ce_state = scn->ce_id_to_state[ce_id];

        ce_state->ce_tasklet_sched_time = qdf_time_sched_clock();
}
#else
static inline
void hif_set_ce_tasklet_sched_time(struct hif_softc *scn, uint8_t ce_id)
{
}
#endif

/**
 * hif_tasklet_schedule() - schedule tasklet
 * @hif_ctx: hif context
 * @tasklet_entry: ce tasklet entry
 *
 * Return: false if the tasklet was already scheduled, otherwise true
 */
static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx,
                                        struct ce_tasklet_entry *tasklet_entry)
{
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

        if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state)) {
                hif_debug("tasklet scheduled, return");
                qdf_atomic_dec(&scn->active_tasklet_cnt);
                return false;
        }

        hif_set_ce_tasklet_sched_time(scn, tasklet_entry->ce_id);
        /*
         * Record the schedule latency before tasklet_schedule() to keep
         * WHUNT happy: in WHUNT, the tasklet may start running before
         * hif_tasklet_schedule() has finished.
         */
        hif_tasklet_latency_record_sched(scn, tasklet_entry->ce_id);
        ce_tasklet_schedule(tasklet_entry);

        hif_reset_ce_full_count(scn, tasklet_entry->ce_id);
        if (scn->ce_latency_stats)
                hif_record_tasklet_sched_entry_ts(scn, tasklet_entry->ce_id);

        return true;
}
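/*
 * WLAN_FEATURE_WMI_DIAG_OVER_CE7: once irqs are disabled and tasklets are
 * drained during bus suspend, the FW diag CE can still hold unprocessed
 * events, so it must be reaped by polling rather than by interrupt.
 */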
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
/**
 * ce_poll_reap_by_id() - reap the available frames from a CE by polling
 * @scn: hif context
 * @ce_id: CE id
 *
 * This function needs to be called once after all the irqs are disabled
 * and tasklets are drained during bus suspend.
 *
 * Return: 0 on success, -EBUSY in the unlikely case that frames are still
 *         pending after the service pass
 */
static int ce_poll_reap_by_id(struct hif_softc *scn, enum ce_id_type ce_id)
{
        struct HIF_CE_state *hif_ce_state = (struct HIF_CE_state *)scn;
        struct CE_state *CE_state = scn->ce_id_to_state[ce_id];

        if (scn->ce_latency_stats)
                hif_record_tasklet_exec_entry_ts(scn, ce_id);

        hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
                                 NULL, NULL, -1, 0);

        ce_per_engine_service(scn, ce_id);

        /*
         * In the unlikely case that frames are still pending to reap,
         * retrying here could loop forever, so return -EBUSY instead.
         */
        if (ce_check_rx_pending(CE_state))
                return -EBUSY;

        hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
                                 NULL, NULL, -1, 0);

        if (scn->ce_latency_stats)
                ce_tasklet_update_bucket(hif_ce_state, ce_id);

        return 0;
}

/**
 * hif_drain_fw_diag_ce() - reap all the available FW diag logs from the CE
 * @scn: hif context
 *
 * This function needs to be called once after all the irqs are disabled
 * and tasklets are drained during bus suspend.
 *
 * Return: 0 on success, -EBUSY if frames are still pending (see
 *         ce_poll_reap_by_id())
 */
int hif_drain_fw_diag_ce(struct hif_softc *scn)
{
        uint8_t ce_id;

        if (hif_get_fw_diag_ce_id(scn, &ce_id))
                return 0;

        return ce_poll_reap_by_id(scn, ce_id);
}
#endif

#ifdef CE_TASKLET_SCHEDULE_ON_FULL
static inline int ce_check_tasklet_status(int ce_id,
                                          struct ce_tasklet_entry *entry)
{
        struct HIF_CE_state *hif_ce_state = entry->hif_ce_state;
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
        struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

        if (hif_napi_enabled(hif_hdl, ce_id)) {
                struct qca_napi_info *napi;

                napi = scn->napi_data.napis[ce_id];
                if (test_bit(NAPI_STATE_SCHED, &napi->napi.state))
                        return -EBUSY;
        } else {
                if (test_bit(TASKLET_STATE_SCHED,
                             &hif_ce_state->tasklets[ce_id].intr_tq.state))
                        return -EBUSY;
        }
        return 0;
}

static inline void ce_interrupt_lock(struct CE_state *ce_state)
{
        qdf_spin_lock_irqsave(&ce_state->ce_interrupt_lock);
}

static inline void ce_interrupt_unlock(struct CE_state *ce_state)
{
        qdf_spin_unlock_irqrestore(&ce_state->ce_interrupt_lock);
}
#else
static inline int ce_check_tasklet_status(int ce_id,
                                          struct ce_tasklet_entry *entry)
{
        return 0;
}

static inline void ce_interrupt_lock(struct CE_state *ce_state)
{
}

static inline void ce_interrupt_unlock(struct CE_state *ce_state)
{
}
#endif
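/*
 * Interrupt dispatch path: validate the ce_id, take the CE interrupt lock,
 * bail out if a tasklet/NAPI poll is already scheduled, disable the CE
 * interrupt, then hand processing off to NAPI or the CE tasklet. The
 * interrupt stays disabled until the bottom half has drained the ring.
 */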
/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: ce_id
 * @tasklet_entry: context
 *
 * Return: IRQ_HANDLED if the interrupt was dispatched (or deliberately
 *         consumed), IRQ_NONE if it was ignored
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
                                  struct ce_tasklet_entry *tasklet_entry)
{
        struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
        struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
        struct CE_state *ce_state;

        if (tasklet_entry->ce_id != ce_id) {
                bool rl;

                rl = hif_err_rl("ce_id (expect %d, received %d) does not match, inited=%d, ce_count=%u",
                                tasklet_entry->ce_id, ce_id,
                                tasklet_entry->inited,
                                scn->ce_count);

                if (!rl)
                        ce_tasklet_entry_dump(hif_ce_state);

                return IRQ_NONE;
        }
        if (unlikely(ce_id >= CE_COUNT_MAX)) {
                hif_err("ce_id=%d >= CE_COUNT_MAX=%d",
                        tasklet_entry->ce_id, CE_COUNT_MAX);
                return IRQ_NONE;
        }

        ce_state = scn->ce_id_to_state[ce_id];

        ce_interrupt_lock(ce_state);
        if (ce_check_tasklet_status(ce_id, tasklet_entry)) {
                ce_interrupt_unlock(ce_state);
                return IRQ_NONE;
        }

        hif_irq_disable(scn, ce_id);

        if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
                ce_interrupt_unlock(ce_state);
                return IRQ_HANDLED;
        }

        hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
                                 NULL, NULL, 0, 0);
        hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

        if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
                hif_ut_fw_resume(scn);
                hif_irq_enable(scn, ce_id);
                ce_interrupt_unlock(ce_state);
                return IRQ_HANDLED;
        }

        qdf_atomic_inc(&scn->active_tasklet_cnt);

        if (hif_napi_enabled(hif_hdl, ce_id))
                hif_napi_schedule(hif_hdl, ce_id);
        else
                hif_tasklet_schedule(hif_hdl, tasklet_entry);

        ce_interrupt_unlock(ce_state);

        return IRQ_HANDLED;
}

const char *ce_name[CE_COUNT_MAX] = {
        "WLAN_CE_0",
        "WLAN_CE_1",
        "WLAN_CE_2",
        "WLAN_CE_3",
        "WLAN_CE_4",
        "WLAN_CE_5",
        "WLAN_CE_6",
        "WLAN_CE_7",
        "WLAN_CE_8",
        "WLAN_CE_9",
        "WLAN_CE_10",
        "WLAN_CE_11",
#ifdef QCA_WIFI_QCN9224
        "WLAN_CE_12",
        "WLAN_CE_13",
        "WLAN_CE_14",
        "WLAN_CE_15",
#endif
};

/**
 * ce_unregister_irq() - unregister copy engine irqs
 * @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for
 *
 * Unregisters copy engine irqs matching mask. If a 1 is set at bit x,
 * unregister for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
        int id;
        int ce_count;
        int ret;
        struct hif_softc *scn;

        if (!hif_ce_state) {
                hif_warn("hif_ce_state = NULL");
                return QDF_STATUS_SUCCESS;
        }

        scn = HIF_GET_SOFTC(hif_ce_state);
        ce_count = scn->ce_count;
        /* we are removing interrupts, so better stop NAPI */
        ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
                             NAPI_EVT_INT_STATE, (void *)0);
        if (ret != 0)
                hif_err("napi_event INT_STATE returned %d", ret);
        /* this is not fatal, continue */

        /* filter mask to free only for CEs with an irq registered */
        mask &= hif_ce_state->ce_register_irq_done;
        for (id = 0; id < ce_count; id++) {
                if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
                        ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
                                              &hif_ce_state->tasklets[id]);
                        if (ret < 0)
                                hif_err("pld_ce_free_irq error - ce_id = %d, ret = %d",
                                        id, ret);
                }
                ce_disable_polling(scn->ce_id_to_state[id]);
        }
        hif_ce_state->ce_register_irq_done &= ~mask;

        return QDF_STATUS_SUCCESS;
}

/**
 * ce_register_irq() - register copy engine irqs
 * @hif_ce_state: hif_ce_state
 * @mask: which copy engines to register for
 *
 * Registers copy engine irqs matching mask. If a 1 is set at bit x,
 * register for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
        int id;
        int ce_count;
        int ret;
        unsigned long irqflags = IRQF_TRIGGER_RISING;
        uint32_t done_mask = 0;
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

        ce_count = scn->ce_count;

        for (id = 0; id < ce_count; id++) {
                if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
                        ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
                                                 hif_snoc_interrupt_handler,
                                                 irqflags, ce_name[id],
                                                 &hif_ce_state->tasklets[id]);
                        if (ret) {
                                hif_err("cannot register CE %d irq handler, ret = %d",
                                        id, ret);
                                ce_unregister_irq(hif_ce_state, done_mask);
                                return QDF_STATUS_E_FAULT;
                        }
                        done_mask |= 1 << id;
                }
        }
        hif_ce_state->ce_register_irq_done |= done_mask;

        return QDF_STATUS_SUCCESS;
}
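/*
 * Typical bring-up/tear-down pairing (illustrative sketch only, not part of
 * this file's API contract): the bus layer initializes tasklets for the CEs
 * it owns, registers their irqs, and later tears both down in reverse order.
 *
 *      uint32_t ce_mask = 0xFF;        // hypothetical: CEs 0-7
 *
 *      ce_tasklet_init(hif_ce_state, ce_mask);
 *      ce_register_irq(hif_ce_state, ce_mask);
 *      ...
 *      ce_unregister_irq(hif_ce_state, ce_mask);
 *      ce_tasklet_kill(scn);
 */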