/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#include "ce_tasklet.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "hif_napi.h"

/**
 * struct tasklet_work - work item used to re-schedule a CE tasklet
 *                       from workqueue (process) context
 *
 * @id: ce_id of the copy engine this work item services
 * @data: opaque HIF context stored at init time and read back as
 *        struct hif_softc * by the work handler
 * @reg_work: the qdf work item queued to the workqueue
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	qdf_work_t reg_work;
};


/**
 * ce_tasklet_schedule() - schedule CE tasklet
 * @tasklet_entry: ce tasklet entry
 *
 * Uses the high-priority tasklet list when the entry is flagged
 * hi_tasklet_ce, else the normal tasklet list.
 *
 * Return: None
 */
static inline void ce_tasklet_schedule(struct ce_tasklet_entry *tasklet_entry)
{
	if (tasklet_entry->hi_tasklet_ce)
		tasklet_hi_schedule(&tasklet_entry->intr_tq);
	else
		tasklet_schedule(&tasklet_entry->intr_tq);
}

/**
 * reschedule_ce_tasklet_work_handler() - reschedule work
 * @work: struct work_struct
 *
 * Work handler that re-schedules the CE tasklet identified by the
 * enclosing struct tasklet_work, provided the driver is still loaded
 * and the tasklet is still initialized.
 *
 * Return: N/A
 */
static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
{
	qdf_work_t *reg_work = qdf_container_of(work, qdf_work_t, work);
	struct tasklet_work *ce_work = qdf_container_of(reg_work,
							struct tasklet_work,
							reg_work);
	/* data was stored as a struct hif_opaque_softc * by
	 * init_tasklet_worker_by_ceid(); treated interchangeably with
	 * struct hif_softc * here, per the HIF opaque-handle convention.
	 */
	struct hif_softc *scn = ce_work->data;
	struct HIF_CE_state *hif_ce_state;

	if (!scn) {
		hif_err("tasklet scn is null");
		return;
	}

	hif_ce_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done == false) {
		hif_err("wlan driver is unloaded");
		return;
	}
	if (hif_ce_state->tasklets[ce_work->id].inited)
		ce_tasklet_schedule(&hif_ce_state->tasklets[ce_work->id]);
}

/* One work item per copy engine, indexed by ce_id */
static struct tasklet_work tasklet_workers[CE_ID_MAX];

/**
 * init_tasklet_work() - init_tasklet_work
 * @work: struct work_struct
 * @work_handler: work_handler
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}

/**
 * init_tasklet_worker_by_ceid() - init_tasklet_workers
 * @scn: HIF Context
 * @ce_id: copy engine ID
 *
 * Binds the per-CE work item to @scn and installs
 * reschedule_ce_tasklet_work_handler() as its handler.
 *
 * Return: N/A
 */
void init_tasklet_worker_by_ceid(struct hif_opaque_softc *scn, int ce_id)
{

	tasklet_workers[ce_id].id = ce_id;
	tasklet_workers[ce_id].data = scn;
	init_tasklet_work(&tasklet_workers[ce_id].reg_work.work,
			  reschedule_ce_tasklet_work_handler);
}

/**
 * deinit_tasklet_workers() - deinit_tasklet_workers
 * @scn: HIF Context
 *
 * Cancels any pending reschedule work for every copy engine.
 *
 * Return: N/A
 */
void deinit_tasklet_workers(struct hif_opaque_softc *scn)
{
	u32 id;

	for (id = 0; id < CE_ID_MAX; id++)
		qdf_cancel_work(&tasklet_workers[id].reg_work);
}

#ifdef CE_TASKLET_DEBUG_ENABLE
/**
 * hif_record_tasklet_exec_entry_ts() - Record ce tasklet execution
 * entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	/* timestamp is in microseconds */
	hif_ce_state->stats.tasklet_exec_entry_ts[ce_id] =
			qdf_get_log_timestamp_usecs();
}

/**
 * hif_record_tasklet_sched_entry_ts() - Record ce tasklet scheduled
 * entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.tasklet_sched_entry_ts[ce_id] =
			qdf_get_log_timestamp_usecs();
}

/**
 * hif_ce_latency_stats() - Display ce latency information
 * @hif_ctx: hif_softc struct
 *
 * Dumps, per copy engine: the execution-time and scheduling-delay
 * histograms, and the last HIF_REQUESTED_EVENTS raw time records
 * (walked newest to oldest starting from record_index).
 *
 * Return: None
 */
static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
	uint8_t i, j;
	uint32_t index, start_index;
	uint64_t secs, usecs;
	/* bucket labels, indexed like enum CE_BUCKET_* */
	static const char * const buck_str[] = {"0 - 0.5", "0.5 - 1", "1 - 2",
					       "2 - 5", "5 - 10", " > 10"};
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);
	struct ce_stats *stats = &hif_ce_state->stats;

	hif_err("\tCE TASKLET ARRIVAL AND EXECUTION STATS");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		hif_nofl_err("\n\t\tCE Ring %d Tasklet Execution Bucket", i);
		for (j = 0; j < CE_BUCKET_MAX; j++) {
			qdf_log_timestamp_to_secs(
				stats->ce_tasklet_exec_last_update[i][j],
				&secs, &usecs);
			hif_nofl_err("\t Bucket %sms :%llu\t last update:% 8lld.%06lld",
				     buck_str[j],
				     stats->ce_tasklet_exec_bucket[i][j],
				     secs, usecs);
		}

		hif_nofl_err("\n\t\tCE Ring %d Tasklet Scheduled Bucket", i);
		for (j = 0; j < CE_BUCKET_MAX; j++) {
			qdf_log_timestamp_to_secs(
				stats->ce_tasklet_sched_last_update[i][j],
				&secs, &usecs);
			hif_nofl_err("\t Bucket %sms :%llu\t last update :% 8lld.%06lld",
				     buck_str[j],
				     stats->ce_tasklet_sched_bucket[i][j],
				     secs, usecs);
		}

		hif_nofl_err("\n\t\t CE RING %d Last %d time records",
			     i, HIF_REQUESTED_EVENTS);
		index = stats->record_index[i];
		start_index = stats->record_index[i];

		/* walk the circular record buffer backwards from the most
		 * recent entry; stop after one full lap
		 */
		for (j = 0; j < HIF_REQUESTED_EVENTS; j++) {
			hif_nofl_err("\tExecution time: %lluus Total Scheduled time: %lluus",
				     stats->tasklet_exec_time_record[i][index],
				     stats->
					   tasklet_sched_time_record[i][index]);
			if (index)
				index = (index - 1) % HIF_REQUESTED_EVENTS;
			else
				index = HIF_REQUESTED_EVENTS - 1;
			if (index == start_index)
				break;
		}
	}
}

/**
 * ce_tasklet_update_bucket() - update ce execution and scehduled time latency
 * in corresponding time buckets
 * @hif_ce_state: HIF CE state
 * @ce_id: ce_id_type
 *
 * exec_time is the time spent in the tasklet since
 * hif_record_tasklet_exec_entry_ts(); sched_time is the delay between
 * hif_record_tasklet_sched_entry_ts() and execution entry. Both are
 * also appended to the circular raw-record buffer.
 *
 * Return: N/A
 */
static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
				     uint8_t ce_id)
{
	uint32_t index;
	uint64_t exec_time, exec_ms;
	uint64_t sched_time, sched_ms;
	uint64_t curr_time = qdf_get_log_timestamp_usecs();
	struct ce_stats *stats = &hif_ce_state->stats;

	exec_time = curr_time - (stats->tasklet_exec_entry_ts[ce_id]);
	sched_time = (stats->tasklet_exec_entry_ts[ce_id]) -
		      (stats->tasklet_sched_entry_ts[ce_id]);

	/* advance the circular index first, then write the new record */
	index = stats->record_index[ce_id];
	index = (index + 1) % HIF_REQUESTED_EVENTS;

	stats->tasklet_exec_time_record[ce_id][index] = exec_time;
	stats->tasklet_sched_time_record[ce_id][index] = sched_time;
	stats->record_index[ce_id] = index;

	exec_ms = qdf_do_div(exec_time, 1000);
	sched_ms = qdf_do_div(sched_time, 1000);

	/* bucket thresholds: >10ms, 5-10ms, 2-5ms, 1-2ms, 0.5-1ms, <=0.5ms;
	 * note the 0.5ms boundary is tested in microseconds (exec_time > 500)
	 */
	if (exec_ms > 10) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_BEYOND]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_BEYOND]
			= curr_time;
	} else if (exec_ms > 5) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_10_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_10_MS]
			= curr_time;
	} else if (exec_ms > 2) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_5_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_5_MS]
			= curr_time;
	} else if (exec_ms > 1) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_2_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_2_MS]
			= curr_time;
	} else if (exec_time > 500) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_1_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_1_MS]
			= curr_time;
	} else {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_500_US]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_500_US]
			= curr_time;
	}

	if (sched_ms > 10) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_BEYOND]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_BEYOND]
			= curr_time;
	} else if (sched_ms > 5) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_10_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_10_MS]
			= curr_time;
	} else if (sched_ms > 2) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_5_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_5_MS]
			= curr_time;
	} else if (sched_ms > 1) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_2_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_2_MS]
			= curr_time;
	} else if (sched_time > 500) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_1_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_1_MS]
			= curr_time;
	} else {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_500_US]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_500_US]
			= curr_time;
	}
}
#else
/* No-op stubs when CE tasklet latency debugging is compiled out */
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
				     uint8_t ce_id)
{
}

static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
}
#endif /*CE_TASKLET_DEBUG_ENABLE*/

#if defined(CE_TASKLET_DEBUG_ENABLE) && defined(CE_TASKLET_SCHEDULE_ON_FULL)
/**
 * hif_reset_ce_full_count() - Reset ce full count
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_reset_ce_full_count(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.ce_ring_full_count[ce_id] = 0;
}
#else
static inline void
hif_reset_ce_full_count(struct hif_softc *scn, uint8_t ce_id)
{
}
#endif

#ifdef HIF_DETECTION_LATENCY_ENABLE
/**
 * hif_latency_detect_tasklet_sched() - note when/where a monitored CE
 * tasklet was scheduled
 * @scn: hif context
 * @tasklet_entry: ce tasklet entry
 *
 * Records sched cpu and tick only for CEs within HIF_TASKLET_IN_MONITOR
 * whose bit is set in the latency-detect bitmap.
 */
static inline
void hif_latency_detect_tasklet_sched(
	struct hif_softc *scn,
	struct ce_tasklet_entry *tasklet_entry)
{
	int idx = tasklet_entry->ce_id;

	if (idx >= HIF_TASKLET_IN_MONITOR ||
	    !qdf_test_bit(idx, scn->latency_detect.tasklet_bmap))
		return;

	scn->latency_detect.tasklet_info[idx].sched_cpuid = qdf_get_cpu();
	scn->latency_detect.tasklet_info[idx].sched_time = qdf_system_ticks();
}

/**
 * hif_latency_detect_tasklet_exec() - note execution tick of a monitored
 * CE tasklet and trigger latency checking
 * @scn: hif context
 * @tasklet_entry: ce tasklet entry
 */
static inline
void hif_latency_detect_tasklet_exec(
	struct hif_softc *scn,
	struct ce_tasklet_entry *tasklet_entry)
{
	int idx = tasklet_entry->ce_id;

	if (idx >= HIF_TASKLET_IN_MONITOR ||
	    !qdf_test_bit(idx, scn->latency_detect.tasklet_bmap))
		return;

	scn->latency_detect.tasklet_info[idx].exec_time = qdf_system_ticks();
	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_TASKLET));
}
#else
static inline
void hif_latency_detect_tasklet_sched(
	struct hif_softc *scn,
	struct ce_tasklet_entry *tasklet_entry)
{}

static inline
void hif_latency_detect_tasklet_exec(
	struct hif_softc *scn,
405 struct ce_tasklet_entry *tasklet_entry) 406 {} 407 #endif 408 409 #ifdef CUSTOM_CB_SCHEDULER_SUPPORT 410 /** 411 * ce_get_custom_cb_pending() - Helper API to check whether the custom 412 * callback is pending 413 * @CE_state: Pointer to CE state 414 * 415 * return: bool 416 */ 417 static bool 418 ce_get_custom_cb_pending(struct CE_state *CE_state) 419 { 420 return (qdf_atomic_dec_if_positive(&CE_state->custom_cb_pending) >= 0); 421 } 422 423 /** 424 * ce_execute_custom_cb() - Helper API to execute custom callback 425 * @CE_state: Pointer to CE state 426 * 427 * return: void 428 */ 429 static void 430 ce_execute_custom_cb(struct CE_state *CE_state) 431 { 432 while (ce_get_custom_cb_pending(CE_state) && CE_state->custom_cb && 433 CE_state->custom_cb_context) 434 CE_state->custom_cb(CE_state->custom_cb_context); 435 } 436 #else 437 /** 438 * ce_execute_custom_cb() - Helper API to execute custom callback 439 * @CE_state: Pointer to CE state 440 * 441 * return: void 442 */ 443 static void 444 ce_execute_custom_cb(struct CE_state *CE_state) 445 { 446 } 447 #endif /* CUSTOM_CB_SCHEDULER_SUPPORT */ 448 449 /** 450 * ce_tasklet() - ce_tasklet 451 * @data: data 452 * 453 * Return: N/A 454 */ 455 static void ce_tasklet(unsigned long data) 456 { 457 struct ce_tasklet_entry *tasklet_entry = 458 (struct ce_tasklet_entry *)data; 459 struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state; 460 struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state); 461 struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id]; 462 463 hif_record_ce_desc_event(scn, tasklet_entry->ce_id, 464 HIF_CE_TASKLET_ENTRY, NULL, NULL, -1, 0); 465 466 if (scn->ce_latency_stats) 467 hif_record_tasklet_exec_entry_ts(scn, tasklet_entry->ce_id); 468 469 hif_latency_detect_tasklet_exec(scn, tasklet_entry); 470 471 if (qdf_atomic_read(&scn->link_suspended)) { 472 hif_err("ce %d tasklet fired after link suspend", 473 tasklet_entry->ce_id); 474 QDF_BUG(0); 475 } 476 477 
ce_execute_custom_cb(CE_state); 478 479 ce_per_engine_service(scn, tasklet_entry->ce_id); 480 481 if (ce_check_rx_pending(CE_state) && tasklet_entry->inited) { 482 /* 483 * There are frames pending, schedule tasklet to process them. 484 * Enable the interrupt only when there is no pending frames in 485 * any of the Copy Engine pipes. 486 */ 487 if (test_bit(TASKLET_STATE_SCHED, 488 &tasklet_entry->intr_tq.state)) { 489 hif_info("ce_id%d tasklet was scheduled, return", 490 tasklet_entry->ce_id); 491 qdf_atomic_dec(&scn->active_tasklet_cnt); 492 return; 493 } 494 495 hif_record_ce_desc_event(scn, tasklet_entry->ce_id, 496 HIF_CE_TASKLET_RESCHEDULE, 497 NULL, NULL, -1, 0); 498 499 ce_tasklet_schedule(tasklet_entry); 500 hif_latency_detect_tasklet_sched(scn, tasklet_entry); 501 502 hif_reset_ce_full_count(scn, tasklet_entry->ce_id); 503 if (scn->ce_latency_stats) { 504 ce_tasklet_update_bucket(hif_ce_state, 505 tasklet_entry->ce_id); 506 hif_record_tasklet_sched_entry_ts(scn, 507 tasklet_entry->ce_id); 508 } 509 return; 510 } 511 512 hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT, 513 NULL, NULL, -1, 0); 514 515 if (scn->ce_latency_stats) 516 ce_tasklet_update_bucket(hif_ce_state, tasklet_entry->ce_id); 517 518 if ((scn->target_status != TARGET_STATUS_RESET) && 519 !scn->free_irq_done) 520 hif_irq_enable(scn, tasklet_entry->ce_id); 521 522 qdf_atomic_dec(&scn->active_tasklet_cnt); 523 } 524 525 /** 526 * ce_tasklet_init() - ce_tasklet_init 527 * @hif_ce_state: hif_ce_state 528 * @mask: mask 529 * 530 * Return: N/A 531 */ 532 void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask) 533 { 534 int i; 535 struct CE_attr *attr; 536 537 for (i = 0; i < CE_COUNT_MAX; i++) { 538 if (mask & (1 << i)) { 539 hif_ce_state->tasklets[i].ce_id = i; 540 hif_ce_state->tasklets[i].inited = true; 541 hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state; 542 543 attr = &hif_ce_state->host_ce_config[i]; 544 if (attr->flags & CE_ATTR_HI_TASKLET) 545 
hif_ce_state->tasklets[i].hi_tasklet_ce = true; 546 else 547 hif_ce_state->tasklets[i].hi_tasklet_ce = false; 548 549 tasklet_init(&hif_ce_state->tasklets[i].intr_tq, 550 ce_tasklet, 551 (unsigned long)&hif_ce_state->tasklets[i]); 552 } 553 } 554 } 555 /** 556 * ce_tasklet_kill() - ce_tasklet_kill 557 * @scn: HIF context 558 * 559 * Context: Non-Atomic context 560 * Return: N/A 561 */ 562 void ce_tasklet_kill(struct hif_softc *scn) 563 { 564 int i; 565 struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn); 566 567 for (i = 0; i < CE_COUNT_MAX; i++) { 568 if (hif_ce_state->tasklets[i].inited) { 569 hif_ce_state->tasklets[i].inited = false; 570 /* 571 * Cancel the tasklet work before tasklet_disable 572 * to avoid race between tasklet_schedule and 573 * tasklet_kill. Here cancel_work_sync() won't 574 * return before reschedule_ce_tasklet_work_handler() 575 * completes. Even if tasklet_schedule() happens 576 * tasklet_disable() will take care of that. 577 */ 578 qdf_cancel_work(&tasklet_workers[i].reg_work); 579 tasklet_kill(&hif_ce_state->tasklets[i].intr_tq); 580 } 581 } 582 qdf_atomic_set(&scn->active_tasklet_cnt, 0); 583 } 584 585 /** 586 * ce_tasklet_entry_dump() - dump tasklet entries info 587 * @hif_ce_state: ce state 588 * 589 * This function will dump all tasklet entries info 590 * 591 * Return: None 592 */ 593 static void ce_tasklet_entry_dump(struct HIF_CE_state *hif_ce_state) 594 { 595 struct ce_tasklet_entry *tasklet_entry; 596 int i; 597 598 if (hif_ce_state) { 599 for (i = 0; i < CE_COUNT_MAX; i++) { 600 tasklet_entry = &hif_ce_state->tasklets[i]; 601 602 hif_info("%02d: ce_id=%d, inited=%d, hi_tasklet_ce=%d hif_ce_state=%pK", 603 i, 604 tasklet_entry->ce_id, 605 tasklet_entry->inited, 606 tasklet_entry->hi_tasklet_ce, 607 tasklet_entry->hif_ce_state); 608 } 609 } 610 } 611 612 #define HIF_CE_DRAIN_WAIT_CNT 20 613 /** 614 * hif_drain_tasklets(): wait until no tasklet is pending 615 * @scn: hif context 616 * 617 * Let running tasklets clear pending 
traffic. 618 * 619 * Return: 0 if no bottom half is in progress when it returns. 620 * -EFAULT if it times out. 621 */ 622 int hif_drain_tasklets(struct hif_softc *scn) 623 { 624 uint32_t ce_drain_wait_cnt = 0; 625 int32_t tasklet_cnt; 626 627 while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) { 628 if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) { 629 hif_err("CE still not done with access: %d", 630 tasklet_cnt); 631 632 return -EFAULT; 633 } 634 hif_info("Waiting for CE to finish access"); 635 msleep(10); 636 } 637 return 0; 638 } 639 640 #ifdef WLAN_SUSPEND_RESUME_TEST 641 /** 642 * hif_interrupt_is_ut_resume(): Tests if an irq on the given copy engine should 643 * trigger a unit-test resume. 644 * @scn: The HIF context to operate on 645 * @ce_id: The copy engine Id from the originating interrupt 646 * 647 * Return: true if the raised irq should trigger a unit-test resume 648 */ 649 static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id) 650 { 651 int errno; 652 uint8_t wake_ce_id; 653 654 if (!hif_is_ut_suspended(scn)) 655 return false; 656 657 /* ensure passed ce_id matches wake ce_id */ 658 errno = hif_get_wake_ce_id(scn, &wake_ce_id); 659 if (errno) { 660 hif_err("Failed to get wake CE Id: %d", errno); 661 return false; 662 } 663 664 return ce_id == wake_ce_id; 665 } 666 #else 667 static inline bool 668 hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id) 669 { 670 return false; 671 } 672 #endif /* WLAN_SUSPEND_RESUME_TEST */ 673 674 /** 675 * hif_snoc_interrupt_handler() - hif_snoc_interrupt_handler 676 * @irq: irq coming from kernel 677 * @context: context 678 * 679 * Return: N/A 680 */ 681 static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context) 682 { 683 struct ce_tasklet_entry *tasklet_entry = context; 684 struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state); 685 686 return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq), 687 tasklet_entry); 688 } 689 690 /** 691 * 
hif_ce_increment_interrupt_count() - update ce stats 692 * @hif_ce_state: ce state 693 * @ce_id: ce id 694 * 695 * Return: none 696 */ 697 static inline void 698 hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id) 699 { 700 int cpu_id = qdf_get_cpu(); 701 702 hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++; 703 } 704 705 /** 706 * hif_display_ce_stats() - display ce stats 707 * @hif_ctx: HIF context 708 * 709 * Return: none 710 */ 711 void hif_display_ce_stats(struct hif_softc *hif_ctx) 712 { 713 #define STR_SIZE 128 714 uint8_t i, j, pos; 715 char str_buffer[STR_SIZE]; 716 int size, ret; 717 struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx); 718 719 qdf_debug("CE interrupt statistics:"); 720 for (i = 0; i < CE_COUNT_MAX; i++) { 721 size = STR_SIZE; 722 pos = 0; 723 for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) { 724 ret = snprintf(str_buffer + pos, size, "[%d]:%d ", 725 j, hif_ce_state->stats.ce_per_cpu[i][j]); 726 if (ret <= 0 || ret >= size) 727 break; 728 size -= ret; 729 pos += ret; 730 } 731 qdf_debug("CE id[%2d] - %s", i, str_buffer); 732 } 733 734 if (hif_ctx->ce_latency_stats) 735 hif_ce_latency_stats(hif_ctx); 736 #undef STR_SIZE 737 } 738 739 /** 740 * hif_clear_ce_stats() - clear ce stats 741 * @hif_ce_state: ce state 742 * 743 * Return: none 744 */ 745 void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state) 746 { 747 qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats)); 748 } 749 750 #ifdef WLAN_TRACEPOINTS 751 /** 752 * hif_set_ce_tasklet_sched_time() - Set tasklet schedule time for 753 * CE with matching ce_id 754 * @scn: hif context 755 * @ce_id: CE id 756 * 757 * Return: None 758 */ 759 static inline 760 void hif_set_ce_tasklet_sched_time(struct hif_softc *scn, uint8_t ce_id) 761 { 762 struct CE_state *ce_state = scn->ce_id_to_state[ce_id]; 763 764 ce_state->ce_tasklet_sched_time = qdf_time_sched_clock(); 765 } 766 #else 767 static inline 768 void hif_set_ce_tasklet_sched_time(struct hif_softc *scn, 
uint8_t ce_id) 769 { 770 } 771 #endif 772 773 /** 774 * hif_tasklet_schedule() - schedule tasklet 775 * @hif_ctx: hif context 776 * @tasklet_entry: ce tasklet entry 777 * 778 * Return: false if tasklet already scheduled, otherwise true 779 */ 780 static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx, 781 struct ce_tasklet_entry *tasklet_entry) 782 { 783 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 784 785 if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state)) { 786 hif_debug("tasklet scheduled, return"); 787 qdf_atomic_dec(&scn->active_tasklet_cnt); 788 return false; 789 } 790 791 hif_set_ce_tasklet_sched_time(scn, tasklet_entry->ce_id); 792 /* keep it before tasklet_schedule, this is to happy whunt. 793 * in whunt, tasklet may run before finished hif_tasklet_schedule. 794 */ 795 hif_latency_detect_tasklet_sched(scn, tasklet_entry); 796 ce_tasklet_schedule(tasklet_entry); 797 798 hif_reset_ce_full_count(scn, tasklet_entry->ce_id); 799 if (scn->ce_latency_stats) 800 hif_record_tasklet_sched_entry_ts(scn, tasklet_entry->ce_id); 801 802 return true; 803 } 804 805 #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7 806 /** 807 * ce_poll_reap_by_id() - reap the available frames from CE by polling per ce_id 808 * @scn: hif context 809 * @ce_id: CE id 810 * 811 * This function needs to be called once after all the irqs are disabled 812 * and tasklets are drained during bus suspend. 
 *
 * Return: 0 on success, unlikely -EBUSY if reaping goes infinite loop
 */
static int ce_poll_reap_by_id(struct hif_softc *scn, enum ce_id_type ce_id)
{
	struct HIF_CE_state *hif_ce_state = (struct HIF_CE_state *)scn;
	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];

	if (scn->ce_latency_stats)
		hif_record_tasklet_exec_entry_ts(scn, ce_id);

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
				 NULL, NULL, -1, 0);

	/* service the CE once by polling; irqs are expected to already be
	 * disabled by the caller (bus-suspend path)
	 */
	ce_per_engine_service(scn, ce_id);

	/*
	 * In an unlikely case, if frames are still pending to reap,
	 * could be an infinite loop, so return -EBUSY.
	 */
	if (ce_check_rx_pending(CE_state))
		return -EBUSY;

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
				 NULL, NULL, -1, 0);

	if (scn->ce_latency_stats)
		ce_tasklet_update_bucket(hif_ce_state, ce_id);

	return 0;
}

/**
 * hif_drain_fw_diag_ce() - reap all the available FW diag logs from CE
 * @scn: hif context
 *
 * This function needs to be called once after all the irqs are disabled
 * and tasklets are drained during bus suspend.
 *
 * Return: 0 on success, unlikely -EBUSY if reaping goes infinite loop
 */
int hif_drain_fw_diag_ce(struct hif_softc *scn)
{
	uint8_t ce_id;

	/* no FW diag CE configured: nothing to drain, report success */
	if (hif_get_fw_diag_ce_id(scn, &ce_id))
		return 0;

	return ce_poll_reap_by_id(scn, ce_id);
}
#endif

#ifdef CE_TASKLET_SCHEDULE_ON_FULL
/**
 * ce_check_tasklet_status() - check whether the CE's bottom half is
 * already scheduled (NAPI or tasklet, whichever this CE uses)
 * @ce_id: ce id
 * @entry: ce tasklet entry
 *
 * Return: 0 if not scheduled, -EBUSY if already scheduled
 */
static inline int ce_check_tasklet_status(int ce_id,
					  struct ce_tasklet_entry *entry)
{
	struct HIF_CE_state *hif_ce_state = entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (hif_napi_enabled(hif_hdl, ce_id)) {
		struct qca_napi_info *napi;

		/* NOTE(review): assumes napis[ce_id] is non-NULL whenever
		 * hif_napi_enabled() reports true for this CE - confirm.
		 */
		napi = scn->napi_data.napis[ce_id];
		if (test_bit(NAPI_STATE_SCHED, &napi->napi.state))
			return -EBUSY;
	} else {
		if (test_bit(TASKLET_STATE_SCHED,
			     &hif_ce_state->tasklets[ce_id].intr_tq.state))
			return -EBUSY;
	}
	return 0;
}

static inline void ce_interrupt_lock(struct CE_state *ce_state)
{
	qdf_spin_lock_irqsave(&ce_state->ce_interrupt_lock);
}

static inline void ce_interrupt_unlock(struct CE_state *ce_state)
{
	qdf_spin_unlock_irqrestore(&ce_state->ce_interrupt_lock);
}
#else
static inline int ce_check_tasklet_status(int ce_id,
					  struct ce_tasklet_entry *entry)
{
	return 0;
}

static inline void ce_interrupt_lock(struct CE_state *ce_state)
{
}

static inline void ce_interrupt_unlock(struct CE_state *ce_state)
{
}
#endif

/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: ce_id
 * @tasklet_entry: context
 *
 * Return: N/A
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
				  struct ce_tasklet_entry *tasklet_entry)
{
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl =
		GET_HIF_OPAQUE_HDL(scn);
	struct CE_state *ce_state;

	/* validate the entry/irq mapping before touching any CE state */
	if (tasklet_entry->ce_id != ce_id) {
		bool rl;

		rl = hif_err_rl("ce_id (expect %d, received %d) does not match, inited=%d, ce_count=%u",
				tasklet_entry->ce_id, ce_id,
				tasklet_entry->inited,
				scn->ce_count);

		if (!rl)
			ce_tasklet_entry_dump(hif_ce_state);

		return IRQ_NONE;
	}
	/* bounds-check before indexing ce_id_to_state[] below */
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		hif_err("ce_id=%d > CE_COUNT_MAX=%d",
			tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}

	ce_state = scn->ce_id_to_state[ce_id];

	ce_interrupt_lock(ce_state);
	/* drop the irq if this CE's bottom half is already scheduled */
	if (ce_check_tasklet_status(ce_id, tasklet_entry)) {
		ce_interrupt_unlock(ce_state);
		return IRQ_NONE;
	}

	hif_irq_disable(scn, ce_id);

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
		ce_interrupt_unlock(ce_state);
		return IRQ_HANDLED;
	}

	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
				 NULL, NULL, 0, 0);
	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

	/* unit-test resume path: resume FW and re-arm without scheduling */
	if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
		hif_ut_fw_resume(scn);
		hif_irq_enable(scn, ce_id);
		ce_interrupt_unlock(ce_state);
		return IRQ_HANDLED;
	}

	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		hif_tasklet_schedule(hif_hdl, tasklet_entry);

	ce_interrupt_unlock(ce_state);

	return IRQ_HANDLED;
}

/* irq name per copy engine, indexed by ce_id */
const char *ce_name[CE_COUNT_MAX] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
#ifdef QCA_WIFI_QCN9224
	"WLAN_CE_12",
	"WLAN_CE_13",
	"WLAN_CE_14",
	"WLAN_CE_15",
#endif
};
/**
 * ce_unregister_irq() - ce_unregister_irq
 * @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for.
 *
 * Unregisters copy engine irqs matching mask. If a 1 is set at bit x,
 * unregister for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	struct hif_softc *scn;

	if (!hif_ce_state) {
		hif_warn("hif_ce_state = NULL");
		return QDF_STATUS_SUCCESS;
	}

	scn = HIF_GET_SOFTC(hif_ce_state);
	ce_count = scn->ce_count;
	/* we are removing interrupts, so better stop NAPI */
	ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
			     NAPI_EVT_INT_STATE, (void *)0);
	if (ret != 0)
		hif_err("napi_event INT_STATE returned %d", ret);
	/* this is not fatal, continue */

	/* filter mask to free only for ce's with irq registered */
	mask &= hif_ce_state->ce_register_irq_done;
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
					      &hif_ce_state->tasklets[id]);
			if (ret < 0)
				hif_err(
					"pld_unregister_irq error - ce_id = %d, ret = %d",
					id, ret);
		}
		/* NOTE(review): polling is disabled for every CE < ce_count,
		 * not just those in mask - assumes ce_id_to_state[id] is
		 * valid for all of them; confirm.
		 */
		ce_disable_polling(scn->ce_id_to_state[id]);
	}
	hif_ce_state->ce_register_irq_done &= ~mask;

	return QDF_STATUS_SUCCESS;
}
/**
 * ce_register_irq() - ce_register_irq
 * @hif_ce_state: hif_ce_state
 * @mask: which copy engines to unregister for.
 *
 * Registers copy engine irqs matching mask. If a 1 is set at bit x,
 * Register for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	unsigned long irqflags = IRQF_TRIGGER_RISING;
	uint32_t done_mask = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

	ce_count = scn->ce_count;

	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
						 hif_snoc_interrupt_handler,
						 irqflags, ce_name[id],
						 &hif_ce_state->tasklets[id]);
			if (ret) {
				hif_err(
					"cannot register CE %d irq handler, ret = %d",
					id, ret);
				/* roll back the irqs registered so far */
				ce_unregister_irq(hif_ce_state, done_mask);
				return QDF_STATUS_E_FAULT;
			}
			done_mask |= 1 << id;
		}
	}
	hif_ce_state->ce_register_irq_done |= done_mask;

	return QDF_STATUS_SUCCESS;
}