/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#include "qdf_module.h"

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"
#ifdef HIF_CE_LOG_INFO
#include "qdf_hang_event_notifier.h"
#endif

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
     defined(QCA_WIFI_QCA6018) || defined(QCA_WIFI_QCA5018) || \
     defined(QCA_WIFI_WCN7850) || defined(QCA_WIFI_QCA9574)) && \
    !defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

#ifdef QCA_WIFI_SUPPORT_SRNG
#include <hal_api.h>
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived, rather
 * than waiting for the interrupt, which may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC 1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef ENABLE_10_4_FW_HDR
#if (ENABLE_10_4_FW_HDR == 1)
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR == 1 */
#endif /* ENABLE_10_4_FW_HDR */

static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump access log
 *
 * Dump the target access log.
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
    hif_target_dump_access_log();
}
#endif

/*
 * This table contains the interrupt index for each copy engine
 * for the various numbers of MSIs available in the system.
 */
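/*
 * Illustrative use of the table below (a sketch - the array member name
 * inside struct ce_int_assignment is an assumption, not taken from this
 * file):
 *
 *   nr = number of MSI vectors granted (row 0 is the default mapping);
 *   interrupt index serving CE n  ~=  ce_int_context[nr].msi_idx[n]
 *
 * Each row assigns one interrupt index per CE, reusing vectors when
 * there are more CEs than MSIs; CEs that share a vector fold onto
 * interrupt index 0.
 */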
static struct ce_int_assignment ce_int_context[NUM_CE_CONTEXT] = {
    /* Default configuration */
    {{ CE_INTERRUPT_IDX(0),  CE_INTERRUPT_IDX(1),  CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),  CE_INTERRUPT_IDX(4),  CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),  CE_INTERRUPT_IDX(7),  CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(9),  CE_INTERRUPT_IDX(10), CE_INTERRUPT_IDX(11),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(12), CE_INTERRUPT_IDX(13), CE_INTERRUPT_IDX(14),
       CE_INTERRUPT_IDX(15),
#endif
    } },
    /* Interrupt assignment for 1 MSI combination */
    {{ CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 2 MSI combination */
    {{ CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(1), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 3 MSI combination */
    {{ CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(1), CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(1), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 4 MSI combination */
    {{ CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(1), CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 5 MSI combination */
    {{ CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(1), CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(4),
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 6 MSI combination */
    {{ CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(1), CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3), CE_INTERRUPT_IDX(4), CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 7 MSI combination */
    {{ CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(1), CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3), CE_INTERRUPT_IDX(4), CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 8 MSI combination */
    {{ CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(1), CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3), CE_INTERRUPT_IDX(4), CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6), CE_INTERRUPT_IDX(7), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 9 MSI combination */
    {{ CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(1), CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3), CE_INTERRUPT_IDX(4), CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6), CE_INTERRUPT_IDX(7), CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 10 MSI combination */
    {{ CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(1), CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3), CE_INTERRUPT_IDX(4), CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6), CE_INTERRUPT_IDX(7), CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(9), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 11 MSI combination */
    {{ CE_INTERRUPT_IDX(0),  CE_INTERRUPT_IDX(1), CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),  CE_INTERRUPT_IDX(4), CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),  CE_INTERRUPT_IDX(7), CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(9),  CE_INTERRUPT_IDX(10), CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 12 MSI combination */
    {{ CE_INTERRUPT_IDX(0),  CE_INTERRUPT_IDX(1),  CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),  CE_INTERRUPT_IDX(4),  CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),  CE_INTERRUPT_IDX(7),  CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(9),  CE_INTERRUPT_IDX(10), CE_INTERRUPT_IDX(11),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
#ifdef QCA_WIFI_QCN9224
    /* Interrupt assignment for 13 MSI combination */
    {{ CE_INTERRUPT_IDX(0),  CE_INTERRUPT_IDX(1),  CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),  CE_INTERRUPT_IDX(4),  CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),  CE_INTERRUPT_IDX(7),  CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(9),  CE_INTERRUPT_IDX(10), CE_INTERRUPT_IDX(11),
       CE_INTERRUPT_IDX(12), CE_INTERRUPT_IDX(0),  CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
    } },
    /* Interrupt assignment for 14 MSI combination */
    {{ CE_INTERRUPT_IDX(0),  CE_INTERRUPT_IDX(1),  CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),  CE_INTERRUPT_IDX(4),  CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),  CE_INTERRUPT_IDX(7),  CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(9),  CE_INTERRUPT_IDX(10), CE_INTERRUPT_IDX(11),
       CE_INTERRUPT_IDX(12), CE_INTERRUPT_IDX(13), CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
    } },
    /* Interrupt assignment for 15 MSI combination */
    {{ CE_INTERRUPT_IDX(0),  CE_INTERRUPT_IDX(1),  CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),  CE_INTERRUPT_IDX(4),  CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),  CE_INTERRUPT_IDX(7),  CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(9),  CE_INTERRUPT_IDX(10), CE_INTERRUPT_IDX(11),
       CE_INTERRUPT_IDX(12), CE_INTERRUPT_IDX(13), CE_INTERRUPT_IDX(14),
       CE_INTERRUPT_IDX(0),
    } },
    /* Interrupt assignment for 16 MSI combination */
    {{ CE_INTERRUPT_IDX(0),  CE_INTERRUPT_IDX(1),  CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),  CE_INTERRUPT_IDX(4),  CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),  CE_INTERRUPT_IDX(7),  CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(9),  CE_INTERRUPT_IDX(10), CE_INTERRUPT_IDX(11),
       CE_INTERRUPT_IDX(12), CE_INTERRUPT_IDX(13), CE_INTERRUPT_IDX(14),
       CE_INTERRUPT_IDX(15),
    } },
#endif
};

void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
                      uint8_t cmd_id, bool start)
{
    struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

    switch (cmd_id) {
    case AGC_DUMP:
        if (start)
            priv_start_agc(scn);
        else
            priv_dump_agc(scn);
        break;
    case CHANINFO_DUMP:
        if (start)
            priv_start_cap_chaninfo(scn);
        else
            priv_dump_chaninfo(scn);
        break;
    case BB_WATCHDOG_DUMP:
        priv_dump_bbwatchdog(scn);
        break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
    case PCIE_ACCESS_DUMP:
        hif_target_access_log_dump();
        break;
#endif
    default:
        hif_err("Invalid htc dump command: %d", cmd_id);
        break;
    }
}

static void ce_poll_timeout(void *arg)
{
    struct CE_state *CE_state = (struct CE_state *)arg;

    if (CE_state->timer_inited) {
        ce_per_engine_service(CE_state->scn, CE_state->id);
        qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
    }
}

static unsigned int roundup_pwr2(unsigned int n)
{
    int i;
    unsigned int test_pwr2;

    if (!(n & (n - 1)))
        return n; /* already a power of 2 */

    test_pwr2 = 4;
    for (i = 0; i < 29; i++) {
        if (test_pwr2 > n)
            return test_pwr2;
        test_pwr2 = test_pwr2 << 1;
    }

    QDF_ASSERT(0); /* n too large */
    return 0;
}
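/*
 * Illustrative behavior of roundup_pwr2() (worked examples, not from the
 * original sources):
 *
 *   roundup_pwr2(1)    -> 1     (already a power of 2)
 *   roundup_pwr2(5)    -> 8
 *   roundup_pwr2(1000) -> 1024
 *   roundup_pwr2(4096) -> 4096  (powers of 2 are returned unchanged)
 *
 * Note that 0 also satisfies !(n & (n - 1)) and is returned as-is, so
 * callers are expected to pass a non-zero entry count.
 */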
#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
    { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
    { 9, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 10, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

#ifdef QCN7605_SUPPORT
static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
    { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 3, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

#ifdef WLAN_FEATURE_EPPING
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
    { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 5, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif
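/*
 * Reading the tables above: each shadow_reg_cfg entry pairs a CE number
 * with the register offset shadowed for it, e.g.
 * { 0, ADRASTEA_SRC_WR_INDEX_OFFSET } shadows CE 0's source-ring write
 * index and { 1, ADRASTEA_DST_WR_INDEX_OFFSET } shadows CE 1's
 * destination-ring write index. (The presumed intent - letting the host
 * update ring indices without a full target register access - is an
 * assumption about purpose, not stated in this file.)
 */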
/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the
 * actual mapping of HTC services to HIF pipes.
 */
/*
 * This table defines the Copy Engine configuration and the mapping of
 * services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering the BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
 * ===========================================================================
 * Purpose        | Service / Endpoint     | CE   | Dir  | Xfer Size | Xfer
 *                |                        |      |      |           | Frequency
 * ===========================================================================
 * tx descriptor  | HTT_DATA (downlink)    | CE 0 | h->t | medium -  | very
 * download       |                        |      |      | O(100B)   | frequent
 *                |                        |      |      |           | + regular
 * ---------------------------------------------------------------------------
 * rx indication  | HTT_DATA (uplink)      | CE 1 | t->h | small -   | frequent
 * upload         |                        |      |      | O(10B)    | + regular
 * ---------------------------------------------------------------------------
 * MSDU upload,   | DATA_BK (uplink)       | CE 2 | t->h | large -   | rare
 * e.g. noise     |                        |      |      | O(1000B)  | (frequent
 * packets        |                        |      |      |           | in IP1.0
 *                |                        |      |      |           | testing)
 * ---------------------------------------------------------------------------
 * MSDU download, | DATA_BK (downlink)     | CE 3 | h->t | large -   | very rare
 * e.g.           |                        |      |      | O(1000B)  | (frequent
 * misdirected    |                        |      |      |           | in IP1.0
 * EAPOL packets  |                        |      |      |           | testing)
 * ---------------------------------------------------------------------------
 * n/a            | DATA_BE, DATA_VI,      | CE 2 | t->h |           | never(?)
 *                | DATA_VO (uplink)       |      |      |           |
 * ---------------------------------------------------------------------------
 * n/a            | DATA_BE, DATA_VI,      | CE 3 | h->t |           | never(?)
 *                | DATA_VO (downlink)     |      |      |           |
 * ---------------------------------------------------------------------------
 * WMI events     | WMI_CONTROL (uplink)   | CE 4 | t->h | medium -  | infrequent
 *                |                        |      |      | O(100B)   |
 * ---------------------------------------------------------------------------
 * WMI messages   | WMI_CONTROL (downlink) | CE 5 | h->t | medium -  | infrequent
 *                |                        |      |      | O(100B)   |
 * ---------------------------------------------------------------------------
 * n/a            | HTC_CTRL_RSVD,         | CE 1 | t->h |           | never(?)
 *                | HTC_RAW_STREAMS        |      |      |           |
 *                | (uplink)               |      |      |           |
 * ---------------------------------------------------------------------------
 * n/a            | HTC_CTRL_RSVD,         | CE 0 | h->t |           | never(?)
 *                | HTC_RAW_STREAMS        |      |      |           |
 *                | (downlink)             |      |      |           |
 * ---------------------------------------------------------------------------
 * diag           | none (raw CE)          | CE 7 | t<>h | 4         | Diag Window
 *                |                        |      |      |           | infrequent
 * ===========================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 *
 * PIPEDIR_OUT = out = UL = host -> target
 * PIPEDIR_IN  = in  = DL = target -> host
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, /* could be moved to 3 (share with WMI) */
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, }, /* not currently used */
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },  /* not currently used */
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
#if defined(QCA_WIFI_3_0_ADRASTEA)
    { HTT_DATA2_MSG_SVC, PIPEDIR_IN, 9, },
    { HTT_DATA3_MSG_SVC, PIPEDIR_IN, 10, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 11, },
#endif
    /* (Additions here) */
    { 0, 0, 0, }, /* Must be last */
};
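/*
 * Reading target_service_to_ce_map_wlan, e.g. for WMI_CONTROL_SVC:
 * host -> target traffic goes out on pipe/CE 3 (PIPEDIR_OUT) and
 * target -> host traffic arrives on pipe/CE 2 (PIPEDIR_IN). A service
 * therefore typically appears twice, once per direction.
 */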
/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN = TARGET to HOST */
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif

#if (defined(QCA_WIFI_QCA9574))
static struct service_to_pipe target_service_to_ce_map_qca9574[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca9574[] = {
};
#endif

#if (defined(QCA_WIFI_QCA8074V2))
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9, },
    { WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6018))
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
};
#endif
#if (defined(QCA_WIFI_QCN9000))
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
};
#endif

#if (defined(QCA_WIFI_QCN9224))
static struct service_to_pipe target_service_to_ce_map_qcn9224[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    { WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 14, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#endif

#if (defined(QCA_WIFI_QCA5018))
static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
};
#endif
/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN = TARGET to HOST */
#ifdef QCN7605_SUPPORT
static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
#ifdef IPA_OFFLOAD
    { WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
#else
    { HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
#endif
    { PACKET_LOG_SVC, PIPEDIR_IN, 7, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#endif

#if (defined(QCA_WIFI_QCA6290))
#ifdef QCA_6290_AP_MODE
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif
#if (defined(QCA_WIFI_QCA6390))
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_qca6490[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};

#if (defined(QCA_WIFI_QCA6750))
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
    { WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
#endif
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
};
#endif

#if (defined(QCA_WIFI_WCN7850))
static struct service_to_pipe target_service_to_ce_map_wcn7850[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
#endif
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_wcn7850[] = {
};
#endif
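/*
 * Note the pattern above: when a target's build symbol is not defined,
 * the #else branch still emits a zero-length stub table, so the switch
 * in hif_select_service_to_pipe_map() below always has a symbol to
 * reference (and sizeof() of a stub is 0), at no memory cost.
 */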
static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, /* could be moved to 3 (share with WMI) */
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, }, /* not currently used */
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },  /* not currently used */
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
#ifdef WLAN_FEATURE_FASTPATH
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 5, },
#else /* WLAN_FEATURE_FASTPATH */
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
#endif /* WLAN_FEATURE_FASTPATH */
    /* (Additions here) */
    { 0, 0, 0, }, /* Must be last */
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
    {WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
    {WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
    {WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
    {WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
    {WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
    {WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
    {WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
    {WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
    {WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
    {WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
    {HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
    {HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
    {HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
    {HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
    {HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
    {HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
    {0, 0, 0,},                             /* Must be last */
};

void hif_select_epping_service_to_pipe_map(struct service_to_pipe
                                           **tgt_svc_map_to_use,
                                           uint32_t *sz_tgt_svc_map_to_use)
{
    *tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
    *sz_tgt_svc_map_to_use =
        sizeof(target_service_to_ce_map_wlan_epping);
}
#endif
#ifdef QCN7605_SUPPORT
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
                               uint32_t *sz_tgt_svc_map_to_use)
{
    *tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
    *sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
}
#else
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
                               uint32_t *sz_tgt_svc_map_to_use)
{
    hif_err("QCN7605 not supported");
}
#endif

#ifdef QCA_WIFI_QCN9224
static
void hif_set_ce_config_qcn9224(struct hif_softc *scn,
                               struct HIF_CE_state *hif_state)
{
    hif_state->host_ce_config = host_ce_config_wlan_qcn9224;
    hif_state->target_ce_config = target_ce_config_wlan_qcn9224;
    hif_state->target_ce_config_sz =
        sizeof(target_ce_config_wlan_qcn9224);
    scn->ce_count = QCN_9224_CE_COUNT;
    scn->disable_wake_irq = 1;
}

static
void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
                               uint32_t *sz_tgt_svc_map_to_use)
{
    *tgt_svc_map_to_use = target_service_to_ce_map_qcn9224;
    *sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn9224);
}
#else
static inline
void hif_set_ce_config_qcn9224(struct hif_softc *scn,
                               struct HIF_CE_state *hif_state)
{
    hif_err("QCN9224 not supported");
}

static inline
void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
                               uint32_t *sz_tgt_svc_map_to_use)
{
    hif_err("QCN9224 not supported");
}
#endif
static void hif_select_service_to_pipe_map(struct hif_softc *scn,
                        struct service_to_pipe **tgt_svc_map_to_use,
                        uint32_t *sz_tgt_svc_map_to_use)
{
    uint32_t mode = hif_get_conparam(scn);
    struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
    struct hif_target_info *tgt_info = &scn->target_info;

    if (QDF_IS_EPPING_ENABLED(mode)) {
        hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
                                              sz_tgt_svc_map_to_use);
    } else {
        switch (tgt_info->target_type) {
        default:
            *tgt_svc_map_to_use = target_service_to_ce_map_wlan;
            *sz_tgt_svc_map_to_use =
                sizeof(target_service_to_ce_map_wlan);
            break;
        case TARGET_TYPE_QCN7605:
            hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
                                      sz_tgt_svc_map_to_use);
            break;
        case TARGET_TYPE_AR900B:
        case TARGET_TYPE_QCA9984:
        case TARGET_TYPE_IPQ4019:
        case TARGET_TYPE_QCA9888:
        case TARGET_TYPE_AR9888:
        case TARGET_TYPE_AR9888V2:
            *tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
            *sz_tgt_svc_map_to_use =
                sizeof(target_service_to_ce_map_ar900b);
            break;
        case TARGET_TYPE_QCA6290:
            *tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
            *sz_tgt_svc_map_to_use =
                sizeof(target_service_to_ce_map_qca6290);
            break;
        case TARGET_TYPE_QCA6390:
            *tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
            *sz_tgt_svc_map_to_use =
                sizeof(target_service_to_ce_map_qca6390);
            break;
        case TARGET_TYPE_QCA6490:
            *tgt_svc_map_to_use = target_service_to_ce_map_qca6490;
            *sz_tgt_svc_map_to_use =
                sizeof(target_service_to_ce_map_qca6490);
            break;
        case TARGET_TYPE_QCA6750:
            *tgt_svc_map_to_use = target_service_to_ce_map_qca6750;
            *sz_tgt_svc_map_to_use =
                sizeof(target_service_to_ce_map_qca6750);
            break;
        case TARGET_TYPE_WCN7850:
            *tgt_svc_map_to_use = target_service_to_ce_map_wcn7850;
            *sz_tgt_svc_map_to_use =
                sizeof(target_service_to_ce_map_wcn7850);
            break;
        case TARGET_TYPE_QCA8074:
            *tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
            *sz_tgt_svc_map_to_use =
                sizeof(target_service_to_ce_map_qca8074);
            break;
        case TARGET_TYPE_QCA8074V2:
            *tgt_svc_map_to_use =
                target_service_to_ce_map_qca8074_v2;
            *sz_tgt_svc_map_to_use =
                sizeof(target_service_to_ce_map_qca8074_v2);
            break;
        case TARGET_TYPE_QCA9574:
            *tgt_svc_map_to_use =
                target_service_to_ce_map_qca9574;
            *sz_tgt_svc_map_to_use =
                sizeof(target_service_to_ce_map_qca9574);
            break;
        case TARGET_TYPE_QCA6018:
            *tgt_svc_map_to_use =
                target_service_to_ce_map_qca6018;
            *sz_tgt_svc_map_to_use =
                sizeof(target_service_to_ce_map_qca6018);
            break;
        case TARGET_TYPE_QCN9000:
            *tgt_svc_map_to_use =
                target_service_to_ce_map_qcn9000;
            *sz_tgt_svc_map_to_use =
                sizeof(target_service_to_ce_map_qcn9000);
            break;
        case TARGET_TYPE_QCN9224:
            hif_select_ce_map_qcn9224(tgt_svc_map_to_use,
                                      sz_tgt_svc_map_to_use);
            break;
        case TARGET_TYPE_QCA5018:
        case TARGET_TYPE_QCN6122:
            *tgt_svc_map_to_use =
                target_service_to_ce_map_qca5018;
            *sz_tgt_svc_map_to_use =
                sizeof(target_service_to_ce_map_qca5018);
            break;
        }
    }
    hif_state->tgt_svc_map = *tgt_svc_map_to_use;
    hif_state->sz_tgt_svc_map = *sz_tgt_svc_map_to_use /
        sizeof(struct service_to_pipe);
}

/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 *   Sets the htt_rx_data/htt_tx_data attribute of the state structure
 *   if the CE serves one of the HTT DATA services.
 *
 * Return: true if the CE serves an HTT DATA service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
    struct service_to_pipe *svc_map;
    uint32_t map_sz, map_len;
    int i;
    bool rc = false;

    if (ce_state) {
        hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
                                       &map_sz);

        map_len = map_sz / sizeof(struct service_to_pipe);
        for (i = 0; i < map_len; i++) {
            if ((svc_map[i].pipenum == ce_state->id) &&
                ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
                 (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
                 (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
                /* HTT CEs are unidirectional */
                if (svc_map[i].pipedir == PIPEDIR_IN)
                    ce_state->htt_rx_data = true;
                else
                    ce_state->htt_tx_data = true;
                rc = true;
            }
        }
    }
    return rc;
}
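/*
 * Worked example of ce_mark_datapath() against
 * target_service_to_ce_map_wlan: { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1 }
 * marks CE 1 with htt_rx_data = true, while { HTT_DATA_MSG_SVC,
 * PIPEDIR_OUT, 4 } marks CE 4 with htt_tx_data = true; every other CE
 * leaves both flags false and the function returns false for it.
 */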
/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
                                         char *type)
{
    if (ring->write_index != 0 || ring->sw_index != 0)
        hif_err("ce %d, %s, initial sw_index = %d, initial write_index =%d",
                ce_id, type, ring->sw_index, ring->write_index);
    if (ring->write_index != ring->sw_index)
        QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries to allocate
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
                                     qdf_dma_addr_t *base_addr,
                                     struct CE_ring_state *ce_ring,
                                     unsigned int nentries, uint32_t desc_size)
{
    if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
        !ce_srng_based(scn)) {
        if (!scn->ipa_ce_ring) {
            scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
                scn->qdf_dev,
                nentries * desc_size + CE_DESC_RING_ALIGN);
            if (!scn->ipa_ce_ring) {
                hif_err("Failed to allocate memory for IPA ce ring");
                return QDF_STATUS_E_NOMEM;
            }
        }
        *base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
                                          &scn->ipa_ce_ring->mem_info);
        ce_ring->base_addr_owner_space_unaligned =
            scn->ipa_ce_ring->vaddr;
    } else {
        ce_ring->base_addr_owner_space_unaligned =
            hif_mem_alloc_consistent_unaligned
                (scn,
                 (nentries * desc_size + CE_DESC_RING_ALIGN),
                 base_addr,
                 ce_ring->hal_ring_type,
                 &ce_ring->is_ring_prealloc);

        if (!ce_ring->base_addr_owner_space_unaligned) {
            hif_err("Failed to allocate DMA memory for ce ring id: %u",
                    CE_id);
            return QDF_STATUS_E_NOMEM;
        }
    }
    return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
                              struct CE_ring_state *ce_ring, uint32_t desc_size)
{
    if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
        !ce_srng_based(scn)) {
        if (scn->ipa_ce_ring) {
            qdf_mem_shared_mem_free(scn->qdf_dev,
                                    scn->ipa_ce_ring);
            scn->ipa_ce_ring = NULL;
        }
        ce_ring->base_addr_owner_space_unaligned = NULL;
    } else {
        hif_mem_free_consistent_unaligned
            (scn,
             ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
             ce_ring->base_addr_owner_space_unaligned,
             ce_ring->base_addr_CE_space, 0,
             ce_ring->is_ring_prealloc);
        ce_ring->base_addr_owner_space_unaligned = NULL;
    }
}
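/*
 * Note on the IPA path above: the ring for HIF_PCI_IPA_UC_ASSIGNED_CE is
 * carved from QDF shared memory (qdf_mem_shared_mem_alloc) rather than
 * ordinary consistent DMA memory, and is allocated on first use and
 * cached in scn->ipa_ce_ring until ce_free_desc_ring() releases it. The
 * presumed intent - sharing the ring with the IPA microcontroller - is an
 * assumption from the naming, not something this file states.
 */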
#else
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
                                     qdf_dma_addr_t *base_addr,
                                     struct CE_ring_state *ce_ring,
                                     unsigned int nentries, uint32_t desc_size)
{
    ce_ring->base_addr_owner_space_unaligned =
        hif_mem_alloc_consistent_unaligned
            (scn,
             (nentries * desc_size + CE_DESC_RING_ALIGN),
             base_addr,
             ce_ring->hal_ring_type,
             &ce_ring->is_ring_prealloc);

    if (!ce_ring->base_addr_owner_space_unaligned) {
        hif_err("Failed to allocate DMA memory for ce ring id: %u",
                CE_id);
        return QDF_STATUS_E_NOMEM;
    }
    return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
                              struct CE_ring_state *ce_ring, uint32_t desc_size)
{
    hif_mem_free_consistent_unaligned
        (scn,
         ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
         ce_ring->base_addr_owner_space_unaligned,
         ce_ring->base_addr_CE_space, 0,
         ce_ring->is_ring_prealloc);
    ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */

/*
 * TODO: Need to explore the possibility of having this as part of a
 * target context instead of a global array.
 */
static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);

void ce_service_register_module(enum ce_target_type target_type,
                                struct ce_ops* (*ce_attach)(void))
{
    if (target_type < CE_MAX_TARGET_TYPE)
        ce_attach_register[target_type] = ce_attach;
}

qdf_export_symbol(ce_service_register_module);
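/*
 * Illustrative registration (a sketch; the attach-function names below
 * are assumptions - they live in the CE service implementations, not in
 * this file):
 *
 *   ce_service_register_module(CE_SVC_LEGACY, ce_services_legacy);
 *   ce_service_register_module(CE_SVC_SRNG, ce_services_srng);
 *
 * ce_services_attach() below then picks the registered constructor that
 * matches ce_srng_based().
 */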
/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the hif context
 *
 * Description:
 *   Returns true if the target is SRNG based.
 *
 * Return: true for SRNG-based targets, false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
    struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
    struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

    switch (tgt_info->target_type) {
    case TARGET_TYPE_QCA8074:
    case TARGET_TYPE_QCA8074V2:
    case TARGET_TYPE_QCA6290:
    case TARGET_TYPE_QCA6390:
    case TARGET_TYPE_QCA6490:
    case TARGET_TYPE_QCA6750:
    case TARGET_TYPE_QCA6018:
    case TARGET_TYPE_QCN9000:
    case TARGET_TYPE_QCN6122:
    case TARGET_TYPE_QCA5018:
    case TARGET_TYPE_WCN7850:
    case TARGET_TYPE_QCN9224:
    case TARGET_TYPE_QCA9574:
        return true;
    default:
        return false;
    }
}
qdf_export_symbol(ce_srng_based);

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
    struct ce_ops *ops = NULL;

    if (ce_srng_based(scn)) {
        if (ce_attach_register[CE_SVC_SRNG])
            ops = ce_attach_register[CE_SVC_SRNG]();
    } else if (ce_attach_register[CE_SVC_LEGACY]) {
        ops = ce_attach_register[CE_SVC_LEGACY]();
    }

    return ops;
}
#else /* QCA_WIFI_SUPPORT_SRNG */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
    if (ce_attach_register[CE_SVC_LEGACY])
        return ce_attach_register[CE_SVC_LEGACY]();

    return NULL;
}
#endif /* QCA_WIFI_SUPPORT_SRNG */

static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
        struct pld_shadow_reg_v2_cfg **shadow_config,
        int *num_shadow_registers_configured)
{
    struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

    hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
            scn, shadow_config, num_shadow_registers_configured);
}

static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
                                        uint8_t ring_type)
{
    struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

    return hif_state->ce_services->ce_get_desc_size(ring_type);
}

#ifdef QCA_WIFI_SUPPORT_SRNG
static inline int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
{
    switch (ce_ring_type) {
    case CE_RING_SRC:
        return CE_SRC;
    case CE_RING_DEST:
        return CE_DST;
    case CE_RING_STATUS:
        return CE_DST_STATUS;
    default:
        return -EINVAL;
    }
}
#else
static int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
{
    return 0;
}
#endif

static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
        uint8_t ring_type, uint32_t nentries)
{
    uint32_t ce_nbytes;
    char *ptr;
    qdf_dma_addr_t base_addr;
    struct CE_ring_state *ce_ring;
    uint32_t desc_size;
    struct hif_softc *scn = CE_state->scn;

    ce_nbytes = sizeof(struct CE_ring_state)
        + (nentries * sizeof(void *));
    ptr = qdf_mem_malloc(ce_nbytes);
    if (!ptr)
        return NULL;

    ce_ring = (struct CE_ring_state *)ptr;
    ptr += sizeof(struct CE_ring_state);
    ce_ring->nentries = nentries;
    ce_ring->nentries_mask = nentries - 1;

    ce_ring->low_water_mark_nentries = 0;
    ce_ring->high_water_mark_nentries = nentries;
    ce_ring->per_transfer_context = (void **)ptr;
    ce_ring->hal_ring_type = ce_ring_type_to_hal_ring_type(ring_type);

    desc_size = ce_get_desc_size(scn, ring_type);

    /* Legacy platforms that do not support cache
     * coherent DMA are unsupported
     */
    if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
                           ce_ring, nentries,
                           desc_size) !=
        QDF_STATUS_SUCCESS) {
        hif_err("ring has no DMA mem");
        qdf_mem_free(ce_ring);
        return NULL;
    }
    ce_ring->base_addr_CE_space_unaligned = base_addr;

    /* Correctly initialize memory to 0 to
     * prevent garbage data from crashing the system
     * when downloading firmware
     */
    qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
                 nentries * desc_size +
                 CE_DESC_RING_ALIGN);

    if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
        ce_ring->base_addr_CE_space =
            (ce_ring->base_addr_CE_space_unaligned +
             CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

        ce_ring->base_addr_owner_space = (void *)
            (((size_t)ce_ring->base_addr_owner_space_unaligned +
              CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
    } else {
        ce_ring->base_addr_CE_space =
            ce_ring->base_addr_CE_space_unaligned;
        ce_ring->base_addr_owner_space =
            ce_ring->base_addr_owner_space_unaligned;
    }

    return ce_ring;
}

static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
                         uint32_t ce_id, struct CE_ring_state *ring,
                         struct CE_attr *attr)
{
    struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

    return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
                                                 ring, attr);
}

static void ce_srng_cleanup(struct hif_softc *scn, struct CE_state *CE_state,
                            uint8_t ring_type)
{
    struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

    if (hif_state->ce_services->ce_srng_cleanup)
        hif_state->ce_services->ce_srng_cleanup(scn,
                                                CE_state, ring_type);
}

int hif_ce_bus_early_suspend(struct hif_softc *scn)
{
    uint8_t ul_pipe, dl_pipe;
    int ce_id, status, ul_is_polled, dl_is_polled;
    struct CE_state *ce_state;

    status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
                                     &ul_pipe, &dl_pipe,
                                     &ul_is_polled, &dl_is_polled);
    if (status) {
        hif_err("pipe_mapping failure");
        return status;
    }

    for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
        if (ce_id == ul_pipe)
            continue;
        if (ce_id == dl_pipe)
            continue;

        ce_state = scn->ce_id_to_state[ce_id];
        qdf_spin_lock_bh(&ce_state->ce_index_lock);
        if (ce_state->state == CE_RUNNING)
            ce_state->state = CE_PAUSED;
        qdf_spin_unlock_bh(&ce_state->ce_index_lock);
    }

    return status;
}
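/*
 * State transitions around bus suspend/resume (summary of the function
 * above and the one below):
 *
 *   early_suspend: CE_RUNNING -> CE_PAUSED for every CE except the
 *                  WMI_CONTROL_SVC UL/DL pipes, which stay active.
 *   late_resume:   CE_PAUSED -> CE_RUNNING, and CE_PENDING -> CE_RUNNING
 *                  with the cached src ring write_index flushed to
 *                  hardware so stalled TX can drain.
 */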
int hif_ce_bus_late_resume(struct hif_softc *scn)
{
    int ce_id;
    struct CE_state *ce_state;
    int write_index = 0;
    bool index_updated;

    for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
        ce_state = scn->ce_id_to_state[ce_id];
        qdf_spin_lock_bh(&ce_state->ce_index_lock);
        if (ce_state->state == CE_PENDING) {
            write_index = ce_state->src_ring->write_index;
            CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
                                      write_index);
            ce_state->state = CE_RUNNING;
            index_updated = true;
        } else {
            index_updated = false;
        }

        if (ce_state->state == CE_PAUSED)
            ce_state->state = CE_RUNNING;
        qdf_spin_unlock_bh(&ce_state->ce_index_lock);

        if (index_updated)
            hif_record_ce_desc_event(scn, ce_id,
                                     RESUME_WRITE_INDEX_UPDATE,
                                     NULL, NULL, write_index, 0);
    }

    return 0;
}

/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
    struct CE_state *ce_state = context;
    struct hif_softc *scn = ce_state->scn;
    struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
    struct HIF_CE_pipe_info *pipe_info =
        &ce_softc->pipe_info[ce_state->id];

    hif_post_recv_buffers_for_pipe(pipe_info);
}

#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed to by
 * the CE descriptors.
 * Allocates HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE each.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
    struct hif_ce_desc_event *event = NULL;
    struct hif_ce_desc_event *hist_ev = NULL;
    uint32_t index = 0;

    hist_ev =
        (struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

    if (!hist_ev)
        return QDF_STATUS_E_NOMEM;

    scn->hif_ce_desc_hist.data_enable[ce_id] = true;
    for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
        event = &hist_ev[index];
        event->data =
            (uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
        if (!event->data) {
            hif_err_rl("ce debug data alloc failed");
            return QDF_STATUS_E_NOMEM;
        }
    }
    return QDF_STATUS_SUCCESS;
}
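/*
 * Illustrative footprint of the allocation above: with data logging on,
 * each CE pins HIF_CE_HISTORY_MAX * CE_DEBUG_MAX_DATA_BUF_SIZE bytes of
 * payload-snapshot memory (both are build-time constants defined
 * elsewhere), in addition to the descriptor-event records themselves.
 */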
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		if (event->data)
			qdf_mem_free(event->data);
		event->data = NULL;
		event = NULL;
	}
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];

/**
 * alloc_mem_ce_debug_history() - Allocate CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 * @src_nentries: source ce ring entries
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id,
			   uint32_t src_nentries)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id];
	ce_hist->enable[ce_id] = 1;

	if (src_nentries) {
		status = alloc_mem_ce_debug_hist_data(scn, ce_id);
		if (status != QDF_STATUS_SUCCESS)
			return status;
	} else {
		ce_hist->data_enable[ce_id] = false;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_history() - Free CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	ce_hist->enable[ce_id] = 0;
	if (ce_hist->data_enable[ce_id]) {
		ce_hist->data_enable[ce_id] = false;
		free_mem_ce_debug_hist_data(scn, ce_id);
	}
	ce_hist->hist_ev[ce_id] = NULL;
}
#else
static inline QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
			   uint32_t src_nentries)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
#endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */
#else
#if defined(HIF_CE_DEBUG_DATA_BUF)

static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
			   uint32_t src_nentries)
{
	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));

	if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
		scn->hif_ce_desc_hist.enable[CE_id] = 0;
		return QDF_STATUS_E_NOMEM;
	} else {
		scn->hif_ce_desc_hist.enable[CE_id] = 1;
		return QDF_STATUS_SUCCESS;
	}
}

static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];

	if (!hist_ev)
		return;

	if (ce_hist->data_enable[CE_id]) {
		ce_hist->data_enable[CE_id] = false;
		free_mem_ce_debug_hist_data(scn, CE_id);
	}

	ce_hist->enable[CE_id] = 0;
	qdf_mem_free(ce_hist->hist_ev[CE_id]);
	ce_hist->hist_ev[CE_id] = NULL;
}

#else

static inline QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
			   uint32_t src_nentries)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
#endif /* HIF_CE_DEBUG_DATA_BUF */
#endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/**
 * reset_ce_debug_history() - reset the index and ce id used for dumping the
 * CE records on the console using sysfs.
 * @scn: hif scn handle
 *
 * Return: None
 */
static inline void reset_ce_debug_history(struct hif_softc *scn)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing
	 */
	ce_hist->hist_index = 0;
	ce_hist->hist_id = 0;
}
#else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
static inline void reset_ce_debug_history(struct hif_softc *scn) { }
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */

void ce_enable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = true;
}

void ce_disable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = false;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
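 *
 * A minimal usage sketch (illustrative only; the attribute values are
 * hypothetical, real callers take them from the host CE configuration
 * tables selected in hif_ce_prepare_config()):
 *
 *	struct CE_attr attr = { 0 };
 *	struct CE_handle *ce_hdl;
 *
 *	attr.flags = 0;
 *	attr.src_nentries = 16;		(rounded up to a power of two)
 *	attr.src_sz_max = 2048;
 *	attr.dest_nentries = 0;		(source-only, host-to-target engine)
 *	ce_hdl = ce_init(scn, CE_id, &attr);
 *	if (!ce_hdl)
 *		handle the ring allocation or target access failure;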
2101 */ 2102 struct CE_handle *ce_init(struct hif_softc *scn, 2103 unsigned int CE_id, struct CE_attr *attr) 2104 { 2105 struct CE_state *CE_state; 2106 uint32_t ctrl_addr; 2107 unsigned int nentries; 2108 bool malloc_CE_state = false; 2109 bool malloc_src_ring = false; 2110 int status; 2111 QDF_STATUS mem_status = QDF_STATUS_SUCCESS; 2112 2113 QDF_ASSERT(CE_id < scn->ce_count); 2114 ctrl_addr = CE_BASE_ADDRESS(CE_id); 2115 CE_state = scn->ce_id_to_state[CE_id]; 2116 2117 if (!CE_state) { 2118 CE_state = 2119 (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state)); 2120 if (!CE_state) 2121 return NULL; 2122 2123 malloc_CE_state = true; 2124 qdf_spinlock_create(&CE_state->ce_index_lock); 2125 2126 CE_state->id = CE_id; 2127 CE_state->ctrl_addr = ctrl_addr; 2128 CE_state->state = CE_RUNNING; 2129 CE_state->attr_flags = attr->flags; 2130 } 2131 CE_state->scn = scn; 2132 CE_state->service = ce_engine_service_reg; 2133 2134 qdf_atomic_init(&CE_state->rx_pending); 2135 if (!attr) { 2136 /* Already initialized; caller wants the handle */ 2137 return (struct CE_handle *)CE_state; 2138 } 2139 2140 if (CE_state->src_sz_max) 2141 QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max); 2142 else 2143 CE_state->src_sz_max = attr->src_sz_max; 2144 2145 ce_init_ce_desc_event_log(scn, CE_id, 2146 attr->src_nentries + attr->dest_nentries); 2147 2148 /* source ring setup */ 2149 nentries = attr->src_nentries; 2150 if (nentries) { 2151 struct CE_ring_state *src_ring; 2152 2153 nentries = roundup_pwr2(nentries); 2154 if (CE_state->src_ring) { 2155 QDF_ASSERT(CE_state->src_ring->nentries == nentries); 2156 } else { 2157 src_ring = CE_state->src_ring = 2158 ce_alloc_ring_state(CE_state, 2159 CE_RING_SRC, 2160 nentries); 2161 if (!src_ring) { 2162 /* cannot allocate src ring. If the 2163 * CE_state is allocated locally free 2164 * CE_State and return error. 2165 */ 2166 hif_err("src ring has no mem"); 2167 if (malloc_CE_state) { 2168 /* allocated CE_state locally */ 2169 qdf_mem_free(CE_state); 2170 malloc_CE_state = false; 2171 } 2172 return NULL; 2173 } 2174 /* we can allocate src ring. Mark that the src ring is 2175 * allocated locally 2176 */ 2177 malloc_src_ring = true; 2178 2179 /* 2180 * Also allocate a shadow src ring in 2181 * regular mem to use for faster access. 2182 */ 2183 src_ring->shadow_base_unaligned = 2184 qdf_mem_malloc(nentries * 2185 sizeof(struct CE_src_desc) + 2186 CE_DESC_RING_ALIGN); 2187 if (!src_ring->shadow_base_unaligned) 2188 goto error_no_dma_mem; 2189 2190 src_ring->shadow_base = (struct CE_src_desc *) 2191 (((size_t) src_ring->shadow_base_unaligned + 2192 CE_DESC_RING_ALIGN - 1) & 2193 ~(CE_DESC_RING_ALIGN - 1)); 2194 2195 status = ce_ring_setup(scn, CE_RING_SRC, CE_id, 2196 src_ring, attr); 2197 if (status < 0) 2198 goto error_target_access; 2199 2200 ce_ring_test_initial_indexes(CE_id, src_ring, 2201 "src_ring"); 2202 } 2203 } 2204 2205 /* destination ring setup */ 2206 nentries = attr->dest_nentries; 2207 if (nentries) { 2208 struct CE_ring_state *dest_ring; 2209 2210 nentries = roundup_pwr2(nentries); 2211 if (CE_state->dest_ring) { 2212 QDF_ASSERT(CE_state->dest_ring->nentries == nentries); 2213 } else { 2214 dest_ring = CE_state->dest_ring = 2215 ce_alloc_ring_state(CE_state, 2216 CE_RING_DEST, 2217 nentries); 2218 if (!dest_ring) { 2219 /* cannot allocate dst ring. If the CE_state 2220 * or src ring is allocated locally free 2221 * CE_State and src ring and return error. 
2222 */ 2223 hif_err("dest ring has no mem"); 2224 goto error_no_dma_mem; 2225 } 2226 2227 status = ce_ring_setup(scn, CE_RING_DEST, CE_id, 2228 dest_ring, attr); 2229 if (status < 0) 2230 goto error_target_access; 2231 2232 ce_ring_test_initial_indexes(CE_id, dest_ring, 2233 "dest_ring"); 2234 2235 /* For srng based target, init status ring here */ 2236 if (ce_srng_based(CE_state->scn)) { 2237 CE_state->status_ring = 2238 ce_alloc_ring_state(CE_state, 2239 CE_RING_STATUS, 2240 nentries); 2241 if (!CE_state->status_ring) { 2242 /*Allocation failed. Cleanup*/ 2243 qdf_mem_free(CE_state->dest_ring); 2244 if (malloc_src_ring) { 2245 qdf_mem_free 2246 (CE_state->src_ring); 2247 CE_state->src_ring = NULL; 2248 malloc_src_ring = false; 2249 } 2250 if (malloc_CE_state) { 2251 /* allocated CE_state locally */ 2252 scn->ce_id_to_state[CE_id] = 2253 NULL; 2254 qdf_mem_free(CE_state); 2255 malloc_CE_state = false; 2256 } 2257 2258 return NULL; 2259 } 2260 2261 status = ce_ring_setup(scn, CE_RING_STATUS, 2262 CE_id, CE_state->status_ring, 2263 attr); 2264 if (status < 0) 2265 goto error_target_access; 2266 2267 } 2268 2269 /* epping */ 2270 /* poll timer */ 2271 if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) { 2272 qdf_timer_init(scn->qdf_dev, 2273 &CE_state->poll_timer, 2274 ce_poll_timeout, 2275 CE_state, 2276 QDF_TIMER_TYPE_WAKE_APPS); 2277 ce_enable_polling(CE_state); 2278 qdf_timer_mod(&CE_state->poll_timer, 2279 CE_POLL_TIMEOUT); 2280 } 2281 } 2282 } 2283 2284 if (!ce_srng_based(scn)) { 2285 /* Enable CE error interrupts */ 2286 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 2287 goto error_target_access; 2288 CE_ERROR_INTR_ENABLE(scn, ctrl_addr); 2289 if (Q_TARGET_ACCESS_END(scn) < 0) 2290 goto error_target_access; 2291 } 2292 2293 qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work, 2294 ce_oom_recovery, CE_state); 2295 2296 /* update the htt_data attribute */ 2297 ce_mark_datapath(CE_state); 2298 scn->ce_id_to_state[CE_id] = CE_state; 2299 2300 mem_status = alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries); 2301 if (mem_status != QDF_STATUS_SUCCESS) 2302 goto error_target_access; 2303 2304 return (struct CE_handle *)CE_state; 2305 2306 error_target_access: 2307 error_no_dma_mem: 2308 ce_fini((struct CE_handle *)CE_state); 2309 return NULL; 2310 } 2311 2312 /** 2313 * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs 2314 * @hif_ctx: HIF Context 2315 * 2316 * API to check if polling is enabled on all CEs. Returns true when polling 2317 * is enabled on all CEs. 
2318 * 2319 * Return: bool 2320 */ 2321 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx) 2322 { 2323 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2324 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2325 struct CE_attr *attr; 2326 int id; 2327 2328 for (id = 0; id < scn->ce_count; id++) { 2329 attr = &hif_state->host_ce_config[id]; 2330 if (attr && (attr->dest_nentries) && 2331 !(attr->flags & CE_ATTR_ENABLE_POLL)) 2332 return false; 2333 } 2334 return true; 2335 } 2336 qdf_export_symbol(hif_is_polled_mode_enabled); 2337 2338 static int hif_get_pktlog_ce_num(struct hif_softc *scn) 2339 { 2340 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2341 int id; 2342 2343 for (id = 0; id < hif_state->sz_tgt_svc_map; id++) { 2344 if (hif_state->tgt_svc_map[id].service_id == PACKET_LOG_SVC) 2345 return hif_state->tgt_svc_map[id].pipenum; 2346 } 2347 return -EINVAL; 2348 } 2349 2350 #ifdef WLAN_FEATURE_FASTPATH 2351 /** 2352 * hif_enable_fastpath() Update that we have enabled fastpath mode 2353 * @hif_ctx: HIF context 2354 * 2355 * For use in data path 2356 * 2357 * Retrun: void 2358 */ 2359 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx) 2360 { 2361 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2362 2363 if (ce_srng_based(scn)) { 2364 hif_warn("srng rings do not support fastpath"); 2365 return; 2366 } 2367 hif_debug("Enabling fastpath mode"); 2368 scn->fastpath_mode_on = true; 2369 } 2370 2371 /** 2372 * hif_is_fastpath_mode_enabled - API to query if fasthpath mode is enabled 2373 * @hif_ctx: HIF Context 2374 * 2375 * For use in data path to skip HTC 2376 * 2377 * Return: bool 2378 */ 2379 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx) 2380 { 2381 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2382 2383 return scn->fastpath_mode_on; 2384 } 2385 2386 /** 2387 * hif_get_ce_handle - API to get CE handle for FastPath mode 2388 * @hif_ctx: HIF Context 2389 * @id: CopyEngine Id 2390 * 2391 * API to return CE handle for fastpath mode 2392 * 2393 * Return: void 2394 */ 2395 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id) 2396 { 2397 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2398 2399 return scn->ce_id_to_state[id]; 2400 } 2401 qdf_export_symbol(hif_get_ce_handle); 2402 2403 /** 2404 * ce_h2t_tx_ce_cleanup() Place holder function for H2T CE cleanup. 2405 * No processing is required inside this function. 2406 * @ce_hdl: Cope engine handle 2407 * Using an assert, this function makes sure that, 2408 * the TX CE has been processed completely. 2409 * 2410 * This is called while dismantling CE structures. No other thread 2411 * should be using these structures while dismantling is occurring 2412 * therfore no locking is needed. 2413 * 2414 * Return: none 2415 */ 2416 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl) 2417 { 2418 struct CE_state *ce_state = (struct CE_state *)ce_hdl; 2419 struct CE_ring_state *src_ring = ce_state->src_ring; 2420 struct hif_softc *sc = ce_state->scn; 2421 uint32_t sw_index, write_index; 2422 2423 if (hif_is_nss_wifi_enabled(sc)) 2424 return; 2425 2426 if (sc->fastpath_mode_on && ce_state->htt_tx_data) { 2427 hif_debug("Fastpath mode ON, Cleaning up HTT Tx CE"); 2428 sw_index = src_ring->sw_index; 2429 write_index = src_ring->sw_index; 2430 2431 /* At this point Tx CE should be clean */ 2432 qdf_assert_always(sw_index == write_index); 2433 } 2434 } 2435 2436 /** 2437 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue. 
 * @ce_hdl: Handle to CE
 *
 * These buffers are never allocated on the fly, but
 * are allocated only once during HIF start and freed
 * only once during HIF stop.
 * NOTE:
 * The assumption here is there is no in-flight DMA in progress
 * currently, so that buffers can be freed up safely.
 *
 * Return: None
 */
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *dst_ring = ce_state->dest_ring;
	qdf_nbuf_t nbuf;
	int i;

	if (ce_state->scn->fastpath_mode_on == false)
		return;

	if (!ce_state->htt_rx_data)
		return;

	/*
	 * In fastpath mode the datapath CE ring is kept completely full:
	 * unlike other CEs it does not leave one blank space to
	 * distinguish between an empty queue and a full queue. So free
	 * all the entries.
	 */
	for (i = 0; i < dst_ring->nentries; i++) {
		nbuf = dst_ring->per_transfer_context[i];

		/*
		 * The reasons for doing this check are:
		 * 1) Protect against calling cleanup before allocating buffers
		 * 2) In a corner case, fastpath_mode_on may be set, but we
		 *    could have a partially filled ring, because of a memory
		 *    allocation failure in the middle of allocating the ring.
		 *    This check accounts for that case; checking the
		 *    fastpath_mode_on flag or started flag would not have
		 *    covered it. This is not in the performance path,
		 *    so it is OK to do this.
		 */
		if (nbuf) {
			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
		}
	}
}

/**
 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
 * @scn: HIF handle
 *
 * Datapath Rx CEs are a special case, where we reuse all the message buffers.
 * Hence we have to post all the entries in the pipe, even, in the beginning
 * unlike for other CE pipes where one less than dest_nentries are filled in
 * the beginning.
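 *
 * Worked example (hypothetical sizes): with dest_nentries = 512, a
 * regular pipe keeps one slot empty and posts at most 511 buffers,
 * while a fastpath HTT Rx pipe is given one extra credit here so all
 * 512 entries can be kept filled.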
2498 * 2499 * Return: None 2500 */ 2501 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn) 2502 { 2503 int pipe_num; 2504 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2505 2506 if (scn->fastpath_mode_on == false) 2507 return; 2508 2509 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2510 struct HIF_CE_pipe_info *pipe_info = 2511 &hif_state->pipe_info[pipe_num]; 2512 struct CE_state *ce_state = 2513 scn->ce_id_to_state[pipe_info->pipe_num]; 2514 2515 if (ce_state->htt_rx_data) 2516 atomic_inc(&pipe_info->recv_bufs_needed); 2517 } 2518 } 2519 #else 2520 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn) 2521 { 2522 } 2523 2524 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn) 2525 { 2526 return false; 2527 } 2528 #endif /* WLAN_FEATURE_FASTPATH */ 2529 2530 void ce_fini(struct CE_handle *copyeng) 2531 { 2532 struct CE_state *CE_state = (struct CE_state *)copyeng; 2533 unsigned int CE_id = CE_state->id; 2534 struct hif_softc *scn = CE_state->scn; 2535 uint32_t desc_size; 2536 2537 bool inited = CE_state->timer_inited; 2538 CE_state->state = CE_UNUSED; 2539 scn->ce_id_to_state[CE_id] = NULL; 2540 /* Set the flag to false first to stop processing in ce_poll_timeout */ 2541 ce_disable_polling(CE_state); 2542 2543 qdf_lro_deinit(CE_state->lro_data); 2544 2545 if (CE_state->src_ring) { 2546 /* Cleanup the datapath Tx ring */ 2547 ce_h2t_tx_ce_cleanup(copyeng); 2548 2549 desc_size = ce_get_desc_size(scn, CE_RING_SRC); 2550 if (CE_state->src_ring->shadow_base_unaligned) 2551 qdf_mem_free(CE_state->src_ring->shadow_base_unaligned); 2552 if (CE_state->src_ring->base_addr_owner_space_unaligned) 2553 ce_free_desc_ring(scn, CE_state->id, 2554 CE_state->src_ring, 2555 desc_size); 2556 ce_srng_cleanup(scn, CE_state, CE_RING_SRC); 2557 qdf_mem_free(CE_state->src_ring); 2558 } 2559 if (CE_state->dest_ring) { 2560 /* Cleanup the datapath Rx ring */ 2561 ce_t2h_msg_ce_cleanup(copyeng); 2562 2563 desc_size = ce_get_desc_size(scn, CE_RING_DEST); 2564 if (CE_state->dest_ring->base_addr_owner_space_unaligned) 2565 ce_free_desc_ring(scn, CE_state->id, 2566 CE_state->dest_ring, 2567 desc_size); 2568 ce_srng_cleanup(scn, CE_state, CE_RING_DEST); 2569 qdf_mem_free(CE_state->dest_ring); 2570 2571 /* epping */ 2572 if (inited) { 2573 qdf_timer_free(&CE_state->poll_timer); 2574 } 2575 } 2576 if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) { 2577 /* Cleanup the datapath Tx ring */ 2578 ce_h2t_tx_ce_cleanup(copyeng); 2579 2580 if (CE_state->status_ring->shadow_base_unaligned) 2581 qdf_mem_free( 2582 CE_state->status_ring->shadow_base_unaligned); 2583 2584 desc_size = ce_get_desc_size(scn, CE_RING_STATUS); 2585 if (CE_state->status_ring->base_addr_owner_space_unaligned) 2586 ce_free_desc_ring(scn, CE_state->id, 2587 CE_state->status_ring, 2588 desc_size); 2589 ce_srng_cleanup(scn, CE_state, CE_RING_STATUS); 2590 qdf_mem_free(CE_state->status_ring); 2591 } 2592 2593 free_mem_ce_debug_history(scn, CE_id); 2594 reset_ce_debug_history(scn); 2595 ce_deinit_ce_desc_event_log(scn, CE_id); 2596 2597 qdf_spinlock_destroy(&CE_state->ce_index_lock); 2598 qdf_mem_free(CE_state); 2599 } 2600 2601 void hif_detach_htc(struct hif_opaque_softc *hif_ctx) 2602 { 2603 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 2604 2605 qdf_mem_zero(&hif_state->msg_callbacks_pending, 2606 sizeof(hif_state->msg_callbacks_pending)); 2607 qdf_mem_zero(&hif_state->msg_callbacks_current, 2608 sizeof(hif_state->msg_callbacks_current)); 2609 } 2610 2611 /* 
Send the first nbytes bytes of the buffer */ 2612 QDF_STATUS 2613 hif_send_head(struct hif_opaque_softc *hif_ctx, 2614 uint8_t pipe, unsigned int transfer_id, unsigned int nbytes, 2615 qdf_nbuf_t nbuf, unsigned int data_attr) 2616 { 2617 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2618 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 2619 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); 2620 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 2621 int bytes = nbytes, nfrags = 0; 2622 struct ce_sendlist sendlist; 2623 int i = 0; 2624 QDF_STATUS status; 2625 unsigned int mux_id = 0; 2626 2627 if (nbytes > qdf_nbuf_len(nbuf)) { 2628 hif_err("nbytes: %d nbuf_len: %d", nbytes, 2629 (uint32_t)qdf_nbuf_len(nbuf)); 2630 QDF_ASSERT(0); 2631 } 2632 2633 transfer_id = 2634 (mux_id & MUX_ID_MASK) | 2635 (transfer_id & TRANSACTION_ID_MASK); 2636 data_attr &= DESC_DATA_FLAG_MASK; 2637 /* 2638 * The common case involves sending multiple fragments within a 2639 * single download (the tx descriptor and the tx frame header). 2640 * So, optimize for the case of multiple fragments by not even 2641 * checking whether it's necessary to use a sendlist. 2642 * The overhead of using a sendlist for a single buffer download 2643 * is not a big deal, since it happens rarely (for WMI messages). 2644 */ 2645 ce_sendlist_init(&sendlist); 2646 do { 2647 qdf_dma_addr_t frag_paddr; 2648 int frag_bytes; 2649 2650 frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags); 2651 frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags); 2652 /* 2653 * Clear the packet offset for all but the first CE desc. 2654 */ 2655 if (i++ > 0) 2656 data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M; 2657 2658 status = ce_sendlist_buf_add(&sendlist, frag_paddr, 2659 frag_bytes > 2660 bytes ? bytes : frag_bytes, 2661 qdf_nbuf_get_frag_is_wordstream 2662 (nbuf, 2663 nfrags) ? 0 : 2664 CE_SEND_FLAG_SWAP_DISABLE, 2665 data_attr); 2666 if (status != QDF_STATUS_SUCCESS) { 2667 hif_err("frag_num: %d larger than limit (status=%d)", 2668 nfrags, status); 2669 return status; 2670 } 2671 bytes -= frag_bytes; 2672 nfrags++; 2673 } while (bytes > 0); 2674 2675 /* Make sure we have resources to handle this request */ 2676 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock); 2677 if (pipe_info->num_sends_allowed < nfrags) { 2678 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); 2679 ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE); 2680 return QDF_STATUS_E_RESOURCES; 2681 } 2682 pipe_info->num_sends_allowed -= nfrags; 2683 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); 2684 2685 if (qdf_unlikely(!ce_hdl)) { 2686 hif_err("CE handle is null"); 2687 return A_ERROR; 2688 } 2689 2690 QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF); 2691 DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD, 2692 QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf), 2693 sizeof(qdf_nbuf_data(nbuf)), QDF_TX)); 2694 status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id); 2695 QDF_ASSERT(status == QDF_STATUS_SUCCESS); 2696 2697 return status; 2698 } 2699 2700 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe, 2701 int force) 2702 { 2703 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2704 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 2705 2706 if (!force) { 2707 int resources; 2708 /* 2709 * Decide whether to actually poll for completions, or just 2710 * wait for a later chance. 
If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(hif_ctx, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
				 1))
			return;
	}
#ifdef ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}

uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT)
			msg_callbacks->txCompletionHandler(
				msg_callbacks->Context,
				transfer_context, transfer_id,
				toeplitz_hash_result);

		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuf: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, configures the length in the netbuf,
 * and calls the upper layer callback.
 *
 * Return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
				  qdf_nbuf_t netbuf, int nbytes,
				  struct HIF_CE_pipe_info *pipe_info) {
	if (nbytes <= pipe_info->buf_sz) {
		qdf_nbuf_set_pktlen(netbuf, nbytes);
		msg_callbacks->
			rxCompletionHandler(msg_callbacks->Context,
					    netbuf, pipe_info->pipe_num);
	} else {
		hif_err("Invalid Rx msg buf: %pK nbytes: %d", netbuf, nbytes);
		qdf_nbuf_free(netbuf);
	}
}

/* Called by lower (CE) layer when data is received from the Target.
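 * Drains all completed receive descriptors for the pipe: each buffer is
 * unmapped, the ring is replenished via hif_post_recv_buffers_for_pipe(),
 * and the payload is handed to the upper-layer rx callback, yielding
 * early if the service budget is exhausted.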
 */
static void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
		hif_pm_runtime_mark_last_busy(hif_ctx);
		qdf_nbuf_unmap_single(scn->qdf_dev,
				      (qdf_nbuf_t) transfer_context,
				      QDF_DMA_FROM_DEVICE);

		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (scn->target_status == TARGET_STATUS_RESET)
			qdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set up force_break flag if the number of receives reaches
		 * MAX_NUM_OF_RECEIVES
		 */
		ce_state->receive_count++;
		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == QDF_STATUS_SUCCESS);
}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

void
hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		     sizeof(hif_state->msg_callbacks_pending));
}

static int hif_completion_thread_startup_by_ceid(struct HIF_CE_state *hif_state,
						 int pipe_num)
{
	struct CE_attr attr;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;
	struct HIF_CE_pipe_info *pipe_info;
	struct CE_state *ce_state;

	if (pipe_num >= CE_COUNT_MAX)
		return -EINVAL;

	pipe_info = &hif_state->pipe_info[pipe_num];
	ce_state = scn->ce_id_to_state[pipe_num];

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		hif_err("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	attr = hif_state->host_ce_config[pipe_num];
	if (attr.src_nentries) {
		/* pipe used to send to target */
		hif_debug("%s: pipe_num:%d pipe_info:0x%pK\n",
			  __func__, pipe_num, pipe_info);
		ce_send_cb_register(pipe_info->ce_hdl,
				    hif_pci_ce_send_done, pipe_info,
				    attr.flags & CE_ATTR_DISABLE_INTR);
		pipe_info->num_sends_allowed = attr.src_nentries - 1;
	}
	if (attr.dest_nentries) {
		hif_debug("%s: pipe_num:%d pipe_info:0x%pK\n",
			  __func__, pipe_num, pipe_info);
		/* pipe used to receive from target */
		ce_recv_cb_register(pipe_info->ce_hdl,
				    hif_pci_ce_recv_data, pipe_info,
				    attr.flags & CE_ATTR_DISABLE_INTR);
	}

	if (attr.src_nentries)
qdf_spinlock_create(&pipe_info->completion_freeq_lock); 2910 2911 if (!(ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND)) 2912 qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks, 2913 sizeof(pipe_info->pipe_callbacks)); 2914 2915 return 0; 2916 } 2917 2918 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state) 2919 { 2920 struct CE_handle *ce_diag = hif_state->ce_diag; 2921 int pipe_num, ret; 2922 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 2923 2924 /* daemonize("hif_compl_thread"); */ 2925 2926 if (scn->ce_count == 0) { 2927 hif_err("ce_count is 0"); 2928 return -EINVAL; 2929 } 2930 2931 2932 A_TARGET_ACCESS_LIKELY(scn); 2933 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2934 struct HIF_CE_pipe_info *pipe_info; 2935 2936 pipe_info = &hif_state->pipe_info[pipe_num]; 2937 if (pipe_info->ce_hdl == ce_diag) 2938 continue; /* Handle Diagnostic CE specially */ 2939 2940 ret = hif_completion_thread_startup_by_ceid(hif_state, 2941 pipe_num); 2942 if (ret < 0) 2943 return ret; 2944 2945 } 2946 2947 A_TARGET_ACCESS_UNLIKELY(scn); 2948 return 0; 2949 } 2950 2951 /* 2952 * Install pending msg callbacks. 2953 * 2954 * TBDXXX: This hack is needed because upper layers install msg callbacks 2955 * for use with HTC before BMI is done; yet this HIF implementation 2956 * needs to continue to use BMI msg callbacks. Really, upper layers 2957 * should not register HTC callbacks until AFTER BMI phase. 2958 */ 2959 static void hif_msg_callbacks_install(struct hif_softc *scn) 2960 { 2961 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2962 2963 qdf_mem_copy(&hif_state->msg_callbacks_current, 2964 &hif_state->msg_callbacks_pending, 2965 sizeof(hif_state->msg_callbacks_pending)); 2966 } 2967 2968 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe, 2969 uint8_t *DLPipe) 2970 { 2971 int ul_is_polled, dl_is_polled; 2972 2973 (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC, 2974 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled); 2975 } 2976 2977 /** 2978 * hif_dump_pipe_debug_count() - Log error count 2979 * @scn: hif_softc pointer. 
2980 * 2981 * Output the pipe error counts of each pipe to log file 2982 * 2983 * Return: N/A 2984 */ 2985 void hif_dump_pipe_debug_count(struct hif_softc *scn) 2986 { 2987 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2988 int pipe_num; 2989 2990 if (!hif_state) { 2991 hif_err("hif_state is NULL"); 2992 return; 2993 } 2994 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2995 struct HIF_CE_pipe_info *pipe_info; 2996 2997 pipe_info = &hif_state->pipe_info[pipe_num]; 2998 2999 if (pipe_info->nbuf_alloc_err_count > 0 || 3000 pipe_info->nbuf_dma_err_count > 0 || 3001 pipe_info->nbuf_ce_enqueue_err_count) 3002 hif_err( 3003 "pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u", 3004 pipe_info->pipe_num, 3005 atomic_read(&pipe_info->recv_bufs_needed), 3006 pipe_info->nbuf_alloc_err_count, 3007 pipe_info->nbuf_dma_err_count, 3008 pipe_info->nbuf_ce_enqueue_err_count); 3009 } 3010 } 3011 3012 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info, 3013 void *nbuf, uint32_t *error_cnt, 3014 enum hif_ce_event_type failure_type, 3015 const char *failure_type_string) 3016 { 3017 int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed); 3018 struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl; 3019 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); 3020 int ce_id = CE_state->id; 3021 uint32_t error_cnt_tmp; 3022 3023 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 3024 error_cnt_tmp = ++(*error_cnt); 3025 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 3026 hif_debug("pipe_num: %d, needed: %d, err_cnt: %u, fail_type: %s", 3027 pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp, 3028 failure_type_string); 3029 hif_record_ce_desc_event(scn, ce_id, failure_type, 3030 NULL, nbuf, bufs_needed_tmp, 0); 3031 /* if we fail to allocate the last buffer for an rx pipe, 3032 * there is no trigger to refill the ce and we will 3033 * eventually crash 3034 */ 3035 if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1 || 3036 (ce_srng_based(scn) && 3037 bufs_needed_tmp == CE_state->dest_ring->nentries - 2)) 3038 qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work); 3039 3040 } 3041 3042 3043 3044 3045 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info) 3046 { 3047 struct CE_handle *ce_hdl; 3048 qdf_size_t buf_sz; 3049 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); 3050 QDF_STATUS status; 3051 uint32_t bufs_posted = 0; 3052 unsigned int ce_id; 3053 3054 buf_sz = pipe_info->buf_sz; 3055 if (buf_sz == 0) { 3056 /* Unused Copy Engine */ 3057 return QDF_STATUS_SUCCESS; 3058 } 3059 3060 ce_hdl = pipe_info->ce_hdl; 3061 ce_id = ((struct CE_state *)ce_hdl)->id; 3062 3063 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 3064 while (atomic_read(&pipe_info->recv_bufs_needed) > 0) { 3065 qdf_dma_addr_t CE_data; /* CE space buffer address */ 3066 qdf_nbuf_t nbuf; 3067 3068 atomic_dec(&pipe_info->recv_bufs_needed); 3069 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 3070 3071 hif_record_ce_desc_event(scn, ce_id, 3072 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL, 3073 0, 0); 3074 nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false); 3075 if (!nbuf) { 3076 hif_post_recv_buffers_failure(pipe_info, nbuf, 3077 &pipe_info->nbuf_alloc_err_count, 3078 HIF_RX_NBUF_ALLOC_FAILURE, 3079 "HIF_RX_NBUF_ALLOC_FAILURE"); 3080 return QDF_STATUS_E_NOMEM; 3081 } 3082 3083 hif_record_ce_desc_event(scn, ce_id, 3084 
HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf, 3085 0, 0); 3086 /* 3087 * qdf_nbuf_peek_header(nbuf, &data, &unused); 3088 * CE_data = dma_map_single(dev, data, buf_sz, ); 3089 * DMA_FROM_DEVICE); 3090 */ 3091 status = qdf_nbuf_map_single(scn->qdf_dev, nbuf, 3092 QDF_DMA_FROM_DEVICE); 3093 3094 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) { 3095 hif_post_recv_buffers_failure(pipe_info, nbuf, 3096 &pipe_info->nbuf_dma_err_count, 3097 HIF_RX_NBUF_MAP_FAILURE, 3098 "HIF_RX_NBUF_MAP_FAILURE"); 3099 qdf_nbuf_free(nbuf); 3100 return status; 3101 } 3102 3103 CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0); 3104 hif_record_ce_desc_event(scn, ce_id, 3105 HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf, 3106 0, 0); 3107 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data, 3108 buf_sz, DMA_FROM_DEVICE); 3109 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data); 3110 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) { 3111 hif_post_recv_buffers_failure(pipe_info, nbuf, 3112 &pipe_info->nbuf_ce_enqueue_err_count, 3113 HIF_RX_NBUF_ENQUEUE_FAILURE, 3114 "HIF_RX_NBUF_ENQUEUE_FAILURE"); 3115 3116 qdf_nbuf_unmap_single(scn->qdf_dev, nbuf, 3117 QDF_DMA_FROM_DEVICE); 3118 qdf_nbuf_free(nbuf); 3119 return status; 3120 } 3121 3122 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 3123 bufs_posted++; 3124 } 3125 pipe_info->nbuf_alloc_err_count = 3126 (pipe_info->nbuf_alloc_err_count > bufs_posted) ? 3127 pipe_info->nbuf_alloc_err_count - bufs_posted : 0; 3128 pipe_info->nbuf_dma_err_count = 3129 (pipe_info->nbuf_dma_err_count > bufs_posted) ? 3130 pipe_info->nbuf_dma_err_count - bufs_posted : 0; 3131 pipe_info->nbuf_ce_enqueue_err_count = 3132 (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ? 3133 pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0; 3134 3135 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 3136 3137 return QDF_STATUS_SUCCESS; 3138 } 3139 3140 /* 3141 * Try to post all desired receive buffers for all pipes. 3142 * Returns 0 for non fastpath rx copy engine as 3143 * oom_allocation_work will be scheduled to recover any 3144 * failures, non-zero if unable to completely replenish 3145 * receive buffers for fastpath rx Copy engine. 
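 * A failure on a non-fastpath pipe is deliberately absorbed: recovery
 * happens asynchronously, either through a later rx completion that
 * reposts buffers or, if a ring ran completely empty, through the
 * oom_allocation_work scheduled by hif_post_recv_buffers_failure().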
3146 */ 3147 static QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn) 3148 { 3149 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3150 int pipe_num; 3151 struct CE_state *ce_state = NULL; 3152 QDF_STATUS qdf_status; 3153 3154 A_TARGET_ACCESS_LIKELY(scn); 3155 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 3156 struct HIF_CE_pipe_info *pipe_info; 3157 3158 ce_state = scn->ce_id_to_state[pipe_num]; 3159 pipe_info = &hif_state->pipe_info[pipe_num]; 3160 3161 if (!ce_state) 3162 continue; 3163 3164 /* Do not init dynamic CEs, during initial load */ 3165 if (ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND) 3166 continue; 3167 3168 if (hif_is_nss_wifi_enabled(scn) && 3169 ce_state && (ce_state->htt_rx_data)) 3170 continue; 3171 3172 qdf_status = hif_post_recv_buffers_for_pipe(pipe_info); 3173 if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state && 3174 ce_state->htt_rx_data && 3175 scn->fastpath_mode_on) { 3176 A_TARGET_ACCESS_UNLIKELY(scn); 3177 return qdf_status; 3178 } 3179 } 3180 3181 A_TARGET_ACCESS_UNLIKELY(scn); 3182 3183 return QDF_STATUS_SUCCESS; 3184 } 3185 3186 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx) 3187 { 3188 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 3189 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3190 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; 3191 3192 hif_update_fastpath_recv_bufs_cnt(scn); 3193 3194 hif_msg_callbacks_install(scn); 3195 3196 if (hif_completion_thread_startup(hif_state)) 3197 return QDF_STATUS_E_FAILURE; 3198 3199 /* enable buffer cleanup */ 3200 hif_state->started = true; 3201 3202 /* Post buffers once to start things off. */ 3203 qdf_status = hif_post_recv_buffers(scn); 3204 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { 3205 /* cleanup is done in hif_ce_disable */ 3206 hif_err("Failed to post buffers"); 3207 return qdf_status; 3208 } 3209 3210 return qdf_status; 3211 } 3212 3213 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info) 3214 { 3215 struct hif_softc *scn; 3216 struct CE_handle *ce_hdl; 3217 uint32_t buf_sz; 3218 struct HIF_CE_state *hif_state; 3219 qdf_nbuf_t netbuf; 3220 qdf_dma_addr_t CE_data; 3221 void *per_CE_context; 3222 3223 buf_sz = pipe_info->buf_sz; 3224 /* Unused Copy Engine */ 3225 if (buf_sz == 0) 3226 return; 3227 3228 3229 hif_state = pipe_info->HIF_CE_state; 3230 if (!hif_state->started) 3231 return; 3232 3233 scn = HIF_GET_SOFTC(hif_state); 3234 ce_hdl = pipe_info->ce_hdl; 3235 3236 if (!scn->qdf_dev) 3237 return; 3238 while (ce_revoke_recv_next 3239 (ce_hdl, &per_CE_context, (void **)&netbuf, 3240 &CE_data) == QDF_STATUS_SUCCESS) { 3241 if (netbuf) { 3242 qdf_nbuf_unmap_single(scn->qdf_dev, netbuf, 3243 QDF_DMA_FROM_DEVICE); 3244 qdf_nbuf_free(netbuf); 3245 } 3246 } 3247 } 3248 3249 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info) 3250 { 3251 struct CE_handle *ce_hdl; 3252 struct HIF_CE_state *hif_state; 3253 struct hif_softc *scn; 3254 qdf_nbuf_t netbuf; 3255 void *per_CE_context; 3256 qdf_dma_addr_t CE_data; 3257 unsigned int nbytes; 3258 unsigned int id; 3259 uint32_t buf_sz; 3260 uint32_t toeplitz_hash_result; 3261 3262 buf_sz = pipe_info->buf_sz; 3263 if (buf_sz == 0) { 3264 /* Unused Copy Engine */ 3265 return; 3266 } 3267 3268 hif_state = pipe_info->HIF_CE_state; 3269 if (!hif_state->started) { 3270 return; 3271 } 3272 3273 scn = HIF_GET_SOFTC(hif_state); 3274 3275 ce_hdl = pipe_info->ce_hdl; 3276 3277 while (ce_cancel_send_next 3278 (ce_hdl, &per_CE_context, 3279 (void **)&netbuf, &CE_data, &nbytes, 3280 &id, 
&toeplitz_hash_result) == QDF_STATUS_SUCCESS) { 3281 if (netbuf != CE_SENDLIST_ITEM_CTXT) { 3282 /* 3283 * Packets enqueued by htt_h2t_ver_req_msg() and 3284 * htt_h2t_rx_ring_cfg_msg_ll() have already been 3285 * freed in htt_htc_misc_pkt_pool_free() in 3286 * wlantl_close(), so do not free them here again 3287 * by checking whether it's the endpoint 3288 * which they are queued in. 3289 */ 3290 if (id == scn->htc_htt_tx_endpoint) 3291 return; 3292 /* Indicate the completion to higher 3293 * layer to free the buffer 3294 */ 3295 if (pipe_info->pipe_callbacks.txCompletionHandler) 3296 pipe_info->pipe_callbacks. 3297 txCompletionHandler(pipe_info-> 3298 pipe_callbacks.Context, 3299 netbuf, id, toeplitz_hash_result); 3300 } 3301 } 3302 } 3303 3304 /* 3305 * Cleanup residual buffers for device shutdown: 3306 * buffers that were enqueued for receive 3307 * buffers that were to be sent 3308 * Note: Buffers that had completed but which were 3309 * not yet processed are on a completion queue. They 3310 * are handled when the completion thread shuts down. 3311 */ 3312 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state) 3313 { 3314 int pipe_num; 3315 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 3316 struct CE_state *ce_state; 3317 3318 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 3319 struct HIF_CE_pipe_info *pipe_info; 3320 3321 ce_state = scn->ce_id_to_state[pipe_num]; 3322 if (hif_is_nss_wifi_enabled(scn) && ce_state && 3323 ((ce_state->htt_tx_data) || 3324 (ce_state->htt_rx_data))) { 3325 continue; 3326 } 3327 3328 pipe_info = &hif_state->pipe_info[pipe_num]; 3329 hif_recv_buffer_cleanup_on_pipe(pipe_info); 3330 hif_send_buffer_cleanup_on_pipe(pipe_info); 3331 } 3332 } 3333 3334 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx) 3335 { 3336 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 3337 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3338 3339 hif_buffer_cleanup(hif_state); 3340 } 3341 3342 static void hif_destroy_oom_work(struct hif_softc *scn) 3343 { 3344 struct CE_state *ce_state; 3345 int ce_id; 3346 3347 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 3348 ce_state = scn->ce_id_to_state[ce_id]; 3349 if (ce_state) 3350 qdf_destroy_work(scn->qdf_dev, 3351 &ce_state->oom_allocation_work); 3352 } 3353 } 3354 3355 void hif_ce_stop(struct hif_softc *scn) 3356 { 3357 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3358 int pipe_num; 3359 3360 /* 3361 * before cleaning up any memory, ensure irq & 3362 * bottom half contexts will not be re-entered 3363 */ 3364 hif_disable_isr(&scn->osc); 3365 hif_destroy_oom_work(scn); 3366 scn->hif_init_done = false; 3367 3368 /* 3369 * At this point, asynchronous threads are stopped, 3370 * The Target should not DMA nor interrupt, Host code may 3371 * not initiate anything more. So we just need to clean 3372 * up Host-side state. 
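 * The teardown order matters: interrupts and the per-CE OOM recovery
 * work were stopped above, buffers are reclaimed next, and only then
 * is each copy engine destroyed with ce_fini() below.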
3373 */ 3374 3375 if (scn->athdiag_procfs_inited) { 3376 athdiag_procfs_remove(); 3377 scn->athdiag_procfs_inited = false; 3378 } 3379 3380 hif_buffer_cleanup(hif_state); 3381 3382 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 3383 struct HIF_CE_pipe_info *pipe_info; 3384 struct CE_attr attr; 3385 struct CE_handle *ce_diag = hif_state->ce_diag; 3386 3387 pipe_info = &hif_state->pipe_info[pipe_num]; 3388 if (pipe_info->ce_hdl) { 3389 if (pipe_info->ce_hdl != ce_diag && 3390 hif_state->started) { 3391 attr = hif_state->host_ce_config[pipe_num]; 3392 if (attr.src_nentries) 3393 qdf_spinlock_destroy(&pipe_info-> 3394 completion_freeq_lock); 3395 } 3396 ce_fini(pipe_info->ce_hdl); 3397 pipe_info->ce_hdl = NULL; 3398 pipe_info->buf_sz = 0; 3399 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); 3400 } 3401 } 3402 3403 if (hif_state->sleep_timer_init) { 3404 qdf_timer_stop(&hif_state->sleep_timer); 3405 qdf_timer_free(&hif_state->sleep_timer); 3406 hif_state->sleep_timer_init = false; 3407 } 3408 3409 hif_state->started = false; 3410 } 3411 3412 static void hif_get_shadow_reg_cfg(struct hif_softc *scn, 3413 struct shadow_reg_cfg 3414 **target_shadow_reg_cfg_ret, 3415 uint32_t *shadow_cfg_sz_ret) 3416 { 3417 if (target_shadow_reg_cfg_ret) 3418 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg; 3419 if (shadow_cfg_sz_ret) 3420 *shadow_cfg_sz_ret = shadow_cfg_sz; 3421 } 3422 3423 /** 3424 * hif_get_target_ce_config() - get copy engine configuration 3425 * @target_ce_config_ret: basic copy engine configuration 3426 * @target_ce_config_sz_ret: size of the basic configuration in bytes 3427 * @target_service_to_ce_map_ret: service mapping for the copy engines 3428 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes 3429 * @target_shadow_reg_cfg_ret: shadow register configuration 3430 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes 3431 * 3432 * providing accessor to these values outside of this file. 3433 * currently these are stored in static pointers to const sections. 3434 * there are multiple configurations that are selected from at compile time. 3435 * Runtime selection would need to consider mode, target type and bus type. 3436 * 3437 * Return: return by parameter. 
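 *
 * See hif_wlan_enable() below for the typical caller, which also
 * converts the returned sizes from bytes to element counts before
 * passing the tables to the platform driver.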
3438 */ 3439 void hif_get_target_ce_config(struct hif_softc *scn, 3440 struct CE_pipe_config **target_ce_config_ret, 3441 uint32_t *target_ce_config_sz_ret, 3442 struct service_to_pipe **target_service_to_ce_map_ret, 3443 uint32_t *target_service_to_ce_map_sz_ret, 3444 struct shadow_reg_cfg **target_shadow_reg_cfg_ret, 3445 uint32_t *shadow_cfg_sz_ret) 3446 { 3447 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3448 3449 *target_ce_config_ret = hif_state->target_ce_config; 3450 *target_ce_config_sz_ret = hif_state->target_ce_config_sz; 3451 3452 hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret, 3453 target_service_to_ce_map_sz_ret); 3454 hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret, 3455 shadow_cfg_sz_ret); 3456 } 3457 3458 #ifdef CONFIG_SHADOW_V2 3459 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg) 3460 { 3461 int i; 3462 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3463 "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg); 3464 3465 for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) { 3466 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 3467 "%s: i %d, val %x", __func__, i, 3468 cfg->shadow_reg_v2_cfg[i].addr); 3469 } 3470 } 3471 3472 #else 3473 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg) 3474 { 3475 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3476 "%s: CONFIG_SHADOW_V2 not defined", __func__); 3477 } 3478 #endif 3479 3480 #ifdef ADRASTEA_RRI_ON_DDR 3481 /** 3482 * hif_get_src_ring_read_index(): Called to get the SRRI 3483 * 3484 * @scn: hif_softc pointer 3485 * @CE_ctrl_addr: base address of the CE whose RRI is to be read 3486 * 3487 * This function returns the SRRI to the caller. For CEs that 3488 * dont have interrupts enabled, we look at the DDR based SRRI 3489 * 3490 * Return: SRRI 3491 */ 3492 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn, 3493 uint32_t CE_ctrl_addr) 3494 { 3495 struct CE_attr attr; 3496 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3497 3498 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)]; 3499 if (attr.flags & CE_ATTR_DISABLE_INTR) { 3500 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr); 3501 } else { 3502 if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) 3503 return A_TARGET_READ(scn, 3504 (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS); 3505 else 3506 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, 3507 CE_ctrl_addr); 3508 } 3509 } 3510 3511 /** 3512 * hif_get_dst_ring_read_index(): Called to get the DRRI 3513 * 3514 * @scn: hif_softc pointer 3515 * @CE_ctrl_addr: base address of the CE whose RRI is to be read 3516 * 3517 * This function returns the DRRI to the caller. 
For CEs that 3518 * dont have interrupts enabled, we look at the DDR based DRRI 3519 * 3520 * Return: DRRI 3521 */ 3522 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn, 3523 uint32_t CE_ctrl_addr) 3524 { 3525 struct CE_attr attr; 3526 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3527 3528 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)]; 3529 3530 if (attr.flags & CE_ATTR_DISABLE_INTR) { 3531 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr); 3532 } else { 3533 if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) 3534 return A_TARGET_READ(scn, 3535 (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS); 3536 else 3537 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, 3538 CE_ctrl_addr); 3539 } 3540 } 3541 3542 /** 3543 * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr 3544 * @scn: hif_softc pointer 3545 * 3546 * Return: qdf status 3547 */ 3548 static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn) 3549 { 3550 qdf_dma_addr_t paddr_rri_on_ddr = 0; 3551 3552 scn->vaddr_rri_on_ddr = 3553 (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev, 3554 scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)), 3555 &paddr_rri_on_ddr); 3556 3557 if (!scn->vaddr_rri_on_ddr) { 3558 hif_err("dmaable page alloc fail"); 3559 return QDF_STATUS_E_NOMEM; 3560 } 3561 3562 scn->paddr_rri_on_ddr = paddr_rri_on_ddr; 3563 3564 qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t)); 3565 3566 return QDF_STATUS_SUCCESS; 3567 } 3568 #endif 3569 3570 #if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR) 3571 /** 3572 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism 3573 * 3574 * @scn: hif_softc pointer 3575 * 3576 * This function allocates non cached memory on ddr and sends 3577 * the physical address of this memory to the CE hardware. The 3578 * hardware updates the RRI on this particular location. 3579 * 3580 * Return: None 3581 */ 3582 static inline void hif_config_rri_on_ddr(struct hif_softc *scn) 3583 { 3584 unsigned int i; 3585 uint32_t high_paddr, low_paddr; 3586 3587 if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS) 3588 return; 3589 3590 low_paddr = BITS0_TO_31(scn->paddr_rri_on_ddr); 3591 high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr); 3592 3593 hif_debug("using srri and drri from DDR"); 3594 3595 WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr); 3596 WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr); 3597 3598 for (i = 0; i < CE_COUNT; i++) 3599 CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i)); 3600 } 3601 #else 3602 /** 3603 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism 3604 * 3605 * @scn: hif_softc pointer 3606 * 3607 * This is a dummy implementation for platforms that don't 3608 * support this functionality. 3609 * 3610 * Return: None 3611 */ 3612 static inline void hif_config_rri_on_ddr(struct hif_softc *scn) 3613 { 3614 } 3615 #endif 3616 3617 /** 3618 * hif_update_rri_over_ddr_config() - update rri_over_ddr config for 3619 * QMI command 3620 * @scn: hif context 3621 * @cfg: wlan enable config 3622 * 3623 * In case of Genoa, rri_over_ddr memory configuration is passed 3624 * to firmware through QMI configure command. 
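 *
 * Worked example (hypothetical address): for a 36-bit RRI base address
 * of 0x8_1234_5678, BITS0_TO_31() yields base_addr_low = 0x12345678
 * and BITS32_TO_35() yields base_addr_high = 0x8.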
3625 */ 3626 #if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR) 3627 static void hif_update_rri_over_ddr_config(struct hif_softc *scn, 3628 struct pld_wlan_enable_cfg *cfg) 3629 { 3630 if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS) 3631 return; 3632 3633 cfg->rri_over_ddr_cfg_valid = true; 3634 cfg->rri_over_ddr_cfg.base_addr_low = 3635 BITS0_TO_31(scn->paddr_rri_on_ddr); 3636 cfg->rri_over_ddr_cfg.base_addr_high = 3637 BITS32_TO_35(scn->paddr_rri_on_ddr); 3638 } 3639 #else 3640 static void hif_update_rri_over_ddr_config(struct hif_softc *scn, 3641 struct pld_wlan_enable_cfg *cfg) 3642 { 3643 } 3644 #endif 3645 3646 /** 3647 * hif_wlan_enable(): call the platform driver to enable wlan 3648 * @scn: HIF Context 3649 * 3650 * This function passes the con_mode and CE configuration to 3651 * platform driver to enable wlan. 3652 * 3653 * Return: linux error code 3654 */ 3655 int hif_wlan_enable(struct hif_softc *scn) 3656 { 3657 struct pld_wlan_enable_cfg cfg; 3658 enum pld_driver_mode mode; 3659 uint32_t con_mode = hif_get_conparam(scn); 3660 3661 hif_get_target_ce_config(scn, 3662 (struct CE_pipe_config **)&cfg.ce_tgt_cfg, 3663 &cfg.num_ce_tgt_cfg, 3664 (struct service_to_pipe **)&cfg.ce_svc_cfg, 3665 &cfg.num_ce_svc_pipe_cfg, 3666 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg, 3667 &cfg.num_shadow_reg_cfg); 3668 3669 /* translate from structure size to array size */ 3670 cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config); 3671 cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe); 3672 cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg); 3673 3674 hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg, 3675 &cfg.num_shadow_reg_v2_cfg); 3676 3677 hif_print_hal_shadow_register_cfg(&cfg); 3678 3679 hif_update_rri_over_ddr_config(scn, &cfg); 3680 3681 if (QDF_GLOBAL_FTM_MODE == con_mode) 3682 mode = PLD_FTM; 3683 else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode) 3684 mode = PLD_COLDBOOT_CALIBRATION; 3685 else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode) 3686 mode = PLD_FTM_COLDBOOT_CALIBRATION; 3687 else if (QDF_IS_EPPING_ENABLED(con_mode)) 3688 mode = PLD_EPPING; 3689 else 3690 mode = PLD_MISSION; 3691 3692 if (BYPASS_QMI) 3693 return 0; 3694 else 3695 return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode); 3696 } 3697 3698 #ifdef WLAN_FEATURE_EPPING 3699 3700 #define CE_EPPING_USES_IRQ true 3701 3702 void hif_ce_prepare_epping_config(struct hif_softc *scn, 3703 struct HIF_CE_state *hif_state) 3704 { 3705 if (CE_EPPING_USES_IRQ) 3706 hif_state->host_ce_config = host_ce_config_wlan_epping_irq; 3707 else 3708 hif_state->host_ce_config = host_ce_config_wlan_epping_poll; 3709 hif_state->target_ce_config = target_ce_config_wlan_epping; 3710 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping); 3711 target_shadow_reg_cfg = target_shadow_reg_cfg_epping; 3712 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping); 3713 scn->ce_count = EPPING_HOST_CE_COUNT; 3714 } 3715 #endif 3716 3717 #ifdef QCN7605_SUPPORT 3718 static inline 3719 void hif_set_ce_config_qcn7605(struct hif_softc *scn, 3720 struct HIF_CE_state *hif_state) 3721 { 3722 hif_state->host_ce_config = host_ce_config_wlan_qcn7605; 3723 hif_state->target_ce_config = target_ce_config_wlan_qcn7605; 3724 hif_state->target_ce_config_sz = 3725 sizeof(target_ce_config_wlan_qcn7605); 3726 target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605; 3727 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605); 3728 scn->ce_count = QCN7605_CE_COUNT; 3729 } 3730 #else 3731 static inline 3732 void 
hif_set_ce_config_qcn7605(struct hif_softc *scn, 3733 struct HIF_CE_state *hif_state) 3734 { 3735 hif_err("QCN7605 not supported"); 3736 } 3737 #endif 3738 3739 #ifdef CE_SVC_CMN_INIT 3740 #ifdef QCA_WIFI_SUPPORT_SRNG 3741 static inline void hif_ce_service_init(void) 3742 { 3743 ce_service_srng_init(); 3744 } 3745 #else 3746 static inline void hif_ce_service_init(void) 3747 { 3748 ce_service_legacy_init(); 3749 } 3750 #endif 3751 #else 3752 static inline void hif_ce_service_init(void) 3753 { 3754 } 3755 #endif 3756 3757 3758 /** 3759 * hif_ce_prepare_config() - load the correct static tables. 3760 * @scn: hif context 3761 * 3762 * Epping uses different static attribute tables than mission mode. 3763 */ 3764 void hif_ce_prepare_config(struct hif_softc *scn) 3765 { 3766 uint32_t mode = hif_get_conparam(scn); 3767 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 3768 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); 3769 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3770 int ret; 3771 int msi_data_count = 0; 3772 int msi_data_start = 0; 3773 int msi_irq_start = 0; 3774 3775 hif_ce_service_init(); 3776 hif_state->ce_services = ce_services_attach(scn); 3777 3778 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", 3779 &msi_data_count, &msi_data_start, 3780 &msi_irq_start); 3781 3782 scn->ce_count = HOST_CE_COUNT; 3783 scn->int_assignment = &ce_int_context[msi_data_count]; 3784 scn->free_irq_done = false; 3785 /* if epping is enabled we need to use the epping configuration. */ 3786 if (QDF_IS_EPPING_ENABLED(mode)) { 3787 hif_ce_prepare_epping_config(scn, hif_state); 3788 return; 3789 } 3790 3791 switch (tgt_info->target_type) { 3792 default: 3793 hif_state->host_ce_config = host_ce_config_wlan; 3794 hif_state->target_ce_config = target_ce_config_wlan; 3795 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan); 3796 break; 3797 case TARGET_TYPE_QCN7605: 3798 hif_set_ce_config_qcn7605(scn, hif_state); 3799 break; 3800 case TARGET_TYPE_AR900B: 3801 case TARGET_TYPE_QCA9984: 3802 case TARGET_TYPE_IPQ4019: 3803 case TARGET_TYPE_QCA9888: 3804 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) { 3805 hif_state->host_ce_config = 3806 host_lowdesc_ce_cfg_wlan_ar900b_nopktlog; 3807 } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { 3808 hif_state->host_ce_config = 3809 host_lowdesc_ce_cfg_wlan_ar900b; 3810 } else { 3811 hif_state->host_ce_config = host_ce_config_wlan_ar900b; 3812 } 3813 3814 hif_state->target_ce_config = target_ce_config_wlan_ar900b; 3815 hif_state->target_ce_config_sz = 3816 sizeof(target_ce_config_wlan_ar900b); 3817 3818 break; 3819 3820 case TARGET_TYPE_AR9888: 3821 case TARGET_TYPE_AR9888V2: 3822 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { 3823 hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888; 3824 } else { 3825 hif_state->host_ce_config = host_ce_config_wlan_ar9888; 3826 } 3827 3828 hif_state->target_ce_config = target_ce_config_wlan_ar9888; 3829 hif_state->target_ce_config_sz = 3830 sizeof(target_ce_config_wlan_ar9888); 3831 3832 break; 3833 3834 case TARGET_TYPE_QCA8074: 3835 case TARGET_TYPE_QCA8074V2: 3836 case TARGET_TYPE_QCA6018: 3837 if (scn->bus_type == QDF_BUS_TYPE_PCI) { 3838 hif_state->host_ce_config = 3839 host_ce_config_wlan_qca8074_pci; 3840 hif_state->target_ce_config = 3841 target_ce_config_wlan_qca8074_pci; 3842 hif_state->target_ce_config_sz = 3843 sizeof(target_ce_config_wlan_qca8074_pci); 3844 } else { 3845 hif_state->host_ce_config = host_ce_config_wlan_qca8074; 3846 
hif_state->target_ce_config = 3847 target_ce_config_wlan_qca8074; 3848 hif_state->target_ce_config_sz = 3849 sizeof(target_ce_config_wlan_qca8074); 3850 } 3851 break; 3852 case TARGET_TYPE_QCA6290: 3853 hif_state->host_ce_config = host_ce_config_wlan_qca6290; 3854 hif_state->target_ce_config = target_ce_config_wlan_qca6290; 3855 hif_state->target_ce_config_sz = 3856 sizeof(target_ce_config_wlan_qca6290); 3857 3858 scn->ce_count = QCA_6290_CE_COUNT; 3859 break; 3860 case TARGET_TYPE_QCN9000: 3861 hif_state->host_ce_config = host_ce_config_wlan_qcn9000; 3862 hif_state->target_ce_config = target_ce_config_wlan_qcn9000; 3863 hif_state->target_ce_config_sz = 3864 sizeof(target_ce_config_wlan_qcn9000); 3865 scn->ce_count = QCN_9000_CE_COUNT; 3866 scn->disable_wake_irq = 1; 3867 break; 3868 case TARGET_TYPE_QCN9224: 3869 hif_set_ce_config_qcn9224(scn, hif_state); 3870 break; 3871 case TARGET_TYPE_QCN6122: 3872 hif_state->host_ce_config = host_ce_config_wlan_qcn6122; 3873 hif_state->target_ce_config = target_ce_config_wlan_qcn6122; 3874 hif_state->target_ce_config_sz = 3875 sizeof(target_ce_config_wlan_qcn6122); 3876 scn->ce_count = QCN_6122_CE_COUNT; 3877 scn->disable_wake_irq = 1; 3878 break; 3879 case TARGET_TYPE_QCA5018: 3880 hif_state->host_ce_config = host_ce_config_wlan_qca5018; 3881 hif_state->target_ce_config = target_ce_config_wlan_qca5018; 3882 hif_state->target_ce_config_sz = 3883 sizeof(target_ce_config_wlan_qca5018); 3884 scn->ce_count = QCA_5018_CE_COUNT; 3885 break; 3886 case TARGET_TYPE_QCA9574: 3887 hif_state->host_ce_config = host_ce_config_wlan_qca9574; 3888 hif_state->target_ce_config = target_ce_config_wlan_qca9574; 3889 hif_state->target_ce_config_sz = 3890 sizeof(target_ce_config_wlan_qca9574); 3891 break; 3892 case TARGET_TYPE_QCA6390: 3893 hif_state->host_ce_config = host_ce_config_wlan_qca6390; 3894 hif_state->target_ce_config = target_ce_config_wlan_qca6390; 3895 hif_state->target_ce_config_sz = 3896 sizeof(target_ce_config_wlan_qca6390); 3897 3898 scn->ce_count = QCA_6390_CE_COUNT; 3899 break; 3900 case TARGET_TYPE_QCA6490: 3901 hif_state->host_ce_config = host_ce_config_wlan_qca6490; 3902 hif_state->target_ce_config = target_ce_config_wlan_qca6490; 3903 hif_state->target_ce_config_sz = 3904 sizeof(target_ce_config_wlan_qca6490); 3905 3906 scn->ce_count = QCA_6490_CE_COUNT; 3907 break; 3908 case TARGET_TYPE_QCA6750: 3909 hif_state->host_ce_config = host_ce_config_wlan_qca6750; 3910 hif_state->target_ce_config = target_ce_config_wlan_qca6750; 3911 hif_state->target_ce_config_sz = 3912 sizeof(target_ce_config_wlan_qca6750); 3913 3914 scn->ce_count = QCA_6750_CE_COUNT; 3915 break; 3916 case TARGET_TYPE_WCN7850: 3917 hif_state->host_ce_config = host_ce_config_wlan_wcn7850; 3918 hif_state->target_ce_config = target_ce_config_wlan_wcn7850; 3919 hif_state->target_ce_config_sz = 3920 sizeof(target_ce_config_wlan_wcn7850); 3921 scn->ce_count = WCN_7850_CE_COUNT; 3922 break; 3923 case TARGET_TYPE_ADRASTEA: 3924 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) { 3925 hif_state->host_ce_config = 3926 host_lowdesc_ce_config_wlan_adrastea_nopktlog; 3927 hif_state->target_ce_config = 3928 target_lowdesc_ce_config_wlan_adrastea_nopktlog; 3929 hif_state->target_ce_config_sz = 3930 sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog); 3931 } else { 3932 hif_state->host_ce_config = 3933 host_ce_config_wlan_adrastea; 3934 hif_state->target_ce_config = 3935 target_ce_config_wlan_adrastea; 3936 hif_state->target_ce_config_sz = 3937 sizeof(target_ce_config_wlan_adrastea); 3938 
} 3939 break; 3940 3941 } 3942 QDF_BUG(scn->ce_count <= CE_COUNT_MAX); 3943 } 3944 3945 /** 3946 * hif_ce_open() - do ce specific allocations 3947 * @hif_sc: pointer to hif context 3948 * 3949 * return: 0 for success or QDF_STATUS_E_NOMEM 3950 */ 3951 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc) 3952 { 3953 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 3954 3955 qdf_spinlock_create(&hif_state->irq_reg_lock); 3956 qdf_spinlock_create(&hif_state->keep_awake_lock); 3957 return QDF_STATUS_SUCCESS; 3958 } 3959 3960 /** 3961 * hif_ce_close() - do ce specific free 3962 * @hif_sc: pointer to hif context 3963 */ 3964 void hif_ce_close(struct hif_softc *hif_sc) 3965 { 3966 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 3967 3968 qdf_spinlock_destroy(&hif_state->irq_reg_lock); 3969 qdf_spinlock_destroy(&hif_state->keep_awake_lock); 3970 } 3971 3972 /** 3973 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed 3974 * @hif_sc: hif context 3975 * 3976 * uses state variables to support cleaning up when hif_config_ce fails. 3977 */ 3978 void hif_unconfig_ce(struct hif_softc *hif_sc) 3979 { 3980 int pipe_num; 3981 struct HIF_CE_pipe_info *pipe_info; 3982 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 3983 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc); 3984 3985 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { 3986 pipe_info = &hif_state->pipe_info[pipe_num]; 3987 if (pipe_info->ce_hdl) { 3988 ce_unregister_irq(hif_state, (1 << pipe_num)); 3989 } 3990 } 3991 deinit_tasklet_workers(hif_hdl); 3992 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { 3993 pipe_info = &hif_state->pipe_info[pipe_num]; 3994 if (pipe_info->ce_hdl) { 3995 ce_fini(pipe_info->ce_hdl); 3996 pipe_info->ce_hdl = NULL; 3997 pipe_info->buf_sz = 0; 3998 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); 3999 } 4000 } 4001 if (hif_sc->athdiag_procfs_inited) { 4002 athdiag_procfs_remove(); 4003 hif_sc->athdiag_procfs_inited = false; 4004 } 4005 } 4006 4007 #ifdef CONFIG_BYPASS_QMI 4008 #ifdef QCN7605_SUPPORT 4009 /** 4010 * hif_post_static_buf_to_target() - post static buffer to WLAN FW 4011 * @scn: pointer to HIF structure 4012 * 4013 * WLAN FW needs 2MB memory from DDR when QMI is disabled. 
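 *
 * Rough sketch of the layout used below (inferred from this file, not
 * a normative description): the region is cast to struct ce_info, whose
 * leading fields carry the RRI-over-DDR base address split into low
 * (bits 31:0) and high (bits 35:32) halves, followed by a per-CE table
 * of {ce_id, msi_vector} pairs, with vectors assigned round-robin as
 * (i % msi_data_count) + msi_irq_start.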
4014  *
4015  * Return: void
4016  */
4017 static void hif_post_static_buf_to_target(struct hif_softc *scn)
4018 {
4019     phys_addr_t target_pa;
4020     struct ce_info *ce_info_ptr;
4021     uint32_t msi_data_start;
4022     uint32_t msi_data_count;
4023     uint32_t msi_irq_start;
4024     uint32_t i = 0;
4025     int ret;
4026 
4027     scn->vaddr_qmi_bypass =
4028         (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
4029                                              scn->qdf_dev->dev,
4030                                              FW_SHARED_MEM,
4031                                              &target_pa);
4032     if (!scn->vaddr_qmi_bypass) {
4033         hif_err("Memory allocation failed, could not post target buf");
4034         return;
4035     }
4036 
4037     scn->paddr_qmi_bypass = target_pa;
4038 
4039     ce_info_ptr = (struct ce_info *)scn->vaddr_qmi_bypass;
4040 
4041     if (scn->vaddr_rri_on_ddr) {
4042         ce_info_ptr->rri_over_ddr_low_paddr =
4043             BITS0_TO_31(scn->paddr_rri_on_ddr);
4044         ce_info_ptr->rri_over_ddr_high_paddr =
4045             BITS32_TO_35(scn->paddr_rri_on_ddr);
4046     }
4047 
4048     ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
4049                                       &msi_data_count, &msi_data_start,
4050                                       &msi_irq_start);
4051     if (ret) {
4052         hif_err("Failed to get CE msi config");
         /* don't leak the shared buffer when the MSI map is unavailable */
         qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
                                 FW_SHARED_MEM, scn->vaddr_qmi_bypass,
                                 target_pa, 0);
         scn->vaddr_qmi_bypass = NULL;
4053         return;
4054     }
4055 
4056     for (i = 0; i < CE_COUNT_MAX; i++) {
4057         ce_info_ptr->cfg[i].ce_id = i;
4058         ce_info_ptr->cfg[i].msi_vector =
4059             (i % msi_data_count) + msi_irq_start;
4060     }
4061 
4062     hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
4063     hif_info("target va %pK target pa %pa", scn->vaddr_qmi_bypass,
4064              &target_pa);
4065 }
4066 
4067 /**
4068  * hif_cleanup_static_buf_to_target() - clean up static buffer to WLAN FW
4069  * @scn: pointer to HIF structure
4070  *
4071  *
4072  * Return: void
4073  */
4074 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
4075 {
4076     void *target_va = scn->vaddr_qmi_bypass;
4077     phys_addr_t target_pa = scn->paddr_qmi_bypass;
4078 
4079     qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
4080                             FW_SHARED_MEM, target_va,
4081                             target_pa, 0);
4082     hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0);
4083 }
4084 #else
4085 /**
4086  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
4087  * @scn: pointer to HIF structure
4088  *
4089  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
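 *
 * Unlike the QCN7605 variant above, this path only allocates the
 * region and publishes its physical address through
 * BYPASS_QMI_TEMP_REGISTER; no struct ce_info header is populated.
 * The matching hif_cleanup_static_buf_to_target() below must free it
 * with the same FW_SHARED_MEM size and the saved paddr_qmi_bypass.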
4090  *
4091  * Return: void
4092  */
4093 static void hif_post_static_buf_to_target(struct hif_softc *scn)
4094 {
4095     qdf_dma_addr_t target_pa;
4096 
4097     scn->vaddr_qmi_bypass =
4098         (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
4099                                              scn->qdf_dev->dev,
4100                                              FW_SHARED_MEM,
4101                                              &target_pa);
4102     if (!scn->vaddr_qmi_bypass) {
4103         hif_err("Memory allocation failed, could not post target buf");
4104         return;
4105     }
4106 
4107     scn->paddr_qmi_bypass = target_pa;
4108     hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
4109 }
4110 
4111 /**
4112  * hif_cleanup_static_buf_to_target() - clean up static buffer to WLAN FW
4113  * @scn: pointer to HIF structure
4114  *
4115  *
4116  * Return: void
4117  */
4118 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
4119 {
4120     void *target_va = scn->vaddr_qmi_bypass;
4121     phys_addr_t target_pa = scn->paddr_qmi_bypass;
4122 
4123     qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
4124                             FW_SHARED_MEM, target_va,
4125                             target_pa, 0);
4126     hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0);
4127 }
4128 #endif
4129 
4130 #else
4131 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
4132 {
4133 }
4134 
4135 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
4136 {
4137 }
4138 #endif
4139 
4140 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
4141                                        bool wait_for_it)
4142 {
4143     /* todo */
4144     return 0;
4145 }
4146 
4147 int hif_config_ce_by_id(struct hif_softc *scn, int pipe_num)
4148 {
4149     struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4150     struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
4151     struct HIF_CE_pipe_info *pipe_info;
4152     struct CE_state *ce_state = NULL;
4153     struct CE_attr *attr;
4154     int rv = 0;
4155 
4156     if (pipe_num >= CE_COUNT_MAX)
4157         return -EINVAL;
4158 
4159     pipe_info = &hif_state->pipe_info[pipe_num];
4160     pipe_info->pipe_num = pipe_num;
4161     pipe_info->HIF_CE_state = hif_state;
4162     attr = &hif_state->host_ce_config[pipe_num];
4163     ce_state = scn->ce_id_to_state[pipe_num];
4164 
4165     if (ce_state) {
4166         /* Do not reinitialize the CE if it's done already */
4167         rv = QDF_STATUS_E_BUSY;
4168         goto err;
4169     }
4170 
4171     pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
4172     ce_state = scn->ce_id_to_state[pipe_num];
4173     if (!ce_state) {
4174         A_TARGET_ACCESS_UNLIKELY(scn);
4175         rv = QDF_STATUS_E_FAILURE;
4176         goto err;
4177     }
4178     qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
4179     QDF_ASSERT(pipe_info->ce_hdl);
4180     if (!pipe_info->ce_hdl) {
4181         rv = QDF_STATUS_E_FAILURE;
4182         A_TARGET_ACCESS_UNLIKELY(scn);
4183         goto err;
4184     }
4185 
4186     ce_state->lro_data = qdf_lro_init();
4187 
4188     if (attr->flags & CE_ATTR_DIAG) {
4189         /* Reserve the ultimate CE for
4190          * Diagnostic Window support
4191          */
4192         hif_state->ce_diag = pipe_info->ce_hdl;
4193         goto skip;
4194     }
4195 
4196     if (hif_is_nss_wifi_enabled(scn) && ce_state &&
4197         (ce_state->htt_rx_data)) {
4198         goto skip;
4199     }
4200 
4201     pipe_info->buf_sz = (qdf_size_t)(attr->src_sz_max);
4202     if (attr->dest_nentries > 0) {
4203         atomic_set(&pipe_info->recv_bufs_needed,
4204                    init_buffer_count(attr->dest_nentries - 1));
4205         /* SRNG based CE has one entry less */
4206         if (ce_srng_based(scn))
4207             atomic_dec(&pipe_info->recv_bufs_needed);
4208     } else {
4209         atomic_set(&pipe_info->recv_bufs_needed, 0);
4210     }
4211     ce_tasklet_init(hif_state, (1 << pipe_num));
4212     ce_register_irq(hif_state, (1 << pipe_num));
4213 
4214     init_tasklet_worker_by_ceid(hif_hdl, pipe_num);
4215 skip:
4216     return 0;
4217 err:
4218     return rv;
4219 }
4220 
4221 /**
4222  * hif_config_ce() - configure copy engines
4223  * @scn: hif context
4224  *
4225  * Prepares fw, copy engine hardware and host sw according
4226  * to the attributes selected by hif_ce_prepare_config.
4227  *
4228  * also calls athdiag_procfs_init
4229  *
4230  * Return: 0 for success, nonzero for failure.
4231  */
4232 int hif_config_ce(struct hif_softc *scn)
4233 {
4234     struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4235     struct HIF_CE_pipe_info *pipe_info;
4236     int pipe_num;
4237 
4238 #ifdef ADRASTEA_SHADOW_REGISTERS
4239     int i;
4240 #endif
4241     QDF_STATUS rv = QDF_STATUS_SUCCESS;
4242 
4243     scn->notice_send = true;
4244     scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
4245 
4246     hif_post_static_buf_to_target(scn);
4247 
4248     hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
4249 
4250     hif_config_rri_on_ddr(scn);
4251 
4252     if (ce_srng_based(scn))
4253         scn->bus_ops.hif_target_sleep_state_adjust =
4254             &hif_srng_sleep_state_adjust;
4255 
4256     /* Initialise the CE debug history sysfs interface inputs ce_id and
4257      * index. Disable data storing
4258      */
4259     reset_ce_debug_history(scn);
4260 
4261     for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
4262         struct CE_attr *attr;
4263 
4264         pipe_info = &hif_state->pipe_info[pipe_num];
4265         attr = &hif_state->host_ce_config[pipe_num];
4266 
4267         if (attr->flags & CE_ATTR_INIT_ON_DEMAND)
4268             continue;
4269 
4270         if (hif_config_ce_by_id(scn, pipe_num))
4271             goto err;
4272     }
4273 
4274     if (athdiag_procfs_init(scn) != 0) {
4275         A_TARGET_ACCESS_UNLIKELY(scn);
4276         goto err;
4277     }
4278     scn->athdiag_procfs_inited = true;
4279 
4280     hif_debug("ce_init done");
4281     hif_debug("%s: X, ret = %d", __func__, rv);
4282 
4283 #ifdef ADRASTEA_SHADOW_REGISTERS
4284     hif_debug("Using Shadow Registers instead of CE Registers");
4285     for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
4286         hif_debug("Shadow Register%d is mapped to address %x",
4287                   i,
4288                   (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
4289     }
4290 #endif
4291 
4292     return rv != QDF_STATUS_SUCCESS;
4293 err:
4294     /* Failure, so clean up */
4295     hif_unconfig_ce(scn);
4296     hif_info("X, ret = %d", rv);
4297     return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
4298 }
4299 
4300 /**
4301  * hif_config_ce_pktlog() - configure the pktlog copy engine
4302  * @hif_hdl: opaque hif context
4303  *
4304  * Prepares fw, copy engine hardware and host sw according
4305  * to the attributes selected by hif_ce_prepare_config.
4306  *
4307  * also calls athdiag_procfs_init
4308  *
4309  * Return: 0 for success, nonzero for failure.
4310  */
4311 int hif_config_ce_pktlog(struct hif_opaque_softc *hif_hdl)
4312 {
4313     struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
4314     struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4315     int pipe_num;
4316     QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE;
4317     struct HIF_CE_pipe_info *pipe_info;
4318 
4319     if (!scn)
4320         goto err;
4321 
4322     if (scn->pktlog_init)
4323         return QDF_STATUS_SUCCESS;
4324 
4325     pipe_num = hif_get_pktlog_ce_num(scn);
4326     if (pipe_num < 0) {
4327         qdf_status = QDF_STATUS_E_FAILURE;
4328         goto err;
4329     }
4330 
4331     pipe_info = &hif_state->pipe_info[pipe_num];
4332 
4333     qdf_status = hif_config_ce_by_id(scn, pipe_num);
4334     /* CE already initialized; do not try to reinitialize it again */
4335     if (qdf_status == QDF_STATUS_E_BUSY)
4336         return QDF_STATUS_SUCCESS;
4337 
4338     qdf_status = hif_config_irq_by_ceid(scn, pipe_num);
4339     if (!QDF_IS_STATUS_SUCCESS(qdf_status))
4340         goto err;
4341 
4342     qdf_status = hif_completion_thread_startup_by_ceid(hif_state, pipe_num);
4343     if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
4344         hif_err("%s: failed to start hif thread", __func__);
4345         goto err;
4346     }
4347 
4348     /* Post buffers for pktlog copy engine. */
4349     qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
4350     if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
4351         /* cleanup is done in hif_ce_disable */
4352         hif_err("%s: failed to post buffers", __func__);
4353         return qdf_status;
4354     }
4355     scn->pktlog_init = true;
4356     return qdf_status != QDF_STATUS_SUCCESS;
4357 
4358 err:
4359     hif_debug("%s: X, ret = %d", __func__, qdf_status);
4360     return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
4361 }
4362 
4363 #ifdef IPA_OFFLOAD
4364 /**
4365  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
4366  * @scn: bus context
4367  * @ce_sr: copyengine source ring shared memory info
4368  * @ce_sr_ring_size: copyengine source ring size
4369  * @ce_reg_paddr: copyengine register physical address
4370  *
4371  * When the IPA micro controller data path offload feature is enabled,
4372  * HIF should release copy engine related resource information to the
4373  * IPA UC, which will then access the hardware with that information.
4374  *
4375  * Return: None
4376  */
4377 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
4378                                 qdf_shared_mem_t **ce_sr,
4379                                 uint32_t *ce_sr_ring_size,
4380                                 qdf_dma_addr_t *ce_reg_paddr)
4381 {
4382     struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4383     struct HIF_CE_pipe_info *pipe_info =
4384         &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
4385     struct CE_handle *ce_hdl = pipe_info->ce_hdl;
4386 
4387     ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
4388                         ce_reg_paddr);
4389 }
4390 #endif /* IPA_OFFLOAD */
4391 
4392 
4393 #ifdef ADRASTEA_SHADOW_REGISTERS
4394 
4395 /*
4396  * Current shadow register config
4397  *
4398  * -----------------------------------------------------------
4399  * Shadow Register      |     CE   |    src/dst write index
4400  * -----------------------------------------------------------
4401  *         0            |     0    |           src
4402  *         1     No Config - Doesn't point to anything
4403  *         2     No Config - Doesn't point to anything
4404  *         3            |     3    |           src
4405  *         4            |     4    |           src
4406  *         5            |     5    |           src
4407  *         6     No Config - Doesn't point to anything
4408  *         7            |     7    |           src
4409  *         8     No Config - Doesn't point to anything
4410  *         9     No Config - Doesn't point to anything
4411  *        10     No Config - Doesn't point to anything
4412  *        11     No Config - Doesn't point to anything
4413  * -----------------------------------------------------------
4414  *        12     No Config - Doesn't point to anything
4415  *        13            |     1    |           dst
4416  *        14            |     2    |           dst
4417  *        15            |     3    |           dst (QCN7605 only)
4418  *        16     No Config - Doesn't point to anything
4419  *        17            |     5    |           dst
4420  *        18     No Config - Doesn't point to anything
4421  *        19            |     7    |           dst
4422  *        20            |     8    |           dst
4423  *        21            |     9    |           dst
4424  *        22            |    10    |           dst
4425  *        23            |    11    |           dst
4426  * -----------------------------------------------------------
4427  *
4428  *
4429  * ToDo - Move shadow register config to following in the future
4430  * This helps free up a block of shadow registers towards the end.
4431 * Can be used for other purposes 4432 * 4433 * ----------------------------------------------------------- 4434 * Shadow Register | CE | src/dst write index 4435 * ----------------------------------------------------------- 4436 * 0 | 0 | src 4437 * 1 | 3 | src 4438 * 2 | 4 | src 4439 * 3 | 5 | src 4440 * 4 | 7 | src 4441 * ----------------------------------------------------------- 4442 * 5 | 1 | dst 4443 * 6 | 2 | dst 4444 * 7 | 7 | dst 4445 * 8 | 8 | dst 4446 * ----------------------------------------------------------- 4447 * 9 No Config - Doesn't point to anything 4448 * 12 No Config - Doesn't point to anything 4449 * 13 No Config - Doesn't point to anything 4450 * 14 No Config - Doesn't point to anything 4451 * 15 No Config - Doesn't point to anything 4452 * 16 No Config - Doesn't point to anything 4453 * 17 No Config - Doesn't point to anything 4454 * 18 No Config - Doesn't point to anything 4455 * 19 No Config - Doesn't point to anything 4456 * 20 No Config - Doesn't point to anything 4457 * 21 No Config - Doesn't point to anything 4458 * 22 No Config - Doesn't point to anything 4459 * 23 No Config - Doesn't point to anything 4460 * ----------------------------------------------------------- 4461 */ 4462 #ifndef QCN7605_SUPPORT 4463 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 4464 { 4465 u32 addr = 0; 4466 u32 ce = COPY_ENGINE_ID(ctrl_addr); 4467 4468 switch (ce) { 4469 case 0: 4470 addr = SHADOW_VALUE0; 4471 break; 4472 case 3: 4473 addr = SHADOW_VALUE3; 4474 break; 4475 case 4: 4476 addr = SHADOW_VALUE4; 4477 break; 4478 case 5: 4479 addr = SHADOW_VALUE5; 4480 break; 4481 case 7: 4482 addr = SHADOW_VALUE7; 4483 break; 4484 default: 4485 hif_err("Invalid CE ctrl_addr (CE=%d)", ce); 4486 QDF_ASSERT(0); 4487 } 4488 return addr; 4489 4490 } 4491 4492 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 4493 { 4494 u32 addr = 0; 4495 u32 ce = COPY_ENGINE_ID(ctrl_addr); 4496 4497 switch (ce) { 4498 case 1: 4499 addr = SHADOW_VALUE13; 4500 break; 4501 case 2: 4502 addr = SHADOW_VALUE14; 4503 break; 4504 case 5: 4505 addr = SHADOW_VALUE17; 4506 break; 4507 case 7: 4508 addr = SHADOW_VALUE19; 4509 break; 4510 case 8: 4511 addr = SHADOW_VALUE20; 4512 break; 4513 case 9: 4514 addr = SHADOW_VALUE21; 4515 break; 4516 case 10: 4517 addr = SHADOW_VALUE22; 4518 break; 4519 case 11: 4520 addr = SHADOW_VALUE23; 4521 break; 4522 default: 4523 hif_err("Invalid CE ctrl_addr (CE=%d)", ce); 4524 QDF_ASSERT(0); 4525 } 4526 4527 return addr; 4528 4529 } 4530 #else 4531 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 4532 { 4533 u32 addr = 0; 4534 u32 ce = COPY_ENGINE_ID(ctrl_addr); 4535 4536 switch (ce) { 4537 case 0: 4538 addr = SHADOW_VALUE0; 4539 break; 4540 case 3: 4541 addr = SHADOW_VALUE3; 4542 break; 4543 case 4: 4544 addr = SHADOW_VALUE4; 4545 break; 4546 case 5: 4547 addr = SHADOW_VALUE5; 4548 break; 4549 default: 4550 hif_err("Invalid CE ctrl_addr (CE=%d)", ce); 4551 QDF_ASSERT(0); 4552 } 4553 return addr; 4554 } 4555 4556 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 4557 { 4558 u32 addr = 0; 4559 u32 ce = COPY_ENGINE_ID(ctrl_addr); 4560 4561 switch (ce) { 4562 case 1: 4563 addr = SHADOW_VALUE13; 4564 break; 4565 case 2: 4566 addr = SHADOW_VALUE14; 4567 break; 4568 case 3: 4569 addr = SHADOW_VALUE15; 4570 break; 4571 case 5: 4572 addr = SHADOW_VALUE17; 4573 break; 4574 case 7: 4575 addr = SHADOW_VALUE19; 4576 break; 4577 case 8: 4578 addr = SHADOW_VALUE20; 4579 break; 4580 case 9: 4581 addr = SHADOW_VALUE21; 4582 break; 
4583 case 10: 4584 addr = SHADOW_VALUE22; 4585 break; 4586 case 11: 4587 addr = SHADOW_VALUE23; 4588 break; 4589 default: 4590 hif_err("Invalid CE ctrl_addr (CE=%d)", ce); 4591 QDF_ASSERT(0); 4592 } 4593 4594 return addr; 4595 } 4596 #endif 4597 #endif 4598 4599 #if defined(FEATURE_LRO) 4600 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id) 4601 { 4602 struct CE_state *ce_state; 4603 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 4604 4605 ce_state = scn->ce_id_to_state[ctx_id]; 4606 4607 return ce_state->lro_data; 4608 } 4609 #endif 4610 4611 /** 4612 * hif_map_service_to_pipe() - returns the ce ids pertaining to 4613 * this service 4614 * @scn: hif_softc pointer. 4615 * @svc_id: Service ID for which the mapping is needed. 4616 * @ul_pipe: address of the container in which ul pipe is returned. 4617 * @dl_pipe: address of the container in which dl pipe is returned. 4618 * @ul_is_polled: address of the container in which a bool 4619 * indicating if the UL CE for this service 4620 * is polled is returned. 4621 * @dl_is_polled: address of the container in which a bool 4622 * indicating if the DL CE for this service 4623 * is polled is returned. 4624 * 4625 * Return: Indicates whether the service has been found in the table. 4626 * Upon return, ul_is_polled is updated only if ul_pipe is updated. 4627 * There will be warning logs if either leg has not been updated 4628 * because it missed the entry in the table (but this is not an err). 4629 */ 4630 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id, 4631 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled, 4632 int *dl_is_polled) 4633 { 4634 int status = -EINVAL; 4635 unsigned int i; 4636 struct service_to_pipe element; 4637 struct service_to_pipe *tgt_svc_map_to_use; 4638 uint32_t sz_tgt_svc_map_to_use; 4639 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 4640 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 4641 bool dl_updated = false; 4642 bool ul_updated = false; 4643 4644 hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use, 4645 &sz_tgt_svc_map_to_use); 4646 4647 *dl_is_polled = 0; /* polling for received messages not supported */ 4648 4649 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) { 4650 4651 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element)); 4652 if (element.service_id == svc_id) { 4653 if (element.pipedir == PIPEDIR_OUT) { 4654 *ul_pipe = element.pipenum; 4655 *ul_is_polled = 4656 (hif_state->host_ce_config[*ul_pipe].flags & 4657 CE_ATTR_DISABLE_INTR) != 0; 4658 ul_updated = true; 4659 } else if (element.pipedir == PIPEDIR_IN) { 4660 *dl_pipe = element.pipenum; 4661 dl_updated = true; 4662 } 4663 status = 0; 4664 } 4665 } 4666 if (ul_updated == false) 4667 hif_debug("ul pipe is NOT updated for service %d", svc_id); 4668 if (dl_updated == false) 4669 hif_debug("dl pipe is NOT updated for service %d", svc_id); 4670 4671 return status; 4672 } 4673 4674 #ifdef SHADOW_REG_DEBUG 4675 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn, 4676 uint32_t CE_ctrl_addr) 4677 { 4678 uint32_t read_from_hw, srri_from_ddr = 0; 4679 4680 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS); 4681 4682 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); 4683 4684 if (read_from_hw != srri_from_ddr) { 4685 hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", 4686 srri_from_ddr, read_from_hw, 4687 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); 4688 QDF_ASSERT(0); 4689 } 4690 return 
srri_from_ddr; 4691 } 4692 4693 4694 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn, 4695 uint32_t CE_ctrl_addr) 4696 { 4697 uint32_t read_from_hw, drri_from_ddr = 0; 4698 4699 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS); 4700 4701 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); 4702 4703 if (read_from_hw != drri_from_ddr) { 4704 hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", 4705 drri_from_ddr, read_from_hw, 4706 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); 4707 QDF_ASSERT(0); 4708 } 4709 return drri_from_ddr; 4710 } 4711 4712 #endif 4713 4714 /** 4715 * hif_dump_ce_registers() - dump ce registers 4716 * @scn: hif_opaque_softc pointer. 4717 * 4718 * Output the copy engine registers 4719 * 4720 * Return: 0 for success or error code 4721 */ 4722 int hif_dump_ce_registers(struct hif_softc *scn) 4723 { 4724 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 4725 uint32_t ce_reg_address = CE0_BASE_ADDRESS; 4726 uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2]; 4727 uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2; 4728 uint16_t i; 4729 QDF_STATUS status; 4730 4731 for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) { 4732 if (!scn->ce_id_to_state[i]) { 4733 hif_debug("CE%d not used", i); 4734 continue; 4735 } 4736 4737 status = hif_diag_read_mem(hif_hdl, ce_reg_address, 4738 (uint8_t *) &ce_reg_values[0], 4739 ce_reg_word_size * sizeof(uint32_t)); 4740 4741 if (status != QDF_STATUS_SUCCESS) { 4742 hif_err("Dumping CE register failed!"); 4743 return -EACCES; 4744 } 4745 hif_debug("CE%d=>", i); 4746 qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG, 4747 (uint8_t *) &ce_reg_values[0], 4748 ce_reg_word_size * sizeof(uint32_t)); 4749 qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address 4750 + SR_WR_INDEX_ADDRESS), 4751 ce_reg_values[SR_WR_INDEX_ADDRESS/4]); 4752 qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address 4753 + CURRENT_SRRI_ADDRESS), 4754 ce_reg_values[CURRENT_SRRI_ADDRESS/4]); 4755 qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address 4756 + DST_WR_INDEX_ADDRESS), 4757 ce_reg_values[DST_WR_INDEX_ADDRESS/4]); 4758 qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address 4759 + CURRENT_DRRI_ADDRESS), 4760 ce_reg_values[CURRENT_DRRI_ADDRESS/4]); 4761 qdf_print("---"); 4762 } 4763 return 0; 4764 } 4765 qdf_export_symbol(hif_dump_ce_registers); 4766 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT 4767 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc, 4768 struct hif_pipe_addl_info *hif_info, uint32_t pipe) 4769 { 4770 struct hif_softc *scn = HIF_GET_SOFTC(osc); 4771 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 4772 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc); 4773 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); 4774 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 4775 struct CE_state *ce_state = (struct CE_state *)ce_hdl; 4776 struct CE_ring_state *src_ring = ce_state->src_ring; 4777 struct CE_ring_state *dest_ring = ce_state->dest_ring; 4778 4779 if (src_ring) { 4780 hif_info->ul_pipe.nentries = src_ring->nentries; 4781 hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask; 4782 hif_info->ul_pipe.sw_index = src_ring->sw_index; 4783 hif_info->ul_pipe.write_index = src_ring->write_index; 4784 hif_info->ul_pipe.hw_index = src_ring->hw_index; 4785 hif_info->ul_pipe.base_addr_CE_space = 4786 src_ring->base_addr_CE_space; 4787 hif_info->ul_pipe.base_addr_owner_space = 4788 
src_ring->base_addr_owner_space;
4789     }
4790 
4791 
4792     if (dest_ring) {
4793         hif_info->dl_pipe.nentries = dest_ring->nentries;
4794         hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
4795         hif_info->dl_pipe.sw_index = dest_ring->sw_index;
4796         hif_info->dl_pipe.write_index = dest_ring->write_index;
4797         hif_info->dl_pipe.hw_index = dest_ring->hw_index;
4798         hif_info->dl_pipe.base_addr_CE_space =
4799             dest_ring->base_addr_CE_space;
4800         hif_info->dl_pipe.base_addr_owner_space =
4801             dest_ring->base_addr_owner_space;
4802     }
4803 
4804     hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
4805     hif_info->ctrl_addr = ce_state->ctrl_addr;
4806 
4807     return hif_info;
4808 }
4809 qdf_export_symbol(hif_get_addl_pipe_info);
4810 
4811 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
4812 {
4813     struct hif_softc *scn = HIF_GET_SOFTC(osc);
4814 
4815     scn->nss_wifi_ol_mode = mode;
4816     return 0;
4817 }
4818 qdf_export_symbol(hif_set_nss_wifiol_mode);
4819 #endif
4820 
4821 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
4822 {
4823     struct hif_softc *scn = HIF_GET_SOFTC(osc);

4824     scn->hif_attribute = hif_attrib;
4825 }
4826 
4827 
4828 /* disable interrupts (only applicable for legacy copy engines currently) */
4829 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
4830 {
4831     struct hif_softc *scn = HIF_GET_SOFTC(osc);
4832     struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
4833     uint32_t ctrl_addr = CE_state->ctrl_addr;
4834 
4835     Q_TARGET_ACCESS_BEGIN(scn);
4836     CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
4837     Q_TARGET_ACCESS_END(scn);
4838 }
4839 qdf_export_symbol(hif_disable_interrupt);
4840 
4841 /**
4842  * hif_fw_event_handler() - hif fw event handler
4843  * @hif_state: pointer to hif ce state structure
4844  *
4845  * Process fw events and raise HTC callback to process fw events.
4846  *
4847  * Return: none
4848  */
4849 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
4850 {
4851     struct hif_msg_callbacks *msg_callbacks =
4852         &hif_state->msg_callbacks_current;
4853 
4854     if (!msg_callbacks->fwEventHandler)
4855         return;
4856 
4857     msg_callbacks->fwEventHandler(msg_callbacks->Context,
4858                                   QDF_STATUS_E_FAILURE);
4859 }
4860 
4861 #ifndef QCA_WIFI_3_0
4862 /**
4863  * hif_fw_interrupt_handler() - FW interrupt handler
4864  * @irq: irq number
4865  * @arg: the user pointer
4866  *
4867  * Called from the PCI interrupt handler when the Target raises
4868  * a firmware-generated interrupt to the Host.
4869  *
4870  * Only registered for legacy CE devices.
4871  *
4872  * Return: status of handled irq
4873  */
4874 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
4875 {
4876     struct hif_softc *scn = arg;
4877     struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4878     uint32_t fw_indicator_address, fw_indicator;
4879 
4880     if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
4881         return ATH_ISR_NOSCHED;
4882 
4883     fw_indicator_address = hif_state->fw_indicator_address;
4884     /* For sudden unplug this will return ~0 */
4885     fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
4886 
4887     if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
4888         /* ACK: clear Target-side pending event */
4889         A_TARGET_WRITE(scn, fw_indicator_address,
4890                        fw_indicator & ~FW_IND_EVENT_PENDING);
4891         if (Q_TARGET_ACCESS_END(scn) < 0)
4892             return ATH_ISR_SCHED;
4893 
4894         if (hif_state->started) {
4895             hif_fw_event_handler(hif_state);
4896         } else {
4897             /*
4898              * Probable Target failure before we're prepared
4899              * to handle it. Generally unexpected.
4900              * fw_indicator used as bitmap, and defined as below:
4901              *     FW_IND_EVENT_PENDING    0x1
4902              *     FW_IND_INITIALIZED      0x2
4903              *     FW_IND_NEEDRECOVER      0x4
4904              */
4905             AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
4906                             ("%s: Early firmware event indicated 0x%x\n",
4907                              __func__, fw_indicator));
4908         }
4909     } else {
4910         if (Q_TARGET_ACCESS_END(scn) < 0)
4911             return ATH_ISR_SCHED;
4912     }
4913 
4914     return ATH_ISR_SCHED;
4915 }
4916 #else
4917 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
4918 {
4919     return ATH_ISR_SCHED;
4920 }
4921 #endif /* #ifndef QCA_WIFI_3_0 */
4922 
4923 
4924 /**
4925  * hif_wlan_disable(): call the platform driver to disable wlan
4926  * @scn: HIF Context
4927  *
4928  * This function passes the con_mode to the platform driver to
4929  * disable wlan.
4930  *
4931  * Return: void
4932  */
4933 void hif_wlan_disable(struct hif_softc *scn)
4934 {
4935     enum pld_driver_mode mode;
4936     uint32_t con_mode = hif_get_conparam(scn);
4937 
4938     if (scn->target_status == TARGET_STATUS_RESET)
4939         return;
4940 
4941     if (QDF_GLOBAL_FTM_MODE == con_mode)
4942         mode = PLD_FTM;
4943     else if (QDF_IS_EPPING_ENABLED(con_mode))
4944         mode = PLD_EPPING;
4945     else
4946         mode = PLD_MISSION;
4947 
4948     pld_wlan_disable(scn->qdf_dev->dev, mode);
4949 }
4950 
4951 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
4952 {
4953     int status;
4954     uint8_t ul_pipe, dl_pipe;
4955     int ul_is_polled, dl_is_polled;
4956 
4957     /* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
4958     status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
4959                                      HTC_CTRL_RSVD_SVC,
4960                                      &ul_pipe, &dl_pipe,
4961                                      &ul_is_polled, &dl_is_polled);
4962     if (status) {
4963         hif_err("Failed to map pipe: %d", status);
4964         return status;
4965     }
4966 
4967     *ce_id = dl_pipe;
4968 
4969     return 0;
4970 }
4971 
4972 int hif_get_fw_diag_ce_id(struct hif_softc *scn, uint8_t *ce_id)
4973 {
4974     int status;
4975     uint8_t ul_pipe, dl_pipe;
4976     int ul_is_polled, dl_is_polled;
4977 
4978     /* DL pipe for WMI_CONTROL_DIAG_SVC should map to the FW DIAG CE_ID */
4979     status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
4980                                      WMI_CONTROL_DIAG_SVC,
4981                                      &ul_pipe, &dl_pipe,
4982                                      &ul_is_polled, &dl_is_polled);
4983     if (status) {
4984         hif_err("Failed to map pipe: %d", status);
4985         return status;
4986     }
4987 
4988     *ce_id = dl_pipe;
4989 
4990     return 0;
4991 }
4992 
4993 #ifdef HIF_CE_LOG_INFO
4994 /**
4995  * ce_get_index_info(): Get CE index info
4996  * @scn: HIF Context
4997  * @ce_state: CE opaque handle
4998  * @info: CE info
4999  *
5000  * Return: 0 for success and nonzero for failure
5001  */
5002 static
5003 int ce_get_index_info(struct hif_softc *scn, void *ce_state,
5004                       struct ce_index *info)
5005 {
5006     struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
5007 
5008     return hif_state->ce_services->ce_get_index_info(scn, ce_state, info);
5009 }
5010 
5011 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
5012                      unsigned int *offset)
5013 {
5014     struct hang_event_info info = {0};
5015     static uint32_t tracked_ce = BIT(CE_ID_1) | BIT(CE_ID_2) |
5016         BIT(CE_ID_3) | BIT(CE_ID_4) | BIT(CE_ID_9) | BIT(CE_ID_10);
5017     uint8_t curr_index = 0;
5018     uint8_t i;
5019     uint16_t size;
5020 
5021     info.active_tasklet_count = qdf_atomic_read(&scn->active_tasklet_cnt);
5022     info.active_grp_tasklet_cnt =
5023         qdf_atomic_read(&scn->active_grp_tasklet_cnt);
5024 
5025     for (i = 0; i < scn->ce_count; i++) {
5026         if (!(tracked_ce & BIT(i)) || !scn->ce_id_to_state[i])
5027             continue;
5028 
5029         if (ce_get_index_info(scn, scn->ce_id_to_state[i],
5030
&info.ce_info[curr_index])) 5031 continue; 5032 5033 curr_index++; 5034 } 5035 5036 info.ce_count = curr_index; 5037 size = sizeof(info) - 5038 (CE_COUNT_MAX - info.ce_count) * sizeof(struct ce_index); 5039 5040 if (*offset + size > QDF_WLAN_HANG_FW_OFFSET) 5041 return; 5042 5043 QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_CE_INFO, 5044 size - QDF_HANG_EVENT_TLV_HDR_SIZE); 5045 5046 qdf_mem_copy(data + *offset, &info, size); 5047 *offset = *offset + size; 5048 } 5049 #endif 5050
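
/*
 * Worked example (illustrative only; the concrete values are assumed
 * for the sake of the arithmetic): hif_log_ce_info() above trims the
 * unused tail of the fixed-size ce_info[CE_COUNT_MAX] array before
 * emitting the hang-event TLV:
 *
 *        size = sizeof(info) -
 *               (CE_COUNT_MAX - info.ce_count) * sizeof(struct ce_index);
 *
 * If, say, CE_COUNT_MAX were 12, six tracked CEs were captured and
 * sizeof(struct ce_index) were 8, then (12 - 6) * 8 = 48 bytes of
 * unused tail would be dropped. The record is skipped entirely when
 * *offset + size would cross QDF_WLAN_HANG_FW_OFFSET, and the TLV
 * header length excludes QDF_HANG_EVENT_TLV_HDR_SIZE.
 */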