/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#include "qdf_module.h"

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"
#ifdef HIF_CE_LOG_INFO
#include "qdf_hang_event_notifier.h"
#endif

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
	defined(QCA_WIFI_QCA6018) || defined(QCA_WIFI_QCA5018) || \
	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCA5332) || \
	defined(QCA_WIFI_QCA9574)) && !defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

#ifdef QCA_WIFI_SUPPORT_SRNG
#include <hal_api.h>
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived, rather
 * than waiting for an interrupt that may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef ENABLE_10_4_FW_HDR
#if (ENABLE_10_4_FW_HDR == 1)
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR == 1 */
#endif /* ENABLE_10_4_FW_HDR */

static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump access log
 *
 * dump access log
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif

/*
 * This structure contains the interrupt index for each Copy engine
 * for various numbers of MSIs available in the system.
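 *
 * The entry matching the number of MSIs actually granted is used when
 * configuring CE interrupts; e.g. per the "2 MSI combination" entry below,
 * CEs 0, 2, 4 and 6 (and CEs 7-11) are serviced on interrupt index 0 while
 * CEs 1, 3 and 5 are serviced on interrupt index 1.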
 */
static struct ce_int_assignment ce_int_context[NUM_CE_CONTEXT] = {
	/* Default configuration */
	{{ CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(2),
	   CE_INTERRUPT_IDX(3),
	   CE_INTERRUPT_IDX(4),
	   CE_INTERRUPT_IDX(5),
	   CE_INTERRUPT_IDX(6),
	   CE_INTERRUPT_IDX(7),
	   CE_INTERRUPT_IDX(8),
	   CE_INTERRUPT_IDX(9),
	   CE_INTERRUPT_IDX(10),
	   CE_INTERRUPT_IDX(11),
#ifdef QCA_WIFI_QCN9224
	   CE_INTERRUPT_IDX(12),
	   CE_INTERRUPT_IDX(13),
	   CE_INTERRUPT_IDX(14),
	   CE_INTERRUPT_IDX(15),
#endif
	} },
	/* Interrupt assignment for 1 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 2 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 3 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(2),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 4 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(2),
	   CE_INTERRUPT_IDX(3),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 5 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(2),
	   CE_INTERRUPT_IDX(3),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(4),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 6 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(2),
	   CE_INTERRUPT_IDX(3),
	   CE_INTERRUPT_IDX(4),
	   CE_INTERRUPT_IDX(5),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 7 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(2),
	   CE_INTERRUPT_IDX(3),
	   CE_INTERRUPT_IDX(4),
	   CE_INTERRUPT_IDX(5),
	   CE_INTERRUPT_IDX(6),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 8 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(2),
	   CE_INTERRUPT_IDX(3),
	   CE_INTERRUPT_IDX(4),
	   CE_INTERRUPT_IDX(5),
	   CE_INTERRUPT_IDX(6),
	   CE_INTERRUPT_IDX(7),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 9 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(2),
	   CE_INTERRUPT_IDX(3),
	   CE_INTERRUPT_IDX(4),
	   CE_INTERRUPT_IDX(5),
	   CE_INTERRUPT_IDX(6),
	   CE_INTERRUPT_IDX(7),
	   CE_INTERRUPT_IDX(8),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 10 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(2),
	   CE_INTERRUPT_IDX(3),
	   CE_INTERRUPT_IDX(4),
	   CE_INTERRUPT_IDX(5),
	   CE_INTERRUPT_IDX(6),
	   CE_INTERRUPT_IDX(7),
	   CE_INTERRUPT_IDX(8),
	   CE_INTERRUPT_IDX(9),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 11 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(2),
	   CE_INTERRUPT_IDX(3),
	   CE_INTERRUPT_IDX(4),
	   CE_INTERRUPT_IDX(5),
	   CE_INTERRUPT_IDX(6),
	   CE_INTERRUPT_IDX(7),
	   CE_INTERRUPT_IDX(8),
	   CE_INTERRUPT_IDX(9),
	   CE_INTERRUPT_IDX(10),
	   CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 12 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(2),
	   CE_INTERRUPT_IDX(3),
	   CE_INTERRUPT_IDX(4),
	   CE_INTERRUPT_IDX(5),
	   CE_INTERRUPT_IDX(6),
	   CE_INTERRUPT_IDX(7),
	   CE_INTERRUPT_IDX(8),
	   CE_INTERRUPT_IDX(9),
	   CE_INTERRUPT_IDX(10),
	   CE_INTERRUPT_IDX(11),
#ifdef QCA_WIFI_QCN9224
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
#endif
	} },
#ifdef QCA_WIFI_QCN9224
	/* Interrupt assignment for 13 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(2),
	   CE_INTERRUPT_IDX(3),
	   CE_INTERRUPT_IDX(4),
	   CE_INTERRUPT_IDX(5),
	   CE_INTERRUPT_IDX(6),
	   CE_INTERRUPT_IDX(7),
	   CE_INTERRUPT_IDX(8),
	   CE_INTERRUPT_IDX(9),
	   CE_INTERRUPT_IDX(10),
	   CE_INTERRUPT_IDX(11),
	   CE_INTERRUPT_IDX(12),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	} },
	/* Interrupt assignment for 14 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(2),
	   CE_INTERRUPT_IDX(3),
	   CE_INTERRUPT_IDX(4),
	   CE_INTERRUPT_IDX(5),
	   CE_INTERRUPT_IDX(6),
	   CE_INTERRUPT_IDX(7),
	   CE_INTERRUPT_IDX(8),
	   CE_INTERRUPT_IDX(9),
	   CE_INTERRUPT_IDX(10),
	   CE_INTERRUPT_IDX(11),
	   CE_INTERRUPT_IDX(12),
	   CE_INTERRUPT_IDX(13),
	   CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(0),
	} },
	/* Interrupt assignment for 15 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(2),
	   CE_INTERRUPT_IDX(3),
	   CE_INTERRUPT_IDX(4),
	   CE_INTERRUPT_IDX(5),
	   CE_INTERRUPT_IDX(6),
	   CE_INTERRUPT_IDX(7),
	   CE_INTERRUPT_IDX(8),
	   CE_INTERRUPT_IDX(9),
	   CE_INTERRUPT_IDX(10),
	   CE_INTERRUPT_IDX(11),
	   CE_INTERRUPT_IDX(12),
	   CE_INTERRUPT_IDX(13),
	   CE_INTERRUPT_IDX(14),
	   CE_INTERRUPT_IDX(0),
	} },
	/* Interrupt assignment for 16 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	   CE_INTERRUPT_IDX(1),
	   CE_INTERRUPT_IDX(2),
	   CE_INTERRUPT_IDX(3),
	   CE_INTERRUPT_IDX(4),
	   CE_INTERRUPT_IDX(5),
	   CE_INTERRUPT_IDX(6),
	   CE_INTERRUPT_IDX(7),
	   CE_INTERRUPT_IDX(8),
	   CE_INTERRUPT_IDX(9),
	   CE_INTERRUPT_IDX(10),
	   CE_INTERRUPT_IDX(11),
	   CE_INTERRUPT_IDX(12),
	   CE_INTERRUPT_IDX(13),
	   CE_INTERRUPT_IDX(14),
	   CE_INTERRUPT_IDX(15),
	} },
#endif
};

void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		hif_err("Invalid htc dump command: %d", cmd_id);
		break;
	}
}

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

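/*
 * Descriptive note (added for clarity): each shadow_reg_cfg entry above, and
 * in the QCN7605/epping variants below, pairs a CE pipe number with the
 * offset of the shadow register that mirrors its ring write index -
 * ADRASTEA_SRC_WR_INDEX_OFFSET for source (host->target) rings and
 * ADRASTEA_DST_WR_INDEX_OFFSET for destination (target->host) rings. For
 * example, { 0, ADRASTEA_SRC_WR_INDEX_OFFSET } shadows the CE 0 source ring
 * write index. See struct shadow_reg_cfg for the authoritative field meanings.
 */
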
#ifdef QCN7605_SUPPORT
static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

#ifdef WLAN_FEATURE_EPPING
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
   ============================================================================
   Purpose     | Service / Endpoint    | CE   | Dire | Xfer     | Xfer
               |                       |      | ctio | Size     | Frequency
               |                       |      | n    |          |
   ============================================================================
   tx          | HTT_DATA (downlink)   | CE 0 | h->t | medium - | very frequent
   descriptor  |                       |      |      | O(100B)  | and regular
   download    |                       |      |      |          |
   ----------------------------------------------------------------------------
   rx          | HTT_DATA (uplink)     | CE 1 | t->h | small -  | frequent and
   indication  |                       |      |      | O(10B)   | regular
   upload      |                       |      |      |          |
   ----------------------------------------------------------------------------
   MSDU        | DATA_BK (uplink)      | CE 2 | t->h | large -  | rare
   upload      |                       |      |      | O(1000B) | (frequent
   e.g. noise  |                       |      |      |          | during IP1.0
   packets     |                       |      |      |          | testing)
   ----------------------------------------------------------------------------
   MSDU        | DATA_BK (downlink)    | CE 3 | h->t | large -  | very rare
   download    |                       |      |      | O(1000B) | (frequent
   e.g.        |                       |      |      |          | during IP1.0
   misdirected |                       |      |      |          | testing)
   EAPOL       |                       |      |      |          |
   packets     |                       |      |      |          |
   ----------------------------------------------------------------------------
   n/a         | DATA_BE, DATA_VI      | CE 2 | t->h |          | never(?)
               | DATA_VO (uplink)      |      |      |          |
   ----------------------------------------------------------------------------
   n/a         | DATA_BE, DATA_VI      | CE 3 | h->t |          | never(?)
               | DATA_VO (downlink)    |      |      |          |
   ----------------------------------------------------------------------------
   WMI events  | WMI_CONTROL (uplink)  | CE 4 | t->h | medium - | infrequent
               |                       |      |      | O(100B)  |
   ----------------------------------------------------------------------------
   WMI         | WMI_CONTROL           | CE 5 | h->t | medium - | infrequent
   messages    | (downlink)            |      |      | O(100B)  |
               |                       |      |      |          |
   ----------------------------------------------------------------------------
   n/a         | HTC_CTRL_RSVD,        | CE 1 | t->h |          | never(?)
               | HTC_RAW_STREAMS       |      |      |          |
               | (uplink)              |      |      |          |
   ----------------------------------------------------------------------------
   n/a         | HTC_CTRL_RSVD,        | CE 0 | h->t |          | never(?)
               | HTC_RAW_STREAMS       |      |      |          |
               | (downlink)            |      |      |          |
   ----------------------------------------------------------------------------
   diag        | none (raw CE)         | CE 7 | t<>h | 4        | Diag Window
               |                       |      |      |          | infrequent
   ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN = TARGET to HOST */
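/*
 * Descriptive note (added for clarity): each service_to_pipe entry in the
 * maps below reads { service id, direction, CE pipe number }; e.g.
 * { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3 } carries host->target VO data over CE 3.
 * hif_map_service_to_pipe() performs the host-side lookup of a service id to
 * its UL/DL pipe pair from whichever map hif_select_service_to_pipe_map()
 * selects for the target in use.
 */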
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif

#if (defined(QCA_WIFI_QCA9574))
static struct service_to_pipe target_service_to_ce_map_qca9574[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca9574[] = {
};
#endif

#if (defined(QCA_WIFI_QCA8074V2))
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9, },
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6018))
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
};
#endif

#if (defined(QCA_WIFI_QCN9000))
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
};
#endif

#if (defined(QCA_WIFI_QCA5332))
static struct service_to_pipe target_service_to_ce_map_qca5332[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 9, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca5332[] = {
};
#endif

#if (defined(QCA_WIFI_QCN9224))
static struct service_to_pipe target_service_to_ce_map_qcn9224[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 14, },
	{ WMI_CONTROL_DBR_SVC, PIPEDIR_IN, 14, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif

#if (defined(QCA_WIFI_QCA5018))
static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
};
#endif

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN = TARGET to HOST */
#ifdef QCN7605_SUPPORT
static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
#ifdef IPA_OFFLOAD
	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
#else
	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
#endif
	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif

#if (defined(QCA_WIFI_QCA6290))
#ifdef QCA_6290_AP_MODE
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6390))
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_qca6490[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};

#if (defined(QCA_WIFI_QCA6750))
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
#endif
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
};
#endif

#if (defined(QCA_WIFI_KIWI))
static struct service_to_pipe target_service_to_ce_map_kiwi[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
#ifdef FEATURE_XPAN
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 4, },
#else
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
#endif
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
#endif
#ifdef FEATURE_XPAN
	{ LPASS_DATA_MSG_SVC, PIPEDIR_OUT, 0, },
	{ LPASS_DATA_MSG_SVC, PIPEDIR_IN, 5, },
#endif
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_kiwi[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
#ifdef WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},                             /* Must be last */
};

void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
	*sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
}
#endif

#ifdef QCN7605_SUPPORT
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
}
#else
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	hif_err("QCN7605 not supported");
}
#endif

#ifdef QCA_WIFI_QCN9224
static
void hif_set_ce_config_qcn9224(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	hif_state->host_ce_config = host_ce_config_wlan_qcn9224;
	hif_state->target_ce_config = target_ce_config_wlan_qcn9224;
	hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qcn9224);
	scn->ce_count = QCN_9224_CE_COUNT;
	scn->disable_wake_irq = 1;
}

static
void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_qcn9224;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn9224);
}
#else
static inline
void hif_set_ce_config_qcn9224(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	hif_err("QCN9224 not supported");
}

static inline
void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	hif_err("QCN9224 not supported");
}
#endif

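/**
 * hif_select_service_to_pipe_map() - select the service-to-CE map to use
 * @scn: HIF context
 * @tgt_svc_map_to_use: address at which to return the selected map
 * @sz_tgt_svc_map_to_use: address at which to return the map size in bytes
 *
 * Descriptive note (added for clarity): picks the epping map when epping
 * mode is enabled, otherwise selects the per-target-type map defined above,
 * and caches the choice in the HIF CE state (hif_state->tgt_svc_map).
 *
 * Return: none
 */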
static void hif_select_service_to_pipe_map(struct hif_softc *scn,
				   struct service_to_pipe **tgt_svc_map_to_use,
				   uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
						      sz_tgt_svc_map_to_use);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_QCN7605:
			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
						  sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA6390:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6390);
			break;
		case TARGET_TYPE_QCA6490:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6490;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6490);
			break;
		case TARGET_TYPE_QCA6750:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6750;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6750);
			break;
		case TARGET_TYPE_KIWI:
		case TARGET_TYPE_MANGO:
			*tgt_svc_map_to_use = target_service_to_ce_map_kiwi;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_kiwi);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		case TARGET_TYPE_QCA8074V2:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca8074_v2;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074_v2);
			break;
		case TARGET_TYPE_QCA9574:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca9574;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca9574);
			break;
		case TARGET_TYPE_QCA6018:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca6018;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6018);
			break;
		case TARGET_TYPE_QCN9000:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qcn9000;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qcn9000);
			break;
		case TARGET_TYPE_QCN9224:
			hif_select_ce_map_qcn9224(tgt_svc_map_to_use,
						  sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_QCA5332:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca5332;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca5332);
			break;
		case TARGET_TYPE_QCA5018:
		case TARGET_TYPE_QCN6122:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca5018;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca5018);
			break;
		}
	}
	hif_state->tgt_svc_map = *tgt_svc_map_to_use;
	hif_state->sz_tgt_svc_map = *sz_tgt_svc_map_to_use /
					sizeof(struct service_to_pipe);
}

/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 *   Sets the htt_rx_data/htt_tx_data attribute of the state structure if
 *   the CE serves one of the HTT DATA services.
 *
 * Return:
 *  true if the CE serves an HTT DATA service (attribute set)
 *  false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	bool rc = false;

	if (ce_state) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

/**
 * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
 * @hif_ctx: hif opaque handle
 *
 * Description:
 *   Gets the number of WMI EPs configured in the target svc map. Since the
 *   EP map includes both IN and OUT direction pipes, only OUT pipes are
 *   counted to get the number of EPs configured for the WMI service.
 *
 * Return:
 *  uint8_t: count of WMI EPs in the target svc map
 */
uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	uint8_t wmi_ep_count = 0;

	hif_select_service_to_pipe_map(scn, &svc_map,
				       &map_sz);
	map_len = map_sz / sizeof(struct service_to_pipe);

	for (i = 0; i < map_len; i++) {
		/* Count number of WMI EPs based on out direction */
		if ((svc_map[i].pipedir == PIPEDIR_OUT) &&
		    ((svc_map[i].service_id == WMI_CONTROL_SVC) ||
		     (svc_map[i].service_id == WMI_CONTROL_SVC_WMAC1) ||
		     (svc_map[i].service_id == WMI_CONTROL_SVC_WMAC2))) {
			wmi_ep_count++;
		}
	}

	return wmi_ep_count;
}

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		hif_err("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries to be allocated
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (!scn->ipa_ce_ring) {
			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
				scn->qdf_dev,
				nentries * desc_size + CE_DESC_RING_ALIGN);
			if (!scn->ipa_ce_ring) {
				hif_err(
				"Failed to allocate memory for IPA ce ring");
				return QDF_STATUS_E_NOMEM;
			}
		}
		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
						  &scn->ipa_ce_ring->mem_info);
		ce_ring->base_addr_owner_space_unaligned =
						scn->ipa_ce_ring->vaddr;
	} else {
		ce_ring->base_addr_owner_space_unaligned =
			hif_mem_alloc_consistent_unaligned
					(scn,
					 (nentries * desc_size +
					  CE_DESC_RING_ALIGN),
					 base_addr,
					 ce_ring->hal_ring_type,
					 &ce_ring->is_ring_prealloc);

		if (!ce_ring->base_addr_owner_space_unaligned) {
			hif_err("Failed to allocate DMA memory for ce ring id: %u",
				CE_id);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (scn->ipa_ce_ring) {
			qdf_mem_shared_mem_free(scn->qdf_dev,
						scn->ipa_ce_ring);
			scn->ipa_ce_ring = NULL;
		}
		ce_ring->base_addr_owner_space_unaligned = NULL;
	} else {
		hif_mem_free_consistent_unaligned
			(scn,
			 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
			 ce_ring->base_addr_owner_space_unaligned,
			 ce_ring->base_addr_CE_space, 0,
			 ce_ring->is_ring_prealloc);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	}
}
#else
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	ce_ring->base_addr_owner_space_unaligned =
		hif_mem_alloc_consistent_unaligned
				(scn,
				 (nentries * desc_size +
				  CE_DESC_RING_ALIGN),
				 base_addr,
				 ce_ring->hal_ring_type,
				 &ce_ring->is_ring_prealloc);

	if (!ce_ring->base_addr_owner_space_unaligned) {
		hif_err("Failed to allocate DMA memory for ce ring id: %u",
			CE_id);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	hif_mem_free_consistent_unaligned
		(scn,
		 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
		 ce_ring->base_addr_owner_space_unaligned,
		 ce_ring->base_addr_CE_space, 0,
		 ce_ring->is_ring_prealloc);
	ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */

/*
 * TODO: Need to explore the possibility of having this as part of a
 * target context instead of a global array.
 */
static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);

void ce_service_register_module(enum ce_target_type target_type,
				struct ce_ops* (*ce_attach)(void))
{
	if (target_type < CE_MAX_TARGET_TYPE)
		ce_attach_register[target_type] = ce_attach;
}

qdf_export_symbol(ce_service_register_module);

/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the HIF context
 *
 * Description:
 *   returns true if the target is SRNG based
 *
 * Return:
 *  true if the target uses SRNG-based copy engines
 *  false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCN9000:
	case TARGET_TYPE_QCN6122:
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
	case TARGET_TYPE_QCN9224:
	case TARGET_TYPE_QCA9574:
	case TARGET_TYPE_QCA5332:
		return true;
	default:
		return false;
	}
	return false;
}
qdf_export_symbol(ce_srng_based);

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	struct ce_ops *ops = NULL;

	if (ce_srng_based(scn)) {
		if (ce_attach_register[CE_SVC_SRNG])
			ops = ce_attach_register[CE_SVC_SRNG]();
	} else if (ce_attach_register[CE_SVC_LEGACY]) {
		ops = ce_attach_register[CE_SVC_LEGACY]();
	}

	return ops;
}

#else /* QCA_LITHIUM */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	if (ce_attach_register[CE_SVC_LEGACY])
		return ce_attach_register[CE_SVC_LEGACY]();

	return NULL;
}
#endif /* QCA_LITHIUM */

static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured) {
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
			scn, shadow_config, num_shadow_registers_configured);

	return;
}

#ifdef CONFIG_SHADOW_V3
static inline void
hif_prepare_hal_shadow_reg_cfg_v3(struct hif_softc *scn,
				  struct pld_wlan_enable_cfg *cfg)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (!hif_state->ce_services->ce_prepare_shadow_register_v3_cfg)
		return;

	hif_state->ce_services->ce_prepare_shadow_register_v3_cfg(
			scn, &cfg->shadow_reg_v3_cfg,
			&cfg->num_shadow_reg_v3_cfg);
}
#else
static inline void
hif_prepare_hal_shadow_reg_cfg_v3(struct hif_softc *scn,
				  struct pld_wlan_enable_cfg *cfg)
{
}
#endif

static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
					uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_desc_size(ring_type);
}

#ifdef QCA_WIFI_SUPPORT_SRNG
static inline int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
{
	switch (ce_ring_type) {
	case CE_RING_SRC:
		return CE_SRC;
	case CE_RING_DEST:
		return CE_DST;
	case CE_RING_STATUS:
		return CE_DST_STATUS;
	default:
		return -EINVAL;
	}
}
#else
static int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
{
	return 0;
}
#endif

static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
		uint8_t ring_type, uint32_t nentries)
{
	uint32_t ce_nbytes;
	char *ptr;
	qdf_dma_addr_t base_addr;
	struct CE_ring_state *ce_ring;
	uint32_t desc_size;
	struct hif_softc *scn = CE_state->scn;

	ce_nbytes = sizeof(struct CE_ring_state)
		+ (nentries * sizeof(void *));
	ptr = qdf_mem_malloc(ce_nbytes);
	if (!ptr)
		return NULL;

	ce_ring = (struct CE_ring_state *)ptr;
	ptr += sizeof(struct CE_ring_state);
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	ce_ring->low_water_mark_nentries = 0;
	ce_ring->high_water_mark_nentries = nentries;
	ce_ring->per_transfer_context = (void **)ptr;
	ce_ring->hal_ring_type = ce_ring_type_to_hal_ring_type(ring_type);

	desc_size = ce_get_desc_size(scn, ring_type);

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
			       ce_ring, nentries,
			       desc_size) !=
	    QDF_STATUS_SUCCESS) {
		hif_err("ring has no DMA mem");
		qdf_mem_free(ce_ring);
		return NULL;
	}
	ce_ring->base_addr_CE_space_unaligned = base_addr;

	/* Correctly initialize memory to 0 to
	 * prevent garbage data from crashing the system
	 * when downloading firmware
	 */
	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
		     nentries * desc_size +
		     CE_DESC_RING_ALIGN);

	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {

		ce_ring->base_addr_CE_space =
			(ce_ring->base_addr_CE_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

		ce_ring->base_addr_owner_space = (void *)
			(((size_t)ce_ring->base_addr_owner_space_unaligned +
			  CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
	} else {
		ce_ring->base_addr_CE_space =
				ce_ring->base_addr_CE_space_unaligned;
		ce_ring->base_addr_owner_space =
				ce_ring->base_addr_owner_space_unaligned;
	}

	return ce_ring;
}

static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
			 uint32_t ce_id, struct CE_ring_state *ring,
			 struct CE_attr *attr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
						     ring, attr);
}

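/**
 * ce_srng_cleanup() - clean up the given CE ring via the CE service layer
 * @scn: HIF context
 * @CE_state: CE state whose ring is being cleaned up
 * @ring_type: CE_RING_SRC/CE_RING_DEST/CE_RING_STATUS ring to clean up
 *
 * Descriptive comment added for clarity: forwards the cleanup to the
 * registered service's ce_srng_cleanup callback when one is provided.
 *
 * Return: none
 */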
CE_state *CE_state, 1890 uint8_t ring_type) 1891 { 1892 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1893 1894 if (hif_state->ce_services->ce_srng_cleanup) 1895 hif_state->ce_services->ce_srng_cleanup(scn, 1896 CE_state, ring_type); 1897 } 1898 1899 int hif_ce_bus_early_suspend(struct hif_softc *scn) 1900 { 1901 uint8_t ul_pipe, dl_pipe; 1902 int ce_id, status, ul_is_polled, dl_is_polled; 1903 struct CE_state *ce_state; 1904 1905 status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC, 1906 &ul_pipe, &dl_pipe, 1907 &ul_is_polled, &dl_is_polled); 1908 if (status) { 1909 hif_err("pipe_mapping failure"); 1910 return status; 1911 } 1912 1913 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 1914 if (ce_id == ul_pipe) 1915 continue; 1916 if (ce_id == dl_pipe) 1917 continue; 1918 1919 ce_state = scn->ce_id_to_state[ce_id]; 1920 qdf_spin_lock_bh(&ce_state->ce_index_lock); 1921 if (ce_state->state == CE_RUNNING) 1922 ce_state->state = CE_PAUSED; 1923 qdf_spin_unlock_bh(&ce_state->ce_index_lock); 1924 } 1925 1926 return status; 1927 } 1928 1929 int hif_ce_bus_late_resume(struct hif_softc *scn) 1930 { 1931 int ce_id; 1932 struct CE_state *ce_state; 1933 int write_index = 0; 1934 bool index_updated; 1935 1936 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 1937 ce_state = scn->ce_id_to_state[ce_id]; 1938 qdf_spin_lock_bh(&ce_state->ce_index_lock); 1939 if (ce_state->state == CE_PENDING) { 1940 write_index = ce_state->src_ring->write_index; 1941 CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr, 1942 write_index); 1943 ce_state->state = CE_RUNNING; 1944 index_updated = true; 1945 } else { 1946 index_updated = false; 1947 } 1948 1949 if (ce_state->state == CE_PAUSED) 1950 ce_state->state = CE_RUNNING; 1951 qdf_spin_unlock_bh(&ce_state->ce_index_lock); 1952 1953 if (index_updated) 1954 hif_record_ce_desc_event(scn, ce_id, 1955 RESUME_WRITE_INDEX_UPDATE, 1956 NULL, NULL, write_index, 0); 1957 } 1958 1959 return 0; 1960 } 1961 1962 /** 1963 * ce_oom_recovery() - try to recover rx ce from oom condition 1964 * @context: CE_state of the CE with oom rx ring 1965 * 1966 * the executing work Will continue to be rescheduled until 1967 * at least 1 descriptor is successfully posted to the rx ring. 1968 * 1969 * return: none 1970 */ 1971 static void ce_oom_recovery(void *context) 1972 { 1973 struct CE_state *ce_state = context; 1974 struct hif_softc *scn = ce_state->scn; 1975 struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn); 1976 struct HIF_CE_pipe_info *pipe_info = 1977 &ce_softc->pipe_info[ce_state->id]; 1978 1979 hif_post_recv_buffers_for_pipe(pipe_info); 1980 } 1981 1982 #ifdef HIF_CE_DEBUG_DATA_BUF 1983 /** 1984 * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by 1985 * the CE descriptors. 
1986  * Allocate HIF_CE_HISTORY_MAX records by CE_DEBUG_MAX_DATA_BUF_SIZE
1987  * @scn: hif scn handle
1988  * @ce_id: Copy Engine Id
1989  *
1990  * Return: QDF_STATUS
1991  */
1992 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1993 {
1994 	struct hif_ce_desc_event *event = NULL;
1995 	struct hif_ce_desc_event *hist_ev = NULL;
1996 	uint32_t index = 0;
1997
1998 	hist_ev =
1999 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
2000
2001 	if (!hist_ev)
2002 		return QDF_STATUS_E_NOMEM;
2003
2004 	scn->hif_ce_desc_hist.data_enable[ce_id] = true;
2005 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
2006 		event = &hist_ev[index];
2007 		event->data =
2008 			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
2009 		if (!event->data) {
2010 			hif_err_rl("ce debug data alloc failed");
2011 			scn->hif_ce_desc_hist.data_enable[ce_id] = false;
2012 			return QDF_STATUS_E_NOMEM;
2013 		}
2014 	}
2015 	return QDF_STATUS_SUCCESS;
2016 }
2017
2018 /**
2019  * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
2020  * the CE descriptors.
2021  * @scn: hif scn handle
2022  * @ce_id: Copy Engine Id
2023  *
2024  * Return: None
2025  */
2026 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
2027 {
2028 	struct hif_ce_desc_event *event = NULL;
2029 	struct hif_ce_desc_event *hist_ev = NULL;
2030 	uint32_t index = 0;
2031
2032 	hist_ev =
2033 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
2034
2035 	if (!hist_ev)
2036 		return;
2037
2038 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
2039 		event = &hist_ev[index];
2040 		if (event->data)
2041 			qdf_mem_free(event->data);
2042 		event->data = NULL;
2043 		event = NULL;
2044 	}
2045
2046 }
2047 #endif /* HIF_CE_DEBUG_DATA_BUF */
2048
2049 #ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF
2050 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2051
2052 /* define below variables for crashscope parse */
2053 struct hif_ce_desc_event *hif_ce_desc_history[CE_COUNT_MAX];
2054 uint32_t hif_ce_history_max = HIF_CE_HISTORY_MAX;
2055
2056 /**
2057  * A debug build enables the CE descriptor history for every CE, but a
2058  * perf build (CONFIG_SLUB_DEBUG_ON is N) only enables it for the
2059  * critical CEs: ce2 (wmi event), ce3 (wmi cmd) and ce7.
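 *
 * For example (illustrative, based on the definitions just below): on a
 * perf build CE_DESC_HISTORY_BUFF_CNT is 3, so the three critical CEs are
 * mapped through ce_id_hist_map[] onto hif_ce_desc_history_buff[0..2]
 * instead of being indexed directly by their CE id, and
 * alloc_mem_ce_debug_history() skips history setup for every other CE.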
2060 */ 2061 #if defined(CONFIG_SLUB_DEBUG_ON) 2062 #define CE_DESC_HISTORY_BUFF_CNT CE_COUNT_MAX 2063 #define IS_CE_DEBUG_ONLY_FOR_CRIT_CE 0 2064 #else 2065 /* CE2, CE3, CE7 */ 2066 #define CE_DESC_HISTORY_BUFF_CNT 3 2067 #define IS_CE_DEBUG_ONLY_FOR_CRIT_CE (BIT(2) | BIT(3) | BIT(7)) 2068 #endif 2069 struct hif_ce_desc_event 2070 hif_ce_desc_history_buff[CE_DESC_HISTORY_BUFF_CNT][HIF_CE_HISTORY_MAX]; 2071 2072 static struct hif_ce_desc_event * 2073 hif_ce_debug_history_buf_get(struct hif_softc *scn, unsigned int ce_id) 2074 { 2075 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; 2076 2077 hif_debug("get ce debug buffer ce_id %u, only_ce2/ce3=0x%x, idx=%u", 2078 ce_id, IS_CE_DEBUG_ONLY_FOR_CRIT_CE, 2079 ce_hist->ce_id_hist_map[ce_id]); 2080 if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE && 2081 (ce_id == CE_ID_2 || ce_id == CE_ID_3 || ce_id == CE_ID_7)) { 2082 uint8_t idx = ce_hist->ce_id_hist_map[ce_id]; 2083 2084 hif_ce_desc_history[ce_id] = hif_ce_desc_history_buff[idx]; 2085 } else { 2086 hif_ce_desc_history[ce_id] = 2087 hif_ce_desc_history_buff[ce_id]; 2088 } 2089 2090 return hif_ce_desc_history[ce_id]; 2091 } 2092 2093 /** 2094 * alloc_mem_ce_debug_history() - Allocate CE descriptor history 2095 * @scn: hif scn handle 2096 * @ce_id: Copy Engine Id 2097 * @src_nentries: source ce ring entries 2098 * Return: QDF_STATUS 2099 */ 2100 static QDF_STATUS 2101 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id, 2102 uint32_t src_nentries) 2103 { 2104 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; 2105 QDF_STATUS status = QDF_STATUS_SUCCESS; 2106 2107 /* For perf build, return directly for non ce2/ce3 */ 2108 if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE && 2109 ce_id != CE_ID_2 && 2110 ce_id != CE_ID_3 && 2111 ce_id != CE_ID_7) { 2112 ce_hist->enable[ce_id] = false; 2113 ce_hist->data_enable[ce_id] = false; 2114 return QDF_STATUS_SUCCESS; 2115 } 2116 2117 ce_hist->hist_ev[ce_id] = hif_ce_debug_history_buf_get(scn, ce_id); 2118 ce_hist->enable[ce_id] = true; 2119 2120 if (src_nentries) { 2121 status = alloc_mem_ce_debug_hist_data(scn, ce_id); 2122 if (status != QDF_STATUS_SUCCESS) { 2123 ce_hist->enable[ce_id] = false; 2124 ce_hist->hist_ev[ce_id] = NULL; 2125 return status; 2126 } 2127 } else { 2128 ce_hist->data_enable[ce_id] = false; 2129 } 2130 2131 return QDF_STATUS_SUCCESS; 2132 } 2133 2134 /** 2135 * free_mem_ce_debug_history() - Free CE descriptor history 2136 * @scn: hif scn handle 2137 * @ce_id: Copy Engine Id 2138 * 2139 * Return: None 2140 */ 2141 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id) 2142 { 2143 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; 2144 2145 if (!ce_hist->enable[ce_id]) 2146 return; 2147 2148 ce_hist->enable[ce_id] = false; 2149 if (ce_hist->data_enable[ce_id]) { 2150 ce_hist->data_enable[ce_id] = false; 2151 free_mem_ce_debug_hist_data(scn, ce_id); 2152 } 2153 ce_hist->hist_ev[ce_id] = NULL; 2154 } 2155 #else 2156 static inline QDF_STATUS 2157 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id, 2158 uint32_t src_nentries) 2159 { 2160 return QDF_STATUS_SUCCESS; 2161 } 2162 2163 static inline void 2164 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { } 2165 #endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */ 2166 #else 2167 #if defined(HIF_CE_DEBUG_DATA_BUF) 2168 2169 static QDF_STATUS 2170 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id, 2171 uint32_t src_nentries) 2172 { 2173 scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct 
hif_ce_desc_event *) 2174 qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event)); 2175 2176 if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) { 2177 scn->hif_ce_desc_hist.enable[CE_id] = 0; 2178 return QDF_STATUS_E_NOMEM; 2179 } else { 2180 scn->hif_ce_desc_hist.enable[CE_id] = 1; 2181 return QDF_STATUS_SUCCESS; 2182 } 2183 } 2184 2185 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) 2186 { 2187 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; 2188 struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id]; 2189 2190 if (!hist_ev) 2191 return; 2192 2193 if (ce_hist->data_enable[CE_id]) { 2194 ce_hist->data_enable[CE_id] = false; 2195 free_mem_ce_debug_hist_data(scn, CE_id); 2196 } 2197 2198 ce_hist->enable[CE_id] = false; 2199 qdf_mem_free(ce_hist->hist_ev[CE_id]); 2200 ce_hist->hist_ev[CE_id] = NULL; 2201 } 2202 2203 #else 2204 2205 static inline QDF_STATUS 2206 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id, 2207 uint32_t src_nentries) 2208 { 2209 return QDF_STATUS_SUCCESS; 2210 } 2211 2212 static inline void 2213 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { } 2214 #endif /* HIF_CE_DEBUG_DATA_BUF */ 2215 #endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */ 2216 2217 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) 2218 /** 2219 * reset_ce_debug_history() - reset the index and ce id used for dumping the 2220 * CE records on the console using sysfs. 2221 * @scn: hif scn handle 2222 * 2223 * Return: 2224 */ 2225 static inline void reset_ce_debug_history(struct hif_softc *scn) 2226 { 2227 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; 2228 /* Initialise the CE debug history sysfs interface inputs ce_id and 2229 * index. Disable data storing 2230 */ 2231 ce_hist->hist_index = 0; 2232 ce_hist->hist_id = 0; 2233 } 2234 #else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */ 2235 static inline void reset_ce_debug_history(struct hif_softc *scn) { } 2236 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */ 2237 2238 void ce_enable_polling(void *cestate) 2239 { 2240 struct CE_state *CE_state = (struct CE_state *)cestate; 2241 2242 if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL) 2243 CE_state->timer_inited = true; 2244 } 2245 2246 void ce_disable_polling(void *cestate) 2247 { 2248 struct CE_state *CE_state = (struct CE_state *)cestate; 2249 2250 if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL) 2251 CE_state->timer_inited = false; 2252 } 2253 2254 /* 2255 * Initialize a Copy Engine based on caller-supplied attributes. 2256 * This may be called once to initialize both source and destination 2257 * rings or it may be called twice for separate source and destination 2258 * initialization. It may be that only one side or the other is 2259 * initialized by software/firmware. 2260 * 2261 * This should be called during the initialization sequence before 2262 * interrupts are enabled, so we don't have to worry about thread safety. 
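 *
 * As an illustrative example of the split: a host-to-target pipe typically
 * supplies only attr->src_nentries, a target-to-host pipe only
 * attr->dest_nentries, and ce_init() below allocates just the rings that
 * were requested (plus a status ring on SRNG based targets).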
2263 */ 2264 struct CE_handle *ce_init(struct hif_softc *scn, 2265 unsigned int CE_id, struct CE_attr *attr) 2266 { 2267 struct CE_state *CE_state; 2268 uint32_t ctrl_addr; 2269 unsigned int nentries; 2270 bool malloc_CE_state = false; 2271 bool malloc_src_ring = false; 2272 int status; 2273 QDF_STATUS mem_status = QDF_STATUS_SUCCESS; 2274 2275 QDF_ASSERT(CE_id < scn->ce_count); 2276 ctrl_addr = CE_BASE_ADDRESS(CE_id); 2277 CE_state = scn->ce_id_to_state[CE_id]; 2278 2279 if (!CE_state) { 2280 CE_state = 2281 (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state)); 2282 if (!CE_state) 2283 return NULL; 2284 2285 malloc_CE_state = true; 2286 qdf_spinlock_create(&CE_state->ce_index_lock); 2287 #ifdef CE_TASKLET_SCHEDULE_ON_FULL 2288 qdf_spinlock_create(&CE_state->ce_interrupt_lock); 2289 #endif 2290 2291 CE_state->id = CE_id; 2292 CE_state->ctrl_addr = ctrl_addr; 2293 CE_state->state = CE_RUNNING; 2294 CE_state->attr_flags = attr->flags; 2295 } 2296 CE_state->scn = scn; 2297 CE_state->service = ce_engine_service_reg; 2298 2299 qdf_atomic_init(&CE_state->rx_pending); 2300 if (!attr) { 2301 /* Already initialized; caller wants the handle */ 2302 return (struct CE_handle *)CE_state; 2303 } 2304 2305 if (CE_state->src_sz_max) 2306 QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max); 2307 else 2308 CE_state->src_sz_max = attr->src_sz_max; 2309 2310 ce_init_ce_desc_event_log(scn, CE_id, 2311 attr->src_nentries + attr->dest_nentries); 2312 2313 /* source ring setup */ 2314 nentries = attr->src_nentries; 2315 if (nentries) { 2316 struct CE_ring_state *src_ring; 2317 2318 nentries = roundup_pwr2(nentries); 2319 if (CE_state->src_ring) { 2320 QDF_ASSERT(CE_state->src_ring->nentries == nentries); 2321 } else { 2322 src_ring = CE_state->src_ring = 2323 ce_alloc_ring_state(CE_state, 2324 CE_RING_SRC, 2325 nentries); 2326 if (!src_ring) { 2327 /* cannot allocate src ring. If the 2328 * CE_state is allocated locally free 2329 * CE_State and return error. 2330 */ 2331 hif_err("src ring has no mem"); 2332 if (malloc_CE_state) { 2333 /* allocated CE_state locally */ 2334 qdf_mem_free(CE_state); 2335 malloc_CE_state = false; 2336 } 2337 return NULL; 2338 } 2339 /* we can allocate src ring. Mark that the src ring is 2340 * allocated locally 2341 */ 2342 malloc_src_ring = true; 2343 2344 /* 2345 * Also allocate a shadow src ring in 2346 * regular mem to use for faster access. 2347 */ 2348 src_ring->shadow_base_unaligned = 2349 qdf_mem_malloc(nentries * 2350 sizeof(struct CE_src_desc) + 2351 CE_DESC_RING_ALIGN); 2352 if (!src_ring->shadow_base_unaligned) 2353 goto error_no_dma_mem; 2354 2355 src_ring->shadow_base = (struct CE_src_desc *) 2356 (((size_t) src_ring->shadow_base_unaligned + 2357 CE_DESC_RING_ALIGN - 1) & 2358 ~(CE_DESC_RING_ALIGN - 1)); 2359 2360 status = ce_ring_setup(scn, CE_RING_SRC, CE_id, 2361 src_ring, attr); 2362 if (status < 0) 2363 goto error_target_access; 2364 2365 ce_ring_test_initial_indexes(CE_id, src_ring, 2366 "src_ring"); 2367 } 2368 } 2369 2370 /* destination ring setup */ 2371 nentries = attr->dest_nentries; 2372 if (nentries) { 2373 struct CE_ring_state *dest_ring; 2374 2375 nentries = roundup_pwr2(nentries); 2376 if (CE_state->dest_ring) { 2377 QDF_ASSERT(CE_state->dest_ring->nentries == nentries); 2378 } else { 2379 dest_ring = CE_state->dest_ring = 2380 ce_alloc_ring_state(CE_state, 2381 CE_RING_DEST, 2382 nentries); 2383 if (!dest_ring) { 2384 /* cannot allocate dst ring. If the CE_state 2385 * or src ring is allocated locally free 2386 * CE_State and src ring and return error. 
2387 */ 2388 hif_err("dest ring has no mem"); 2389 goto error_no_dma_mem; 2390 } 2391 2392 status = ce_ring_setup(scn, CE_RING_DEST, CE_id, 2393 dest_ring, attr); 2394 if (status < 0) 2395 goto error_target_access; 2396 2397 ce_ring_test_initial_indexes(CE_id, dest_ring, 2398 "dest_ring"); 2399 2400 /* For srng based target, init status ring here */ 2401 if (ce_srng_based(CE_state->scn)) { 2402 CE_state->status_ring = 2403 ce_alloc_ring_state(CE_state, 2404 CE_RING_STATUS, 2405 nentries); 2406 if (!CE_state->status_ring) { 2407 /*Allocation failed. Cleanup*/ 2408 qdf_mem_free(CE_state->dest_ring); 2409 if (malloc_src_ring) { 2410 qdf_mem_free 2411 (CE_state->src_ring); 2412 CE_state->src_ring = NULL; 2413 malloc_src_ring = false; 2414 } 2415 if (malloc_CE_state) { 2416 /* allocated CE_state locally */ 2417 scn->ce_id_to_state[CE_id] = 2418 NULL; 2419 qdf_mem_free(CE_state); 2420 malloc_CE_state = false; 2421 } 2422 2423 return NULL; 2424 } 2425 2426 status = ce_ring_setup(scn, CE_RING_STATUS, 2427 CE_id, CE_state->status_ring, 2428 attr); 2429 if (status < 0) 2430 goto error_target_access; 2431 2432 } 2433 2434 /* epping */ 2435 /* poll timer */ 2436 if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) { 2437 qdf_timer_init(scn->qdf_dev, 2438 &CE_state->poll_timer, 2439 ce_poll_timeout, 2440 CE_state, 2441 QDF_TIMER_TYPE_WAKE_APPS); 2442 ce_enable_polling(CE_state); 2443 qdf_timer_mod(&CE_state->poll_timer, 2444 CE_POLL_TIMEOUT); 2445 } 2446 } 2447 } 2448 2449 if (!ce_srng_based(scn)) { 2450 /* Enable CE error interrupts */ 2451 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 2452 goto error_target_access; 2453 CE_ERROR_INTR_ENABLE(scn, ctrl_addr); 2454 if (Q_TARGET_ACCESS_END(scn) < 0) 2455 goto error_target_access; 2456 } 2457 2458 qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work, 2459 ce_oom_recovery, CE_state); 2460 2461 /* update the htt_data attribute */ 2462 ce_mark_datapath(CE_state); 2463 scn->ce_id_to_state[CE_id] = CE_state; 2464 2465 mem_status = alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries); 2466 if (mem_status != QDF_STATUS_SUCCESS) 2467 goto error_target_access; 2468 2469 return (struct CE_handle *)CE_state; 2470 2471 error_target_access: 2472 error_no_dma_mem: 2473 ce_fini((struct CE_handle *)CE_state); 2474 return NULL; 2475 } 2476 2477 /** 2478 * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs 2479 * @hif_ctx: HIF Context 2480 * 2481 * API to check if polling is enabled on all CEs. Returns true when polling 2482 * is enabled on all CEs. 
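 * A CE is only considered here if it has a destination ring
 * (attr->dest_nentries != 0), and it counts as polled when its attribute
 * flags include CE_ATTR_ENABLE_POLL.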
2483  *
2484  * Return: bool
2485  */
2486 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
2487 {
2488 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2489 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2490 	struct CE_attr *attr;
2491 	int id;
2492
2493 	for (id = 0; id < scn->ce_count; id++) {
2494 		attr = &hif_state->host_ce_config[id];
2495 		if (attr && (attr->dest_nentries) &&
2496 		    !(attr->flags & CE_ATTR_ENABLE_POLL))
2497 			return false;
2498 	}
2499 	return true;
2500 }
2501 qdf_export_symbol(hif_is_polled_mode_enabled);
2502
2503 static int hif_get_pktlog_ce_num(struct hif_softc *scn)
2504 {
2505 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2506 	int id;
2507
2508 	for (id = 0; id < hif_state->sz_tgt_svc_map; id++) {
2509 		if (hif_state->tgt_svc_map[id].service_id == PACKET_LOG_SVC)
2510 			return hif_state->tgt_svc_map[id].pipenum;
2511 	}
2512 	return -EINVAL;
2513 }
2514
2515 #ifdef WLAN_FEATURE_FASTPATH
2516 /**
2517  * hif_enable_fastpath() - Update that we have enabled fastpath mode
2518  * @hif_ctx: HIF context
2519  *
2520  * For use in data path
2521  *
2522  * Return: void
2523  */
2524 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
2525 {
2526 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2527
2528 	if (ce_srng_based(scn)) {
2529 		hif_warn("srng rings do not support fastpath");
2530 		return;
2531 	}
2532 	hif_debug("Enabling fastpath mode");
2533 	scn->fastpath_mode_on = true;
2534 }
2535
2536 /**
2537  * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
2538  * @hif_ctx: HIF Context
2539  *
2540  * For use in data path to skip HTC
2541  *
2542  * Return: bool
2543  */
2544 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
2545 {
2546 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2547
2548 	return scn->fastpath_mode_on;
2549 }
2550
2551 /**
2552  * hif_get_ce_handle - API to get CE handle for FastPath mode
2553  * @hif_ctx: HIF Context
2554  * @id: CopyEngine Id
2555  *
2556  * API to return CE handle for fastpath mode
2557  *
2558  * Return: CE handle for the given CE id
2559  */
2560 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
2561 {
2562 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2563
2564 	return scn->ce_id_to_state[id];
2565 }
2566 qdf_export_symbol(hif_get_ce_handle);
2567
2568 /**
2569  * ce_h2t_tx_ce_cleanup() - Placeholder function for H2T CE cleanup.
2570  * @ce_hdl: Copy engine handle
2571  *
2572  * No processing is required inside this function. Using an assert, it
2573  * makes sure that the TX CE has been processed completely.
2574  *
2575  * This is called while dismantling CE structures. No other thread
2576  * should be using these structures while dismantling is occurring,
2577  * therefore no locking is needed.
2578  *
2579  * Return: none
2580  */
2581 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
2582 {
2583 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
2584 	struct CE_ring_state *src_ring = ce_state->src_ring;
2585 	struct hif_softc *sc = ce_state->scn;
2586 	uint32_t sw_index, write_index;
2587
2588 	if (hif_is_nss_wifi_enabled(sc))
2589 		return;
2590
2591 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
2592 		hif_debug("Fastpath mode ON, Cleaning up HTT Tx CE");
2593 		sw_index = src_ring->sw_index;
2594 		write_index = src_ring->write_index;
2595
2596 		/* At this point Tx CE should be clean */
2597 		qdf_assert_always(sw_index == write_index);
2598 	}
2599 }
2600
2601 /**
2602  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
2603 * @ce_hdl: Handle to CE 2604 * 2605 * These buffers are never allocated on the fly, but 2606 * are allocated only once during HIF start and freed 2607 * only once during HIF stop. 2608 * NOTE: 2609 * The assumption here is there is no in-flight DMA in progress 2610 * currently, so that buffers can be freed up safely. 2611 * 2612 * Return: NONE 2613 */ 2614 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl) 2615 { 2616 struct CE_state *ce_state = (struct CE_state *)ce_hdl; 2617 struct CE_ring_state *dst_ring = ce_state->dest_ring; 2618 qdf_nbuf_t nbuf; 2619 int i; 2620 2621 if (ce_state->scn->fastpath_mode_on == false) 2622 return; 2623 2624 if (!ce_state->htt_rx_data) 2625 return; 2626 2627 /* 2628 * when fastpath_mode is on and for datapath CEs. Unlike other CE's, 2629 * this CE is completely full: does not leave one blank space, to 2630 * distinguish between empty queue & full queue. So free all the 2631 * entries. 2632 */ 2633 for (i = 0; i < dst_ring->nentries; i++) { 2634 nbuf = dst_ring->per_transfer_context[i]; 2635 2636 /* 2637 * The reasons for doing this check are: 2638 * 1) Protect against calling cleanup before allocating buffers 2639 * 2) In a corner case, FASTPATH_mode_on may be set, but we 2640 * could have a partially filled ring, because of a memory 2641 * allocation failure in the middle of allocating ring. 2642 * This check accounts for that case, checking 2643 * fastpath_mode_on flag or started flag would not have 2644 * covered that case. This is not in performance path, 2645 * so OK to do this. 2646 */ 2647 if (nbuf) { 2648 qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf, 2649 QDF_DMA_FROM_DEVICE); 2650 qdf_nbuf_free(nbuf); 2651 } 2652 } 2653 } 2654 2655 /** 2656 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1 2657 * @scn: HIF handle 2658 * 2659 * Datapath Rx CEs are special case, where we reuse all the message buffers. 2660 * Hence we have to post all the entries in the pipe, even, in the beginning 2661 * unlike for other CE pipes where one less than dest_nentries are filled in 2662 * the beginning. 
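 * For example (illustrative): with dest_nentries = 512, a regular pipe is
 * started with 511 receive buffers posted, while a fastpath HTT Rx pipe
 * gets the extra increment below so that all 512 entries are filled.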
2663 * 2664 * Return: None 2665 */ 2666 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn) 2667 { 2668 int pipe_num; 2669 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2670 2671 if (scn->fastpath_mode_on == false) 2672 return; 2673 2674 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2675 struct HIF_CE_pipe_info *pipe_info = 2676 &hif_state->pipe_info[pipe_num]; 2677 struct CE_state *ce_state = 2678 scn->ce_id_to_state[pipe_info->pipe_num]; 2679 2680 if (ce_state->htt_rx_data) 2681 atomic_inc(&pipe_info->recv_bufs_needed); 2682 } 2683 } 2684 #else 2685 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn) 2686 { 2687 } 2688 2689 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn) 2690 { 2691 return false; 2692 } 2693 #endif /* WLAN_FEATURE_FASTPATH */ 2694 2695 void ce_fini(struct CE_handle *copyeng) 2696 { 2697 struct CE_state *CE_state = (struct CE_state *)copyeng; 2698 unsigned int CE_id = CE_state->id; 2699 struct hif_softc *scn = CE_state->scn; 2700 uint32_t desc_size; 2701 2702 bool inited = CE_state->timer_inited; 2703 CE_state->state = CE_UNUSED; 2704 scn->ce_id_to_state[CE_id] = NULL; 2705 /* Set the flag to false first to stop processing in ce_poll_timeout */ 2706 ce_disable_polling(CE_state); 2707 2708 qdf_lro_deinit(CE_state->lro_data); 2709 2710 if (CE_state->src_ring) { 2711 /* Cleanup the datapath Tx ring */ 2712 ce_h2t_tx_ce_cleanup(copyeng); 2713 2714 desc_size = ce_get_desc_size(scn, CE_RING_SRC); 2715 if (CE_state->src_ring->shadow_base_unaligned) 2716 qdf_mem_free(CE_state->src_ring->shadow_base_unaligned); 2717 if (CE_state->src_ring->base_addr_owner_space_unaligned) 2718 ce_free_desc_ring(scn, CE_state->id, 2719 CE_state->src_ring, 2720 desc_size); 2721 ce_srng_cleanup(scn, CE_state, CE_RING_SRC); 2722 qdf_mem_free(CE_state->src_ring); 2723 } 2724 if (CE_state->dest_ring) { 2725 /* Cleanup the datapath Rx ring */ 2726 ce_t2h_msg_ce_cleanup(copyeng); 2727 2728 desc_size = ce_get_desc_size(scn, CE_RING_DEST); 2729 if (CE_state->dest_ring->base_addr_owner_space_unaligned) 2730 ce_free_desc_ring(scn, CE_state->id, 2731 CE_state->dest_ring, 2732 desc_size); 2733 ce_srng_cleanup(scn, CE_state, CE_RING_DEST); 2734 qdf_mem_free(CE_state->dest_ring); 2735 2736 /* epping */ 2737 if (inited) { 2738 qdf_timer_free(&CE_state->poll_timer); 2739 } 2740 } 2741 if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) { 2742 /* Cleanup the datapath Tx ring */ 2743 ce_h2t_tx_ce_cleanup(copyeng); 2744 2745 if (CE_state->status_ring->shadow_base_unaligned) 2746 qdf_mem_free( 2747 CE_state->status_ring->shadow_base_unaligned); 2748 2749 desc_size = ce_get_desc_size(scn, CE_RING_STATUS); 2750 if (CE_state->status_ring->base_addr_owner_space_unaligned) 2751 ce_free_desc_ring(scn, CE_state->id, 2752 CE_state->status_ring, 2753 desc_size); 2754 ce_srng_cleanup(scn, CE_state, CE_RING_STATUS); 2755 qdf_mem_free(CE_state->status_ring); 2756 } 2757 2758 free_mem_ce_debug_history(scn, CE_id); 2759 reset_ce_debug_history(scn); 2760 ce_deinit_ce_desc_event_log(scn, CE_id); 2761 2762 qdf_spinlock_destroy(&CE_state->ce_index_lock); 2763 #ifdef CE_TASKLET_SCHEDULE_ON_FULL 2764 qdf_spinlock_destroy(&CE_state->ce_interrupt_lock); 2765 #endif 2766 qdf_mem_free(CE_state); 2767 } 2768 2769 void hif_detach_htc(struct hif_opaque_softc *hif_ctx) 2770 { 2771 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 2772 2773 qdf_mem_zero(&hif_state->msg_callbacks_pending, 2774 sizeof(hif_state->msg_callbacks_pending)); 2775 
qdf_mem_zero(&hif_state->msg_callbacks_current, 2776 sizeof(hif_state->msg_callbacks_current)); 2777 } 2778 2779 /* Send the first nbytes bytes of the buffer */ 2780 QDF_STATUS 2781 hif_send_head(struct hif_opaque_softc *hif_ctx, 2782 uint8_t pipe, unsigned int transfer_id, unsigned int nbytes, 2783 qdf_nbuf_t nbuf, unsigned int data_attr) 2784 { 2785 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2786 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 2787 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); 2788 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 2789 int bytes = nbytes, nfrags = 0; 2790 struct ce_sendlist sendlist; 2791 int i = 0; 2792 QDF_STATUS status; 2793 unsigned int mux_id = 0; 2794 2795 if (nbytes > qdf_nbuf_len(nbuf)) { 2796 hif_err("nbytes: %d nbuf_len: %d", nbytes, 2797 (uint32_t)qdf_nbuf_len(nbuf)); 2798 QDF_ASSERT(0); 2799 } 2800 2801 transfer_id = 2802 (mux_id & MUX_ID_MASK) | 2803 (transfer_id & TRANSACTION_ID_MASK); 2804 data_attr &= DESC_DATA_FLAG_MASK; 2805 /* 2806 * The common case involves sending multiple fragments within a 2807 * single download (the tx descriptor and the tx frame header). 2808 * So, optimize for the case of multiple fragments by not even 2809 * checking whether it's necessary to use a sendlist. 2810 * The overhead of using a sendlist for a single buffer download 2811 * is not a big deal, since it happens rarely (for WMI messages). 2812 */ 2813 ce_sendlist_init(&sendlist); 2814 do { 2815 qdf_dma_addr_t frag_paddr; 2816 int frag_bytes; 2817 2818 frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags); 2819 frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags); 2820 /* 2821 * Clear the packet offset for all but the first CE desc. 2822 */ 2823 if (i++ > 0) 2824 data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M; 2825 2826 status = ce_sendlist_buf_add(&sendlist, frag_paddr, 2827 frag_bytes > 2828 bytes ? bytes : frag_bytes, 2829 qdf_nbuf_get_frag_is_wordstream 2830 (nbuf, 2831 nfrags) ? 0 : 2832 CE_SEND_FLAG_SWAP_DISABLE, 2833 data_attr); 2834 if (status != QDF_STATUS_SUCCESS) { 2835 hif_err("frag_num: %d larger than limit (status=%d)", 2836 nfrags, status); 2837 return status; 2838 } 2839 bytes -= frag_bytes; 2840 nfrags++; 2841 } while (bytes > 0); 2842 2843 /* Make sure we have resources to handle this request */ 2844 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock); 2845 if (pipe_info->num_sends_allowed < nfrags) { 2846 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); 2847 ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE); 2848 return QDF_STATUS_E_RESOURCES; 2849 } 2850 pipe_info->num_sends_allowed -= nfrags; 2851 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); 2852 2853 if (qdf_unlikely(!ce_hdl)) { 2854 hif_err("CE handle is null"); 2855 return A_ERROR; 2856 } 2857 2858 QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF); 2859 DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD, 2860 QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf), 2861 sizeof(qdf_nbuf_data(nbuf)), QDF_TX)); 2862 status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id); 2863 QDF_ASSERT(status == QDF_STATUS_SUCCESS); 2864 2865 return status; 2866 } 2867 2868 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe, 2869 int force) 2870 { 2871 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2872 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 2873 2874 if (!force) { 2875 int resources; 2876 /* 2877 * Decide whether to actually poll for completions, or just 2878 * wait for a later chance. 
If there seem to be plenty of
2879 		 * resources left, then just wait, since checking involves
2880 		 * reading a CE register, which is a relatively expensive
2881 		 * operation.
2882 		 */
2883 		resources = hif_get_free_queue_number(hif_ctx, pipe);
2884 		/*
2885 		 * If at least 50% of the total resources are still available,
2886 		 * don't bother checking again yet.
2887 		 */
2888 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
2889 				 1))
2890 			return;
2891 	}
2892 #ifdef ATH_11AC_TXCOMPACT
2893 	ce_per_engine_servicereap(scn, pipe);
2894 #else
2895 	ce_per_engine_service(scn, pipe);
2896 #endif
2897 }
2898
2899 #if defined(CE_TASKLET_SCHEDULE_ON_FULL) && defined(CE_TASKLET_DEBUG_ENABLE)
2900 #define CE_RING_FULL_THRESHOLD_TIME 3000000
2901 #define CE_RING_FULL_THRESHOLD 1024
2902 /* This function is called from the htc_send path. If there is no resource to
2903  * send a packet via HTC, check whether interrupts from that CE have gone
2904  * unprocessed for the last 3 seconds. If so, schedule a tasklet to reap the
2905  * available entries. The tasklet is also scheduled if the queue has reached
2906  * 1024 entries within those 3 seconds.
2907  */
2908 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2909 {
2910 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2911 	int64_t diff_time = qdf_get_log_timestamp_usecs() -
2912 			hif_state->stats.tasklet_sched_entry_ts[pipe];
2913
2914 	hif_state->stats.ce_ring_full_count[pipe]++;
2915
2916 	if (diff_time >= CE_RING_FULL_THRESHOLD_TIME ||
2917 	    hif_state->stats.ce_ring_full_count[pipe] >=
2918 	    CE_RING_FULL_THRESHOLD) {
2919 		hif_state->stats.ce_ring_full_count[pipe] = 0;
2920 		hif_state->stats.ce_manual_tasklet_schedule_count[pipe]++;
2921 		hif_state->stats.ce_last_manual_tasklet_schedule_ts[pipe] =
2922 				qdf_get_log_timestamp_usecs();
2923 		ce_dispatch_interrupt(pipe, &hif_state->tasklets[pipe]);
2924 	}
2925 }
2926 #else
2927 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2928 {
2929 }
2930 #endif
2931
2932 uint16_t
2933 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2934 {
2935 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2936 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2937 	uint16_t rv;
2938
2939 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2940 	rv = pipe_info->num_sends_allowed;
2941 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2942 	return rv;
2943 }
2944
2945 /* Called by lower (CE) layer when a send to Target completes. */
2946 static void
2947 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
2948 		     void *transfer_context, qdf_dma_addr_t CE_data,
2949 		     unsigned int nbytes, unsigned int transfer_id,
2950 		     unsigned int sw_index, unsigned int hw_index,
2951 		     unsigned int toeplitz_hash_result)
2952 {
2953 	struct HIF_CE_pipe_info *pipe_info =
2954 		(struct HIF_CE_pipe_info *)ce_context;
2955 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
2956 	struct hif_msg_callbacks *msg_callbacks =
2957 		&pipe_info->pipe_callbacks;
2958
2959 	do {
2960 		/*
2961 		 * The upper layer callback will be triggered
2962 		 * when the last fragment is completed.
2963 */ 2964 if (transfer_context != CE_SENDLIST_ITEM_CTXT) 2965 msg_callbacks->txCompletionHandler( 2966 msg_callbacks->Context, 2967 transfer_context, transfer_id, 2968 toeplitz_hash_result); 2969 2970 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock); 2971 pipe_info->num_sends_allowed++; 2972 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); 2973 } while (ce_completed_send_next(copyeng, 2974 &ce_context, &transfer_context, 2975 &CE_data, &nbytes, &transfer_id, 2976 &sw_idx, &hw_idx, 2977 &toeplitz_hash_result) == QDF_STATUS_SUCCESS); 2978 } 2979 2980 /** 2981 * hif_ce_do_recv(): send message from copy engine to upper layers 2982 * @msg_callbacks: structure containing callback and callback context 2983 * @netbuff: skb containing message 2984 * @nbytes: number of bytes in the message 2985 * @pipe_info: used for the pipe_number info 2986 * 2987 * Checks the packet length, configures the length in the netbuff, 2988 * and calls the upper layer callback. 2989 * 2990 * return: None 2991 */ 2992 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks, 2993 qdf_nbuf_t netbuf, int nbytes, 2994 struct HIF_CE_pipe_info *pipe_info) { 2995 if (nbytes <= pipe_info->buf_sz) { 2996 qdf_nbuf_set_pktlen(netbuf, nbytes); 2997 msg_callbacks-> 2998 rxCompletionHandler(msg_callbacks->Context, 2999 netbuf, pipe_info->pipe_num); 3000 } else { 3001 hif_err("Invalid Rx msg buf: %pK nbytes: %d", netbuf, nbytes); 3002 qdf_nbuf_free(netbuf); 3003 } 3004 } 3005 3006 /* Called by lower (CE) layer when data is received from the Target. */ 3007 static void 3008 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context, 3009 void *transfer_context, qdf_dma_addr_t CE_data, 3010 unsigned int nbytes, unsigned int transfer_id, 3011 unsigned int flags) 3012 { 3013 struct HIF_CE_pipe_info *pipe_info = 3014 (struct HIF_CE_pipe_info *)ce_context; 3015 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state; 3016 struct CE_state *ce_state = (struct CE_state *) copyeng; 3017 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 3018 struct hif_msg_callbacks *msg_callbacks = &pipe_info->pipe_callbacks; 3019 3020 do { 3021 hif_rtpm_mark_last_busy(HIF_RTPM_ID_CE); 3022 qdf_nbuf_unmap_single(scn->qdf_dev, 3023 (qdf_nbuf_t) transfer_context, 3024 QDF_DMA_FROM_DEVICE); 3025 3026 atomic_inc(&pipe_info->recv_bufs_needed); 3027 hif_post_recv_buffers_for_pipe(pipe_info); 3028 if (scn->target_status == TARGET_STATUS_RESET) 3029 qdf_nbuf_free(transfer_context); 3030 else 3031 hif_ce_do_recv(msg_callbacks, transfer_context, 3032 nbytes, pipe_info); 3033 3034 /* Set up force_break flag if num of receices reaches 3035 * MAX_NUM_OF_RECEIVES 3036 */ 3037 ce_state->receive_count++; 3038 if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) { 3039 ce_state->force_break = 1; 3040 break; 3041 } 3042 } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context, 3043 &CE_data, &nbytes, &transfer_id, 3044 &flags) == QDF_STATUS_SUCCESS); 3045 3046 } 3047 3048 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */ 3049 3050 void 3051 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused, 3052 struct hif_msg_callbacks *callbacks) 3053 { 3054 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 3055 3056 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG 3057 spin_lock_init(&pcie_access_log_lock); 3058 #endif 3059 /* Save callbacks for later installation */ 3060 qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks, 3061 sizeof(hif_state->msg_callbacks_pending)); 3062 3063 } 3064 3065 
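/*
 * Illustrative sketch (not compiled, guarded by #if 0): how an upper layer
 * would typically hand its completion handlers to HIF. hif_post_init() only
 * saves them as "pending"; they are installed and the per-pipe send/recv
 * callbacks are wired up when hif_start() runs. The wrapper name below is
 * hypothetical and not part of this file's API.
 */
#if 0
static QDF_STATUS example_hif_bring_up(struct hif_opaque_softc *hif_ctx,
				       struct hif_msg_callbacks *cbs)
{
	/* Stash the tx/rx completion handlers until hif_start() installs them */
	hif_post_init(hif_ctx, NULL, cbs);

	/* Installs the pending callbacks, registers the per-pipe CE callbacks
	 * and posts the initial receive buffers.
	 */
	return hif_start(hif_ctx);
}
#endif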
static int hif_completion_thread_startup_by_ceid(struct HIF_CE_state *hif_state, 3066 int pipe_num) 3067 { 3068 struct CE_attr attr; 3069 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 3070 struct hif_msg_callbacks *hif_msg_callbacks = 3071 &hif_state->msg_callbacks_current; 3072 struct HIF_CE_pipe_info *pipe_info; 3073 struct CE_state *ce_state; 3074 3075 if (pipe_num >= CE_COUNT_MAX) 3076 return -EINVAL; 3077 3078 pipe_info = &hif_state->pipe_info[pipe_num]; 3079 ce_state = scn->ce_id_to_state[pipe_num]; 3080 3081 if (!hif_msg_callbacks || 3082 !hif_msg_callbacks->rxCompletionHandler || 3083 !hif_msg_callbacks->txCompletionHandler) { 3084 hif_err("%s: no completion handler registered", __func__); 3085 return -EFAULT; 3086 } 3087 3088 attr = hif_state->host_ce_config[pipe_num]; 3089 if (attr.src_nentries) { 3090 /* pipe used to send to target */ 3091 hif_debug("%s: pipe_num:%d pipe_info:0x%pK\n", 3092 __func__, pipe_num, pipe_info); 3093 ce_send_cb_register(pipe_info->ce_hdl, 3094 hif_pci_ce_send_done, pipe_info, 3095 attr.flags & CE_ATTR_DISABLE_INTR); 3096 pipe_info->num_sends_allowed = attr.src_nentries - 1; 3097 } 3098 if (attr.dest_nentries) { 3099 hif_debug("%s: pipe_num:%d pipe_info:0x%pK\n", 3100 __func__, pipe_num, pipe_info); 3101 /* pipe used to receive from target */ 3102 ce_recv_cb_register(pipe_info->ce_hdl, 3103 hif_pci_ce_recv_data, pipe_info, 3104 attr.flags & CE_ATTR_DISABLE_INTR); 3105 } 3106 3107 if (attr.src_nentries) 3108 qdf_spinlock_create(&pipe_info->completion_freeq_lock); 3109 3110 if (!(ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND)) 3111 qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks, 3112 sizeof(pipe_info->pipe_callbacks)); 3113 3114 return 0; 3115 } 3116 3117 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state) 3118 { 3119 struct CE_handle *ce_diag = hif_state->ce_diag; 3120 int pipe_num, ret; 3121 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 3122 3123 /* daemonize("hif_compl_thread"); */ 3124 3125 if (scn->ce_count == 0) { 3126 hif_err("ce_count is 0"); 3127 return -EINVAL; 3128 } 3129 3130 3131 A_TARGET_ACCESS_LIKELY(scn); 3132 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 3133 struct HIF_CE_pipe_info *pipe_info; 3134 3135 pipe_info = &hif_state->pipe_info[pipe_num]; 3136 if (pipe_info->ce_hdl == ce_diag) 3137 continue; /* Handle Diagnostic CE specially */ 3138 3139 ret = hif_completion_thread_startup_by_ceid(hif_state, 3140 pipe_num); 3141 if (ret < 0) 3142 return ret; 3143 3144 } 3145 3146 A_TARGET_ACCESS_UNLIKELY(scn); 3147 return 0; 3148 } 3149 3150 /* 3151 * Install pending msg callbacks. 3152 * 3153 * TBDXXX: This hack is needed because upper layers install msg callbacks 3154 * for use with HTC before BMI is done; yet this HIF implementation 3155 * needs to continue to use BMI msg callbacks. Really, upper layers 3156 * should not register HTC callbacks until AFTER BMI phase. 
3157 */ 3158 static void hif_msg_callbacks_install(struct hif_softc *scn) 3159 { 3160 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3161 3162 qdf_mem_copy(&hif_state->msg_callbacks_current, 3163 &hif_state->msg_callbacks_pending, 3164 sizeof(hif_state->msg_callbacks_pending)); 3165 } 3166 3167 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe, 3168 uint8_t *DLPipe) 3169 { 3170 int ul_is_polled, dl_is_polled; 3171 3172 (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC, 3173 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled); 3174 } 3175 3176 /** 3177 * hif_dump_pipe_debug_count() - Log error count 3178 * @scn: hif_softc pointer. 3179 * 3180 * Output the pipe error counts of each pipe to log file 3181 * 3182 * Return: N/A 3183 */ 3184 void hif_dump_pipe_debug_count(struct hif_softc *scn) 3185 { 3186 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3187 int pipe_num; 3188 3189 if (!hif_state) { 3190 hif_err("hif_state is NULL"); 3191 return; 3192 } 3193 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 3194 struct HIF_CE_pipe_info *pipe_info; 3195 3196 pipe_info = &hif_state->pipe_info[pipe_num]; 3197 3198 if (pipe_info->nbuf_alloc_err_count > 0 || 3199 pipe_info->nbuf_dma_err_count > 0 || 3200 pipe_info->nbuf_ce_enqueue_err_count) 3201 hif_err( 3202 "pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u", 3203 pipe_info->pipe_num, 3204 atomic_read(&pipe_info->recv_bufs_needed), 3205 pipe_info->nbuf_alloc_err_count, 3206 pipe_info->nbuf_dma_err_count, 3207 pipe_info->nbuf_ce_enqueue_err_count); 3208 } 3209 } 3210 3211 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info, 3212 void *nbuf, uint32_t *error_cnt, 3213 enum hif_ce_event_type failure_type, 3214 const char *failure_type_string) 3215 { 3216 int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed); 3217 struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl; 3218 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); 3219 int ce_id = CE_state->id; 3220 uint32_t error_cnt_tmp; 3221 3222 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 3223 error_cnt_tmp = ++(*error_cnt); 3224 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 3225 hif_debug("pipe_num: %d, needed: %d, err_cnt: %u, fail_type: %s", 3226 pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp, 3227 failure_type_string); 3228 hif_record_ce_desc_event(scn, ce_id, failure_type, 3229 NULL, nbuf, bufs_needed_tmp, 0); 3230 /* if we fail to allocate the last buffer for an rx pipe, 3231 * there is no trigger to refill the ce and we will 3232 * eventually crash 3233 */ 3234 if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1 || 3235 (ce_srng_based(scn) && 3236 bufs_needed_tmp == CE_state->dest_ring->nentries - 2)) 3237 qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work); 3238 3239 } 3240 3241 3242 3243 3244 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info) 3245 { 3246 struct CE_handle *ce_hdl; 3247 qdf_size_t buf_sz; 3248 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); 3249 QDF_STATUS status; 3250 uint32_t bufs_posted = 0; 3251 unsigned int ce_id; 3252 3253 buf_sz = pipe_info->buf_sz; 3254 if (buf_sz == 0) { 3255 /* Unused Copy Engine */ 3256 return QDF_STATUS_SUCCESS; 3257 } 3258 3259 ce_hdl = pipe_info->ce_hdl; 3260 ce_id = ((struct CE_state *)ce_hdl)->id; 3261 3262 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 3263 while 
(atomic_read(&pipe_info->recv_bufs_needed) > 0) { 3264 qdf_dma_addr_t CE_data; /* CE space buffer address */ 3265 qdf_nbuf_t nbuf; 3266 3267 atomic_dec(&pipe_info->recv_bufs_needed); 3268 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 3269 3270 hif_record_ce_desc_event(scn, ce_id, 3271 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL, 3272 0, 0); 3273 nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false); 3274 if (!nbuf) { 3275 hif_post_recv_buffers_failure(pipe_info, nbuf, 3276 &pipe_info->nbuf_alloc_err_count, 3277 HIF_RX_NBUF_ALLOC_FAILURE, 3278 "HIF_RX_NBUF_ALLOC_FAILURE"); 3279 return QDF_STATUS_E_NOMEM; 3280 } 3281 3282 hif_record_ce_desc_event(scn, ce_id, 3283 HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf, 3284 0, 0); 3285 /* 3286 * qdf_nbuf_peek_header(nbuf, &data, &unused); 3287 * CE_data = dma_map_single(dev, data, buf_sz, ); 3288 * DMA_FROM_DEVICE); 3289 */ 3290 status = qdf_nbuf_map_single(scn->qdf_dev, nbuf, 3291 QDF_DMA_FROM_DEVICE); 3292 3293 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) { 3294 hif_post_recv_buffers_failure(pipe_info, nbuf, 3295 &pipe_info->nbuf_dma_err_count, 3296 HIF_RX_NBUF_MAP_FAILURE, 3297 "HIF_RX_NBUF_MAP_FAILURE"); 3298 qdf_nbuf_free(nbuf); 3299 return status; 3300 } 3301 3302 CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0); 3303 hif_record_ce_desc_event(scn, ce_id, 3304 HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf, 3305 0, 0); 3306 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data, 3307 buf_sz, DMA_FROM_DEVICE); 3308 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data); 3309 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) { 3310 hif_post_recv_buffers_failure(pipe_info, nbuf, 3311 &pipe_info->nbuf_ce_enqueue_err_count, 3312 HIF_RX_NBUF_ENQUEUE_FAILURE, 3313 "HIF_RX_NBUF_ENQUEUE_FAILURE"); 3314 3315 qdf_nbuf_unmap_single(scn->qdf_dev, nbuf, 3316 QDF_DMA_FROM_DEVICE); 3317 qdf_nbuf_free(nbuf); 3318 return status; 3319 } 3320 3321 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 3322 bufs_posted++; 3323 } 3324 pipe_info->nbuf_alloc_err_count = 3325 (pipe_info->nbuf_alloc_err_count > bufs_posted) ? 3326 pipe_info->nbuf_alloc_err_count - bufs_posted : 0; 3327 pipe_info->nbuf_dma_err_count = 3328 (pipe_info->nbuf_dma_err_count > bufs_posted) ? 3329 pipe_info->nbuf_dma_err_count - bufs_posted : 0; 3330 pipe_info->nbuf_ce_enqueue_err_count = 3331 (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ? 3332 pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0; 3333 3334 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 3335 3336 return QDF_STATUS_SUCCESS; 3337 } 3338 3339 /* 3340 * Try to post all desired receive buffers for all pipes. 3341 * Returns 0 for non fastpath rx copy engine as 3342 * oom_allocation_work will be scheduled to recover any 3343 * failures, non-zero if unable to completely replenish 3344 * receive buffers for fastpath rx Copy engine. 
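 * (In this implementation the 0/non-zero convention maps onto
 * QDF_STATUS_SUCCESS and the QDF error status propagated from
 * hif_post_recv_buffers_for_pipe().)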
3345 */ 3346 static QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn) 3347 { 3348 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3349 int pipe_num; 3350 struct CE_state *ce_state = NULL; 3351 QDF_STATUS qdf_status; 3352 3353 A_TARGET_ACCESS_LIKELY(scn); 3354 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 3355 struct HIF_CE_pipe_info *pipe_info; 3356 3357 ce_state = scn->ce_id_to_state[pipe_num]; 3358 pipe_info = &hif_state->pipe_info[pipe_num]; 3359 3360 if (!ce_state) 3361 continue; 3362 3363 /* Do not init dynamic CEs, during initial load */ 3364 if (ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND) 3365 continue; 3366 3367 if (hif_is_nss_wifi_enabled(scn) && 3368 ce_state && (ce_state->htt_rx_data)) 3369 continue; 3370 3371 qdf_status = hif_post_recv_buffers_for_pipe(pipe_info); 3372 if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state && 3373 ce_state->htt_rx_data && 3374 scn->fastpath_mode_on) { 3375 A_TARGET_ACCESS_UNLIKELY(scn); 3376 return qdf_status; 3377 } 3378 } 3379 3380 A_TARGET_ACCESS_UNLIKELY(scn); 3381 3382 return QDF_STATUS_SUCCESS; 3383 } 3384 3385 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx) 3386 { 3387 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 3388 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3389 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; 3390 3391 hif_update_fastpath_recv_bufs_cnt(scn); 3392 3393 hif_msg_callbacks_install(scn); 3394 3395 if (hif_completion_thread_startup(hif_state)) 3396 return QDF_STATUS_E_FAILURE; 3397 3398 /* enable buffer cleanup */ 3399 hif_state->started = true; 3400 3401 /* Post buffers once to start things off. */ 3402 qdf_status = hif_post_recv_buffers(scn); 3403 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { 3404 /* cleanup is done in hif_ce_disable */ 3405 hif_err("Failed to post buffers"); 3406 return qdf_status; 3407 } 3408 3409 return qdf_status; 3410 } 3411 3412 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info) 3413 { 3414 struct hif_softc *scn; 3415 struct CE_handle *ce_hdl; 3416 uint32_t buf_sz; 3417 struct HIF_CE_state *hif_state; 3418 qdf_nbuf_t netbuf; 3419 qdf_dma_addr_t CE_data; 3420 void *per_CE_context; 3421 3422 buf_sz = pipe_info->buf_sz; 3423 /* Unused Copy Engine */ 3424 if (buf_sz == 0) 3425 return; 3426 3427 3428 hif_state = pipe_info->HIF_CE_state; 3429 if (!hif_state->started) 3430 return; 3431 3432 scn = HIF_GET_SOFTC(hif_state); 3433 ce_hdl = pipe_info->ce_hdl; 3434 3435 if (!scn->qdf_dev) 3436 return; 3437 while (ce_revoke_recv_next 3438 (ce_hdl, &per_CE_context, (void **)&netbuf, 3439 &CE_data) == QDF_STATUS_SUCCESS) { 3440 if (netbuf) { 3441 qdf_nbuf_unmap_single(scn->qdf_dev, netbuf, 3442 QDF_DMA_FROM_DEVICE); 3443 qdf_nbuf_free(netbuf); 3444 } 3445 } 3446 } 3447 3448 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info) 3449 { 3450 struct CE_handle *ce_hdl; 3451 struct HIF_CE_state *hif_state; 3452 struct hif_softc *scn; 3453 qdf_nbuf_t netbuf; 3454 void *per_CE_context; 3455 qdf_dma_addr_t CE_data; 3456 unsigned int nbytes; 3457 unsigned int id; 3458 uint32_t buf_sz; 3459 uint32_t toeplitz_hash_result; 3460 3461 buf_sz = pipe_info->buf_sz; 3462 if (buf_sz == 0) { 3463 /* Unused Copy Engine */ 3464 return; 3465 } 3466 3467 hif_state = pipe_info->HIF_CE_state; 3468 if (!hif_state->started) { 3469 return; 3470 } 3471 3472 scn = HIF_GET_SOFTC(hif_state); 3473 3474 ce_hdl = pipe_info->ce_hdl; 3475 3476 while (ce_cancel_send_next 3477 (ce_hdl, &per_CE_context, 3478 (void **)&netbuf, &CE_data, &nbytes, 3479 &id, 
&toeplitz_hash_result) == QDF_STATUS_SUCCESS) { 3480 if (netbuf != CE_SENDLIST_ITEM_CTXT) { 3481 /* 3482 * Packets enqueued by htt_h2t_ver_req_msg() and 3483 * htt_h2t_rx_ring_cfg_msg_ll() have already been 3484 * freed in htt_htc_misc_pkt_pool_free() in 3485 * wlantl_close(), so do not free them here again 3486 * by checking whether it's the endpoint 3487 * which they are queued in. 3488 */ 3489 if (id == scn->htc_htt_tx_endpoint) 3490 return; 3491 /* Indicate the completion to higher 3492 * layer to free the buffer 3493 */ 3494 if (pipe_info->pipe_callbacks.txCompletionHandler) 3495 pipe_info->pipe_callbacks. 3496 txCompletionHandler(pipe_info-> 3497 pipe_callbacks.Context, 3498 netbuf, id, toeplitz_hash_result); 3499 } 3500 } 3501 } 3502 3503 /* 3504 * Cleanup residual buffers for device shutdown: 3505 * buffers that were enqueued for receive 3506 * buffers that were to be sent 3507 * Note: Buffers that had completed but which were 3508 * not yet processed are on a completion queue. They 3509 * are handled when the completion thread shuts down. 3510 */ 3511 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state) 3512 { 3513 int pipe_num; 3514 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 3515 struct CE_state *ce_state; 3516 3517 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 3518 struct HIF_CE_pipe_info *pipe_info; 3519 3520 ce_state = scn->ce_id_to_state[pipe_num]; 3521 if (hif_is_nss_wifi_enabled(scn) && ce_state && 3522 ((ce_state->htt_tx_data) || 3523 (ce_state->htt_rx_data))) { 3524 continue; 3525 } 3526 3527 pipe_info = &hif_state->pipe_info[pipe_num]; 3528 hif_recv_buffer_cleanup_on_pipe(pipe_info); 3529 hif_send_buffer_cleanup_on_pipe(pipe_info); 3530 } 3531 } 3532 3533 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx) 3534 { 3535 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 3536 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3537 3538 hif_buffer_cleanup(hif_state); 3539 } 3540 3541 static void hif_destroy_oom_work(struct hif_softc *scn) 3542 { 3543 struct CE_state *ce_state; 3544 int ce_id; 3545 3546 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 3547 ce_state = scn->ce_id_to_state[ce_id]; 3548 if (ce_state) 3549 qdf_destroy_work(scn->qdf_dev, 3550 &ce_state->oom_allocation_work); 3551 } 3552 } 3553 3554 void hif_ce_stop(struct hif_softc *scn) 3555 { 3556 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3557 int pipe_num; 3558 3559 /* 3560 * before cleaning up any memory, ensure irq & 3561 * bottom half contexts will not be re-entered 3562 */ 3563 hif_disable_isr(&scn->osc); 3564 hif_destroy_oom_work(scn); 3565 scn->hif_init_done = false; 3566 3567 /* 3568 * At this point, asynchronous threads are stopped, 3569 * The Target should not DMA nor interrupt, Host code may 3570 * not initiate anything more. So we just need to clean 3571 * up Host-side state. 
3572 */ 3573 3574 if (scn->athdiag_procfs_inited) { 3575 athdiag_procfs_remove(); 3576 scn->athdiag_procfs_inited = false; 3577 } 3578 3579 hif_buffer_cleanup(hif_state); 3580 3581 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 3582 struct HIF_CE_pipe_info *pipe_info; 3583 struct CE_attr attr; 3584 struct CE_handle *ce_diag = hif_state->ce_diag; 3585 3586 pipe_info = &hif_state->pipe_info[pipe_num]; 3587 if (pipe_info->ce_hdl) { 3588 if (pipe_info->ce_hdl != ce_diag && 3589 hif_state->started) { 3590 attr = hif_state->host_ce_config[pipe_num]; 3591 if (attr.src_nentries) 3592 qdf_spinlock_destroy(&pipe_info-> 3593 completion_freeq_lock); 3594 } 3595 ce_fini(pipe_info->ce_hdl); 3596 pipe_info->ce_hdl = NULL; 3597 pipe_info->buf_sz = 0; 3598 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); 3599 } 3600 } 3601 3602 if (hif_state->sleep_timer_init) { 3603 qdf_timer_stop(&hif_state->sleep_timer); 3604 qdf_timer_free(&hif_state->sleep_timer); 3605 hif_state->sleep_timer_init = false; 3606 } 3607 3608 hif_state->started = false; 3609 } 3610 3611 static void hif_get_shadow_reg_cfg(struct hif_softc *scn, 3612 struct shadow_reg_cfg 3613 **target_shadow_reg_cfg_ret, 3614 uint32_t *shadow_cfg_sz_ret) 3615 { 3616 if (target_shadow_reg_cfg_ret) 3617 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg; 3618 if (shadow_cfg_sz_ret) 3619 *shadow_cfg_sz_ret = shadow_cfg_sz; 3620 } 3621 3622 /** 3623 * hif_get_target_ce_config() - get copy engine configuration 3624 * @target_ce_config_ret: basic copy engine configuration 3625 * @target_ce_config_sz_ret: size of the basic configuration in bytes 3626 * @target_service_to_ce_map_ret: service mapping for the copy engines 3627 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes 3628 * @target_shadow_reg_cfg_ret: shadow register configuration 3629 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes 3630 * 3631 * providing accessor to these values outside of this file. 3632 * currently these are stored in static pointers to const sections. 3633 * there are multiple configurations that are selected from at compile time. 3634 * Runtime selection would need to consider mode, target type and bus type. 3635 * 3636 * Return: return by parameter. 
3637 */ 3638 void hif_get_target_ce_config(struct hif_softc *scn, 3639 struct CE_pipe_config **target_ce_config_ret, 3640 uint32_t *target_ce_config_sz_ret, 3641 struct service_to_pipe **target_service_to_ce_map_ret, 3642 uint32_t *target_service_to_ce_map_sz_ret, 3643 struct shadow_reg_cfg **target_shadow_reg_cfg_ret, 3644 uint32_t *shadow_cfg_sz_ret) 3645 { 3646 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3647 3648 *target_ce_config_ret = hif_state->target_ce_config; 3649 *target_ce_config_sz_ret = hif_state->target_ce_config_sz; 3650 3651 hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret, 3652 target_service_to_ce_map_sz_ret); 3653 hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret, 3654 shadow_cfg_sz_ret); 3655 } 3656 3657 #ifdef CONFIG_SHADOW_V3 3658 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg) 3659 { 3660 int i; 3661 3662 hif_err("v3: num_config %d", cfg->num_shadow_reg_v3_cfg); 3663 3664 for (i = 0; i < cfg->num_shadow_reg_v3_cfg; i++) { 3665 hif_err("i %d, val %x", i, cfg->shadow_reg_v3_cfg[i].addr); 3666 } 3667 } 3668 3669 #elif defined(CONFIG_SHADOW_V2) 3670 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg) 3671 { 3672 int i; 3673 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3674 "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg); 3675 3676 for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) { 3677 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 3678 "%s: i %d, val %x", __func__, i, 3679 cfg->shadow_reg_v2_cfg[i].addr); 3680 } 3681 } 3682 3683 #else 3684 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg) 3685 { 3686 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 3687 "%s: CONFIG_SHADOW V2/V3 not defined", __func__); 3688 } 3689 #endif 3690 3691 #ifdef ADRASTEA_RRI_ON_DDR 3692 /** 3693 * hif_get_src_ring_read_index(): Called to get the SRRI 3694 * 3695 * @scn: hif_softc pointer 3696 * @CE_ctrl_addr: base address of the CE whose RRI is to be read 3697 * 3698 * This function returns the SRRI to the caller. For CEs that 3699 * dont have interrupts enabled, we look at the DDR based SRRI 3700 * 3701 * Return: SRRI 3702 */ 3703 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn, 3704 uint32_t CE_ctrl_addr) 3705 { 3706 struct CE_attr attr; 3707 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3708 3709 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)]; 3710 if (attr.flags & CE_ATTR_DISABLE_INTR) { 3711 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr); 3712 } else { 3713 if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) 3714 return A_TARGET_READ(scn, 3715 (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS); 3716 else 3717 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, 3718 CE_ctrl_addr); 3719 } 3720 } 3721 3722 /** 3723 * hif_get_dst_ring_read_index(): Called to get the DRRI 3724 * 3725 * @scn: hif_softc pointer 3726 * @CE_ctrl_addr: base address of the CE whose RRI is to be read 3727 * 3728 * This function returns the DRRI to the caller. 
For CEs that 3729 * dont have interrupts enabled, we look at the DDR based DRRI 3730 * 3731 * Return: DRRI 3732 */ 3733 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn, 3734 uint32_t CE_ctrl_addr) 3735 { 3736 struct CE_attr attr; 3737 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3738 3739 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)]; 3740 3741 if (attr.flags & CE_ATTR_DISABLE_INTR) { 3742 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr); 3743 } else { 3744 if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) 3745 return A_TARGET_READ(scn, 3746 (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS); 3747 else 3748 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, 3749 CE_ctrl_addr); 3750 } 3751 } 3752 3753 /** 3754 * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr 3755 * @scn: hif_softc pointer 3756 * 3757 * Return: qdf status 3758 */ 3759 static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn) 3760 { 3761 qdf_dma_addr_t paddr_rri_on_ddr = 0; 3762 3763 scn->vaddr_rri_on_ddr = 3764 (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev, 3765 scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)), 3766 &paddr_rri_on_ddr); 3767 3768 if (!scn->vaddr_rri_on_ddr) { 3769 hif_err("dmaable page alloc fail"); 3770 return QDF_STATUS_E_NOMEM; 3771 } 3772 3773 scn->paddr_rri_on_ddr = paddr_rri_on_ddr; 3774 3775 qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t)); 3776 3777 return QDF_STATUS_SUCCESS; 3778 } 3779 #endif 3780 3781 #if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR) 3782 /** 3783 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism 3784 * 3785 * @scn: hif_softc pointer 3786 * 3787 * This function allocates non cached memory on ddr and sends 3788 * the physical address of this memory to the CE hardware. The 3789 * hardware updates the RRI on this particular location. 3790 * 3791 * Return: None 3792 */ 3793 static inline void hif_config_rri_on_ddr(struct hif_softc *scn) 3794 { 3795 unsigned int i; 3796 uint32_t high_paddr, low_paddr; 3797 3798 if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS) 3799 return; 3800 3801 low_paddr = BITS0_TO_31(scn->paddr_rri_on_ddr); 3802 high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr); 3803 3804 hif_debug("using srri and drri from DDR"); 3805 3806 WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr); 3807 WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr); 3808 3809 for (i = 0; i < CE_COUNT; i++) 3810 CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i)); 3811 } 3812 #else 3813 /** 3814 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism 3815 * 3816 * @scn: hif_softc pointer 3817 * 3818 * This is a dummy implementation for platforms that don't 3819 * support this functionality. 3820 * 3821 * Return: None 3822 */ 3823 static inline void hif_config_rri_on_ddr(struct hif_softc *scn) 3824 { 3825 } 3826 #endif 3827 3828 /** 3829 * hif_update_rri_over_ddr_config() - update rri_over_ddr config for 3830 * QMI command 3831 * @scn: hif context 3832 * @cfg: wlan enable config 3833 * 3834 * In case of Genoa, rri_over_ddr memory configuration is passed 3835 * to firmware through QMI configure command. 
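 *
 * The DMA address of the RRI block is split across two fields: going by
 * the BITS0_TO_31()/BITS32_TO_35() helper names used below, base_addr_low
 * carries address bits 0..31 and base_addr_high carries bits 32..35. For
 * example, a 36-bit address 0x3_8000_1000 would be reported as
 * base_addr_low = 0x80001000 and base_addr_high = 0x3.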
3836 */ 3837 #if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR) 3838 static void hif_update_rri_over_ddr_config(struct hif_softc *scn, 3839 struct pld_wlan_enable_cfg *cfg) 3840 { 3841 if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS) 3842 return; 3843 3844 cfg->rri_over_ddr_cfg_valid = true; 3845 cfg->rri_over_ddr_cfg.base_addr_low = 3846 BITS0_TO_31(scn->paddr_rri_on_ddr); 3847 cfg->rri_over_ddr_cfg.base_addr_high = 3848 BITS32_TO_35(scn->paddr_rri_on_ddr); 3849 } 3850 #else 3851 static void hif_update_rri_over_ddr_config(struct hif_softc *scn, 3852 struct pld_wlan_enable_cfg *cfg) 3853 { 3854 } 3855 #endif 3856 3857 /** 3858 * hif_wlan_enable(): call the platform driver to enable wlan 3859 * @scn: HIF Context 3860 * 3861 * This function passes the con_mode and CE configuration to 3862 * platform driver to enable wlan. 3863 * 3864 * Return: linux error code 3865 */ 3866 int hif_wlan_enable(struct hif_softc *scn) 3867 { 3868 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 3869 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); 3870 struct pld_wlan_enable_cfg cfg = { 0 }; 3871 enum pld_driver_mode mode; 3872 uint32_t con_mode = hif_get_conparam(scn); 3873 3874 hif_get_target_ce_config(scn, 3875 (struct CE_pipe_config **)&cfg.ce_tgt_cfg, 3876 &cfg.num_ce_tgt_cfg, 3877 (struct service_to_pipe **)&cfg.ce_svc_cfg, 3878 &cfg.num_ce_svc_pipe_cfg, 3879 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg, 3880 &cfg.num_shadow_reg_cfg); 3881 3882 /* translate from structure size to array size */ 3883 cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config); 3884 cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe); 3885 cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg); 3886 3887 switch (tgt_info->target_type) { 3888 case TARGET_TYPE_KIWI: 3889 case TARGET_TYPE_MANGO: 3890 hif_prepare_hal_shadow_reg_cfg_v3(scn, &cfg); 3891 break; 3892 default: 3893 hif_prepare_hal_shadow_register_cfg(scn, 3894 &cfg.shadow_reg_v2_cfg, 3895 &cfg.num_shadow_reg_v2_cfg); 3896 break; 3897 } 3898 3899 hif_print_hal_shadow_register_cfg(&cfg); 3900 3901 hif_update_rri_over_ddr_config(scn, &cfg); 3902 3903 if (QDF_GLOBAL_FTM_MODE == con_mode) 3904 mode = PLD_FTM; 3905 else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode) 3906 mode = PLD_COLDBOOT_CALIBRATION; 3907 else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode) 3908 mode = PLD_FTM_COLDBOOT_CALIBRATION; 3909 else if (QDF_IS_EPPING_ENABLED(con_mode)) 3910 mode = PLD_EPPING; 3911 else 3912 mode = PLD_MISSION; 3913 3914 if (BYPASS_QMI) 3915 return 0; 3916 else 3917 return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode); 3918 } 3919 3920 #ifdef WLAN_FEATURE_EPPING 3921 3922 #define CE_EPPING_USES_IRQ true 3923 3924 void hif_ce_prepare_epping_config(struct hif_softc *scn, 3925 struct HIF_CE_state *hif_state) 3926 { 3927 if (CE_EPPING_USES_IRQ) 3928 hif_state->host_ce_config = host_ce_config_wlan_epping_irq; 3929 else 3930 hif_state->host_ce_config = host_ce_config_wlan_epping_poll; 3931 hif_state->target_ce_config = target_ce_config_wlan_epping; 3932 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping); 3933 target_shadow_reg_cfg = target_shadow_reg_cfg_epping; 3934 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping); 3935 scn->ce_count = EPPING_HOST_CE_COUNT; 3936 } 3937 #endif 3938 3939 #ifdef QCN7605_SUPPORT 3940 static inline 3941 void hif_set_ce_config_qcn7605(struct hif_softc *scn, 3942 struct HIF_CE_state *hif_state) 3943 { 3944 hif_state->host_ce_config = host_ce_config_wlan_qcn7605; 3945 
hif_state->target_ce_config = target_ce_config_wlan_qcn7605; 3946 hif_state->target_ce_config_sz = 3947 sizeof(target_ce_config_wlan_qcn7605); 3948 target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605; 3949 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605); 3950 scn->ce_count = QCN7605_CE_COUNT; 3951 } 3952 #else 3953 static inline 3954 void hif_set_ce_config_qcn7605(struct hif_softc *scn, 3955 struct HIF_CE_state *hif_state) 3956 { 3957 hif_err("QCN7605 not supported"); 3958 } 3959 #endif 3960 3961 #ifdef CE_SVC_CMN_INIT 3962 #ifdef QCA_WIFI_SUPPORT_SRNG 3963 static inline void hif_ce_service_init(void) 3964 { 3965 ce_service_srng_init(); 3966 } 3967 #else 3968 static inline void hif_ce_service_init(void) 3969 { 3970 ce_service_legacy_init(); 3971 } 3972 #endif 3973 #else 3974 static inline void hif_ce_service_init(void) 3975 { 3976 } 3977 #endif 3978 3979 3980 /** 3981 * hif_ce_prepare_config() - load the correct static tables. 3982 * @scn: hif context 3983 * 3984 * Epping uses different static attribute tables than mission mode. 3985 */ 3986 void hif_ce_prepare_config(struct hif_softc *scn) 3987 { 3988 uint32_t mode = hif_get_conparam(scn); 3989 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 3990 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); 3991 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3992 int ret; 3993 int msi_data_count = 0; 3994 int msi_data_start = 0; 3995 int msi_irq_start = 0; 3996 3997 hif_ce_service_init(); 3998 hif_state->ce_services = ce_services_attach(scn); 3999 4000 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", 4001 &msi_data_count, &msi_data_start, 4002 &msi_irq_start); 4003 4004 scn->ce_count = HOST_CE_COUNT; 4005 scn->int_assignment = &ce_int_context[msi_data_count]; 4006 scn->free_irq_done = false; 4007 /* if epping is enabled we need to use the epping configuration. 
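 * In that case hif_ce_prepare_epping_config() is used and the function
 * returns early, skipping the per-target switch below.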
*/ 4008 if (QDF_IS_EPPING_ENABLED(mode)) { 4009 hif_ce_prepare_epping_config(scn, hif_state); 4010 return; 4011 } 4012 4013 switch (tgt_info->target_type) { 4014 default: 4015 hif_state->host_ce_config = host_ce_config_wlan; 4016 hif_state->target_ce_config = target_ce_config_wlan; 4017 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan); 4018 break; 4019 case TARGET_TYPE_QCN7605: 4020 hif_set_ce_config_qcn7605(scn, hif_state); 4021 break; 4022 case TARGET_TYPE_AR900B: 4023 case TARGET_TYPE_QCA9984: 4024 case TARGET_TYPE_QCA9888: 4025 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) { 4026 hif_state->host_ce_config = 4027 host_lowdesc_ce_cfg_wlan_ar900b_nopktlog; 4028 } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { 4029 hif_state->host_ce_config = 4030 host_lowdesc_ce_cfg_wlan_ar900b; 4031 } else { 4032 hif_state->host_ce_config = host_ce_config_wlan_ar900b; 4033 } 4034 4035 hif_state->target_ce_config = target_ce_config_wlan_ar900b; 4036 hif_state->target_ce_config_sz = 4037 sizeof(target_ce_config_wlan_ar900b); 4038 4039 break; 4040 4041 case TARGET_TYPE_AR9888: 4042 case TARGET_TYPE_AR9888V2: 4043 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { 4044 hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888; 4045 } else { 4046 hif_state->host_ce_config = host_ce_config_wlan_ar9888; 4047 } 4048 4049 hif_state->target_ce_config = target_ce_config_wlan_ar9888; 4050 hif_state->target_ce_config_sz = 4051 sizeof(target_ce_config_wlan_ar9888); 4052 4053 break; 4054 4055 case TARGET_TYPE_QCA8074: 4056 case TARGET_TYPE_QCA8074V2: 4057 case TARGET_TYPE_QCA6018: 4058 if (scn->bus_type == QDF_BUS_TYPE_PCI) { 4059 hif_state->host_ce_config = 4060 host_ce_config_wlan_qca8074_pci; 4061 hif_state->target_ce_config = 4062 target_ce_config_wlan_qca8074_pci; 4063 hif_state->target_ce_config_sz = 4064 sizeof(target_ce_config_wlan_qca8074_pci); 4065 } else { 4066 hif_state->host_ce_config = host_ce_config_wlan_qca8074; 4067 hif_state->target_ce_config = 4068 target_ce_config_wlan_qca8074; 4069 hif_state->target_ce_config_sz = 4070 sizeof(target_ce_config_wlan_qca8074); 4071 } 4072 break; 4073 case TARGET_TYPE_QCA6290: 4074 hif_state->host_ce_config = host_ce_config_wlan_qca6290; 4075 hif_state->target_ce_config = target_ce_config_wlan_qca6290; 4076 hif_state->target_ce_config_sz = 4077 sizeof(target_ce_config_wlan_qca6290); 4078 4079 scn->ce_count = QCA_6290_CE_COUNT; 4080 break; 4081 case TARGET_TYPE_QCN9000: 4082 hif_state->host_ce_config = host_ce_config_wlan_qcn9000; 4083 hif_state->target_ce_config = target_ce_config_wlan_qcn9000; 4084 hif_state->target_ce_config_sz = 4085 sizeof(target_ce_config_wlan_qcn9000); 4086 scn->ce_count = QCN_9000_CE_COUNT; 4087 scn->disable_wake_irq = 1; 4088 break; 4089 case TARGET_TYPE_QCN9224: 4090 hif_set_ce_config_qcn9224(scn, hif_state); 4091 break; 4092 case TARGET_TYPE_QCA5332: 4093 hif_state->host_ce_config = host_ce_config_wlan_qca5332; 4094 hif_state->target_ce_config = target_ce_config_wlan_qca5332; 4095 hif_state->target_ce_config_sz = 4096 sizeof(target_ce_config_wlan_qca5332); 4097 scn->ce_count = QCA_5332_CE_COUNT; 4098 break; 4099 case TARGET_TYPE_QCN6122: 4100 hif_state->host_ce_config = host_ce_config_wlan_qcn6122; 4101 hif_state->target_ce_config = target_ce_config_wlan_qcn6122; 4102 hif_state->target_ce_config_sz = 4103 sizeof(target_ce_config_wlan_qcn6122); 4104 scn->ce_count = QCN_6122_CE_COUNT; 4105 scn->disable_wake_irq = 1; 4106 break; 4107 case TARGET_TYPE_QCA5018: 4108 hif_state->host_ce_config = 
host_ce_config_wlan_qca5018; 4109 hif_state->target_ce_config = target_ce_config_wlan_qca5018; 4110 hif_state->target_ce_config_sz = 4111 sizeof(target_ce_config_wlan_qca5018); 4112 scn->ce_count = QCA_5018_CE_COUNT; 4113 break; 4114 case TARGET_TYPE_QCA9574: 4115 hif_state->host_ce_config = host_ce_config_wlan_qca9574; 4116 hif_state->target_ce_config = target_ce_config_wlan_qca9574; 4117 hif_state->target_ce_config_sz = 4118 sizeof(target_ce_config_wlan_qca9574); 4119 break; 4120 case TARGET_TYPE_QCA6390: 4121 hif_state->host_ce_config = host_ce_config_wlan_qca6390; 4122 hif_state->target_ce_config = target_ce_config_wlan_qca6390; 4123 hif_state->target_ce_config_sz = 4124 sizeof(target_ce_config_wlan_qca6390); 4125 4126 scn->ce_count = QCA_6390_CE_COUNT; 4127 break; 4128 case TARGET_TYPE_QCA6490: 4129 hif_state->host_ce_config = host_ce_config_wlan_qca6490; 4130 hif_state->target_ce_config = target_ce_config_wlan_qca6490; 4131 hif_state->target_ce_config_sz = 4132 sizeof(target_ce_config_wlan_qca6490); 4133 4134 scn->ce_count = QCA_6490_CE_COUNT; 4135 break; 4136 case TARGET_TYPE_QCA6750: 4137 hif_state->host_ce_config = host_ce_config_wlan_qca6750; 4138 hif_state->target_ce_config = target_ce_config_wlan_qca6750; 4139 hif_state->target_ce_config_sz = 4140 sizeof(target_ce_config_wlan_qca6750); 4141 4142 scn->ce_count = QCA_6750_CE_COUNT; 4143 break; 4144 case TARGET_TYPE_KIWI: 4145 case TARGET_TYPE_MANGO: 4146 hif_state->host_ce_config = host_ce_config_wlan_kiwi; 4147 hif_state->target_ce_config = target_ce_config_wlan_kiwi; 4148 hif_state->target_ce_config_sz = 4149 sizeof(target_ce_config_wlan_kiwi); 4150 scn->ce_count = KIWI_CE_COUNT; 4151 break; 4152 case TARGET_TYPE_ADRASTEA: 4153 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) { 4154 hif_state->host_ce_config = 4155 host_lowdesc_ce_config_wlan_adrastea_nopktlog; 4156 hif_state->target_ce_config = 4157 target_lowdesc_ce_config_wlan_adrastea_nopktlog; 4158 hif_state->target_ce_config_sz = 4159 sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog); 4160 } else { 4161 hif_state->host_ce_config = 4162 host_ce_config_wlan_adrastea; 4163 hif_state->target_ce_config = 4164 target_ce_config_wlan_adrastea; 4165 hif_state->target_ce_config_sz = 4166 sizeof(target_ce_config_wlan_adrastea); 4167 } 4168 break; 4169 4170 } 4171 QDF_BUG(scn->ce_count <= CE_COUNT_MAX); 4172 } 4173 4174 /** 4175 * hif_ce_open() - do ce specific allocations 4176 * @hif_sc: pointer to hif context 4177 * 4178 * return: 0 for success or QDF_STATUS_E_NOMEM 4179 */ 4180 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc) 4181 { 4182 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 4183 4184 qdf_spinlock_create(&hif_state->irq_reg_lock); 4185 qdf_spinlock_create(&hif_state->keep_awake_lock); 4186 return QDF_STATUS_SUCCESS; 4187 } 4188 4189 /** 4190 * hif_ce_close() - do ce specific free 4191 * @hif_sc: pointer to hif context 4192 */ 4193 void hif_ce_close(struct hif_softc *hif_sc) 4194 { 4195 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 4196 4197 qdf_spinlock_destroy(&hif_state->irq_reg_lock); 4198 qdf_spinlock_destroy(&hif_state->keep_awake_lock); 4199 } 4200 4201 /** 4202 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed 4203 * @hif_sc: hif context 4204 * 4205 * uses state variables to support cleaning up when hif_config_ce fails. 
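 *
 * Safe to call on a partially configured instance: pipes whose ce_hdl is
 * still NULL are skipped, which is what lets the error path of
 * hif_config_ce() (see its err label) reuse this function for cleanup.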
4206 */ 4207 void hif_unconfig_ce(struct hif_softc *hif_sc) 4208 { 4209 int pipe_num; 4210 struct HIF_CE_pipe_info *pipe_info; 4211 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 4212 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc); 4213 4214 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { 4215 pipe_info = &hif_state->pipe_info[pipe_num]; 4216 if (pipe_info->ce_hdl) { 4217 ce_unregister_irq(hif_state, (1 << pipe_num)); 4218 } 4219 } 4220 deinit_tasklet_workers(hif_hdl); 4221 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { 4222 pipe_info = &hif_state->pipe_info[pipe_num]; 4223 if (pipe_info->ce_hdl) { 4224 ce_fini(pipe_info->ce_hdl); 4225 pipe_info->ce_hdl = NULL; 4226 pipe_info->buf_sz = 0; 4227 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); 4228 } 4229 } 4230 if (hif_sc->athdiag_procfs_inited) { 4231 athdiag_procfs_remove(); 4232 hif_sc->athdiag_procfs_inited = false; 4233 } 4234 } 4235 4236 #ifdef CONFIG_BYPASS_QMI 4237 #ifdef QCN7605_SUPPORT 4238 /** 4239 * hif_post_static_buf_to_target() - post static buffer to WLAN FW 4240 * @scn: pointer to HIF structure 4241 * 4242 * WLAN FW needs 2MB memory from DDR when QMI is disabled. 4243 * 4244 * Return: void 4245 */ 4246 static void hif_post_static_buf_to_target(struct hif_softc *scn) 4247 { 4248 phys_addr_t target_pa; 4249 struct ce_info *ce_info_ptr; 4250 uint32_t msi_data_start; 4251 uint32_t msi_data_count; 4252 uint32_t msi_irq_start; 4253 uint32_t i = 0; 4254 int ret; 4255 4256 scn->vaddr_qmi_bypass = 4257 (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev, 4258 scn->qdf_dev->dev, 4259 FW_SHARED_MEM, 4260 &target_pa); 4261 if (!scn->vaddr_qmi_bypass) { 4262 hif_err("Memory allocation failed could not post target buf"); 4263 return; 4264 } 4265 4266 scn->paddr_qmi_bypass = target_pa; 4267 4268 ce_info_ptr = (struct ce_info *)scn->vaddr_qmi_bypass; 4269 4270 if (scn->vaddr_rri_on_ddr) { 4271 ce_info_ptr->rri_over_ddr_low_paddr = 4272 BITS0_TO_31(scn->paddr_rri_on_ddr); 4273 ce_info_ptr->rri_over_ddr_high_paddr = 4274 BITS32_TO_35(scn->paddr_rri_on_ddr); 4275 } 4276 4277 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", 4278 &msi_data_count, &msi_data_start, 4279 &msi_irq_start); 4280 if (ret) { 4281 hif_err("Failed to get CE msi config"); 4282 return; 4283 } 4284 4285 for (i = 0; i < CE_COUNT_MAX; i++) { 4286 ce_info_ptr->cfg[i].ce_id = i; 4287 ce_info_ptr->cfg[i].msi_vector = 4288 (i % msi_data_count) + msi_irq_start; 4289 } 4290 4291 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa); 4292 hif_info("target va %pK target pa %pa", scn->vaddr_qmi_bypass, 4293 &target_pa); 4294 } 4295 4296 /** 4297 * hif_cleanup_static_buf_to_target() - clean up static buffer to WLAN FW 4298 * @scn: pointer to HIF structure 4299 * 4300 * 4301 * Return: void 4302 */ 4303 void hif_cleanup_static_buf_to_target(struct hif_softc *scn) 4304 { 4305 void *target_va = scn->vaddr_qmi_bypass; 4306 phys_addr_t target_pa = scn->paddr_qmi_bypass; 4307 4308 qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, 4309 FW_SHARED_MEM, target_va, 4310 target_pa, 0); 4311 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0); 4312 } 4313 #else 4314 /** 4315 * hif_post_static_buf_to_target() - post static buffer to WLAN FW 4316 * @scn: pointer to HIF structure 4317 * 4318 * WLAN FW needs 2MB memory from DDR when QMI is disabled. 
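 *
 * A minimal sketch of what the function below does: allocate a
 * DMA-coherent buffer of FW_SHARED_MEM bytes, record its addresses in scn,
 * and publish the physical address to the target through the
 * BYPASS_QMI_TEMP_REGISTER scratch register so firmware can locate the
 * buffer without QMI.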
4319 * 4320 * Return: void 4321 */ 4322 static void hif_post_static_buf_to_target(struct hif_softc *scn) 4323 { 4324 qdf_dma_addr_t target_pa; 4325 4326 scn->vaddr_qmi_bypass = 4327 (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev, 4328 scn->qdf_dev->dev, 4329 FW_SHARED_MEM, 4330 &target_pa); 4331 if (!scn->vaddr_qmi_bypass) { 4332 hif_err("Memory allocation failed could not post target buf"); 4333 return; 4334 } 4335 4336 scn->paddr_qmi_bypass = target_pa; 4337 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa); 4338 } 4339 4340 /** 4341 * hif_cleanup_static_buf_to_target() - clean up static buffer to WLAN FW 4342 * @scn: pointer to HIF structure 4343 * 4344 * 4345 * Return: void 4346 */ 4347 void hif_cleanup_static_buf_to_target(struct hif_softc *scn) 4348 { 4349 void *target_va = scn->vaddr_qmi_bypass; 4350 phys_addr_t target_pa = scn->paddr_qmi_bypass; 4351 4352 qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, 4353 FW_SHARED_MEM, target_va, 4354 target_pa, 0); 4355 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0); 4356 } 4357 #endif 4358 4359 #else 4360 static inline void hif_post_static_buf_to_target(struct hif_softc *scn) 4361 { 4362 } 4363 4364 void hif_cleanup_static_buf_to_target(struct hif_softc *scn) 4365 { 4366 } 4367 #endif 4368 4369 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok, 4370 bool wait_for_it) 4371 { 4372 /* todo */ 4373 return 0; 4374 } 4375 4376 int hif_config_ce_by_id(struct hif_softc *scn, int pipe_num) 4377 { 4378 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 4379 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 4380 struct HIF_CE_pipe_info *pipe_info; 4381 struct CE_state *ce_state = NULL; 4382 struct CE_attr *attr; 4383 int rv = 0; 4384 4385 if (pipe_num >= CE_COUNT_MAX) 4386 return -EINVAL; 4387 4388 pipe_info = &hif_state->pipe_info[pipe_num]; 4389 pipe_info->pipe_num = pipe_num; 4390 pipe_info->HIF_CE_state = hif_state; 4391 attr = &hif_state->host_ce_config[pipe_num]; 4392 ce_state = scn->ce_id_to_state[pipe_num]; 4393 4394 if (ce_state) { 4395 /* Do not reinitialize the CE if it's done already */ 4396 rv = QDF_STATUS_E_BUSY; 4397 goto err; 4398 } 4399 4400 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr); 4401 ce_state = scn->ce_id_to_state[pipe_num]; 4402 if (!ce_state) { 4403 A_TARGET_ACCESS_UNLIKELY(scn); 4404 rv = QDF_STATUS_E_FAILURE; 4405 goto err; 4406 } 4407 qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock); 4408 QDF_ASSERT(pipe_info->ce_hdl); 4409 if (!pipe_info->ce_hdl) { 4410 rv = QDF_STATUS_E_FAILURE; 4411 A_TARGET_ACCESS_UNLIKELY(scn); 4412 goto err; 4413 } 4414 4415 ce_state->lro_data = qdf_lro_init(); 4416 4417 if (attr->flags & CE_ATTR_DIAG) { 4418 /* Reserve the ultimate CE for 4419 * Diagnostic Window support 4420 */ 4421 hif_state->ce_diag = pipe_info->ce_hdl; 4422 goto skip; 4423 } 4424 4425 if (hif_is_nss_wifi_enabled(scn) && ce_state && 4426 (ce_state->htt_rx_data)) { 4427 goto skip; 4428 } 4429 4430 pipe_info->buf_sz = (qdf_size_t)(attr->src_sz_max); 4431 if (attr->dest_nentries > 0) { 4432 atomic_set(&pipe_info->recv_bufs_needed, 4433 init_buffer_count(attr->dest_nentries - 1)); 4434 /* SRNG based CE has one entry less */ 4435 if (ce_srng_based(scn)) 4436 atomic_dec(&pipe_info->recv_bufs_needed); 4437 } else { 4438 atomic_set(&pipe_info->recv_bufs_needed, 0); 4439 } 4440 ce_tasklet_init(hif_state, (1 << pipe_num)); 4441 ce_register_irq(hif_state, (1 << pipe_num)); 4442 4443 init_tasklet_worker_by_ceid(hif_hdl, pipe_num); 4444 skip: 4445 return
0; 4446 err: 4447 return rv; 4448 } 4449 4450 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) 4451 static inline void hif_gen_ce_id_history_idx_mapping(struct hif_softc *scn) 4452 { 4453 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; 4454 uint8_t ce_id, hist_idx = 0; 4455 4456 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 4457 if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE & (1 << ce_id)) 4458 ce_hist->ce_id_hist_map[ce_id] = hist_idx++; 4459 else 4460 ce_hist->ce_id_hist_map[ce_id] = -1; 4461 } 4462 } 4463 #else 4464 static inline void hif_gen_ce_id_history_idx_mapping(struct hif_softc *scn) 4465 { 4466 } 4467 #endif 4468 4469 /** 4470 * hif_config_ce() - configure copy engines 4471 * @scn: hif context 4472 * 4473 * Prepares fw, copy engine hardware and host sw according 4474 * to the attributes selected by hif_ce_prepare_config. 4475 * 4476 * also calls athdiag_procfs_init 4477 * 4478 * return: 0 for success nonzero for failure. 4479 */ 4480 int hif_config_ce(struct hif_softc *scn) 4481 { 4482 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 4483 struct HIF_CE_pipe_info *pipe_info; 4484 int pipe_num; 4485 4486 #ifdef ADRASTEA_SHADOW_REGISTERS 4487 int i; 4488 #endif 4489 QDF_STATUS rv = QDF_STATUS_SUCCESS; 4490 4491 scn->notice_send = true; 4492 scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM; 4493 4494 hif_post_static_buf_to_target(scn); 4495 4496 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS; 4497 4498 hif_config_rri_on_ddr(scn); 4499 4500 if (ce_srng_based(scn)) 4501 scn->bus_ops.hif_target_sleep_state_adjust = 4502 &hif_srng_sleep_state_adjust; 4503 4504 /* Initialise the CE debug history sysfs interface inputs ce_id and 4505 * index. Disable data storing 4506 */ 4507 reset_ce_debug_history(scn); 4508 hif_gen_ce_id_history_idx_mapping(scn); 4509 4510 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 4511 struct CE_attr *attr; 4512 4513 pipe_info = &hif_state->pipe_info[pipe_num]; 4514 attr = &hif_state->host_ce_config[pipe_num]; 4515 4516 if (attr->flags & CE_ATTR_INIT_ON_DEMAND) 4517 continue; 4518 4519 if (hif_config_ce_by_id(scn, pipe_num)) 4520 goto err; 4521 } 4522 4523 if (athdiag_procfs_init(scn) != 0) { 4524 A_TARGET_ACCESS_UNLIKELY(scn); 4525 goto err; 4526 } 4527 scn->athdiag_procfs_inited = true; 4528 4529 hif_debug("ce_init done"); 4530 hif_debug("%s: X, ret = %d", __func__, rv); 4531 4532 #ifdef ADRASTEA_SHADOW_REGISTERS 4533 hif_debug("Using Shadow Registers instead of CE Registers"); 4534 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) { 4535 hif_debug("Shadow Register%d is mapped to address %x", 4536 i, 4537 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2)); 4538 } 4539 #endif 4540 4541 return rv != QDF_STATUS_SUCCESS; 4542 err: 4543 /* Failure, so clean up */ 4544 hif_unconfig_ce(scn); 4545 hif_info("X, ret = %d", rv); 4546 return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE; 4547 } 4548 4549 /** 4550 * hif_config_ce_pktlog() - configure copy engines 4551 * @scn: hif context 4552 * 4553 * Prepares fw, copy engine hardware and host sw according 4554 * to the attributes selected by hif_ce_prepare_config. 4555 * 4556 * also calls athdiag_procfs_init 4557 * 4558 * return: 0 for success nonzero for failure. 
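 *
 * Rough sequence (see the body below): look up the pktlog CE id, configure
 * that copy engine via hif_config_ce_by_id() (QDF_STATUS_E_BUSY means it
 * was already configured and is treated as success), hook up its
 * interrupt, start the per-CE completion thread and finally post receive
 * buffers for the pipe.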
4559 */ 4560 int hif_config_ce_pktlog(struct hif_opaque_softc *hif_hdl) 4561 { 4562 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 4563 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 4564 int pipe_num; 4565 QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE; 4566 struct HIF_CE_pipe_info *pipe_info; 4567 4568 if (!scn) 4569 goto err; 4570 4571 if (scn->pktlog_init) 4572 return QDF_STATUS_SUCCESS; 4573 4574 pipe_num = hif_get_pktlog_ce_num(scn); 4575 if (pipe_num < 0) { 4576 qdf_status = QDF_STATUS_E_FAILURE; 4577 goto err; 4578 } 4579 4580 pipe_info = &hif_state->pipe_info[pipe_num]; 4581 4582 qdf_status = hif_config_ce_by_id(scn, pipe_num); 4583 /* CE Already initialized. Do not try to reinitialized again */ 4584 if (qdf_status == QDF_STATUS_E_BUSY) 4585 return QDF_STATUS_SUCCESS; 4586 4587 qdf_status = hif_config_irq_by_ceid(scn, pipe_num); 4588 if (qdf_status < 0) 4589 goto err; 4590 4591 qdf_status = hif_completion_thread_startup_by_ceid(hif_state, pipe_num); 4592 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { 4593 hif_err("%s:failed to start hif thread", __func__); 4594 goto err; 4595 } 4596 4597 /* Post buffers for pktlog copy engine. */ 4598 qdf_status = hif_post_recv_buffers_for_pipe(pipe_info); 4599 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { 4600 /* cleanup is done in hif_ce_disable */ 4601 hif_err("%s:failed to post buffers", __func__); 4602 return qdf_status; 4603 } 4604 scn->pktlog_init = true; 4605 return qdf_status != QDF_STATUS_SUCCESS; 4606 4607 err: 4608 hif_debug("%s: X, ret = %d", __func__, qdf_status); 4609 return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE; 4610 } 4611 4612 #ifdef IPA_OFFLOAD 4613 /** 4614 * hif_ce_ipa_get_ce_resource() - get uc resource on hif 4615 * @scn: bus context 4616 * @ce_sr_base_paddr: copyengine source ring base physical address 4617 * @ce_sr_ring_size: copyengine source ring size 4618 * @ce_reg_paddr: copyengine register physical address 4619 * 4620 * IPA micro controller data path offload feature enabled, 4621 * HIF should release copy engine related resource information to IPA UC 4622 * IPA UC will access hardware resource with released information 4623 * 4624 * Return: None 4625 */ 4626 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn, 4627 qdf_shared_mem_t **ce_sr, 4628 uint32_t *ce_sr_ring_size, 4629 qdf_dma_addr_t *ce_reg_paddr) 4630 { 4631 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 4632 struct HIF_CE_pipe_info *pipe_info = 4633 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]); 4634 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 4635 4636 ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size, 4637 ce_reg_paddr); 4638 } 4639 #endif /* IPA_OFFLOAD */ 4640 4641 4642 #ifdef ADRASTEA_SHADOW_REGISTERS 4643 4644 /* 4645 * Current shadow register config 4646 * 4647 * ----------------------------------------------------------- 4648 * Shadow Register | CE | src/dst write index 4649 * ----------------------------------------------------------- 4650 * 0 | 0 | src 4651 * 1 No Config - Doesn't point to anything 4652 * 2 No Config - Doesn't point to anything 4653 * 3 | 3 | src 4654 * 4 | 4 | src 4655 * 5 | 5 | src 4656 * 6 No Config - Doesn't point to anything 4657 * 7 | 7 | src 4658 * 8 No Config - Doesn't point to anything 4659 * 9 No Config - Doesn't point to anything 4660 * 10 No Config - Doesn't point to anything 4661 * 11 No Config - Doesn't point to anything 4662 * ----------------------------------------------------------- 4663 * 12 No Config - Doesn't point to anything 4664 * 13 | 1 | dst 4665 * 14 | 2 | dst 4666 * 15 No Config 
- Doesn't point to anything 4667 * 16 No Config - Doesn't point to anything 4668 * 17 No Config - Doesn't point to anything 4669 * 18 No Config - Doesn't point to anything 4670 * 19 | 7 | dst 4671 * 20 | 8 | dst 4672 * 21 No Config - Doesn't point to anything 4673 * 22 No Config - Doesn't point to anything 4674 * 23 No Config - Doesn't point to anything 4675 * ----------------------------------------------------------- 4676 * 4677 * 4678 * ToDo - Move shadow register config to following in the future 4679 * This helps free up a block of shadow registers towards the end. 4680 * Can be used for other purposes 4681 * 4682 * ----------------------------------------------------------- 4683 * Shadow Register | CE | src/dst write index 4684 * ----------------------------------------------------------- 4685 * 0 | 0 | src 4686 * 1 | 3 | src 4687 * 2 | 4 | src 4688 * 3 | 5 | src 4689 * 4 | 7 | src 4690 * ----------------------------------------------------------- 4691 * 5 | 1 | dst 4692 * 6 | 2 | dst 4693 * 7 | 7 | dst 4694 * 8 | 8 | dst 4695 * ----------------------------------------------------------- 4696 * 9 No Config - Doesn't point to anything 4697 * 12 No Config - Doesn't point to anything 4698 * 13 No Config - Doesn't point to anything 4699 * 14 No Config - Doesn't point to anything 4700 * 15 No Config - Doesn't point to anything 4701 * 16 No Config - Doesn't point to anything 4702 * 17 No Config - Doesn't point to anything 4703 * 18 No Config - Doesn't point to anything 4704 * 19 No Config - Doesn't point to anything 4705 * 20 No Config - Doesn't point to anything 4706 * 21 No Config - Doesn't point to anything 4707 * 22 No Config - Doesn't point to anything 4708 * 23 No Config - Doesn't point to anything 4709 * ----------------------------------------------------------- 4710 */ 4711 #ifndef QCN7605_SUPPORT 4712 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 4713 { 4714 u32 addr = 0; 4715 u32 ce = COPY_ENGINE_ID(ctrl_addr); 4716 4717 switch (ce) { 4718 case 0: 4719 addr = SHADOW_VALUE0; 4720 break; 4721 case 3: 4722 addr = SHADOW_VALUE3; 4723 break; 4724 case 4: 4725 addr = SHADOW_VALUE4; 4726 break; 4727 case 5: 4728 addr = SHADOW_VALUE5; 4729 break; 4730 case 7: 4731 addr = SHADOW_VALUE7; 4732 break; 4733 default: 4734 hif_err("Invalid CE ctrl_addr (CE=%d)", ce); 4735 QDF_ASSERT(0); 4736 } 4737 return addr; 4738 4739 } 4740 4741 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 4742 { 4743 u32 addr = 0; 4744 u32 ce = COPY_ENGINE_ID(ctrl_addr); 4745 4746 switch (ce) { 4747 case 1: 4748 addr = SHADOW_VALUE13; 4749 break; 4750 case 2: 4751 addr = SHADOW_VALUE14; 4752 break; 4753 case 5: 4754 addr = SHADOW_VALUE17; 4755 break; 4756 case 7: 4757 addr = SHADOW_VALUE19; 4758 break; 4759 case 8: 4760 addr = SHADOW_VALUE20; 4761 break; 4762 case 9: 4763 addr = SHADOW_VALUE21; 4764 break; 4765 case 10: 4766 addr = SHADOW_VALUE22; 4767 break; 4768 case 11: 4769 addr = SHADOW_VALUE23; 4770 break; 4771 default: 4772 hif_err("Invalid CE ctrl_addr (CE=%d)", ce); 4773 QDF_ASSERT(0); 4774 } 4775 4776 return addr; 4777 4778 } 4779 #else 4780 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 4781 { 4782 u32 addr = 0; 4783 u32 ce = COPY_ENGINE_ID(ctrl_addr); 4784 4785 switch (ce) { 4786 case 0: 4787 addr = SHADOW_VALUE0; 4788 break; 4789 case 3: 4790 addr = SHADOW_VALUE3; 4791 break; 4792 case 4: 4793 addr = SHADOW_VALUE4; 4794 break; 4795 case 5: 4796 addr = SHADOW_VALUE5; 4797 break; 4798 default: 4799 hif_err("Invalid CE ctrl_addr (CE=%d)", ce); 4800 
QDF_ASSERT(0); 4801 } 4802 return addr; 4803 } 4804 4805 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 4806 { 4807 u32 addr = 0; 4808 u32 ce = COPY_ENGINE_ID(ctrl_addr); 4809 4810 switch (ce) { 4811 case 1: 4812 addr = SHADOW_VALUE13; 4813 break; 4814 case 2: 4815 addr = SHADOW_VALUE14; 4816 break; 4817 case 3: 4818 addr = SHADOW_VALUE15; 4819 break; 4820 case 5: 4821 addr = SHADOW_VALUE17; 4822 break; 4823 case 7: 4824 addr = SHADOW_VALUE19; 4825 break; 4826 case 8: 4827 addr = SHADOW_VALUE20; 4828 break; 4829 case 9: 4830 addr = SHADOW_VALUE21; 4831 break; 4832 case 10: 4833 addr = SHADOW_VALUE22; 4834 break; 4835 case 11: 4836 addr = SHADOW_VALUE23; 4837 break; 4838 default: 4839 hif_err("Invalid CE ctrl_addr (CE=%d)", ce); 4840 QDF_ASSERT(0); 4841 } 4842 4843 return addr; 4844 } 4845 #endif 4846 #endif 4847 4848 #if defined(FEATURE_LRO) 4849 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id) 4850 { 4851 struct CE_state *ce_state; 4852 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 4853 4854 ce_state = scn->ce_id_to_state[ctx_id]; 4855 4856 return ce_state->lro_data; 4857 } 4858 #endif 4859 4860 /** 4861 * hif_map_service_to_pipe() - returns the ce ids pertaining to 4862 * this service 4863 * @scn: hif_softc pointer. 4864 * @svc_id: Service ID for which the mapping is needed. 4865 * @ul_pipe: address of the container in which ul pipe is returned. 4866 * @dl_pipe: address of the container in which dl pipe is returned. 4867 * @ul_is_polled: address of the container in which a bool 4868 * indicating if the UL CE for this service 4869 * is polled is returned. 4870 * @dl_is_polled: address of the container in which a bool 4871 * indicating if the DL CE for this service 4872 * is polled is returned. 4873 * 4874 * Return: Indicates whether the service has been found in the table. 4875 * Upon return, ul_is_polled is updated only if ul_pipe is updated. 4876 * There will be warning logs if either leg has not been updated 4877 * because it missed the entry in the table (but this is not an err). 
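 *
 * Example (illustrative sketch; hif_get_wake_ce_id() later in this file
 * performs the same lookup for real):
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (!hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
 *				     &ul_pipe, &dl_pipe,
 *				     &ul_polled, &dl_polled))
 *		hif_debug("HTC ctrl: ul CE %u dl CE %u", ul_pipe, dl_pipe);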
4878 */ 4879 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id, 4880 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled, 4881 int *dl_is_polled) 4882 { 4883 int status = -EINVAL; 4884 unsigned int i; 4885 struct service_to_pipe element; 4886 struct service_to_pipe *tgt_svc_map_to_use; 4887 uint32_t sz_tgt_svc_map_to_use; 4888 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 4889 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 4890 bool dl_updated = false; 4891 bool ul_updated = false; 4892 4893 hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use, 4894 &sz_tgt_svc_map_to_use); 4895 4896 *dl_is_polled = 0; /* polling for received messages not supported */ 4897 4898 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) { 4899 4900 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element)); 4901 if (element.service_id == svc_id) { 4902 if (element.pipedir == PIPEDIR_OUT) { 4903 *ul_pipe = element.pipenum; 4904 *ul_is_polled = 4905 (hif_state->host_ce_config[*ul_pipe].flags & 4906 CE_ATTR_DISABLE_INTR) != 0; 4907 ul_updated = true; 4908 } else if (element.pipedir == PIPEDIR_IN) { 4909 *dl_pipe = element.pipenum; 4910 dl_updated = true; 4911 } 4912 status = 0; 4913 } 4914 } 4915 if (ul_updated == false) 4916 hif_debug("ul pipe is NOT updated for service %d", svc_id); 4917 if (dl_updated == false) 4918 hif_debug("dl pipe is NOT updated for service %d", svc_id); 4919 4920 return status; 4921 } 4922 4923 #ifdef SHADOW_REG_DEBUG 4924 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn, 4925 uint32_t CE_ctrl_addr) 4926 { 4927 uint32_t read_from_hw, srri_from_ddr = 0; 4928 4929 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS); 4930 4931 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); 4932 4933 if (read_from_hw != srri_from_ddr) { 4934 hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", 4935 srri_from_ddr, read_from_hw, 4936 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); 4937 QDF_ASSERT(0); 4938 } 4939 return srri_from_ddr; 4940 } 4941 4942 4943 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn, 4944 uint32_t CE_ctrl_addr) 4945 { 4946 uint32_t read_from_hw, drri_from_ddr = 0; 4947 4948 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS); 4949 4950 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); 4951 4952 if (read_from_hw != drri_from_ddr) { 4953 hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", 4954 drri_from_ddr, read_from_hw, 4955 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); 4956 QDF_ASSERT(0); 4957 } 4958 return drri_from_ddr; 4959 } 4960 4961 #endif 4962 4963 /** 4964 * hif_dump_ce_registers() - dump ce registers 4965 * @scn: hif_opaque_softc pointer. 
4966 * 4967 * Output the copy engine registers 4968 * 4969 * Return: 0 for success or error code 4970 */ 4971 int hif_dump_ce_registers(struct hif_softc *scn) 4972 { 4973 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 4974 uint32_t ce_reg_address = CE0_BASE_ADDRESS; 4975 uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2]; 4976 uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2; 4977 uint16_t i; 4978 QDF_STATUS status; 4979 4980 for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) { 4981 if (!scn->ce_id_to_state[i]) { 4982 hif_debug("CE%d not used", i); 4983 continue; 4984 } 4985 4986 status = hif_diag_read_mem(hif_hdl, ce_reg_address, 4987 (uint8_t *) &ce_reg_values[0], 4988 ce_reg_word_size * sizeof(uint32_t)); 4989 4990 if (status != QDF_STATUS_SUCCESS) { 4991 hif_err("Dumping CE register failed!"); 4992 return -EACCES; 4993 } 4994 hif_debug("CE%d=>", i); 4995 qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG, 4996 (uint8_t *) &ce_reg_values[0], 4997 ce_reg_word_size * sizeof(uint32_t)); 4998 qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address 4999 + SR_WR_INDEX_ADDRESS), 5000 ce_reg_values[SR_WR_INDEX_ADDRESS/4]); 5001 qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address 5002 + CURRENT_SRRI_ADDRESS), 5003 ce_reg_values[CURRENT_SRRI_ADDRESS/4]); 5004 qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address 5005 + DST_WR_INDEX_ADDRESS), 5006 ce_reg_values[DST_WR_INDEX_ADDRESS/4]); 5007 qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address 5008 + CURRENT_DRRI_ADDRESS), 5009 ce_reg_values[CURRENT_DRRI_ADDRESS/4]); 5010 qdf_print("---"); 5011 } 5012 return 0; 5013 } 5014 qdf_export_symbol(hif_dump_ce_registers); 5015 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT 5016 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc, 5017 struct hif_pipe_addl_info *hif_info, uint32_t pipe) 5018 { 5019 struct hif_softc *scn = HIF_GET_SOFTC(osc); 5020 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 5021 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc); 5022 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); 5023 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 5024 struct CE_state *ce_state = (struct CE_state *)ce_hdl; 5025 struct CE_ring_state *src_ring = ce_state->src_ring; 5026 struct CE_ring_state *dest_ring = ce_state->dest_ring; 5027 5028 if (src_ring) { 5029 hif_info->ul_pipe.nentries = src_ring->nentries; 5030 hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask; 5031 hif_info->ul_pipe.sw_index = src_ring->sw_index; 5032 hif_info->ul_pipe.write_index = src_ring->write_index; 5033 hif_info->ul_pipe.hw_index = src_ring->hw_index; 5034 hif_info->ul_pipe.base_addr_CE_space = 5035 src_ring->base_addr_CE_space; 5036 hif_info->ul_pipe.base_addr_owner_space = 5037 src_ring->base_addr_owner_space; 5038 } 5039 5040 5041 if (dest_ring) { 5042 hif_info->dl_pipe.nentries = dest_ring->nentries; 5043 hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask; 5044 hif_info->dl_pipe.sw_index = dest_ring->sw_index; 5045 hif_info->dl_pipe.write_index = dest_ring->write_index; 5046 hif_info->dl_pipe.hw_index = dest_ring->hw_index; 5047 hif_info->dl_pipe.base_addr_CE_space = 5048 dest_ring->base_addr_CE_space; 5049 hif_info->dl_pipe.base_addr_owner_space = 5050 dest_ring->base_addr_owner_space; 5051 } 5052 5053 hif_info->pci_mem = pci_resource_start(sc->pdev, 0); 5054 hif_info->ctrl_addr = ce_state->ctrl_addr; 5055 5056 return hif_info; 5057 } 5058 qdf_export_symbol(hif_get_addl_pipe_info); 5059 5060 uint32_t 
hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode) 5061 { 5062 struct hif_softc *scn = HIF_GET_SOFTC(osc); 5063 5064 scn->nss_wifi_ol_mode = mode; 5065 return 0; 5066 } 5067 qdf_export_symbol(hif_set_nss_wifiol_mode); 5068 #endif 5069 5070 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib) 5071 { 5072 struct hif_softc *scn = HIF_GET_SOFTC(osc); 5073 scn->hif_attribute = hif_attrib; 5074 } 5075 5076 5077 /* disable interrupts (only applicable for legacy copy engine currently */ 5078 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num) 5079 { 5080 struct hif_softc *scn = HIF_GET_SOFTC(osc); 5081 struct CE_state *CE_state = scn->ce_id_to_state[pipe_num]; 5082 uint32_t ctrl_addr = CE_state->ctrl_addr; 5083 5084 Q_TARGET_ACCESS_BEGIN(scn); 5085 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr); 5086 Q_TARGET_ACCESS_END(scn); 5087 } 5088 qdf_export_symbol(hif_disable_interrupt); 5089 5090 /** 5091 * hif_fw_event_handler() - hif fw event handler 5092 * @hif_state: pointer to hif ce state structure 5093 * 5094 * Process fw events and raise HTC callback to process fw events. 5095 * 5096 * Return: none 5097 */ 5098 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state) 5099 { 5100 struct hif_msg_callbacks *msg_callbacks = 5101 &hif_state->msg_callbacks_current; 5102 5103 if (!msg_callbacks->fwEventHandler) 5104 return; 5105 5106 msg_callbacks->fwEventHandler(msg_callbacks->Context, 5107 QDF_STATUS_E_FAILURE); 5108 } 5109 5110 #ifndef QCA_WIFI_3_0 5111 /** 5112 * hif_fw_interrupt_handler() - FW interrupt handler 5113 * @irq: irq number 5114 * @arg: the user pointer 5115 * 5116 * Called from the PCI interrupt handler when a 5117 * firmware-generated interrupt to the Host. 5118 * 5119 * only registered for legacy ce devices 5120 * 5121 * Return: status of handled irq 5122 */ 5123 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg) 5124 { 5125 struct hif_softc *scn = arg; 5126 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 5127 uint32_t fw_indicator_address, fw_indicator; 5128 5129 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 5130 return ATH_ISR_NOSCHED; 5131 5132 fw_indicator_address = hif_state->fw_indicator_address; 5133 /* For sudden unplug this will return ~0 */ 5134 fw_indicator = A_TARGET_READ(scn, fw_indicator_address); 5135 5136 if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) { 5137 /* ACK: clear Target-side pending event */ 5138 A_TARGET_WRITE(scn, fw_indicator_address, 5139 fw_indicator & ~FW_IND_EVENT_PENDING); 5140 if (Q_TARGET_ACCESS_END(scn) < 0) 5141 return ATH_ISR_SCHED; 5142 5143 if (hif_state->started) { 5144 hif_fw_event_handler(hif_state); 5145 } else { 5146 /* 5147 * Probable Target failure before we're prepared 5148 * to handle it. Generally unexpected. 
5149 * fw_indicator used as bitmap, and defined as below: 5150 * FW_IND_EVENT_PENDING 0x1 5151 * FW_IND_INITIALIZED 0x2 5152 * FW_IND_NEEDRECOVER 0x4 5153 */ 5154 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, 5155 ("%s: Early firmware event indicated 0x%x\n", 5156 __func__, fw_indicator)); 5157 } 5158 } else { 5159 if (Q_TARGET_ACCESS_END(scn) < 0) 5160 return ATH_ISR_SCHED; 5161 } 5162 5163 return ATH_ISR_SCHED; 5164 } 5165 #else 5166 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg) 5167 { 5168 return ATH_ISR_SCHED; 5169 } 5170 #endif /* #ifdef QCA_WIFI_3_0 */ 5171 5172 5173 /** 5174 * hif_wlan_disable(): call the platform driver to disable wlan 5175 * @scn: HIF Context 5176 * 5177 * This function passes the con_mode to platform driver to disable 5178 * wlan. 5179 * 5180 * Return: void 5181 */ 5182 void hif_wlan_disable(struct hif_softc *scn) 5183 { 5184 enum pld_driver_mode mode; 5185 uint32_t con_mode = hif_get_conparam(scn); 5186 5187 if (scn->target_status == TARGET_STATUS_RESET) 5188 return; 5189 5190 if (QDF_GLOBAL_FTM_MODE == con_mode) 5191 mode = PLD_FTM; 5192 else if (QDF_IS_EPPING_ENABLED(con_mode)) 5193 mode = PLD_EPPING; 5194 else 5195 mode = PLD_MISSION; 5196 5197 pld_wlan_disable(scn->qdf_dev->dev, mode); 5198 } 5199 5200 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id) 5201 { 5202 int status; 5203 uint8_t ul_pipe, dl_pipe; 5204 int ul_is_polled, dl_is_polled; 5205 5206 /* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */ 5207 status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn), 5208 HTC_CTRL_RSVD_SVC, 5209 &ul_pipe, &dl_pipe, 5210 &ul_is_polled, &dl_is_polled); 5211 if (status) { 5212 hif_err("Failed to map pipe: %d", status); 5213 return status; 5214 } 5215 5216 *ce_id = dl_pipe; 5217 5218 return 0; 5219 } 5220 5221 int hif_get_fw_diag_ce_id(struct hif_softc *scn, uint8_t *ce_id) 5222 { 5223 int status; 5224 uint8_t ul_pipe, dl_pipe; 5225 int ul_is_polled, dl_is_polled; 5226 5227 /* DL pipe for WMI_CONTROL_DIAG_SVC should map to the FW DIAG CE_ID */ 5228 status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn), 5229 WMI_CONTROL_DIAG_SVC, 5230 &ul_pipe, &dl_pipe, 5231 &ul_is_polled, &dl_is_polled); 5232 if (status) { 5233 hif_err("Failed to map pipe: %d", status); 5234 return status; 5235 } 5236 5237 *ce_id = dl_pipe; 5238 5239 return 0; 5240 } 5241 5242 #ifdef HIF_CE_LOG_INFO 5243 /** 5244 * ce_get_index_info(): Get CE index info 5245 * @scn: HIF Context 5246 * @ce_state: CE opaque handle 5247 * @info: CE info 5248 * 5249 * Return: 0 for success and non zero for failure 5250 */ 5251 static 5252 int ce_get_index_info(struct hif_softc *scn, void *ce_state, 5253 struct ce_index *info) 5254 { 5255 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 5256 5257 return hif_state->ce_services->ce_get_index_info(scn, ce_state, info); 5258 } 5259 5260 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data, 5261 unsigned int *offset) 5262 { 5263 struct hang_event_info info = {0}; 5264 static uint32_t tracked_ce = BIT(CE_ID_1) | BIT(CE_ID_2) | 5265 BIT(CE_ID_3) | BIT(CE_ID_4) | BIT(CE_ID_9) | BIT(CE_ID_10); 5266 uint8_t curr_index = 0; 5267 uint8_t i; 5268 uint16_t size; 5269 5270 info.active_tasklet_count = qdf_atomic_read(&scn->active_tasklet_cnt); 5271 info.active_grp_tasklet_cnt = 5272 qdf_atomic_read(&scn->active_grp_tasklet_cnt); 5273 5274 for (i = 0; i < scn->ce_count; i++) { 5275 if (!(tracked_ce & BIT(i)) || !scn->ce_id_to_state[i]) 5276 continue; 5277 5278 if (ce_get_index_info(scn, scn->ce_id_to_state[i], 5279 &info.ce_info[curr_index])) 5280 
continue; 5281 5282 curr_index++; 5283 } 5284 5285 info.ce_count = curr_index; 5286 size = sizeof(info) - 5287 (CE_COUNT_MAX - info.ce_count) * sizeof(struct ce_index); 5288 5289 if (*offset + size > QDF_WLAN_HANG_FW_OFFSET) 5290 return; 5291 5292 QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_CE_INFO, 5293 size - QDF_HANG_EVENT_TLV_HDR_SIZE); 5294 5295 qdf_mem_copy(data + *offset, &info, size); 5296 *offset = *offset + size; 5297 } 5298 #endif 5299
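#if 0
/*
 * Illustrative-only sketch (deliberately kept out of the build): one way a
 * hang-event collector could gather the CE TLV emitted by hif_log_ce_info().
 * The buffer sizing and the example_* name are assumptions made for this
 * example; only hif_log_ce_info() and QDF_WLAN_HANG_FW_OFFSET come from the
 * driver itself.
 */
static void example_collect_ce_hang_info(struct hif_softc *scn)
{
	uint8_t data[QDF_WLAN_HANG_FW_OFFSET] = { 0 };
	unsigned int offset = 0;

	/* hif_log_ce_info() bounds-checks against QDF_WLAN_HANG_FW_OFFSET
	 * itself and silently skips the TLV when it would not fit.
	 */
	hif_log_ce_info(scn, data, &offset);

	/* offset now points just past the CE TLV (or is unchanged). */
}
#endif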