/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#include "qdf_module.h"
#include "qdf_ssr_driver_dump.h"
#include <wbuff.h>

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"
#ifdef HIF_CE_LOG_INFO
#include "qdf_hang_event_notifier.h"
#endif

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
     defined(QCA_WIFI_QCA6018) || defined(QCA_WIFI_QCA5018) || \
     defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCA5332) || \
     defined(QCA_WIFI_QCA9574)) && !defined(QCA_WIFI_SUPPORT_SRNG) && \
    !defined(QCA_WIFI_WCN6450)
#define QCA_WIFI_SUPPORT_SRNG
#endif

#ifdef QCA_WIFI_SUPPORT_SRNG
#include <hal_api.h>
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived rather
 * than waiting for an interrupt that may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump access log
 *
 * dump access log
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
    hif_target_dump_access_log();
}
#endif

/*
 * This structure contains the interrupt index for each Copy engine
 * for the various numbers of MSIs available in the system.
 */
static struct ce_int_assignment ce_int_context[NUM_CE_CONTEXT] = {
    /* Default configuration */
    {{ CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),
       CE_INTERRUPT_IDX(4),
       CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),
       CE_INTERRUPT_IDX(7),
       CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(9),
       CE_INTERRUPT_IDX(10),
       CE_INTERRUPT_IDX(11),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(12),
       CE_INTERRUPT_IDX(13),
       CE_INTERRUPT_IDX(14),
       CE_INTERRUPT_IDX(15),
#endif
    } },
    /* Interrupt assignment for 1 MSI combination */
    {{ CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 2 MSI combination */
    {{ CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 3 MSI combination */
    {{ CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 4 MSI combination */
    {{ CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 5 MSI combination */
    {{ CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(4),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 6 MSI combination */
    {{ CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),
       CE_INTERRUPT_IDX(4),
       CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 7 MSI combination */
    {{ CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),
       CE_INTERRUPT_IDX(4),
       CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 8 MSI combination */
    {{ CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),
       CE_INTERRUPT_IDX(4),
       CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),
       CE_INTERRUPT_IDX(7),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 9 MSI combination */
    {{ CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),
       CE_INTERRUPT_IDX(4),
       CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),
       CE_INTERRUPT_IDX(7),
       CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 10 MSI combination */
    {{ CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),
       CE_INTERRUPT_IDX(4),
       CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),
       CE_INTERRUPT_IDX(7),
       CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(9),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 11 MSI combination */
    {{ CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),
       CE_INTERRUPT_IDX(4),
       CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),
       CE_INTERRUPT_IDX(7),
       CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(9),
       CE_INTERRUPT_IDX(10),
       CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
    /* Interrupt assignment for 12 MSI combination */
    {{ CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),
       CE_INTERRUPT_IDX(4),
       CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),
       CE_INTERRUPT_IDX(7),
       CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(9),
       CE_INTERRUPT_IDX(10),
       CE_INTERRUPT_IDX(11),
#ifdef QCA_WIFI_QCN9224
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
#endif
    } },
#ifdef QCA_WIFI_QCN9224
    /* Interrupt assignment for 13 MSI combination */
    {{ CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),
       CE_INTERRUPT_IDX(4),
       CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),
       CE_INTERRUPT_IDX(7),
       CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(9),
       CE_INTERRUPT_IDX(10),
       CE_INTERRUPT_IDX(11),
       CE_INTERRUPT_IDX(12),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
    } },
    /* Interrupt assignment for 14 MSI combination */
    {{ CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),
       CE_INTERRUPT_IDX(4),
       CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),
       CE_INTERRUPT_IDX(7),
       CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(9),
       CE_INTERRUPT_IDX(10),
       CE_INTERRUPT_IDX(11),
       CE_INTERRUPT_IDX(12),
       CE_INTERRUPT_IDX(13),
       CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(0),
    } },
    /* Interrupt assignment for 15 MSI combination */
    {{ CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),
       CE_INTERRUPT_IDX(4),
       CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),
       CE_INTERRUPT_IDX(7),
       CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(9),
       CE_INTERRUPT_IDX(10),
       CE_INTERRUPT_IDX(11),
       CE_INTERRUPT_IDX(12),
       CE_INTERRUPT_IDX(13),
       CE_INTERRUPT_IDX(14),
       CE_INTERRUPT_IDX(0),
    } },
    /* Interrupt assignment for 16 MSI combination */
    {{ CE_INTERRUPT_IDX(0),
       CE_INTERRUPT_IDX(1),
       CE_INTERRUPT_IDX(2),
       CE_INTERRUPT_IDX(3),
       CE_INTERRUPT_IDX(4),
       CE_INTERRUPT_IDX(5),
       CE_INTERRUPT_IDX(6),
       CE_INTERRUPT_IDX(7),
       CE_INTERRUPT_IDX(8),
       CE_INTERRUPT_IDX(9),
       CE_INTERRUPT_IDX(10),
       CE_INTERRUPT_IDX(11),
       CE_INTERRUPT_IDX(12),
       CE_INTERRUPT_IDX(13),
       CE_INTERRUPT_IDX(14),
       CE_INTERRUPT_IDX(15),
    } },
#endif
};
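/*
 * Illustrative lookup (a sketch, not code from this driver): with 'n' MSI
 * vectors granted, the MSI servicing copy engine 'ce_id' is found by
 * indexing ce_int_context[n] and reading the per-CE assignment member of
 * struct ce_int_assignment (the member name 'msi_idx' below is assumed
 * for illustration):
 *
 *	msi = ce_int_context[n].msi_idx[ce_id];
 *
 * E.g. with 3 MSIs, CE2 is serviced by MSI2 while CE4 falls back to MSI0.
 */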
void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
                      uint8_t cmd_id, bool start)
{
    struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

    switch (cmd_id) {
    case AGC_DUMP:
        if (start)
            priv_start_agc(scn);
        else
            priv_dump_agc(scn);
        break;
    case CHANINFO_DUMP:
        if (start)
            priv_start_cap_chaninfo(scn);
        else
            priv_dump_chaninfo(scn);
        break;
    case BB_WATCHDOG_DUMP:
        priv_dump_bbwatchdog(scn);
        break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
    case PCIE_ACCESS_DUMP:
        hif_target_access_log_dump();
        break;
#endif
    default:
        hif_err("Invalid htc dump command: %d", cmd_id);
        break;
    }
}

static void ce_poll_timeout(void *arg)
{
    struct CE_state *CE_state = (struct CE_state *)arg;

    if (CE_state->timer_inited) {
        ce_per_engine_service(CE_state->scn, CE_state->id);
        qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
    }
}

static unsigned int roundup_pwr2(unsigned int n)
{
    int i;
    unsigned int test_pwr2;

    if (!(n & (n - 1)))
        return n; /* already a power of 2 */

    test_pwr2 = 4;
    for (i = 0; i < 29; i++) {
        if (test_pwr2 > n)
            return test_pwr2;
        test_pwr2 = test_pwr2 << 1;
    }

    QDF_ASSERT(0); /* n too large */
    return 0;
}

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
    { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
    { 9, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 10, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};
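/*
 * Note (added for clarity): each shadow_reg_cfg entry pairs a CE id with
 * the offset of that CE's write-index shadow register. SRC offsets cover
 * host->target (source) rings and DST offsets cover target->host
 * (destination) rings, which is why CE7 above appears once for each
 * direction.
 */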
#ifdef QCN7605_SUPPORT
static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
    { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 3, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

#ifdef WLAN_FEATURE_EPPING
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
    { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
    { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 5, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
    { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the
 * actual mapping of HTC services to HIF pipes.
 */
/*
 * This table defines the Copy Engine configuration and the mapping of
 * services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering the BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
   ============================================================================
   Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
              |                      |      | ctio | Size     | Frequency
              |                      |      | n    |          |
   ============================================================================
   tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
   descriptor |                      |      |      | O(100B)  | and regular
   download   |                      |      |      |          |
   ----------------------------------------------------------------------------
   rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
   indication |                      |      |      | O(10B)   | regular
   upload     |                      |      |      |          |
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
   upload     |                      |      |      | O(1000B) | (frequent
   e.g. noise |                      |      |      |          | during IP1.0
   packets    |                      |      |      |          | testing)
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
   download   |                      |      |      | O(1000B) | (frequent
   e.g.       |                      |      |      |          | during IP1.0
   misdirected|                      |      |      |          | testing)
   EAPOL      |                      |      |      |          |
   packets    |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
              | DATA_VO (uplink)     |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
              | DATA_VO (downlink)   |      |      |          |
   ----------------------------------------------------------------------------
   WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
              |                      |      |      | O(100B)  |
   ----------------------------------------------------------------------------
   WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
   messages   | (downlink)           |      |      | O(100B)  |
              |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (uplink)             |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (downlink)           |      |      |          |
   ----------------------------------------------------------------------------
   diag       | none (raw CE)        | CE 7 | t<>h | 4        | Diag Window
              |                      |      |      |          | infrequent
   ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
    {
        WMI_DATA_VO_SVC,
        PIPEDIR_OUT,    /* out = UL = host -> target */
        3,
    },
    {
        WMI_DATA_VO_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        2,
    },
    {
        WMI_DATA_BK_SVC,
        PIPEDIR_OUT,    /* out = UL = host -> target */
        3,
    },
    {
        WMI_DATA_BK_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        2,
    },
    {
        WMI_DATA_BE_SVC,
        PIPEDIR_OUT,    /* out = UL = host -> target */
        3,
    },
    {
        WMI_DATA_BE_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        2,
    },
    {
        WMI_DATA_VI_SVC,
        PIPEDIR_OUT,    /* out = UL = host -> target */
        3,
    },
    {
        WMI_DATA_VI_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        2,
    },
    {
        WMI_CONTROL_SVC,
        PIPEDIR_OUT,    /* out = UL = host -> target */
        3,
    },
    {
        WMI_CONTROL_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        2,
    },
    {
        HTC_CTRL_RSVD_SVC,
        PIPEDIR_OUT,    /* out = UL = host -> target */
        0,              /* could be moved to 3 (share with WMI) */
    },
    {
        HTC_CTRL_RSVD_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        2,
    },
    {
        HTC_RAW_STREAMS_SVC, /* not currently used */
        PIPEDIR_OUT,    /* out = UL = host -> target */
        0,
    },
    {
        HTC_RAW_STREAMS_SVC, /* not currently used */
        PIPEDIR_IN,     /* in = DL = target -> host */
        2,
    },
    {
        HTT_DATA_MSG_SVC,
        PIPEDIR_OUT,    /* out = UL = host -> target */
        4,
    },
    {
        HTT_DATA_MSG_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        1,
    },
    {
        WDI_IPA_TX_SVC,
        PIPEDIR_OUT,    /* out = UL = host -> target */
        5,
    },
#if defined(QCA_WIFI_3_0_ADRASTEA)
    {
        HTT_DATA2_MSG_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        9,
    },
    {
        HTT_DATA3_MSG_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        10,
    },
    {
        PACKET_LOG_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        11,
    },
#endif
    /* (Additions here) */

    { /* Must be last */
        0,
        0,
        0,
    },
};
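/*
 * How a map like the one above is consumed (a minimal sketch mirroring
 * hif_map_service_to_pipe; not the driver's actual code):
 *
 *	static void example_svc_to_pipes(struct service_to_pipe *map,
 *					 size_t nentries, uint16_t svc_id,
 *					 uint8_t *ul_pipe, uint8_t *dl_pipe)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < nentries; i++) {
 *			if (map[i].service_id != svc_id)
 *				continue;
 *			if (map[i].pipedir == PIPEDIR_OUT)
 *				*ul_pipe = map[i].pipenum;
 *			else if (map[i].pipedir == PIPEDIR_IN)
 *				*dl_pipe = map[i].pipenum;
 *		}
 *	}
 *
 * With target_service_to_ce_map_wlan, WMI_CONTROL_SVC resolves to UL pipe 3
 * and DL pipe 2.
 */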
/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif

#if (defined(QCA_WIFI_QCA9574))
static struct service_to_pipe target_service_to_ce_map_qca9574[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca9574[] = {
};
#endif

#if (defined(QCA_WIFI_QCA8074V2))
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9, },
    { WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
};
#endif
#if (defined(QCA_WIFI_QCA6018))
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
};
#endif

#if (defined(QCA_WIFI_QCN9000))
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
};
#endif

#if (defined(QCA_WIFI_QCA5332) || defined(QCA_WIFI_QCN6432))
static struct service_to_pipe target_service_to_ce_map_qca5332[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
#ifdef WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE
    { WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 9, },
    { WMI_CONTROL_DBR_SVC, PIPEDIR_IN, 9, },
#else
    { WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_DBR_SVC, PIPEDIR_IN, 2, },
#endif
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca5332[] = {
};
#endif
#if (defined(QCA_WIFI_QCN9224))
static struct service_to_pipe target_service_to_ce_map_qcn9224[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
#ifdef WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE
    { WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 14, },
    { WMI_CONTROL_DBR_SVC, PIPEDIR_IN, 14, },
#else
    { WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_DBR_SVC, PIPEDIR_IN, 2, },
#endif
    /* (Additions here) */
    { 0, 0, 0, },
};
#endif

#if defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCN9160)
static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
};
#endif

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#ifdef QCN7605_SUPPORT
static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
    { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
#ifdef IPA_OFFLOAD
    { WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
#else
    { HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
#endif
    { PACKET_LOG_SVC, PIPEDIR_IN, 7, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#endif
#if (defined(QCA_WIFI_QCA6290))
#ifdef QCA_6290_AP_MODE
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
    { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6390))
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_qca6490[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#if (defined(QCA_WIFI_QCA6750))
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { PACKET_LOG_SVC, PIPEDIR_IN, 5, },
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
    { WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
#endif
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
};
#endif

#if (defined(QCA_WIFI_KIWI))
#ifdef FEATURE_DIRECT_LINK
static struct service_to_pipe target_service_to_ce_map_kiwi_direct_link[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 4, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
    { WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
#endif
    { LPASS_DATA_MSG_SVC, PIPEDIR_OUT, 0, },
    { LPASS_DATA_MSG_SVC, PIPEDIR_IN, 5, },
    /* (Additions here) */
    { 0, 0, 0, },
};
#endif

static struct service_to_pipe target_service_to_ce_map_kiwi[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
    { WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
#endif
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_kiwi[] = {
};
#endif

#ifdef QCA_WIFI_WCN6450
static struct service_to_pipe target_service_to_ce_map_wcn6450[] = {
    { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
    { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
    { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
    { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
    { WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
    { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
    { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
    { HTT_DATA2_MSG_SVC, PIPEDIR_OUT, 5, },
    { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
    { HTT_DATA2_MSG_SVC, PIPEDIR_IN, 10, },
    { HTT_DATA3_MSG_SVC, PIPEDIR_IN, 11, },
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
    { WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
#endif
    /* (Additions here) */
    { 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_wcn6450[] = {
};
#endif
static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
    {
        WMI_DATA_VO_SVC,
        PIPEDIR_OUT,    /* out = UL = host -> target */
        3,
    },
    {
        WMI_DATA_VO_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        2,
    },
    {
        WMI_DATA_BK_SVC,
        PIPEDIR_OUT,    /* out = UL = host -> target */
        3,
    },
    {
        WMI_DATA_BK_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        2,
    },
    {
        WMI_DATA_BE_SVC,
        PIPEDIR_OUT,    /* out = UL = host -> target */
        3,
    },
    {
        WMI_DATA_BE_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        2,
    },
    {
        WMI_DATA_VI_SVC,
        PIPEDIR_OUT,    /* out = UL = host -> target */
        3,
    },
    {
        WMI_DATA_VI_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        2,
    },
    {
        WMI_CONTROL_SVC,
        PIPEDIR_OUT,    /* out = UL = host -> target */
        3,
    },
    {
        WMI_CONTROL_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        2,
    },
    {
        HTC_CTRL_RSVD_SVC,
        PIPEDIR_OUT,    /* out = UL = host -> target */
        0,              /* could be moved to 3 (share with WMI) */
    },
    {
        HTC_CTRL_RSVD_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        1,
    },
    {
        HTC_RAW_STREAMS_SVC, /* not currently used */
        PIPEDIR_OUT,    /* out = UL = host -> target */
        0,
    },
    {
        HTC_RAW_STREAMS_SVC, /* not currently used */
        PIPEDIR_IN,     /* in = DL = target -> host */
        1,
    },
    {
        HTT_DATA_MSG_SVC,
        PIPEDIR_OUT,    /* out = UL = host -> target */
        4,
    },
#ifdef WLAN_FEATURE_FASTPATH
    {
        HTT_DATA_MSG_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        5,
    },
#else /* WLAN_FEATURE_FASTPATH */
    {
        HTT_DATA_MSG_SVC,
        PIPEDIR_IN,     /* in = DL = target -> host */
        1,
    },
#endif /* WLAN_FEATURE_FASTPATH */

    /* (Additions here) */

    { /* Must be last */
        0,
        0,
        0,
    },
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
    {WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
    {WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
    {WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
    {WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
    {WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
    {WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
    {WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
    {WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
    {WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
    {WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
    {HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
    {HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
    {HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
    {HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
    {HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
    {HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
    {0, 0, 0,},                             /* Must be last */
};

void hif_select_epping_service_to_pipe_map(struct service_to_pipe
                                           **tgt_svc_map_to_use,
                                           uint32_t *sz_tgt_svc_map_to_use)
{
    *tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
    *sz_tgt_svc_map_to_use =
            sizeof(target_service_to_ce_map_wlan_epping);
}
#endif
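/*
 * Sketch of how the epping override is applied (this mirrors the logic in
 * hif_select_service_to_pipe_map() below; illustration only):
 *
 *	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
 *		hif_select_epping_service_to_pipe_map(&map, &map_sz);
 */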
#ifdef QCN7605_SUPPORT
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
                               uint32_t *sz_tgt_svc_map_to_use)
{
    *tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
    *sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
}
#else
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
                               uint32_t *sz_tgt_svc_map_to_use)
{
    hif_err("QCN7605 not supported");
}
#endif

#ifdef QCA_WIFI_QCN9224
static
void hif_set_ce_config_qcn9224(struct hif_softc *scn,
                               struct HIF_CE_state *hif_state)
{
    hif_state->host_ce_config = host_ce_config_wlan_qcn9224;
    hif_state->target_ce_config = target_ce_config_wlan_qcn9224;
    hif_state->target_ce_config_sz =
            sizeof(target_ce_config_wlan_qcn9224);
    scn->ce_count = QCN_9224_CE_COUNT;
    scn->ini_cfg.disable_wake_irq = 1;
}

static
void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
                               uint32_t *sz_tgt_svc_map_to_use)
{
    *tgt_svc_map_to_use = target_service_to_ce_map_qcn9224;
    *sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn9224);
}
#else
static inline
void hif_set_ce_config_qcn9224(struct hif_softc *scn,
                               struct HIF_CE_state *hif_state)
{
    hif_err("QCN9224 not supported");
}

static inline
void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
                               uint32_t *sz_tgt_svc_map_to_use)
{
    hif_err("QCN9224 not supported");
}
#endif

#ifdef FEATURE_DIRECT_LINK
/**
 * hif_select_service_to_pipe_map_kiwi() - Select service to CE map
 *  configuration for Kiwi
 * @scn: HIF context
 * @tgt_svc_map_to_use: returned service map
 * @sz_tgt_svc_map_to_use: returned length of the service map
 *
 * Return: None
 */
static inline void
hif_select_service_to_pipe_map_kiwi(struct hif_softc *scn,
                                    struct service_to_pipe **tgt_svc_map_to_use,
                                    uint32_t *sz_tgt_svc_map_to_use)
{
    if (pld_is_direct_link_supported(scn->qdf_dev->dev)) {
        *tgt_svc_map_to_use = target_service_to_ce_map_kiwi_direct_link;
        *sz_tgt_svc_map_to_use =
                sizeof(target_service_to_ce_map_kiwi_direct_link);
    } else {
        *tgt_svc_map_to_use = target_service_to_ce_map_kiwi;
        *sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_kiwi);
    }
}
#else
static inline void
hif_select_service_to_pipe_map_kiwi(struct hif_softc *scn,
                                    struct service_to_pipe **tgt_svc_map_to_use,
                                    uint32_t *sz_tgt_svc_map_to_use)
{
    *tgt_svc_map_to_use = target_service_to_ce_map_kiwi;
    *sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_kiwi);
}
#endif
static void hif_select_service_to_pipe_map(struct hif_softc *scn,
                                           struct service_to_pipe **tgt_svc_map_to_use,
                                           uint32_t *sz_tgt_svc_map_to_use)
{
    uint32_t mode = hif_get_conparam(scn);
    struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
    struct hif_target_info *tgt_info = &scn->target_info;

    if (QDF_IS_EPPING_ENABLED(mode)) {
        hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
                                              sz_tgt_svc_map_to_use);
    } else {
        switch (tgt_info->target_type) {
        default:
            *tgt_svc_map_to_use = target_service_to_ce_map_wlan;
            *sz_tgt_svc_map_to_use =
                    sizeof(target_service_to_ce_map_wlan);
            break;
        case TARGET_TYPE_QCN7605:
            hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
                                      sz_tgt_svc_map_to_use);
            break;
        case TARGET_TYPE_AR900B:
        case TARGET_TYPE_QCA9984:
        case TARGET_TYPE_QCA9888:
        case TARGET_TYPE_AR9888:
        case TARGET_TYPE_AR9888V2:
            *tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
            *sz_tgt_svc_map_to_use =
                    sizeof(target_service_to_ce_map_ar900b);
            break;
        case TARGET_TYPE_QCA6290:
            *tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
            *sz_tgt_svc_map_to_use =
                    sizeof(target_service_to_ce_map_qca6290);
            break;
        case TARGET_TYPE_QCA6390:
            *tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
            *sz_tgt_svc_map_to_use =
                    sizeof(target_service_to_ce_map_qca6390);
            break;
        case TARGET_TYPE_QCA6490:
            *tgt_svc_map_to_use = target_service_to_ce_map_qca6490;
            *sz_tgt_svc_map_to_use =
                    sizeof(target_service_to_ce_map_qca6490);
            break;
        case TARGET_TYPE_QCA6750:
            *tgt_svc_map_to_use = target_service_to_ce_map_qca6750;
            *sz_tgt_svc_map_to_use =
                    sizeof(target_service_to_ce_map_qca6750);
            break;
        case TARGET_TYPE_KIWI:
        case TARGET_TYPE_MANGO:
        case TARGET_TYPE_PEACH:
            hif_select_service_to_pipe_map_kiwi(scn,
                                                tgt_svc_map_to_use,
                                                sz_tgt_svc_map_to_use);
            break;
        case TARGET_TYPE_WCN6450:
            *tgt_svc_map_to_use = target_service_to_ce_map_wcn6450;
            *sz_tgt_svc_map_to_use =
                    sizeof(target_service_to_ce_map_wcn6450);
            break;
        case TARGET_TYPE_QCA8074:
            *tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
            *sz_tgt_svc_map_to_use =
                    sizeof(target_service_to_ce_map_qca8074);
            break;
        case TARGET_TYPE_QCA8074V2:
            *tgt_svc_map_to_use =
                    target_service_to_ce_map_qca8074_v2;
            *sz_tgt_svc_map_to_use =
                    sizeof(target_service_to_ce_map_qca8074_v2);
            break;
        case TARGET_TYPE_QCA9574:
            *tgt_svc_map_to_use =
                    target_service_to_ce_map_qca9574;
            *sz_tgt_svc_map_to_use =
                    sizeof(target_service_to_ce_map_qca9574);
            break;
        case TARGET_TYPE_QCA6018:
            *tgt_svc_map_to_use =
                    target_service_to_ce_map_qca6018;
            *sz_tgt_svc_map_to_use =
                    sizeof(target_service_to_ce_map_qca6018);
            break;
        case TARGET_TYPE_QCN9000:
            *tgt_svc_map_to_use =
                    target_service_to_ce_map_qcn9000;
            *sz_tgt_svc_map_to_use =
                    sizeof(target_service_to_ce_map_qcn9000);
            break;
        case TARGET_TYPE_QCN9224:
            hif_select_ce_map_qcn9224(tgt_svc_map_to_use,
                                      sz_tgt_svc_map_to_use);
            break;
        case TARGET_TYPE_QCA5332:
        case TARGET_TYPE_QCN6432:
            *tgt_svc_map_to_use = target_service_to_ce_map_qca5332;
            *sz_tgt_svc_map_to_use =
                    sizeof(target_service_to_ce_map_qca5332);
            break;
        case TARGET_TYPE_QCA5018:
        case TARGET_TYPE_QCN6122:
        case TARGET_TYPE_QCN9160:
            *tgt_svc_map_to_use =
                    target_service_to_ce_map_qca5018;
            *sz_tgt_svc_map_to_use =
                    sizeof(target_service_to_ce_map_qca5018);
            break;
        }
    }
    hif_state->tgt_svc_map = *tgt_svc_map_to_use;
    hif_state->sz_tgt_svc_map = *sz_tgt_svc_map_to_use /
                                    sizeof(struct service_to_pipe);
}
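/*
 * Typical caller pattern (a sketch; ce_mark_datapath() and
 * hif_get_max_wmi_ep() below do exactly this):
 *
 *	struct service_to_pipe *svc_map;
 *	uint32_t map_sz, map_len;
 *
 *	hif_select_service_to_pipe_map(scn, &svc_map, &map_sz);
 *	map_len = map_sz / sizeof(struct service_to_pipe);
 */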
#ifndef QCA_WIFI_WCN6450
/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state : pointer to the state context of the CE
 *
 * Description:
 *   Sets the htt_rx_data (or htt_tx_data) attribute of the state structure
 *   if the CE serves one of the HTT DATA services.
 *
 * Return:
 *   true if the CE serves an HTT DATA service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
    struct service_to_pipe *svc_map;
    uint32_t map_sz, map_len;
    int i;
    bool rc = false;

    if (ce_state) {
        hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
                                       &map_sz);

        map_len = map_sz / sizeof(struct service_to_pipe);
        for (i = 0; i < map_len; i++) {
            if ((svc_map[i].pipenum == ce_state->id) &&
                ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
                 (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
                 (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
                /* HTT CEs are unidirectional */
                if (svc_map[i].pipedir == PIPEDIR_IN)
                    ce_state->htt_rx_data = true;
                else
                    ce_state->htt_tx_data = true;
                rc = true;
            }
        }
    }
    return rc;
}

static void ce_update_msi_batch_intr_flags(struct CE_state *ce_state)
{
}

static inline void ce_update_wrt_idx_offset(struct hif_softc *scn,
                                            struct CE_state *ce_state,
                                            uint8_t ring_type)
{
}
#else
static bool ce_mark_datapath(struct CE_state *ce_state)
{
    struct service_to_pipe *svc_map;
    uint32_t map_sz, map_len;
    int i;

    if (!ce_state)
        return false;

    hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
                                   &map_sz);

    map_len = map_sz / sizeof(struct service_to_pipe);
    for (i = 0; i < map_len; i++) {
        if ((svc_map[i].pipenum == ce_state->id) &&
            ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
             (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
             (svc_map[i].service_id == HTT_DATA3_MSG_SVC)) &&
            (svc_map[i].pipedir == PIPEDIR_IN))
            ce_state->htt_rx_data = true;
        else if ((svc_map[i].pipenum == ce_state->id) &&
                 (svc_map[i].service_id == HTT_DATA2_MSG_SVC) &&
                 (svc_map[i].pipedir == PIPEDIR_OUT))
            ce_state->htt_tx_data = true;
    }

    return (ce_state->htt_rx_data || ce_state->htt_tx_data);
}

static void ce_update_msi_batch_intr_flags(struct CE_state *ce_state)
{
    ce_state->msi_supported = true;
    ce_state->batch_intr_supported = true;
}

static inline void ce_update_wrt_idx_offset(struct hif_softc *scn,
                                            struct CE_state *ce_state,
                                            uint8_t ring_type)
{
    if (ring_type == CE_RING_SRC)
        ce_state->ce_wrt_idx_offset =
            CE_SRC_WR_IDX_OFFSET_GET(scn, ce_state->ctrl_addr);
    else if (ring_type == CE_RING_DEST)
        ce_state->ce_wrt_idx_offset =
            CE_DST_WR_IDX_OFFSET_GET(scn, ce_state->ctrl_addr);
    else
        QDF_BUG(0);
}
/*
 * hif_ce_print_ring_stats() - Print ce ring statistics
 *
 * @hif_ctx: hif context
 *
 * Return: None
 */
void hif_ce_print_ring_stats(struct hif_opaque_softc *hif_ctx)
{
    struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
    struct CE_state *ce_state;
    int i;

    for (i = 0; i < scn->ce_count; i++) {
        ce_state = scn->ce_id_to_state[i];
        if (!ce_state)
            continue;

        if (ce_state->src_ring) {
            QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
                      "ce%d:SW: sw_index %u write_index %u", i,
                      ce_state->src_ring->sw_index,
                      ce_state->src_ring->write_index);

            QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
                      "ce%d:HW: read_index %u write_index %u", i,
                      CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn,
                            ce_state->ctrl_addr),
                      CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn,
                            ce_state->ctrl_addr));
        }

        if (ce_state->dest_ring) {
            QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
                      "ce%d:SW: sw_index %u write_index %u", i,
                      ce_state->dest_ring->sw_index,
                      ce_state->dest_ring->write_index);

            QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
                      "ce%d:HW: read_index %u write_index %u", i,
                      CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn,
                            ce_state->ctrl_addr),
                      CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn,
                            ce_state->ctrl_addr));
        }
    }
}
#endif

/**
 * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
 * @hif_ctx: hif opaque handle
 *
 * Description:
 *   Gets the number of WMI EPs configured in the target svc map. Since the
 *   EP map includes both IN and OUT direction pipes, count only OUT pipes
 *   to get the number of EPs configured for the WMI service.
 *
 * Return:
 *   uint8_t: count of WMI EPs in the target svc map
 */
uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *hif_ctx)
{
    struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
    struct service_to_pipe *svc_map;
    uint32_t map_sz, map_len;
    int i;
    uint8_t wmi_ep_count = 0;

    hif_select_service_to_pipe_map(scn, &svc_map,
                                   &map_sz);
    map_len = map_sz / sizeof(struct service_to_pipe);

    for (i = 0; i < map_len; i++) {
        /* Count number of WMI EPs based on out direction */
        if ((svc_map[i].pipedir == PIPEDIR_OUT) &&
            ((svc_map[i].service_id == WMI_CONTROL_SVC) ||
             (svc_map[i].service_id == WMI_CONTROL_SVC_WMAC1) ||
             (svc_map[i].service_id == WMI_CONTROL_SVC_WMAC2))) {
            wmi_ep_count++;
        }
    }

    return wmi_ep_count;
}
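/*
 * Worked example (illustration only): with target_service_to_ce_map_qca8074_v2
 * above, the OUT-direction WMI control entries are WMI_CONTROL_SVC (CE3),
 * WMI_CONTROL_SVC_WMAC1 (CE7) and WMI_CONTROL_SVC_WMAC2 (CE9), so
 * hif_get_max_wmi_ep() returns 3 for that map.
 */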
/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
                                         char *type)
{
    if (ring->write_index != 0 || ring->sw_index != 0)
        hif_err("ce %d, %s, initial sw_index = %d, initial write_index =%d",
                ce_id, type, ring->sw_index, ring->write_index);
    if (ring->write_index != ring->sw_index)
        QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries to be allocated
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
                                     qdf_dma_addr_t *base_addr,
                                     struct CE_ring_state *ce_ring,
                                     unsigned int nentries, uint32_t desc_size)
{
    if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
        !ce_srng_based(scn)) {
        if (!scn->ipa_ce_ring) {
            scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
                scn->qdf_dev,
                nentries * desc_size + CE_DESC_RING_ALIGN);
            if (!scn->ipa_ce_ring) {
                hif_err("Failed to allocate memory for IPA ce ring");
                return QDF_STATUS_E_NOMEM;
            }
        }
        *base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
                                          &scn->ipa_ce_ring->mem_info);
        ce_ring->base_addr_owner_space_unaligned =
                                          scn->ipa_ce_ring->vaddr;
    } else {
        ce_ring->base_addr_owner_space_unaligned =
            hif_mem_alloc_consistent_unaligned
                    (scn,
                     (nentries * desc_size +
                      CE_DESC_RING_ALIGN),
                     base_addr,
                     ce_ring->hal_ring_type,
                     &ce_ring->is_ring_prealloc);

        if (!ce_ring->base_addr_owner_space_unaligned) {
            hif_err("Failed to allocate DMA memory for ce ring id: %u",
                    CE_id);
            return QDF_STATUS_E_NOMEM;
        }
    }
    return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
                              struct CE_ring_state *ce_ring, uint32_t desc_size)
{
    if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
        !ce_srng_based(scn)) {
        if (scn->ipa_ce_ring) {
            qdf_mem_shared_mem_free(scn->qdf_dev,
                                    scn->ipa_ce_ring);
            scn->ipa_ce_ring = NULL;
        }
        ce_ring->base_addr_owner_space_unaligned = NULL;
    } else {
        hif_mem_free_consistent_unaligned
            (scn,
             ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
             ce_ring->base_addr_owner_space_unaligned,
             ce_ring->base_addr_CE_space, 0,
             ce_ring->is_ring_prealloc);
        ce_ring->base_addr_owner_space_unaligned = NULL;
    }
}
#else
	}
	return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	hif_mem_free_consistent_unaligned
		(scn,
		 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
		 ce_ring->base_addr_owner_space_unaligned,
		 ce_ring->base_addr_CE_space, 0,
		 ce_ring->is_ring_prealloc);
	ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */

/*
 * TODO: Need to explore the possibility of having this as part of a
 * target context instead of a global array.
 */
static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);

void ce_service_register_module(enum ce_target_type target_type,
				struct ce_ops* (*ce_attach)(void))
{
	if (target_type < CE_MAX_TARGET_TYPE)
		ce_attach_register[target_type] = ce_attach;
}

qdf_export_symbol(ce_service_register_module);

/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the state context of the CE
 *
 * Description:
 * returns true if the target is SRNG based
 *
 * Return:
 * true if the target is SRNG based, false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCN9000:
	case TARGET_TYPE_QCN6122:
	case TARGET_TYPE_QCN9160:
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
	case TARGET_TYPE_PEACH:
	case TARGET_TYPE_QCN9224:
	case TARGET_TYPE_QCA9574:
	case TARGET_TYPE_QCA5332:
	case TARGET_TYPE_QCN6432:
		return true;
	default:
		return false;
	}
}
qdf_export_symbol(ce_srng_based);

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	struct ce_ops *ops = NULL;

	if (ce_srng_based(scn)) {
		if (ce_attach_register[CE_SVC_SRNG])
			ops = ce_attach_register[CE_SVC_SRNG]();
	} else if (ce_attach_register[CE_SVC_LEGACY]) {
		ops = ce_attach_register[CE_SVC_LEGACY]();
	}

	return ops;
}
#else /* !QCA_WIFI_SUPPORT_SRNG */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	if (ce_attach_register[CE_SVC_LEGACY])
		return ce_attach_register[CE_SVC_LEGACY]();

	return NULL;
}
#endif /* QCA_WIFI_SUPPORT_SRNG */

static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
			scn, shadow_config, num_shadow_registers_configured);
}

#ifdef CONFIG_SHADOW_V3
static inline void
hif_prepare_hal_shadow_reg_cfg_v3(struct hif_softc *scn,
				  struct pld_wlan_enable_cfg *cfg)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (!hif_state->ce_services->ce_prepare_shadow_register_v3_cfg)
		return;

	hif_state->ce_services->ce_prepare_shadow_register_v3_cfg(
			scn, &cfg->shadow_reg_v3_cfg,
			&cfg->num_shadow_reg_v3_cfg);
}
#else
static inline void
hif_prepare_hal_shadow_reg_cfg_v3(struct hif_softc *scn,
				  struct pld_wlan_enable_cfg *cfg)
{
}
#endif

static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
					uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_desc_size(ring_type);
}

#ifdef QCA_WIFI_SUPPORT_SRNG
static inline int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
{
	switch (ce_ring_type) {
	case CE_RING_SRC:
		return CE_SRC;
	case CE_RING_DEST:
		return CE_DST;
	case CE_RING_STATUS:
		return CE_DST_STATUS;
	default:
		return -EINVAL;
	}
}
#else
static inline int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
{
	return 0;
}
#endif

static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
		uint8_t ring_type, uint32_t nentries)
{
	uint32_t ce_nbytes;
	char *ptr;
	qdf_dma_addr_t base_addr;
	struct CE_ring_state *ce_ring;
	uint32_t desc_size;
	struct hif_softc *scn = CE_state->scn;

	ce_nbytes = sizeof(struct CE_ring_state)
		+ (nentries * sizeof(void *));
	ptr = qdf_mem_malloc(ce_nbytes);
	if (!ptr)
		return NULL;

	ce_ring = (struct CE_ring_state *)ptr;
	ptr += sizeof(struct CE_ring_state);
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	ce_ring->low_water_mark_nentries = 0;
	ce_ring->high_water_mark_nentries = nentries;
	ce_ring->per_transfer_context = (void **)ptr;
	ce_ring->hal_ring_type = ce_ring_type_to_hal_ring_type(ring_type);

	desc_size = ce_get_desc_size(scn, ring_type);

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
			       ce_ring, nentries,
			       desc_size) !=
	    QDF_STATUS_SUCCESS) {
		hif_err("ring has no DMA mem");
		qdf_mem_free(ce_ring);
		return NULL;
	}
	ce_ring->base_addr_CE_space_unaligned = base_addr;

	/* Correctly initialize memory to 0 to
	 * prevent garbage data crashing system
	 * when downloading firmware
	 */
	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
		     nentries * desc_size +
		     CE_DESC_RING_ALIGN);

	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
		ce_ring->base_addr_CE_space =
			(ce_ring->base_addr_CE_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

		ce_ring->base_addr_owner_space = (void *)
			(((size_t)ce_ring->base_addr_owner_space_unaligned +
			  CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
	} else {
		ce_ring->base_addr_CE_space =
				ce_ring->base_addr_CE_space_unaligned;
		ce_ring->base_addr_owner_space =
				ce_ring->base_addr_owner_space_unaligned;
	}

	return ce_ring;
}

static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
			 uint32_t ce_id, struct CE_ring_state *ring,
			 struct CE_attr *attr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
						     ring, attr);
}

static void ce_srng_cleanup(struct hif_softc *scn, struct CE_state *CE_state,
			    uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (hif_state->ce_services->ce_srng_cleanup)
		hif_state->ce_services->ce_srng_cleanup(scn,
					CE_state, ring_type);
}

int hif_ce_bus_early_suspend(struct hif_softc *scn)
{
	uint8_t ul_pipe, dl_pipe;
	int ce_id, status, ul_is_polled, dl_is_polled;
	struct CE_state *ce_state;

	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		hif_err("pipe_mapping failure");
		return status;
	}

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (ce_id == ul_pipe)
			continue;
		if (ce_id == dl_pipe)
			continue;

		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_RUNNING)
			ce_state->state = CE_PAUSED;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
	}

	return status;
}

int hif_ce_bus_late_resume(struct hif_softc *scn)
{
	int ce_id;
	struct CE_state *ce_state;
	int write_index = 0;
	bool index_updated;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_PENDING) {
			write_index = ce_state->src_ring->write_index;
			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
						  write_index);
			ce_state->state = CE_RUNNING;
			index_updated = true;
		} else {
			index_updated = false;
		}

		if (ce_state->state == CE_PAUSED)
			ce_state->state = CE_RUNNING;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		if (index_updated)
			hif_record_ce_desc_event(scn, ce_id,
					RESUME_WRITE_INDEX_UPDATE,
					NULL, NULL, write_index, 0);
	}

	return 0;
}

/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: None
 */
static void ce_oom_recovery(void *context)
{
	struct CE_state *ce_state = context;
	struct hif_softc *scn = ce_state->scn;
	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&ce_softc->pipe_info[ce_state->id];

	hif_post_recv_buffers_for_pipe(pipe_info);
}

#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_hist_data() - Allocate memory for the data pointed to
 * by the CE descriptors: HIF_CE_HISTORY_MAX records of
 * CE_DEBUG_MAX_DATA_BUF_SIZE bytes each.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return QDF_STATUS_E_NOMEM;

	scn->hif_ce_desc_hist.data_enable[ce_id] = true;
	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		event->data =
			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
		if (!event->data) {
			hif_err_rl("ce debug data alloc failed");
			scn->hif_ce_desc_hist.data_enable[ce_id] = false;
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_hist_data() - Free the data memory pointed to by
 * the CE descriptors.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		if (event->data)
			qdf_mem_free(event->data);
		event->data = NULL;
	}
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)

/* define below variables for crashscope parse */
struct hif_ce_desc_event *hif_ce_desc_history[CE_COUNT_MAX];
uint32_t hif_ce_history_max = HIF_CE_HISTORY_MAX;
uint32_t hif_ce_count_max = CE_COUNT_MAX;

/*
 * Debug builds enable CE history for all CEs; perf builds
 * (CONFIG_SLUB_DEBUG_ON=n) enable it only for the critical CEs:
 * CE2 (WMI event), CE3 (WMI cmd) and CE7.
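 *
 * When only the critical CEs are recorded, ce_id_hist_map[] (used by
 * hif_ce_debug_history_buf_get() below) maps each critical CE id onto
 * one of the CE_DESC_HISTORY_BUFF_CNT static history buffers.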
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
#define CE_DESC_HISTORY_BUFF_CNT  CE_COUNT_MAX
#define IS_CE_DEBUG_ONLY_FOR_CRIT_CE  0
#else
/* CE2, CE3, CE7 */
#define CE_DESC_HISTORY_BUFF_CNT  3
#define IS_CE_DEBUG_ONLY_FOR_CRIT_CE (BIT(2) | BIT(3) | BIT(7))
#endif
bool hif_ce_only_for_crit = IS_CE_DEBUG_ONLY_FOR_CRIT_CE;
struct hif_ce_desc_event
	hif_ce_desc_history_buff[CE_DESC_HISTORY_BUFF_CNT][HIF_CE_HISTORY_MAX];

static void
__hif_ce_desc_history_log_register(struct hif_softc *scn)
{
	qdf_ssr_driver_dump_register_region("hif_ce_desc_history_buff",
					    hif_ce_desc_history_buff,
					    sizeof(hif_ce_desc_history_buff));
	qdf_ssr_driver_dump_register_region("hif_ce_desc_hist",
					    &scn->hif_ce_desc_hist,
					    sizeof(scn->hif_ce_desc_hist));
	qdf_ssr_driver_dump_register_region("hif_ce_count_max",
					    &hif_ce_count_max,
					    sizeof(hif_ce_count_max));
	qdf_ssr_driver_dump_register_region("hif_ce_history_max",
					    &hif_ce_history_max,
					    sizeof(hif_ce_history_max));
	qdf_ssr_driver_dump_register_region("hif_ce_only_for_crit",
					    &hif_ce_only_for_crit,
					    sizeof(hif_ce_only_for_crit));
}

static void __hif_ce_desc_history_log_unregister(void)
{
	qdf_ssr_driver_dump_unregister_region("hif_ce_only_for_crit");
	qdf_ssr_driver_dump_unregister_region("hif_ce_history_max");
	qdf_ssr_driver_dump_unregister_region("hif_ce_count_max");
	qdf_ssr_driver_dump_unregister_region("hif_ce_desc_hist");
	qdf_ssr_driver_dump_unregister_region("hif_ce_desc_history_buff");
}

static struct hif_ce_desc_event *
hif_ce_debug_history_buf_get(struct hif_softc *scn, unsigned int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	hif_debug("get ce debug buffer ce_id %u, crit_ce_map=0x%lx, idx=%u",
		  ce_id, (unsigned long)IS_CE_DEBUG_ONLY_FOR_CRIT_CE,
		  ce_hist->ce_id_hist_map[ce_id]);
	if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE &&
	    (ce_id == CE_ID_2 || ce_id == CE_ID_3 || ce_id == CE_ID_7)) {
		uint8_t idx = ce_hist->ce_id_hist_map[ce_id];

		hif_ce_desc_history[ce_id] = hif_ce_desc_history_buff[idx];
	} else {
		hif_ce_desc_history[ce_id] =
			hif_ce_desc_history_buff[ce_id];
	}

	return hif_ce_desc_history[ce_id];
}

/**
 * alloc_mem_ce_debug_history() - Allocate CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 * @src_nentries: source ce ring entries
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id,
			   uint32_t src_nentries)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	/* For perf builds, return directly for CEs other than CE2/CE3/CE7 */
	if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE &&
	    ce_id != CE_ID_2 &&
	    ce_id != CE_ID_3 &&
	    ce_id != CE_ID_7) {
		ce_hist->enable[ce_id] = false;
		ce_hist->data_enable[ce_id] = false;
		return QDF_STATUS_SUCCESS;
	}

	ce_hist->hist_ev[ce_id] = hif_ce_debug_history_buf_get(scn, ce_id);
	ce_hist->enable[ce_id] = true;

	if (src_nentries) {
		status = alloc_mem_ce_debug_hist_data(scn, ce_id);
		if (status != QDF_STATUS_SUCCESS) {
			ce_hist->enable[ce_id] = false;
			ce_hist->hist_ev[ce_id] = NULL;
			return status;
		}
	} else {
		ce_hist->data_enable[ce_id] = false;
	}

	return QDF_STATUS_SUCCESS;
}

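/*
 * Usage sketch (illustrative only; it mirrors the real call sites in
 * ce_init()/ce_fini() later in this file rather than adding new code):
 *
 *	mem_status = alloc_mem_ce_debug_history(scn, CE_id,
 *						attr->src_nentries);
 *	if (mem_status != QDF_STATUS_SUCCESS)
 *		goto error_target_access;
 *	...
 *	free_mem_ce_debug_history(scn, CE_id);
 *
 * Each enabled CE consumes HIF_CE_HISTORY_MAX static history records; when
 * data capture is enabled for a src ring, alloc_mem_ce_debug_hist_data()
 * adds HIF_CE_HISTORY_MAX * CE_DEBUG_MAX_DATA_BUF_SIZE bytes of heap per CE.
 */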
/**
 * free_mem_ce_debug_history() - Free CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	if (!ce_hist->enable[ce_id])
		return;

	ce_hist->enable[ce_id] = false;
	if (ce_hist->data_enable[ce_id]) {
		ce_hist->data_enable[ce_id] = false;
		free_mem_ce_debug_hist_data(scn, ce_id);
	}
	ce_hist->hist_ev[ce_id] = NULL;
}
#else

static void
__hif_ce_desc_history_log_register(struct hif_softc *scn)
{
}

static void __hif_ce_desc_history_log_unregister(void) { }

static inline QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
			   uint32_t src_nentries)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
#endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */
#else
#if defined(HIF_CE_DEBUG_DATA_BUF)

static void
__hif_ce_desc_history_log_register(struct hif_softc *scn)
{
}

static void __hif_ce_desc_history_log_unregister(void) { }

static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
			   uint32_t src_nentries)
{
	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));

	if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
		scn->hif_ce_desc_hist.enable[CE_id] = false;
		return QDF_STATUS_E_NOMEM;
	} else {
		scn->hif_ce_desc_hist.enable[CE_id] = true;
		return QDF_STATUS_SUCCESS;
	}
}

static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];

	if (!hist_ev)
		return;

	if (ce_hist->data_enable[CE_id]) {
		ce_hist->data_enable[CE_id] = false;
		free_mem_ce_debug_hist_data(scn, CE_id);
	}

	ce_hist->enable[CE_id] = false;
	qdf_mem_free(ce_hist->hist_ev[CE_id]);
	ce_hist->hist_ev[CE_id] = NULL;
}

#else

static void
__hif_ce_desc_history_log_register(struct hif_softc *scn)
{
}

static void __hif_ce_desc_history_log_unregister(void) { }

static inline QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
			   uint32_t src_nentries)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
#endif /* HIF_CE_DEBUG_DATA_BUF */
#endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/**
 * reset_ce_debug_history() - reset the index and ce id used for dumping the
 * CE records on the console using sysfs.
 * @scn: hif scn handle
 *
 * Return: None
 */
static inline void reset_ce_debug_history(struct hif_softc *scn)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing.
	 */
	ce_hist->hist_index = 0;
	ce_hist->hist_id = 0;
}
#else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
static inline void reset_ce_debug_history(struct hif_softc *scn) { }
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */

void ce_enable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = true;
}

void ce_disable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = false;
}

#ifdef WLAN_FEATURE_SSR_DRIVER_DUMP
#define MAX_CE_STR_LEN 50
/**
 * ce_ring_dump_register_region() - Register CE ring with SSR dump
 * @CE_state: CE_state pointer
 * @CE_id: CE id
 *
 * Return: None
 */
static inline
void ce_ring_dump_register_region(struct CE_state *CE_state, unsigned int CE_id)
{
	struct CE_ring_state *ce_ring;
	char ce[MAX_CE_STR_LEN];
	char CE_ring_state[MAX_CE_STR_LEN];
	char srng[MAX_CE_STR_LEN];

	qdf_snprint(ce, MAX_CE_STR_LEN, "%s%d", "ce_", CE_id);
	qdf_ssr_driver_dump_register_region(ce, CE_state, sizeof(*CE_state));

	if (CE_state->status_ring) {
		ce_ring = CE_state->status_ring;
		qdf_snprint(CE_ring_state, MAX_CE_STR_LEN,
			    "%s%s", ce, "_status_ring");
		qdf_ssr_driver_dump_register_region(CE_ring_state, ce_ring,
						    sizeof(struct CE_ring_state));
		qdf_snprint(srng, MAX_CE_STR_LEN,
			    "%s%s", CE_ring_state, "_ctx");
		qdf_ssr_driver_dump_register_region(srng, ce_ring->srng_ctx,
						    sizeof(struct hal_srng));
	}
	if (CE_state->dest_ring) {
		ce_ring = CE_state->dest_ring;
		qdf_snprint(CE_ring_state, MAX_CE_STR_LEN,
			    "%s%s", ce, "_dest_ring");
		qdf_ssr_driver_dump_register_region(CE_ring_state, ce_ring,
						    sizeof(struct CE_ring_state));
		qdf_snprint(srng, MAX_CE_STR_LEN,
			    "%s%s", CE_ring_state, "_ctx");
		qdf_ssr_driver_dump_register_region(srng, ce_ring->srng_ctx,
						    sizeof(struct hal_srng));
	}
	if (CE_state->src_ring) {
		ce_ring = CE_state->src_ring;
		qdf_snprint(CE_ring_state, MAX_CE_STR_LEN,
			    "%s%s", ce, "_src_ring");
		qdf_ssr_driver_dump_register_region(CE_ring_state, ce_ring,
						    sizeof(struct CE_ring_state));
		qdf_snprint(srng, MAX_CE_STR_LEN,
			    "%s%s", CE_ring_state, "_ctx");
		qdf_ssr_driver_dump_register_region(srng, ce_ring->srng_ctx,
						    sizeof(struct hal_srng));
	}
}

/**
 * ce_ring_dump_unregister_region() - Unregister CE ring with SSR dump
 * @CE_state: CE_state pointer
 * @CE_id: CE id
 *
 * Return: None
 */
static inline void
ce_ring_dump_unregister_region(struct CE_state *CE_state, unsigned int CE_id)
{
	char ce[MAX_CE_STR_LEN];
	char CE_ring_state[MAX_CE_STR_LEN];
	char srng[MAX_CE_STR_LEN];

	qdf_snprint(ce, MAX_CE_STR_LEN, "%s%d", "ce_", CE_id);
	qdf_ssr_driver_dump_unregister_region(ce);
	if (CE_state->status_ring) {
		qdf_snprint(CE_ring_state, MAX_CE_STR_LEN,
			    "%s%s", ce, "_status_ring");
		qdf_snprint(srng, MAX_CE_STR_LEN,
			    "%s%s", CE_ring_state, "_ctx");
		qdf_ssr_driver_dump_unregister_region(CE_ring_state);
qdf_ssr_driver_dump_unregister_region(srng); 2592 } 2593 if (CE_state->dest_ring) { 2594 qdf_snprint(CE_ring_state, MAX_CE_STR_LEN, 2595 "%s%s", ce, "_dest_ring"); 2596 qdf_snprint(srng, MAX_CE_STR_LEN, 2597 "%s%s", CE_ring_state, "_ctx"); 2598 qdf_ssr_driver_dump_unregister_region(CE_ring_state); 2599 qdf_ssr_driver_dump_unregister_region(srng); 2600 } 2601 if (CE_state->src_ring) { 2602 qdf_snprint(CE_ring_state, MAX_CE_STR_LEN, 2603 "%s%s", ce, "_src_ring"); 2604 qdf_snprint(srng, MAX_CE_STR_LEN, 2605 "%s%s", CE_ring_state, "_ctx"); 2606 qdf_ssr_driver_dump_unregister_region(CE_ring_state); 2607 qdf_ssr_driver_dump_unregister_region(srng); 2608 } 2609 } 2610 #else 2611 static inline 2612 void ce_ring_dump_register_region(struct CE_state *CE_state, unsigned int CE_id) 2613 { 2614 } 2615 2616 static inline void 2617 ce_ring_dump_unregister_region(struct CE_state *CE_state, unsigned int CE_id) 2618 { 2619 } 2620 #endif 2621 /* 2622 * Initialize a Copy Engine based on caller-supplied attributes. 2623 * This may be called once to initialize both source and destination 2624 * rings or it may be called twice for separate source and destination 2625 * initialization. It may be that only one side or the other is 2626 * initialized by software/firmware. 2627 * 2628 * This should be called during the initialization sequence before 2629 * interrupts are enabled, so we don't have to worry about thread safety. 2630 */ 2631 struct CE_handle *ce_init(struct hif_softc *scn, 2632 unsigned int CE_id, struct CE_attr *attr) 2633 { 2634 struct CE_state *CE_state; 2635 uint32_t ctrl_addr; 2636 unsigned int nentries; 2637 bool malloc_CE_state = false; 2638 bool malloc_src_ring = false; 2639 int status; 2640 QDF_STATUS mem_status = QDF_STATUS_SUCCESS; 2641 2642 QDF_ASSERT(CE_id < scn->ce_count); 2643 ctrl_addr = CE_BASE_ADDRESS(CE_id); 2644 CE_state = scn->ce_id_to_state[CE_id]; 2645 2646 if (!CE_state) { 2647 CE_state = 2648 (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state)); 2649 if (!CE_state) 2650 return NULL; 2651 2652 malloc_CE_state = true; 2653 qdf_spinlock_create(&CE_state->ce_index_lock); 2654 #ifdef CE_TASKLET_SCHEDULE_ON_FULL 2655 qdf_spinlock_create(&CE_state->ce_interrupt_lock); 2656 #endif 2657 2658 CE_state->id = CE_id; 2659 CE_state->ctrl_addr = ctrl_addr; 2660 CE_state->state = CE_RUNNING; 2661 CE_state->attr_flags = attr->flags; 2662 } 2663 CE_state->scn = scn; 2664 CE_state->service = ce_engine_service_reg; 2665 2666 qdf_atomic_init(&CE_state->rx_pending); 2667 if (!attr) { 2668 /* Already initialized; caller wants the handle */ 2669 return (struct CE_handle *)CE_state; 2670 } 2671 2672 if (CE_state->src_sz_max) 2673 QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max); 2674 else 2675 CE_state->src_sz_max = attr->src_sz_max; 2676 2677 ce_init_ce_desc_event_log(scn, CE_id, 2678 attr->src_nentries + attr->dest_nentries); 2679 2680 /* source ring setup */ 2681 nentries = attr->src_nentries; 2682 if (nentries) { 2683 struct CE_ring_state *src_ring; 2684 2685 nentries = roundup_pwr2(nentries); 2686 if (CE_state->src_ring) { 2687 QDF_ASSERT(CE_state->src_ring->nentries == nentries); 2688 } else { 2689 src_ring = CE_state->src_ring = 2690 ce_alloc_ring_state(CE_state, 2691 CE_RING_SRC, 2692 nentries); 2693 if (!src_ring) { 2694 /* cannot allocate src ring. If the 2695 * CE_state is allocated locally free 2696 * CE_State and return error. 
2697 */ 2698 hif_err("src ring has no mem"); 2699 if (malloc_CE_state) { 2700 /* allocated CE_state locally */ 2701 qdf_mem_free(CE_state); 2702 malloc_CE_state = false; 2703 } 2704 return NULL; 2705 } 2706 /* we can allocate src ring. Mark that the src ring is 2707 * allocated locally 2708 */ 2709 malloc_src_ring = true; 2710 2711 /* 2712 * Also allocate a shadow src ring in 2713 * regular mem to use for faster access. 2714 */ 2715 src_ring->shadow_base_unaligned = 2716 qdf_mem_malloc(nentries * 2717 sizeof(struct CE_src_desc) + 2718 CE_DESC_RING_ALIGN); 2719 if (!src_ring->shadow_base_unaligned) 2720 goto error_no_dma_mem; 2721 2722 src_ring->shadow_base = (struct CE_src_desc *) 2723 (((size_t) src_ring->shadow_base_unaligned + 2724 CE_DESC_RING_ALIGN - 1) & 2725 ~(CE_DESC_RING_ALIGN - 1)); 2726 2727 status = ce_ring_setup(scn, CE_RING_SRC, CE_id, 2728 src_ring, attr); 2729 if (status < 0) 2730 goto error_target_access; 2731 ce_ring_test_initial_indexes(CE_id, src_ring, 2732 "src_ring"); 2733 if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) { 2734 qdf_timer_init(scn->qdf_dev, 2735 &CE_state->poll_timer, 2736 ce_poll_timeout, 2737 CE_state, 2738 QDF_TIMER_TYPE_WAKE_APPS); 2739 ce_enable_polling(CE_state); 2740 qdf_timer_mod(&CE_state->poll_timer, 2741 CE_POLL_TIMEOUT); 2742 } 2743 } 2744 } 2745 2746 /* destination ring setup */ 2747 nentries = attr->dest_nentries; 2748 if (nentries) { 2749 struct CE_ring_state *dest_ring; 2750 2751 nentries = roundup_pwr2(nentries); 2752 if (CE_state->dest_ring) { 2753 QDF_ASSERT(CE_state->dest_ring->nentries == nentries); 2754 } else { 2755 dest_ring = CE_state->dest_ring = 2756 ce_alloc_ring_state(CE_state, 2757 CE_RING_DEST, 2758 nentries); 2759 if (!dest_ring) { 2760 /* cannot allocate dst ring. If the CE_state 2761 * or src ring is allocated locally free 2762 * CE_State and src ring and return error. 2763 */ 2764 hif_err("dest ring has no mem"); 2765 goto error_no_dma_mem; 2766 } 2767 2768 status = ce_ring_setup(scn, CE_RING_DEST, CE_id, 2769 dest_ring, attr); 2770 if (status < 0) 2771 goto error_target_access; 2772 2773 ce_ring_test_initial_indexes(CE_id, dest_ring, 2774 "dest_ring"); 2775 2776 /* For srng based target, init status ring here */ 2777 if (ce_srng_based(CE_state->scn)) { 2778 CE_state->status_ring = 2779 ce_alloc_ring_state(CE_state, 2780 CE_RING_STATUS, 2781 nentries); 2782 if (!CE_state->status_ring) { 2783 /*Allocation failed. 
Cleanup*/ 2784 qdf_mem_free(CE_state->dest_ring); 2785 if (malloc_src_ring) { 2786 qdf_mem_free 2787 (CE_state->src_ring); 2788 CE_state->src_ring = NULL; 2789 malloc_src_ring = false; 2790 } 2791 if (malloc_CE_state) { 2792 /* allocated CE_state locally */ 2793 scn->ce_id_to_state[CE_id] = 2794 NULL; 2795 qdf_mem_free(CE_state); 2796 malloc_CE_state = false; 2797 } 2798 2799 return NULL; 2800 } 2801 2802 status = ce_ring_setup(scn, CE_RING_STATUS, 2803 CE_id, CE_state->status_ring, 2804 attr); 2805 if (status < 0) 2806 goto error_target_access; 2807 2808 } 2809 2810 /* epping */ 2811 /* poll timer */ 2812 if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) { 2813 qdf_timer_init(scn->qdf_dev, 2814 &CE_state->poll_timer, 2815 ce_poll_timeout, 2816 CE_state, 2817 QDF_TIMER_TYPE_WAKE_APPS); 2818 ce_enable_polling(CE_state); 2819 qdf_timer_mod(&CE_state->poll_timer, 2820 CE_POLL_TIMEOUT); 2821 } 2822 } 2823 } 2824 2825 if (!ce_srng_based(scn)) { 2826 /* Enable CE error interrupts */ 2827 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 2828 goto error_target_access; 2829 CE_ERROR_INTR_ENABLE(scn, ctrl_addr); 2830 if (Q_TARGET_ACCESS_END(scn) < 0) 2831 goto error_target_access; 2832 } 2833 2834 qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work, 2835 ce_oom_recovery, CE_state); 2836 2837 /* update the htt_data attribute */ 2838 ce_mark_datapath(CE_state); 2839 scn->ce_id_to_state[CE_id] = CE_state; 2840 2841 ce_ring_dump_register_region(CE_state, CE_id); 2842 2843 mem_status = alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries); 2844 if (mem_status != QDF_STATUS_SUCCESS) 2845 goto error_target_access; 2846 2847 ce_update_msi_batch_intr_flags(CE_state); 2848 ce_update_wrt_idx_offset(scn, CE_state, 2849 attr->src_nentries ? 2850 CE_RING_SRC : CE_RING_DEST); 2851 2852 return (struct CE_handle *)CE_state; 2853 2854 error_target_access: 2855 error_no_dma_mem: 2856 ce_fini((struct CE_handle *)CE_state); 2857 return NULL; 2858 } 2859 2860 void hif_ce_desc_history_log_register(struct hif_softc *scn) 2861 { 2862 __hif_ce_desc_history_log_register(scn); 2863 } 2864 2865 /** 2866 * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs 2867 * @hif_ctx: HIF Context 2868 * 2869 * API to check if polling is enabled on all CEs. Returns true when polling 2870 * is enabled on all CEs. 
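 * CEs without a destination ring (dest_nentries == 0) are ignored, so
 * src-only pipes do not affect the result.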
 *
 * Return: bool
 */
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_attr *attr;
	int id;

	for (id = 0; id < scn->ce_count; id++) {
		attr = &hif_state->host_ce_config[id];
		if (attr && (attr->dest_nentries) &&
		    !(attr->flags & CE_ATTR_ENABLE_POLL))
			return false;
	}
	return true;
}
qdf_export_symbol(hif_is_polled_mode_enabled);

static int hif_get_pktlog_ce_num(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int id;

	for (id = 0; id < hif_state->sz_tgt_svc_map; id++) {
		if (hif_state->tgt_svc_map[id].service_id == PACKET_LOG_SVC)
			return hif_state->tgt_svc_map[id].pipenum;
	}
	return -EINVAL;
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - Update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn)) {
		hif_warn("srng rings do not support fastpath");
		return;
	}
	hif_debug("Enabling fastpath mode");
	scn->fastpath_mode_on = true;
}

/**
 * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->fastpath_mode_on;
}

/**
 * hif_get_ce_handle - API to get CE handle for FastPath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: CE handle
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}
qdf_export_symbol(hif_get_ce_handle);

/**
 * ce_h2t_tx_ce_cleanup() - Placeholder function for H2T CE cleanup.
 * No processing is required inside this function.
 * @ce_hdl: Copy engine handle
 * Using an assert, this function makes sure that
 * the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring
 * therefore no locking is needed.
 *
 * Return: none
 */
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (hif_is_nss_wifi_enabled(sc))
		return;

	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
		hif_debug("Fastpath mode ON, Cleaning up HTT Tx CE");
		sw_index = src_ring->sw_index;
		write_index = src_ring->write_index;

		/* At this point Tx CE should be clean */
		qdf_assert_always(sw_index == write_index);
	}
}

/**
 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
 * @ce_hdl: Handle to CE
 *
 * These buffers are never allocated on the fly, but
 * are allocated only once during HIF start and freed
 * only once during HIF stop.
 * NOTE:
 * The assumption here is there is no in-flight DMA in progress
 * currently, so that buffers can be freed up safely.
 *
 * Return: None
 */
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *dst_ring = ce_state->dest_ring;
	qdf_nbuf_t nbuf;
	int i;

	if (!ce_state->scn->fastpath_mode_on)
		return;

	if (!ce_state->htt_rx_data)
		return;

	/*
	 * When fastpath mode is on, for datapath CEs: unlike other CEs,
	 * this CE is kept completely full and does not leave one blank
	 * space to distinguish between an empty queue and a full queue,
	 * so free all the entries.
	 */
	for (i = 0; i < dst_ring->nentries; i++) {
		nbuf = dst_ring->per_transfer_context[i];

		/*
		 * The reasons for doing this check are:
		 * 1) Protect against calling cleanup before allocating buffers
		 * 2) In a corner case, fastpath_mode_on may be set, but we
		 *    could have a partially filled ring, because of a memory
		 *    allocation failure in the middle of allocating ring.
		 *    This check accounts for that case, checking
		 *    fastpath_mode_on flag or started flag would not have
		 *    covered that case. This is not in performance path,
		 *    so OK to do this.
		 */
		if (nbuf) {
			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
		}
	}
}

/**
 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
 * @scn: HIF handle
 *
 * Datapath Rx CEs are a special case, where we reuse all the message buffers.
 * Hence we have to post all the entries in the pipe, even at the beginning,
 * unlike other CE pipes where one less than dest_nentries is filled at the
 * beginning.
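 * This is also why the fastpath Rx ring may legitimately be completely
 * full, and why ce_t2h_msg_ce_cleanup() above frees every entry rather
 * than leaving the customary one blank slot.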
 *
 * Return: None
 */
static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
	int pipe_num;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (!scn->fastpath_mode_on)
		return;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info =
			&hif_state->pipe_info[pipe_num];
		struct CE_state *ce_state =
			scn->ce_id_to_state[pipe_info->pipe_num];

		if (ce_state->htt_rx_data)
			atomic_inc(&pipe_info->recv_bufs_needed);
	}
}
#else
static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct hif_softc *scn = CE_state->scn;
	uint32_t desc_size;
	bool inited = CE_state->timer_inited;

	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	/* Set the flag to false first to stop processing in ce_poll_timeout */
	ce_disable_polling(CE_state);

	qdf_lro_deinit(CE_state->lro_data);

	ce_ring_dump_unregister_region(CE_state, CE_id);

	if (CE_state->src_ring) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
		if (CE_state->src_ring->shadow_base_unaligned)
			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->src_ring,
					  desc_size);
		ce_srng_cleanup(scn, CE_state, CE_RING_SRC);
		qdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		/* Cleanup the datapath Rx ring */
		ce_t2h_msg_ce_cleanup(copyeng);

		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->dest_ring,
					  desc_size);
		ce_srng_cleanup(scn, CE_state, CE_RING_DEST);
		qdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (inited)
			qdf_timer_free(&CE_state->poll_timer);
	}
	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
		/* Re-assert that the Tx CE is clean, then free the
		 * status ring
		 */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->status_ring->shadow_base_unaligned)
			qdf_mem_free(
				CE_state->status_ring->shadow_base_unaligned);

		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
		if (CE_state->status_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->status_ring,
					  desc_size);
		ce_srng_cleanup(scn, CE_state, CE_RING_STATUS);
		qdf_mem_free(CE_state->status_ring);
	}

	free_mem_ce_debug_history(scn, CE_id);
	reset_ce_debug_history(scn);
	ce_deinit_ce_desc_event_log(scn, CE_id);

	qdf_spinlock_destroy(&CE_state->ce_index_lock);
#ifdef CE_TASKLET_SCHEDULE_ON_FULL
	qdf_spinlock_destroy(&CE_state->ce_interrupt_lock);
#endif
	qdf_mem_free(CE_state);
}

void hif_ce_desc_history_log_unregister(void)
{
	__hif_ce_desc_history_log_unregister();
}

void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
{
struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 3167 3168 qdf_mem_zero(&hif_state->msg_callbacks_pending, 3169 sizeof(hif_state->msg_callbacks_pending)); 3170 qdf_mem_zero(&hif_state->msg_callbacks_current, 3171 sizeof(hif_state->msg_callbacks_current)); 3172 } 3173 3174 /* Send the first nbytes bytes of the buffer */ 3175 QDF_STATUS 3176 hif_send_head(struct hif_opaque_softc *hif_ctx, 3177 uint8_t pipe, unsigned int transfer_id, unsigned int nbytes, 3178 qdf_nbuf_t nbuf, unsigned int data_attr) 3179 { 3180 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 3181 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 3182 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); 3183 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 3184 int bytes = nbytes, nfrags = 0; 3185 struct ce_sendlist sendlist; 3186 int i = 0; 3187 QDF_STATUS status; 3188 unsigned int mux_id = 0; 3189 3190 if (nbytes > qdf_nbuf_len(nbuf)) { 3191 hif_err("nbytes: %d nbuf_len: %d", nbytes, 3192 (uint32_t)qdf_nbuf_len(nbuf)); 3193 QDF_ASSERT(0); 3194 } 3195 3196 transfer_id = 3197 (mux_id & MUX_ID_MASK) | 3198 (transfer_id & TRANSACTION_ID_MASK); 3199 data_attr &= DESC_DATA_FLAG_MASK; 3200 /* 3201 * The common case involves sending multiple fragments within a 3202 * single download (the tx descriptor and the tx frame header). 3203 * So, optimize for the case of multiple fragments by not even 3204 * checking whether it's necessary to use a sendlist. 3205 * The overhead of using a sendlist for a single buffer download 3206 * is not a big deal, since it happens rarely (for WMI messages). 3207 */ 3208 ce_sendlist_init(&sendlist); 3209 do { 3210 qdf_dma_addr_t frag_paddr; 3211 int frag_bytes; 3212 3213 frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags); 3214 frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags); 3215 /* 3216 * Clear the packet offset for all but the first CE desc. 3217 */ 3218 if (i++ > 0) 3219 data_attr &= ~CE_DESC_PKT_OFFSET_BIT_M; 3220 3221 status = ce_sendlist_buf_add(&sendlist, frag_paddr, 3222 frag_bytes > 3223 bytes ? bytes : frag_bytes, 3224 qdf_nbuf_get_frag_is_wordstream 3225 (nbuf, 3226 nfrags) ? 
					     0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != QDF_STATUS_SUCCESS) {
			hif_err("frag_num: %d larger than limit (status=%d)",
				nfrags, status);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return QDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (qdf_unlikely(!ce_hdl)) {
		hif_err("CE handle is null");
		return QDF_STATUS_E_FAILURE;
	}

	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	QDF_ASSERT(status == QDF_STATUS_SUCCESS);

	return status;
}

void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
			     int force)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(hif_ctx, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
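		 * For example, with src_nentries = 32 the reap below is
		 * skipped while more than 16 (= 32 >> 1) sends remain
		 * allowed.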
		 */
		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
				 1))
			return;
	}
#ifdef ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}

#ifdef CUSTOM_CB_SCHEDULER_SUPPORT
QDF_STATUS
hif_register_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
			  void (*custom_cb)(void *), void *custom_cb_context)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info;

	if (pipe >= CE_COUNT_MAX)
		return QDF_STATUS_E_INVAL;

	pipe_info = &hif_state->pipe_info[pipe];
	ce_register_custom_cb(pipe_info->ce_hdl, custom_cb, custom_cb_context);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
hif_unregister_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info;

	if (pipe >= CE_COUNT_MAX)
		return QDF_STATUS_E_INVAL;

	pipe_info = &hif_state->pipe_info[pipe];
	ce_unregister_custom_cb(pipe_info->ce_hdl);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
hif_enable_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info;

	if (pipe >= CE_COUNT_MAX)
		return QDF_STATUS_E_INVAL;

	pipe_info = &hif_state->pipe_info[pipe];
	ce_enable_custom_cb(pipe_info->ce_hdl);
	ce_dispatch_interrupt(pipe, &hif_state->tasklets[pipe]);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
hif_disable_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info;

	if (pipe >= CE_COUNT_MAX)
		return QDF_STATUS_E_INVAL;

	pipe_info = &hif_state->pipe_info[pipe];
	ce_disable_custom_cb(pipe_info->ce_hdl);

	return QDF_STATUS_SUCCESS;
}
#endif /* CUSTOM_CB_SCHEDULER_SUPPORT */

#if defined(CE_TASKLET_SCHEDULE_ON_FULL) && defined(CE_TASKLET_DEBUG_ENABLE)
#define CE_RING_FULL_THRESHOLD_TIME 3000000
#define CE_RING_FULL_THRESHOLD 1024
/* This function is called from the htc_send path. If there is no resource
 * to send the packet via HTC, check whether interrupts have not been
 * processed on that CE for the last 3 seconds; if so, schedule a tasklet
 * to reap the available entries. Also schedule the tasklet if the queue
 * has reached 1024 ring-full events within those 3 seconds.
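 *
 * In other words, the forced schedule fires when either
 * CE_RING_FULL_THRESHOLD_TIME (3000000 us = 3 s) has elapsed since the
 * last tasklet entry for this pipe, or CE_RING_FULL_THRESHOLD (1024)
 * ring-full events have accumulated since the counter was last cleared.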
 */
void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	int64_t diff_time = qdf_get_log_timestamp_usecs() -
			hif_state->stats.tasklet_sched_entry_ts[pipe];

	hif_state->stats.ce_ring_full_count[pipe]++;

	if (diff_time >= CE_RING_FULL_THRESHOLD_TIME ||
	    hif_state->stats.ce_ring_full_count[pipe] >=
	    CE_RING_FULL_THRESHOLD) {
		hif_state->stats.ce_ring_full_count[pipe] = 0;
		hif_state->stats.ce_manual_tasklet_schedule_count[pipe]++;
		hif_state->stats.ce_last_manual_tasklet_schedule_ts[pipe] =
			qdf_get_log_timestamp_usecs();
		ce_dispatch_interrupt(pipe, &hif_state->tasklets[pipe]);
	}
}
#else
void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
}
#endif

uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
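		 * Intermediate fragments carry CE_SENDLIST_ITEM_CTXT as
		 * their per-fragment context and are skipped; each completed
		 * fragment still returns one slot to num_sends_allowed.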
3426 */ 3427 if (transfer_context != CE_SENDLIST_ITEM_CTXT) 3428 msg_callbacks->txCompletionHandler( 3429 msg_callbacks->Context, 3430 transfer_context, transfer_id, 3431 toeplitz_hash_result); 3432 3433 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock); 3434 pipe_info->num_sends_allowed++; 3435 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); 3436 } while (ce_completed_send_next(copyeng, 3437 &ce_context, &transfer_context, 3438 &CE_data, &nbytes, &transfer_id, 3439 &sw_idx, &hw_idx, 3440 &toeplitz_hash_result) == QDF_STATUS_SUCCESS); 3441 } 3442 3443 #ifdef WLAN_FEATURE_CE_RX_BUFFER_REUSE 3444 3445 #define HIF_CE_RX_NBUF_WMI_POOL_SIZE 32 3446 3447 static qdf_nbuf_t hif_ce_rx_nbuf_alloc(struct hif_softc *scn, uint8_t ce_id) 3448 { 3449 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3450 struct HIF_CE_pipe_info *pipe_info = &hif_state->pipe_info[ce_id]; 3451 qdf_nbuf_t nbuf; 3452 3453 nbuf = wbuff_buff_get(scn->wbuff_handle, ce_id, 0, __func__, 3454 __LINE__); 3455 if (!nbuf) 3456 nbuf = qdf_nbuf_alloc(scn->qdf_dev, pipe_info->buf_sz, 3457 0, 4, false); 3458 3459 if (!nbuf) 3460 return NULL; 3461 3462 return nbuf; 3463 } 3464 3465 static void hif_ce_rx_nbuf_free(qdf_nbuf_t nbuf) 3466 { 3467 nbuf = wbuff_buff_put(nbuf); 3468 if (nbuf) 3469 qdf_nbuf_free(nbuf); 3470 } 3471 3472 static int 3473 hif_calc_wbuff_pool_size(struct hif_softc *scn, struct CE_state *ce_state) 3474 { 3475 int ul_is_polled, dl_is_polled; 3476 bool is_wmi_svc, wmi_diag_svc; 3477 uint8_t ul_pipe, dl_pipe; 3478 int pool_size; 3479 int status; 3480 int ce_id; 3481 3482 if (!ce_state) 3483 return 0; 3484 3485 ce_id = ce_state->id; 3486 3487 status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC, 3488 &ul_pipe, &dl_pipe, 3489 &ul_is_polled, &dl_is_polled); 3490 is_wmi_svc = !status && (dl_pipe == ce_id); 3491 3492 status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn), 3493 WMI_CONTROL_DIAG_SVC, 3494 &ul_pipe, &dl_pipe, 3495 &ul_is_polled, &dl_is_polled); 3496 wmi_diag_svc = !status; 3497 3498 if (is_wmi_svc && !wmi_diag_svc) 3499 pool_size = ce_state->dest_ring->nentries + 3500 HIF_CE_RX_NBUF_WMI_POOL_SIZE; 3501 else if (is_wmi_svc && wmi_diag_svc) 3502 pool_size = ce_state->dest_ring->nentries + 3503 HIF_CE_RX_NBUF_WMI_POOL_SIZE / 2; 3504 else if (!is_wmi_svc && wmi_diag_svc && ce_id == dl_pipe) 3505 pool_size = ce_state->dest_ring->nentries + 3506 HIF_CE_RX_NBUF_WMI_POOL_SIZE / 2; 3507 else 3508 pool_size = ce_state->dest_ring->nentries; 3509 3510 return pool_size; 3511 } 3512 3513 static void hif_ce_rx_wbuff_register(struct hif_softc *scn) 3514 { 3515 struct wbuff_alloc_request wbuff_alloc[CE_COUNT_MAX] = {0}; 3516 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3517 struct HIF_CE_pipe_info *pipe_info; 3518 struct CE_state *ce_state; 3519 int ce_id; 3520 3521 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 3522 pipe_info = &hif_state->pipe_info[ce_id]; 3523 ce_state = scn->ce_id_to_state[ce_id]; 3524 3525 if (!pipe_info->buf_sz) 3526 continue; 3527 3528 /* Only RX CEs need WBUFF registration. recv_bufs_needed 3529 * contains valid count for RX CEs during init time. 
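		 * Fastpath HTT Rx CEs recycle their own buffers and are
		 * likewise skipped just below.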
3530 */ 3531 if (!atomic_read(&pipe_info->recv_bufs_needed)) 3532 continue; 3533 3534 if (ce_is_fastpath_enabled(scn) && 3535 ce_state->htt_rx_data) 3536 continue; 3537 3538 wbuff_alloc[ce_id].pool_id = ce_id; 3539 wbuff_alloc[ce_id].buffer_size = pipe_info->buf_sz; 3540 wbuff_alloc[ce_id].pool_size = 3541 hif_calc_wbuff_pool_size(scn, ce_state); 3542 } 3543 3544 scn->wbuff_handle = 3545 wbuff_module_register(wbuff_alloc, CE_COUNT_MAX, 0, 4, 3546 WBUFF_MODULE_CE_RX); 3547 } 3548 3549 static void hif_ce_rx_wbuff_deregister(struct hif_softc *scn) 3550 { 3551 wbuff_module_deregister(scn->wbuff_handle); 3552 scn->wbuff_handle = NULL; 3553 } 3554 #else 3555 static inline qdf_nbuf_t 3556 hif_ce_rx_nbuf_alloc(struct hif_softc *scn, uint8_t ce_id) 3557 { 3558 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3559 struct HIF_CE_pipe_info *pipe_info = &hif_state->pipe_info[ce_id]; 3560 3561 return qdf_nbuf_alloc(scn->qdf_dev, pipe_info->buf_sz, 0, 4, false); 3562 } 3563 3564 static inline void hif_ce_rx_nbuf_free(qdf_nbuf_t nbuf) 3565 { 3566 return qdf_nbuf_free(nbuf); 3567 } 3568 3569 static inline void hif_ce_rx_wbuff_register(struct hif_softc *scn) 3570 { 3571 } 3572 3573 static inline void hif_ce_rx_wbuff_deregister(struct hif_softc *scn) 3574 { 3575 } 3576 #endif /* WLAN_FEATURE_CE_RX_BUFFER_REUSE */ 3577 3578 /** 3579 * hif_ce_do_recv(): send message from copy engine to upper layers 3580 * @msg_callbacks: structure containing callback and callback context 3581 * @netbuf: skb containing message 3582 * @nbytes: number of bytes in the message 3583 * @pipe_info: used for the pipe_number info 3584 * 3585 * Checks the packet length, configures the length in the netbuff, 3586 * and calls the upper layer callback. 3587 * 3588 * return: None 3589 */ 3590 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks, 3591 qdf_nbuf_t netbuf, int nbytes, 3592 struct HIF_CE_pipe_info *pipe_info) { 3593 if (nbytes <= pipe_info->buf_sz) { 3594 qdf_nbuf_set_pktlen(netbuf, nbytes); 3595 msg_callbacks-> 3596 rxCompletionHandler(msg_callbacks->Context, 3597 netbuf, pipe_info->pipe_num); 3598 } else { 3599 hif_err("Invalid Rx msg buf: %pK nbytes: %d", netbuf, nbytes); 3600 hif_ce_rx_nbuf_free(netbuf); 3601 } 3602 } 3603 3604 /* Called by lower (CE) layer when data is received from the Target. 
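 * Each completed destination descriptor is handled in the loop below,
 * which yields by setting force_break once hif_ce_service_should_yield()
 * reports that the CE service budget is exhausted.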
 */
static void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_msg_callbacks *msg_callbacks = &pipe_info->pipe_callbacks;

	do {
		hif_rtpm_record_ce_last_busy_evt(scn, ce_state->id);
		hif_rtpm_mark_last_busy(HIF_RTPM_ID_CE);
		qdf_nbuf_unmap_single(scn->qdf_dev,
				      (qdf_nbuf_t)transfer_context,
				      QDF_DMA_FROM_DEVICE);

		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (scn->target_status == TARGET_STATUS_RESET)
			hif_ce_rx_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set up the force_break flag if the number of receives
		 * reaches MAX_NUM_OF_RECEIVES
		 */
		ce_state->receive_count++;
		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == QDF_STATUS_SUCCESS);
}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

void
hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		     sizeof(hif_state->msg_callbacks_pending));
}

static int hif_completion_thread_startup_by_ceid(struct HIF_CE_state *hif_state,
						 int pipe_num)
{
	struct CE_attr attr;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;
	struct HIF_CE_pipe_info *pipe_info;
	struct CE_state *ce_state;

	if (pipe_num >= CE_COUNT_MAX)
		return -EINVAL;

	pipe_info = &hif_state->pipe_info[pipe_num];
	ce_state = scn->ce_id_to_state[pipe_num];

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		hif_err("no completion handler registered");
		return -EFAULT;
	}

	attr = hif_state->host_ce_config[pipe_num];
	if (attr.src_nentries) {
		/* pipe used to send to target */
		hif_debug("pipe_num:%d pipe_info:0x%pK",
			  pipe_num, pipe_info);
		ce_send_cb_register(pipe_info->ce_hdl,
				    hif_pci_ce_send_done, pipe_info,
				    attr.flags & CE_ATTR_DISABLE_INTR);
		pipe_info->num_sends_allowed = attr.src_nentries - 1;
	}
	if (attr.dest_nentries) {
		hif_debug("pipe_num:%d pipe_info:0x%pK",
			  pipe_num, pipe_info);
		/* pipe used to receive from target */
		ce_recv_cb_register(pipe_info->ce_hdl,
				    hif_pci_ce_recv_data, pipe_info,
				    attr.flags & CE_ATTR_DISABLE_INTR);
	}

	if (attr.src_nentries)
		qdf_spinlock_create(&pipe_info->completion_freeq_lock);

3709 if (!(ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND)) 3710 qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks, 3711 sizeof(pipe_info->pipe_callbacks)); 3712 3713 return 0; 3714 } 3715 3716 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state) 3717 { 3718 struct CE_handle *ce_diag = hif_state->ce_diag; 3719 int pipe_num, ret; 3720 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 3721 3722 /* daemonize("hif_compl_thread"); */ 3723 3724 if (scn->ce_count == 0) { 3725 hif_err("ce_count is 0"); 3726 return -EINVAL; 3727 } 3728 3729 3730 A_TARGET_ACCESS_LIKELY(scn); 3731 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 3732 struct HIF_CE_pipe_info *pipe_info; 3733 3734 pipe_info = &hif_state->pipe_info[pipe_num]; 3735 if (pipe_info->ce_hdl == ce_diag) 3736 continue; /* Handle Diagnostic CE specially */ 3737 3738 ret = hif_completion_thread_startup_by_ceid(hif_state, 3739 pipe_num); 3740 if (ret < 0) 3741 return ret; 3742 3743 } 3744 3745 A_TARGET_ACCESS_UNLIKELY(scn); 3746 return 0; 3747 } 3748 3749 /* 3750 * Install pending msg callbacks. 3751 * 3752 * TBDXXX: This hack is needed because upper layers install msg callbacks 3753 * for use with HTC before BMI is done; yet this HIF implementation 3754 * needs to continue to use BMI msg callbacks. Really, upper layers 3755 * should not register HTC callbacks until AFTER BMI phase. 3756 */ 3757 static void hif_msg_callbacks_install(struct hif_softc *scn) 3758 { 3759 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3760 3761 qdf_mem_copy(&hif_state->msg_callbacks_current, 3762 &hif_state->msg_callbacks_pending, 3763 sizeof(hif_state->msg_callbacks_pending)); 3764 } 3765 3766 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe, 3767 uint8_t *DLPipe) 3768 { 3769 int ul_is_polled, dl_is_polled; 3770 3771 (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC, 3772 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled); 3773 } 3774 3775 /** 3776 * hif_dump_pipe_debug_count() - Log error count 3777 * @scn: hif_softc pointer. 
3778 * 3779 * Output the pipe error counts of each pipe to log file 3780 * 3781 * Return: N/A 3782 */ 3783 void hif_dump_pipe_debug_count(struct hif_softc *scn) 3784 { 3785 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3786 int pipe_num; 3787 3788 if (!hif_state) { 3789 hif_err("hif_state is NULL"); 3790 return; 3791 } 3792 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 3793 struct HIF_CE_pipe_info *pipe_info; 3794 3795 pipe_info = &hif_state->pipe_info[pipe_num]; 3796 3797 if (pipe_info->nbuf_alloc_err_count > 0 || 3798 pipe_info->nbuf_dma_err_count > 0 || 3799 pipe_info->nbuf_ce_enqueue_err_count) 3800 hif_err( 3801 "pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u", 3802 pipe_info->pipe_num, 3803 atomic_read(&pipe_info->recv_bufs_needed), 3804 pipe_info->nbuf_alloc_err_count, 3805 pipe_info->nbuf_dma_err_count, 3806 pipe_info->nbuf_ce_enqueue_err_count); 3807 } 3808 } 3809 3810 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info, 3811 void *nbuf, uint32_t *error_cnt, 3812 enum hif_ce_event_type failure_type, 3813 const char *failure_type_string) 3814 { 3815 int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed); 3816 struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl; 3817 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); 3818 int ce_id = CE_state->id; 3819 uint32_t error_cnt_tmp; 3820 3821 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 3822 error_cnt_tmp = ++(*error_cnt); 3823 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 3824 hif_debug("pipe_num: %d, needed: %d, err_cnt: %u, fail_type: %s", 3825 pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp, 3826 failure_type_string); 3827 hif_record_ce_desc_event(scn, ce_id, failure_type, 3828 NULL, nbuf, bufs_needed_tmp, 0); 3829 /* if we fail to allocate the last buffer for an rx pipe, 3830 * there is no trigger to refill the ce and we will 3831 * eventually crash 3832 */ 3833 if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1 || 3834 (ce_srng_based(scn) && 3835 bufs_needed_tmp == CE_state->dest_ring->nentries - 2)) 3836 qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work); 3837 3838 } 3839 3840 3841 3842 3843 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info) 3844 { 3845 struct CE_handle *ce_hdl; 3846 qdf_size_t buf_sz; 3847 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); 3848 QDF_STATUS status; 3849 uint32_t bufs_posted = 0; 3850 unsigned int ce_id; 3851 3852 buf_sz = pipe_info->buf_sz; 3853 if (buf_sz == 0) { 3854 /* Unused Copy Engine */ 3855 return QDF_STATUS_SUCCESS; 3856 } 3857 3858 ce_hdl = pipe_info->ce_hdl; 3859 if (!ce_hdl) { 3860 hif_err("ce_hdl is NULL"); 3861 return QDF_STATUS_E_INVAL; 3862 } 3863 3864 ce_id = ((struct CE_state *)ce_hdl)->id; 3865 3866 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 3867 while (atomic_read(&pipe_info->recv_bufs_needed) > 0) { 3868 qdf_dma_addr_t CE_data; /* CE space buffer address */ 3869 qdf_nbuf_t nbuf; 3870 3871 atomic_dec(&pipe_info->recv_bufs_needed); 3872 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 3873 3874 hif_record_ce_desc_event(scn, ce_id, 3875 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL, 3876 0, 0); 3877 nbuf = hif_ce_rx_nbuf_alloc(scn, ce_id); 3878 if (!nbuf) { 3879 hif_post_recv_buffers_failure(pipe_info, nbuf, 3880 &pipe_info->nbuf_alloc_err_count, 3881 HIF_RX_NBUF_ALLOC_FAILURE, 3882 "HIF_RX_NBUF_ALLOC_FAILURE"); 3883 return 
QDF_STATUS_E_NOMEM;
		}

		hif_record_ce_desc_event(scn, ce_id,
					 HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf,
					 0, 0);
		/*
		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
		 * CE_data = dma_map_single(dev, data, buf_sz,
		 *			    DMA_FROM_DEVICE);
		 */
		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
					     QDF_DMA_FROM_DEVICE);

		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_dma_err_count,
					HIF_RX_NBUF_MAP_FAILURE,
					"HIF_RX_NBUF_MAP_FAILURE");
			hif_ce_rx_nbuf_free(nbuf);
			return status;
		}

		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
		hif_record_ce_desc_event(scn, ce_id,
					 HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf,
					 0, 0);
		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
						   buf_sz, DMA_FROM_DEVICE);
		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_ce_enqueue_err_count,
					HIF_RX_NBUF_ENQUEUE_FAILURE,
					"HIF_RX_NBUF_ENQUEUE_FAILURE");

			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
					      QDF_DMA_FROM_DEVICE);
			hif_ce_rx_nbuf_free(nbuf);
			return status;
		}

		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
		bufs_posted++;
	}
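	/* Each successfully posted buffer offsets an earlier failure, so
	 * decay the error counters by the number of buffers posted.
	 */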
	pipe_info->nbuf_alloc_err_count =
		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
	pipe_info->nbuf_dma_err_count =
		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
	pipe_info->nbuf_ce_enqueue_err_count =
		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;

	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

	return QDF_STATUS_SUCCESS;
}

#ifdef FEATURE_DIRECT_LINK
static QDF_STATUS
hif_alloc_pages_for_direct_link_recv_pipe(struct HIF_CE_state *hif_ce_state,
					  int pipe_num)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct service_to_pipe *tgt_svc_cfg;
	struct HIF_CE_pipe_info *pipe_info;
	int32_t recv_bufs_needed;
	qdf_dma_addr_t dma_addr;
	uint16_t num_elem_per_page;
	uint16_t i;
	bool is_found = false;

	tgt_svc_cfg = hif_ce_state->tgt_svc_map;

	for (i = 0; i < hif_ce_state->sz_tgt_svc_map; i++) {
		if (tgt_svc_cfg[i].service_id != LPASS_DATA_MSG_SVC ||
		    tgt_svc_cfg[i].pipedir != PIPEDIR_IN ||
		    tgt_svc_cfg[i].pipenum != pipe_num)
			continue;

		pipe_info = &hif_ce_state->pipe_info[pipe_num];
		recv_bufs_needed = atomic_read(&pipe_info->recv_bufs_needed);

		if (!pipe_info->buf_sz || !recv_bufs_needed)
			continue;

		is_found = true;
		break;
	}

	if (!is_found)
		return QDF_STATUS_E_NOSUPPORT;

	scn->dl_recv_pipe_num = pipe_num;

	hif_prealloc_get_multi_pages(scn, QDF_DP_RX_DIRECT_LINK_CE_BUF_TYPE,
				     pipe_info->buf_sz, recv_bufs_needed,
				     &scn->dl_recv_pages, false);
	if (!scn->dl_recv_pages.num_pages)
		return QDF_STATUS_E_NOMEM;

	num_elem_per_page = scn->dl_recv_pages.num_element_per_page;
	for (i = 0; i < recv_bufs_needed; i++) {
		dma_addr =
			scn->dl_recv_pages.dma_pages[i / num_elem_per_page].page_p_addr;
		dma_addr += (i % num_elem_per_page) * pipe_info->buf_sz;
		ce_recv_buf_enqueue(pipe_info->ce_hdl, NULL, dma_addr);
	}

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS
hif_free_pages_for_direct_link_recv_pipe(struct HIF_CE_state *hif_ce_state,
					 int pipe_num)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

	if (pipe_num != scn->dl_recv_pipe_num)
		return QDF_STATUS_E_NOSUPPORT;

	hif_prealloc_put_multi_pages(scn, QDF_DP_RX_DIRECT_LINK_CE_BUF_TYPE,
				     &scn->dl_recv_pages, false);

	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
hif_alloc_pages_for_direct_link_recv_pipe(struct HIF_CE_state *hif_ce_state,
					  int pipe_num)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static inline QDF_STATUS
hif_free_pages_for_direct_link_recv_pipe(struct HIF_CE_state *hif_ce_state,
					 int pipe_num)
{
	return QDF_STATUS_E_NOSUPPORT;
}
#endif
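/*
 * Worked example for the page carving in
 * hif_alloc_pages_for_direct_link_recv_pipe() above (values assumed,
 * not taken from any target config): with 4 KB prealloc pages and
 * buf_sz = 2048, num_element_per_page is 2, so buffer i is enqueued at
 *
 *	dma_pages[i / 2].page_p_addr + (i % 2) * 2048
 *
 * i.e. buffers 0 and 1 share page 0, buffers 2 and 3 share page 1, etc.
 * Both values actually come from the prealloc pool at runtime.
 */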
/*
 * Try to post all desired receive buffers for all pipes.
 * Returns 0 for a non-fastpath rx copy engine, since
 * oom_allocation_work will be scheduled to recover any
 * failures; returns non-zero if receive buffers could not be
 * completely replenished for a fastpath rx copy engine.
 */
static QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;
	struct CE_state *ce_state = NULL;
	QDF_STATUS qdf_status;

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		if (pipe_num >= CE_COUNT_MAX) {
			A_TARGET_ACCESS_UNLIKELY(scn);
			return QDF_STATUS_E_INVAL;
		}

		ce_state = scn->ce_id_to_state[pipe_num];
		pipe_info = &hif_state->pipe_info[pipe_num];

		if (!ce_state)
			continue;

		/* Do not initialize dynamic CEs during initial load */
		if (ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND)
			continue;

		if (hif_is_nss_wifi_enabled(scn) &&
		    ce_state && (ce_state->htt_rx_data))
			continue;

		qdf_status =
			hif_alloc_pages_for_direct_link_recv_pipe(hif_state,
								  pipe_num);
		if (QDF_IS_STATUS_SUCCESS(qdf_status))
			continue;

		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
		    ce_state->htt_rx_data &&
		    scn->fastpath_mode_on) {
			A_TARGET_ACCESS_UNLIKELY(scn);
			return qdf_status;
		}
	}

	A_TARGET_ACCESS_UNLIKELY(scn);

	return QDF_STATUS_SUCCESS;
}
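/*
 * Recovery path for the non-fastpath case described above:
 * hif_post_recv_buffers_failure() schedules
 * CE_state->oom_allocation_work when a ring runs dry, and
 * hif_destroy_oom_work() tears the work items down on shutdown.
 */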

QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	hif_update_fastpath_recv_bufs_cnt(scn);

	hif_msg_callbacks_install(scn);

	if (hif_completion_thread_startup(hif_state))
		return QDF_STATUS_E_FAILURE;

	hif_ce_rx_wbuff_register(scn);

	/* enable buffer cleanup */
	hif_state->started = true;

	/* Post buffers once to start things off. */
	qdf_status = hif_post_recv_buffers(scn);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		/* cleanup is done in hif_ce_disable */
		hif_err("Failed to post buffers");
		return qdf_status;
	}

	return qdf_status;
}

static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct hif_softc *scn;
	struct CE_handle *ce_hdl;
	uint32_t buf_sz;
	struct HIF_CE_state *hif_state;
	qdf_nbuf_t netbuf;
	qdf_dma_addr_t CE_data;
	void *per_CE_context;
	QDF_STATUS status;

	buf_sz = pipe_info->buf_sz;
	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started)
		return;

	scn = HIF_GET_SOFTC(hif_state);
	ce_hdl = pipe_info->ce_hdl;

	if (!scn->qdf_dev)
		return;

	status = hif_free_pages_for_direct_link_recv_pipe(hif_state,
							  pipe_info->pipe_num);
	if (QDF_IS_STATUS_SUCCESS(status))
		return;

	while (ce_revoke_recv_next
		       (ce_hdl, &per_CE_context, (void **)&netbuf,
			&CE_data) == QDF_STATUS_SUCCESS) {
		if (netbuf) {
			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
					      QDF_DMA_FROM_DEVICE);
			hif_ce_rx_nbuf_free(netbuf);
		}
	}
}

static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	struct hif_softc *scn;
	qdf_nbuf_t netbuf;
	void *per_CE_context;
	qdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started)
		return;

	scn = HIF_GET_SOFTC(hif_state);

	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
			(void **)&netbuf, &CE_data, &nbytes,
			&id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again;
			 * identify them by the endpoint on which they
			 * were queued.
			 */
			if (id == scn->htc_htt_tx_endpoint)
				return;
			/* Indicate the completion to higher
			 * layer to free the buffer
			 */
			if (pipe_info->pipe_callbacks.txCompletionHandler)
				pipe_info->pipe_callbacks.
					txCompletionHandler(pipe_info->
						pipe_callbacks.Context,
						netbuf, id,
						toeplitz_hash_result);
		}
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *	buffers that were enqueued for receive
 *	buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
{
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct CE_state *ce_state;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		ce_state = scn->ce_id_to_state[pipe_num];
		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
		    ((ce_state->htt_tx_data) ||
		     (ce_state->htt_rx_data))) {
			continue;
		}

		pipe_info = &hif_state->pipe_info[pipe_num];
		hif_recv_buffer_cleanup_on_pipe(pipe_info);
		hif_send_buffer_cleanup_on_pipe(pipe_info);
	}
}

void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_buffer_cleanup(hif_state);
}

static void hif_destroy_oom_work(struct hif_softc *scn)
{
	struct CE_state *ce_state;
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		if (ce_state)
			qdf_destroy_work(scn->qdf_dev,
					 &ce_state->oom_allocation_work);
	}
}

void hif_ce_stop(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	/*
	 * Before cleaning up any memory, ensure that irq and
	 * bottom-half contexts will not be re-entered.
	 */
	hif_disable_isr(&scn->osc);
	hif_destroy_oom_work(scn);
	scn->hif_init_done = false;

	/*
	 * At this point the asynchronous threads are stopped, the
	 * Target should not DMA nor interrupt, and Host code may
	 * not initiate anything more. So we just need to clean
	 * up Host-side state.
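	 * No new CE completions can arrive here: hif_disable_isr() has
	 * shut down the irq and tasklet contexts, and
	 * hif_destroy_oom_work() has torn down the OOM refill work above.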
4279 */ 4280 4281 if (scn->athdiag_procfs_inited) { 4282 athdiag_procfs_remove(); 4283 scn->athdiag_procfs_inited = false; 4284 } 4285 4286 hif_buffer_cleanup(hif_state); 4287 hif_ce_rx_wbuff_deregister(scn); 4288 4289 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 4290 struct HIF_CE_pipe_info *pipe_info; 4291 struct CE_attr attr; 4292 struct CE_handle *ce_diag = hif_state->ce_diag; 4293 4294 pipe_info = &hif_state->pipe_info[pipe_num]; 4295 if (pipe_info->ce_hdl) { 4296 if (pipe_info->ce_hdl != ce_diag && 4297 hif_state->started) { 4298 attr = hif_state->host_ce_config[pipe_num]; 4299 if (attr.src_nentries) 4300 qdf_spinlock_destroy(&pipe_info-> 4301 completion_freeq_lock); 4302 } 4303 ce_fini(pipe_info->ce_hdl); 4304 pipe_info->ce_hdl = NULL; 4305 pipe_info->buf_sz = 0; 4306 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); 4307 } 4308 } 4309 4310 if (hif_state->sleep_timer_init) { 4311 qdf_timer_stop(&hif_state->sleep_timer); 4312 qdf_timer_free(&hif_state->sleep_timer); 4313 hif_state->sleep_timer_init = false; 4314 } 4315 4316 hif_state->started = false; 4317 } 4318 4319 #ifdef CONFIG_SHADOW_V3 4320 void hif_preare_shadow_register_cfg_v3(struct hif_softc *scn) 4321 { 4322 int shadow_cfg_idx = scn->num_shadow_registers_configured; 4323 int i; 4324 4325 /* shadow reg config for CE SRC registers */ 4326 for (i = 0; i < scn->ce_count; i++) { 4327 scn->shadow_regs[shadow_cfg_idx].addr = 4328 CE_BASE_ADDRESS(i) + SR_WR_INDEX_ADDRESS; 4329 shadow_cfg_idx++; 4330 } 4331 4332 /* shadow reg config for CE DST registers */ 4333 for (i = 0; i < scn->ce_count; i++) { 4334 scn->shadow_regs[shadow_cfg_idx].addr = 4335 CE_BASE_ADDRESS(i) + DST_WR_INDEX_ADDRESS; 4336 shadow_cfg_idx++; 4337 } 4338 4339 scn->num_shadow_registers_configured = shadow_cfg_idx; 4340 } 4341 4342 void hif_get_shadow_reg_config_v3(struct hif_softc *scn, 4343 struct pld_shadow_reg_v3_cfg **shadow_config, 4344 int *num_shadow_registers_configured) 4345 { 4346 *shadow_config = scn->shadow_regs; 4347 *num_shadow_registers_configured = 4348 scn->num_shadow_registers_configured; 4349 } 4350 #endif 4351 4352 static void hif_get_shadow_reg_cfg(struct hif_softc *scn, 4353 struct shadow_reg_cfg 4354 **target_shadow_reg_cfg_ret, 4355 uint32_t *shadow_cfg_sz_ret) 4356 { 4357 if (target_shadow_reg_cfg_ret) 4358 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg; 4359 if (shadow_cfg_sz_ret) 4360 *shadow_cfg_sz_ret = shadow_cfg_sz; 4361 } 4362 4363 /** 4364 * hif_get_target_ce_config() - get copy engine configuration 4365 * @scn: HIF context 4366 * @target_ce_config_ret: basic copy engine configuration 4367 * @target_ce_config_sz_ret: size of the basic configuration in bytes 4368 * @target_service_to_ce_map_ret: service mapping for the copy engines 4369 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes 4370 * @target_shadow_reg_cfg_ret: shadow register configuration 4371 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes 4372 * 4373 * providing accessor to these values outside of this file. 4374 * currently these are stored in static pointers to const sections. 4375 * there are multiple configurations that are selected from at compile time. 4376 * Runtime selection would need to consider mode, target type and bus type. 4377 * 4378 * Return: return by parameter. 
 */
void hif_get_target_ce_config(struct hif_softc *scn,
		struct CE_pipe_config **target_ce_config_ret,
		uint32_t *target_ce_config_sz_ret,
		struct service_to_pipe **target_service_to_ce_map_ret,
		uint32_t *target_service_to_ce_map_sz_ret,
		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
		uint32_t *shadow_cfg_sz_ret)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	*target_ce_config_ret = hif_state->target_ce_config;
	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;

	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
				       target_service_to_ce_map_sz_ret);
	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
			       shadow_cfg_sz_ret);
}

#ifdef CONFIG_SHADOW_V3
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
	int i;

	hif_info("v3: num_config %d", cfg->num_shadow_reg_v3_cfg);
	for (i = 0; i < cfg->num_shadow_reg_v3_cfg; i++)
		hif_info("i %d, val %x", i, cfg->shadow_reg_v3_cfg[i].addr);
}

#elif defined(CONFIG_SHADOW_V2)
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
	int i;

	hif_info("v2: num_config %d", cfg->num_shadow_reg_v2_cfg);
	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++)
		hif_info("i %d, val %x", i, cfg->shadow_reg_v2_cfg[i].addr);
}

#else
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
	hif_info("CONFIG_SHADOW V2/V3 not defined");
}
#endif

#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_get_src_ring_read_index(): Called to get the SRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based SRRI.
 *
 * Return: SRRI
 */
inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
		else
			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
								 CE_ctrl_addr);
	}
}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller.
For CEs that
 * don't have interrupts enabled, we look at the DDR based DRRI.
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];

	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
		else
			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
								  CE_ctrl_addr);
	}
}

/**
 * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
 * @scn: hif_softc pointer
 *
 * Return: qdf status
 */
static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
{
	qdf_dma_addr_t paddr_rri_on_ddr = 0;

	scn->vaddr_rri_on_ddr =
		(void *)qdf_mem_alloc_consistent(scn->qdf_dev,
						 scn->qdf_dev->dev,
						 RRI_ON_DDR_MEM_SIZE,
						 &paddr_rri_on_ddr);

	if (!scn->vaddr_rri_on_ddr) {
		hif_err("dmaable page alloc fail");
		return QDF_STATUS_E_NOMEM;
	}

	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;

	qdf_mem_zero(scn->vaddr_rri_on_ddr, RRI_ON_DDR_MEM_SIZE);

	return QDF_STATUS_SUCCESS;
}
#endif

#if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non-cached memory on DDR and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI at this particular location.
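 *
 * Illustrative sketch of the consumer side (assumed layout; the exact
 * packing is owned by the SRRI_FROM_DDR_ADDR()/DRRI_FROM_DDR_ADDR()
 * macros): the hardware mirrors both read indices of a CE into one
 * 32-bit word, so a register-free read looks like
 *
 *	uint32_t word = *VADDR_FOR_CE(scn, CE_ctrl_addr);
 *	unsigned int srri = word & 0xffff;
 *	unsigned int drri = (word >> 16) & 0xffff;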
 *
 * Return: None
 */
#ifdef QCA_WIFI_WCN6450
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	uint32_t high_paddr, low_paddr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_attr *attr;

	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
		return;

	low_paddr = RRI_ON_DDR_PADDR_LOW(scn->paddr_rri_on_ddr);
	high_paddr = RRI_ON_DDR_PADDR_HIGH(scn->paddr_rri_on_ddr);

	hif_debug("using srri and drri from DDR");

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++) {
		attr = &hif_state->host_ce_config[i];
		if (attr->src_nentries || attr->dest_nentries)
			CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
	}
}
#else
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	uint32_t high_paddr, low_paddr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *ce_config;

	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
		return;

	low_paddr = RRI_ON_DDR_PADDR_LOW(scn->paddr_rri_on_ddr);
	high_paddr = RRI_ON_DDR_PADDR_HIGH(scn->paddr_rri_on_ddr);

	hif_debug("using srri and drri from DDR");

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++) {
		ce_config = &hif_state->target_ce_config[i];
		/*
		 * For the DST channel, program both IDX_UPD_EN and the
		 * DMAX length (on behalf of FW) at once to avoid a
		 * race with the FW register update.
		 */
		if (ce_config->pipedir == PIPEDIR_IN && ce_config->nbytes_max)
			CE_IDX_UPD_EN_DMAX_LEN_SET(scn, CE_BASE_ADDRESS(i),
						   ce_config->nbytes_max);
		else
			CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
	}
}
#endif

#else
/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
#endif

/**
 * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
 *                                    QMI command
 * @scn: hif context
 * @cfg: wlan enable config
 *
 * In case of Genoa, the rri_over_ddr memory configuration is passed
 * to the firmware through the QMI configure command.
 */
#if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
					   struct pld_wlan_enable_cfg *cfg)
{
	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
		return;

	cfg->rri_over_ddr_cfg_valid = true;
	cfg->rri_over_ddr_cfg.base_addr_low =
		BITS0_TO_31(scn->paddr_rri_on_ddr);
	cfg->rri_over_ddr_cfg.base_addr_high =
		BITS32_TO_35(scn->paddr_rri_on_ddr);
}
#else
static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
					   struct pld_wlan_enable_cfg *cfg)
{
}
#endif

/**
 * hif_wlan_enable(): call the platform driver to enable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode and CE configuration to the
 * platform driver to enable wlan.
4639 * 4640 * Return: linux error code 4641 */ 4642 int hif_wlan_enable(struct hif_softc *scn) 4643 { 4644 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 4645 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); 4646 struct pld_wlan_enable_cfg cfg = { 0 }; 4647 enum pld_driver_mode mode; 4648 uint32_t con_mode = hif_get_conparam(scn); 4649 4650 hif_get_target_ce_config(scn, 4651 (struct CE_pipe_config **)&cfg.ce_tgt_cfg, 4652 &cfg.num_ce_tgt_cfg, 4653 (struct service_to_pipe **)&cfg.ce_svc_cfg, 4654 &cfg.num_ce_svc_pipe_cfg, 4655 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg, 4656 &cfg.num_shadow_reg_cfg); 4657 4658 /* translate from structure size to array size */ 4659 cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config); 4660 cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe); 4661 cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg); 4662 4663 switch (tgt_info->target_type) { 4664 case TARGET_TYPE_KIWI: 4665 case TARGET_TYPE_MANGO: 4666 case TARGET_TYPE_PEACH: 4667 case TARGET_TYPE_WCN6450: 4668 hif_prepare_hal_shadow_reg_cfg_v3(scn, &cfg); 4669 break; 4670 default: 4671 hif_prepare_hal_shadow_register_cfg(scn, 4672 &cfg.shadow_reg_v2_cfg, 4673 &cfg.num_shadow_reg_v2_cfg); 4674 break; 4675 } 4676 4677 hif_print_hal_shadow_register_cfg(&cfg); 4678 4679 hif_update_rri_over_ddr_config(scn, &cfg); 4680 4681 if (QDF_GLOBAL_FTM_MODE == con_mode) 4682 mode = PLD_FTM; 4683 else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode) 4684 mode = PLD_COLDBOOT_CALIBRATION; 4685 else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode) 4686 mode = PLD_FTM_COLDBOOT_CALIBRATION; 4687 else if (QDF_IS_EPPING_ENABLED(con_mode)) 4688 mode = PLD_EPPING; 4689 else 4690 mode = PLD_MISSION; 4691 4692 if (BYPASS_QMI) 4693 return 0; 4694 else 4695 return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode); 4696 } 4697 4698 #ifdef WLAN_FEATURE_EPPING 4699 4700 #define CE_EPPING_USES_IRQ true 4701 4702 void hif_ce_prepare_epping_config(struct hif_softc *scn, 4703 struct HIF_CE_state *hif_state) 4704 { 4705 if (CE_EPPING_USES_IRQ) 4706 hif_state->host_ce_config = host_ce_config_wlan_epping_irq; 4707 else 4708 hif_state->host_ce_config = host_ce_config_wlan_epping_poll; 4709 hif_state->target_ce_config = target_ce_config_wlan_epping; 4710 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping); 4711 target_shadow_reg_cfg = target_shadow_reg_cfg_epping; 4712 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping); 4713 scn->ce_count = EPPING_HOST_CE_COUNT; 4714 } 4715 #endif 4716 4717 #ifdef QCN7605_SUPPORT 4718 static inline 4719 void hif_set_ce_config_qcn7605(struct hif_softc *scn, 4720 struct HIF_CE_state *hif_state) 4721 { 4722 hif_state->host_ce_config = host_ce_config_wlan_qcn7605; 4723 hif_state->target_ce_config = target_ce_config_wlan_qcn7605; 4724 hif_state->target_ce_config_sz = 4725 sizeof(target_ce_config_wlan_qcn7605); 4726 target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605; 4727 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605); 4728 scn->ce_count = QCN7605_CE_COUNT; 4729 } 4730 #else 4731 static inline 4732 void hif_set_ce_config_qcn7605(struct hif_softc *scn, 4733 struct HIF_CE_state *hif_state) 4734 { 4735 hif_err("QCN7605 not supported"); 4736 } 4737 #endif 4738 4739 #ifdef CE_SVC_CMN_INIT 4740 #ifdef QCA_WIFI_SUPPORT_SRNG 4741 static inline void hif_ce_service_init(void) 4742 { 4743 ce_service_srng_init(); 4744 } 4745 #else 4746 static inline void hif_ce_service_init(void) 4747 { 4748 ce_service_legacy_init(); 4749 } 4750 #endif 4751 #else 
4752 static inline void hif_ce_service_init(void) 4753 { 4754 } 4755 #endif 4756 4757 #ifdef FEATURE_DIRECT_LINK 4758 /** 4759 * hif_ce_select_config_kiwi() - Select the host and target CE 4760 * configuration for Kiwi 4761 * @hif_state: HIF CE context 4762 * 4763 * Return: None 4764 */ 4765 static inline 4766 void hif_ce_select_config_kiwi(struct HIF_CE_state *hif_state) 4767 { 4768 struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif_state); 4769 4770 if (pld_is_direct_link_supported(hif_ctx->qdf_dev->dev)) { 4771 hif_state->host_ce_config = 4772 host_ce_config_wlan_kiwi_direct_link; 4773 hif_state->target_ce_config = 4774 target_ce_config_wlan_kiwi_direct_link; 4775 hif_state->target_ce_config_sz = 4776 sizeof(target_ce_config_wlan_kiwi_direct_link); 4777 } else { 4778 hif_state->host_ce_config = host_ce_config_wlan_kiwi; 4779 hif_state->target_ce_config = target_ce_config_wlan_kiwi; 4780 hif_state->target_ce_config_sz = 4781 sizeof(target_ce_config_wlan_kiwi); 4782 } 4783 } 4784 #else 4785 static inline 4786 void hif_ce_select_config_kiwi(struct HIF_CE_state *hif_state) 4787 { 4788 hif_state->host_ce_config = host_ce_config_wlan_kiwi; 4789 hif_state->target_ce_config = target_ce_config_wlan_kiwi; 4790 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_kiwi); 4791 } 4792 #endif 4793 4794 /** 4795 * hif_ce_prepare_config() - load the correct static tables. 4796 * @scn: hif context 4797 * 4798 * Epping uses different static attribute tables than mission mode. 4799 */ 4800 void hif_ce_prepare_config(struct hif_softc *scn) 4801 { 4802 uint32_t mode = hif_get_conparam(scn); 4803 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 4804 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); 4805 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 4806 int ret; 4807 int msi_data_count = 0; 4808 int msi_data_start = 0; 4809 int msi_irq_start = 0; 4810 4811 hif_ce_service_init(); 4812 hif_state->ce_services = ce_services_attach(scn); 4813 4814 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", 4815 &msi_data_count, &msi_data_start, 4816 &msi_irq_start); 4817 4818 scn->ce_count = HOST_CE_COUNT; 4819 scn->int_assignment = &ce_int_context[msi_data_count]; 4820 scn->free_irq_done = false; 4821 /* if epping is enabled we need to use the epping configuration. 
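	 * epping swaps in its own host/target CE tables and CE count,
	 * so none of the per-target cases below apply.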
*/ 4822 if (QDF_IS_EPPING_ENABLED(mode)) { 4823 hif_ce_prepare_epping_config(scn, hif_state); 4824 return; 4825 } 4826 4827 switch (tgt_info->target_type) { 4828 default: 4829 hif_state->host_ce_config = host_ce_config_wlan; 4830 hif_state->target_ce_config = target_ce_config_wlan; 4831 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan); 4832 break; 4833 case TARGET_TYPE_QCN7605: 4834 hif_set_ce_config_qcn7605(scn, hif_state); 4835 break; 4836 case TARGET_TYPE_AR900B: 4837 case TARGET_TYPE_QCA9984: 4838 case TARGET_TYPE_QCA9888: 4839 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) { 4840 hif_state->host_ce_config = 4841 host_lowdesc_ce_cfg_wlan_ar900b_nopktlog; 4842 } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { 4843 hif_state->host_ce_config = 4844 host_lowdesc_ce_cfg_wlan_ar900b; 4845 } else { 4846 hif_state->host_ce_config = host_ce_config_wlan_ar900b; 4847 } 4848 4849 hif_state->target_ce_config = target_ce_config_wlan_ar900b; 4850 hif_state->target_ce_config_sz = 4851 sizeof(target_ce_config_wlan_ar900b); 4852 4853 break; 4854 4855 case TARGET_TYPE_AR9888: 4856 case TARGET_TYPE_AR9888V2: 4857 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { 4858 hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888; 4859 } else { 4860 hif_state->host_ce_config = host_ce_config_wlan_ar9888; 4861 } 4862 4863 hif_state->target_ce_config = target_ce_config_wlan_ar9888; 4864 hif_state->target_ce_config_sz = 4865 sizeof(target_ce_config_wlan_ar9888); 4866 4867 break; 4868 4869 case TARGET_TYPE_QCA8074: 4870 case TARGET_TYPE_QCA8074V2: 4871 case TARGET_TYPE_QCA6018: 4872 if (scn->bus_type == QDF_BUS_TYPE_PCI) { 4873 hif_state->host_ce_config = 4874 host_ce_config_wlan_qca8074_pci; 4875 hif_state->target_ce_config = 4876 target_ce_config_wlan_qca8074_pci; 4877 hif_state->target_ce_config_sz = 4878 sizeof(target_ce_config_wlan_qca8074_pci); 4879 } else { 4880 hif_state->host_ce_config = host_ce_config_wlan_qca8074; 4881 hif_state->target_ce_config = 4882 target_ce_config_wlan_qca8074; 4883 hif_state->target_ce_config_sz = 4884 sizeof(target_ce_config_wlan_qca8074); 4885 } 4886 break; 4887 case TARGET_TYPE_QCA6290: 4888 hif_state->host_ce_config = host_ce_config_wlan_qca6290; 4889 hif_state->target_ce_config = target_ce_config_wlan_qca6290; 4890 hif_state->target_ce_config_sz = 4891 sizeof(target_ce_config_wlan_qca6290); 4892 4893 scn->ce_count = QCA_6290_CE_COUNT; 4894 break; 4895 case TARGET_TYPE_QCN9000: 4896 hif_state->host_ce_config = host_ce_config_wlan_qcn9000; 4897 hif_state->target_ce_config = target_ce_config_wlan_qcn9000; 4898 hif_state->target_ce_config_sz = 4899 sizeof(target_ce_config_wlan_qcn9000); 4900 scn->ce_count = QCN_9000_CE_COUNT; 4901 scn->ini_cfg.disable_wake_irq = 1; 4902 break; 4903 case TARGET_TYPE_QCN9224: 4904 hif_set_ce_config_qcn9224(scn, hif_state); 4905 break; 4906 case TARGET_TYPE_QCA5332: 4907 hif_state->host_ce_config = host_ce_config_wlan_qca5332; 4908 hif_state->target_ce_config = target_ce_config_wlan_qca5332; 4909 hif_state->target_ce_config_sz = 4910 sizeof(target_ce_config_wlan_qca5332); 4911 scn->ce_count = QCA_5332_CE_COUNT; 4912 break; 4913 case TARGET_TYPE_QCN6122: 4914 hif_state->host_ce_config = host_ce_config_wlan_qcn6122; 4915 hif_state->target_ce_config = target_ce_config_wlan_qcn6122; 4916 hif_state->target_ce_config_sz = 4917 sizeof(target_ce_config_wlan_qcn6122); 4918 scn->ce_count = QCN_6122_CE_COUNT; 4919 scn->ini_cfg.disable_wake_irq = 1; 4920 break; 4921 case TARGET_TYPE_QCN9160: 4922 
hif_state->host_ce_config = host_ce_config_wlan_qcn9160; 4923 hif_state->target_ce_config = target_ce_config_wlan_qcn9160; 4924 hif_state->target_ce_config_sz = 4925 sizeof(target_ce_config_wlan_qcn9160); 4926 scn->ce_count = QCN_9160_CE_COUNT; 4927 scn->ini_cfg.disable_wake_irq = 1; 4928 break; 4929 case TARGET_TYPE_QCN6432: 4930 hif_state->host_ce_config = host_ce_config_wlan_qcn6432; 4931 hif_state->target_ce_config = target_ce_config_wlan_qcn6432; 4932 hif_state->target_ce_config_sz = 4933 sizeof(target_ce_config_wlan_qcn6432); 4934 scn->ce_count = QCN_6432_CE_COUNT; 4935 scn->ini_cfg.disable_wake_irq = 1; 4936 break; 4937 case TARGET_TYPE_QCA5018: 4938 hif_state->host_ce_config = host_ce_config_wlan_qca5018; 4939 hif_state->target_ce_config = target_ce_config_wlan_qca5018; 4940 hif_state->target_ce_config_sz = 4941 sizeof(target_ce_config_wlan_qca5018); 4942 scn->ce_count = QCA_5018_CE_COUNT; 4943 break; 4944 case TARGET_TYPE_QCA9574: 4945 hif_state->host_ce_config = host_ce_config_wlan_qca9574; 4946 hif_state->target_ce_config = target_ce_config_wlan_qca9574; 4947 hif_state->target_ce_config_sz = 4948 sizeof(target_ce_config_wlan_qca9574); 4949 break; 4950 case TARGET_TYPE_QCA6390: 4951 hif_state->host_ce_config = host_ce_config_wlan_qca6390; 4952 hif_state->target_ce_config = target_ce_config_wlan_qca6390; 4953 hif_state->target_ce_config_sz = 4954 sizeof(target_ce_config_wlan_qca6390); 4955 4956 scn->ce_count = QCA_6390_CE_COUNT; 4957 break; 4958 case TARGET_TYPE_QCA6490: 4959 hif_state->host_ce_config = host_ce_config_wlan_qca6490; 4960 hif_state->target_ce_config = target_ce_config_wlan_qca6490; 4961 hif_state->target_ce_config_sz = 4962 sizeof(target_ce_config_wlan_qca6490); 4963 4964 scn->ce_count = QCA_6490_CE_COUNT; 4965 break; 4966 case TARGET_TYPE_QCA6750: 4967 hif_state->host_ce_config = host_ce_config_wlan_qca6750; 4968 hif_state->target_ce_config = target_ce_config_wlan_qca6750; 4969 hif_state->target_ce_config_sz = 4970 sizeof(target_ce_config_wlan_qca6750); 4971 4972 scn->ce_count = QCA_6750_CE_COUNT; 4973 break; 4974 case TARGET_TYPE_KIWI: 4975 case TARGET_TYPE_MANGO: 4976 case TARGET_TYPE_PEACH: 4977 hif_ce_select_config_kiwi(hif_state); 4978 scn->ce_count = KIWI_CE_COUNT; 4979 break; 4980 case TARGET_TYPE_ADRASTEA: 4981 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) { 4982 hif_state->host_ce_config = 4983 host_lowdesc_ce_config_wlan_adrastea_nopktlog; 4984 hif_state->target_ce_config = 4985 target_lowdesc_ce_config_wlan_adrastea_nopktlog; 4986 hif_state->target_ce_config_sz = 4987 sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog); 4988 } else { 4989 hif_state->host_ce_config = 4990 host_ce_config_wlan_adrastea; 4991 hif_state->target_ce_config = 4992 target_ce_config_wlan_adrastea; 4993 hif_state->target_ce_config_sz = 4994 sizeof(target_ce_config_wlan_adrastea); 4995 } 4996 break; 4997 case TARGET_TYPE_WCN6450: 4998 hif_state->host_ce_config = host_ce_config_wlan_wcn6450; 4999 hif_state->target_ce_config = target_ce_config_wlan_wcn6450; 5000 hif_state->target_ce_config_sz = 5001 sizeof(target_ce_config_wlan_wcn6450); 5002 break; 5003 } 5004 QDF_BUG(scn->ce_count <= CE_COUNT_MAX); 5005 } 5006 5007 /** 5008 * hif_ce_open() - do ce specific allocations 5009 * @hif_sc: pointer to hif context 5010 * 5011 * return: 0 for success or QDF_STATUS_E_NOMEM 5012 */ 5013 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc) 5014 { 5015 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 5016 5017 qdf_spinlock_create(&hif_state->irq_reg_lock); 5018 
qdf_spinlock_create(&hif_state->keep_awake_lock); 5019 return QDF_STATUS_SUCCESS; 5020 } 5021 5022 /** 5023 * hif_ce_close() - do ce specific free 5024 * @hif_sc: pointer to hif context 5025 */ 5026 void hif_ce_close(struct hif_softc *hif_sc) 5027 { 5028 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 5029 5030 qdf_spinlock_destroy(&hif_state->irq_reg_lock); 5031 qdf_spinlock_destroy(&hif_state->keep_awake_lock); 5032 } 5033 5034 /** 5035 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed 5036 * @hif_sc: hif context 5037 * 5038 * uses state variables to support cleaning up when hif_config_ce fails. 5039 */ 5040 void hif_unconfig_ce(struct hif_softc *hif_sc) 5041 { 5042 int pipe_num; 5043 struct HIF_CE_pipe_info *pipe_info; 5044 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 5045 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc); 5046 5047 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { 5048 pipe_info = &hif_state->pipe_info[pipe_num]; 5049 if (pipe_info->ce_hdl) { 5050 ce_unregister_irq(hif_state, (1 << pipe_num)); 5051 } 5052 } 5053 deinit_tasklet_workers(hif_hdl); 5054 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { 5055 pipe_info = &hif_state->pipe_info[pipe_num]; 5056 if (pipe_info->ce_hdl) { 5057 ce_fini(pipe_info->ce_hdl); 5058 pipe_info->ce_hdl = NULL; 5059 pipe_info->buf_sz = 0; 5060 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); 5061 } 5062 } 5063 if (hif_sc->athdiag_procfs_inited) { 5064 athdiag_procfs_remove(); 5065 hif_sc->athdiag_procfs_inited = false; 5066 } 5067 } 5068 5069 #ifdef CONFIG_BYPASS_QMI 5070 #ifdef QCN7605_SUPPORT 5071 /** 5072 * hif_post_static_buf_to_target() - post static buffer to WLAN FW 5073 * @scn: pointer to HIF structure 5074 * 5075 * WLAN FW needs 2MB memory from DDR when QMI is disabled. 
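 * The buffer also carries the per-CE MSI vector assignment and, when
 * RRI-over-DDR is in use, the RRI base address for the target to pick
 * up, as filled in below.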
5076 * 5077 * Return: void 5078 */ 5079 static void hif_post_static_buf_to_target(struct hif_softc *scn) 5080 { 5081 phys_addr_t target_pa; 5082 struct ce_info *ce_info_ptr; 5083 uint32_t msi_data_start; 5084 uint32_t msi_data_count; 5085 uint32_t msi_irq_start; 5086 uint32_t i = 0; 5087 int ret; 5088 5089 scn->vaddr_qmi_bypass = 5090 (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev, 5091 scn->qdf_dev->dev, 5092 FW_SHARED_MEM, 5093 &target_pa); 5094 if (!scn->vaddr_qmi_bypass) { 5095 hif_err("Memory allocation failed could not post target buf"); 5096 return; 5097 } 5098 5099 scn->paddr_qmi_bypass = target_pa; 5100 5101 ce_info_ptr = (struct ce_info *)scn->vaddr_qmi_bypass; 5102 5103 if (scn->vaddr_rri_on_ddr) { 5104 ce_info_ptr->rri_over_ddr_low_paddr = 5105 BITS0_TO_31(scn->paddr_rri_on_ddr); 5106 ce_info_ptr->rri_over_ddr_high_paddr = 5107 BITS32_TO_35(scn->paddr_rri_on_ddr); 5108 } 5109 5110 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", 5111 &msi_data_count, &msi_data_start, 5112 &msi_irq_start); 5113 if (ret) { 5114 hif_err("Failed to get CE msi config"); 5115 return; 5116 } 5117 5118 for (i = 0; i < CE_COUNT_MAX; i++) { 5119 ce_info_ptr->cfg[i].ce_id = i; 5120 ce_info_ptr->cfg[i].msi_vector = 5121 (i % msi_data_count) + msi_irq_start; 5122 } 5123 5124 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa); 5125 hif_info("target va %pK target pa %pa", scn->vaddr_qmi_bypass, 5126 &target_pa); 5127 } 5128 5129 /** 5130 * hif_cleanup_static_buf_to_target() - clean up static buffer to WLAN FW 5131 * @scn: pointer to HIF structure 5132 * 5133 * 5134 * Return: void 5135 */ 5136 void hif_cleanup_static_buf_to_target(struct hif_softc *scn) 5137 { 5138 void *target_va = scn->vaddr_qmi_bypass; 5139 phys_addr_t target_pa = scn->paddr_qmi_bypass; 5140 5141 qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, 5142 FW_SHARED_MEM, target_va, 5143 target_pa, 0); 5144 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0); 5145 } 5146 #else 5147 /** 5148 * hif_post_static_buf_to_target() - post static buffer to WLAN FW 5149 * @scn: pointer to HIF structure 5150 * 5151 * WLAN FW needs 2MB memory from DDR when QMI is disabled. 
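 * In this variant only the buffer's physical address is programmed to
 * the target; no CE info block is embedded in the buffer.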
 *
 * Return: void
 */
static void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	qdf_dma_addr_t target_pa;

	scn->vaddr_qmi_bypass =
		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
						     scn->qdf_dev->dev,
						     FW_SHARED_MEM,
						     &target_pa);
	if (!scn->vaddr_qmi_bypass) {
		hif_err("Memory allocation failed, could not post target buf");
		return;
	}

	scn->paddr_qmi_bypass = target_pa;
	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
}

/**
 * hif_cleanup_static_buf_to_target() - clean up static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * Return: void
 */
void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
{
	void *target_va = scn->vaddr_qmi_bypass;
	phys_addr_t target_pa = scn->paddr_qmi_bypass;

	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
				FW_SHARED_MEM, target_va,
				target_pa, 0);
	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0);
}
#endif

#else
static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
{
}

void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
{
}
#endif

static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
				       bool wait_for_it)
{
	/* todo */
	return 0;
}

int hif_config_ce_by_id(struct hif_softc *scn, int pipe_num)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct HIF_CE_pipe_info *pipe_info;
	struct CE_state *ce_state = NULL;
	struct CE_attr *attr;
	int rv = 0;

	if (pipe_num >= CE_COUNT_MAX)
		return -EINVAL;

	pipe_info = &hif_state->pipe_info[pipe_num];
	pipe_info->pipe_num = pipe_num;
	pipe_info->HIF_CE_state = hif_state;
	attr = &hif_state->host_ce_config[pipe_num];
	ce_state = scn->ce_id_to_state[pipe_num];

	if (ce_state) {
		/* Do not reinitialize the CE if it is done already */
		rv = QDF_STATUS_E_BUSY;
		goto err;
	}

	pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
	ce_state = scn->ce_id_to_state[pipe_num];
	if (!ce_state) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		rv = QDF_STATUS_E_FAILURE;
		goto err;
	}
	qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
	QDF_ASSERT(pipe_info->ce_hdl);
	if (!pipe_info->ce_hdl) {
		rv = QDF_STATUS_E_FAILURE;
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}

	ce_state->lro_data = qdf_lro_init();

	if (attr->flags & CE_ATTR_DIAG) {
		/* Reserve the last CE for
		 * Diagnostic Window support
		 */
		hif_state->ce_diag = pipe_info->ce_hdl;
		goto skip;
	}

	if (hif_is_nss_wifi_enabled(scn) && ce_state &&
	    (ce_state->htt_rx_data)) {
		goto skip;
	}

	pipe_info->buf_sz = (qdf_size_t)(attr->src_sz_max);
	if (attr->dest_nentries > 0) {
		atomic_set(&pipe_info->recv_bufs_needed,
			   init_buffer_count(attr->dest_nentries - 1));
		/* SRNG based CE has one entry less */
		if (ce_srng_based(scn))
			atomic_dec(&pipe_info->recv_bufs_needed);
	} else {
		atomic_set(&pipe_info->recv_bufs_needed, 0);
	}
	ce_tasklet_init(hif_state, (1 << pipe_num));
	ce_register_irq(hif_state, (1 << pipe_num));

	init_tasklet_worker_by_ceid(hif_hdl, pipe_num);
skip:
	return
0;
err:
	return rv;
}

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
static inline void hif_gen_ce_id_history_idx_mapping(struct hif_softc *scn)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	uint8_t ce_id, hist_idx = 0;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE & (1 << ce_id))
			ce_hist->ce_id_hist_map[ce_id] = hist_idx++;
		else
			ce_hist->ce_id_hist_map[ce_id] = -1;
	}
}
#else
static inline void hif_gen_ce_id_history_idx_mapping(struct hif_softc *scn)
{
}
#endif

/**
 * hif_config_ce() - configure copy engines
 * @scn: hif context
 *
 * Prepares fw, copy engine hardware and host sw according
 * to the attributes selected by hif_ce_prepare_config.
 *
 * also calls athdiag_procfs_init
 *
 * Return: 0 for success nonzero for failure.
 */
int hif_config_ce(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info;
	int pipe_num;

#ifdef ADRASTEA_SHADOW_REGISTERS
	int i;
#endif
	QDF_STATUS rv = QDF_STATUS_SUCCESS;

	scn->notice_send = true;
	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;

	hif_post_static_buf_to_target(scn);

	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;

	hif_config_rri_on_ddr(scn);

	if (ce_srng_based(scn))
		scn->bus_ops.hif_target_sleep_state_adjust =
			&hif_srng_sleep_state_adjust;

	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing.
	 */
	reset_ce_debug_history(scn);
	hif_gen_ce_id_history_idx_mapping(scn);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr *attr;

		pipe_info = &hif_state->pipe_info[pipe_num];
		attr = &hif_state->host_ce_config[pipe_num];

		if (attr->flags & CE_ATTR_INIT_ON_DEMAND)
			continue;

		if (hif_config_ce_by_id(scn, pipe_num))
			goto err;
	}

	if (athdiag_procfs_init(scn) != 0) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}
	scn->athdiag_procfs_inited = true;

	hif_debug("ce_init done");
	hif_debug("X, ret = %d", rv);

#ifdef ADRASTEA_SHADOW_REGISTERS
	hif_debug("Using Shadow Registers instead of CE Registers");
	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
		hif_debug("Shadow Register%d is mapped to address %x",
			  i,
			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
	}
#endif

	return rv != QDF_STATUS_SUCCESS;
err:
	/* Failure, so clean up */
	hif_unconfig_ce(scn);
	hif_info("X, ret = %d", rv);
	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
}

/**
 * hif_config_ce_pktlog() - configure the copy engine used for pktlog
 * @hif_hdl: hif context
 *
 * Prepares fw, copy engine hardware and host sw for the pktlog pipe
 * according to the attributes selected by hif_ce_prepare_config.
 *
 * Return: 0 for success nonzero for failure.
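 *
 * Calling this again once the pktlog CE is initialized is harmless:
 * hif_config_ce_by_id() reports QDF_STATUS_E_BUSY for an
 * already-initialized CE, and that is treated as success below.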
 */
int hif_config_ce_pktlog(struct hif_opaque_softc *hif_hdl)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;
	QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE;
	struct HIF_CE_pipe_info *pipe_info;

	if (!scn)
		goto err;

	if (scn->pktlog_init)
		return QDF_STATUS_SUCCESS;

	pipe_num = hif_get_pktlog_ce_num(scn);
	if (pipe_num < 0) {
		qdf_status = QDF_STATUS_E_FAILURE;
		goto err;
	}

	pipe_info = &hif_state->pipe_info[pipe_num];

	qdf_status = hif_config_ce_by_id(scn, pipe_num);
	/* CE already initialized; do not try to reinitialize it again */
	if (qdf_status == QDF_STATUS_E_BUSY)
		return QDF_STATUS_SUCCESS;

	qdf_status = hif_config_irq_by_ceid(scn, pipe_num);
	if (qdf_status < 0)
		goto err;

	qdf_status = hif_completion_thread_startup_by_ceid(hif_state,
							   pipe_num);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		hif_err("Failed to start hif thread");
		goto err;
	}

	/* Post buffers for pktlog copy engine. */
	qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		/* cleanup is done in hif_ce_disable */
		hif_err("Failed to post buffers");
		return qdf_status;
	}
	scn->pktlog_init = true;
	return qdf_status != QDF_STATUS_SUCCESS;

err:
	hif_debug("X, ret = %d", qdf_status);
	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
}

#ifdef IPA_OFFLOAD
/**
 * hif_ce_ipa_get_ce_resource() - get uc resource on hif
 * @scn: bus context
 * @ce_sr: copyengine source ring base physical address
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * When the IPA micro controller data path offload feature is enabled,
 * HIF should release copy engine related resource information to the
 * IPA UC. The IPA UC will access the hardware resource with the
 * released information.
 *
 * Return: None
 */
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
				qdf_shared_mem_t **ce_sr,
				uint32_t *ce_sr_ring_size,
				qdf_dma_addr_t *ce_reg_paddr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;

	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
			    ce_reg_paddr);
}
#endif /* IPA_OFFLOAD */


#ifdef ADRASTEA_SHADOW_REGISTERS

/*
 * Current shadow register config
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *         0            |     0    |           src
 *         1     No Config - Doesn't point to anything
 *         2     No Config - Doesn't point to anything
 *         3            |     3    |           src
 *         4            |     4    |           src
 *         5            |     5    |           src
 *         6     No Config - Doesn't point to anything
 *         7            |     7    |           src
 *         8     No Config - Doesn't point to anything
 *         9     No Config - Doesn't point to anything
 *        10     No Config - Doesn't point to anything
 *        11     No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *        12     No Config - Doesn't point to anything
 *        13            |     1    |           dst
 *        14            |     2    |           dst
 *        15     No Config - Doesn't point to anything
 *        16     No Config -
Doesn't point to anything 5501 * 17 No Config - Doesn't point to anything 5502 * 18 No Config - Doesn't point to anything 5503 * 19 | 7 | dst 5504 * 20 | 8 | dst 5505 * 21 No Config - Doesn't point to anything 5506 * 22 No Config - Doesn't point to anything 5507 * 23 No Config - Doesn't point to anything 5508 * ----------------------------------------------------------- 5509 * 5510 * 5511 * ToDo - Move shadow register config to following in the future 5512 * This helps free up a block of shadow registers towards the end. 5513 * Can be used for other purposes 5514 * 5515 * ----------------------------------------------------------- 5516 * Shadow Register | CE | src/dst write index 5517 * ----------------------------------------------------------- 5518 * 0 | 0 | src 5519 * 1 | 3 | src 5520 * 2 | 4 | src 5521 * 3 | 5 | src 5522 * 4 | 7 | src 5523 * ----------------------------------------------------------- 5524 * 5 | 1 | dst 5525 * 6 | 2 | dst 5526 * 7 | 7 | dst 5527 * 8 | 8 | dst 5528 * ----------------------------------------------------------- 5529 * 9 No Config - Doesn't point to anything 5530 * 12 No Config - Doesn't point to anything 5531 * 13 No Config - Doesn't point to anything 5532 * 14 No Config - Doesn't point to anything 5533 * 15 No Config - Doesn't point to anything 5534 * 16 No Config - Doesn't point to anything 5535 * 17 No Config - Doesn't point to anything 5536 * 18 No Config - Doesn't point to anything 5537 * 19 No Config - Doesn't point to anything 5538 * 20 No Config - Doesn't point to anything 5539 * 21 No Config - Doesn't point to anything 5540 * 22 No Config - Doesn't point to anything 5541 * 23 No Config - Doesn't point to anything 5542 * ----------------------------------------------------------- 5543 */ 5544 #ifndef QCN7605_SUPPORT 5545 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 5546 { 5547 u32 addr = 0; 5548 u32 ce = COPY_ENGINE_ID(ctrl_addr); 5549 5550 switch (ce) { 5551 case 0: 5552 addr = SHADOW_VALUE0; 5553 break; 5554 case 3: 5555 addr = SHADOW_VALUE3; 5556 break; 5557 case 4: 5558 addr = SHADOW_VALUE4; 5559 break; 5560 case 5: 5561 addr = SHADOW_VALUE5; 5562 break; 5563 case 7: 5564 addr = SHADOW_VALUE7; 5565 break; 5566 default: 5567 hif_err("Invalid CE ctrl_addr (CE=%d)", ce); 5568 QDF_ASSERT(0); 5569 } 5570 return addr; 5571 5572 } 5573 5574 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 5575 { 5576 u32 addr = 0; 5577 u32 ce = COPY_ENGINE_ID(ctrl_addr); 5578 5579 switch (ce) { 5580 case 1: 5581 addr = SHADOW_VALUE13; 5582 break; 5583 case 2: 5584 addr = SHADOW_VALUE14; 5585 break; 5586 case 5: 5587 addr = SHADOW_VALUE17; 5588 break; 5589 case 7: 5590 addr = SHADOW_VALUE19; 5591 break; 5592 case 8: 5593 addr = SHADOW_VALUE20; 5594 break; 5595 case 9: 5596 addr = SHADOW_VALUE21; 5597 break; 5598 case 10: 5599 addr = SHADOW_VALUE22; 5600 break; 5601 case 11: 5602 addr = SHADOW_VALUE23; 5603 break; 5604 default: 5605 hif_err("Invalid CE ctrl_addr (CE=%d)", ce); 5606 QDF_ASSERT(0); 5607 } 5608 5609 return addr; 5610 5611 } 5612 #else 5613 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 5614 { 5615 u32 addr = 0; 5616 u32 ce = COPY_ENGINE_ID(ctrl_addr); 5617 5618 switch (ce) { 5619 case 0: 5620 addr = SHADOW_VALUE0; 5621 break; 5622 case 3: 5623 addr = SHADOW_VALUE3; 5624 break; 5625 case 4: 5626 addr = SHADOW_VALUE4; 5627 break; 5628 case 5: 5629 addr = SHADOW_VALUE5; 5630 break; 5631 default: 5632 hif_err("Invalid CE ctrl_addr (CE=%d)", ce); 5633 QDF_ASSERT(0); 5634 } 5635 return addr; 5636 } 5637 5638 
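/*
 * Unlike the default map above, the QCN7605 variant below also exposes
 * a dst-ring shadow for CE3 (SHADOW_VALUE15), while its src-ring map
 * has no entry for CE7.
 */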

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 3:
		addr = SHADOW_VALUE15;
		break;
	case 5:
		addr = SHADOW_VALUE17;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	case 9:
		addr = SHADOW_VALUE21;
		break;
	case 10:
		addr = SHADOW_VALUE22;
		break;
	case 11:
		addr = SHADOW_VALUE23;
		break;
	default:
		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}

	return addr;
}
#endif /* QCN7605_SUPPORT */
#endif /* ADRASTEA_SHADOW_REGISTERS */
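
/*
 * Illustrative sketch (ADRASTEA builds only; assumes the
 * CE_BASE_ADDRESS()/A_TARGET_WRITE() semantics used elsewhere in this
 * driver): a ring update goes through the shadow register returned by the
 * lookup rather than the CE register block itself, e.g. for CE 4's source
 * ring:
 *
 *	u32 ctrl_addr = CE_BASE_ADDRESS(4);
 *	u32 shadow = shadow_sr_wr_ind_addr(scn, ctrl_addr);
 *
 *	A_TARGET_WRITE(scn, shadow, new_write_index);
 */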

#if defined(FEATURE_LRO)
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	ce_state = scn->ce_id_to_state[ctx_id];

	return ce_state->lro_data;
}
#endif /* FEATURE_LRO */

/**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: hif opaque softc pointer
 * @svc_id: Service ID for which the mapping is needed.
 * @ul_pipe: address of the container in which ul pipe is returned.
 * @dl_pipe: address of the container in which dl pipe is returned.
 * @ul_is_polled: address of the container in which a bool
 *		indicating if the UL CE for this service
 *		is polled is returned.
 * @dl_is_polled: address of the container in which a bool
 *		indicating if the DL CE for this service
 *		is polled is returned.
 *
 * Return: Indicates whether the service has been found in the table.
 *	Upon return, ul_is_polled is updated only if ul_pipe is updated.
 *	A debug message is logged if either leg is not updated because
 *	the table has no entry for it (this is not an error).
 */
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
			    uint8_t *ul_pipe, uint8_t *dl_pipe,
			    int *ul_is_polled, int *dl_is_polled)
{
	int status = -EINVAL;
	unsigned int i;
	struct service_to_pipe element;
	struct service_to_pipe *tgt_svc_map_to_use;
	uint32_t sz_tgt_svc_map_to_use;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	bool dl_updated = false;
	bool ul_updated = false;

	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
				       &sz_tgt_svc_map_to_use);

	*dl_is_polled = 0;  /* polling for received messages not supported */

	for (i = 0; i < (sz_tgt_svc_map_to_use / sizeof(element)); i++) {
		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
		if (element.service_id == svc_id) {
			if (element.pipedir == PIPEDIR_OUT) {
				*ul_pipe = element.pipenum;
				*ul_is_polled =
					(hif_state->host_ce_config[*ul_pipe].flags &
					 CE_ATTR_DISABLE_INTR) != 0;
				ul_updated = true;
			} else if (element.pipedir == PIPEDIR_IN) {
				*dl_pipe = element.pipenum;
				dl_updated = true;
			}
			status = 0;
		}
	}
	if (!ul_updated)
		hif_debug("ul pipe is NOT updated for service %d", svc_id);
	if (!dl_updated)
		hif_debug("dl pipe is NOT updated for service %d", svc_id);

	return status;
}
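
/*
 * Usage sketch (illustrative; actual pipe numbers depend on the target's
 * service-to-pipe table): resolving the pipes for the WMI control service.
 * send_on_ul_pipe() is a hypothetical consumer:
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (!hif_map_service_to_pipe(hif_hdl, WMI_CONTROL_SVC,
 *				     &ul_pipe, &dl_pipe,
 *				     &ul_polled, &dl_polled))
 *		send_on_ul_pipe(ul_pipe);
 */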

#ifdef SHADOW_REG_DEBUG
inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
					       uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, srri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);

	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != srri_from_ddr) {
		hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			srri_from_ddr, read_from_hw,
			CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return srri_from_ddr;
}

inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, drri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);

	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != drri_from_ddr) {
		hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
			drri_from_ddr, read_from_hw,
			CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return drri_from_ddr;
}
#endif /* SHADOW_REG_DEBUG */

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif softc pointer
 *
 * Output the copy engine registers
 *
 * Return: 0 on success, negative error code on failure
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
		if (!scn->ce_id_to_state[i]) {
			hif_debug("CE%d not used", i);
			continue;
		}

		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *)&ce_reg_values[0],
					   ce_reg_word_size * sizeof(uint32_t));

		if (status != QDF_STATUS_SUCCESS) {
			hif_err("Dumping CE register failed!");
			return -EACCES;
		}
		hif_debug("CE%d=>", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *)&ce_reg_values[0],
				   ce_reg_word_size * sizeof(uint32_t));
		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d",
			  (ce_reg_address + SR_WR_INDEX_ADDRESS),
			  ce_reg_values[SR_WR_INDEX_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d",
			  (ce_reg_address + CURRENT_SRRI_ADDRESS),
			  ce_reg_values[CURRENT_SRRI_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d",
			  (ce_reg_address + DST_WR_INDEX_ADDRESS),
			  ce_reg_values[DST_WR_INDEX_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d",
			  (ce_reg_address + CURRENT_DRRI_ADDRESS),
			  ce_reg_values[CURRENT_DRRI_ADDRESS / 4]);
		qdf_print("---");
	}
	return 0;
}
qdf_export_symbol(hif_dump_ce_registers);

#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;

	if (src_ring) {
		hif_info->ul_pipe.nentries = src_ring->nentries;
		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
		hif_info->ul_pipe.sw_index = src_ring->sw_index;
		hif_info->ul_pipe.write_index = src_ring->write_index;
		hif_info->ul_pipe.hw_index = src_ring->hw_index;
		hif_info->ul_pipe.base_addr_CE_space =
			src_ring->base_addr_CE_space;
		hif_info->ul_pipe.base_addr_owner_space =
			src_ring->base_addr_owner_space;
	}

	if (dest_ring) {
		hif_info->dl_pipe.nentries = dest_ring->nentries;
		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
		hif_info->dl_pipe.write_index = dest_ring->write_index;
		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
		hif_info->dl_pipe.base_addr_CE_space =
			dest_ring->base_addr_CE_space;
		hif_info->dl_pipe.base_addr_owner_space =
			dest_ring->base_addr_owner_space;
	}

	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
	hif_info->ctrl_addr = ce_state->ctrl_addr;

	return hif_info;
}
qdf_export_symbol(hif_get_addl_pipe_info);
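
/*
 * Usage sketch (illustrative): snapshotting a pipe's ring state for
 * diagnostics. The caller owns the storage; the function fills it and
 * returns the same pointer:
 *
 *	struct hif_pipe_addl_info info = {0};
 *
 *	hif_get_addl_pipe_info(osc, &info, 1);
 *	hif_debug("pipe1 ul write_index=%u sw_index=%u",
 *		  info.ul_pipe.write_index, info.ul_pipe.sw_index);
 */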

uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->nss_wifi_ol_mode = mode;
	return 0;
}
qdf_export_symbol(hif_set_nss_wifiol_mode);
#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->hif_attribute = hif_attrib;
}

/* Disable interrupts (currently only applicable to the legacy copy engine) */
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
	uint32_t ctrl_addr = CE_state->ctrl_addr;

	Q_TARGET_ACCESS_BEGIN(scn);
	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}
qdf_export_symbol(hif_disable_interrupt);

/**
 * hif_fw_event_handler() - hif fw event handler
 * @hif_state: pointer to hif ce state structure
 *
 * Process fw events and raise HTC callback to process fw events.
 *
 * Return: none
 */
static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
{
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	if (!msg_callbacks->fwEventHandler)
		return;

	msg_callbacks->fwEventHandler(msg_callbacks->Context,
				      QDF_STATUS_E_FAILURE);
}
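
/*
 * Illustrative sketch (hypothetical handler; the real one is registered by
 * the HTC layer via struct hif_msg_callbacks): the shape a fwEventHandler
 * callback invoked above must have:
 *
 *	static void example_fw_event_handler(void *context, QDF_STATUS status)
 *	{
 *		struct example_ctx *ctx = context;
 *
 *		if (QDF_IS_STATUS_ERROR(status))
 *			trigger_recovery(ctx);
 *	}
 *
 * where struct example_ctx and trigger_recovery() are hypothetical.
 */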

#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer
 *
 * Called from the PCI interrupt handler when the Target raises a
 * firmware-generated interrupt to the Host.
 *
 * Only registered for legacy CE devices.
 *
 * Return: status of handled irq
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	struct hif_softc *scn = arg;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	uint32_t fw_indicator_address, fw_indicator;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;

	fw_indicator_address = hif_state->fw_indicator_address;
	/* For sudden unplug this will return ~0 */
	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
		/* ACK: clear Target-side pending event */
		A_TARGET_WRITE(scn, fw_indicator_address,
			       fw_indicator & ~FW_IND_EVENT_PENDING);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;

		if (hif_state->started) {
			hif_fw_event_handler(hif_state);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 * fw_indicator is used as a bitmap, defined as:
			 *     FW_IND_EVENT_PENDING    0x1
			 *     FW_IND_INITIALIZED      0x2
			 *     FW_IND_NEEDRECOVER      0x4
			 */
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("%s: Early firmware event indicated 0x%x\n",
					 __func__, fw_indicator));
		}
	} else {
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}

	return ATH_ISR_SCHED;
}
#else
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	return ATH_ISR_SCHED;
}
#endif /* #ifndef QCA_WIFI_3_0 */

/**
 * hif_wlan_disable(): call the platform driver to disable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode to the platform driver to disable
 * wlan.
 *
 * Return: void
 */
void hif_wlan_disable(struct hif_softc *scn)
{
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	if (scn->target_status == TARGET_STATUS_RESET)
		return;

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	pld_wlan_disable(scn->qdf_dev->dev, mode);
}

int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
{
	int status;
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
					 HTC_CTRL_RSVD_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		hif_err("Failed to map pipe: %d", status);
		return status;
	}

	*ce_id = dl_pipe;

	return 0;
}

int hif_get_fw_diag_ce_id(struct hif_softc *scn, uint8_t *ce_id)
{
	int status;
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	/* DL pipe for WMI_CONTROL_DIAG_SVC should map to the FW DIAG CE_ID */
	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
					 WMI_CONTROL_DIAG_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		hif_err("Failed to map pipe: %d", status);
		return status;
	}

	*ce_id = dl_pipe;

	return 0;
}
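
/*
 * Usage sketch (illustrative): resolving the wake CE before arming wake
 * interrupts; a missing HTC_CTRL_RSVD_SVC entry surfaces as a non-zero
 * status from the pipe lookup:
 *
 *	uint8_t wake_ce_id;
 *
 *	if (!hif_get_wake_ce_id(scn, &wake_ce_id))
 *		hif_debug("wake CE is %u", wake_ce_id);
 */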

#ifdef HIF_CE_LOG_INFO
/**
 * ce_get_index_info(): Get CE index info
 * @scn: HIF Context
 * @ce_state: CE opaque handle
 * @info: CE info
 *
 * Return: 0 for success and non-zero for failure
 */
static
int ce_get_index_info(struct hif_softc *scn, void *ce_state,
		      struct ce_index *info)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_index_info(scn, ce_state, info);
}

void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset)
{
	struct hang_event_info info = {0};
	static uint32_t tracked_ce = BIT(CE_ID_1) | BIT(CE_ID_2) |
		BIT(CE_ID_3) | BIT(CE_ID_4) | BIT(CE_ID_9) | BIT(CE_ID_10);
	uint8_t curr_index = 0;
	uint8_t i;
	uint16_t size;

	info.active_tasklet_count = qdf_atomic_read(&scn->active_tasklet_cnt);
	info.active_grp_tasklet_cnt =
				qdf_atomic_read(&scn->active_grp_tasklet_cnt);

	for (i = 0; i < scn->ce_count; i++) {
		if (!(tracked_ce & BIT(i)) || !scn->ce_id_to_state[i])
			continue;

		if (ce_get_index_info(scn, scn->ce_id_to_state[i],
				      &info.ce_info[curr_index]))
			continue;

		curr_index++;
	}

	info.ce_count = curr_index;
	/* Trim the unused tail of ce_info[] so only filled entries are sent */
	size = sizeof(info) -
		(CE_COUNT_MAX - info.ce_count) * sizeof(struct ce_index);

	if (*offset + size > QDF_WLAN_HANG_FW_OFFSET)
		return;

	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_CE_INFO,
			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);

	qdf_mem_copy(data + *offset, &info, size);
	*offset = *offset + size;
}
#endif /* HIF_CE_LOG_INFO */

#ifdef FEATURE_DIRECT_LINK
QDF_STATUS
hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
			   uint64_t addr, uint32_t data)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (hif_state->ce_services->ce_set_irq_config_by_ceid)
		return hif_state->ce_services->ce_set_irq_config_by_ceid(
								hif_ctx,
								ce_id,
								addr,
								data);

	return QDF_STATUS_E_NOSUPPORT;
}

uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn,
						  uint64_t **dma_addr,
						  uint32_t *buf_size)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct ce_ops *ce_services = hif_state->ce_services;

	if (ce_services->ce_get_direct_link_dest_buffers)
		return ce_services->ce_get_direct_link_dest_buffers(hif_ctx,
								    dma_addr,
								    buf_size);

	return 0;
}

QDF_STATUS
hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
				 struct hif_direct_link_ce_info *info,
				 uint8_t max_ce_info_len)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct ce_ops *ce_services = hif_state->ce_services;

	if (ce_services->ce_get_direct_link_ring_info)
		return ce_services->ce_get_direct_link_ring_info(hif_ctx,
								 info,
								 max_ce_info_len);

	return QDF_STATUS_E_NOSUPPORT;
}
#endif /* FEATURE_DIRECT_LINK */
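
/*
 * Usage sketch (illustrative; FEATURE_DIRECT_LINK builds only): querying
 * direct-link CE ring info with caller-owned storage and handling targets
 * whose CE services do not implement the hook:
 *
 *	struct hif_direct_link_ce_info info[8] = { {0} };
 *	QDF_STATUS status;
 *
 *	status = hif_get_direct_link_ce_srng_info(hif_hdl, info,
 *						  QDF_ARRAY_SIZE(info));
 *	if (status == QDF_STATUS_E_NOSUPPORT)
 *		hif_debug("direct link not supported on this target");
 */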