/*
 * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
26 */ 27 28 #include "targcfg.h" 29 #include "qdf_lock.h" 30 #include "qdf_status.h" 31 #include "qdf_status.h" 32 #include <qdf_atomic.h> /* qdf_atomic_read */ 33 #include <targaddrs.h> 34 #include "hif_io32.h" 35 #include <hif.h> 36 #include "regtable.h" 37 #define ATH_MODULE_NAME hif 38 #include <a_debug.h> 39 #include "hif_main.h" 40 #include "hif_hw_version.h" 41 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) 42 #include "ce_tasklet.h" 43 #endif 44 #include "qdf_trace.h" 45 #include "qdf_status.h" 46 #include "hif_debug.h" 47 #include "mp_dev.h" 48 #include "ce_api.h" 49 #ifdef QCA_WIFI_QCA8074 50 #include "hal_api.h" 51 #endif 52 #include "hif_napi.h" 53 54 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start) 55 { 56 hif_trigger_dump(hif_ctx, cmd_id, start); 57 } 58 59 /** 60 * hif_get_target_id(): hif_get_target_id 61 * 62 * Return the virtual memory base address to the caller 63 * 64 * @scn: hif_softc 65 * 66 * Return: A_target_id_t 67 */ 68 A_target_id_t hif_get_target_id(struct hif_softc *scn) 69 { 70 return scn->mem; 71 } 72 73 /** 74 * hif_get_targetdef(): hif_get_targetdef 75 * @scn: scn 76 * 77 * Return: void * 78 */ 79 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx) 80 { 81 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 82 83 return scn->targetdef; 84 } 85 86 /** 87 * hif_vote_link_down(): unvote for link up 88 * 89 * Call hif_vote_link_down to release a previous request made using 90 * hif_vote_link_up. A hif_vote_link_down call should only be made 91 * after a corresponding hif_vote_link_up, otherwise you could be 92 * negating a vote from another source. When no votes are present 93 * hif will not guarantee the linkstate after hif_bus_suspend. 94 * 95 * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread 96 * and initialization deinitialization sequencences. 
97 * 98 * Return: n/a 99 */ 100 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx) 101 { 102 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 103 QDF_BUG(scn); 104 105 scn->linkstate_vote--; 106 if (scn->linkstate_vote == 0) 107 hif_bus_prevent_linkdown(scn, false); 108 } 109 110 /** 111 * hif_vote_link_up(): vote to prevent bus from suspending 112 * 113 * Makes hif guarantee that fw can message the host normally 114 * durring suspend. 115 * 116 * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread 117 * and initialization deinitialization sequencences. 118 * 119 * Return: n/a 120 */ 121 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx) 122 { 123 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 124 QDF_BUG(scn); 125 126 scn->linkstate_vote++; 127 if (scn->linkstate_vote == 1) 128 hif_bus_prevent_linkdown(scn, true); 129 } 130 131 /** 132 * hif_can_suspend_link(): query if hif is permitted to suspend the link 133 * 134 * Hif will ensure that the link won't be suspended if the upperlayers 135 * don't want it to. 136 * 137 * SYNCHRONIZATION: MC thread is stopped before bus suspend thus 138 * we don't need extra locking to ensure votes dont change while 139 * we are in the process of suspending or resuming. 140 * 141 * Return: false if hif will guarantee link up durring suspend. 
142 */ 143 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx) 144 { 145 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 146 QDF_BUG(scn); 147 148 return scn->linkstate_vote == 0; 149 } 150 151 #ifndef CONFIG_WIN 152 #define QCA9984_HOST_INTEREST_ADDRESS -1 153 #define QCA9888_HOST_INTEREST_ADDRESS -1 154 #define IPQ4019_HOST_INTEREST_ADDRESS -1 155 #endif 156 157 /** 158 * hif_hia_item_address(): hif_hia_item_address 159 * @target_type: target_type 160 * @item_offset: item_offset 161 * 162 * Return: n/a 163 */ 164 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset) 165 { 166 switch (target_type) { 167 case TARGET_TYPE_AR6002: 168 return AR6002_HOST_INTEREST_ADDRESS + item_offset; 169 case TARGET_TYPE_AR6003: 170 return AR6003_HOST_INTEREST_ADDRESS + item_offset; 171 case TARGET_TYPE_AR6004: 172 return AR6004_HOST_INTEREST_ADDRESS + item_offset; 173 case TARGET_TYPE_AR6006: 174 return AR6006_HOST_INTEREST_ADDRESS + item_offset; 175 case TARGET_TYPE_AR9888: 176 return AR9888_HOST_INTEREST_ADDRESS + item_offset; 177 case TARGET_TYPE_AR6320: 178 case TARGET_TYPE_AR6320V2: 179 return AR6320_HOST_INTEREST_ADDRESS + item_offset; 180 case TARGET_TYPE_ADRASTEA: 181 /* ADRASTEA doesn't have a host interest address */ 182 ASSERT(0); 183 return 0; 184 case TARGET_TYPE_AR900B: 185 return AR900B_HOST_INTEREST_ADDRESS + item_offset; 186 case TARGET_TYPE_QCA9984: 187 return QCA9984_HOST_INTEREST_ADDRESS + item_offset; 188 case TARGET_TYPE_QCA9888: 189 return QCA9888_HOST_INTEREST_ADDRESS + item_offset; 190 case TARGET_TYPE_IPQ4019: 191 return IPQ4019_HOST_INTEREST_ADDRESS + item_offset; 192 193 default: 194 ASSERT(0); 195 return 0; 196 } 197 } 198 199 /** 200 * hif_max_num_receives_reached() - check max receive is reached 201 * @scn: HIF Context 202 * @count: unsigned int. 
203 * 204 * Output check status as bool 205 * 206 * Return: bool 207 */ 208 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count) 209 { 210 if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn))) 211 return count > 120; 212 else 213 return count > MAX_NUM_OF_RECEIVES; 214 } 215 216 /** 217 * init_buffer_count() - initial buffer count 218 * @maxSize: qdf_size_t 219 * 220 * routine to modify the initial buffer count to be allocated on an os 221 * platform basis. Platform owner will need to modify this as needed 222 * 223 * Return: qdf_size_t 224 */ 225 qdf_size_t init_buffer_count(qdf_size_t maxSize) 226 { 227 return maxSize; 228 } 229 230 /** 231 * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint 232 * @hif_ctx: hif context 233 * @htc_htt_tx_endpoint: htt_tx_endpoint 234 * 235 * Return: void 236 */ 237 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx, 238 int htc_htt_tx_endpoint) 239 { 240 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 241 242 if (!scn) { 243 HIF_ERROR("%s: error: scn or scn->hif_sc is NULL!", 244 __func__); 245 return; 246 } 247 248 scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint; 249 } 250 251 static const struct qwlan_hw qwlan_hw_list[] = { 252 { 253 .id = AR6320_REV1_VERSION, 254 .subid = 0, 255 .name = "QCA6174_REV1", 256 }, 257 { 258 .id = AR6320_REV1_1_VERSION, 259 .subid = 0x1, 260 .name = "QCA6174_REV1_1", 261 }, 262 { 263 .id = AR6320_REV1_3_VERSION, 264 .subid = 0x2, 265 .name = "QCA6174_REV1_3", 266 }, 267 { 268 .id = AR6320_REV2_1_VERSION, 269 .subid = 0x4, 270 .name = "QCA6174_REV2_1", 271 }, 272 { 273 .id = AR6320_REV2_1_VERSION, 274 .subid = 0x5, 275 .name = "QCA6174_REV2_2", 276 }, 277 { 278 .id = AR6320_REV3_VERSION, 279 .subid = 0x6, 280 .name = "QCA6174_REV2.3", 281 }, 282 { 283 .id = AR6320_REV3_VERSION, 284 .subid = 0x8, 285 .name = "QCA6174_REV3", 286 }, 287 { 288 .id = AR6320_REV3_VERSION, 289 .subid = 0x9, 290 .name = "QCA6174_REV3_1", 291 }, 292 { 293 .id = 
AR6320_REV3_2_VERSION, 294 .subid = 0xA, 295 .name = "AR6320_REV3_2_VERSION", 296 }, 297 { 298 .id = WCN3990_v1, 299 .subid = 0x0, 300 .name = "WCN3990_V1", 301 }, 302 { 303 .id = WCN3990_v2, 304 .subid = 0x0, 305 .name = "WCN3990_V2", 306 }, 307 { 308 .id = WCN3990_v2_1, 309 .subid = 0x0, 310 .name = "WCN3990_V2.1", 311 }, 312 { 313 .id = QCA9379_REV1_VERSION, 314 .subid = 0xC, 315 .name = "QCA9379_REV1", 316 }, 317 { 318 .id = QCA9379_REV1_VERSION, 319 .subid = 0xD, 320 .name = "QCA9379_REV1_1", 321 } 322 }; 323 324 /** 325 * hif_get_hw_name(): get a human readable name for the hardware 326 * @info: Target Info 327 * 328 * Return: human readable name for the underlying wifi hardware. 329 */ 330 static const char *hif_get_hw_name(struct hif_target_info *info) 331 { 332 int i; 333 334 if (info->hw_name) 335 return info->hw_name; 336 337 for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) { 338 if (info->target_version == qwlan_hw_list[i].id && 339 info->target_revision == qwlan_hw_list[i].subid) { 340 return qwlan_hw_list[i].name; 341 } 342 } 343 344 info->hw_name = qdf_mem_malloc(64); 345 if (!info->hw_name) 346 return "Unknown Device (nomem)"; 347 348 i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.", 349 info->target_version); 350 if (i < 0) 351 return "Unknown Device (snprintf failure)"; 352 else 353 return info->hw_name; 354 } 355 356 /** 357 * hif_get_hw_info(): hif_get_hw_info 358 * @scn: scn 359 * @version: version 360 * @revision: revision 361 * 362 * Return: n/a 363 */ 364 void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision, 365 const char **target_name) 366 { 367 struct hif_target_info *info = hif_get_target_info_handle(scn); 368 struct hif_softc *sc = HIF_GET_SOFTC(scn); 369 370 if (sc->bus_type == QDF_BUS_TYPE_USB) 371 hif_usb_get_hw_info(sc); 372 373 *version = info->target_version; 374 *revision = info->target_revision; 375 *target_name = hif_get_hw_name(info); 376 } 377 378 /** 379 * hif_get_dev_ba(): API to get device base 
address. 380 * @scn: scn 381 * @version: version 382 * @revision: revision 383 * 384 * Return: n/a 385 */ 386 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle) 387 { 388 struct hif_softc *scn = (struct hif_softc *)hif_handle; 389 390 return scn->mem; 391 } 392 /** 393 * hif_open(): hif_open 394 * @qdf_ctx: QDF Context 395 * @mode: Driver Mode 396 * @bus_type: Bus Type 397 * @cbk: CDS Callbacks 398 * 399 * API to open HIF Context 400 * 401 * Return: HIF Opaque Pointer 402 */ 403 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode, 404 enum qdf_bus_type bus_type, 405 struct hif_driver_state_callbacks *cbk) 406 { 407 struct hif_softc *scn; 408 QDF_STATUS status = QDF_STATUS_SUCCESS; 409 int bus_context_size = hif_bus_get_context_size(bus_type); 410 411 if (bus_context_size == 0) { 412 HIF_ERROR("%s: context size 0 not allowed", __func__); 413 return NULL; 414 } 415 416 scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size); 417 if (!scn) { 418 HIF_ERROR("%s: cannot alloc memory for HIF context of size:%d", 419 __func__, bus_context_size); 420 return GET_HIF_OPAQUE_HDL(scn); 421 } 422 423 scn->qdf_dev = qdf_ctx; 424 scn->hif_con_param = mode; 425 qdf_atomic_init(&scn->active_tasklet_cnt); 426 qdf_atomic_init(&scn->active_grp_tasklet_cnt); 427 qdf_atomic_init(&scn->link_suspended); 428 qdf_atomic_init(&scn->tasklet_from_intr); 429 qdf_mem_copy(&scn->callbacks, cbk, sizeof(struct hif_driver_state_callbacks)); 430 scn->bus_type = bus_type; 431 status = hif_bus_open(scn, bus_type); 432 if (status != QDF_STATUS_SUCCESS) { 433 HIF_ERROR("%s: hif_bus_open error = %d, bus_type = %d", 434 __func__, status, bus_type); 435 qdf_mem_free(scn); 436 scn = NULL; 437 } 438 439 return GET_HIF_OPAQUE_HDL(scn); 440 } 441 442 /** 443 * hif_close(): hif_close 444 * @hif_ctx: hif_ctx 445 * 446 * Return: n/a 447 */ 448 void hif_close(struct hif_opaque_softc *hif_ctx) 449 { 450 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 451 452 if (scn == NULL) { 453 
HIF_ERROR("%s: hif_opaque_softc is NULL", __func__); 454 return; 455 } 456 457 if (scn->athdiag_procfs_inited) { 458 athdiag_procfs_remove(); 459 scn->athdiag_procfs_inited = false; 460 } 461 462 if (scn->target_info.hw_name) { 463 char *hw_name = scn->target_info.hw_name; 464 scn->target_info.hw_name = "ErrUnloading"; 465 qdf_mem_free(hw_name); 466 } 467 468 hif_bus_close(scn); 469 qdf_mem_free(scn); 470 } 471 472 #ifdef QCA_WIFI_QCA8074 473 static QDF_STATUS hif_hal_attach(struct hif_softc *scn) 474 { 475 if (ce_srng_based(scn)) { 476 scn->hal_soc = hal_attach(scn, scn->qdf_dev); 477 if (scn->hal_soc == NULL) 478 return QDF_STATUS_E_FAILURE; 479 } 480 481 return QDF_STATUS_SUCCESS; 482 } 483 #else 484 static QDF_STATUS hif_hal_attach(struct hif_softc *scn) 485 { 486 return QDF_STATUS_SUCCESS; 487 } 488 #endif 489 490 /** 491 * hif_enable(): hif_enable 492 * @hif_ctx: hif_ctx 493 * @dev: dev 494 * @bdev: bus dev 495 * @bid: bus ID 496 * @bus_type: bus type 497 * @type: enable type 498 * 499 * Return: QDF_STATUS 500 */ 501 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev, 502 void *bdev, const hif_bus_id *bid, 503 enum qdf_bus_type bus_type, 504 enum hif_enable_type type) 505 { 506 QDF_STATUS status; 507 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 508 509 if (scn == NULL) { 510 HIF_ERROR("%s: hif_ctx = NULL", __func__); 511 return QDF_STATUS_E_NULL_VALUE; 512 } 513 514 status = hif_enable_bus(scn, dev, bdev, bid, type); 515 if (status != QDF_STATUS_SUCCESS) { 516 HIF_ERROR("%s: hif_enable_bus error = %d", 517 __func__, status); 518 return status; 519 } 520 521 status = hif_hal_attach(scn); 522 if (status != QDF_STATUS_SUCCESS) { 523 HIF_ERROR("%s: hal attach failed", __func__); 524 return status; 525 } 526 527 if (hif_bus_configure(scn)) { 528 HIF_ERROR("%s: Target probe failed.", __func__); 529 hif_disable_bus(scn); 530 status = QDF_STATUS_E_FAILURE; 531 return status; 532 } 533 534 /* 535 * Flag to avoid potential unallocated memory 
access from MSI 536 * interrupt handler which could get scheduled as soon as MSI 537 * is enabled, i.e to take care of the race due to the order 538 * in where MSI is enabled before the memory, that will be 539 * in interrupt handlers, is allocated. 540 */ 541 542 scn->hif_init_done = true; 543 544 HIF_TRACE("%s: OK", __func__); 545 546 return QDF_STATUS_SUCCESS; 547 } 548 549 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type) 550 { 551 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 552 553 if (!scn) 554 return; 555 556 hif_nointrs(scn); 557 if (scn->hif_init_done == false) 558 hif_shutdown_device(hif_ctx); 559 else 560 hif_stop(hif_ctx); 561 562 hif_disable_bus(scn); 563 564 hif_wlan_disable(scn); 565 566 scn->notice_send = false; 567 568 HIF_INFO("%s: X", __func__); 569 } 570 571 void hif_display_stats(struct hif_opaque_softc *hif_ctx) 572 { 573 hif_display_bus_stats(hif_ctx); 574 } 575 576 void hif_clear_stats(struct hif_opaque_softc *hif_ctx) 577 { 578 hif_clear_bus_stats(hif_ctx); 579 } 580 581 /** 582 * hif_crash_shutdown_dump_bus_register() - dump bus registers 583 * @hif_ctx: hif_ctx 584 * 585 * Return: n/a 586 */ 587 #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) \ 588 && defined(DEBUG) 589 590 static void hif_crash_shutdown_dump_bus_register(void *hif_ctx) 591 { 592 struct hif_opaque_softc *scn = hif_ctx; 593 594 if (hif_check_soc_status(scn)) 595 return; 596 597 if (hif_dump_registers(scn)) 598 HIF_ERROR("Failed to dump bus registers!"); 599 } 600 601 /** 602 * hif_crash_shutdown(): hif_crash_shutdown 603 * 604 * This function is called by the platform driver to dump CE registers 605 * 606 * @hif_ctx: hif_ctx 607 * 608 * Return: n/a 609 */ 610 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx) 611 { 612 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 613 614 if (!hif_ctx) 615 return; 616 617 if (scn->bus_type == QDF_BUS_TYPE_SNOC) { 618 HIF_INFO_MED("%s: RAM dump disabled for bustype %d", 619 __func__, scn->bus_type); 
620 return; 621 } 622 623 if (TARGET_STATUS_RESET == scn->target_status) { 624 HIF_INFO_MED("%s: Target is already asserted, ignore!", 625 __func__); 626 return; 627 } 628 629 if (hif_is_load_or_unload_in_progress(scn)) { 630 HIF_ERROR("%s: Load/unload is in progress, ignore!", __func__); 631 return; 632 } 633 634 hif_crash_shutdown_dump_bus_register(hif_ctx); 635 636 if (ol_copy_ramdump(hif_ctx)) 637 goto out; 638 639 HIF_INFO_MED("%s: RAM dump collecting completed!", __func__); 640 641 out: 642 return; 643 } 644 #else 645 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx) 646 { 647 HIF_INFO_MED("%s: Collecting target RAM dump disabled", 648 __func__); 649 return; 650 } 651 #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */ 652 653 #ifdef QCA_WIFI_3_0 654 /** 655 * hif_check_fw_reg(): hif_check_fw_reg 656 * @scn: scn 657 * @state: 658 * 659 * Return: int 660 */ 661 int hif_check_fw_reg(struct hif_opaque_softc *scn) 662 { 663 return 0; 664 } 665 #endif 666 667 #ifdef IPA_OFFLOAD 668 /** 669 * hif_read_phy_mem_base(): hif_read_phy_mem_base 670 * @scn: scn 671 * @phy_mem_base: physical mem base 672 * 673 * Return: n/a 674 */ 675 void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base) 676 { 677 *phy_mem_base = scn->mem_pa; 678 } 679 #endif /* IPA_OFFLOAD */ 680 681 /** 682 * hif_get_device_type(): hif_get_device_type 683 * @device_id: device_id 684 * @revision_id: revision_id 685 * @hif_type: returned hif_type 686 * @target_type: returned target_type 687 * 688 * Return: int 689 */ 690 int hif_get_device_type(uint32_t device_id, 691 uint32_t revision_id, 692 uint32_t *hif_type, uint32_t *target_type) 693 { 694 int ret = 0; 695 696 switch (device_id) { 697 case ADRASTEA_DEVICE_ID_P2_E12: 698 699 *hif_type = HIF_TYPE_ADRASTEA; 700 *target_type = TARGET_TYPE_ADRASTEA; 701 break; 702 703 case AR9888_DEVICE_ID: 704 *hif_type = HIF_TYPE_AR9888; 705 *target_type = TARGET_TYPE_AR9888; 706 break; 707 708 case AR6320_DEVICE_ID: 709 switch (revision_id) 
{ 710 case AR6320_FW_1_1: 711 case AR6320_FW_1_3: 712 *hif_type = HIF_TYPE_AR6320; 713 *target_type = TARGET_TYPE_AR6320; 714 break; 715 716 case AR6320_FW_2_0: 717 case AR6320_FW_3_0: 718 case AR6320_FW_3_2: 719 *hif_type = HIF_TYPE_AR6320V2; 720 *target_type = TARGET_TYPE_AR6320V2; 721 break; 722 723 default: 724 HIF_ERROR("%s: error - dev_id = 0x%x, rev_id = 0x%x", 725 __func__, device_id, revision_id); 726 ret = -ENODEV; 727 goto end; 728 } 729 break; 730 731 case AR9887_DEVICE_ID: 732 *hif_type = HIF_TYPE_AR9888; 733 *target_type = TARGET_TYPE_AR9888; 734 HIF_INFO(" *********** AR9887 **************"); 735 break; 736 737 case QCA9984_DEVICE_ID: 738 *hif_type = HIF_TYPE_QCA9984; 739 *target_type = TARGET_TYPE_QCA9984; 740 HIF_INFO(" *********** QCA9984 *************"); 741 break; 742 743 case QCA9888_DEVICE_ID: 744 *hif_type = HIF_TYPE_QCA9888; 745 *target_type = TARGET_TYPE_QCA9888; 746 HIF_INFO(" *********** QCA9888 *************"); 747 break; 748 749 case AR900B_DEVICE_ID: 750 *hif_type = HIF_TYPE_AR900B; 751 *target_type = TARGET_TYPE_AR900B; 752 HIF_INFO(" *********** AR900B *************"); 753 break; 754 755 case IPQ4019_DEVICE_ID: 756 *hif_type = HIF_TYPE_IPQ4019; 757 *target_type = TARGET_TYPE_IPQ4019; 758 HIF_INFO(" *********** IPQ4019 *************"); 759 break; 760 761 case QCA8074_DEVICE_ID: 762 case RUMIM2M_DEVICE_ID_NODE0: 763 case RUMIM2M_DEVICE_ID_NODE1: 764 *hif_type = HIF_TYPE_QCA8074; 765 *target_type = TARGET_TYPE_QCA8074; 766 HIF_INFO(" *********** QCA8074 *************\n"); 767 break; 768 769 case QCA6290_EMULATION_DEVICE_ID: 770 *hif_type = HIF_TYPE_QCA6290; 771 *target_type = TARGET_TYPE_QCA6290; 772 HIF_INFO(" *********** QCA6290EMU *************\n"); 773 break; 774 775 default: 776 HIF_ERROR("%s: Unsupported device ID!", __func__); 777 ret = -ENODEV; 778 break; 779 } 780 end: 781 return ret; 782 } 783 784 /** 785 * hif_needs_bmi() - return true if the soc needs bmi through the driver 786 * @hif_ctx: hif context 787 * 788 * Return: 
true if the soc needs driver bmi otherwise false 789 */ 790 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx) 791 { 792 struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); 793 794 return (hif_sc->bus_type != QDF_BUS_TYPE_SNOC) && 795 !ce_srng_based(hif_sc); 796 } 797 798 /** 799 * hif_get_bus_type() - return the bus type 800 * 801 * Return: enum qdf_bus_type 802 */ 803 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl) 804 { 805 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 806 return scn->bus_type; 807 } 808 809 /** 810 * Target info and ini parameters are global to the driver 811 * Hence these structures are exposed to all the modules in 812 * the driver and they don't need to maintains multiple copies 813 * of the same info, instead get the handle from hif and 814 * modify them in hif 815 */ 816 817 /** 818 * hif_get_ini_handle() - API to get hif_config_param handle 819 * @hif_ctx: HIF Context 820 * 821 * Return: pointer to hif_config_info 822 */ 823 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx) 824 { 825 struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx); 826 827 return &sc->hif_config; 828 } 829 830 /** 831 * hif_get_target_info_handle() - API to get hif_target_info handle 832 * @hif_ctx: HIF context 833 * 834 * Return: Pointer to hif_target_info 835 */ 836 struct hif_target_info *hif_get_target_info_handle( 837 struct hif_opaque_softc *hif_ctx) 838 { 839 struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx); 840 841 return &sc->target_info; 842 843 } 844 845 #if defined(FEATURE_LRO) 846 /** 847 * hif_lro_flush_cb_register - API to register for LRO Flush Callback 848 * @scn: HIF Context 849 * @handler: Function pointer to be called by HIF 850 * @data: Private data to be used by the module registering to HIF 851 * 852 * Return: void 853 */ 854 void hif_lro_flush_cb_register(struct hif_opaque_softc *scn, 855 void (lro_flush_handler)(void *), 856 void *(lro_init_handler)(void)) 857 { 858 if (hif_napi_enabled(scn, -1)) 859 
hif_napi_lro_flush_cb_register(scn, lro_flush_handler, 860 lro_init_handler); 861 else 862 ce_lro_flush_cb_register(scn, lro_flush_handler, 863 lro_init_handler); 864 } 865 866 /** 867 * hif_get_lro_info - Returns LRO instance for instance ID 868 * @ctx_id: LRO instance ID 869 * @hif_hdl: HIF Context 870 * 871 * Return: Pointer to LRO instance. 872 */ 873 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl) 874 { 875 void *data; 876 877 if (hif_napi_enabled(hif_hdl, -1)) 878 data = hif_napi_get_lro_info(hif_hdl, ctx_id); 879 else 880 data = hif_ce_get_lro_ctx(hif_hdl, ctx_id); 881 882 return data; 883 } 884 885 /** 886 * hif_get_rx_ctx_id - Returns LRO instance ID based on underlying LRO instance 887 * @ctx_id: LRO context ID 888 * @hif_hdl: HIF Context 889 * 890 * Return: LRO instance ID 891 */ 892 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl) 893 { 894 if (hif_napi_enabled(hif_hdl, -1)) 895 return NAPI_PIPE2ID(ctx_id); 896 else 897 return ctx_id; 898 } 899 900 /** 901 * hif_lro_flush_cb_deregister - API to deregister for LRO Flush Callbacks 902 * @hif_hdl: HIF Context 903 * @lro_deinit_cb: LRO deinit callback 904 * 905 * Return: void 906 */ 907 void hif_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl, 908 void (lro_deinit_cb)(void *)) 909 { 910 if (hif_napi_enabled(hif_hdl, -1)) 911 hif_napi_lro_flush_cb_deregister(hif_hdl, lro_deinit_cb); 912 else 913 ce_lro_flush_cb_deregister(hif_hdl, lro_deinit_cb); 914 } 915 #else /* !defined(FEATURE_LRO) */ 916 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl) 917 { 918 return 0; 919 } 920 #endif 921 922 /** 923 * hif_get_target_status - API to get target status 924 * @hif_ctx: HIF Context 925 * 926 * Return: enum hif_target_status 927 */ 928 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx) 929 { 930 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 931 932 return scn->target_status; 933 } 934 935 /** 936 * hif_set_target_status() - 
API to set target status 937 * @hif_ctx: HIF Context 938 * @status: Target Status 939 * 940 * Return: void 941 */ 942 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum 943 hif_target_status status) 944 { 945 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 946 947 scn->target_status = status; 948 } 949 950 /** 951 * hif_init_ini_config() - API to initialize HIF configuration parameters 952 * @hif_ctx: HIF Context 953 * @cfg: HIF Configuration 954 * 955 * Return: void 956 */ 957 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx, 958 struct hif_config_info *cfg) 959 { 960 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 961 962 qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info)); 963 } 964 965 /** 966 * hif_get_conparam() - API to get driver mode in HIF 967 * @scn: HIF Context 968 * 969 * Return: driver mode of operation 970 */ 971 uint32_t hif_get_conparam(struct hif_softc *scn) 972 { 973 if (!scn) 974 return 0; 975 976 return scn->hif_con_param; 977 } 978 979 /** 980 * hif_get_callbacks_handle() - API to get callbacks Handle 981 * @scn: HIF Context 982 * 983 * Return: pointer to HIF Callbacks 984 */ 985 struct hif_driver_state_callbacks *hif_get_callbacks_handle(struct hif_softc *scn) 986 { 987 return &scn->callbacks; 988 } 989 990 /** 991 * hif_is_driver_unloading() - API to query upper layers if driver is unloading 992 * @scn: HIF Context 993 * 994 * Return: True/False 995 */ 996 bool hif_is_driver_unloading(struct hif_softc *scn) 997 { 998 struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); 999 1000 if (cbk && cbk->is_driver_unloading) 1001 return cbk->is_driver_unloading(cbk->context); 1002 1003 return false; 1004 } 1005 1006 /** 1007 * hif_is_load_or_unload_in_progress() - API to query upper layers if 1008 * load/unload in progress 1009 * @scn: HIF Context 1010 * 1011 * Return: True/False 1012 */ 1013 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn) 1014 { 1015 struct 
hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); 1016 1017 if (cbk && cbk->is_load_unload_in_progress) 1018 return cbk->is_load_unload_in_progress(cbk->context); 1019 1020 return false; 1021 } 1022 1023 /** 1024 * hif_update_pipe_callback() - API to register pipe specific callbacks 1025 * @osc: Opaque softc 1026 * @pipeid: pipe id 1027 * @callbacks: callbacks to register 1028 * 1029 * Return: void 1030 */ 1031 1032 void hif_update_pipe_callback(struct hif_opaque_softc *osc, 1033 u_int8_t pipeid, 1034 struct hif_msg_callbacks *callbacks) 1035 { 1036 struct hif_softc *scn = HIF_GET_SOFTC(osc); 1037 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1038 struct HIF_CE_pipe_info *pipe_info; 1039 1040 QDF_BUG(pipeid < CE_COUNT_MAX); 1041 1042 HIF_INFO_LO("+%s pipeid %d\n", __func__, pipeid); 1043 1044 pipe_info = &hif_state->pipe_info[pipeid]; 1045 1046 qdf_mem_copy(&pipe_info->pipe_callbacks, 1047 callbacks, sizeof(pipe_info->pipe_callbacks)); 1048 1049 HIF_INFO_LO("-%s\n", __func__); 1050 } 1051 1052 /** 1053 * hif_is_recovery_in_progress() - API to query upper layers if recovery in 1054 * progress 1055 * @scn: HIF Context 1056 * 1057 * Return: True/False 1058 */ 1059 bool hif_is_recovery_in_progress(struct hif_softc *scn) 1060 { 1061 struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); 1062 1063 if (cbk && cbk->is_recovery_in_progress) 1064 return cbk->is_recovery_in_progress(cbk->context); 1065 1066 return false; 1067 } 1068 #if defined(HIF_PCI) || defined(SNOC) || defined(HIF_AHB) 1069 /** 1070 * hif_batch_send() - API to access hif specific function 1071 * ce_batch_send. 
1072 * @osc: HIF Context 1073 * @msdu : list of msdus to be sent 1074 * @transfer_id : transfer id 1075 * @len : donwloaded length 1076 * 1077 * Return: list of msds not sent 1078 */ 1079 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, 1080 uint32_t transfer_id, u_int32_t len, uint32_t sendhead) 1081 { 1082 void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE); 1083 1084 return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id, 1085 len, sendhead); 1086 } 1087 1088 /** 1089 * hif_update_tx_ring() - API to access hif specific function 1090 * ce_update_tx_ring. 1091 * @osc: HIF Context 1092 * @num_htt_cmpls : number of htt compl received. 1093 * 1094 * Return: void 1095 */ 1096 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls) 1097 { 1098 void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE); 1099 ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls); 1100 } 1101 1102 1103 /** 1104 * hif_send_single() - API to access hif specific function 1105 * ce_send_single. 1106 * @osc: HIF Context 1107 * @msdu : msdu to be sent 1108 * @transfer_id: transfer id 1109 * @len : downloaded length 1110 * 1111 * Return: msdu sent status 1112 */ 1113 int hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, uint32_t 1114 transfer_id, u_int32_t len) 1115 { 1116 void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE); 1117 1118 return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id, 1119 len); 1120 } 1121 1122 /** 1123 * hif_send_fast() - API to access hif specific function 1124 * ce_send_fast. 1125 * @osc: HIF Context 1126 * @msdu : array of msdus to be sent 1127 * @num_msdus : number of msdus in an array 1128 * @transfer_id: transfer id 1129 * @download_len: download length 1130 * 1131 * Return: No. 
of packets that could be sent 1132 */ 1133 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf, 1134 uint32_t transfer_id, uint32_t download_len) 1135 { 1136 void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE); 1137 1138 return ce_send_fast((struct CE_handle *)ce_tx_hdl, nbuf, 1139 transfer_id, download_len); 1140 } 1141 #endif 1142 1143 /** 1144 * hif_reg_write() - API to access hif specific function 1145 * hif_write32_mb. 1146 * @hif_ctx : HIF Context 1147 * @offset : offset on which value has to be written 1148 * @value : value to be written 1149 * 1150 * Return: None 1151 */ 1152 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset, 1153 uint32_t value) 1154 { 1155 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 1156 hif_write32_mb(scn->mem + offset, value); 1157 1158 } 1159 1160 /** 1161 * hif_reg_read() - API to access hif specific function 1162 * hif_read32_mb. 1163 * @hif_ctx : HIF Context 1164 * @offset : offset from which value has to be read 1165 * 1166 * Return: Read value 1167 */ 1168 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset) 1169 { 1170 1171 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 1172 return hif_read32_mb(scn->mem + offset); 1173 } 1174 1175 #if defined(HIF_USB) 1176 /** 1177 * hif_ramdump_handler(): generic ramdump handler 1178 * @scn: struct hif_opaque_softc 1179 * 1180 * Return: None 1181 */ 1182 1183 void hif_ramdump_handler(struct hif_opaque_softc *scn) 1184 1185 { 1186 if (hif_get_bus_type == QDF_BUS_TYPE_USB) 1187 hif_usb_ramdump_handler(); 1188 } 1189 #endif 1190 1191 /** 1192 * hif_register_ext_group_int_handler() - API to register external group 1193 * interrupt handler. 
1194 * @hif_ctx : HIF Context 1195 * @numirq: number of irq's in the group 1196 * @irq: array of irq values 1197 * @ext_intr_handler: callback interrupt handler function 1198 * @context: context to passed in callback 1199 * 1200 * Return: status 1201 */ 1202 uint32_t hif_register_ext_group_int_handler(struct hif_opaque_softc *hif_ctx, 1203 uint32_t numirq, uint32_t irq[], ext_intr_handler handler, 1204 void *context) 1205 { 1206 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 1207 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1208 struct hif_ext_group_entry *hif_ext_group; 1209 1210 if (scn->hif_init_done) { 1211 HIF_ERROR("%s Called after HIF initialization \n", __func__); 1212 return QDF_STATUS_E_FAILURE; 1213 } 1214 1215 if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) { 1216 HIF_ERROR("%s Max groups reached\n", __func__); 1217 return QDF_STATUS_E_FAILURE; 1218 } 1219 1220 if (numirq >= HIF_MAX_GRP_IRQ) { 1221 HIF_ERROR("%s invalid numirq\n", __func__); 1222 return QDF_STATUS_E_FAILURE; 1223 } 1224 1225 hif_ext_group = &hif_state->hif_ext_group[hif_state->hif_num_extgroup]; 1226 1227 hif_ext_group->numirq = numirq; 1228 qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0])); 1229 hif_ext_group->context = context; 1230 hif_ext_group->handler = handler; 1231 hif_ext_group->configured = true; 1232 hif_ext_group->grp_id = hif_state->hif_num_extgroup; 1233 hif_ext_group->hif_state = hif_state; 1234 1235 hif_state->hif_num_extgroup++; 1236 return QDF_STATUS_SUCCESS; 1237 } 1238 1239 /** 1240 * hif_ext_grp_tasklet() - grp tasklet 1241 * data: context 1242 * 1243 * return: void 1244 */ 1245 void hif_ext_grp_tasklet(unsigned long data) 1246 { 1247 struct hif_ext_group_entry *hif_ext_group = 1248 (struct hif_ext_group_entry *)data; 1249 struct HIF_CE_state *hif_state = hif_ext_group->hif_state; 1250 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 1251 1252 if (hif_ext_group->grp_id < HIF_MAX_GROUP) { 1253 
hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET); 1254 hif_grp_irq_enable(scn, hif_ext_group->grp_id); 1255 } else { 1256 HIF_ERROR("%s: ERROR - invalid grp_id = %d", 1257 __func__, hif_ext_group->grp_id); 1258 } 1259 1260 qdf_atomic_dec(&scn->active_grp_tasklet_cnt); 1261 } 1262 1263 /** 1264 * hif_grp_tasklet_kill() - grp tasklet kill 1265 * scn: hif_softc 1266 * 1267 * return: void 1268 */ 1269 void hif_grp_tasklet_kill(struct hif_softc *scn) 1270 { 1271 int i; 1272 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1273 1274 for (i = 0; i < HIF_MAX_GROUP; i++) 1275 if (hif_state->hif_ext_group[i].inited) { 1276 tasklet_kill(&hif_state->hif_ext_group[i].intr_tq); 1277 hif_state->hif_ext_group[i].inited = false; 1278 } 1279 qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0); 1280 } 1281