1 /* 2 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #ifndef _HIF_H_ 21 #define _HIF_H_ 22 23 #ifdef __cplusplus 24 extern "C" { 25 #endif /* __cplusplus */ 26 27 /* Header files */ 28 #include <qdf_status.h> 29 #include "qdf_nbuf.h" 30 #include "qdf_lro.h" 31 #include "ol_if_athvar.h" 32 #include <linux/platform_device.h> 33 #ifdef HIF_PCI 34 #include <linux/pci.h> 35 #endif /* HIF_PCI */ 36 #ifdef HIF_USB 37 #include <linux/usb.h> 38 #endif /* HIF_USB */ 39 #ifdef IPA_OFFLOAD 40 #include <linux/ipa.h> 41 #endif 42 #include "cfg_ucfg_api.h" 43 #include "qdf_dev.h" 44 #include <wlan_init_cfg.h> 45 46 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1 47 48 typedef void __iomem *A_target_id_t; 49 typedef void *hif_handle_t; 50 51 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE) 52 #define HIF_WORK_DRAIN_WAIT_CNT 50 53 54 #define HIF_EP_WAKE_RESET_WAIT_CNT 10 55 #endif 56 57 #define HIF_TYPE_AR6002 2 58 #define HIF_TYPE_AR6003 3 59 #define HIF_TYPE_AR6004 5 60 #define HIF_TYPE_AR9888 6 61 #define HIF_TYPE_AR6320 7 62 #define HIF_TYPE_AR6320V2 8 63 /* For attaching Peregrine 2.0 board host_reg_tbl only 
*/ 64 #define HIF_TYPE_AR9888V2 9 65 #define HIF_TYPE_ADRASTEA 10 66 #define HIF_TYPE_AR900B 11 67 #define HIF_TYPE_QCA9984 12 68 #define HIF_TYPE_QCA9888 14 69 #define HIF_TYPE_QCA8074 15 70 #define HIF_TYPE_QCA6290 16 71 #define HIF_TYPE_QCN7605 17 72 #define HIF_TYPE_QCA6390 18 73 #define HIF_TYPE_QCA8074V2 19 74 #define HIF_TYPE_QCA6018 20 75 #define HIF_TYPE_QCN9000 21 76 #define HIF_TYPE_QCA6490 22 77 #define HIF_TYPE_QCA6750 23 78 #define HIF_TYPE_QCA5018 24 79 #define HIF_TYPE_QCN6122 25 80 #define HIF_TYPE_KIWI 26 81 #define HIF_TYPE_QCN9224 27 82 #define HIF_TYPE_QCA9574 28 83 #define HIF_TYPE_MANGO 29 84 85 #define DMA_COHERENT_MASK_DEFAULT 37 86 87 #ifdef IPA_OFFLOAD 88 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3 32 89 #endif 90 91 /* enum hif_ic_irq - enum defining integrated chip irq numbers 92 * defining irq nubers that can be used by external modules like datapath 93 */ 94 enum hif_ic_irq { 95 host2wbm_desc_feed = 16, 96 host2reo_re_injection, 97 host2reo_command, 98 host2rxdma_monitor_ring3, 99 host2rxdma_monitor_ring2, 100 host2rxdma_monitor_ring1, 101 reo2host_exception, 102 wbm2host_rx_release, 103 reo2host_status, 104 reo2host_destination_ring4, 105 reo2host_destination_ring3, 106 reo2host_destination_ring2, 107 reo2host_destination_ring1, 108 rxdma2host_monitor_destination_mac3, 109 rxdma2host_monitor_destination_mac2, 110 rxdma2host_monitor_destination_mac1, 111 ppdu_end_interrupts_mac3, 112 ppdu_end_interrupts_mac2, 113 ppdu_end_interrupts_mac1, 114 rxdma2host_monitor_status_ring_mac3, 115 rxdma2host_monitor_status_ring_mac2, 116 rxdma2host_monitor_status_ring_mac1, 117 host2rxdma_host_buf_ring_mac3, 118 host2rxdma_host_buf_ring_mac2, 119 host2rxdma_host_buf_ring_mac1, 120 rxdma2host_destination_ring_mac3, 121 rxdma2host_destination_ring_mac2, 122 rxdma2host_destination_ring_mac1, 123 host2tcl_input_ring4, 124 host2tcl_input_ring3, 125 host2tcl_input_ring2, 126 host2tcl_input_ring1, 127 wbm2host_tx_completions_ring4, 128 
wbm2host_tx_completions_ring3, 129 wbm2host_tx_completions_ring2, 130 wbm2host_tx_completions_ring1, 131 tcl2host_status_ring, 132 }; 133 134 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS 135 enum hif_legacy_pci_irq { 136 ce0, 137 ce1, 138 ce2, 139 ce3, 140 ce4, 141 ce5, 142 ce6, 143 ce7, 144 ce8, 145 ce9, 146 ce10, 147 ce11, 148 ce12, 149 ce13, 150 ce14, 151 ce15, 152 reo2sw8_intr2, 153 reo2sw7_intr2, 154 reo2sw6_intr2, 155 reo2sw5_intr2, 156 reo2sw4_intr2, 157 reo2sw3_intr2, 158 reo2sw2_intr2, 159 reo2sw1_intr2, 160 reo2sw0_intr2, 161 reo2sw8_intr, 162 reo2sw7_intr, 163 reo2sw6_inrr, 164 reo2sw5_intr, 165 reo2sw4_intr, 166 reo2sw3_intr, 167 reo2sw2_intr, 168 reo2sw1_intr, 169 reo2sw0_intr, 170 reo2status_intr2, 171 reo_status, 172 reo2rxdma_out_2, 173 reo2rxdma_out_1, 174 reo_cmd, 175 sw2reo6, 176 sw2reo5, 177 sw2reo1, 178 sw2reo, 179 rxdma2reo_mlo_0_dst_ring1, 180 rxdma2reo_mlo_0_dst_ring0, 181 rxdma2reo_mlo_1_dst_ring1, 182 rxdma2reo_mlo_1_dst_ring0, 183 rxdma2reo_dst_ring1, 184 rxdma2reo_dst_ring0, 185 rxdma2sw_dst_ring1, 186 rxdma2sw_dst_ring0, 187 rxdma2release_dst_ring1, 188 rxdma2release_dst_ring0, 189 sw2rxdma_2_src_ring, 190 sw2rxdma_1_src_ring, 191 sw2rxdma_0, 192 wbm2sw6_release2, 193 wbm2sw5_release2, 194 wbm2sw4_release2, 195 wbm2sw3_release2, 196 wbm2sw2_release2, 197 wbm2sw1_release2, 198 wbm2sw0_release2, 199 wbm2sw6_release, 200 wbm2sw5_release, 201 wbm2sw4_release, 202 wbm2sw3_release, 203 wbm2sw2_release, 204 wbm2sw1_release, 205 wbm2sw0_release, 206 wbm2sw_link, 207 wbm_error_release, 208 sw2txmon_src_ring, 209 sw2rxmon_src_ring, 210 txmon2sw_p1_intr1, 211 txmon2sw_p1_intr0, 212 txmon2sw_p0_dest1, 213 txmon2sw_p0_dest0, 214 rxmon2sw_p1_intr1, 215 rxmon2sw_p1_intr0, 216 rxmon2sw_p0_dest1, 217 rxmon2sw_p0_dest0, 218 sw_release, 219 sw2tcl_credit2, 220 sw2tcl_credit, 221 sw2tcl4, 222 sw2tcl5, 223 sw2tcl3, 224 sw2tcl2, 225 sw2tcl1, 226 sw2wbm1, 227 misc_8, 228 misc_7, 229 misc_6, 230 misc_5, 231 misc_4, 232 misc_3, 233 misc_2, 234 misc_1, 235 misc_0, 236 
}; 237 #endif 238 239 struct CE_state; 240 #ifdef QCA_WIFI_QCN9224 241 #define CE_COUNT_MAX 16 242 #else 243 #define CE_COUNT_MAX 12 244 #endif 245 246 #ifndef HIF_MAX_GROUP 247 #define HIF_MAX_GROUP WLAN_CFG_INT_NUM_CONTEXTS 248 #endif 249 250 #ifdef CONFIG_BERYLLIUM 251 #define HIF_MAX_GRP_IRQ 25 252 #else 253 #define HIF_MAX_GRP_IRQ 16 254 #endif 255 256 #ifndef NAPI_YIELD_BUDGET_BASED 257 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT 258 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4 259 #endif 260 #else /* NAPI_YIELD_BUDGET_BASED */ 261 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2 262 #endif /* NAPI_YIELD_BUDGET_BASED */ 263 264 #define QCA_NAPI_BUDGET 64 265 #define QCA_NAPI_DEF_SCALE \ 266 (1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT) 267 268 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE) 269 /* NOTE: "napi->scale" can be changed, 270 * but this does not change the number of buckets 271 */ 272 #define QCA_NAPI_NUM_BUCKETS 4 273 274 /** 275 * qca_napi_stat - stats structure for execution contexts 276 * @napi_schedules - number of times the schedule function is called 277 * @napi_polls - number of times the execution context runs 278 * @napi_completes - number of times that the generating interrupt is reenabled 279 * @napi_workdone - cumulative of all work done reported by handler 280 * @cpu_corrected - incremented when execution context runs on a different core 281 * than the one that its irq is affined to. 
 * @napi_budget_uses - histogram of work done per execution run
 * @time_limit_reached - count of yields due to time limit thresholds
 * @rxpkt_thresh_reached - count of yields due to a work limit
 * @napi_max_poll_time - longest single poll observed, in log-timestamp units
 * @poll_time_buckets - histogram of poll times for the napi
 *
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};


/**
 * per NAPI instance data structure
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device netdev; /* dummy net_dev */
	void *hif_ctx;
	struct napi_struct napi;
	uint8_t scale;   /* currently same on all instances */
	uint8_t id;
	uint8_t cpu;
	int irq;
	cpumask_t cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct rx_thread_napi;
	struct net_device rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t lro_ctx;
};

enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };

/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state: hotplug state of this cpu (see enum qca_napi_cpu_state)
 * @core_id: physical core id of the core
 * @cluster_id: cluster this core belongs to
 * @core_mask: mask to match all core of this cluster
 *
@thread_mask: mask for this core within the cluster
 * @max_freq: maximum clock this core can be clocked at
 * same for all cpus of the same core.
 * @napis: bitmap of napi instances on this core
 * @execs: bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int core_id;
	int cluster_id;
	cpumask_t core_mask;
	cpumask_t thread_mask;
	unsigned int max_freq;
	uint32_t napis;
	uint32_t execs;
	int cluster_nxt;  /* index, not pointer */
};

/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: head of the little-cluster core list in @napi_cpu
 *              (chained via cluster_nxt)
 * @bigcl_head: head of the big-cluster core list in @napi_cpu
 *              (chained via cluster_nxt)
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: NAPI-related state flags
 */
struct qca_napi_data {
	struct hif_softc *hif_softc;
	qdf_spinlock_t lock;
	uint32_t state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t ce_map;
	uint32_t exec_map;
	uint32_t user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu napi_cpu[NR_CPUS];
	int lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t flags;
};

/**
 * struct hif_config_info - Place Holder for HIF configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable Runtime PM
 * @runtime_pm_delay: Runtime PM Delay
 * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
 *
 * Structure for holding HIF ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	uint8_t enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
	uint64_t rx_softirq_max_yield_duration_ns;
};

/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: pointer to hardware name
 *
 * Structure to hold target information.
431 */ 432 struct hif_target_info { 433 uint32_t target_version; 434 uint32_t target_type; 435 uint32_t target_revision; 436 uint32_t soc_version; 437 char *hw_name; 438 }; 439 440 struct hif_opaque_softc { 441 }; 442 443 /** 444 * enum hif_event_type - Type of DP events to be recorded 445 * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event 446 * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event 447 * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event 448 * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event 449 * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event 450 * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event 451 * @HIF_EVENT_BH_COMPLETE: NAPI POLL completion event 452 * @HIF_EVENT_BH_FORCE_BREAK: NAPI POLL force break event 453 */ 454 enum hif_event_type { 455 HIF_EVENT_IRQ_TRIGGER, 456 HIF_EVENT_TIMER_ENTRY, 457 HIF_EVENT_TIMER_EXIT, 458 HIF_EVENT_BH_SCHED, 459 HIF_EVENT_SRNG_ACCESS_START, 460 HIF_EVENT_SRNG_ACCESS_END, 461 HIF_EVENT_BH_COMPLETE, 462 HIF_EVENT_BH_FORCE_BREAK, 463 /* Do check hif_hist_skip_event_record when adding new events */ 464 }; 465 466 /** 467 * enum hif_system_pm_state - System PM state 468 * HIF_SYSTEM_PM_STATE_ON: System in active state 469 * HIF_SYSTEM_PM_STATE_BUS_RESUMING: bus resume in progress as part of 470 * system resume 471 * HIF_SYSTEM_PM_STATE_BUS_SUSPENDING: bus suspend in progress as part of 472 * system suspend 473 * HIF_SYSTEM_PM_STATE_BUS_SUSPENDED: bus suspended as part of system suspend 474 */ 475 enum hif_system_pm_state { 476 HIF_SYSTEM_PM_STATE_ON, 477 HIF_SYSTEM_PM_STATE_BUS_RESUMING, 478 HIF_SYSTEM_PM_STATE_BUS_SUSPENDING, 479 HIF_SYSTEM_PM_STATE_BUS_SUSPENDED, 480 }; 481 482 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY 483 #define HIF_NUM_INT_CONTEXTS HIF_MAX_GROUP 484 485 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) 486 /* HIF_EVENT_HIST_MAX should always be power of 2 */ 487 #define HIF_EVENT_HIST_MAX 512 488 489 #define HIF_EVENT_HIST_ENABLE_MASK 0xFF 490 491 static inline uint64_t 
hif_get_log_timestamp(void) 492 { 493 return qdf_get_log_timestamp(); 494 } 495 496 #else 497 498 #define HIF_EVENT_HIST_MAX 32 499 /* Enable IRQ TRIGGER, NAPI SCHEDULE, SRNG ACCESS START */ 500 #define HIF_EVENT_HIST_ENABLE_MASK 0x19 501 502 static inline uint64_t hif_get_log_timestamp(void) 503 { 504 return qdf_sched_clock(); 505 } 506 507 #endif 508 509 /** 510 * struct hif_event_record - an entry of the DP event history 511 * @hal_ring_id: ring id for which event is recorded 512 * @hp: head pointer of the ring (may not be applicable for all events) 513 * @tp: tail pointer of the ring (may not be applicable for all events) 514 * @cpu_id: cpu id on which the event occurred 515 * @timestamp: timestamp when event occurred 516 * @type: type of the event 517 * 518 * This structure represents the information stored for every datapath 519 * event which is logged in the history. 520 */ 521 struct hif_event_record { 522 uint8_t hal_ring_id; 523 uint32_t hp; 524 uint32_t tp; 525 int cpu_id; 526 uint64_t timestamp; 527 enum hif_event_type type; 528 }; 529 530 /** 531 * struct hif_event_misc - history related misc info 532 * @last_irq_index: last irq event index in history 533 * @last_irq_ts: last irq timestamp 534 */ 535 struct hif_event_misc { 536 int32_t last_irq_index; 537 uint64_t last_irq_ts; 538 }; 539 540 /** 541 * struct hif_event_history - history for one interrupt group 542 * @index: index to store new event 543 * @event: event entry 544 * 545 * This structure represents the datapath history for one 546 * interrupt group. 
547 */ 548 struct hif_event_history { 549 qdf_atomic_t index; 550 struct hif_event_misc misc; 551 struct hif_event_record event[HIF_EVENT_HIST_MAX]; 552 }; 553 554 /** 555 * hif_hist_record_event() - Record one datapath event in history 556 * @hif_ctx: HIF opaque context 557 * @event: DP event entry 558 * @intr_grp_id: interrupt group ID registered with hif 559 * 560 * Return: None 561 */ 562 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx, 563 struct hif_event_record *event, 564 uint8_t intr_grp_id); 565 566 /** 567 * hif_event_history_init() - Initialize SRNG event history buffers 568 * @hif_ctx: HIF opaque context 569 * @id: context group ID for which history is recorded 570 * 571 * Returns: None 572 */ 573 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id); 574 575 /** 576 * hif_event_history_deinit() - De-initialize SRNG event history buffers 577 * @hif_ctx: HIF opaque context 578 * @id: context group ID for which history is recorded 579 * 580 * Returns: None 581 */ 582 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id); 583 584 /** 585 * hif_record_event() - Wrapper function to form and record DP event 586 * @hif_ctx: HIF opaque context 587 * @intr_grp_id: interrupt group ID registered with hif 588 * @hal_ring_id: ring id for which event is recorded 589 * @hp: head pointer index of the srng 590 * @tp: tail pointer index of the srng 591 * @type: type of the event to be logged in history 592 * 593 * Return: None 594 */ 595 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx, 596 uint8_t intr_grp_id, 597 uint8_t hal_ring_id, 598 uint32_t hp, 599 uint32_t tp, 600 enum hif_event_type type) 601 { 602 struct hif_event_record event; 603 604 event.hal_ring_id = hal_ring_id; 605 event.hp = hp; 606 event.tp = tp; 607 event.type = type; 608 609 hif_hist_record_event(hif_ctx, &event, intr_grp_id); 610 611 return; 612 } 613 614 #else 615 616 static inline void hif_record_event(struct 
hif_opaque_softc *hif_ctx, 617 uint8_t intr_grp_id, 618 uint8_t hal_ring_id, 619 uint32_t hp, 620 uint32_t tp, 621 enum hif_event_type type) 622 { 623 } 624 625 static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx, 626 uint8_t id) 627 { 628 } 629 630 static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, 631 uint8_t id) 632 { 633 } 634 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ 635 636 void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx); 637 638 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) 639 void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx); 640 #else 641 static 642 inline void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx) {} 643 #endif 644 645 /** 646 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type 647 * 648 * @HIF_DEVICE_POWER_UP: HIF layer should power up interface and/or module 649 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to 650 * minimize power 651 * @HIF_DEVICE_POWER_CUT: HIF layer should initiate bus-specific AND/OR 652 * platform-specific measures to completely power-off 653 * the module and associated hardware (i.e. 
cut power
 *                        supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};

/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 * @HIF_ENABLE_TYPE_MAX: Max value
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};

/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 * @HIF_DISABLE_TYPE_MAX: Max value
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get FIFO address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
#endif

void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
		uint32_t value);
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);

#define HIF_MAX_DEVICES 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context: context to pass to the @dsr_handler
 *           note : @rw_compl_handler is provided the context
 *           passed to hif_read_write
 * @rw_compl_handler: Read / write completion handler
 * @dsr_handler: DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};

/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if target is ready
 * @get_bandwidth_level: Query current bandwidth level for the driver
 * @prealloc_get_consistent_mem_unaligned: get prealloc unaligned consistent mem
 * @prealloc_put_consistent_mem_unaligned: put unaligned consistent mem to pool
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
	int (*get_bandwidth_level)(void *context);
	void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size,
						       qdf_dma_addr_t *paddr,
						       uint32_t ring_type);
	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
};

/* This API detaches the HTC layer from the HIF device */
void hif_detach_htc(struct hif_opaque_softc *hif_ctx);

/****************************************************************/
/* BMI and Diag window abstraction                              */
/****************************************************************/

#define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))

#define DIAG_TRANSFER_LIMIT 2048U /* maximum number of bytes that can be
				   * handled atomically by
				   * DiagRead/DiagWrite
				   */

#ifdef WLAN_FEATURE_BMI
/*
 * API to handle HIF-specific BMI message exchanges, this API is synchronous
 * and only allowed to be called from a context that can block (sleep)
 */
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
				uint8_t *pSendMessage, uint32_t Length,
				uint8_t *pResponseMessage,
				uint32_t *pResponseLength, uint32_t TimeoutMS);
void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
#else /*
WLAN_FEATURE_BMI */ 808 static inline void 809 hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx) 810 { 811 } 812 813 static inline bool 814 hif_needs_bmi(struct hif_opaque_softc *hif_ctx) 815 { 816 return false; 817 } 818 #endif /* WLAN_FEATURE_BMI */ 819 820 #ifdef HIF_CPU_CLEAR_AFFINITY 821 /** 822 * hif_config_irq_clear_cpu_affinity() - Remove cpu affinity of IRQ 823 * @scn: HIF handle 824 * @intr_ctxt_id: interrupt group index 825 * @cpu: CPU core to clear 826 * 827 * Return: None 828 */ 829 void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn, 830 int intr_ctxt_id, int cpu); 831 #else 832 static inline 833 void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn, 834 int intr_ctxt_id, int cpu) 835 { 836 } 837 #endif 838 839 /* 840 * APIs to handle HIF specific diagnostic read accesses. These APIs are 841 * synchronous and only allowed to be called from a context that 842 * can block (sleep). They are not high performance APIs. 843 * 844 * hif_diag_read_access reads a 4 Byte aligned/length value from a 845 * Target register or memory word. 846 * 847 * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory. 848 */ 849 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx, 850 uint32_t address, uint32_t *data); 851 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address, 852 uint8_t *data, int nbytes); 853 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx, 854 void *ramdump_base, uint32_t address, uint32_t size); 855 /* 856 * APIs to handle HIF specific diagnostic write accesses. These APIs are 857 * synchronous and only allowed to be called from a context that 858 * can block (sleep). 859 * They are not high performance APIs. 860 * 861 * hif_diag_write_access writes a 4 Byte aligned/length value to a 862 * Target register or memory word. 863 * 864 * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory. 
865 */ 866 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx, 867 uint32_t address, uint32_t data); 868 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx, 869 uint32_t address, uint8_t *data, int nbytes); 870 871 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t); 872 873 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx); 874 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx); 875 876 /* 877 * Set the FASTPATH_mode_on flag in sc, for use by data path 878 */ 879 #ifdef WLAN_FEATURE_FASTPATH 880 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx); 881 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx); 882 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret); 883 884 /** 885 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler 886 * @handler: Callback funtcion 887 * @context: handle for callback function 888 * 889 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE 890 */ 891 QDF_STATUS hif_ce_fastpath_cb_register( 892 struct hif_opaque_softc *hif_ctx, 893 fastpath_msg_handler handler, void *context); 894 #else 895 static inline QDF_STATUS hif_ce_fastpath_cb_register( 896 struct hif_opaque_softc *hif_ctx, 897 fastpath_msg_handler handler, void *context) 898 { 899 return QDF_STATUS_E_FAILURE; 900 } 901 902 static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret) 903 { 904 return NULL; 905 } 906 907 #endif 908 909 /* 910 * Enable/disable CDC max performance workaround 911 * For max-performace set this to 0 912 * To allow SoC to enter sleep set this to 1 913 */ 914 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0 915 916 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx, 917 qdf_shared_mem_t **ce_sr, 918 uint32_t *ce_sr_ring_size, 919 qdf_dma_addr_t *ce_reg_paddr); 920 921 /** 922 * @brief List of callbacks - filled in by HTC. 
923 */ 924 struct hif_msg_callbacks { 925 void *Context; 926 /**< context meaningful to HTC */ 927 QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf, 928 uint32_t transferID, 929 uint32_t toeplitz_hash_result); 930 QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf, 931 uint8_t pipeID); 932 void (*txResourceAvailHandler)(void *context, uint8_t pipe); 933 void (*fwEventHandler)(void *context, QDF_STATUS status); 934 void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle); 935 }; 936 937 enum hif_target_status { 938 TARGET_STATUS_CONNECTED = 0, /* target connected */ 939 TARGET_STATUS_RESET, /* target got reset */ 940 TARGET_STATUS_EJECT, /* target got ejected */ 941 TARGET_STATUS_SUSPEND /*target got suspend */ 942 }; 943 944 /** 945 * enum hif_attribute_flags: configure hif 946 * 947 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE 948 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor 949 * + No pktlog CE 950 */ 951 enum hif_attribute_flags { 952 HIF_LOWDESC_CE_CFG = 1, 953 HIF_LOWDESC_CE_NO_PKTLOG_CFG 954 }; 955 956 #define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \ 957 (attr |= (v & 0x01) << 5) 958 #define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \ 959 (attr |= (v & 0x03) << 6) 960 #define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \ 961 (attr |= (v & 0x01) << 13) 962 #define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \ 963 (attr |= (v & 0x01) << 14) 964 #define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \ 965 (attr |= (v & 0x01) << 15) 966 #define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \ 967 (attr |= (v & 0x0FFF) << 16) 968 #define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \ 969 (attr |= (v & 0x01) << 30) 970 971 struct hif_ul_pipe_info { 972 unsigned int nentries; 973 unsigned int nentries_mask; 974 unsigned int sw_index; 975 unsigned int write_index; /* cached copy */ 976 unsigned int hw_index; /* cached copy */ 977 void *base_addr_owner_space; /* Host address 
space */ 978 qdf_dma_addr_t base_addr_CE_space; /* CE address space */ 979 }; 980 981 struct hif_dl_pipe_info { 982 unsigned int nentries; 983 unsigned int nentries_mask; 984 unsigned int sw_index; 985 unsigned int write_index; /* cached copy */ 986 unsigned int hw_index; /* cached copy */ 987 void *base_addr_owner_space; /* Host address space */ 988 qdf_dma_addr_t base_addr_CE_space; /* CE address space */ 989 }; 990 991 struct hif_pipe_addl_info { 992 uint32_t pci_mem; 993 uint32_t ctrl_addr; 994 struct hif_ul_pipe_info ul_pipe; 995 struct hif_dl_pipe_info dl_pipe; 996 }; 997 998 #ifdef CONFIG_SLUB_DEBUG_ON 999 #define MSG_FLUSH_NUM 16 1000 #else /* PERF build */ 1001 #define MSG_FLUSH_NUM 32 1002 #endif /* SLUB_DEBUG_ON */ 1003 1004 struct hif_bus_id; 1005 1006 void hif_claim_device(struct hif_opaque_softc *hif_ctx); 1007 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx, 1008 int opcode, void *config, uint32_t config_len); 1009 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx); 1010 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx); 1011 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC, 1012 struct hif_msg_callbacks *callbacks); 1013 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx); 1014 void hif_stop(struct hif_opaque_softc *hif_ctx); 1015 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx); 1016 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start); 1017 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx, 1018 uint8_t cmd_id, bool start); 1019 1020 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID, 1021 uint32_t transferID, uint32_t nbytes, 1022 qdf_nbuf_t wbuf, uint32_t data_attr); 1023 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID, 1024 int force); 1025 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t PipeID); 1026 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx); 1027 void 
hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe, 1028 uint8_t *DLPipe); 1029 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id, 1030 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled, 1031 int *dl_is_polled); 1032 uint16_t 1033 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID); 1034 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx); 1035 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset); 1036 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok, 1037 bool wait_for_it); 1038 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx); 1039 #ifndef HIF_PCI 1040 static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx) 1041 { 1042 return 0; 1043 } 1044 #else 1045 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx); 1046 #endif 1047 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version, 1048 u32 *revision, const char **target_name); 1049 1050 #ifdef RECEIVE_OFFLOAD 1051 /** 1052 * hif_offld_flush_cb_register() - Register the offld flush callback 1053 * @scn: HIF opaque context 1054 * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread 1055 * Or GRO/LRO flush when RxThread is not enabled. Called 1056 * with corresponding context for flush. 
1057 * Return: None 1058 */ 1059 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn, 1060 void (offld_flush_handler)(void *ol_ctx)); 1061 1062 /** 1063 * hif_offld_flush_cb_deregister() - deRegister the offld flush callback 1064 * @scn: HIF opaque context 1065 * 1066 * Return: None 1067 */ 1068 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn); 1069 #endif 1070 1071 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT 1072 /** 1073 * hif_exec_should_yield() - Check if hif napi context should yield 1074 * @hif_ctx - HIF opaque context 1075 * @grp_id - grp_id of the napi for which check needs to be done 1076 * 1077 * The function uses grp_id to look for NAPI and checks if NAPI needs to 1078 * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for 1079 * yield decision. 1080 * 1081 * Return: true if NAPI needs to yield, else false 1082 */ 1083 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id); 1084 #else 1085 static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, 1086 uint grp_id) 1087 { 1088 return false; 1089 } 1090 #endif 1091 1092 void hif_disable_isr(struct hif_opaque_softc *hif_ctx); 1093 void hif_reset_soc(struct hif_opaque_softc *hif_ctx); 1094 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx, 1095 int htc_htt_tx_endpoint); 1096 1097 /** 1098 * hif_open() - Create hif handle 1099 * @qdf_ctx: qdf context 1100 * @mode: Driver Mode 1101 * @bus_type: Bus Type 1102 * @cbk: CDS Callbacks 1103 * @psoc: psoc object manager 1104 * 1105 * API to open HIF Context 1106 * 1107 * Return: HIF Opaque Pointer 1108 */ 1109 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, 1110 uint32_t mode, 1111 enum qdf_bus_type bus_type, 1112 struct hif_driver_state_callbacks *cbk, 1113 struct wlan_objmgr_psoc *psoc); 1114 1115 /** 1116 * hif_init_dma_mask() - Set dma mask for the dev 1117 * @dev: dev for which DMA mask is to be set 1118 * @bus_type: bus type for the target 1119 * 1120 * This 
API sets the DMA mask for the device. before the datapath 1121 * memory pre-allocation is done. If the DMA mask is not set before 1122 * requesting the DMA memory, kernel defaults to a 32-bit DMA mask, 1123 * and does not utilize the full device capability. 1124 * 1125 * Return: 0 - success, non-zero on failure. 1126 */ 1127 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type); 1128 void hif_close(struct hif_opaque_softc *hif_ctx); 1129 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev, 1130 void *bdev, const struct hif_bus_id *bid, 1131 enum qdf_bus_type bus_type, 1132 enum hif_enable_type type); 1133 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type); 1134 #ifdef CE_TASKLET_DEBUG_ENABLE 1135 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, 1136 uint8_t value); 1137 #endif 1138 void hif_display_stats(struct hif_opaque_softc *hif_ctx); 1139 void hif_clear_stats(struct hif_opaque_softc *hif_ctx); 1140 1141 /** 1142 * enum hif_pm_wake_irq_type - Wake interrupt type for Power Management 1143 * HIF_PM_INVALID_WAKE: Wake irq is invalid or not configured 1144 * HIF_PM_MSI_WAKE: Wake irq is MSI interrupt 1145 * HIF_PM_CE_WAKE: Wake irq is CE interrupt 1146 */ 1147 typedef enum { 1148 HIF_PM_INVALID_WAKE, 1149 HIF_PM_MSI_WAKE, 1150 HIF_PM_CE_WAKE, 1151 } hif_pm_wake_irq_type; 1152 1153 /** 1154 * hif_pm_get_wake_irq_type - Get wake irq type for Power Management 1155 * @hif_ctx: HIF context 1156 * 1157 * Return: enum hif_pm_wake_irq_type 1158 */ 1159 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx); 1160 1161 /** 1162 * enum hif_ep_vote_type - hif ep vote type 1163 * HIF_EP_VOTE_DP_ACCESS: vote type is specific DP 1164 * HIF_EP_VOTE_NONDP_ACCESS: ep vote for over all access 1165 */ 1166 enum hif_ep_vote_type { 1167 HIF_EP_VOTE_DP_ACCESS, 1168 HIF_EP_VOTE_NONDP_ACCESS 1169 }; 1170 1171 /** 1172 * enum hif_ep_vote_access - hif ep vote access 1173 * 
HIF_EP_VOTE_ACCESS_ENABLE: Enable ep voting
 * HIF_EP_VOTE_INTERMEDIATE_ACCESS: allow during transition
 * HIF_EP_VOTE_ACCESS_DISABLE: disable ep voting
 */
enum hif_ep_vote_access {
	HIF_EP_VOTE_ACCESS_ENABLE,
	HIF_EP_VOTE_INTERMEDIATE_ACCESS,
	HIF_EP_VOTE_ACCESS_DISABLE
};

/**
 * enum hif_rtpm_client_id - modules registered with runtime pm module
 * @HIF_RTPM_ID_RESERVED: Reserved ID
 * @HIF_RTPM_ID_HAL_REO_CMD: HAL REO commands
 * @HIF_RTPM_ID_WMI: WMI commands Tx
 * @HIF_RTPM_ID_HTT: HTT commands Tx
 * @HIF_RTPM_ID_DP: Datapath Tx path
 * @HIF_RTPM_ID_DP_RING_STATS: Datapath ring stats
 * @HIF_RTPM_ID_CE: CE Tx buffer posting
 * @HIF_RTPM_ID_FORCE_WAKE: Force wake request
 * @HIF_RTPM_ID_PM_QOS_NOTIFY: PM QOS notify context
 * @HIF_RTPM_ID_WIPHY_SUSPEND: wiphy suspend context
 * @HIF_RTPM_ID_MAX: Max id
 */
enum hif_rtpm_client_id {
	HIF_RTPM_ID_RESERVED,
	HIF_RTPM_ID_HAL_REO_CMD,
	HIF_RTPM_ID_WMI,
	HIF_RTPM_ID_HTT,
	HIF_RTPM_ID_DP,
	HIF_RTPM_ID_DP_RING_STATS,
	HIF_RTPM_ID_CE,
	HIF_RTPM_ID_FORCE_WAKE,
	HIF_RTPM_ID_PM_QOS_NOTIFY,
	HIF_RTPM_ID_WIPHY_SUSPEND,
	HIF_RTPM_ID_MAX
};

/**
 * enum rpm_type - Get and Put calls types
 * HIF_RTPM_GET_ASYNC: Increment usage count and when system is suspended
 *		       schedule resume process, return depends on pm state.
 * HIF_RTPM_GET_FORCE: Increment usage count and when system is suspended
 *		       schedule resume process, returns success irrespective of
 *		       pm_state.
 * HIF_RTPM_GET_SYNC: Increment usage count and when system is suspended,
 *		      wait till process is resumed.
 * HIF_RTPM_GET_NORESUME: Only increments usage count.
 * HIF_RTPM_PUT_ASYNC: Decrements usage count and puts system in idle state.
 * HIF_RTPM_PUT_SYNC_SUSPEND: Decrements usage count and puts system in
 *			      suspended state.
 * HIF_RTPM_PUT_NOIDLE: Decrements usage count.
 */
enum rpm_type {
	HIF_RTPM_GET_ASYNC,
	HIF_RTPM_GET_FORCE,
	HIF_RTPM_GET_SYNC,
	HIF_RTPM_GET_NORESUME,
	HIF_RTPM_PUT_ASYNC,
	HIF_RTPM_PUT_SYNC_SUSPEND,
	HIF_RTPM_PUT_NOIDLE,
};

/**
 * struct hif_pm_runtime_lock - data structure for preventing runtime suspend
 * @list - global list of runtime locks
 * @active - true if this lock is preventing suspend
 * @name - character string for tracking this lock
 */
struct hif_pm_runtime_lock {
	struct list_head list;
	bool active;
	const char *name;
};

#ifdef FEATURE_RUNTIME_PM
/**
 * hif_rtpm_register() - Register a module with runtime PM.
 * @id: ID of the module which needs to be registered
 * @hif_rpm_cbk: callback to be called when get was called in suspended state.
 *
 * Return: success status if successfully registered
 */
QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void));

/**
 * hif_rtpm_deregister() - Deregister the module
 * @id: ID of the module which needs to be de-registered
 *
 * Return: success status if successfully de-registered
 */
QDF_STATUS hif_rtpm_deregister(uint32_t id);

/**
 * hif_runtime_lock_init() - API to initialize Runtime PM context
 * @lock: QDF lock context
 * @name: Context name
 *
 * This API initializes the Runtime PM context of the caller and
 * return the pointer.
1272 * 1273 * Return: None 1274 */ 1275 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name); 1276 1277 /** 1278 * hif_runtime_lock_deinit() - This API frees the runtime pm context 1279 * @data: Runtime PM context 1280 * 1281 * Return: void 1282 */ 1283 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data); 1284 1285 /** 1286 * hif_rtpm_get() - Increment usage_count on the device to avoid suspend. 1287 * @type: get call types from hif_rpm_type 1288 * @id: ID of the module calling get() 1289 * 1290 * A get operation will prevent a runtime suspend until a 1291 * corresponding put is done. This api should be used when accessing bus. 1292 * 1293 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED, 1294 * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!! 1295 * 1296 * return: success if a get has been issued, else error code. 1297 */ 1298 QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id); 1299 1300 /** 1301 * hif_pm_runtime_put() - do a put operation on the device 1302 * @type: put call types from hif_rpm_type 1303 * @id: ID of the module calling put() 1304 * 1305 * A put operation will allow a runtime suspend after a corresponding 1306 * get was done. This api should be used when finished accessing bus. 1307 * 1308 * This api will return a failure if runtime pm is stopped 1309 * This api will return failure if it would decrement the usage count below 0. 1310 * 1311 * return: QDF_STATUS_SUCCESS if the put is performed 1312 */ 1313 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id); 1314 1315 /** 1316 * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend 1317 * @data: runtime PM lock 1318 * 1319 * This function will prevent runtime suspend, by incrementing 1320 * device's usage count. 
1321 * 1322 * Return: status 1323 */ 1324 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data); 1325 1326 /** 1327 * hif_pm_runtime_allow_suspend() - Allow Runtime suspend 1328 * @data: runtime PM lock 1329 * 1330 * This function will allow runtime suspend, by decrementing 1331 * device's usage count. 1332 * 1333 * Return: status 1334 */ 1335 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data); 1336 1337 /** 1338 * hif_rtpm_request_resume() - Request resume if bus is suspended 1339 * 1340 * Return: None 1341 */ 1342 void hif_rtpm_request_resume(void); 1343 1344 /** 1345 * hif_rtpm_sync_resume() - Invoke synchronous runtime resume. 1346 * 1347 * This function will invoke synchronous runtime resume. 1348 * 1349 * Return: status 1350 */ 1351 QDF_STATUS hif_rtpm_sync_resume(void); 1352 1353 /** 1354 * hif_rtpm_check_and_request_resume() - check if bus is suspended and 1355 * request resume. 1356 * 1357 * Return: void 1358 */ 1359 void hif_rtpm_check_and_request_resume(void); 1360 1361 /** 1362 * hif_rtpm_set_client_job() - Set job for the client. 1363 * @client_id: Client id for which job needs to be set 1364 * 1365 * If get failed due to system being in suspended state, set the client job so 1366 * when system resumes the client's job is called. 
1367 * 1368 * Return: None 1369 */ 1370 void hif_rtpm_set_client_job(uint32_t client_id); 1371 1372 /** 1373 * hif_rtpm_mark_last_busy() - Mark last busy to delay retry to suspend 1374 * @id: ID marking last busy 1375 * 1376 * Return: None 1377 */ 1378 void hif_rtpm_mark_last_busy(uint32_t id); 1379 1380 /** 1381 * hif_rtpm_get_monitor_wake_intr() - API to get monitor_wake_intr 1382 * 1383 * monitor_wake_intr variable can be used to indicate if driver expects wake 1384 * MSI for runtime PM 1385 * 1386 * Return: monitor_wake_intr variable 1387 */ 1388 int hif_rtpm_get_monitor_wake_intr(void); 1389 1390 /** 1391 * hif_rtpm_set_monitor_wake_intr() - API to set monitor_wake_intr 1392 * @val: value to set 1393 * 1394 * monitor_wake_intr variable can be used to indicate if driver expects wake 1395 * MSI for runtime PM 1396 * 1397 * Return: void 1398 */ 1399 void hif_rtpm_set_monitor_wake_intr(int val); 1400 1401 /** 1402 * hif_pre_runtime_suspend() - book keeping before beginning runtime suspend. 1403 * @hif_ctx: HIF context 1404 * 1405 * Makes sure that the pci link will be taken down by the suspend opperation. 1406 * If the hif layer is configured to leave the bus on, runtime suspend will 1407 * not save any power. 1408 * 1409 * Set the runtime suspend state to SUSPENDING. 1410 * 1411 * return -EINVAL if the bus won't go down. otherwise return 0 1412 */ 1413 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx); 1414 1415 /** 1416 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume 1417 * 1418 * update the runtime pm state to RESUMING. 1419 * Return: void 1420 */ 1421 void hif_pre_runtime_resume(void); 1422 1423 /** 1424 * hif_process_runtime_suspend_success() - bookkeeping of suspend success 1425 * 1426 * Record the success. 
 * update the runtime_pm state to SUSPENDED
 * Return: void
 */
void hif_process_runtime_suspend_success(void);

/**
 * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
 *
 * Record the failure.
 * mark last busy to delay a retry.
 * update the runtime_pm state back to ON
 *
 * Return: void
 */
void hif_process_runtime_suspend_failure(void);

/**
 * hif_process_runtime_resume_linkup() - bookkeeping of resuming link up
 *
 * update the runtime_pm state to RESUMING_LINKUP
 * Return: void
 */
void hif_process_runtime_resume_linkup(void);

/**
 * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
 *
 * record the success.
 * update the runtime_pm state to ON
 * Return: void
 */
void hif_process_runtime_resume_success(void);

/**
 * hif_rtpm_print_prevent_list() - list the clients preventing suspend.
1462 * 1463 * Return: None 1464 */ 1465 void hif_rtpm_print_prevent_list(void); 1466 1467 /** 1468 * hif_rtpm_suspend_lock() - spin_lock on marking runtime suspend 1469 * 1470 * Return: void 1471 */ 1472 void hif_rtpm_suspend_lock(void); 1473 1474 /** 1475 * hif_rtpm_suspend_unlock() - spin_unlock on marking runtime suspend 1476 * 1477 * Return: void 1478 */ 1479 void hif_rtpm_suspend_unlock(void); 1480 1481 /** 1482 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend 1483 * @hif_ctx: HIF context 1484 * 1485 * Return: 0 for success and non-zero error code for failure 1486 */ 1487 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx); 1488 1489 /** 1490 * hif_runtime_resume() - do the bus resume part of a runtime resume 1491 * @hif_ctx: HIF context 1492 * 1493 * Return: 0 for success and non-zero error code for failure 1494 */ 1495 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx); 1496 1497 /** 1498 * hif_fastpath_resume() - resume fastpath for runtimepm 1499 * @hif_ctx: HIF context 1500 * 1501 * ensure that the fastpath write index register is up to date 1502 * since runtime pm may cause ce_send_fast to skip the register 1503 * write. 
1504 * 1505 * fastpath only applicable to legacy copy engine 1506 */ 1507 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx); 1508 #else 1509 static inline 1510 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void)) 1511 { return QDF_STATUS_SUCCESS; } 1512 1513 static inline 1514 QDF_STATUS hif_rtpm_deregister(uint32_t id) 1515 { return QDF_STATUS_SUCCESS; } 1516 1517 static inline 1518 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name) 1519 { return 0; } 1520 1521 static inline 1522 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data) 1523 {} 1524 1525 static inline 1526 int hif_rtpm_get(uint8_t type, uint32_t id) 1527 { return QDF_STATUS_SUCCESS; } 1528 1529 static inline 1530 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id) 1531 { return QDF_STATUS_SUCCESS; } 1532 1533 static inline 1534 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data) 1535 { return 0; } 1536 1537 static inline 1538 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data) 1539 { return 0; } 1540 1541 static inline 1542 QDF_STATUS hif_rtpm_sync_resume(void) 1543 { return QDF_STATUS_SUCCESS; } 1544 1545 static inline 1546 void hif_rtpm_request_resume(void) 1547 {} 1548 1549 static inline 1550 void hif_rtpm_check_and_request_resume(void) 1551 {} 1552 1553 static inline 1554 void hif_rtpm_set_client_job(uint32_t client_id) 1555 {} 1556 1557 static inline 1558 void hif_rtpm_print_prevent_list(void) 1559 {} 1560 1561 static inline 1562 void hif_rtpm_suspend_unlock(void) 1563 {} 1564 1565 static inline 1566 void hif_rtpm_suspend_lock(void) 1567 {} 1568 1569 static inline 1570 int hif_rtpm_get_monitor_wake_intr(void) 1571 { return 0; } 1572 1573 static inline 1574 void hif_rtpm_set_monitor_wake_intr(int val) 1575 {} 1576 1577 static inline 1578 void hif_rtpm_mark_last_busy(uint32_t id) 1579 {} 1580 #endif 1581 1582 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx, 1583 bool is_packet_log_enabled); 1584 
void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);

void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);

bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);

#ifdef IPA_OFFLOAD
/**
 * hif_get_ipa_hw_type() - get IPA hw type
 *
 * This API return the IPA hw type.
 *
 * Return: IPA hw type
 */
static inline
enum ipa_hw_type hif_get_ipa_hw_type(void)
{
	return ipa_get_hw_type();
}

/**
 * hif_get_ipa_present() - get IPA hw status
 *
 * This API return the IPA hw status.
 *
 * Return: true if IPA is present or false otherwise
 */
static inline
bool hif_get_ipa_present(void)
{
	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
		return true;
	else
		return false;
}
#endif
int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
/**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
 */
int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_late_resume() - resume non wmi traffic
 * @hif_ctx: hif context
 */
int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_irqs_enable() - Enables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
1643 * 1644 * Return: errno 1645 */ 1646 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx); 1647 1648 /** 1649 * hif_apps_irqs_disable() - Disables all irqs from the APPS side 1650 * @hif_ctx: an opaque HIF handle to use 1651 * 1652 * As opposed to the standard hif_irq_disable, this function always applies to 1653 * the APPS side kernel interrupt handling. 1654 * 1655 * Return: errno 1656 */ 1657 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx); 1658 1659 /** 1660 * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side 1661 * @hif_ctx: an opaque HIF handle to use 1662 * 1663 * As opposed to the standard hif_irq_enable, this function always applies to 1664 * the APPS side kernel interrupt handling. 1665 * 1666 * Return: errno 1667 */ 1668 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx); 1669 1670 /** 1671 * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side 1672 * @hif_ctx: an opaque HIF handle to use 1673 * 1674 * As opposed to the standard hif_irq_disable, this function always applies to 1675 * the APPS side kernel interrupt handling. 1676 * 1677 * Return: errno 1678 */ 1679 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx); 1680 1681 /** 1682 * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side 1683 * @hif_ctx: an opaque HIF handle to use 1684 * 1685 * This function always applies to the APPS side kernel interrupt handling 1686 * to wake the system from suspend. 1687 * 1688 * Return: errno 1689 */ 1690 int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx); 1691 1692 /** 1693 * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side 1694 * @hif_ctx: an opaque HIF handle to use 1695 * 1696 * This function always applies to the APPS side kernel interrupt handling 1697 * to disable the wake irq. 
1698 * 1699 * Return: errno 1700 */ 1701 int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx); 1702 1703 /** 1704 * hif_apps_enable_irqs_except_wake_irq() - Enables all irqs except wake_irq 1705 * @hif_ctx: an opaque HIF handle to use 1706 * 1707 * As opposed to the standard hif_irq_enable, this function always applies to 1708 * the APPS side kernel interrupt handling. 1709 * 1710 * Return: errno 1711 */ 1712 int hif_apps_enable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx); 1713 1714 /** 1715 * hif_apps_disable_irqs_except_wake_irq() - Disables all irqs except wake_irq 1716 * @hif_ctx: an opaque HIF handle to use 1717 * 1718 * As opposed to the standard hif_irq_disable, this function always applies to 1719 * the APPS side kernel interrupt handling. 1720 * 1721 * Return: errno 1722 */ 1723 int hif_apps_disable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx); 1724 1725 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size); 1726 int hif_dump_registers(struct hif_opaque_softc *scn); 1727 int ol_copy_ramdump(struct hif_opaque_softc *scn); 1728 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx); 1729 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version, 1730 u32 *revision, const char **target_name); 1731 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl); 1732 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc * 1733 scn); 1734 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx); 1735 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx); 1736 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx); 1737 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum 1738 hif_target_status); 1739 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx, 1740 struct hif_config_info *cfg); 1741 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls); 1742 qdf_nbuf_t 
hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, 1743 uint32_t transfer_id, u_int32_t len, uint32_t sendhead); 1744 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, 1745 uint32_t transfer_id, u_int32_t len); 1746 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf, 1747 uint32_t transfer_id, uint32_t download_len); 1748 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len); 1749 void hif_ce_war_disable(void); 1750 void hif_ce_war_enable(void); 1751 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num); 1752 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT 1753 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc, 1754 struct hif_pipe_addl_info *hif_info, uint32_t pipe_number); 1755 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, 1756 uint32_t pipe_num); 1757 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc); 1758 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */ 1759 1760 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled, 1761 int rx_bundle_cnt); 1762 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx); 1763 1764 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib); 1765 1766 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl); 1767 1768 enum hif_exec_type { 1769 HIF_EXEC_NAPI_TYPE, 1770 HIF_EXEC_TASKLET_TYPE, 1771 }; 1772 1773 typedef uint32_t (*ext_intr_handler)(void *, uint32_t); 1774 1775 /** 1776 * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id 1777 * @softc: hif opaque context owning the exec context 1778 * @id: the id of the interrupt context 1779 * 1780 * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID 1781 * 'id' registered with the OS 1782 */ 1783 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc, 1784 uint8_t id); 1785 1786 /** 1787 * hif_configure_ext_group_interrupts() - Congigure ext 
group interrupts
 * @hif_ctx: hif opaque context
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);

/**
 * hif_deconfigure_ext_group_interrupts() - Deconfigure ext group interrupts
 * @hif_ctx: hif opaque context
 *
 * Return: None
 */
void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);

/**
 * hif_register_ext_group() - API to register external group
 * interrupt handler.
 * @hif_ctx : HIF Context
 * @numirq: number of irq's in the group
 * @irq: array of irq values
 * @handler: callback interrupt handler function
 * @cb_ctx: context to passed in callback
 * @context_name: name identifying this interrupt context
 * @type: napi vs tasklet
 * @scale: scale factor for the group (NOTE(review): semantics not visible
 *	   here — confirm against the implementation)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
				  uint32_t numirq, uint32_t irq[],
				  ext_intr_handler handler,
				  void *cb_ctx, const char *context_name,
				  enum hif_exec_type type, uint32_t scale);

void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
			       const char *context_name);

void hif_update_pipe_callback(struct hif_opaque_softc *osc,
			      u_int8_t pipeid,
			      struct hif_msg_callbacks *callbacks);

/**
 * hif_print_napi_stats() - Display HIF NAPI stats
 * @hif_ctx - HIF opaque context
 *
 * Return: None
 */
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);

/* hif_clear_napi_stats() - function clears the stats of the
 * latency when called.
1837 * @hif_ctx - the HIF context to assign the callback to 1838 * 1839 * Return: None 1840 */ 1841 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx); 1842 1843 #ifdef __cplusplus 1844 } 1845 #endif 1846 1847 #ifdef FORCE_WAKE 1848 /** 1849 * hif_force_wake_request() - Function to wake from power collapse 1850 * @handle: HIF opaque handle 1851 * 1852 * Description: API to check if the device is awake or not before 1853 * read/write to BAR + 4K registers. If device is awake return 1854 * success otherwise write '1' to 1855 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG which will interrupt 1856 * the device and does wakeup the PCI and MHI within 50ms 1857 * and then the device writes a value to 1858 * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the 1859 * handshake process to let the host know the device is awake. 1860 * 1861 * Return: zero - success/non-zero - failure 1862 */ 1863 int hif_force_wake_request(struct hif_opaque_softc *handle); 1864 1865 /** 1866 * hif_force_wake_release() - API to release/reset the SOC wake register 1867 * from interrupting the device. 1868 * @handle: HIF opaque handle 1869 * 1870 * Description: API to set the 1871 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0' 1872 * to release the interrupt line. 1873 * 1874 * Return: zero - success/non-zero - failure 1875 */ 1876 int hif_force_wake_release(struct hif_opaque_softc *handle); 1877 #else 1878 static inline 1879 int hif_force_wake_request(struct hif_opaque_softc *handle) 1880 { 1881 return 0; 1882 } 1883 1884 static inline 1885 int hif_force_wake_release(struct hif_opaque_softc *handle) 1886 { 1887 return 0; 1888 } 1889 #endif /* FORCE_WAKE */ 1890 1891 #ifdef FEATURE_HAL_DELAYED_REG_WRITE 1892 /** 1893 * hif_prevent_link_low_power_states() - Prevent from going to low power states 1894 * @hif - HIF opaque context 1895 * 1896 * Return: 0 on success. Error code on failure. 
 */
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);

/**
 * hif_allow_link_low_power_states() - Allow link to go to low power states
 * @hif: HIF opaque context
 *
 * Return: None
 */
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);

#else

static inline
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
{
}
#endif

void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle);

/**
 * hif_get_soc_version() - get soc major version from target info
 * @hif_handle: the HIF context
 *
 * Return: version number
 */
uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle);

/**
 * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
 * @hif_ctx: the HIF context to assign the callback to
 * @callback: the callback to assign
 * @priv: the private data to pass to the callback when invoked
 *
 * Return: None
 */
void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
			       void (*callback)(void *),
			       void *priv);
/*
 * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * for defined here
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
ssize_t hif_dump_desc_trace_buf(struct device *dev,
				struct device_attribute *attr, char *buf);
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
				       const char *buf, size_t size);
ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
			    const char *buf, size_t size);
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */

/**
 * hif_set_ce_service_max_yield_time() - sets CE service max yield time
 * @hif: hif context
 * @ce_service_max_yield_time: CE service max yield time to set
 *
 * This API stores CE service max yield time in hif context based
 * on ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint32_t ce_service_max_yield_time);

/**
 * hif_get_ce_service_max_yield_time() - get CE service max yield time
 * @hif: hif context
 *
 * This API returns CE service max yield time.
 *
 * Return: CE service max yield time
 */
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);

/**
 * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
 * @hif: hif context
 * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
 *
 * This API stores CE service max rx ind flush in hif context based
 * on ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
					 uint8_t ce_service_max_rx_ind_flush);

#ifdef OL_ATH_SMART_LOGGING
/**
 * hif_log_dump_ce() - Copy all the CE DEST ring to buf
 * @scn: HIF handle
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy Engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and buffers pointed by them in to the given buf
 *
 * Return: Current pointer in ring buffer
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
			 uint8_t *buf_init, uint32_t buf_sz,
			 uint32_t ce, uint32_t skb_sz);
#endif /* OL_ATH_SMART_LOGGING */

/**
 * hif_softc_to_hif_opaque_softc() - API to convert hif_softc handle
 *                                   to hif_opaque_softc handle
 * @hif_handle: hif_softc type
 *
 * Return: hif_opaque_softc type
 */
static inline struct hif_opaque_softc *
hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
{
	return (struct hif_opaque_softc *)hif_handle;
}

#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx);
void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx);
void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx);
void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			    uint8_t type, uint8_t access);
uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			       uint8_t type);
#else
static inline QDF_STATUS
hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type, uint8_t access)
{
}

static inline uint8_t
hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type)
{
	return HIF_EP_VOTE_ACCESS_ENABLE;
}
#endif

#ifdef FORCE_WAKE
/**
 * hif_srng_init_phase() - Indicate srng initialization phase
 *                         to avoid force wake as UMAC power collapse
 *                         is not yet enabled
 * @hif_ctx: hif opaque handle
 * @init_phase: initialization phase
 *
 * Return: None
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase);
#else
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
}
#endif /* FORCE_WAKE */

#ifdef HIF_IPCI
/**
 * hif_shutdown_notifier_cb() - Callback for shutdown notifier
 * @ctx: hif handle
 *
 * Return: None
 */
void hif_shutdown_notifier_cb(void *ctx);
#else
static inline
void hif_shutdown_notifier_cb(void *ctx)
{
}
#endif /* HIF_IPCI */

#ifdef HIF_CE_LOG_INFO
/**
 * hif_log_ce_info() - API to log ce info
 * @scn: hif handle
 * @data: hang event data buffer
 * @offset: offset at which data needs to be written
 *
 * Return: None
 */
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset);
#else
static inline
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset)
{
}
#endif

#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * hif_config_irq_set_perf_affinity_hint() - API to set affinity
 * @hif_ctx: hif opaque handle
 *
 * This function is used to move the WLAN IRQs to perf cores in
 * case of defconfig builds.
 *
 * Return: None
 */
void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx);

#else
static inline void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx)
{
}
#endif

/**
 * hif_apps_grp_irqs_enable() - enable ext grp irqs
 * @hif_ctx: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_apps_grp_irqs_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_grp_irqs_disable() - disable ext grp irqs
 * @hif_ctx: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_apps_grp_irqs_disable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_disable_grp_irqs() - disable ext grp irqs
 * @scn: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_disable_grp_irqs(struct hif_opaque_softc *scn);

/**
 * hif_enable_grp_irqs() - enable ext grp irqs
 * @scn: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_enable_grp_irqs(struct hif_opaque_softc *scn);

enum hif_credit_exchange_type {
	HIF_REQUEST_CREDIT,
	HIF_PROCESS_CREDIT_REPORT,
};

enum hif_detect_latency_type {
	HIF_DETECT_TASKLET,
	HIF_DETECT_CREDIT,
	HIF_DETECT_UNKNOWN
};

#ifdef HIF_DETECTION_LATENCY_ENABLE
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx);

void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx);
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx);
void hif_tasklet_latency(struct hif_softc *scn, bool from_timer);
void hif_credit_latency(struct hif_softc *scn, bool from_timer);
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type);
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value);
#else
static inline
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx)
{}
static inline
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{}

static inline
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{}
#endif

#ifdef SYSTEM_PM_CHECK
/**
 * __hif_system_pm_set_state() - Set system pm state
 * @hif: hif opaque handle
 * @state: system state
 *
 * Return: None
 */
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state);

/**
 * hif_system_pm_set_state_on() - Set system pm state to ON
 * @hif: hif opaque handle
 *
 * Return: None
 */
static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_ON);
}

/**
 * hif_system_pm_set_state_resuming() - Set system pm state to resuming
 * @hif: hif opaque handle
 *
 * Return: None
 */
static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_RESUMING);
}

/**
 * hif_system_pm_set_state_suspending() - Set system pm state to suspending
 * @hif: hif opaque handle
 *
 * Return: None
 */
static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDING);
}

/**
 * hif_system_pm_set_state_suspended() - Set system pm state to suspended
 * @hif: hif opaque handle
 *
 * Return: None
 */
static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDED);
}

/**
 * hif_system_pm_get_state() - Get system pm state
 * @hif: hif opaque handle
 *
 * Return: system state
 */
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif);

/**
 * hif_system_pm_state_check() - Check system state and trigger resume
 *  if required
 * @hif: hif opaque handle
 *
 * Return: 0 if system is in on state else error code
 */
int hif_system_pm_state_check(struct hif_opaque_softc *hif);
#else
static inline
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
}

static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
}

static inline
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline int hif_system_pm_state_check(struct hif_opaque_softc *hif)
{
	return 0;
}
#endif

#ifdef FEATURE_IRQ_AFFINITY
/**
 * hif_set_grp_intr_affinity() - API to set affinity for grp
 *  intrs set in the bitmap
 * @scn: hif handle
 * @grp_intr_bitmask: grp intrs for which perf affinity should be
 *  applied
 * @perf: affine to perf or non-perf cluster
 *
 * Return: None
 */
void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
			       uint32_t grp_intr_bitmask, bool perf);
#else
static inline
void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
			       uint32_t grp_intr_bitmask, bool perf)
{
}
#endif
/**
 * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
 * @scn: hif opaque handle
 *
 * Description:
 * Gets number of WMI EPs configured in target svc map. Since EP map
 * include IN and OUT direction pipes, count only OUT pipes to get EPs
 * configured for WMI service.
 *
 * Return:
 * uint8_t: count for WMI eps in target svc map
 */
uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *scn);

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * hif_register_umac_reset_handler() - Register UMAC HW reset handler
 * @hif_scn: hif opaque handle
 * @handler: callback handler function
 * @cb_ctx: context to be passed to @handler
 * @irq: irq number to be used for UMAC HW reset interrupt
 *
 * Return: QDF_STATUS of operation
 */
QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
					   int (*handler)(void *cb_ctx),
					   void *cb_ctx, int irq);

/**
 * hif_unregister_umac_reset_handler() - Unregister UMAC HW reset handler
 * @hif_scn: hif opaque handle
 *
 * Return: QDF_STATUS of operation
 */
QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn);
#else
static inline
QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
					   int (*handler)(void *cb_ctx),
					   void *cb_ctx, int irq)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
{
	return QDF_STATUS_SUCCESS;
}

#endif /* DP_UMAC_HW_RESET_SUPPORT */

#endif /* _HIF_H_ */