1 /* 2 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #ifndef _HIF_H_ 20 #define _HIF_H_ 21 22 #ifdef __cplusplus 23 extern "C" { 24 #endif /* __cplusplus */ 25 26 /* Header files */ 27 #include <qdf_status.h> 28 #include "qdf_nbuf.h" 29 #include "qdf_lro.h" 30 #include "ol_if_athvar.h" 31 #include <linux/platform_device.h> 32 #ifdef HIF_PCI 33 #include <linux/pci.h> 34 #endif /* HIF_PCI */ 35 #ifdef HIF_USB 36 #include <linux/usb.h> 37 #endif /* HIF_USB */ 38 #ifdef IPA_OFFLOAD 39 #include <linux/ipa.h> 40 #endif 41 #include "cfg_ucfg_api.h" 42 #include "qdf_dev.h" 43 #include <wlan_init_cfg.h> 44 45 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1 46 47 typedef void __iomem *A_target_id_t; 48 typedef void *hif_handle_t; 49 50 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE) 51 #define HIF_WORK_DRAIN_WAIT_CNT 50 52 53 #define HIF_EP_WAKE_RESET_WAIT_CNT 10 54 #endif 55 56 #define HIF_TYPE_AR6002 2 57 #define HIF_TYPE_AR6003 3 58 #define HIF_TYPE_AR6004 5 59 #define HIF_TYPE_AR9888 6 60 #define HIF_TYPE_AR6320 7 61 #define HIF_TYPE_AR6320V2 8 62 /* For attaching Peregrine 2.0 board host_reg_tbl only */ 63 #define HIF_TYPE_AR9888V2 9 64 #define HIF_TYPE_ADRASTEA 10 65 #define 
HIF_TYPE_AR900B 11
#define HIF_TYPE_QCA9984 12
#define HIF_TYPE_IPQ4019 13
#define HIF_TYPE_QCA9888 14
#define HIF_TYPE_QCA8074 15
#define HIF_TYPE_QCA6290 16
#define HIF_TYPE_QCN7605 17
#define HIF_TYPE_QCA6390 18
#define HIF_TYPE_QCA8074V2 19
#define HIF_TYPE_QCA6018 20
#define HIF_TYPE_QCN9000 21
#define HIF_TYPE_QCA6490 22
#define HIF_TYPE_QCA6750 23
#define HIF_TYPE_QCA5018 24
#define HIF_TYPE_QCN6122 25
#define HIF_TYPE_WCN7850 26
#define HIF_TYPE_QCN9224 27
#define HIF_TYPE_QCA9574 28

/* Default DMA coherent mask width, in bits */
#define DMA_COHERENT_MASK_DEFAULT 37

#ifdef IPA_OFFLOAD
/* Narrower DMA coherent mask used when IPA HW version is below 3 */
#define DMA_COHERENT_MASK_BELOW_IPA_VER_3 32
#endif

/**
 * enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
}; 131 132 struct CE_state; 133 #ifdef QCA_WIFI_QCN9224 134 #define CE_COUNT_MAX 16 135 #else 136 #define CE_COUNT_MAX 12 137 #endif 138 139 #ifndef HIF_MAX_GROUP 140 #define HIF_MAX_GROUP WLAN_CFG_INT_NUM_CONTEXTS 141 #endif 142 143 #ifdef CONFIG_BERYLLIUM 144 #define HIF_MAX_GRP_IRQ 25 145 #else 146 #define HIF_MAX_GRP_IRQ 16 147 #endif 148 149 #ifndef NAPI_YIELD_BUDGET_BASED 150 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT 151 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4 152 #endif 153 #else /* NAPI_YIELD_BUDGET_BASED */ 154 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2 155 #endif /* NAPI_YIELD_BUDGET_BASED */ 156 157 #define QCA_NAPI_BUDGET 64 158 #define QCA_NAPI_DEF_SCALE \ 159 (1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT) 160 161 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE) 162 /* NOTE: "napi->scale" can be changed, 163 * but this does not change the number of buckets 164 */ 165 #define QCA_NAPI_NUM_BUCKETS 4 166 167 /** 168 * qca_napi_stat - stats structure for execution contexts 169 * @napi_schedules - number of times the schedule function is called 170 * @napi_polls - number of times the execution context runs 171 * @napi_completes - number of times that the generating interrupt is reenabled 172 * @napi_workdone - cumulative of all work done reported by handler 173 * @cpu_corrected - incremented when execution context runs on a different core 174 * than the one that its irq is affined to. 
 * @napi_budget_uses - histogram of work done per execution run
 * @time_limit_reached - count of yields due to time limit thresholds
 * @rxpkt_thresh_reached - count of yields due to a work limit
 * @napi_max_poll_time - longest single poll duration observed
 * @poll_time_buckets - histogram of poll times for the napi
 *
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};


/**
 * struct qca_napi_info - per NAPI instance data structure
 * @netdev: dummy net_dev backing this napi instance
 * @hif_ctx: hif context this instance belongs to
 * @napi: kernel napi_struct for this instance
 * @scale: currently same on all instances
 * @id: instance id
 * @cpu: cpu associated with this instance
 * @irq: irq number for this instance
 * @cpumask: cpu mask for this instance
 * @stats: per-cpu execution statistics
 * @offld_flush_cb: will only be present for data rx CE's
 * @rx_thread_napi: napi_struct used by the rx thread (RECEIVE_OFFLOAD)
 * @rx_thread_netdev: dummy net_dev for the rx thread napi
 * @lro_ctx: LRO context handle
 *
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device netdev; /* dummy net_dev */
	void *hif_ctx;
	struct napi_struct napi;
	uint8_t scale; /* currently same on all instances */
	uint8_t id;
	uint8_t cpu;
	int irq;
	cpumask_t cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct rx_thread_napi;
	struct net_device rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t lro_ctx;
};

enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};

enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP
};

/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state: up/down state of this core (see enum qca_napi_cpu_state)
 * @core_id: physical core id of the core
 * @cluster_id: cluster this core belongs to
 * @core_mask: mask to match all core of this cluster
 *
@thread_mask: mask for this core within the cluster
 * @max_freq: maximum clock this core can be clocked at
 *	same for all cpus of the same core.
 * @napis: bitmap of napi instances on this core
 * @execs: bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int core_id;
	int cluster_id;
	cpumask_t core_mask;
	cpumask_t thread_mask;
	unsigned int max_freq;
	uint32_t napis;
	uint32_t execs;
	int cluster_nxt;  /* index, not pointer */
};

/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: per-pipe array of created/registered NAPI instances
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: head index into @napi_cpu for the little cluster
 *	(assumed from naming; chained via cluster_nxt — confirm)
 * @bigcl_head: head index into @napi_cpu for the big cluster
 *	(assumed from naming; chained via cluster_nxt — confirm)
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: napi data flags; NOTE(review): semantics not visible here — confirm
 */
struct qca_napi_data {
	struct hif_softc *hif_softc;
	qdf_spinlock_t lock;
	uint32_t state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t ce_map;
	uint32_t exec_map;
	uint32_t user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu napi_cpu[NR_CPUS];
	int lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t flags;
};

/**
 * struct hif_config_info - Place Holder for HIF configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable Runtime PM (only when FEATURE_RUNTIME_PM)
 * @runtime_pm_delay: Runtime PM Delay (only when FEATURE_RUNTIME_PM)
 * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
 *
 * Structure for holding HIF ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	uint8_t enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
	uint64_t rx_softirq_max_yield_duration_ns;
};

/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: pointer to hardware name
 *
 * Structure to hold target information.
324 */ 325 struct hif_target_info { 326 uint32_t target_version; 327 uint32_t target_type; 328 uint32_t target_revision; 329 uint32_t soc_version; 330 char *hw_name; 331 }; 332 333 struct hif_opaque_softc { 334 }; 335 336 /** 337 * enum hif_event_type - Type of DP events to be recorded 338 * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event 339 * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event 340 * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event 341 * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event 342 * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event 343 * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event 344 */ 345 enum hif_event_type { 346 HIF_EVENT_IRQ_TRIGGER, 347 HIF_EVENT_TIMER_ENTRY, 348 HIF_EVENT_TIMER_EXIT, 349 HIF_EVENT_BH_SCHED, 350 HIF_EVENT_SRNG_ACCESS_START, 351 HIF_EVENT_SRNG_ACCESS_END, 352 /* Do check hif_hist_skip_event_record when adding new events */ 353 }; 354 355 /** 356 * enum hif_system_pm_state - System PM state 357 * HIF_SYSTEM_PM_STATE_ON: System in active state 358 * HIF_SYSTEM_PM_STATE_BUS_RESUMING: bus resume in progress as part of 359 * system resume 360 * HIF_SYSTEM_PM_STATE_BUS_SUSPENDING: bus suspend in progress as part of 361 * system suspend 362 * HIF_SYSTEM_PM_STATE_BUS_SUSPENDED: bus suspended as part of system suspend 363 */ 364 enum hif_system_pm_state { 365 HIF_SYSTEM_PM_STATE_ON, 366 HIF_SYSTEM_PM_STATE_BUS_RESUMING, 367 HIF_SYSTEM_PM_STATE_BUS_SUSPENDING, 368 HIF_SYSTEM_PM_STATE_BUS_SUSPENDED, 369 }; 370 371 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY 372 #define HIF_NUM_INT_CONTEXTS HIF_MAX_GROUP 373 374 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) 375 /* HIF_EVENT_HIST_MAX should always be power of 2 */ 376 #define HIF_EVENT_HIST_MAX 512 377 378 #define HIF_EVENT_HIST_ENABLE_MASK 0x3F 379 380 static inline uint64_t hif_get_log_timestamp(void) 381 { 382 return qdf_get_log_timestamp(); 383 } 384 385 #else 386 387 #define HIF_EVENT_HIST_MAX 32 388 /* Enable IRQ TRIGGER, NAPI SCHEDULE, SRNG 
ACCESS START */ 389 #define HIF_EVENT_HIST_ENABLE_MASK 0x19 390 391 static inline uint64_t hif_get_log_timestamp(void) 392 { 393 return qdf_sched_clock(); 394 } 395 396 #endif 397 398 /** 399 * struct hif_event_record - an entry of the DP event history 400 * @hal_ring_id: ring id for which event is recorded 401 * @hp: head pointer of the ring (may not be applicable for all events) 402 * @tp: tail pointer of the ring (may not be applicable for all events) 403 * @cpu_id: cpu id on which the event occurred 404 * @timestamp: timestamp when event occurred 405 * @type: type of the event 406 * 407 * This structure represents the information stored for every datapath 408 * event which is logged in the history. 409 */ 410 struct hif_event_record { 411 uint8_t hal_ring_id; 412 uint32_t hp; 413 uint32_t tp; 414 int cpu_id; 415 uint64_t timestamp; 416 enum hif_event_type type; 417 }; 418 419 /** 420 * struct hif_event_misc - history related misc info 421 * @last_irq_index: last irq event index in history 422 * @last_irq_ts: last irq timestamp 423 */ 424 struct hif_event_misc { 425 int32_t last_irq_index; 426 uint64_t last_irq_ts; 427 }; 428 429 /** 430 * struct hif_event_history - history for one interrupt group 431 * @index: index to store new event 432 * @event: event entry 433 * 434 * This structure represents the datapath history for one 435 * interrupt group. 
436 */ 437 struct hif_event_history { 438 qdf_atomic_t index; 439 struct hif_event_misc misc; 440 struct hif_event_record event[HIF_EVENT_HIST_MAX]; 441 }; 442 443 /** 444 * hif_hist_record_event() - Record one datapath event in history 445 * @hif_ctx: HIF opaque context 446 * @event: DP event entry 447 * @intr_grp_id: interrupt group ID registered with hif 448 * 449 * Return: None 450 */ 451 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx, 452 struct hif_event_record *event, 453 uint8_t intr_grp_id); 454 455 /** 456 * hif_event_history_init() - Initialize SRNG event history buffers 457 * @hif_ctx: HIF opaque context 458 * @id: context group ID for which history is recorded 459 * 460 * Returns: None 461 */ 462 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id); 463 464 /** 465 * hif_event_history_deinit() - De-initialize SRNG event history buffers 466 * @hif_ctx: HIF opaque context 467 * @id: context group ID for which history is recorded 468 * 469 * Returns: None 470 */ 471 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id); 472 473 /** 474 * hif_record_event() - Wrapper function to form and record DP event 475 * @hif_ctx: HIF opaque context 476 * @intr_grp_id: interrupt group ID registered with hif 477 * @hal_ring_id: ring id for which event is recorded 478 * @hp: head pointer index of the srng 479 * @tp: tail pointer index of the srng 480 * @type: type of the event to be logged in history 481 * 482 * Return: None 483 */ 484 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx, 485 uint8_t intr_grp_id, 486 uint8_t hal_ring_id, 487 uint32_t hp, 488 uint32_t tp, 489 enum hif_event_type type) 490 { 491 struct hif_event_record event; 492 493 event.hal_ring_id = hal_ring_id; 494 event.hp = hp; 495 event.tp = tp; 496 event.type = type; 497 498 hif_hist_record_event(hif_ctx, &event, intr_grp_id); 499 500 return; 501 } 502 503 #else 504 505 static inline void hif_record_event(struct 
hif_opaque_softc *hif_ctx, 506 uint8_t intr_grp_id, 507 uint8_t hal_ring_id, 508 uint32_t hp, 509 uint32_t tp, 510 enum hif_event_type type) 511 { 512 } 513 514 static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx, 515 uint8_t id) 516 { 517 } 518 519 static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, 520 uint8_t id) 521 { 522 } 523 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ 524 525 /** 526 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type 527 * 528 * @HIF_DEVICE_POWER_UP: HIF layer should power up interface and/or module 529 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to 530 * minimize power 531 * @HIF_DEVICE_POWER_CUT: HIF layer should initiate bus-specific AND/OR 532 * platform-specific measures to completely power-off 533 * the module and associated hardware (i.e. cut power 534 * supplies) 535 */ 536 enum HIF_DEVICE_POWER_CHANGE_TYPE { 537 HIF_DEVICE_POWER_UP, 538 HIF_DEVICE_POWER_DOWN, 539 HIF_DEVICE_POWER_CUT 540 }; 541 542 /** 543 * enum hif_enable_type: what triggered the enabling of hif 544 * 545 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable 546 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable 547 */ 548 enum hif_enable_type { 549 HIF_ENABLE_TYPE_PROBE, 550 HIF_ENABLE_TYPE_REINIT, 551 HIF_ENABLE_TYPE_MAX 552 }; 553 554 /** 555 * enum hif_disable_type: what triggered the disabling of hif 556 * 557 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable 558 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable 559 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable 560 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable 561 */ 562 enum hif_disable_type { 563 HIF_DISABLE_TYPE_PROBE_ERROR, 564 HIF_DISABLE_TYPE_REINIT_ERROR, 565 HIF_DISABLE_TYPE_REMOVE, 566 HIF_DISABLE_TYPE_SHUTDOWN, 567 HIF_DISABLE_TYPE_MAX 568 }; 569 /** 570 * enum hif_device_config_opcode: configure mode 571 * 572 * @HIF_DEVICE_POWER_STATE: device power state 573 * 
@HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get block (FIFO) address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/* One entry of the PCIe register access log */
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
#endif

void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
		   uint32_t value);
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);

#define HIF_MAX_DEVICES 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context: context to pass to the dsr handler
 *	note : rw_compl_handler is provided the context
 *	passed to hif_read_write
 * @rw_compl_handler: Read / write completion handler
 * @dsr_handler: DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};

/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 * @get_bandwidth_level: Query current bandwidth level for the driver
 * @prealloc_get_consistent_mem_unaligned: get prealloc unaligned consistent mem
 * @prealloc_put_consistent_mem_unaligned: put unaligned consistent mem to pool
 *
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
	int (*get_bandwidth_level)(void *context);
	void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size,
						       qdf_dma_addr_t *paddr,
						       uint32_t ring_type);
	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
};

/* This API detaches the HTC layer from the HIF device */
void hif_detach_htc(struct hif_opaque_softc *hif_ctx);

/****************************************************************/
/* BMI and Diag window abstraction                              */
/****************************************************************/

#define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))

#define DIAG_TRANSFER_LIMIT 2048U /* maximum number of bytes that can be
				   * handled atomically by
				   *
DiagRead/DiagWrite 673 */ 674 675 #ifdef WLAN_FEATURE_BMI 676 /* 677 * API to handle HIF-specific BMI message exchanges, this API is synchronous 678 * and only allowed to be called from a context that can block (sleep) 679 */ 680 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx, 681 qdf_dma_addr_t cmd, qdf_dma_addr_t rsp, 682 uint8_t *pSendMessage, uint32_t Length, 683 uint8_t *pResponseMessage, 684 uint32_t *pResponseLength, uint32_t TimeoutMS); 685 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx); 686 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx); 687 #else /* WLAN_FEATURE_BMI */ 688 static inline void 689 hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx) 690 { 691 } 692 693 static inline bool 694 hif_needs_bmi(struct hif_opaque_softc *hif_ctx) 695 { 696 return false; 697 } 698 #endif /* WLAN_FEATURE_BMI */ 699 700 #ifdef HIF_CPU_CLEAR_AFFINITY 701 /** 702 * hif_config_irq_clear_cpu_affinity() - Remove cpu affinity of IRQ 703 * @scn: HIF handle 704 * @intr_ctxt_id: interrupt group index 705 * @cpu: CPU core to clear 706 * 707 * Return: None 708 */ 709 void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn, 710 int intr_ctxt_id, int cpu); 711 #else 712 static inline 713 void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn, 714 int intr_ctxt_id, int cpu) 715 { 716 } 717 #endif 718 719 /* 720 * APIs to handle HIF specific diagnostic read accesses. These APIs are 721 * synchronous and only allowed to be called from a context that 722 * can block (sleep). They are not high performance APIs. 723 * 724 * hif_diag_read_access reads a 4 Byte aligned/length value from a 725 * Target register or memory word. 726 * 727 * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory. 
728 */ 729 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx, 730 uint32_t address, uint32_t *data); 731 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address, 732 uint8_t *data, int nbytes); 733 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx, 734 void *ramdump_base, uint32_t address, uint32_t size); 735 /* 736 * APIs to handle HIF specific diagnostic write accesses. These APIs are 737 * synchronous and only allowed to be called from a context that 738 * can block (sleep). 739 * They are not high performance APIs. 740 * 741 * hif_diag_write_access writes a 4 Byte aligned/length value to a 742 * Target register or memory word. 743 * 744 * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory. 745 */ 746 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx, 747 uint32_t address, uint32_t data); 748 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx, 749 uint32_t address, uint8_t *data, int nbytes); 750 751 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t); 752 753 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx); 754 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx); 755 756 /* 757 * Set the FASTPATH_mode_on flag in sc, for use by data path 758 */ 759 #ifdef WLAN_FEATURE_FASTPATH 760 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx); 761 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx); 762 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret); 763 764 /** 765 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler 766 * @handler: Callback funtcion 767 * @context: handle for callback function 768 * 769 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE 770 */ 771 QDF_STATUS hif_ce_fastpath_cb_register( 772 struct hif_opaque_softc *hif_ctx, 773 fastpath_msg_handler handler, void *context); 774 #else 775 static inline QDF_STATUS 
hif_ce_fastpath_cb_register( 776 struct hif_opaque_softc *hif_ctx, 777 fastpath_msg_handler handler, void *context) 778 { 779 return QDF_STATUS_E_FAILURE; 780 } 781 782 static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret) 783 { 784 return NULL; 785 } 786 787 #endif 788 789 /* 790 * Enable/disable CDC max performance workaround 791 * For max-performace set this to 0 792 * To allow SoC to enter sleep set this to 1 793 */ 794 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0 795 796 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx, 797 qdf_shared_mem_t **ce_sr, 798 uint32_t *ce_sr_ring_size, 799 qdf_dma_addr_t *ce_reg_paddr); 800 801 /** 802 * @brief List of callbacks - filled in by HTC. 803 */ 804 struct hif_msg_callbacks { 805 void *Context; 806 /**< context meaningful to HTC */ 807 QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf, 808 uint32_t transferID, 809 uint32_t toeplitz_hash_result); 810 QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf, 811 uint8_t pipeID); 812 void (*txResourceAvailHandler)(void *context, uint8_t pipe); 813 void (*fwEventHandler)(void *context, QDF_STATUS status); 814 void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle); 815 }; 816 817 enum hif_target_status { 818 TARGET_STATUS_CONNECTED = 0, /* target connected */ 819 TARGET_STATUS_RESET, /* target got reset */ 820 TARGET_STATUS_EJECT, /* target got ejected */ 821 TARGET_STATUS_SUSPEND /*target got suspend */ 822 }; 823 824 /** 825 * enum hif_attribute_flags: configure hif 826 * 827 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE 828 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor 829 * + No pktlog CE 830 */ 831 enum hif_attribute_flags { 832 HIF_LOWDESC_CE_CFG = 1, 833 HIF_LOWDESC_CE_NO_PKTLOG_CFG 834 }; 835 836 #define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \ 837 (attr |= (v & 0x01) << 5) 838 #define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \ 839 (attr |= (v & 0x03) << 
6) 840 #define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \ 841 (attr |= (v & 0x01) << 13) 842 #define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \ 843 (attr |= (v & 0x01) << 14) 844 #define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \ 845 (attr |= (v & 0x01) << 15) 846 #define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \ 847 (attr |= (v & 0x0FFF) << 16) 848 #define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \ 849 (attr |= (v & 0x01) << 30) 850 851 struct hif_ul_pipe_info { 852 unsigned int nentries; 853 unsigned int nentries_mask; 854 unsigned int sw_index; 855 unsigned int write_index; /* cached copy */ 856 unsigned int hw_index; /* cached copy */ 857 void *base_addr_owner_space; /* Host address space */ 858 qdf_dma_addr_t base_addr_CE_space; /* CE address space */ 859 }; 860 861 struct hif_dl_pipe_info { 862 unsigned int nentries; 863 unsigned int nentries_mask; 864 unsigned int sw_index; 865 unsigned int write_index; /* cached copy */ 866 unsigned int hw_index; /* cached copy */ 867 void *base_addr_owner_space; /* Host address space */ 868 qdf_dma_addr_t base_addr_CE_space; /* CE address space */ 869 }; 870 871 struct hif_pipe_addl_info { 872 uint32_t pci_mem; 873 uint32_t ctrl_addr; 874 struct hif_ul_pipe_info ul_pipe; 875 struct hif_dl_pipe_info dl_pipe; 876 }; 877 878 #ifdef CONFIG_SLUB_DEBUG_ON 879 #define MSG_FLUSH_NUM 16 880 #else /* PERF build */ 881 #define MSG_FLUSH_NUM 32 882 #endif /* SLUB_DEBUG_ON */ 883 884 struct hif_bus_id; 885 886 void hif_claim_device(struct hif_opaque_softc *hif_ctx); 887 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx, 888 int opcode, void *config, uint32_t config_len); 889 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx); 890 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx); 891 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC, 892 struct hif_msg_callbacks *callbacks); 893 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx); 894 void 
hif_stop(struct hif_opaque_softc *hif_ctx); 895 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx); 896 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start); 897 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx, 898 uint8_t cmd_id, bool start); 899 900 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID, 901 uint32_t transferID, uint32_t nbytes, 902 qdf_nbuf_t wbuf, uint32_t data_attr); 903 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID, 904 int force); 905 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx); 906 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe, 907 uint8_t *DLPipe); 908 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id, 909 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled, 910 int *dl_is_polled); 911 uint16_t 912 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID); 913 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx); 914 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset); 915 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok, 916 bool wait_for_it); 917 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx); 918 #ifndef HIF_PCI 919 static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx) 920 { 921 return 0; 922 } 923 #else 924 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx); 925 #endif 926 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version, 927 u32 *revision, const char **target_name); 928 929 #ifdef RECEIVE_OFFLOAD 930 /** 931 * hif_offld_flush_cb_register() - Register the offld flush callback 932 * @scn: HIF opaque context 933 * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread 934 * Or GRO/LRO flush when RxThread is not enabled. Called 935 * with corresponding context for flush. 
 * Return: None
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *ol_ctx));

/**
 * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
 * @scn: HIF opaque context
 *
 * Return: None
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
#endif

#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * hif_exec_should_yield() - Check if hif napi context should yield
 * @hif_ctx: HIF opaque context
 * @grp_id: grp_id of the napi for which check needs to be done
 *
 * The function uses grp_id to look for NAPI and checks if NAPI needs to
 * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
 * yield decision.
 *
 * Return: true if NAPI needs to yield, else false
 */
bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
#else
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
					 uint grp_id)
{
	return false;
}
#endif

void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
				      int htc_htt_tx_endpoint);

/**
 * hif_open() - Create hif handle
 * @qdf_ctx: qdf context
 * @mode: Driver Mode
 * @bus_type: Bus Type
 * @cbk: CDS Callbacks
 * @psoc: psoc object manager
 *
 * API to open HIF Context
 *
 * Return: HIF Opaque Pointer
 */
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
				  uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk,
				  struct wlan_objmgr_psoc *psoc);

/**
 * hif_init_dma_mask() - Set dma mask for the dev
 * @dev: dev for which DMA mask is to be set
 * @bus_type: bus type for the target
 *
 * This API sets the DMA mask for the device.
before the datapath 1000 * memory pre-allocation is done. If the DMA mask is not set before 1001 * requesting the DMA memory, kernel defaults to a 32-bit DMA mask, 1002 * and does not utilize the full device capability. 1003 * 1004 * Return: 0 - success, non-zero on failure. 1005 */ 1006 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type); 1007 void hif_close(struct hif_opaque_softc *hif_ctx); 1008 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev, 1009 void *bdev, const struct hif_bus_id *bid, 1010 enum qdf_bus_type bus_type, 1011 enum hif_enable_type type); 1012 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type); 1013 #ifdef CE_TASKLET_DEBUG_ENABLE 1014 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, 1015 uint8_t value); 1016 #endif 1017 void hif_display_stats(struct hif_opaque_softc *hif_ctx); 1018 void hif_clear_stats(struct hif_opaque_softc *hif_ctx); 1019 1020 /** 1021 * enum hif_pm_wake_irq_type - Wake interrupt type for Power Management 1022 * HIF_PM_INVALID_WAKE: Wake irq is invalid or not configured 1023 * HIF_PM_MSI_WAKE: Wake irq is MSI interrupt 1024 * HIF_PM_CE_WAKE: Wake irq is CE interrupt 1025 */ 1026 typedef enum { 1027 HIF_PM_INVALID_WAKE, 1028 HIF_PM_MSI_WAKE, 1029 HIF_PM_CE_WAKE, 1030 } hif_pm_wake_irq_type; 1031 1032 /** 1033 * hif_pm_get_wake_irq_type - Get wake irq type for Power Management 1034 * @hif_ctx: HIF context 1035 * 1036 * Return: enum hif_pm_wake_irq_type 1037 */ 1038 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx); 1039 1040 /** 1041 * enum wlan_rtpm_dbgid - runtime pm put/get debug id 1042 * @RTPM_ID_RESVERD: Reserved 1043 * @RTPM_ID_WMI: WMI sending msg, expect put happen at 1044 * tx completion from CE level directly. 
1045 * @RTPM_ID_HTC: pkt sending by HTT_DATA_MSG_SVC, expect 1046 * put from fw response or just in 1047 * htc_issue_packets 1048 * @RTPM_ID_QOS_NOTIFY: pm qos notifer 1049 * @RTPM_ID_DP_TX_DESC_ALLOC_FREE: tx desc alloc/free 1050 * @RTPM_ID_CE_SEND_FAST: operation in ce_send_fast, not include 1051 * the pkt put happens outside this function 1052 * @RTPM_ID_SUSPEND_RESUME: suspend/resume in hdd 1053 * @RTPM_ID_DW_TX_HW_ENQUEUE: operation in functin dp_tx_hw_enqueue 1054 * @RTPM_ID_HAL_REO_CMD: HAL_REO_CMD operation 1055 * @RTPM_ID_DP_PRINT_RING_STATS: operation in dp_print_ring_stats 1056 * @RTPM_ID_PM_STOP: operation in hif_pm_runtime_stop 1057 * @RTPM_ID_CONN_DISCONNECT:operation when issue disconnect 1058 * @RTPM_ID_SOC_REMOVE: operation in soc remove 1059 * @RTPM_ID_DRIVER_UNLOAD: operation in driver unload 1060 * @RTPM_ID_CE_INTR_HANDLER: operation from ce interrupt handler 1061 * @RTPM_ID_WAKE_INTR_HANDLER: operation from wake interrupt handler 1062 */ 1063 /* New value added to the enum must also be reflected in function 1064 * rtpm_string_from_dbgid() 1065 */ 1066 typedef enum { 1067 RTPM_ID_RESVERD = 0, 1068 RTPM_ID_WMI, 1069 RTPM_ID_HTC, 1070 RTPM_ID_QOS_NOTIFY, 1071 RTPM_ID_DP_TX_DESC_ALLOC_FREE, 1072 RTPM_ID_CE_SEND_FAST, 1073 RTPM_ID_SUSPEND_RESUME, 1074 RTPM_ID_DW_TX_HW_ENQUEUE, 1075 RTPM_ID_HAL_REO_CMD, 1076 RTPM_ID_DP_PRINT_RING_STATS, 1077 RTPM_ID_PM_STOP, 1078 RTPM_ID_CONN_DISCONNECT, 1079 RTPM_ID_SOC_REMOVE, 1080 RTPM_ID_DRIVER_UNLOAD, 1081 RTPM_ID_CE_INTR_HANDLER, 1082 RTPM_ID_WAKE_INTR_HANDLER, 1083 1084 RTPM_ID_MAX, 1085 } wlan_rtpm_dbgid; 1086 1087 /** 1088 * rtpm_string_from_dbgid() - Convert dbgid to respective string 1089 * @id - debug id 1090 * 1091 * Debug support function to convert dbgid to string. 1092 * Please note to add new string in the array at index equal to 1093 * its enum value in wlan_rtpm_dbgid. 
1094 */ 1095 static inline char *rtpm_string_from_dbgid(wlan_rtpm_dbgid id) 1096 { 1097 static const char *strings[] = { "RTPM_ID_RESVERD", 1098 "RTPM_ID_WMI", 1099 "RTPM_ID_HTC", 1100 "RTPM_ID_QOS_NOTIFY", 1101 "RTPM_ID_DP_TX_DESC_ALLOC_FREE", 1102 "RTPM_ID_CE_SEND_FAST", 1103 "RTPM_ID_SUSPEND_RESUME", 1104 "RTPM_ID_DW_TX_HW_ENQUEUE", 1105 "RTPM_ID_HAL_REO_CMD", 1106 "RTPM_ID_DP_PRINT_RING_STATS", 1107 "RTPM_ID_PM_STOP", 1108 "RTPM_ID_CONN_DISCONNECT", 1109 "RTPM_ID_SOC_REMOVE", 1110 "RTPM_ID_DRIVER_UNLOAD", 1111 "RTPM_ID_CE_INTR_HANDLER", 1112 "RTPM_ID_WAKE_INTR_HANDLER", 1113 "RTPM_ID_MAX"}; 1114 1115 return (char *)strings[id]; 1116 } 1117 1118 /** 1119 * enum hif_ep_vote_type - hif ep vote type 1120 * HIF_EP_VOTE_DP_ACCESS: vote type is specific DP 1121 * HIF_EP_VOTE_NONDP_ACCESS: ep vote for over all access 1122 */ 1123 enum hif_ep_vote_type { 1124 HIF_EP_VOTE_DP_ACCESS, 1125 HIF_EP_VOTE_NONDP_ACCESS 1126 }; 1127 1128 /** 1129 * enum hif_ep_vote_access - hif ep vote access 1130 * HIF_EP_VOTE_ACCESS_ENABLE: Enable ep voting 1131 * HIF_EP_VOTE_INTERMEDIATE_ACCESS: allow during transistion 1132 * HIF_EP_VOTE_ACCESS_DISABLE: disable ep voting 1133 */ 1134 enum hif_ep_vote_access { 1135 HIF_EP_VOTE_ACCESS_ENABLE, 1136 HIF_EP_VOTE_INTERMEDIATE_ACCESS, 1137 HIF_EP_VOTE_ACCESS_DISABLE 1138 }; 1139 1140 /** 1141 * enum hif_pm_link_state - hif link state 1142 * HIF_PM_LINK_STATE_DOWN: hif link state is down 1143 * HIF_PM_LINK_STATE_UP: hif link state is up 1144 */ 1145 enum hif_pm_link_state { 1146 HIF_PM_LINK_STATE_DOWN, 1147 HIF_PM_LINK_STATE_UP 1148 }; 1149 1150 /** 1151 * enum hif_pm_htc_stats - hif runtime PM stats for HTC layer 1152 * HIF_PM_HTC_STATS_GET_HTT_RESPONSE: PM stats for RTPM GET for HTT packets 1153 with response 1154 * HIF_PM_HTC_STATS_GET_HTT_NO_RESPONSE: PM stats for RTPM GET for HTT packets 1155 with no response 1156 * HIF_PM_HTC_STATS_PUT_HTT_RESPONSE: PM stats for RTPM PUT for HTT packets 1157 with response 1158 * 
HIF_PM_HTC_STATS_PUT_HTT_NO_RESPONSE: PM stats for RTPM PUT for HTT packets 1159 with no response 1160 * HIF_PM_HTC_STATS_PUT_HTT_ERROR: PM stats for RTPM PUT for failed HTT packets 1161 * HIF_PM_HTC_STATS_PUT_HTC_CLEANUP: PM stats for RTPM PUT during HTC cleanup 1162 * HIF_PM_HTC_STATS_GET_HTC_KICK_QUEUES: PM stats for RTPM GET done during 1163 * htc_kick_queues() 1164 * HIF_PM_HTC_STATS_PUT_HTC_KICK_QUEUES: PM stats for RTPM PUT done during 1165 * htc_kick_queues() 1166 * HIF_PM_HTC_STATS_GET_HTT_FETCH_PKTS: PM stats for RTPM GET while fetching 1167 * HTT packets from endpoint TX queue 1168 * HIF_PM_HTC_STATS_PUT_HTT_FETCH_PKTS: PM stats for RTPM PUT while fetching 1169 * HTT packets from endpoint TX queue 1170 */ 1171 enum hif_pm_htc_stats { 1172 HIF_PM_HTC_STATS_GET_HTT_RESPONSE, 1173 HIF_PM_HTC_STATS_GET_HTT_NO_RESPONSE, 1174 HIF_PM_HTC_STATS_PUT_HTT_RESPONSE, 1175 HIF_PM_HTC_STATS_PUT_HTT_NO_RESPONSE, 1176 HIF_PM_HTC_STATS_PUT_HTT_ERROR, 1177 HIF_PM_HTC_STATS_PUT_HTC_CLEANUP, 1178 HIF_PM_HTC_STATS_GET_HTC_KICK_QUEUES, 1179 HIF_PM_HTC_STATS_PUT_HTC_KICK_QUEUES, 1180 HIF_PM_HTC_STATS_GET_HTT_FETCH_PKTS, 1181 HIF_PM_HTC_STATS_PUT_HTT_FETCH_PKTS, 1182 }; 1183 1184 #ifdef FEATURE_RUNTIME_PM 1185 struct hif_pm_runtime_lock; 1186 1187 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx); 1188 int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx, 1189 wlan_rtpm_dbgid rtpm_dbgid); 1190 int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx, 1191 wlan_rtpm_dbgid rtpm_dbgid); 1192 int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx, 1193 wlan_rtpm_dbgid rtpm_dbgid); 1194 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx, 1195 wlan_rtpm_dbgid rtpm_dbgid, 1196 bool is_critical_ctx); 1197 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx, 1198 wlan_rtpm_dbgid rtpm_dbgid); 1199 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx, 1200 wlan_rtpm_dbgid rtpm_dbgid); 1201 int hif_pm_runtime_put_noidle(struct 
hif_opaque_softc *hif_ctx, 1202 wlan_rtpm_dbgid rtpm_dbgid); 1203 void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx); 1204 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name); 1205 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx, 1206 struct hif_pm_runtime_lock *lock); 1207 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc, 1208 struct hif_pm_runtime_lock *lock); 1209 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc, 1210 struct hif_pm_runtime_lock *lock); 1211 bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx); 1212 void hif_pm_runtime_suspend_lock(struct hif_opaque_softc *hif_ctx); 1213 void hif_pm_runtime_suspend_unlock(struct hif_opaque_softc *hif_ctx); 1214 int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx); 1215 void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx, 1216 int val); 1217 void hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx); 1218 void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx); 1219 int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx); 1220 qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx); 1221 int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx, 1222 wlan_rtpm_dbgid rtpm_dbgid); 1223 void hif_pm_runtime_update_stats(struct hif_opaque_softc *hif_ctx, 1224 wlan_rtpm_dbgid rtpm_dbgid, 1225 enum hif_pm_htc_stats stats); 1226 1227 /** 1228 * hif_pm_set_link_state() - set link state during RTPM 1229 * @hif_sc: HIF Context 1230 * 1231 * Return: None 1232 */ 1233 void hif_pm_set_link_state(struct hif_opaque_softc *hif_handle, uint8_t val); 1234 1235 /** 1236 * hif_is_link_state_up() - Is link state up 1237 * @hif_sc: HIF Context 1238 * 1239 * Return: 1 link is up, 0 link is down 1240 */ 1241 uint8_t hif_pm_get_link_state(struct hif_opaque_softc *hif_handle); 1242 #else 1243 struct hif_pm_runtime_lock { 1244 
const char *name; 1245 }; 1246 static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {} 1247 static inline int 1248 hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx, 1249 wlan_rtpm_dbgid rtpm_dbgid) 1250 { return 0; } 1251 static inline int 1252 hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx, 1253 wlan_rtpm_dbgid rtpm_dbgid) 1254 { return 0; } 1255 static inline int 1256 hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx, 1257 wlan_rtpm_dbgid rtpm_dbgid) 1258 { return 0; } 1259 static inline void 1260 hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx, 1261 wlan_rtpm_dbgid rtpm_dbgid) 1262 {} 1263 1264 static inline int 1265 hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx, wlan_rtpm_dbgid rtpm_dbgid, 1266 bool is_critical_ctx) 1267 { return 0; } 1268 static inline int 1269 hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx, wlan_rtpm_dbgid rtpm_dbgid) 1270 { return 0; } 1271 static inline int 1272 hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx, 1273 wlan_rtpm_dbgid rtpm_dbgid) 1274 { return 0; } 1275 static inline void 1276 hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx) {}; 1277 static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock, 1278 const char *name) 1279 { return 0; } 1280 static inline void 1281 hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx, 1282 struct hif_pm_runtime_lock *lock) {} 1283 1284 static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc, 1285 struct hif_pm_runtime_lock *lock) 1286 { return 0; } 1287 static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc, 1288 struct hif_pm_runtime_lock *lock) 1289 { return 0; } 1290 static inline bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx) 1291 { return false; } 1292 static inline void 1293 hif_pm_runtime_suspend_lock(struct hif_opaque_softc *hif_ctx) 1294 { return; } 1295 static inline void 1296 
hif_pm_runtime_suspend_unlock(struct hif_opaque_softc *hif_ctx) 1297 { return; } 1298 static inline int 1299 hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx) 1300 { return 0; } 1301 static inline void 1302 hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx, int val) 1303 { return; } 1304 static inline void 1305 hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx) 1306 { return; } 1307 static inline void 1308 hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx) {}; 1309 static inline int 1310 hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx) 1311 { return 0; } 1312 static inline qdf_time_t 1313 hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx) 1314 { return 0; } 1315 static inline int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx, 1316 wlan_rtpm_dbgid rtpm_dbgid) 1317 { return 0; } 1318 static inline 1319 void hif_pm_set_link_state(struct hif_opaque_softc *hif_handle, uint8_t val) 1320 {} 1321 1322 static inline 1323 void hif_pm_runtime_update_stats(struct hif_opaque_softc *hif_ctx, 1324 wlan_rtpm_dbgid rtpm_dbgid, 1325 enum hif_pm_htc_stats stats) 1326 {} 1327 #endif 1328 1329 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx, 1330 bool is_packet_log_enabled); 1331 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx); 1332 1333 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx); 1334 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx); 1335 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx); 1336 1337 #ifdef IPA_OFFLOAD 1338 /** 1339 * hif_get_ipa_hw_type() - get IPA hw type 1340 * 1341 * This API return the IPA hw type. 1342 * 1343 * Return: IPA hw type 1344 */ 1345 static inline 1346 enum ipa_hw_type hif_get_ipa_hw_type(void) 1347 { 1348 return ipa_get_hw_type(); 1349 } 1350 1351 /** 1352 * hif_get_ipa_present() - get IPA hw status 1353 * 1354 * This API return the IPA hw status. 
1355 * 1356 * Return: true if IPA is present or false otherwise 1357 */ 1358 static inline 1359 bool hif_get_ipa_present(void) 1360 { 1361 if (ipa_uc_reg_rdyCB(NULL) != -EPERM) 1362 return true; 1363 else 1364 return false; 1365 } 1366 #endif 1367 int hif_bus_resume(struct hif_opaque_softc *hif_ctx); 1368 /** 1369 * hif_bus_ealry_suspend() - stop non wmi tx traffic 1370 * @context: hif context 1371 */ 1372 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx); 1373 1374 /** 1375 * hif_bus_late_resume() - resume non wmi traffic 1376 * @context: hif context 1377 */ 1378 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx); 1379 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx); 1380 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx); 1381 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx); 1382 1383 /** 1384 * hif_apps_irqs_enable() - Enables all irqs from the APPS side 1385 * @hif_ctx: an opaque HIF handle to use 1386 * 1387 * As opposed to the standard hif_irq_enable, this function always applies to 1388 * the APPS side kernel interrupt handling. 1389 * 1390 * Return: errno 1391 */ 1392 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx); 1393 1394 /** 1395 * hif_apps_irqs_disable() - Disables all irqs from the APPS side 1396 * @hif_ctx: an opaque HIF handle to use 1397 * 1398 * As opposed to the standard hif_irq_disable, this function always applies to 1399 * the APPS side kernel interrupt handling. 1400 * 1401 * Return: errno 1402 */ 1403 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx); 1404 1405 /** 1406 * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side 1407 * @hif_ctx: an opaque HIF handle to use 1408 * 1409 * As opposed to the standard hif_irq_enable, this function always applies to 1410 * the APPS side kernel interrupt handling. 
1411 * 1412 * Return: errno 1413 */ 1414 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx); 1415 1416 /** 1417 * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side 1418 * @hif_ctx: an opaque HIF handle to use 1419 * 1420 * As opposed to the standard hif_irq_disable, this function always applies to 1421 * the APPS side kernel interrupt handling. 1422 * 1423 * Return: errno 1424 */ 1425 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx); 1426 1427 /** 1428 * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side 1429 * @hif_ctx: an opaque HIF handle to use 1430 * 1431 * This function always applies to the APPS side kernel interrupt handling 1432 * to wake the system from suspend. 1433 * 1434 * Return: errno 1435 */ 1436 int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx); 1437 1438 /** 1439 * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side 1440 * @hif_ctx: an opaque HIF handle to use 1441 * 1442 * This function always applies to the APPS side kernel interrupt handling 1443 * to disable the wake irq. 1444 * 1445 * Return: errno 1446 */ 1447 int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx); 1448 1449 /** 1450 * hif_apps_enable_irqs_except_wake_irq() - Enables all irqs except wake_irq 1451 * @hif_ctx: an opaque HIF handle to use 1452 * 1453 * As opposed to the standard hif_irq_enable, this function always applies to 1454 * the APPS side kernel interrupt handling. 1455 * 1456 * Return: errno 1457 */ 1458 int hif_apps_enable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx); 1459 1460 /** 1461 * hif_apps_disable_irqs_except_wake_irq() - Disables all irqs except wake_irq 1462 * @hif_ctx: an opaque HIF handle to use 1463 * 1464 * As opposed to the standard hif_irq_disable, this function always applies to 1465 * the APPS side kernel interrupt handling. 
1466 * 1467 * Return: errno 1468 */ 1469 int hif_apps_disable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx); 1470 1471 #ifdef FEATURE_RUNTIME_PM 1472 void hif_print_runtime_pm_prevent_list(struct hif_opaque_softc *hif_ctx); 1473 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx); 1474 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx); 1475 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx); 1476 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx); 1477 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx); 1478 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx); 1479 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx); 1480 #else 1481 static inline void 1482 hif_print_runtime_pm_prevent_list(struct hif_opaque_softc *hif_ctx) 1483 {} 1484 #endif 1485 1486 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size); 1487 int hif_dump_registers(struct hif_opaque_softc *scn); 1488 int ol_copy_ramdump(struct hif_opaque_softc *scn); 1489 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx); 1490 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version, 1491 u32 *revision, const char **target_name); 1492 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl); 1493 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc * 1494 scn); 1495 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx); 1496 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx); 1497 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx); 1498 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum 1499 hif_target_status); 1500 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx, 1501 struct hif_config_info *cfg); 1502 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls); 1503 qdf_nbuf_t hif_batch_send(struct 
hif_opaque_softc *osc, qdf_nbuf_t msdu, 1504 uint32_t transfer_id, u_int32_t len, uint32_t sendhead); 1505 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, 1506 uint32_t transfer_id, u_int32_t len); 1507 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf, 1508 uint32_t transfer_id, uint32_t download_len); 1509 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len); 1510 void hif_ce_war_disable(void); 1511 void hif_ce_war_enable(void); 1512 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num); 1513 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT 1514 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc, 1515 struct hif_pipe_addl_info *hif_info, uint32_t pipe_number); 1516 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, 1517 uint32_t pipe_num); 1518 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc); 1519 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */ 1520 1521 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled, 1522 int rx_bundle_cnt); 1523 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx); 1524 1525 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib); 1526 1527 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl); 1528 1529 enum hif_exec_type { 1530 HIF_EXEC_NAPI_TYPE, 1531 HIF_EXEC_TASKLET_TYPE, 1532 }; 1533 1534 typedef uint32_t (*ext_intr_handler)(void *, uint32_t); 1535 1536 /** 1537 * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id 1538 * @softc: hif opaque context owning the exec context 1539 * @id: the id of the interrupt context 1540 * 1541 * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID 1542 * 'id' registered with the OS 1543 */ 1544 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc, 1545 uint8_t id); 1546 1547 /** 1548 * hif_configure_ext_group_interrupts() - Congigure ext group intrrupts 1549 * 
@hif_ctx: hif opaque context 1550 * 1551 * Return: QDF_STATUS 1552 */ 1553 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx); 1554 1555 /** 1556 * hif_deconfigure_ext_group_interrupts() - Deconfigure ext group intrrupts 1557 * @hif_ctx: hif opaque context 1558 * 1559 * Return: None 1560 */ 1561 void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx); 1562 1563 /** 1564 * hif_register_ext_group() - API to register external group 1565 * interrupt handler. 1566 * @hif_ctx : HIF Context 1567 * @numirq: number of irq's in the group 1568 * @irq: array of irq values 1569 * @handler: callback interrupt handler function 1570 * @cb_ctx: context to passed in callback 1571 * @type: napi vs tasklet 1572 * 1573 * Return: QDF_STATUS 1574 */ 1575 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx, 1576 uint32_t numirq, uint32_t irq[], 1577 ext_intr_handler handler, 1578 void *cb_ctx, const char *context_name, 1579 enum hif_exec_type type, uint32_t scale); 1580 1581 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx, 1582 const char *context_name); 1583 1584 void hif_update_pipe_callback(struct hif_opaque_softc *osc, 1585 u_int8_t pipeid, 1586 struct hif_msg_callbacks *callbacks); 1587 1588 /** 1589 * hif_print_napi_stats() - Display HIF NAPI stats 1590 * @hif_ctx - HIF opaque context 1591 * 1592 * Return: None 1593 */ 1594 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx); 1595 1596 /* hif_clear_napi_stats() - function clears the stats of the 1597 * latency when called. 
1598 * @hif_ctx - the HIF context to assign the callback to 1599 * 1600 * Return: None 1601 */ 1602 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx); 1603 1604 #ifdef __cplusplus 1605 } 1606 #endif 1607 1608 #ifdef FORCE_WAKE 1609 /** 1610 * hif_force_wake_request() - Function to wake from power collapse 1611 * @handle: HIF opaque handle 1612 * 1613 * Description: API to check if the device is awake or not before 1614 * read/write to BAR + 4K registers. If device is awake return 1615 * success otherwise write '1' to 1616 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG which will interrupt 1617 * the device and does wakeup the PCI and MHI within 50ms 1618 * and then the device writes a value to 1619 * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the 1620 * handshake process to let the host know the device is awake. 1621 * 1622 * Return: zero - success/non-zero - failure 1623 */ 1624 int hif_force_wake_request(struct hif_opaque_softc *handle); 1625 1626 /** 1627 * hif_force_wake_release() - API to release/reset the SOC wake register 1628 * from interrupting the device. 1629 * @handle: HIF opaque handle 1630 * 1631 * Description: API to set the 1632 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0' 1633 * to release the interrupt line. 1634 * 1635 * Return: zero - success/non-zero - failure 1636 */ 1637 int hif_force_wake_release(struct hif_opaque_softc *handle); 1638 #else 1639 static inline 1640 int hif_force_wake_request(struct hif_opaque_softc *handle) 1641 { 1642 return 0; 1643 } 1644 1645 static inline 1646 int hif_force_wake_release(struct hif_opaque_softc *handle) 1647 { 1648 return 0; 1649 } 1650 #endif /* FORCE_WAKE */ 1651 1652 #ifdef FEATURE_HAL_DELAYED_REG_WRITE 1653 /** 1654 * hif_prevent_link_low_power_states() - Prevent from going to low power states 1655 * @hif - HIF opaque context 1656 * 1657 * Return: 0 on success. Error code on failure. 
1658 */ 1659 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif); 1660 1661 /** 1662 * hif_allow_link_low_power_states() - Allow link to go to low power states 1663 * @hif - HIF opaque context 1664 * 1665 * Return: None 1666 */ 1667 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif); 1668 1669 #else 1670 1671 static inline 1672 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif) 1673 { 1674 return 0; 1675 } 1676 1677 static inline 1678 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif) 1679 { 1680 } 1681 #endif 1682 1683 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle); 1684 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle); 1685 1686 /** 1687 * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function 1688 * @hif_ctx - the HIF context to assign the callback to 1689 * @callback - the callback to assign 1690 * @priv - the private data to pass to the callback when invoked 1691 * 1692 * Return: None 1693 */ 1694 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx, 1695 void (*callback)(void *), 1696 void *priv); 1697 /* 1698 * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked 1699 * for defined here 1700 */ 1701 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) 1702 ssize_t hif_dump_desc_trace_buf(struct device *dev, 1703 struct device_attribute *attr, char *buf); 1704 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn, 1705 const char *buf, size_t size); 1706 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, 1707 const char *buf, size_t size); 1708 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf); 1709 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf); 1710 #endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/ 1711 1712 /** 1713 * hif_set_ce_service_max_yield_time() - sets CE service max yield time 1714 * @hif: hif context 1715 * 
 * @ce_service_max_yield_time: CE service max yield time to set
 *
 * This API stores CE service max yield time in hif context based
 * on ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint32_t ce_service_max_yield_time);

/**
 * hif_get_ce_service_max_yield_time() - get CE service max yield time
 * @hif: hif context
 *
 * This API returns CE service max yield time.
 *
 * Return: CE service max yield time
 */
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);

/**
 * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
 * @hif: hif context
 * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
 *
 * This API stores CE service max rx ind flush in hif context based
 * on ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
					 uint8_t ce_service_max_rx_ind_flush);

#ifdef OL_ATH_SMART_LOGGING
/**
 * hif_log_dump_ce() - Copy all the CE DEST ring to buf
 * @scn: HIF handler
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy Engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and buffers pointed by them in to the given buf
 *
 * Return: Current pointer in ring buffer
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
			 uint8_t *buf_init, uint32_t buf_sz,
			 uint32_t ce, uint32_t skb_sz);
#endif /* OL_ATH_SMART_LOGGING */

/**
 * hif_softc_to_hif_opaque_softc() - API to convert hif_softc handle
 * to hif_opaque_softc handle
 * @hif_handle: hif_softc type
 *
 * Return: hif_opaque_softc type
 */
static inline struct hif_opaque_softc *
hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
{
	/* Pure type-erasing cast; hif_opaque_softc is the opaque public view */
	return (struct hif_opaque_softc *)hif_handle;
}

#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
/* Endpoint vote-access control, only meaningful for IPCI with delayed
 * register writes enabled; see HIF_EP_WAKE_RESET_WAIT_CNT above.
 */
QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx);
void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx);
void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx);
void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			    uint8_t type, uint8_t access);
uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			       uint8_t type);
#else
/* Feature disabled: stubs report success and access always enabled */
static inline QDF_STATUS
hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type, uint8_t access)
{
}

static inline uint8_t
hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type)
{
	return HIF_EP_VOTE_ACCESS_ENABLE;
}
#endif

#ifdef FORCE_WAKE
/**
 * hif_srng_init_phase() - Indicate srng initialization phase
 * to avoid force wake as UMAC power collapse is not yet
 * enabled
 * @hif_ctx: hif opaque handle
 * @init_phase: initialization phase
 *
 * Return: None
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase);
#else
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
}
#endif /* FORCE_WAKE */

#ifdef HIF_IPCI
/**
 *
 * hif_shutdown_notifier_cb() - Call back for shutdown notifier
 * @ctx: hif handle
 *
 * Return: None
 */
void hif_shutdown_notifier_cb(void *ctx);
#else
static inline
void hif_shutdown_notifier_cb(void *ctx)
{
}
#endif /* HIF_IPCI */

#ifdef HIF_CE_LOG_INFO
/**
 * hif_log_ce_info() - API to log ce info
 * @scn: hif handle
 * @data: hang event data buffer
 * @offset: offset at which data needs to be written
 *
 * Return: None
 */
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset);
#else
static inline
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset)
{
}
#endif

#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * hif_config_irq_set_perf_affinity_hint() - API to set affinity
 * @hif_ctx: hif opaque handle
 *
 * This function is used to move the WLAN IRQs to perf cores in
 * case of defconfig builds.
 *
 * Return: None
 */
void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx);

#else
static inline void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx)
{
}
#endif

/**
 * hif_apps_grp_irqs_enable() - enable ext grp irqs
 * @hif_ctx: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_apps_grp_irqs_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_grp_irqs_disable() - disable ext grp irqs
 * @hif_ctx: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_apps_grp_irqs_disable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_disable_grp_irqs() - disable ext grp irqs
 * @scn: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_disable_grp_irqs(struct hif_opaque_softc *scn);

/**
 * hif_enable_grp_irqs() - enable ext grp irqs
 * @scn: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_enable_grp_irqs(struct hif_opaque_softc *scn);

/**
 * enum hif_credit_exchange_type - kind of credit-exchange event recorded by
 * the latency detection logic
 * @HIF_REQUEST_CREDIT: a credit request event
 * @HIF_PROCESS_CREDIT_REPORT: a credit-report processing event
 */
enum hif_credit_exchange_type {
	HIF_REQUEST_CREDIT,
	HIF_PROCESS_CREDIT_REPORT,
};

/**
 * enum hif_detect_latency_type - source the latency detection refers to
 * @HIF_DETECT_TASKLET: tasklet latency (see hif_tasklet_latency())
 * @HIF_DETECT_CREDIT: credit latency (see hif_credit_latency())
 * @HIF_DETECT_UNKNOWN: unspecified source
 */
enum hif_detect_latency_type {
	HIF_DETECT_TASKLET,
	HIF_DETECT_CREDIT,
	HIF_DETECT_UNKNOWN
};

#ifdef HIF_DETECTION_LATENCY_ENABLE
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx);

void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx);
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx);
void hif_tasklet_latency(struct hif_softc *scn, bool from_timer);
void hif_credit_latency(struct hif_softc *scn, bool from_timer);
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type);
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value);
#else
/* Latency detection disabled: no-op stubs.  Note hif_tasklet_latency() and
 * hif_credit_latency() intentionally have no stubs; their callers are
 * expected to be guarded by HIF_DETECTION_LATENCY_ENABLE themselves.
 */
static inline
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx)
{}
static inline
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{}

static inline
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{}
#endif

#ifdef SYSTEM_PM_CHECK
/**
 * __hif_system_pm_set_state() - Set system pm state
 * @hif: hif opaque handle
 * @state: system state
 *
 * Return: None
 */
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state);

/**
 * hif_system_pm_set_state_on() - Set system pm state to ON
 * @hif: hif opaque handle
 *
 * Return: None
 */
static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_ON);
}

/**
 * hif_system_pm_set_state_resuming() - Set system pm state to resuming
 * @hif: hif opaque handle
 *
 * Return: None
 */
static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_RESUMING);
}

/**
 * hif_system_pm_set_state_suspending() - Set system pm state to suspending
 * @hif: hif opaque handle
 *
 * Return: None
 */
static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDING);
}

/**
 * hif_system_pm_set_state_suspended() - Set system pm state to suspended
 * @hif: hif opaque handle
 *
 * Return: None
 */
static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDED);
}

/**
 * hif_system_pm_get_state() - Get system pm state
 * @hif: hif opaque handle
 *
 * Return: system state
 */
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif);

/**
 * hif_system_pm_state_check() - Check system state and trigger resume
 * if required
 * @hif: hif opaque handle
 *
 * Return: 0 if system is in on state else error code
 */
int hif_system_pm_state_check(struct hif_opaque_softc *hif);
#else
/* SYSTEM_PM_CHECK disabled: state tracking becomes a no-op and the check
 * always reports the ON state (0), so callers need no #ifdef guards.
 */
static inline
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
}

static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
}

static inline
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline int hif_system_pm_state_check(struct hif_opaque_softc *hif)
{
	return 0;
}
#endif
#endif /* _HIF_H_ */