1 /* 2 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. 3 * 4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc. 5 * 6 * 7 * Permission to use, copy, modify, and/or distribute this software for 8 * any purpose with or without fee is hereby granted, provided that the 9 * above copyright notice and this permission notice appear in all 10 * copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 19 * PERFORMANCE OF THIS SOFTWARE. 20 */ 21 22 /* 23 * This file was originally distributed by Qualcomm Atheros, Inc. 24 * under proprietary terms before Copyright ownership was assigned 25 * to the Linux Foundation. 
 */

#ifndef _HIF_H_
#define _HIF_H_

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/* Header files */
#include <qdf_status.h>
#include "qdf_nbuf.h"
#include "qdf_lro.h"
#include "ol_if_athvar.h"
#include <linux/platform_device.h>
#ifdef HIF_PCI
#include <linux/pci.h>
#endif /* HIF_PCI */
#ifdef HIF_USB
#include <linux/usb.h>
#endif /* HIF_USB */
#ifdef IPA_OFFLOAD
#include <linux/ipa.h>
#endif
#define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1

/* Opaque handles: target register space and a HIF instance */
typedef void __iomem *A_target_id_t;
typedef void *hif_handle_t;

#define HIF_DBG_PRINT_RATE 1000

/* Target/chip type identifiers reported through struct hif_target_info */
#define HIF_TYPE_AR6002 2
#define HIF_TYPE_AR6003 3
#define HIF_TYPE_AR6004 5
#define HIF_TYPE_AR9888 6
#define HIF_TYPE_AR6320 7
#define HIF_TYPE_AR6320V2 8
/* For attaching Peregrine 2.0 board host_reg_tbl only */
#define HIF_TYPE_AR9888V2 9
#define HIF_TYPE_ADRASTEA 10
#define HIF_TYPE_AR900B 11
#define HIF_TYPE_QCA9984 12
#define HIF_TYPE_IPQ4019 13
#define HIF_TYPE_QCA9888 14
#define HIF_TYPE_QCA8074 15
#define HIF_TYPE_QCA6290 16

#ifdef IPA_OFFLOAD
/* DMA coherent mask width (in bits), selected by IPA hardware version */
#define DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE 37
#define DMA_COHERENT_MASK_BELOW_IPA_VER_3 32
#endif

/**
 * enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 18,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};

struct CE_state;
#define CE_COUNT_MAX 12
#define HIF_MAX_GRP_IRQ 16
#define HIF_MAX_GROUP 8

#ifdef CONFIG_SLUB_DEBUG_ON
#ifndef CONFIG_WIN
#define HIF_CONFIG_SLUB_DEBUG_ON
#endif
#endif

/* NAPI poll-yield scale: smaller on debug builds, larger on perf builds */
#ifndef NAPI_YIELD_BUDGET_BASED
#ifdef HIF_CONFIG_SLUB_DEBUG_ON
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
#else /* PERF build */
#ifdef CONFIG_WIN
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
#else
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4
#endif /* CONFIG_WIN */
#endif /* SLUB_DEBUG_ON */
#else /* NAPI_YIELD_BUDGET_BASED */
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
#endif /* NAPI_YIELD_BUDGET_BASED */
#define QCA_NAPI_BUDGET 64
#define QCA_NAPI_DEF_SCALE \
	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)

#define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
/* NOTE: "napi->scale" can be changed,
 * but this does not change the number of buckets
 */
#define QCA_NAPI_NUM_BUCKETS 4

/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is reenabled
 * @napi_workdone: cumulative of all work done reported by handler
 * @cpu_corrected: incremented when execution context runs on a different core
 *	than the one that its irq is affined to.
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 *
 * needs to be renamed
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
};


/**
 * struct qca_napi_info - per NAPI instance data structure
 *
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device netdev; /* dummy net_dev */
	void *hif_ctx;
	struct napi_struct napi;
	uint8_t scale; /* currently same on all instances */
	uint8_t id;
	uint8_t cpu;
	int irq;
	struct qca_napi_stat stats[NR_CPUS];
	/* will only be present for data rx CE's */
	void (*lro_flush_cb)(void *);
	qdf_lro_ctx_t lro_ctx;
};

/* Throughput state used for irq-affinity / clock-voting decisions */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};

/* Hotplug state of a CPU as tracked in the napi cpu table */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };

/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state: hotplug state of this core
 * @core_id: physical core id of the core
 * @cluster_id: cluster this core belongs to
 * @core_mask: mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq: maximum clock this core can be clocked at
 *	same for all cpus of the same core.
 * @napis: bitmap of napi instances on this core
 * @execs: bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int core_id;
	int cluster_id;
	cpumask_t core_mask;
	cpumask_t thread_mask;
	unsigned int max_freq;
	uint32_t napis;
	uint32_t execs;
	int cluster_nxt; /* index, not pointer */
};

/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: head index of the little-cluster chain (presumably; confirm)
 * @bigcl_head: head index of the big-cluster chain (presumably; confirm)
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 */
struct qca_napi_data {
	struct hif_softc *hif_softc;
	qdf_spinlock_t lock;
	uint32_t state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t ce_map;
	uint32_t exec_map;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu napi_cpu[NR_CPUS];
	int lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t flags;
};

/**
 * struct hif_config_info - place holder for hif configuration
 * @enable_self_recovery: Self Recovery
 *
 * Structure for holding hif ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	/* runtime PM enable flag and inactivity delay before autosuspend */
	bool enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
};

/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};

/* Opaque per-instance handle passed across the HIF API boundary */
struct hif_opaque_softc {
};

/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
 *
 * @HIF_DEVICE_POWER_UP: HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT: HIF layer should initiate bus-specific AND/OR
 *                        platform-specific measures to completely power-off
 *                        the module and associated hardware (i.e. cut power
 *                        supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};

/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};

/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};

/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_MBOX_BLOCK_SIZE: get mbox block size
 * @HIF_DEVICE_GET_MBOX_ADDR: get mbox block address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
	HIF_DEVICE_GET_MBOX_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/* One record of a register access, kept for PCIe access debug tracing */
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
#endif

void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
		   uint32_t value);
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);

#define HIF_MAX_DEVICES 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context: context to pass to the dsrhandler
 *	note : rwCompletionHandler is provided the context
 *	passed to hif_read_write
 * @rwCompletionHandler: Read / write completion handler
 * @dsrHandler: DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS (*rwCompletionHandler)(void *rwContext, QDF_STATUS status);
	QDF_STATUS (*dsrHandler)(void *context);
};

/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 *
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
};

/* This API detaches the HTC layer from the HIF device */
void hif_detach_htc(struct hif_opaque_softc *hif_ctx);

/****************************************************************/
/* BMI and Diag window abstraction                              */
/****************************************************************/

#define HIF_BMI_EXCHANGE_NO_TIMEOUT ((uint32_t)(0))

#define DIAG_TRANSFER_LIMIT 2048U /* maximum number of bytes that can be
				   * handled atomically by
				   * DiagRead/DiagWrite
				   */

/*
 * API to handle HIF-specific BMI message exchanges, this API is synchronous
 * and only allowed to be called from a context that can block (sleep)
 */
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
				uint8_t *pSendMessage, uint32_t Length,
				uint8_t *pResponseMessage,
				uint32_t *pResponseLength, uint32_t TimeoutMS);
void hif_register_bmi_callbacks(struct hif_softc *hif_sc);

/*
 * APIs to handle HIF specific diagnostic read accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep). They are not high performance APIs.
 *
 * hif_diag_read_access reads a 4 Byte aligned/length value from a
 * Target register or memory word.
 *
 * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
				uint32_t address, uint32_t *data);
QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
			     uint8_t *data, int nbytes);
void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
			    void *ramdump_base, uint32_t address,
			    uint32_t size);

/*
 * APIs to handle HIF specific diagnostic write accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep).
 * They are not high performance APIs.
 *
 * hif_diag_write_access writes a 4 Byte aligned/length value to a
 * Target register or memory word.
 *
 * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
				 uint32_t address, uint32_t data);
QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
			      uint32_t address, uint8_t *data, int nbytes);

typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);

void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);

/*
 * Set the FASTPATH_mode_on flag in sc, for use by data path
 */
#ifdef WLAN_FEATURE_FASTPATH
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler, void *context);
#else
/* Fastpath disabled: stubs keep callers building without the feature */
static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
					      fastpath_msg_handler handler,
					      void *context)
{
	return QDF_STATUS_E_FAILURE;
}

static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}

#endif
518 519 /* 520 * Enable/disable CDC max performance workaround 521 * For max-performace set this to 0 522 * To allow SoC to enter sleep set this to 1 523 */ 524 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0 525 526 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx, 527 qdf_dma_addr_t *ce_sr_base_paddr, 528 uint32_t *ce_sr_ring_size, 529 qdf_dma_addr_t *ce_reg_paddr); 530 531 /** 532 * @brief List of callbacks - filled in by HTC. 533 */ 534 struct hif_msg_callbacks { 535 void *Context; 536 /**< context meaningful to HTC */ 537 QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf, 538 uint32_t transferID, 539 uint32_t toeplitz_hash_result); 540 QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf, 541 uint8_t pipeID); 542 void (*txResourceAvailHandler)(void *context, uint8_t pipe); 543 void (*fwEventHandler)(void *context, QDF_STATUS status); 544 }; 545 546 enum hif_target_status { 547 TARGET_STATUS_CONNECTED = 0, /* target connected */ 548 TARGET_STATUS_RESET, /* target got reset */ 549 TARGET_STATUS_EJECT, /* target got ejected */ 550 TARGET_STATUS_SUSPEND /*target got suspend */ 551 }; 552 553 /** 554 * enum hif_attribute_flags: configure hif 555 * 556 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE 557 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor 558 * + No pktlog CE 559 */ 560 enum hif_attribute_flags { 561 HIF_LOWDESC_CE_CFG = 1, 562 HIF_LOWDESC_CE_NO_PKTLOG_CFG 563 }; 564 565 #define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \ 566 (attr |= (v & 0x01) << 5) 567 #define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \ 568 (attr |= (v & 0x03) << 6) 569 #define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \ 570 (attr |= (v & 0x01) << 13) 571 #define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \ 572 (attr |= (v & 0x01) << 14) 573 #define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \ 574 (attr |= (v & 0x01) << 15) 575 #define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \ 576 (attr |= 
(v & 0x0FFF) << 16) 577 #define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \ 578 (attr |= (v & 0x01) << 30) 579 580 struct hif_ul_pipe_info { 581 unsigned int nentries; 582 unsigned int nentries_mask; 583 unsigned int sw_index; 584 unsigned int write_index; /* cached copy */ 585 unsigned int hw_index; /* cached copy */ 586 void *base_addr_owner_space; /* Host address space */ 587 qdf_dma_addr_t base_addr_CE_space; /* CE address space */ 588 }; 589 590 struct hif_dl_pipe_info { 591 unsigned int nentries; 592 unsigned int nentries_mask; 593 unsigned int sw_index; 594 unsigned int write_index; /* cached copy */ 595 unsigned int hw_index; /* cached copy */ 596 void *base_addr_owner_space; /* Host address space */ 597 qdf_dma_addr_t base_addr_CE_space; /* CE address space */ 598 }; 599 600 struct hif_pipe_addl_info { 601 uint32_t pci_mem; 602 uint32_t ctrl_addr; 603 struct hif_ul_pipe_info ul_pipe; 604 struct hif_dl_pipe_info dl_pipe; 605 }; 606 607 struct hif_bus_id; 608 609 void hif_claim_device(struct hif_opaque_softc *hif_ctx); 610 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx, 611 int opcode, void *config, uint32_t config_len); 612 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx); 613 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx); 614 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC, 615 struct hif_msg_callbacks *callbacks); 616 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx); 617 void hif_stop(struct hif_opaque_softc *hif_ctx); 618 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx); 619 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start); 620 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx, 621 uint8_t cmd_id, bool start); 622 623 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID, 624 uint32_t transferID, uint32_t nbytes, 625 qdf_nbuf_t wbuf, uint32_t data_attr); 626 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, 
uint8_t PipeID, 627 int force); 628 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx); 629 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe, 630 uint8_t *DLPipe); 631 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id, 632 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled, 633 int *dl_is_polled); 634 uint16_t 635 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID); 636 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx); 637 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset); 638 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok, 639 bool wait_for_it); 640 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx); 641 #ifndef HIF_PCI 642 static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx) 643 { 644 return 0; 645 } 646 #else 647 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx); 648 #endif 649 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version, 650 u32 *revision, const char **target_name); 651 void hif_disable_isr(struct hif_opaque_softc *hif_ctx); 652 void hif_reset_soc(struct hif_opaque_softc *hif_ctx); 653 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx, 654 int htc_htt_tx_endpoint); 655 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode, 656 enum qdf_bus_type bus_type, 657 struct hif_driver_state_callbacks *cbk); 658 void hif_close(struct hif_opaque_softc *hif_ctx); 659 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev, 660 void *bdev, const struct hif_bus_id *bid, 661 enum qdf_bus_type bus_type, 662 enum hif_enable_type type); 663 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type); 664 void hif_display_stats(struct hif_opaque_softc *hif_ctx); 665 void hif_clear_stats(struct hif_opaque_softc *hif_ctx); 666 #ifdef FEATURE_RUNTIME_PM 667 struct hif_pm_runtime_lock; 668 void 
hif_fastpath_resume(struct hif_opaque_softc *hif_ctx); 669 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx); 670 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx); 671 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx); 672 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name); 673 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx, 674 struct hif_pm_runtime_lock *lock); 675 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc, 676 struct hif_pm_runtime_lock *lock); 677 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc, 678 struct hif_pm_runtime_lock *lock); 679 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc, 680 struct hif_pm_runtime_lock *lock, unsigned int delay); 681 #else 682 struct hif_pm_runtime_lock { 683 const char *name; 684 }; 685 static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {} 686 static inline void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx) 687 {} 688 689 static inline int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx) 690 { return 0; } 691 static inline int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx) 692 { return 0; } 693 static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock, 694 const char *name) 695 { return 0; } 696 static inline void 697 hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx, 698 struct hif_pm_runtime_lock *lock) {} 699 700 static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc, 701 struct hif_pm_runtime_lock *lock) 702 { return 0; } 703 static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc, 704 struct hif_pm_runtime_lock *lock) 705 { return 0; } 706 static inline int 707 hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc, 708 struct hif_pm_runtime_lock *lock, unsigned int delay) 709 { return 0; } 710 #endif 711 712 void hif_enable_power_management(struct hif_opaque_softc 
*hif_ctx, 713 bool is_packet_log_enabled); 714 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx); 715 716 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx); 717 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx); 718 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx); 719 720 #ifdef IPA_OFFLOAD 721 /** 722 * hif_get_ipa_hw_type() - get IPA hw type 723 * 724 * This API return the IPA hw type. 725 * 726 * Return: IPA hw type 727 */ 728 static inline 729 enum ipa_hw_type hif_get_ipa_hw_type(void) 730 { 731 return ipa_get_hw_type(); 732 } 733 734 /** 735 * hif_get_ipa_present() - get IPA hw status 736 * 737 * This API return the IPA hw status. 738 * 739 * Return: true if IPA is present or false otherwise 740 */ 741 static inline 742 bool hif_get_ipa_present(void) 743 { 744 if (ipa_uc_reg_rdyCB(NULL) != -EPERM) 745 return true; 746 else 747 return false; 748 } 749 #endif 750 int hif_bus_resume(struct hif_opaque_softc *hif_ctx); 751 /** 752 * hif_bus_ealry_suspend() - stop non wmi tx traffic 753 * @context: hif context 754 */ 755 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx); 756 757 /** 758 * hif_bus_late_resume() - resume non wmi traffic 759 * @context: hif context 760 */ 761 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx); 762 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx); 763 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx); 764 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx); 765 766 /** 767 * hif_apps_irqs_enable() - Enables all irqs from the APPS side 768 * @hif_ctx: an opaque HIF handle to use 769 * 770 * As opposed to the standard hif_irq_enable, this function always applies to 771 * the APPS side kernel interrupt handling. 
772 * 773 * Return: errno 774 */ 775 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx); 776 777 /** 778 * hif_apps_irqs_disable() - Disables all irqs from the APPS side 779 * @hif_ctx: an opaque HIF handle to use 780 * 781 * As opposed to the standard hif_irq_disable, this function always applies to 782 * the APPS side kernel interrupt handling. 783 * 784 * Return: errno 785 */ 786 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx); 787 788 /** 789 * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side 790 * @hif_ctx: an opaque HIF handle to use 791 * 792 * As opposed to the standard hif_irq_enable, this function always applies to 793 * the APPS side kernel interrupt handling. 794 * 795 * Return: errno 796 */ 797 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx); 798 799 /** 800 * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side 801 * @hif_ctx: an opaque HIF handle to use 802 * 803 * As opposed to the standard hif_irq_disable, this function always applies to 804 * the APPS side kernel interrupt handling. 
805 * 806 * Return: errno 807 */ 808 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx); 809 810 #ifdef FEATURE_RUNTIME_PM 811 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx); 812 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx); 813 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx); 814 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx); 815 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx); 816 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx); 817 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx); 818 #endif 819 820 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size); 821 int hif_dump_registers(struct hif_opaque_softc *scn); 822 int ol_copy_ramdump(struct hif_opaque_softc *scn); 823 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx); 824 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version, 825 u32 *revision, const char **target_name); 826 void hif_lro_flush_cb_register(struct hif_opaque_softc *hif_ctx, 827 void (lro_flush_handler)(void *arg), 828 void *(lro_init_handler)(void)); 829 void hif_lro_flush_cb_deregister(struct hif_opaque_softc *hif_ctx, 830 void (lro_deinit_cb)(void *arg)); 831 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx); 832 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl); 833 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc * 834 scn); 835 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx); 836 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx); 837 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx); 838 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum 839 hif_target_status); 840 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx, 841 struct hif_config_info *cfg); 842 void hif_update_tx_ring(struct hif_opaque_softc *osc, 
u_int32_t num_htt_cmpls); 843 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, 844 uint32_t transfer_id, u_int32_t len, uint32_t sendhead); 845 int hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, uint32_t 846 transfer_id, u_int32_t len); 847 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf, 848 uint32_t transfer_id, uint32_t download_len); 849 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len); 850 void hif_ce_war_disable(void); 851 void hif_ce_war_enable(void); 852 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num); 853 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT 854 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc, 855 struct hif_pipe_addl_info *hif_info, uint32_t pipe_number); 856 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, 857 uint32_t pipe_num); 858 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc); 859 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */ 860 861 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled, 862 int rx_bundle_cnt); 863 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx); 864 865 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib); 866 867 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl); 868 869 enum hif_exec_type { 870 HIF_EXEC_NAPI_TYPE, 871 HIF_EXEC_TASKLET_TYPE, 872 }; 873 874 typedef uint32_t (*ext_intr_handler)(void *, uint32_t); 875 uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx); 876 uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx, 877 uint32_t numirq, uint32_t irq[], ext_intr_handler handler, 878 void *cb_ctx, const char *context_name, 879 enum hif_exec_type type, uint32_t scale); 880 881 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx, 882 const char *context_name); 883 884 void hif_update_pipe_callback(struct hif_opaque_softc *osc, 885 u_int8_t 
pipeid, 886 struct hif_msg_callbacks *callbacks); 887 888 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx); 889 #ifdef __cplusplus 890 } 891 #endif 892 893 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle); 894 895 /** 896 * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function 897 * @hif_ctx - the HIF context to assign the callback to 898 * @callback - the callback to assign 899 * @priv - the private data to pass to the callback when invoked 900 * 901 * Return: None 902 */ 903 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx, 904 void (*callback)(void *), 905 void *priv); 906 #ifndef CONFIG_WIN 907 #ifndef HIF_CE_DEBUG_DATA_BUF 908 #define HIF_CE_DEBUG_DATA_BUF 0 909 #endif 910 #endif 911 /* 912 * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked 913 * for defined here 914 */ 915 #if HIF_CE_DEBUG_DATA_BUF 916 ssize_t hif_dump_desc_trace_buf(struct device *dev, 917 struct device_attribute *attr, char *buf); 918 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn, 919 const char *buf, size_t size); 920 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, 921 const char *buf, size_t size); 922 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf); 923 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf); 924 #endif /* Note: for MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */ 925 #endif /* _HIF_H_ */ 926