xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision 503663c6daafffe652fa360bde17243568cd6d2a)
1 /*
2  * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _HIF_H_
20 #define _HIF_H_
21 
22 #ifdef __cplusplus
23 extern "C" {
24 #endif /* __cplusplus */
25 
26 /* Header files */
27 #include <qdf_status.h>
28 #include "qdf_nbuf.h"
29 #include "qdf_lro.h"
30 #include "ol_if_athvar.h"
31 #include <linux/platform_device.h>
32 #ifdef HIF_PCI
33 #include <linux/pci.h>
34 #endif /* HIF_PCI */
35 #ifdef HIF_USB
36 #include <linux/usb.h>
37 #endif /* HIF_USB */
38 #ifdef IPA_OFFLOAD
39 #include <linux/ipa.h>
40 #endif
41 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
42 
43 typedef void __iomem *A_target_id_t;
44 typedef void *hif_handle_t;
45 
46 #define HIF_TYPE_AR6002   2
47 #define HIF_TYPE_AR6003   3
48 #define HIF_TYPE_AR6004   5
49 #define HIF_TYPE_AR9888   6
50 #define HIF_TYPE_AR6320   7
51 #define HIF_TYPE_AR6320V2 8
52 /* For attaching Peregrine 2.0 board host_reg_tbl only */
53 #define HIF_TYPE_AR9888V2 9
54 #define HIF_TYPE_ADRASTEA 10
55 #define HIF_TYPE_AR900B 11
56 #define HIF_TYPE_QCA9984 12
57 #define HIF_TYPE_IPQ4019 13
58 #define HIF_TYPE_QCA9888 14
59 #define HIF_TYPE_QCA8074 15
60 #define HIF_TYPE_QCA6290 16
61 #define HIF_TYPE_QCN7605 17
62 #define HIF_TYPE_QCA6390 18
63 #define HIF_TYPE_QCA8074V2 19
64 #define HIF_TYPE_QCA6018  20
65 #define HIF_TYPE_QCN9000 21
66 #define HIF_TYPE_QCA6490 22
67 
68 #ifdef IPA_OFFLOAD
69 #define DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE   37
70 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
71 #endif
72 
/*
 * enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16, /* irq numbering starts at 16 */
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
114 
115 struct CE_state;
116 #define CE_COUNT_MAX 12
117 #define HIF_MAX_GRP_IRQ 16
118 
119 #ifndef HIF_MAX_GROUP
120 #define HIF_MAX_GROUP 7
121 #endif
122 
123 #ifndef NAPI_YIELD_BUDGET_BASED
124 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
125 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 3
126 #else
127 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
128 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
129 #endif
130 #endif /* SLUB_DEBUG_ON */
131 #else  /* NAPI_YIELD_BUDGET_BASED */
132 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
133 #endif /* NAPI_YIELD_BUDGET_BASED */
134 
135 #define QCA_NAPI_BUDGET    64
136 #define QCA_NAPI_DEF_SCALE  \
137 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
138 
139 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
140 /* NOTE: "napi->scale" can be changed,
141  * but this does not change the number of buckets
142  */
143 #define QCA_NAPI_NUM_BUCKETS 4
144 
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is reenabled
 * @napi_workdone: cumulative of all work done reported by handler
 * @cpu_corrected: incremented when execution context runs on a different core
 *			than the one that its irq is affined to.
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: longest single poll duration observed
 *			(unit not visible in this header — TODO confirm
 *			against the code that updates it)
 * @poll_time_buckets: histogram of poll times for the napi
 *			(only with WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT)
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};
173 
174 
/**
 * struct qca_napi_info - per NAPI instance data structure
 * @netdev: dummy net_dev for this napi instance
 * @hif_ctx: hif context this instance belongs to
 * @napi: the kernel napi instance
 * @scale: poll scale factor; currently set to the same value on all instances
 * @id: instance id
 * @cpu: cpu associated with this instance — TODO confirm whether this is the
 *       affined cpu or the cpu it last ran on
 * @irq: irq number serviced by this instance
 * @cpumask: cpu mask for this instance
 * @stats: statistics, kept per cpu
 * @offld_flush_cb: rx offload flush callback; only present for data rx CE's
 *                  (RECEIVE_OFFLOAD builds only)
 * @rx_thread_napi: napi instance for the rx-thread path
 *                  (RECEIVE_OFFLOAD builds only)
 * @rx_thread_netdev: dummy net_dev backing @rx_thread_napi
 * @lro_ctx: LRO context
 *
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device    netdev; /* dummy net_dev */
	void                 *hif_ctx;
	struct napi_struct   napi;
	uint8_t              scale;   /* currently same on all instances */
	uint8_t              id;
	uint8_t              cpu;
	int                  irq;
	cpumask_t            cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct   rx_thread_napi;
	struct net_device    rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t        lro_ctx;
};
200 
/* enum qca_napi_tput_state - throughput classification for napi handling */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};
/* enum qca_napi_cpu_state - hotplug state of an entry in the napi cpu table */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };
210 
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state:       hotplug state of this core (see enum qca_napi_cpu_state)
 * @core_id:     physical core id of the core
 * @cluster_id:  cluster this core belongs to
 * @core_mask:   mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq:    maximum clock this core can be clocked at
 *               same for all cpus of the same core.
 * @napis:       bitmap of napi instances on this core
 * @execs:       bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *               (an index into the table, not a pointer)
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int			core_id;
	int			cluster_id;
	cpumask_t		core_mask;
	cpumask_t		thread_mask;
	unsigned int		max_freq;
	uint32_t		napis;
	uint32_t		execs;
	int			cluster_nxt;  /* index, not pointer */
};
241 
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: per-CE napi instances, indexed by pipe id
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: index of the head of the little-cluster core chain
 * @bigcl_head: index of the head of the big-cluster core chain
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: napi flags (bit semantics not visible in this header —
 *         TODO document at the point of use)
 */
struct qca_napi_data {
	struct               hif_softc *hif_softc;
	qdf_spinlock_t       lock;
	uint32_t             state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t             ce_map;
	uint32_t             exec_map;
	uint32_t             user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu  napi_cpu[NR_CPUS];
	int                  lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t              flags;
};
274 
/**
 * struct hif_config_info - Place Holder for HIF configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable Runtime PM (FEATURE_RUNTIME_PM builds only)
 * @runtime_pm_delay: Runtime PM Delay (FEATURE_RUNTIME_PM builds only)
 * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
 *
 * Structure for holding HIF ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	bool enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
	uint64_t rx_softirq_max_yield_duration_ns;
};
292 
/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: pointer to hardware name string (ownership/lifetime not visible
 *           in this header — TODO confirm who allocates it)
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};
310 
/* Intentionally empty: opaque handle type used throughout the HIF API */
struct hif_opaque_softc {
};
313 
/**
 * enum hif_event_type - Type of DP events to be recorded
 * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
 * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
 * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
 * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
 *
 * Used as the @type member of struct hif_event_record.
 */
enum hif_event_type {
	HIF_EVENT_IRQ_TRIGGER,
	HIF_EVENT_BH_SCHED,
	HIF_EVENT_SRNG_ACCESS_START,
	HIF_EVENT_SRNG_ACCESS_END,
};
327 
328 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
329 
330 /* HIF_EVENT_HIST_MAX should always be power of 2 */
331 #define HIF_EVENT_HIST_MAX		512
332 #define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP
333 #define HIF_EVENT_HIST_DISABLE_MASK	0
334 
/**
 * struct hif_event_record - an entry of the DP event history
 * @hal_ring_id: ring id for which event is recorded
 * @hp: head pointer of the ring (may not be applicable for all events)
 * @tp: tail pointer of the ring (may not be applicable for all events)
 * @cpu_id: cpu id on which the event occurred
 * @timestamp: timestamp when event occurred
 * @type: type of the event
 *
 * This structure represents the information stored for every datapath
 * event which is logged in the history.
 *
 * NOTE(review): hif_record_event() below fills only @hal_ring_id, @hp,
 * @tp and @type; @cpu_id and @timestamp are presumably set inside
 * hif_hist_record_event() — confirm in its definition.
 */
struct hif_event_record {
	uint8_t hal_ring_id;
	uint32_t hp;
	uint32_t tp;
	int cpu_id;
	uint64_t timestamp;
	enum hif_event_type type;
};
355 
/**
 * struct hif_event_history - history for one interrupt group
 * @index: index to store new event (atomically advanced; HIF_EVENT_HIST_MAX
 *         is required to be a power of 2, which suggests the index is masked
 *         to wrap — TODO confirm in the recording implementation)
 * @event: circular buffer of event entries
 *
 * This structure represents the datapath history for one
 * interrupt group.
 */
struct hif_event_history {
	qdf_atomic_t index;
	struct hif_event_record event[HIF_EVENT_HIST_MAX];
};
368 
369 /**
370  * hif_hist_record_event() - Record one datapath event in history
371  * @hif_ctx: HIF opaque context
372  * @event: DP event entry
373  * @intr_grp_id: interrupt group ID registered with hif
374  *
375  * Return: None
376  */
377 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
378 			   struct hif_event_record *event,
379 			   uint8_t intr_grp_id);
380 
381 /**
382  * hif_record_event() - Wrapper function to form and record DP event
383  * @hif_ctx: HIF opaque context
384  * @intr_grp_id: interrupt group ID registered with hif
385  * @hal_ring_id: ring id for which event is recorded
386  * @hp: head pointer index of the srng
387  * @tp: tail pointer index of the srng
388  * @type: type of the event to be logged in history
389  *
390  * Return: None
391  */
392 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
393 				    uint8_t intr_grp_id,
394 				    uint8_t hal_ring_id,
395 				    uint32_t hp,
396 				    uint32_t tp,
397 				    enum hif_event_type type)
398 {
399 	struct hif_event_record event;
400 
401 	event.hal_ring_id = hal_ring_id;
402 	event.hp = hp;
403 	event.tp = tp;
404 	event.type = type;
405 
406 	return hif_hist_record_event(hif_ctx, &event,
407 				     intr_grp_id);
408 }
409 
410 #else
411 
/* No-op stub: DP event history (WLAN_FEATURE_DP_EVENT_HISTORY) compiled out */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}
420 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
421 
/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
 *
 * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
 *                         platform-specific measures to completely power-off
 *                         the module and associated hardware (i.e. cut power
 *                         supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};
438 
/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 * @HIF_ENABLE_TYPE_MAX: number of enable types (sentinel, not a real trigger)
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};
450 
/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 * @HIF_DISABLE_TYPE_MAX: number of disable types (sentinel, not a trigger)
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get FIFO (block) address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
502 
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/**
 * struct HID_ACCESS_LOG - one entry of the PCIe register access debug log
 * @seqnum: sequence number of the access
 * @is_write: true for a write access, false for a read
 * @addr: address accessed
 * @value: value written (or read — TODO confirm direction semantics)
 */
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
#endif
511 
512 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
513 		uint32_t value);
514 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
515 
516 #define HIF_MAX_DEVICES                 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context:          context to pass to the @dsr_handler
 *                    note : @rw_compl_handler is provided the context
 *                    passed to hif_read_write
 * @rw_compl_handler: Read / write completion handler
 * @dsr_handler:      DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};
530 
/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 *
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
};
550 
551 /* This API detaches the HTC layer from the HIF device */
552 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
553 
554 /****************************************************************/
555 /* BMI and Diag window abstraction                              */
556 /****************************************************************/
557 
558 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
559 
560 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
561 				     * handled atomically by
562 				     * DiagRead/DiagWrite
563 				     */
564 
565 #ifdef WLAN_FEATURE_BMI
566 /*
567  * API to handle HIF-specific BMI message exchanges, this API is synchronous
568  * and only allowed to be called from a context that can block (sleep)
569  */
570 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
571 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
572 				uint8_t *pSendMessage, uint32_t Length,
573 				uint8_t *pResponseMessage,
574 				uint32_t *pResponseLength, uint32_t TimeoutMS);
575 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
576 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
577 #else /* WLAN_FEATURE_BMI */
/* Stub: BMI support (WLAN_FEATURE_BMI) is compiled out; nothing to register */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}

/* Stub: with BMI compiled out, no target ever requires a BMI exchange */
static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
588 #endif /* WLAN_FEATURE_BMI */
589 
590 /*
591  * APIs to handle HIF specific diagnostic read accesses. These APIs are
592  * synchronous and only allowed to be called from a context that
593  * can block (sleep). They are not high performance APIs.
594  *
595  * hif_diag_read_access reads a 4 Byte aligned/length value from a
596  * Target register or memory word.
597  *
598  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
599  */
600 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
601 				uint32_t address, uint32_t *data);
602 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
603 		      uint8_t *data, int nbytes);
604 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
605 			void *ramdump_base, uint32_t address, uint32_t size);
606 /*
607  * APIs to handle HIF specific diagnostic write accesses. These APIs are
608  * synchronous and only allowed to be called from a context that
609  * can block (sleep).
610  * They are not high performance APIs.
611  *
612  * hif_diag_write_access writes a 4 Byte aligned/length value to a
613  * Target register or memory word.
614  *
615  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
616  */
617 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
618 				 uint32_t address, uint32_t data);
619 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
620 			uint32_t address, uint8_t *data, int nbytes);
621 
622 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
623 
624 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
625 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
626 
627 /*
628  * Set the FASTPATH_mode_on flag in sc, for use by data path
629  */
630 #ifdef WLAN_FEATURE_FASTPATH
631 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
632 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
633 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
634 int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
635 				fastpath_msg_handler handler, void *context);
636 #else
/* Stub: fastpath (WLAN_FEATURE_FASTPATH) compiled out; registration fails.
 * NOTE(review): returns the QDF_STATUS_E_FAILURE enum value through an int
 * return type, matching the non-stub prototype above.
 */
static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
					      fastpath_msg_handler handler,
					      void *context)
{
	return QDF_STATUS_E_FAILURE;
}
/* Stub: no CE handle is available when fastpath is compiled out */
static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
647 
648 #endif
649 
650 /*
651  * Enable/disable CDC max performance workaround
652  * For max-performace set this to 0
653  * To allow SoC to enter sleep set this to 1
654  */
655 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
656 
657 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
658 			     qdf_shared_mem_t **ce_sr,
659 			     uint32_t *ce_sr_ring_size,
660 			     qdf_dma_addr_t *ce_reg_paddr);
661 
/**
 * struct hif_msg_callbacks - List of callbacks - filled in by HTC.
 * @Context: context meaningful to HTC
 * @txCompletionHandler: tx completion callback
 * @rxCompletionHandler: rx completion callback
 * @txResourceAvailHandler: called when tx resources become available
 * @fwEventHandler: firmware event callback
 * @update_bundle_stats: callback to update bundle packet statistics
 */
struct hif_msg_callbacks {
	void *Context;
	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint32_t transferID,
					uint32_t toeplitz_hash_result);
	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint8_t pipeID);
	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
	void (*fwEventHandler)(void *context, QDF_STATUS status);
	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
};
677 
/* Target lifecycle states reported through the HIF layer */
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,  /* target got reset */
	TARGET_STATUS_EJECT,  /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
684 
/**
 * enum hif_attribute_flags: configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor CE
 *                                and no pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};
696 
697 #define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
698 	(attr |= (v & 0x01) << 5)
699 #define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
700 	(attr |= (v & 0x03) << 6)
701 #define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
702 	(attr |= (v & 0x01) << 13)
703 #define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
704 	(attr |= (v & 0x01) << 14)
705 #define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
706 	(attr |= (v & 0x01) << 15)
707 #define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
708 	(attr |= (v & 0x0FFF) << 16)
709 #define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
710 	(attr |= (v & 0x01) << 30)
711 
/**
 * struct hif_ul_pipe_info - state snapshot of an uplink (host->target) pipe
 * @nentries: number of ring entries
 * @nentries_mask: ring size mask (nentries - 1, assuming a power-of-2 ring —
 *                 TODO confirm)
 * @sw_index: software (consumer) index
 * @write_index: cached copy of the write index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE address space
 */
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
721 
/**
 * struct hif_dl_pipe_info - state snapshot of a downlink (target->host) pipe
 * @nentries: number of ring entries
 * @nentries_mask: ring size mask (nentries - 1, assuming a power-of-2 ring —
 *                 TODO confirm)
 * @sw_index: software (consumer) index
 * @write_index: cached copy of the write index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE address space
 */
struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
731 
/**
 * struct hif_pipe_addl_info - additional per-pipe information
 * @pci_mem: PCI memory field (exact meaning not visible here — TODO confirm)
 * @ctrl_addr: control address of the pipe's CE
 * @ul_pipe: uplink pipe state
 * @dl_pipe: downlink pipe state
 */
struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
738 
739 #ifdef CONFIG_SLUB_DEBUG_ON
740 #define MSG_FLUSH_NUM 16
741 #else /* PERF build */
742 #define MSG_FLUSH_NUM 32
743 #endif /* SLUB_DEBUG_ON */
744 
745 struct hif_bus_id;
746 
747 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
748 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
749 		     int opcode, void *config, uint32_t config_len);
750 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
751 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
752 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
753 		   struct hif_msg_callbacks *callbacks);
754 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
755 void hif_stop(struct hif_opaque_softc *hif_ctx);
756 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
757 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
758 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
759 		      uint8_t cmd_id, bool start);
760 
761 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
762 				  uint32_t transferID, uint32_t nbytes,
763 				  qdf_nbuf_t wbuf, uint32_t data_attr);
764 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
765 			     int force);
766 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
767 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
768 			  uint8_t *DLPipe);
769 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
770 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
771 			int *dl_is_polled);
772 uint16_t
773 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
774 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
775 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
776 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
777 		     bool wait_for_it);
778 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
779 #ifndef HIF_PCI
/* Stub for non-PCI builds: no SoC status to check, always report success */
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
784 #else
785 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
786 #endif
787 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
788 			u32 *revision, const char **target_name);
789 
790 #ifdef RECEIVE_OFFLOAD
791 /**
792  * hif_offld_flush_cb_register() - Register the offld flush callback
793  * @scn: HIF opaque context
794  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
795  *			 Or GRO/LRO flush when RxThread is not enabled. Called
796  *			 with corresponding context for flush.
797  * Return: None
798  */
799 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
800 				 void (offld_flush_handler)(void *ol_ctx));
801 
802 /**
803  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
804  * @scn: HIF opaque context
805  *
806  * Return: None
807  */
808 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
809 #endif
810 
811 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
812 /**
813  * hif_exec_should_yield() - Check if hif napi context should yield
814  * @hif_ctx - HIF opaque context
815  * @grp_id - grp_id of the napi for which check needs to be done
816  *
817  * The function uses grp_id to look for NAPI and checks if NAPI needs to
818  * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
819  * yield decision.
820  *
821  * Return: true if NAPI needs to yield, else false
822  */
823 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
824 #else
/* Stub: without WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT there is no time-based
 * yield criterion, so napi contexts are never asked to yield.
 */
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
					 uint grp_id)
{
	return false;
}
830 #endif
831 
832 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
833 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
834 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
835 				      int htc_htt_tx_endpoint);
836 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode,
837 				  enum qdf_bus_type bus_type,
838 				  struct hif_driver_state_callbacks *cbk);
839 void hif_close(struct hif_opaque_softc *hif_ctx);
840 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
841 		      void *bdev, const struct hif_bus_id *bid,
842 		      enum qdf_bus_type bus_type,
843 		      enum hif_enable_type type);
844 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
845 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
846 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
847 #ifdef FEATURE_RUNTIME_PM
848 struct hif_pm_runtime_lock;
849 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
850 int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx);
851 int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx);
852 int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx);
853 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx);
854 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx);
855 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx);
856 void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx);
857 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
858 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
859 			struct hif_pm_runtime_lock *lock);
860 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
861 		struct hif_pm_runtime_lock *lock);
862 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
863 		struct hif_pm_runtime_lock *lock);
864 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
865 		struct hif_pm_runtime_lock *lock, unsigned int delay);
866 bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx);
867 int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx);
868 void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
869 					  int val);
870 void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
871 int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
872 qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx);
873 #else
874 struct hif_pm_runtime_lock {
875 	const char *name;
876 };
877 static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
878 static inline int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx)
879 { return 0; }
880 static inline int
881 hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx)
882 { return 0; }
883 static inline int
884 hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
885 { return 0; }
886 static inline void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
887 {}
888 
889 static inline int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
890 { return 0; }
891 static inline int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
892 { return 0; }
893 static inline void
894 hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx) {};
895 static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
896 					const char *name)
897 { return 0; }
898 static inline void
899 hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
900 			struct hif_pm_runtime_lock *lock) {}
901 
902 static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
903 		struct hif_pm_runtime_lock *lock)
904 { return 0; }
905 static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
906 		struct hif_pm_runtime_lock *lock)
907 { return 0; }
908 static inline int
909 hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
910 		struct hif_pm_runtime_lock *lock, unsigned int delay)
911 { return 0; }
912 static inline bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
913 { return false; }
914 static inline int
915 hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
916 { return 0; }
917 static inline void
918 hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx, int val)
919 { return; }
920 static inline void
921 hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx) {};
922 static inline int
923 hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
924 { return 0; }
925 static inline qdf_time_t
926 hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
927 { return 0; }
928 #endif
929 
/* Enable/disable bus power management for this HIF instance */
void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
				 bool is_packet_log_enabled);
void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);

/* Link-state voting: callers vote the link up or down and
 * hif_can_suspend_link() reports whether the aggregate vote allows the
 * link to suspend. NOTE(review): voting semantics inferred from names -
 * confirm against the bus-layer implementation.
 */
void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
937 
#ifdef IPA_OFFLOAD
/**
 * hif_get_ipa_hw_type() - get IPA hw type
 *
 * Thin wrapper over the IPA driver's ipa_get_hw_type().
 *
 * Return: IPA hw type
 */
static inline
enum ipa_hw_type hif_get_ipa_hw_type(void)
{
	return ipa_get_hw_type();
}

/**
 * hif_get_ipa_present() - get IPA hw status
 *
 * Probes the IPA uC ready-callback registration; the IPA driver
 * returns -EPERM when the hardware is not available.
 *
 * Return: true if IPA is present or false otherwise
 */
static inline
bool hif_get_ipa_present(void)
{
	return ipa_uc_reg_rdyCB(NULL) != -EPERM;
}
#endif
int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
/**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
 */
int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_late_resume() - resume non wmi traffic
 * @hif_ctx: hif context
 */
int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
/* System bus suspend/resume. NOTE(review): the *_noirq variants
 * presumably run during the noirq phase of system PM (interrupts
 * disabled) - confirm against the bus layer.
 */
int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
983 
/* APPS-side interrupt control (host kernel IRQ handling) */
/**
 * hif_apps_irqs_enable() - Enables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_irqs_disable() - Disables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
1027 
#ifdef FEATURE_RUNTIME_PM
/* Runtime-PM transition hooks invoked by the bus layer around a runtime
 * suspend/resume cycle, plus success/failure notifications.
 * NOTE(review): exact call ordering is owned by the bus layer - confirm
 * in the runtime PM implementation before depending on it.
 */
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
#endif
1037 
/* Diagnostics: IRQ enumeration, register dump, ramdump, crash handling */
int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
int hif_dump_registers(struct hif_opaque_softc *scn);
int ol_copy_ramdump(struct hif_opaque_softc *scn);
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
/* Accessors for sub-structures embedded in the HIF context */
struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
						   scn);
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
			   hif_target_status);
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg);
/* TX datapath entry points */
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			   uint32_t transfer_id, u_int32_t len);
int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
	uint32_t transfer_id, uint32_t download_len);
void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
/* Copy-engine workaround toggles (global, not per-instance) */
void hif_ce_war_disable(void);
void hif_ce_war_enable(void);
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
		uint32_t pipe_num);
int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */

void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
				int rx_bundle_cnt);
int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);

void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
1080 
/* Execution context flavors for external interrupt groups */
enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,
	HIF_EXEC_TASKLET_TYPE,
};

/* Handler signature for an external interrupt group; returns a uint32_t
 * status. NOTE(review): argument meanings (context, group id?) are not
 * visible here - confirm at the registration sites.
 */
typedef uint32_t (*ext_intr_handler)(void *, uint32_t);

/**
 * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
 * @softc: hif opaque context owning the exec context
 * @id: the id of the interrupt context
 *
 * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
 *         'id' registered with the OS
 */
int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
				uint8_t id);

/* Register/deregister groups of IRQs with a common handler and
 * execution type (NAPI or tasklet)
 */
uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
uint32_t  hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
		void *cb_ctx, const char *context_name,
		enum hif_exec_type type, uint32_t scale);

void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
				const char *context_name);

/* Replace the message callbacks installed on a single pipe */
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
				u_int8_t pipeid,
				struct hif_msg_callbacks *callbacks);
1111 
/**
 * hif_print_napi_stats() - Display HIF NAPI stats
 * @hif_ctx: HIF opaque context
 *
 * Return: None
 */
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);

/**
 * hif_clear_napi_stats() - function clears the stats of the
 * latency when called.
 * @hif_ctx: the HIF context to assign the callback to
 *
 * Return: None
 */
void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
1127 
#ifdef __cplusplus
}
#endif

/* NOTE(review): everything declared below this point falls OUTSIDE the
 * extern "C" block closed above, so these symbols would get C++ linkage
 * when this header is included from C++. Confirm whether the closing
 * brace should move to the end of the file.
 */
#ifdef FORCE_WAKE
/**
 * hif_force_wake_request() - Function to wake from power collapse
 * @handle: HIF opaque handle
 *
 * Description: API to check if the device is awake or not before
 * read/write to BAR + 4K registers. If device is awake return
 * success otherwise write '1' to
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG which will interrupt
 * the device and does wakeup the PCI and MHI within 50ms
 * and then the device writes a value to
 * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
 * handshake process to let the host know the device is awake.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_request(struct hif_opaque_softc *handle);

/**
 * hif_force_wake_release() - API to release/reset the SOC wake register
 * from interrupting the device.
 * @handle: HIF opaque handle
 *
 * Description: API to set the
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
 * to release the interrupt line.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_release(struct hif_opaque_softc *handle);
#else
/* Stubs for builds without FORCE_WAKE: always report success */
static inline
int hif_force_wake_request(struct hif_opaque_softc *handle)
{
	return 0;
}

static inline
int hif_force_wake_release(struct hif_opaque_softc *handle)
{
	return 0;
}
#endif /* FORCE_WAKE */
1175 
/* Returns the device base address (BAR) mapped for this HIF instance.
 * NOTE(review): inferred from name - confirm in the bus implementation.
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);

/**
 * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
 * @hif_ctx: the HIF context to assign the callback to
 * @callback: the callback to assign
 * @priv: the private data to pass to the callback when invoked
 *
 * Return: None
 */
void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
			       void (*callback)(void *),
			       void *priv);
/*
 * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * for defined here
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/* sysfs-style hooks for dumping/controlling the CE descriptor history */
ssize_t hif_dump_desc_trace_buf(struct device *dev,
				struct device_attribute *attr, char *buf);
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
					const char *buf, size_t size);
ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
				const char *buf, size_t size);
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
#endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
1203 
/**
 * hif_set_ce_service_max_yield_time() - sets CE service max yield time
 * @hif: hif context
 * @ce_service_max_yield_time: CE service max yield time to set
 *
 * This API stores CE service max yield time in hif context based
 * on ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint32_t ce_service_max_yield_time);

/**
 * hif_get_ce_service_max_yield_time() - get CE service max yield time
 * @hif: hif context
 *
 * This API returns CE service max yield time.
 *
 * Return: CE service max yield time
 */
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);

/**
 * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
 * @hif: hif context
 * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
 *
 * This API stores CE service max rx ind flush in hif context based
 * on ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
				       uint8_t ce_service_max_rx_ind_flush);
#ifdef OL_ATH_SMART_LOGGING
/**
 * hif_log_dump_ce() - Copy all the CE DEST ring to buf
 * @scn: HIF handler
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy Engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and buffers pointed by them in to the given buf
 *
 * Return: Current pointer in ring buffer
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
			 uint8_t *buf_init, uint32_t buf_sz,
			 uint32_t ce, uint32_t skb_sz);
#endif /* OL_ATH_SMART_LOGGING */
1259 
/**
 * hif_softc_to_hif_opaque_softc() - convert hif_softc handle
 * to hif_opaque_softc handle
 * @hif_handle: hif_softc type
 *
 * Pure pointer cast; the opaque handle aliases the concrete context.
 *
 * Return: hif_opaque_softc type
 */
static inline struct hif_opaque_softc *
hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
{
	return (struct hif_opaque_softc *)hif_handle;
}
1272 
#ifdef FORCE_WAKE
/**
 * hif_srng_init_phase(): Indicate srng initialization phase
 * to avoid force wake as UMAC power collapse is not yet
 * enabled
 * @hif_ctx: hif opaque handle
 * @init_phase: initialization phase
 *
 * Return:  None
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase);
#else
/* No-op when FORCE_WAKE is compiled out */
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
}
#endif /* FORCE_WAKE */
1292 #endif /* _HIF_H_ */
1293