xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision f28396d060cff5c6519f883cb28ae0116ce479f1)
1 /*
2  * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _HIF_H_
20 #define _HIF_H_
21 
22 #ifdef __cplusplus
23 extern "C" {
24 #endif /* __cplusplus */
25 
26 /* Header files */
27 #include <qdf_status.h>
28 #include "qdf_nbuf.h"
29 #include "qdf_lro.h"
30 #include "ol_if_athvar.h"
31 #include <linux/platform_device.h>
32 #ifdef HIF_PCI
33 #include <linux/pci.h>
34 #endif /* HIF_PCI */
35 #ifdef HIF_USB
36 #include <linux/usb.h>
37 #endif /* HIF_USB */
38 #ifdef IPA_OFFLOAD
39 #include <linux/ipa.h>
40 #endif
41 #include "cfg_ucfg_api.h"
42 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
43 
typedef void __iomem *A_target_id_t; /* iomem-tagged base used for target register access */
typedef void *hif_handle_t;          /* opaque handle to a HIF instance */
46 
/* Identifiers for the supported HIF target (chip) types */
#define HIF_TYPE_AR6002   2
#define HIF_TYPE_AR6003   3
#define HIF_TYPE_AR6004   5
#define HIF_TYPE_AR9888   6
#define HIF_TYPE_AR6320   7
#define HIF_TYPE_AR6320V2 8
/* For attaching Peregrine 2.0 board host_reg_tbl only */
#define HIF_TYPE_AR9888V2 9
#define HIF_TYPE_ADRASTEA 10
#define HIF_TYPE_AR900B 11
#define HIF_TYPE_QCA9984 12
#define HIF_TYPE_IPQ4019 13
#define HIF_TYPE_QCA9888 14
#define HIF_TYPE_QCA8074 15
#define HIF_TYPE_QCA6290 16
#define HIF_TYPE_QCN7605 17
#define HIF_TYPE_QCA6390 18
#define HIF_TYPE_QCA8074V2 19
#define HIF_TYPE_QCA6018  20
#define HIF_TYPE_QCN9000 21
#define HIF_TYPE_QCA6490 22
#define HIF_TYPE_QCA6750 23
69 
70 #ifdef IPA_OFFLOAD
71 #define DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE   37
72 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
73 #endif
74 
/**
 * enum hif_ic_irq - integrated chip irq numbers
 *
 * Defines irq numbers that can be used by external modules like datapath.
 * Numbering starts at 16; subsequent enumerators increment sequentially.
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
116 
117 struct CE_state;
118 #define CE_COUNT_MAX 12
119 #define HIF_MAX_GRP_IRQ 16
120 
121 #ifndef HIF_MAX_GROUP
122 #define HIF_MAX_GROUP 7
123 #endif
124 
125 #ifndef NAPI_YIELD_BUDGET_BASED
126 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
127 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 3
128 #else
129 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
130 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
131 #endif
132 #endif /* SLUB_DEBUG_ON */
133 #else  /* NAPI_YIELD_BUDGET_BASED */
134 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
135 #endif /* NAPI_YIELD_BUDGET_BASED */
136 
137 #define QCA_NAPI_BUDGET    64
138 #define QCA_NAPI_DEF_SCALE  \
139 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
140 
141 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
142 /* NOTE: "napi->scale" can be changed,
143  * but this does not change the number of buckets
144  */
145 #define QCA_NAPI_NUM_BUCKETS 4
146 
147 /**
148  * qca_napi_stat - stats structure for execution contexts
149  * @napi_schedules - number of times the schedule function is called
150  * @napi_polls - number of times the execution context runs
151  * @napi_completes - number of times that the generating interrupt is reenabled
152  * @napi_workdone - cumulative of all work done reported by handler
153  * @cpu_corrected - incremented when execution context runs on a different core
154  *			than the one that its irq is affined to.
155  * @napi_budget_uses - histogram of work done per execution run
156  * @time_limit_reache - count of yields due to time limit threshholds
157  * @rxpkt_thresh_reached - count of yields due to a work limit
158  * @poll_time_buckets - histogram of poll times for the napi
159  *
160  */
161 struct qca_napi_stat {
162 	uint32_t napi_schedules;
163 	uint32_t napi_polls;
164 	uint32_t napi_completes;
165 	uint32_t napi_workdone;
166 	uint32_t cpu_corrected;
167 	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
168 	uint32_t time_limit_reached;
169 	uint32_t rxpkt_thresh_reached;
170 	unsigned long long napi_max_poll_time;
171 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
172 	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
173 #endif
174 };
175 
176 
177 /**
178  * per NAPI instance data structure
179  * This data structure holds stuff per NAPI instance.
180  * Note that, in the current implementation, though scale is
181  * an instance variable, it is set to the same value for all
182  * instances.
183  */
184 struct qca_napi_info {
185 	struct net_device    netdev; /* dummy net_dev */
186 	void                 *hif_ctx;
187 	struct napi_struct   napi;
188 	uint8_t              scale;   /* currently same on all instances */
189 	uint8_t              id;
190 	uint8_t              cpu;
191 	int                  irq;
192 	cpumask_t            cpumask;
193 	struct qca_napi_stat stats[NR_CPUS];
194 #ifdef RECEIVE_OFFLOAD
195 	/* will only be present for data rx CE's */
196 	void (*offld_flush_cb)(void *);
197 	struct napi_struct   rx_thread_napi;
198 	struct net_device    rx_thread_netdev;
199 #endif /* RECEIVE_OFFLOAD */
200 	qdf_lro_ctx_t        lro_ctx;
201 };
202 
/* Throughput state used for napi irq-affinity and clock voting decisions */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};
/* Hotplug state of a cpu as tracked in the napi cpu table */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };
212 
213 /**
214  * struct qca_napi_cpu - an entry of the napi cpu table
215  * @core_id:     physical core id of the core
216  * @cluster_id:  cluster this core belongs to
217  * @core_mask:   mask to match all core of this cluster
218  * @thread_mask: mask for this core within the cluster
219  * @max_freq:    maximum clock this core can be clocked at
220  *               same for all cpus of the same core.
221  * @napis:       bitmap of napi instances on this core
222  * @execs:       bitmap of execution contexts on this core
223  * cluster_nxt:  chain to link cores within the same cluster
224  *
225  * This structure represents a single entry in the napi cpu
226  * table. The table is part of struct qca_napi_data.
227  * This table is initialized by the init function, called while
228  * the first napi instance is being created, updated by hotplug
229  * notifier and when cpu affinity decisions are made (by throughput
230  * detection), and deleted when the last napi instance is removed.
231  */
232 struct qca_napi_cpu {
233 	enum qca_napi_cpu_state state;
234 	int			core_id;
235 	int			cluster_id;
236 	cpumask_t		core_mask;
237 	cpumask_t		thread_mask;
238 	unsigned int		max_freq;
239 	uint32_t		napis;
240 	uint32_t		execs;
241 	int			cluster_nxt;  /* index, not pointer */
242 };
243 
244 /**
245  * struct qca_napi_data - collection of napi data for a single hif context
246  * @hif_softc: pointer to the hif context
247  * @lock: spinlock used in the event state machine
248  * @state: state variable used in the napi stat machine
249  * @ce_map: bit map indicating which ce's have napis running
250  * @exec_map: bit map of instanciated exec contexts
251  * @user_cpu_affin_map: CPU affinity map from INI config.
252  * @napi_cpu: cpu info for irq affinty
253  * @lilcl_head:
254  * @bigcl_head:
255  * @napi_mode: irq affinity & clock voting mode
256  * @cpuhp_handler: CPU hotplug event registration handle
257  */
258 struct qca_napi_data {
259 	struct               hif_softc *hif_softc;
260 	qdf_spinlock_t       lock;
261 	uint32_t             state;
262 
263 	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
264 	 * not used by clients (clients use an id returned by create)
265 	 */
266 	uint32_t             ce_map;
267 	uint32_t             exec_map;
268 	uint32_t             user_cpu_affin_mask;
269 	struct qca_napi_info *napis[CE_COUNT_MAX];
270 	struct qca_napi_cpu  napi_cpu[NR_CPUS];
271 	int                  lilcl_head, bigcl_head;
272 	enum qca_napi_tput_state napi_mode;
273 	struct qdf_cpuhp_handler *cpuhp_handler;
274 	uint8_t              flags;
275 };
276 
277 /**
278  * struct hif_config_info - Place Holder for HIF configuration
279  * @enable_self_recovery: Self Recovery
280  * @enable_runtime_pm: Enable Runtime PM
281  * @runtime_pm_delay: Runtime PM Delay
282  * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
283  *
284  * Structure for holding HIF ini parameters.
285  */
286 struct hif_config_info {
287 	bool enable_self_recovery;
288 #ifdef FEATURE_RUNTIME_PM
289 	uint8_t enable_runtime_pm;
290 	u_int32_t runtime_pm_delay;
291 #endif
292 	uint64_t rx_softirq_max_yield_duration_ns;
293 };
294 
295 /**
296  * struct hif_target_info - Target Information
297  * @target_version: Target Version
298  * @target_type: Target Type
299  * @target_revision: Target Revision
300  * @soc_version: SOC Version
301  * @hw_name: pointer to hardware name
302  *
303  * Structure to hold target information.
304  */
305 struct hif_target_info {
306 	uint32_t target_version;
307 	uint32_t target_type;
308 	uint32_t target_revision;
309 	uint32_t soc_version;
310 	char *hw_name;
311 };
312 
/*
 * Opaque handle type handed to HIF clients; the real state lives in the
 * bus-specific softc. NOTE: an empty struct is a GNU C extension, not ISO C.
 */
struct hif_opaque_softc {
};
315 
316 /**
317  * enum hif_event_type - Type of DP events to be recorded
318  * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
319  * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
320  * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
321  * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
322  */
323 enum hif_event_type {
324 	HIF_EVENT_IRQ_TRIGGER,
325 	HIF_EVENT_BH_SCHED,
326 	HIF_EVENT_SRNG_ACCESS_START,
327 	HIF_EVENT_SRNG_ACCESS_END,
328 };
329 
330 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
331 
332 /* HIF_EVENT_HIST_MAX should always be power of 2 */
333 #define HIF_EVENT_HIST_MAX		512
334 #define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP
335 #define HIF_EVENT_HIST_DISABLE_MASK	0
336 
337 /**
338  * struct hif_event_record - an entry of the DP event history
339  * @hal_ring_id: ring id for which event is recorded
340  * @hp: head pointer of the ring (may not be applicable for all events)
341  * @tp: tail pointer of the ring (may not be applicable for all events)
342  * @cpu_id: cpu id on which the event occurred
343  * @timestamp: timestamp when event occurred
344  * @type: type of the event
345  *
346  * This structure represents the information stored for every datapath
347  * event which is logged in the history.
348  */
349 struct hif_event_record {
350 	uint8_t hal_ring_id;
351 	uint32_t hp;
352 	uint32_t tp;
353 	int cpu_id;
354 	uint64_t timestamp;
355 	enum hif_event_type type;
356 };
357 
358 /**
359  * struct hif_event_history - history for one interrupt group
360  * @index: index to store new event
361  * @event: event entry
362  *
363  * This structure represents the datapath history for one
364  * interrupt group.
365  */
366 struct hif_event_history {
367 	qdf_atomic_t index;
368 	struct hif_event_record event[HIF_EVENT_HIST_MAX];
369 };
370 
371 /**
372  * hif_hist_record_event() - Record one datapath event in history
373  * @hif_ctx: HIF opaque context
374  * @event: DP event entry
375  * @intr_grp_id: interrupt group ID registered with hif
376  *
377  * Return: None
378  */
379 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
380 			   struct hif_event_record *event,
381 			   uint8_t intr_grp_id);
382 
383 /**
384  * hif_record_event() - Wrapper function to form and record DP event
385  * @hif_ctx: HIF opaque context
386  * @intr_grp_id: interrupt group ID registered with hif
387  * @hal_ring_id: ring id for which event is recorded
388  * @hp: head pointer index of the srng
389  * @tp: tail pointer index of the srng
390  * @type: type of the event to be logged in history
391  *
392  * Return: None
393  */
394 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
395 				    uint8_t intr_grp_id,
396 				    uint8_t hal_ring_id,
397 				    uint32_t hp,
398 				    uint32_t tp,
399 				    enum hif_event_type type)
400 {
401 	struct hif_event_record event;
402 
403 	event.hal_ring_id = hal_ring_id;
404 	event.hp = hp;
405 	event.tp = tp;
406 	event.type = type;
407 
408 	return hif_hist_record_event(hif_ctx, &event,
409 				     intr_grp_id);
410 }
411 
412 #else
413 
/* No-op stub when WLAN_FEATURE_DP_EVENT_HISTORY is disabled */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}
422 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
423 
424 /**
425  * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
426  *
427  * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
428  * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
429  *                         minimize power
430  * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
431  *                         platform-specific measures to completely power-off
432  *                         the module and associated hardware (i.e. cut power
433  *                         supplies)
434  */
435 enum HIF_DEVICE_POWER_CHANGE_TYPE {
436 	HIF_DEVICE_POWER_UP,
437 	HIF_DEVICE_POWER_DOWN,
438 	HIF_DEVICE_POWER_CUT
439 };
440 
441 /**
442  * enum hif_enable_type: what triggered the enabling of hif
443  *
444  * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
445  * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
446  */
447 enum hif_enable_type {
448 	HIF_ENABLE_TYPE_PROBE,
449 	HIF_ENABLE_TYPE_REINIT,
450 	HIF_ENABLE_TYPE_MAX
451 };
452 
453 /**
454  * enum hif_disable_type: what triggered the disabling of hif
455  *
456  * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
457  * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
458  * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
459  * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
460  */
461 enum hif_disable_type {
462 	HIF_DISABLE_TYPE_PROBE_ERROR,
463 	HIF_DISABLE_TYPE_REINIT_ERROR,
464 	HIF_DISABLE_TYPE_REMOVE,
465 	HIF_DISABLE_TYPE_SHUTDOWN,
466 	HIF_DISABLE_TYPE_MAX
467 };
468 /**
469  * enum hif_device_config_opcode: configure mode
470  *
471  * @HIF_DEVICE_POWER_STATE: device power state
472  * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
473  * @HIF_DEVICE_GET_ADDR: get block address
474  * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
475  * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
476  * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
477  * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
478  * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
479  * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
480  * @HIF_DEVICE_GET_OS_DEVICE: get OS device
481  * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
482  * @HIF_BMI_DONE: bmi done
483  * @HIF_DEVICE_SET_TARGET_TYPE: set target type
484  * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
485  * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
486  */
487 enum hif_device_config_opcode {
488 	HIF_DEVICE_POWER_STATE = 0,
489 	HIF_DEVICE_GET_BLOCK_SIZE,
490 	HIF_DEVICE_GET_FIFO_ADDR,
491 	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
492 	HIF_DEVICE_GET_IRQ_PROC_MODE,
493 	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
494 	HIF_DEVICE_POWER_STATE_CHANGE,
495 	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
496 	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
497 	HIF_DEVICE_GET_OS_DEVICE,
498 	HIF_DEVICE_DEBUG_BUS_STATE,
499 	HIF_BMI_DONE,
500 	HIF_DEVICE_SET_TARGET_TYPE,
501 	HIF_DEVICE_SET_HTC_CONTEXT,
502 	HIF_DEVICE_GET_HTC_CONTEXT,
503 };
504 
505 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/*
 * One recorded register access (direction, address, value) for the
 * PCIe access-debug facility.
 */
struct HID_ACCESS_LOG {
	uint32_t seqnum;   /* sequence number of this access */
	bool is_write;     /* true for a write access, false for a read */
	void *addr;        /* register address accessed */
	uint32_t value;    /* value written or read */
};
512 #endif
513 
514 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
515 		uint32_t value);
516 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
517 
518 #define HIF_MAX_DEVICES                 1
519 /**
520  * struct htc_callbacks - Structure for HTC Callbacks methods
521  * @context:             context to pass to the dsrhandler
522  *                       note : rwCompletionHandler is provided the context
523  *                       passed to hif_read_write
524  * @rwCompletionHandler: Read / write completion handler
525  * @dsrHandler:          DSR Handler
526  */
527 struct htc_callbacks {
528 	void *context;
529 	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
530 	QDF_STATUS(*dsr_handler)(void *context);
531 };
532 
533 /**
534  * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
535  * @context: Private data context
536  * @set_recovery_in_progress: To Set Driver state for recovery in progress
537  * @is_recovery_in_progress: Query if driver state is recovery in progress
538  * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
539  * @is_driver_unloading: Query if driver is unloading.
540  *
541  * This Structure provides callback pointer for HIF to query hdd for driver
542  * states.
543  */
544 struct hif_driver_state_callbacks {
545 	void *context;
546 	void (*set_recovery_in_progress)(void *context, uint8_t val);
547 	bool (*is_recovery_in_progress)(void *context);
548 	bool (*is_load_unload_in_progress)(void *context);
549 	bool (*is_driver_unloading)(void *context);
550 	bool (*is_target_ready)(void *context);
551 };
552 
553 /* This API detaches the HTC layer from the HIF device */
554 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
555 
556 /****************************************************************/
557 /* BMI and Diag window abstraction                              */
558 /****************************************************************/
559 
560 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
561 
562 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
563 				     * handled atomically by
564 				     * DiagRead/DiagWrite
565 				     */
566 
567 #ifdef WLAN_FEATURE_BMI
568 /*
569  * API to handle HIF-specific BMI message exchanges, this API is synchronous
570  * and only allowed to be called from a context that can block (sleep)
571  */
572 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
573 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
574 				uint8_t *pSendMessage, uint32_t Length,
575 				uint8_t *pResponseMessage,
576 				uint32_t *pResponseLength, uint32_t TimeoutMS);
577 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
578 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
579 #else /* WLAN_FEATURE_BMI */
/* BMI feature compiled out: registration is a no-op */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}
584 
/* BMI feature compiled out: no target ever needs BMI */
static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
590 #endif /* WLAN_FEATURE_BMI */
591 
592 /*
593  * APIs to handle HIF specific diagnostic read accesses. These APIs are
594  * synchronous and only allowed to be called from a context that
595  * can block (sleep). They are not high performance APIs.
596  *
597  * hif_diag_read_access reads a 4 Byte aligned/length value from a
598  * Target register or memory word.
599  *
600  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
601  */
602 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
603 				uint32_t address, uint32_t *data);
604 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
605 		      uint8_t *data, int nbytes);
606 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
607 			void *ramdump_base, uint32_t address, uint32_t size);
608 /*
609  * APIs to handle HIF specific diagnostic write accesses. These APIs are
610  * synchronous and only allowed to be called from a context that
611  * can block (sleep).
612  * They are not high performance APIs.
613  *
614  * hif_diag_write_access writes a 4 Byte aligned/length value to a
615  * Target register or memory word.
616  *
617  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
618  */
619 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
620 				 uint32_t address, uint32_t data);
621 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
622 			uint32_t address, uint8_t *data, int nbytes);
623 
624 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
625 
626 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
627 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
628 
629 /*
630  * Set the FASTPATH_mode_on flag in sc, for use by data path
631  */
632 #ifdef WLAN_FEATURE_FASTPATH
633 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
634 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
635 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
636 int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
637 				fastpath_msg_handler handler, void *context);
638 #else
639 static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
640 					      fastpath_msg_handler handler,
641 					      void *context)
642 {
643 	return QDF_STATUS_E_FAILURE;
644 }
645 static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
646 {
647 	return NULL;
648 }
649 
650 #endif
651 
652 /*
653  * Enable/disable CDC max performance workaround
654  * For max-performace set this to 0
655  * To allow SoC to enter sleep set this to 1
656  */
657 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
658 
659 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
660 			     qdf_shared_mem_t **ce_sr,
661 			     uint32_t *ce_sr_ring_size,
662 			     qdf_dma_addr_t *ce_reg_paddr);
663 
664 /**
665  * @brief List of callbacks - filled in by HTC.
666  */
667 struct hif_msg_callbacks {
668 	void *Context;
669 	/**< context meaningful to HTC */
670 	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
671 					uint32_t transferID,
672 					uint32_t toeplitz_hash_result);
673 	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
674 					uint8_t pipeID);
675 	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
676 	void (*fwEventHandler)(void *context, QDF_STATUS status);
677 	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
678 };
679 
/* Connection/lifecycle status of the target device */
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,  /* target got reset */
	TARGET_STATUS_EJECT,  /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
686 
687 /**
688  * enum hif_attribute_flags: configure hif
689  *
690  * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
691  * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
692  *  							+ No pktlog CE
693  */
694 enum hif_attribute_flags {
695 	HIF_LOWDESC_CE_CFG = 1,
696 	HIF_LOWDESC_CE_NO_PKTLOG_CFG
697 };
698 
/*
 * Helpers that pack CE data-attribute fields into @attr.
 * Every macro argument is parenthesized (CERT PRE01-C) so that a compound
 * expression such as (x + y) is masked as a whole; previously 'v & 0x01'
 * let '&' bind to only part of such an expression.
 */
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	((attr) |= ((v) & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	((attr) |= ((v) & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	((attr) |= ((v) & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	((attr) |= ((v) & 0x01) << 30)
713 
/* Snapshot of an UL (host-to-target) pipe's ring state */
struct hif_ul_pipe_info {
	unsigned int nentries;       /* number of ring entries */
	unsigned int nentries_mask;  /* nentries - 1, for index wrapping */
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
723 
/* Snapshot of a DL (target-to-host) pipe's ring state */
struct hif_dl_pipe_info {
	unsigned int nentries;       /* number of ring entries */
	unsigned int nentries_mask;  /* nentries - 1, for index wrapping */
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
733 
/* Additional per-pipe info: register context plus UL/DL ring snapshots */
struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
740 
741 #ifdef CONFIG_SLUB_DEBUG_ON
742 #define MSG_FLUSH_NUM 16
743 #else /* PERF build */
744 #define MSG_FLUSH_NUM 32
745 #endif /* SLUB_DEBUG_ON */
746 
747 struct hif_bus_id;
748 
749 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
750 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
751 		     int opcode, void *config, uint32_t config_len);
752 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
753 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
754 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
755 		   struct hif_msg_callbacks *callbacks);
756 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
757 void hif_stop(struct hif_opaque_softc *hif_ctx);
758 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
759 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
760 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
761 		      uint8_t cmd_id, bool start);
762 
763 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
764 				  uint32_t transferID, uint32_t nbytes,
765 				  qdf_nbuf_t wbuf, uint32_t data_attr);
766 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
767 			     int force);
768 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
769 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
770 			  uint8_t *DLPipe);
771 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
772 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
773 			int *dl_is_polled);
774 uint16_t
775 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
776 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
777 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
778 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
779 		     bool wait_for_it);
780 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
781 #ifndef HIF_PCI
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	/* no PCI bus support compiled in: nothing to check, report success */
	return 0;
}
786 #else
787 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
788 #endif
789 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
790 			u32 *revision, const char **target_name);
791 
792 #ifdef RECEIVE_OFFLOAD
793 /**
794  * hif_offld_flush_cb_register() - Register the offld flush callback
795  * @scn: HIF opaque context
796  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
797  *			 Or GRO/LRO flush when RxThread is not enabled. Called
798  *			 with corresponding context for flush.
799  * Return: None
800  */
801 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
802 				 void (offld_flush_handler)(void *ol_ctx));
803 
804 /**
805  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
806  * @scn: HIF opaque context
807  *
808  * Return: None
809  */
810 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
811 #endif
812 
813 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
814 /**
815  * hif_exec_should_yield() - Check if hif napi context should yield
816  * @hif_ctx - HIF opaque context
817  * @grp_id - grp_id of the napi for which check needs to be done
818  *
819  * The function uses grp_id to look for NAPI and checks if NAPI needs to
820  * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
821  * yield decision.
822  *
823  * Return: true if NAPI needs to yield, else false
824  */
825 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
826 #else
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
					 uint grp_id)
{
	/* time-limit feature disabled: never ask the napi context to yield */
	return false;
}
832 #endif
833 
834 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
835 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
836 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
837 				      int htc_htt_tx_endpoint);
838 
839 /**
840  * hif_open() - Create hif handle
841  * @qdf_ctx: qdf context
842  * @mode: Driver Mode
843  * @bus_type: Bus Type
844  * @cbk: CDS Callbacks
845  * @psoc: psoc object manager
846  *
847  * API to open HIF Context
848  *
849  * Return: HIF Opaque Pointer
850  */
851 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
852 				  uint32_t mode,
853 				  enum qdf_bus_type bus_type,
854 				  struct hif_driver_state_callbacks *cbk,
855 				  struct wlan_objmgr_psoc *psoc);
856 
857 void hif_close(struct hif_opaque_softc *hif_ctx);
858 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
859 		      void *bdev, const struct hif_bus_id *bid,
860 		      enum qdf_bus_type bus_type,
861 		      enum hif_enable_type type);
862 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
863 #ifdef CE_TASKLET_DEBUG_ENABLE
864 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
865 				 uint8_t value);
866 #endif
867 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
868 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
869 #ifdef FEATURE_RUNTIME_PM
870 struct hif_pm_runtime_lock;
871 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
872 int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx);
873 int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx);
874 int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx);
875 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx);
876 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx);
877 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx);
878 int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx);
879 void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx);
880 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
881 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
882 			struct hif_pm_runtime_lock *lock);
883 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
884 		struct hif_pm_runtime_lock *lock);
885 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
886 		struct hif_pm_runtime_lock *lock);
887 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
888 		struct hif_pm_runtime_lock *lock, unsigned int delay);
889 bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx);
890 int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx);
891 void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
892 					  int val);
893 void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
894 int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
895 qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx);
896 int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx);
897 #else
898 struct hif_pm_runtime_lock {
899 	const char *name;
900 };
901 static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
902 static inline int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx)
903 { return 0; }
904 static inline int
905 hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx)
906 { return 0; }
907 static inline int
908 hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
909 { return 0; }
910 static inline void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
911 {}
912 
913 static inline int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
914 { return 0; }
915 static inline int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
916 { return 0; }
917 static inline int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx)
918 { return 0; }
919 static inline void
920 hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx) {};
921 static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
922 					const char *name)
923 { return 0; }
924 static inline void
925 hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
926 			struct hif_pm_runtime_lock *lock) {}
927 
928 static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
929 		struct hif_pm_runtime_lock *lock)
930 { return 0; }
931 static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
932 		struct hif_pm_runtime_lock *lock)
933 { return 0; }
934 static inline int
935 hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
936 		struct hif_pm_runtime_lock *lock, unsigned int delay)
937 { return 0; }
938 static inline bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
939 { return false; }
940 static inline int
941 hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
942 { return 0; }
943 static inline void
944 hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx, int val)
945 { return; }
946 static inline void
947 hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx) {};
948 static inline int
949 hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
950 { return 0; }
951 static inline qdf_time_t
952 hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
953 { return 0; }
954 static inline int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx)
955 { return 0; }
956 #endif
957 
958 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
959 				 bool is_packet_log_enabled);
960 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
961 
962 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
963 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
964 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
965 
966 #ifdef IPA_OFFLOAD
967 /**
968  * hif_get_ipa_hw_type() - get IPA hw type
969  *
970  * This API return the IPA hw type.
971  *
972  * Return: IPA hw type
973  */
974 static inline
975 enum ipa_hw_type hif_get_ipa_hw_type(void)
976 {
977 	return ipa_get_hw_type();
978 }
979 
980 /**
981  * hif_get_ipa_present() - get IPA hw status
982  *
983  * This API return the IPA hw status.
984  *
985  * Return: true if IPA is present or false otherwise
986  */
987 static inline
988 bool hif_get_ipa_present(void)
989 {
990 	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
991 		return true;
992 	else
993 		return false;
994 }
995 #endif
996 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
997 /**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
1000  */
1001 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
1002 
1003 /**
1004  * hif_bus_late_resume() - resume non wmi traffic
 * @hif_ctx: hif context
1006  */
1007 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
1008 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
1009 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
1010 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
1011 
1012 /**
1013  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
1014  * @hif_ctx: an opaque HIF handle to use
1015  *
1016  * As opposed to the standard hif_irq_enable, this function always applies to
1017  * the APPS side kernel interrupt handling.
1018  *
1019  * Return: errno
1020  */
1021 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
1022 
1023 /**
1024  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
1025  * @hif_ctx: an opaque HIF handle to use
1026  *
1027  * As opposed to the standard hif_irq_disable, this function always applies to
1028  * the APPS side kernel interrupt handling.
1029  *
1030  * Return: errno
1031  */
1032 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
1033 
1034 /**
1035  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
1036  * @hif_ctx: an opaque HIF handle to use
1037  *
1038  * As opposed to the standard hif_irq_enable, this function always applies to
1039  * the APPS side kernel interrupt handling.
1040  *
1041  * Return: errno
1042  */
1043 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
1044 
1045 /**
1046  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
1047  * @hif_ctx: an opaque HIF handle to use
1048  *
1049  * As opposed to the standard hif_irq_disable, this function always applies to
1050  * the APPS side kernel interrupt handling.
1051  *
1052  * Return: errno
1053  */
1054 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
1055 
1056 #ifdef FEATURE_RUNTIME_PM
1057 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1058 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
1059 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1060 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
1061 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
1062 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
1063 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
1064 #endif
1065 
1066 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
1067 int hif_dump_registers(struct hif_opaque_softc *scn);
1068 int ol_copy_ramdump(struct hif_opaque_softc *scn);
1069 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
1070 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1071 		     u32 *revision, const char **target_name);
1072 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
1073 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
1074 						   scn);
1075 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
1076 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
1077 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
1078 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1079 			   hif_target_status);
1080 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1081 			 struct hif_config_info *cfg);
1082 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
1083 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1084 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
1085 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1086 			   uint32_t transfer_id, u_int32_t len);
1087 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
1088 	uint32_t transfer_id, uint32_t download_len);
1089 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
1090 void hif_ce_war_disable(void);
1091 void hif_ce_war_enable(void);
1092 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
1093 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
1094 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
1095 		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
1096 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
1097 		uint32_t pipe_num);
1098 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
1099 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
1100 
1101 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
1102 				int rx_bundle_cnt);
1103 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
1104 
1105 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
1106 
1107 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
1108 
1109 enum hif_exec_type {
1110 	HIF_EXEC_NAPI_TYPE,
1111 	HIF_EXEC_TASKLET_TYPE,
1112 };
1113 
1114 typedef uint32_t (*ext_intr_handler)(void *, uint32_t);
1115 
1116 /**
1117  * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
1118  * @softc: hif opaque context owning the exec context
1119  * @id: the id of the interrupt context
1120  *
1121  * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
1122  *         'id' registered with the OS
1123  */
1124 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
1125 				uint8_t id);
1126 
1127 uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1128 uint32_t  hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
1129 		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
1130 		void *cb_ctx, const char *context_name,
1131 		enum hif_exec_type type, uint32_t scale);
1132 
1133 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
1134 				const char *context_name);
1135 
1136 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1137 				u_int8_t pipeid,
1138 				struct hif_msg_callbacks *callbacks);
1139 
1140 /**
1141  * hif_print_napi_stats() - Display HIF NAPI stats
 * @hif_ctx: HIF opaque context
1143  *
1144  * Return: None
1145  */
1146 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
1147 
/**
 * hif_clear_napi_stats() - function clears the stats of the
 * latency when called.
 * @hif_ctx: the HIF context whose NAPI stats are to be cleared
1151  *
1152  * Return: None
1153  */
1154 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
1155 
1156 #ifdef __cplusplus
1157 }
1158 #endif
1159 
1160 #ifdef FORCE_WAKE
1161 /**
1162  * hif_force_wake_request() - Function to wake from power collapse
1163  * @handle: HIF opaque handle
1164  *
1165  * Description: API to check if the device is awake or not before
1166  * read/write to BAR + 4K registers. If device is awake return
1167  * success otherwise write '1' to
1168  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG which will interrupt
1169  * the device and does wakeup the PCI and MHI within 50ms
1170  * and then the device writes a value to
1171  * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
1172  * handshake process to let the host know the device is awake.
1173  *
1174  * Return: zero - success/non-zero - failure
1175  */
1176 int hif_force_wake_request(struct hif_opaque_softc *handle);
1177 
1178 /**
1179  * hif_force_wake_release() - API to release/reset the SOC wake register
1180  * from interrupting the device.
1181  * @handle: HIF opaque handle
1182  *
1183  * Description: API to set the
1184  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
1185  * to release the interrupt line.
1186  *
1187  * Return: zero - success/non-zero - failure
1188  */
1189 int hif_force_wake_release(struct hif_opaque_softc *handle);
1190 #else
static inline
int hif_force_wake_request(struct hif_opaque_softc *handle)
{
	/* no-op when FORCE_WAKE is disabled; always report success */
	return 0;
}

static inline
int hif_force_wake_release(struct hif_opaque_softc *handle)
{
	/* no-op when FORCE_WAKE is disabled; always report success */
	return 0;
}
1202 #endif /* FORCE_WAKE */
1203 
1204 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
1205 
1206 /**
1207  * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
 * @hif_ctx: the HIF context to assign the callback to
 * @callback: the callback to assign
 * @priv: the private data to pass to the callback when invoked
1211  *
1212  * Return: None
1213  */
1214 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
1215 			       void (*callback)(void *),
1216 			       void *priv);
1217 /*
1218  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
1219  * for defined here
1220  */
1221 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1222 ssize_t hif_dump_desc_trace_buf(struct device *dev,
1223 				struct device_attribute *attr, char *buf);
1224 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1225 					const char *buf, size_t size);
1226 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
1227 				const char *buf, size_t size);
1228 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
1229 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
1230 #endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
1231 
1232 /**
1233  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
1234  * @hif: hif context
1235  * @ce_service_max_yield_time: CE service max yield time to set
1236  *
1237  * This API storess CE service max yield time in hif context based
1238  * on ini value.
1239  *
1240  * Return: void
1241  */
1242 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
1243 				       uint32_t ce_service_max_yield_time);
1244 
1245 /**
1246  * hif_get_ce_service_max_yield_time() - get CE service max yield time
1247  * @hif: hif context
1248  *
1249  * This API returns CE service max yield time.
1250  *
1251  * Return: CE service max yield time
1252  */
1253 unsigned long long
1254 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
1255 
1256 /**
1257  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
1258  * @hif: hif context
1259  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
1260  *
1261  * This API stores CE service max rx ind flush in hif context based
1262  * on ini value.
1263  *
1264  * Return: void
1265  */
1266 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
1267 					 uint8_t ce_service_max_rx_ind_flush);
1268 
1269 #ifdef OL_ATH_SMART_LOGGING
1270 /*
1271  * hif_log_ce_dump() - Copy all the CE DEST ring to buf
1272  * @scn : HIF handler
1273  * @buf_cur: Current pointer in ring buffer
1274  * @buf_init:Start of the ring buffer
1275  * @buf_sz: Size of the ring buffer
1276  * @ce: Copy Engine id
1277  * @skb_sz: Max size of the SKB buffer to be copied
1278  *
1279  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
1280  * and buffers pointed by them in to the given buf
1281  *
1282  * Return: Current pointer in ring buffer
1283  */
1284 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
1285 			 uint8_t *buf_init, uint32_t buf_sz,
1286 			 uint32_t ce, uint32_t skb_sz);
1287 #endif /* OL_ATH_SMART_LOGGING */
1288 
1289 /*
1290  * hif_softc_to_hif_opaque_softc - API to convert hif_softc handle
1291  * to hif_opaque_softc handle
1292  * @hif_handle - hif_softc type
1293  *
1294  * Return: hif_opaque_softc type
1295  */
1296 static inline struct hif_opaque_softc *
1297 hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
1298 {
1299 	return (struct hif_opaque_softc *)hif_handle;
1300 }
1301 
1302 #ifdef FORCE_WAKE
1303 /**
1304  * hif_srng_init_phase(): Indicate srng initialization phase
1305  * to avoid force wake as UMAC power collapse is not yet
1306  * enabled
1307  * @hif_ctx: hif opaque handle
1308  * @init_phase: initialization phase
1309  *
1310  * Return:  None
1311  */
1312 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
1313 			 bool init_phase);
1314 #else
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	/* no-op when FORCE_WAKE is disabled: force wake is never required,
	 * so there is no init-phase state to track
	 */
}
1320 #endif /* FORCE_WAKE */
1321 #endif /* _HIF_H_ */
1322