xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision 2888b71da71bce103343119fa1b31f4a0cee07c8)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _HIF_H_
21 #define _HIF_H_
22 
23 #ifdef __cplusplus
24 extern "C" {
25 #endif /* __cplusplus */
26 
27 /* Header files */
28 #include <qdf_status.h>
29 #include "qdf_nbuf.h"
30 #include "qdf_lro.h"
31 #include "ol_if_athvar.h"
32 #include <linux/platform_device.h>
33 #ifdef HIF_PCI
34 #include <linux/pci.h>
35 #endif /* HIF_PCI */
36 #ifdef HIF_USB
37 #include <linux/usb.h>
38 #endif /* HIF_USB */
39 #ifdef IPA_OFFLOAD
40 #include <linux/ipa.h>
41 #endif
42 #include "cfg_ucfg_api.h"
43 #include "qdf_dev.h"
44 #include <wlan_init_cfg.h>
45 
46 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
47 
48 typedef void __iomem *A_target_id_t;
49 typedef void *hif_handle_t;
50 
51 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
52 #define HIF_WORK_DRAIN_WAIT_CNT 50
53 
54 #define HIF_EP_WAKE_RESET_WAIT_CNT 10
55 #endif
56 
57 #define HIF_TYPE_AR6002   2
58 #define HIF_TYPE_AR6003   3
59 #define HIF_TYPE_AR6004   5
60 #define HIF_TYPE_AR9888   6
61 #define HIF_TYPE_AR6320   7
62 #define HIF_TYPE_AR6320V2 8
63 /* For attaching Peregrine 2.0 board host_reg_tbl only */
64 #define HIF_TYPE_AR9888V2 9
65 #define HIF_TYPE_ADRASTEA 10
66 #define HIF_TYPE_AR900B 11
67 #define HIF_TYPE_QCA9984 12
68 #define HIF_TYPE_QCA9888 14
69 #define HIF_TYPE_QCA8074 15
70 #define HIF_TYPE_QCA6290 16
71 #define HIF_TYPE_QCN7605 17
72 #define HIF_TYPE_QCA6390 18
73 #define HIF_TYPE_QCA8074V2 19
74 #define HIF_TYPE_QCA6018  20
75 #define HIF_TYPE_QCN9000 21
76 #define HIF_TYPE_QCA6490 22
77 #define HIF_TYPE_QCA6750 23
78 #define HIF_TYPE_QCA5018 24
79 #define HIF_TYPE_QCN6122 25
80 #define HIF_TYPE_KIWI 26
81 #define HIF_TYPE_QCN9224 27
82 #define HIF_TYPE_QCA9574 28
83 #define HIF_TYPE_MANGO 29
84 #define HIF_TYPE_QCA5332 30
85 
86 #define DMA_COHERENT_MASK_DEFAULT   37
87 
88 #ifdef IPA_OFFLOAD
89 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
90 #endif
91 
/**
 * enum hif_ic_irq - integrated chip irq numbers
 * Defines irq numbers that can be used by external modules like datapath.
 * Values are sequential, starting from host2wbm_desc_feed = 16.
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring4,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
134 
#ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
/**
 * enum hif_legacy_pci_irq - legacy PCI interrupt source indices
 * Sequential indices (starting at 0 with ce0) identifying the
 * interrupt sources when legacy (non-MSI) PCI interrupts are used.
 * The ordering of the enumerators is significant; do not reorder.
 */
enum hif_legacy_pci_irq {
	ce0,
	ce1,
	ce2,
	ce3,
	ce4,
	ce5,
	ce6,
	ce7,
	ce8,
	ce9,
	ce10,
	ce11,
	ce12,
	ce13,
	ce14,
	ce15,
	reo2sw8_intr2,
	reo2sw7_intr2,
	reo2sw6_intr2,
	reo2sw5_intr2,
	reo2sw4_intr2,
	reo2sw3_intr2,
	reo2sw2_intr2,
	reo2sw1_intr2,
	reo2sw0_intr2,
	reo2sw8_intr,
	reo2sw7_intr,
	/* NOTE(review): "inrr" looks like a typo of reo2sw6_intr; renaming
	 * would change the interface for existing users - confirm first.
	 */
	reo2sw6_inrr,
	reo2sw5_intr,
	reo2sw4_intr,
	reo2sw3_intr,
	reo2sw2_intr,
	reo2sw1_intr,
	reo2sw0_intr,
	reo2status_intr2,
	reo_status,
	reo2rxdma_out_2,
	reo2rxdma_out_1,
	reo_cmd,
	sw2reo6,
	sw2reo5,
	sw2reo1,
	sw2reo,
	rxdma2reo_mlo_0_dst_ring1,
	rxdma2reo_mlo_0_dst_ring0,
	rxdma2reo_mlo_1_dst_ring1,
	rxdma2reo_mlo_1_dst_ring0,
	rxdma2reo_dst_ring1,
	rxdma2reo_dst_ring0,
	rxdma2sw_dst_ring1,
	rxdma2sw_dst_ring0,
	rxdma2release_dst_ring1,
	rxdma2release_dst_ring0,
	sw2rxdma_2_src_ring,
	sw2rxdma_1_src_ring,
	sw2rxdma_0,
	wbm2sw6_release2,
	wbm2sw5_release2,
	wbm2sw4_release2,
	wbm2sw3_release2,
	wbm2sw2_release2,
	wbm2sw1_release2,
	wbm2sw0_release2,
	wbm2sw6_release,
	wbm2sw5_release,
	wbm2sw4_release,
	wbm2sw3_release,
	wbm2sw2_release,
	wbm2sw1_release,
	wbm2sw0_release,
	wbm2sw_link,
	wbm_error_release,
	sw2txmon_src_ring,
	sw2rxmon_src_ring,
	txmon2sw_p1_intr1,
	txmon2sw_p1_intr0,
	txmon2sw_p0_dest1,
	txmon2sw_p0_dest0,
	rxmon2sw_p1_intr1,
	rxmon2sw_p1_intr0,
	rxmon2sw_p0_dest1,
	rxmon2sw_p0_dest0,
	sw_release,
	sw2tcl_credit2,
	sw2tcl_credit,
	sw2tcl4,
	sw2tcl5,
	sw2tcl3,
	sw2tcl2,
	sw2tcl1,
	sw2wbm1,
	misc_8,
	misc_7,
	misc_6,
	misc_5,
	misc_4,
	misc_3,
	misc_2,
	misc_1,
	misc_0,
};
#endif
239 
240 struct CE_state;
241 #ifdef QCA_WIFI_QCN9224
242 #define CE_COUNT_MAX 16
243 #else
244 #define CE_COUNT_MAX 12
245 #endif
246 
247 #ifndef HIF_MAX_GROUP
248 #define HIF_MAX_GROUP WLAN_CFG_INT_NUM_CONTEXTS
249 #endif
250 
251 #ifdef CONFIG_BERYLLIUM
252 #define HIF_MAX_GRP_IRQ 25
253 #else
254 #define HIF_MAX_GRP_IRQ 16
255 #endif
256 
257 #ifndef NAPI_YIELD_BUDGET_BASED
258 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
259 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
260 #endif
261 #else  /* NAPI_YIELD_BUDGET_BASED */
262 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
263 #endif /* NAPI_YIELD_BUDGET_BASED */
264 
265 #define QCA_NAPI_BUDGET    64
266 #define QCA_NAPI_DEF_SCALE  \
267 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
268 
269 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
270 /* NOTE: "napi->scale" can be changed,
271  * but this does not change the number of buckets
272  */
273 #define QCA_NAPI_NUM_BUCKETS 4
274 
275 /**
276  * qca_napi_stat - stats structure for execution contexts
277  * @napi_schedules - number of times the schedule function is called
278  * @napi_polls - number of times the execution context runs
279  * @napi_completes - number of times that the generating interrupt is reenabled
280  * @napi_workdone - cumulative of all work done reported by handler
281  * @cpu_corrected - incremented when execution context runs on a different core
282  *			than the one that its irq is affined to.
283  * @napi_budget_uses - histogram of work done per execution run
284  * @time_limit_reache - count of yields due to time limit threshholds
285  * @rxpkt_thresh_reached - count of yields due to a work limit
286  * @poll_time_buckets - histogram of poll times for the napi
287  *
288  */
289 struct qca_napi_stat {
290 	uint32_t napi_schedules;
291 	uint32_t napi_polls;
292 	uint32_t napi_completes;
293 	uint32_t napi_workdone;
294 	uint32_t cpu_corrected;
295 	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
296 	uint32_t time_limit_reached;
297 	uint32_t rxpkt_thresh_reached;
298 	unsigned long long napi_max_poll_time;
299 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
300 	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
301 #endif
302 };
303 
304 
305 /**
306  * per NAPI instance data structure
307  * This data structure holds stuff per NAPI instance.
308  * Note that, in the current implementation, though scale is
309  * an instance variable, it is set to the same value for all
310  * instances.
311  */
312 struct qca_napi_info {
313 	struct net_device    netdev; /* dummy net_dev */
314 	void                 *hif_ctx;
315 	struct napi_struct   napi;
316 	uint8_t              scale;   /* currently same on all instances */
317 	uint8_t              id;
318 	uint8_t              cpu;
319 	int                  irq;
320 	cpumask_t            cpumask;
321 	struct qca_napi_stat stats[NR_CPUS];
322 #ifdef RECEIVE_OFFLOAD
323 	/* will only be present for data rx CE's */
324 	void (*offld_flush_cb)(void *);
325 	struct napi_struct   rx_thread_napi;
326 	struct net_device    rx_thread_netdev;
327 #endif /* RECEIVE_OFFLOAD */
328 	qdf_lro_ctx_t        lro_ctx;
329 };
330 
/* Throughput state driving irq affinity & clock voting decisions */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};
/* Hotplug state of a CPU as tracked in the napi cpu table */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };

/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state:       hotplug state of this cpu (see enum qca_napi_cpu_state)
 * @core_id:     physical core id of the core
 * @cluster_id:  cluster this core belongs to
 * @core_mask:   mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq:    maximum clock this core can be clocked at
 *               same for all cpus of the same core.
 * @napis:       bitmap of napi instances on this core
 * @execs:       bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int			core_id;
	int			cluster_id;
	cpumask_t		core_mask;
	cpumask_t		thread_mask;
	unsigned int		max_freq;
	uint32_t		napis;
	uint32_t		execs;
	int			cluster_nxt;  /* index, not pointer */
};
371 
372 /**
373  * struct qca_napi_data - collection of napi data for a single hif context
374  * @hif_softc: pointer to the hif context
375  * @lock: spinlock used in the event state machine
376  * @state: state variable used in the napi stat machine
377  * @ce_map: bit map indicating which ce's have napis running
378  * @exec_map: bit map of instanciated exec contexts
379  * @user_cpu_affin_map: CPU affinity map from INI config.
380  * @napi_cpu: cpu info for irq affinty
381  * @lilcl_head:
382  * @bigcl_head:
383  * @napi_mode: irq affinity & clock voting mode
384  * @cpuhp_handler: CPU hotplug event registration handle
385  */
386 struct qca_napi_data {
387 	struct               hif_softc *hif_softc;
388 	qdf_spinlock_t       lock;
389 	uint32_t             state;
390 
391 	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
392 	 * not used by clients (clients use an id returned by create)
393 	 */
394 	uint32_t             ce_map;
395 	uint32_t             exec_map;
396 	uint32_t             user_cpu_affin_mask;
397 	struct qca_napi_info *napis[CE_COUNT_MAX];
398 	struct qca_napi_cpu  napi_cpu[NR_CPUS];
399 	int                  lilcl_head, bigcl_head;
400 	enum qca_napi_tput_state napi_mode;
401 	struct qdf_cpuhp_handler *cpuhp_handler;
402 	uint8_t              flags;
403 };
404 
405 /**
406  * struct hif_config_info - Place Holder for HIF configuration
407  * @enable_self_recovery: Self Recovery
408  * @enable_runtime_pm: Enable Runtime PM
409  * @runtime_pm_delay: Runtime PM Delay
410  * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
411  *
412  * Structure for holding HIF ini parameters.
413  */
414 struct hif_config_info {
415 	bool enable_self_recovery;
416 #ifdef FEATURE_RUNTIME_PM
417 	uint8_t enable_runtime_pm;
418 	u_int32_t runtime_pm_delay;
419 #endif
420 	uint64_t rx_softirq_max_yield_duration_ns;
421 };
422 
423 /**
424  * struct hif_target_info - Target Information
425  * @target_version: Target Version
426  * @target_type: Target Type
427  * @target_revision: Target Revision
428  * @soc_version: SOC Version
429  * @hw_name: pointer to hardware name
430  *
431  * Structure to hold target information.
432  */
433 struct hif_target_info {
434 	uint32_t target_version;
435 	uint32_t target_type;
436 	uint32_t target_revision;
437 	uint32_t soc_version;
438 	char *hw_name;
439 };
440 
/* Empty tag type used as an opaque handle to a HIF instance;
 * callers never see the real softc layout through this type.
 */
struct hif_opaque_softc {
};

/**
 * enum hif_event_type - Type of DP events to be recorded
 * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
 * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event
 * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event
 * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
 * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
 * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
 * @HIF_EVENT_BH_COMPLETE: NAPI POLL completion event
 * @HIF_EVENT_BH_FORCE_BREAK: NAPI POLL force break event
 */
enum hif_event_type {
	HIF_EVENT_IRQ_TRIGGER,
	HIF_EVENT_TIMER_ENTRY,
	HIF_EVENT_TIMER_EXIT,
	HIF_EVENT_BH_SCHED,
	HIF_EVENT_SRNG_ACCESS_START,
	HIF_EVENT_SRNG_ACCESS_END,
	HIF_EVENT_BH_COMPLETE,
	HIF_EVENT_BH_FORCE_BREAK,
	/* Do check hif_hist_skip_event_record when adding new events */
};
466 
467 /**
468  * enum hif_system_pm_state - System PM state
469  * HIF_SYSTEM_PM_STATE_ON: System in active state
470  * HIF_SYSTEM_PM_STATE_BUS_RESUMING: bus resume in progress as part of
471  *  system resume
472  * HIF_SYSTEM_PM_STATE_BUS_SUSPENDING: bus suspend in progress as part of
473  *  system suspend
474  * HIF_SYSTEM_PM_STATE_BUS_SUSPENDED: bus suspended as part of system suspend
475  */
476 enum hif_system_pm_state {
477 	HIF_SYSTEM_PM_STATE_ON,
478 	HIF_SYSTEM_PM_STATE_BUS_RESUMING,
479 	HIF_SYSTEM_PM_STATE_BUS_SUSPENDING,
480 	HIF_SYSTEM_PM_STATE_BUS_SUSPENDED,
481 };
482 
483 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
484 #define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP
485 
486 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
487 /* HIF_EVENT_HIST_MAX should always be power of 2 */
488 #define HIF_EVENT_HIST_MAX		512
489 
490 #define HIF_EVENT_HIST_ENABLE_MASK	0xFF
491 
/* Timestamp source for DP event history when CE debug is compiled in:
 * uses the qdf log timestamp.
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_get_log_timestamp();
}
496 
497 #else
498 
499 #define HIF_EVENT_HIST_MAX		32
500 /* Enable IRQ TRIGGER, NAPI SCHEDULE, SRNG ACCESS START */
501 #define HIF_EVENT_HIST_ENABLE_MASK	0x19
502 
/* Timestamp source for DP event history when CE debug is not compiled
 * in: uses the qdf scheduler clock.
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_sched_clock();
}
507 
508 #endif
509 
510 /**
511  * struct hif_event_record - an entry of the DP event history
512  * @hal_ring_id: ring id for which event is recorded
513  * @hp: head pointer of the ring (may not be applicable for all events)
514  * @tp: tail pointer of the ring (may not be applicable for all events)
515  * @cpu_id: cpu id on which the event occurred
516  * @timestamp: timestamp when event occurred
517  * @type: type of the event
518  *
519  * This structure represents the information stored for every datapath
520  * event which is logged in the history.
521  */
522 struct hif_event_record {
523 	uint8_t hal_ring_id;
524 	uint32_t hp;
525 	uint32_t tp;
526 	int cpu_id;
527 	uint64_t timestamp;
528 	enum hif_event_type type;
529 };
530 
531 /**
532  * struct hif_event_misc - history related misc info
533  * @last_irq_index: last irq event index in history
534  * @last_irq_ts: last irq timestamp
535  */
536 struct hif_event_misc {
537 	int32_t last_irq_index;
538 	uint64_t last_irq_ts;
539 };
540 
541 /**
542  * struct hif_event_history - history for one interrupt group
543  * @index: index to store new event
544  * @event: event entry
545  *
546  * This structure represents the datapath history for one
547  * interrupt group.
548  */
549 struct hif_event_history {
550 	qdf_atomic_t index;
551 	struct hif_event_misc misc;
552 	struct hif_event_record event[HIF_EVENT_HIST_MAX];
553 };
554 
555 /**
556  * hif_hist_record_event() - Record one datapath event in history
557  * @hif_ctx: HIF opaque context
558  * @event: DP event entry
559  * @intr_grp_id: interrupt group ID registered with hif
560  *
561  * Return: None
562  */
563 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
564 			   struct hif_event_record *event,
565 			   uint8_t intr_grp_id);
566 
567 /**
568  * hif_event_history_init() - Initialize SRNG event history buffers
569  * @hif_ctx: HIF opaque context
570  * @id: context group ID for which history is recorded
571  *
572  * Returns: None
573  */
574 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id);
575 
576 /**
577  * hif_event_history_deinit() - De-initialize SRNG event history buffers
578  * @hif_ctx: HIF opaque context
579  * @id: context group ID for which history is recorded
580  *
581  * Returns: None
582  */
583 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id);
584 
585 /**
586  * hif_record_event() - Wrapper function to form and record DP event
587  * @hif_ctx: HIF opaque context
588  * @intr_grp_id: interrupt group ID registered with hif
589  * @hal_ring_id: ring id for which event is recorded
590  * @hp: head pointer index of the srng
591  * @tp: tail pointer index of the srng
592  * @type: type of the event to be logged in history
593  *
594  * Return: None
595  */
596 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
597 				    uint8_t intr_grp_id,
598 				    uint8_t hal_ring_id,
599 				    uint32_t hp,
600 				    uint32_t tp,
601 				    enum hif_event_type type)
602 {
603 	struct hif_event_record event;
604 
605 	event.hal_ring_id = hal_ring_id;
606 	event.hp = hp;
607 	event.tp = tp;
608 	event.type = type;
609 
610 	hif_hist_record_event(hif_ctx, &event, intr_grp_id);
611 
612 	return;
613 }
614 
615 #else
616 
/* Stubs used when WLAN_FEATURE_DP_EVENT_HISTORY is disabled:
 * event recording and history init/deinit become no-ops.
 */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}

static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx,
					  uint8_t id)
{
}

static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx,
					    uint8_t id)
{
}
635 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
636 
637 void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx);
638 
639 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
640 void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx);
641 #else
/* No-op when CE descriptor debug history is not compiled in */
static
inline void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx) {}
644 #endif
645 
646 /**
647  * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
648  *
649  * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
650  * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
651  *                         minimize power
652  * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
653  *                         platform-specific measures to completely power-off
654  *                         the module and associated hardware (i.e. cut power
655  *                         supplies)
656  */
657 enum HIF_DEVICE_POWER_CHANGE_TYPE {
658 	HIF_DEVICE_POWER_UP,
659 	HIF_DEVICE_POWER_DOWN,
660 	HIF_DEVICE_POWER_CUT
661 };
662 
663 /**
664  * enum hif_enable_type: what triggered the enabling of hif
665  *
666  * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
667  * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
668  */
669 enum hif_enable_type {
670 	HIF_ENABLE_TYPE_PROBE,
671 	HIF_ENABLE_TYPE_REINIT,
672 	HIF_ENABLE_TYPE_MAX
673 };
674 
675 /**
676  * enum hif_disable_type: what triggered the disabling of hif
677  *
678  * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
679  * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
680  * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
681  * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
682  */
683 enum hif_disable_type {
684 	HIF_DISABLE_TYPE_PROBE_ERROR,
685 	HIF_DISABLE_TYPE_REINIT_ERROR,
686 	HIF_DISABLE_TYPE_REMOVE,
687 	HIF_DISABLE_TYPE_SHUTDOWN,
688 	HIF_DISABLE_TYPE_MAX
689 };
690 /**
691  * enum hif_device_config_opcode: configure mode
692  *
693  * @HIF_DEVICE_POWER_STATE: device power state
694  * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
695  * @HIF_DEVICE_GET_ADDR: get block address
696  * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
697  * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
698  * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
699  * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
700  * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
701  * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
702  * @HIF_DEVICE_GET_OS_DEVICE: get OS device
703  * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
704  * @HIF_BMI_DONE: bmi done
705  * @HIF_DEVICE_SET_TARGET_TYPE: set target type
706  * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
707  * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
708  */
709 enum hif_device_config_opcode {
710 	HIF_DEVICE_POWER_STATE = 0,
711 	HIF_DEVICE_GET_BLOCK_SIZE,
712 	HIF_DEVICE_GET_FIFO_ADDR,
713 	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
714 	HIF_DEVICE_GET_IRQ_PROC_MODE,
715 	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
716 	HIF_DEVICE_POWER_STATE_CHANGE,
717 	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
718 	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
719 	HIF_DEVICE_GET_OS_DEVICE,
720 	HIF_DEVICE_DEBUG_BUS_STATE,
721 	HIF_BMI_DONE,
722 	HIF_DEVICE_SET_TARGET_TYPE,
723 	HIF_DEVICE_SET_HTC_CONTEXT,
724 	HIF_DEVICE_GET_HTC_CONTEXT,
725 };
726 
727 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/**
 * struct HID_ACCESS_LOG - one entry of the PCIe access debug log
 * @seqnum: sequence number of the access
 * @is_write: true for a write access, false for a read
 * @addr: address that was accessed
 * @value: value written or read
 */
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
734 #endif
735 
736 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
737 		uint32_t value);
738 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
739 
740 #define HIF_MAX_DEVICES                 1
741 /**
742  * struct htc_callbacks - Structure for HTC Callbacks methods
743  * @context:             context to pass to the dsrhandler
744  *                       note : rwCompletionHandler is provided the context
745  *                       passed to hif_read_write
746  * @rwCompletionHandler: Read / write completion handler
747  * @dsrHandler:          DSR Handler
748  */
749 struct htc_callbacks {
750 	void *context;
751 	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
752 	QDF_STATUS(*dsr_handler)(void *context);
753 };
754 
755 /**
756  * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
757  * @context: Private data context
758  * @set_recovery_in_progress: To Set Driver state for recovery in progress
759  * @is_recovery_in_progress: Query if driver state is recovery in progress
760  * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
761  * @is_driver_unloading: Query if driver is unloading.
762  * @get_bandwidth_level: Query current bandwidth level for the driver
763  * @prealloc_get_consistent_mem_unligned: get prealloc unaligned consistent mem
764  * @prealloc_put_consistent_mem_unligned: put unaligned consistent mem to pool
765  * This Structure provides callback pointer for HIF to query hdd for driver
766  * states.
767  */
768 struct hif_driver_state_callbacks {
769 	void *context;
770 	void (*set_recovery_in_progress)(void *context, uint8_t val);
771 	bool (*is_recovery_in_progress)(void *context);
772 	bool (*is_load_unload_in_progress)(void *context);
773 	bool (*is_driver_unloading)(void *context);
774 	bool (*is_target_ready)(void *context);
775 	int (*get_bandwidth_level)(void *context);
776 	void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size,
777 						       qdf_dma_addr_t *paddr,
778 						       uint32_t ring_type);
779 	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
780 };
781 
782 /* This API detaches the HTC layer from the HIF device */
783 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
784 
785 /****************************************************************/
786 /* BMI and Diag window abstraction                              */
787 /****************************************************************/
788 
789 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
790 
791 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
792 				     * handled atomically by
793 				     * DiagRead/DiagWrite
794 				     */
795 
796 #ifdef WLAN_FEATURE_BMI
797 /*
798  * API to handle HIF-specific BMI message exchanges, this API is synchronous
799  * and only allowed to be called from a context that can block (sleep)
800  */
801 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
802 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
803 				uint8_t *pSendMessage, uint32_t Length,
804 				uint8_t *pResponseMessage,
805 				uint32_t *pResponseLength, uint32_t TimeoutMS);
806 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
807 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
808 #else /* WLAN_FEATURE_BMI */
/* BMI not compiled in: callback registration is a no-op and
 * BMI is reported as never needed.
 */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}

static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
819 #endif /* WLAN_FEATURE_BMI */
820 
821 #ifdef HIF_CPU_CLEAR_AFFINITY
822 /**
823  * hif_config_irq_clear_cpu_affinity() - Remove cpu affinity of IRQ
824  * @scn: HIF handle
825  * @intr_ctxt_id: interrupt group index
826  * @cpu: CPU core to clear
827  *
828  * Return: None
829  */
830 void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
831 				       int intr_ctxt_id, int cpu);
832 #else
/* No-op when HIF_CPU_CLEAR_AFFINITY is not enabled */
static inline
void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
				       int intr_ctxt_id, int cpu)
{
}
838 #endif
839 
840 /*
841  * APIs to handle HIF specific diagnostic read accesses. These APIs are
842  * synchronous and only allowed to be called from a context that
843  * can block (sleep). They are not high performance APIs.
844  *
845  * hif_diag_read_access reads a 4 Byte aligned/length value from a
846  * Target register or memory word.
847  *
848  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
849  */
850 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
851 				uint32_t address, uint32_t *data);
852 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
853 		      uint8_t *data, int nbytes);
854 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
855 			void *ramdump_base, uint32_t address, uint32_t size);
856 /*
857  * APIs to handle HIF specific diagnostic write accesses. These APIs are
858  * synchronous and only allowed to be called from a context that
859  * can block (sleep).
860  * They are not high performance APIs.
861  *
862  * hif_diag_write_access writes a 4 Byte aligned/length value to a
863  * Target register or memory word.
864  *
865  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
866  */
867 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
868 				 uint32_t address, uint32_t data);
869 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
870 			uint32_t address, uint8_t *data, int nbytes);
871 
872 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
873 
874 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
875 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
876 
877 /*
878  * Set the FASTPATH_mode_on flag in sc, for use by data path
879  */
880 #ifdef WLAN_FEATURE_FASTPATH
881 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
882 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
883 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
884 
885 /**
886  * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
887  * @handler: Callback funtcion
888  * @context: handle for callback function
889  *
890  * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
891  */
892 QDF_STATUS hif_ce_fastpath_cb_register(
893 		struct hif_opaque_softc *hif_ctx,
894 		fastpath_msg_handler handler, void *context);
895 #else
/* Fastpath not compiled in: registration always fails and
 * there is no CE handle to return.
 */
static inline QDF_STATUS hif_ce_fastpath_cb_register(
		struct hif_opaque_softc *hif_ctx,
		fastpath_msg_handler handler, void *context)
{
	return QDF_STATUS_E_FAILURE;
}

static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
907 
908 #endif
909 
910 /*
911  * Enable/disable CDC max performance workaround
912  * For max-performace set this to 0
913  * To allow SoC to enter sleep set this to 1
914  */
915 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
916 
917 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
918 			     qdf_shared_mem_t **ce_sr,
919 			     uint32_t *ce_sr_ring_size,
920 			     qdf_dma_addr_t *ce_reg_paddr);
921 
922 /**
923  * @brief List of callbacks - filled in by HTC.
924  */
925 struct hif_msg_callbacks {
926 	void *Context;
927 	/**< context meaningful to HTC */
928 	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
929 					uint32_t transferID,
930 					uint32_t toeplitz_hash_result);
931 	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
932 					uint8_t pipeID);
933 	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
934 	void (*fwEventHandler)(void *context, QDF_STATUS status);
935 	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
936 };
937 
/* Connection/reset state of the target as reported through HIF */
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,  /* target got reset */
	TARGET_STATUS_EJECT,  /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
944 
945 /**
946  * enum hif_attribute_flags: configure hif
947  *
948  * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
949  * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
950  *  							+ No pktlog CE
951  */
952 enum hif_attribute_flags {
953 	HIF_LOWDESC_CE_CFG = 1,
954 	HIF_LOWDESC_CE_NO_PKTLOG_CFG
955 };
956 
/*
 * Helpers that OR a bit-field into the per-packet data attribute word
 * @attr at a fixed bit offset; the value @v is masked to the field width
 * first.  Both macro arguments are fully parenthesized so callers may
 * pass arbitrary expressions (e.g. conditional expressions) without
 * operator-precedence mis-expansion.
 */
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	((attr) |= ((v) & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	((attr) |= ((v) & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	((attr) |= ((v) & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	((attr) |= ((v) & 0x01) << 30)
971 
/**
 * struct hif_ul_pipe_info - state of an upload (host->target) CE pipe
 * @nentries: number of ring entries
 * @nentries_mask: mask used for wrapping ring indices
 * @sw_index: software ring index
 * @write_index: cached copy of the write index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (DMA) address space
 */
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
981 
/**
 * struct hif_dl_pipe_info - state of a download (target->host) CE pipe
 * @nentries: number of ring entries
 * @nentries_mask: mask used for wrapping ring indices
 * @sw_index: software ring index
 * @write_index: cached copy of the write index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (DMA) address space
 */
struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
991 
/**
 * struct hif_pipe_addl_info - additional info for a CE pipe
 * @pci_mem: PCI memory address
 * @ctrl_addr: CE control register address
 * @ul_pipe: upload pipe state
 * @dl_pipe: download pipe state
 */
struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
998 
/* Message flush batch size: smaller on SLUB-debug builds than perf builds */
#ifdef CONFIG_SLUB_DEBUG_ON
#define MSG_FLUSH_NUM 16
#else /* PERF build */
#define MSG_FLUSH_NUM 32
#endif /* SLUB_DEBUG_ON */
1004 
1005 struct hif_bus_id;
1006 
1007 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
1008 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
1009 		     int opcode, void *config, uint32_t config_len);
1010 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
1011 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
1012 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
1013 		   struct hif_msg_callbacks *callbacks);
1014 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
1015 void hif_stop(struct hif_opaque_softc *hif_ctx);
1016 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
1017 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
1018 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
1019 		      uint8_t cmd_id, bool start);
1020 
1021 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1022 				  uint32_t transferID, uint32_t nbytes,
1023 				  qdf_nbuf_t wbuf, uint32_t data_attr);
1024 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1025 			     int force);
1026 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1027 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
1028 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
1029 			  uint8_t *DLPipe);
1030 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
1031 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
1032 			int *dl_is_polled);
1033 uint16_t
1034 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1035 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
1036 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
1037 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
1038 		     bool wait_for_it);
1039 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
#ifndef HIF_PCI
/* hif_check_soc_status() is implemented only for PCI; stub reports success */
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
#else
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif
1048 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1049 			u32 *revision, const char **target_name);
1050 
1051 #ifdef RECEIVE_OFFLOAD
1052 /**
1053  * hif_offld_flush_cb_register() - Register the offld flush callback
1054  * @scn: HIF opaque context
1055  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
1056  *			 Or GRO/LRO flush when RxThread is not enabled. Called
1057  *			 with corresponding context for flush.
1058  * Return: None
1059  */
1060 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
1061 				 void (offld_flush_handler)(void *ol_ctx));
1062 
1063 /**
1064  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
1065  * @scn: HIF opaque context
1066  *
1067  * Return: None
1068  */
1069 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
1070 #endif
1071 
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * hif_exec_should_yield() - Check if hif napi context should yield
 * @hif_ctx: HIF opaque context
 * @grp_id: grp_id of the napi for which check needs to be done
 *
 * The function uses grp_id to look for NAPI and checks if NAPI needs to
 * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
 * yield decision.
 *
 * Return: true if NAPI needs to yield, else false
 */
bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
#else
/* Feature disabled: NAPI contexts never yield */
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
					 uint grp_id)
{
	return false;
}
#endif
1092 
1093 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
1094 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
1095 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
1096 				      int htc_htt_tx_endpoint);
1097 
1098 /**
1099  * hif_open() - Create hif handle
1100  * @qdf_ctx: qdf context
1101  * @mode: Driver Mode
1102  * @bus_type: Bus Type
1103  * @cbk: CDS Callbacks
1104  * @psoc: psoc object manager
1105  *
1106  * API to open HIF Context
1107  *
1108  * Return: HIF Opaque Pointer
1109  */
1110 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1111 				  uint32_t mode,
1112 				  enum qdf_bus_type bus_type,
1113 				  struct hif_driver_state_callbacks *cbk,
1114 				  struct wlan_objmgr_psoc *psoc);
1115 
1116 /**
1117  * hif_init_dma_mask() - Set dma mask for the dev
1118  * @dev: dev for which DMA mask is to be set
1119  * @bus_type: bus type for the target
1120  *
1121  * This API sets the DMA mask for the device. before the datapath
1122  * memory pre-allocation is done. If the DMA mask is not set before
1123  * requesting the DMA memory, kernel defaults to a 32-bit DMA mask,
1124  * and does not utilize the full device capability.
1125  *
1126  * Return: 0 - success, non-zero on failure.
1127  */
1128 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type);
1129 void hif_close(struct hif_opaque_softc *hif_ctx);
1130 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1131 		      void *bdev, const struct hif_bus_id *bid,
1132 		      enum qdf_bus_type bus_type,
1133 		      enum hif_enable_type type);
1134 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
1135 #ifdef CE_TASKLET_DEBUG_ENABLE
1136 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
1137 				 uint8_t value);
1138 #endif
1139 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
1140 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
1141 
1142 /**
1143  * enum hif_pm_wake_irq_type - Wake interrupt type for Power Management
1144  * HIF_PM_INVALID_WAKE: Wake irq is invalid or not configured
1145  * HIF_PM_MSI_WAKE: Wake irq is MSI interrupt
1146  * HIF_PM_CE_WAKE: Wake irq is CE interrupt
1147  */
1148 typedef enum {
1149 	HIF_PM_INVALID_WAKE,
1150 	HIF_PM_MSI_WAKE,
1151 	HIF_PM_CE_WAKE,
1152 } hif_pm_wake_irq_type;
1153 
1154 /**
1155  * hif_pm_get_wake_irq_type - Get wake irq type for Power Management
1156  * @hif_ctx: HIF context
1157  *
1158  * Return: enum hif_pm_wake_irq_type
1159  */
1160 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx);
1161 
1162 /**
1163  * enum hif_ep_vote_type - hif ep vote type
1164  * HIF_EP_VOTE_DP_ACCESS: vote type is specific DP
1165  * HIF_EP_VOTE_NONDP_ACCESS: ep vote for over all access
1166  */
1167 enum hif_ep_vote_type {
1168 	HIF_EP_VOTE_DP_ACCESS,
1169 	HIF_EP_VOTE_NONDP_ACCESS
1170 };
1171 
1172 /**
1173  * enum hif_ep_vote_access - hif ep vote access
1174  * HIF_EP_VOTE_ACCESS_ENABLE: Enable ep voting
1175  * HIF_EP_VOTE_INTERMEDIATE_ACCESS: allow during transistion
1176  * HIF_EP_VOTE_ACCESS_DISABLE: disable ep voting
1177  */
1178 enum hif_ep_vote_access {
1179 	HIF_EP_VOTE_ACCESS_ENABLE,
1180 	HIF_EP_VOTE_INTERMEDIATE_ACCESS,
1181 	HIF_EP_VOTE_ACCESS_DISABLE
1182 };
1183 
1184 /**
1185  * enum hif_rpm_id - modules registered with runtime pm module
1186  * @HIF_RTPM_ID_RESERVED: Reserved ID
1187  * @HIF_RTPM_ID_HAL_REO_CMD: HAL REO commands
1188  * @HIF_RTPM_ID_WMI: WMI commands Tx
1189  * @HIF_RTPM_ID_HTT: HTT commands Tx
1190  * @HIF_RTPM_ID_DP_TX: Datapath Tx path
1191  * @HIF_RTPM_ID_DP_RING_STATS: Datapath ring stats
1192  * @HIF_RTPM_ID_CE_SEND_FAST: CE Tx buffer posting
1193  * @HIF_RTPM_ID_FORCE_WAKE: Force wake request
1194  * @HIF_RTPM_ID_PREVENT_LINKDOWN: Prevent linkdown by not allowing runtime PM
1195  * @HIF_RTPM_ID_PREVENT_ALLOW_LOCK: Generic ID for runtime PM lock contexts
1196  * @HIF_RTPM_ID_MAX: Max id
1197  */
1198 enum  hif_rtpm_client_id {
1199 	HIF_RTPM_ID_RESERVED,
1200 	HIF_RTPM_ID_HAL_REO_CMD,
1201 	HIF_RTPM_ID_WMI,
1202 	HIF_RTPM_ID_HTT,
1203 	HIF_RTPM_ID_DP,
1204 	HIF_RTPM_ID_DP_RING_STATS,
1205 	HIF_RTPM_ID_CE,
1206 	HIF_RTPM_ID_FORCE_WAKE,
1207 	HIF_RTPM_ID_PM_QOS_NOTIFY,
1208 	HIF_RTPM_ID_WIPHY_SUSPEND,
1209 	HIF_RTPM_ID_MAX
1210 };
1211 
1212 /**
1213  * enum hif_rpm_type - Get and Put calls types
1214  * HIF_RTPM_GET_ASYNC: Increment usage count and when system is suspended
1215  *		      schedule resume process, return depends on pm state.
1216  * HIF_RTPM_GET_FORCE: Increment usage count and when system is suspended
1217  *		      shedule resume process, returns success irrespective of
1218  *		      pm_state.
1219  * HIF_RTPM_GET_SYNC: Increment usage count and when system is suspended,
1220  *		     wait till process is resumed.
1221  * HIF_RTPM_GET_NORESUME: Only increments usage count.
1222  * HIF_RTPM_PUT_ASYNC: Decrements usage count and puts system in idle state.
1223  * HIF_RTPM_PUT_SYNC_SUSPEND: Decrements usage count and puts system in
1224  *			     suspended state.
1225  * HIF_RTPM_PUT_NOIDLE: Decrements usage count.
1226  */
1227 enum rpm_type {
1228 	HIF_RTPM_GET_ASYNC,
1229 	HIF_RTPM_GET_FORCE,
1230 	HIF_RTPM_GET_SYNC,
1231 	HIF_RTPM_GET_NORESUME,
1232 	HIF_RTPM_PUT_ASYNC,
1233 	HIF_RTPM_PUT_SYNC_SUSPEND,
1234 	HIF_RTPM_PUT_NOIDLE,
1235 };
1236 
1237 /**
1238  * struct hif_pm_runtime_lock - data structure for preventing runtime suspend
1239  * @list - global list of runtime locks
1240  * @active - true if this lock is preventing suspend
1241  * @name - character string for tracking this lock
1242  */
1243 struct hif_pm_runtime_lock {
1244 	struct list_head list;
1245 	bool active;
1246 	const char *name;
1247 };
1248 
1249 #ifdef FEATURE_RUNTIME_PM
1250 /**
1251  * hif_rtpm_register() - Register a module with runtime PM.
1252  * @id: ID of the module which needs to be registered
1253  * @hif_rpm_cbk: callback to be called when get was called in suspended state.
1254  * @prevent_multiple_get: not allow simultaneous get calls or put calls
1255  *
1256  * Return: success status if successfully registered
1257  */
1258 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void));
1259 
1260 /**
1261  * hif_rtpm_deregister() - Deregister the module
1262  * @id: ID of the module which needs to be de-registered
1263  */
1264 QDF_STATUS hif_rtpm_deregister(uint32_t id);
1265 
1266 /**
1267  * hif_runtime_lock_init() - API to initialize Runtime PM context
1268  * @lock: QDF lock context
1269  * @name: Context name
1270  *
1271  * This API initializes the Runtime PM context of the caller and
1272  * return the pointer.
1273  *
1274  * Return: None
1275  */
1276 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
1277 
1278 /**
1279  * hif_runtime_lock_deinit() - This API frees the runtime pm context
1280  * @data: Runtime PM context
1281  *
1282  * Return: void
1283  */
1284 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data);
1285 
1286 /**
1287  * hif_rtpm_get() - Increment usage_count on the device to avoid suspend.
1288  * @type: get call types from hif_rpm_type
1289  * @id: ID of the module calling get()
1290  *
1291  * A get operation will prevent a runtime suspend until a
1292  * corresponding put is done.  This api should be used when accessing bus.
1293  *
1294  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
1295  * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!!
1296  *
1297  * return: success if a get has been issued, else error code.
1298  */
1299 QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id);
1300 
1301 /**
1302  * hif_pm_runtime_put() - do a put operation on the device
1303  * @type: put call types from hif_rpm_type
1304  * @id: ID of the module calling put()
1305  *
1306  * A put operation will allow a runtime suspend after a corresponding
1307  * get was done.  This api should be used when finished accessing bus.
1308  *
1309  * This api will return a failure if runtime pm is stopped
1310  * This api will return failure if it would decrement the usage count below 0.
1311  *
1312  * return: QDF_STATUS_SUCCESS if the put is performed
1313  */
1314 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id);
1315 
1316 /**
1317  * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend
1318  * @data: runtime PM lock
1319  *
1320  * This function will prevent runtime suspend, by incrementing
1321  * device's usage count.
1322  *
1323  * Return: status
1324  */
1325 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data);
1326 
1327 /**
1328  * hif_pm_runtime_prevent_suspend_sync() - Synchronized prevent Runtime suspend
1329  * @data: runtime PM lock
1330  *
1331  * This function will prevent runtime suspend, by incrementing
1332  * device's usage count.
1333  *
1334  * Return: status
1335  */
1336 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data);
1337 
1338 /**
1339  * hif_pm_runtime_allow_suspend() - Allow Runtime suspend
1340  * @data: runtime PM lock
1341  *
1342  * This function will allow runtime suspend, by decrementing
1343  * device's usage count.
1344  *
1345  * Return: status
1346  */
1347 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data);
1348 
1349 /**
1350  * hif_rtpm_request_resume() - Request resume if bus is suspended
1351  *
1352  * Return: None
1353  */
1354 void hif_rtpm_request_resume(void);
1355 
1356 /**
1357  * hif_rtpm_sync_resume() - Invoke synchronous runtime resume.
1358  *
1359  * This function will invoke synchronous runtime resume.
1360  *
1361  * Return: status
1362  */
1363 QDF_STATUS hif_rtpm_sync_resume(void);
1364 
1365 /**
1366  * hif_rtpm_check_and_request_resume() - check if bus is suspended and
1367  *                                       request resume.
1368  *
1369  * Return: void
1370  */
1371 void hif_rtpm_check_and_request_resume(void);
1372 
1373 /**
1374  * hif_rtpm_set_client_job() - Set job for the client.
1375  * @client_id: Client id for which job needs to be set
1376  *
1377  * If get failed due to system being in suspended state, set the client job so
1378  * when system resumes the client's job is called.
1379  *
1380  * Return: None
1381  */
1382 void hif_rtpm_set_client_job(uint32_t client_id);
1383 
1384 /**
1385  * hif_rtpm_mark_last_busy() - Mark last busy to delay retry to suspend
1386  * @id: ID marking last busy
1387  *
1388  * Return: None
1389  */
1390 void hif_rtpm_mark_last_busy(uint32_t id);
1391 
1392 /**
1393  * hif_rtpm_get_monitor_wake_intr() - API to get monitor_wake_intr
1394  *
1395  * monitor_wake_intr variable can be used to indicate if driver expects wake
1396  * MSI for runtime PM
1397  *
1398  * Return: monitor_wake_intr variable
1399  */
1400 int hif_rtpm_get_monitor_wake_intr(void);
1401 
1402 /**
1403  * hif_rtpm_set_monitor_wake_intr() - API to set monitor_wake_intr
1404  * @val: value to set
1405  *
1406  * monitor_wake_intr variable can be used to indicate if driver expects wake
1407  * MSI for runtime PM
1408  *
1409  * Return: void
1410  */
1411 void hif_rtpm_set_monitor_wake_intr(int val);
1412 
1413 /**
1414  * hif_pre_runtime_suspend() - book keeping before beginning runtime suspend.
1415  * @hif_ctx: HIF context
1416  *
1417  * Makes sure that the pci link will be taken down by the suspend opperation.
1418  * If the hif layer is configured to leave the bus on, runtime suspend will
1419  * not save any power.
1420  *
1421  * Set the runtime suspend state to SUSPENDING.
1422  *
1423  * return -EINVAL if the bus won't go down.  otherwise return 0
1424  */
1425 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1426 
1427 /**
1428  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
1429  *
1430  * update the runtime pm state to RESUMING.
1431  * Return: void
1432  */
1433 void hif_pre_runtime_resume(void);
1434 
1435 /**
1436  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
1437  *
1438  * Record the success.
1439  * update the runtime_pm state to SUSPENDED
1440  * Return: void
1441  */
1442 void hif_process_runtime_suspend_success(void);
1443 
1444 /**
1445  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
1446  *
1447  * Record the failure.
1448  * mark last busy to delay a retry.
1449  * update the runtime_pm state back to ON
1450  *
1451  * Return: void
1452  */
1453 void hif_process_runtime_suspend_failure(void);
1454 
1455 /**
1456  * hif_process_runtime_suspend_failure() - bookkeeping of resuming link up
1457  *
1458  * update the runtime_pm state to RESUMING_LINKUP
1459  * Return: void
1460  */
1461 void hif_process_runtime_resume_linkup(void);
1462 
1463 /**
1464  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
1465  *
1466  * record the success.
1467  * update the runtime_pm state to SUSPENDED
1468  * Return: void
1469  */
1470 void hif_process_runtime_resume_success(void);
1471 
1472 /**
1473  * hif_rtpm_print_prevent_list() - list the clients preventing suspend.
1474  *
1475  * Return: None
1476  */
1477 void hif_rtpm_print_prevent_list(void);
1478 
1479 /**
1480  * hif_rtpm_suspend_lock() - spin_lock on marking runtime suspend
1481  *
1482  * Return: void
1483  */
1484 void hif_rtpm_suspend_lock(void);
1485 
1486 /**
1487  * hif_rtpm_suspend_unlock() - spin_unlock on marking runtime suspend
1488  *
1489  * Return: void
1490  */
1491 void hif_rtpm_suspend_unlock(void);
1492 
1493 /**
1494  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
1495  * @hif_ctx: HIF context
1496  *
1497  * Return: 0 for success and non-zero error code for failure
1498  */
1499 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1500 
1501 /**
1502  * hif_runtime_resume() - do the bus resume part of a runtime resume
1503  * @hif_ctx: HIF context
1504  *
1505  * Return: 0 for success and non-zero error code for failure
1506  */
1507 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
1508 
1509 /**
1510  * hif_fastpath_resume() - resume fastpath for runtimepm
1511  * @hif_ctx: HIF context
1512  *
1513  * ensure that the fastpath write index register is up to date
1514  * since runtime pm may cause ce_send_fast to skip the register
1515  * write.
1516  *
1517  * fastpath only applicable to legacy copy engine
1518  */
1519 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
1520 
1521 /**
1522  * hif_rtpm_get_state(): get rtpm link state
1523  *
1524  * Return: state
1525  */
1526 int hif_rtpm_get_state(void);
#else
/*
 * FEATURE_RUNTIME_PM disabled: the runtime-PM entry points below become
 * inline no-ops reporting success, so callers compile unconditionally.
 */
static inline
QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void))
{ return QDF_STATUS_SUCCESS; }

static inline
QDF_STATUS hif_rtpm_deregister(uint32_t id)
{ return QDF_STATUS_SUCCESS; }

static inline
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
{ return 0; }

static inline
void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data)
{}
1543 
1544 static inline
1545 int hif_rtpm_get(uint8_t type, uint32_t id)
1546 { return QDF_STATUS_SUCCESS; }
1547 
1548 static inline
1549 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
1550 { return QDF_STATUS_SUCCESS; }
1551 
1552 static inline
1553 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data)
1554 { return 0; }
1555 
1556 static inline
1557 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data)
1558 { return 0; }
1559 
1560 static inline
1561 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data)
1562 { return 0; }
1563 
1564 static inline
1565 QDF_STATUS hif_rtpm_sync_resume(void)
1566 { return QDF_STATUS_SUCCESS; }
1567 
1568 static inline
1569 void hif_rtpm_request_resume(void)
1570 {}
1571 
1572 static inline
1573 void hif_rtpm_check_and_request_resume(void)
1574 {}
1575 
1576 static inline
1577 void hif_rtpm_set_client_job(uint32_t client_id)
1578 {}
1579 
1580 static inline
1581 void hif_rtpm_print_prevent_list(void)
1582 {}
1583 
1584 static inline
1585 void hif_rtpm_suspend_unlock(void)
1586 {}
1587 
1588 static inline
1589 void hif_rtpm_suspend_lock(void)
1590 {}
1591 
1592 static inline
1593 int hif_rtpm_get_monitor_wake_intr(void)
1594 { return 0; }
1595 
1596 static inline
1597 void hif_rtpm_set_monitor_wake_intr(int val)
1598 {}
1599 
1600 static inline
1601 void hif_rtpm_mark_last_busy(uint32_t id)
1602 {}
1603 #endif
1604 
1605 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
1606 				 bool is_packet_log_enabled);
1607 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
1608 
1609 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
1610 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
1611 
1612 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
1613 
1614 #ifdef IPA_OFFLOAD
1615 /**
1616  * hif_get_ipa_hw_type() - get IPA hw type
1617  *
1618  * This API return the IPA hw type.
1619  *
1620  * Return: IPA hw type
1621  */
1622 static inline
1623 enum ipa_hw_type hif_get_ipa_hw_type(void)
1624 {
1625 	return ipa_get_hw_type();
1626 }
1627 
1628 /**
1629  * hif_get_ipa_present() - get IPA hw status
1630  *
1631  * This API return the IPA hw status.
1632  *
1633  * Return: true if IPA is present or false otherwise
1634  */
1635 static inline
1636 bool hif_get_ipa_present(void)
1637 {
1638 	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
1639 		return true;
1640 	else
1641 		return false;
1642 }
1643 #endif
1644 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
1645 /**
1646  * hif_bus_ealry_suspend() - stop non wmi tx traffic
1647  * @context: hif context
1648  */
1649 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
1650 
1651 /**
1652  * hif_bus_late_resume() - resume non wmi traffic
1653  * @context: hif context
1654  */
1655 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
1656 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
1657 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
1658 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
1659 
1660 /**
1661  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
1662  * @hif_ctx: an opaque HIF handle to use
1663  *
1664  * As opposed to the standard hif_irq_enable, this function always applies to
1665  * the APPS side kernel interrupt handling.
1666  *
1667  * Return: errno
1668  */
1669 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
1670 
1671 /**
1672  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
1673  * @hif_ctx: an opaque HIF handle to use
1674  *
1675  * As opposed to the standard hif_irq_disable, this function always applies to
1676  * the APPS side kernel interrupt handling.
1677  *
1678  * Return: errno
1679  */
1680 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
1681 
1682 /**
1683  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
1684  * @hif_ctx: an opaque HIF handle to use
1685  *
1686  * As opposed to the standard hif_irq_enable, this function always applies to
1687  * the APPS side kernel interrupt handling.
1688  *
1689  * Return: errno
1690  */
1691 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
1692 
1693 /**
1694  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
1695  * @hif_ctx: an opaque HIF handle to use
1696  *
1697  * As opposed to the standard hif_irq_disable, this function always applies to
1698  * the APPS side kernel interrupt handling.
1699  *
1700  * Return: errno
1701  */
1702 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
1703 
1704 /**
1705  * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side
1706  * @hif_ctx: an opaque HIF handle to use
1707  *
1708  * This function always applies to the APPS side kernel interrupt handling
1709  * to wake the system from suspend.
1710  *
1711  * Return: errno
1712  */
1713 int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx);
1714 
1715 /**
1716  * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side
1717  * @hif_ctx: an opaque HIF handle to use
1718  *
1719  * This function always applies to the APPS side kernel interrupt handling
1720  * to disable the wake irq.
1721  *
1722  * Return: errno
1723  */
1724 int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx);
1725 
1726 /**
1727  * hif_apps_enable_irqs_except_wake_irq() - Enables all irqs except wake_irq
1728  * @hif_ctx: an opaque HIF handle to use
1729  *
1730  * As opposed to the standard hif_irq_enable, this function always applies to
1731  * the APPS side kernel interrupt handling.
1732  *
1733  * Return: errno
1734  */
1735 int hif_apps_enable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1736 
1737 /**
1738  * hif_apps_disable_irqs_except_wake_irq() - Disables all irqs except wake_irq
1739  * @hif_ctx: an opaque HIF handle to use
1740  *
1741  * As opposed to the standard hif_irq_disable, this function always applies to
1742  * the APPS side kernel interrupt handling.
1743  *
1744  * Return: errno
1745  */
1746 int hif_apps_disable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1747 
1748 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
1749 int hif_dump_registers(struct hif_opaque_softc *scn);
1750 int ol_copy_ramdump(struct hif_opaque_softc *scn);
1751 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
1752 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1753 		     u32 *revision, const char **target_name);
1754 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
1755 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
1756 						   scn);
1757 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
1758 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
1759 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
1760 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1761 			   hif_target_status);
1762 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1763 			 struct hif_config_info *cfg);
1764 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
1765 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1766 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
1767 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1768 			   uint32_t transfer_id, u_int32_t len);
1769 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
1770 	uint32_t transfer_id, uint32_t download_len);
1771 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
1772 void hif_ce_war_disable(void);
1773 void hif_ce_war_enable(void);
1774 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
1775 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
1776 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
1777 		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
1778 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
1779 		uint32_t pipe_num);
1780 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
1781 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
1782 
1783 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
1784 				int rx_bundle_cnt);
1785 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
1786 
1787 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
1788 
1789 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
1790 
/**
 * enum hif_exec_type - bottom-half execution context for ext group interrupts
 * @HIF_EXEC_NAPI_TYPE: handler runs in NAPI context
 * @HIF_EXEC_TASKLET_TYPE: handler runs in tasklet context
 */
enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,
	HIF_EXEC_TASKLET_TYPE,
};
1795 
1796 typedef uint32_t (*ext_intr_handler)(void *, uint32_t, int);
1797 
1798 /**
1799  * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
1800  * @softc: hif opaque context owning the exec context
1801  * @id: the id of the interrupt context
1802  *
1803  * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
1804  *         'id' registered with the OS
1805  */
1806 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
1807 				uint8_t id);
1808 
1809 /**
1810  * hif_configure_ext_group_interrupts() - Congigure ext group intrrupts
1811  * @hif_ctx: hif opaque context
1812  *
1813  * Return: QDF_STATUS
1814  */
1815 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1816 
1817 /**
1818  * hif_deconfigure_ext_group_interrupts() - Deconfigure ext group intrrupts
1819  * @hif_ctx: hif opaque context
1820  *
1821  * Return: None
1822  */
1823 void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1824 
1825 /**
1826  * hif_register_ext_group() - API to register external group
1827  * interrupt handler.
1828  * @hif_ctx : HIF Context
1829  * @numirq: number of irq's in the group
1830  * @irq: array of irq values
1831  * @handler: callback interrupt handler function
1832  * @cb_ctx: context to passed in callback
1833  * @type: napi vs tasklet
1834  *
1835  * Return: QDF_STATUS
1836  */
1837 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
1838 				  uint32_t numirq, uint32_t irq[],
1839 				  ext_intr_handler handler,
1840 				  void *cb_ctx, const char *context_name,
1841 				  enum hif_exec_type type, uint32_t scale);
1842 
1843 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
1844 				const char *context_name);
1845 
1846 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1847 				u_int8_t pipeid,
1848 				struct hif_msg_callbacks *callbacks);
1849 
1850 /**
1851  * hif_print_napi_stats() - Display HIF NAPI stats
1852  * @hif_ctx - HIF opaque context
1853  *
1854  * Return: None
1855  */
1856 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
1857 
/**
 * hif_clear_napi_stats() - function clears the stats of the
 * latency when called.
 * @hif_ctx: the HIF context to assign the callback to
 *
 * Return: None
 */
1864 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
1865 
1866 #ifdef __cplusplus
1867 }
1868 #endif
1869 
1870 #ifdef FORCE_WAKE
/**
 * hif_force_wake_request() - Function to wake from power collapse
 * @handle: HIF opaque handle
 *
 * Description: API to check if the device is awake or not before
 * read/write to BAR + 4K registers. If device is awake return
 * success otherwise write '1' to
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG which will interrupt
 * the device, waking up both PCI and MHI within 50ms,
 * and then the device writes a value to
 * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
 * handshake process to let the host know the device is awake.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_request(struct hif_opaque_softc *handle);

/**
 * hif_force_wake_release() - Release/reset the SOC wake register
 * @handle: HIF opaque handle
 *
 * Description: API to set the
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
 * to release the interrupt line, so it stops interrupting the device.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_release(struct hif_opaque_softc *handle);
1900 #else
/* FORCE_WAKE disabled: no wake handshake is required, so both the
 * request and the release trivially succeed.
 */
static inline int hif_force_wake_request(struct hif_opaque_softc *handle)
{
	return 0;
}

static inline int hif_force_wake_release(struct hif_opaque_softc *handle)
{
	return 0;
}
1912 #endif /* FORCE_WAKE */
1913 
1914 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
/**
 * hif_prevent_link_low_power_states() - Prevent from going to low power states
 * @hif: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);

/**
 * hif_allow_link_low_power_states() - Allow link to go to low power states
 * @hif: HIF opaque context
 *
 * Return: None
 */
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);
1930 
1931 #else
1932 
/* Delayed register writes disabled: the link power-state veto is a no-op
 * and "prevent" always reports success.
 */
static inline int
hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline void
hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
{
}
1943 #endif
1944 
/**
 * hif_get_dev_ba() - Get the device base address
 * @hif_handle: HIF opaque handle
 *
 * Return: device base address
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);

/**
 * hif_get_dev_ba_ce() - Get the copy engine base address
 * @hif_handle: HIF opaque handle
 *
 * Return: CE base address
 */
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle);

/**
 * hif_get_soc_version() - get soc major version from target info
 * @hif_handle: HIF opaque handle
 *
 * Return: version number
 */
uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle);

/**
 * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
 * @hif_ctx: the HIF context to assign the callback to
 * @callback: the callback to assign
 * @priv: the private data to pass to the callback when invoked
 *
 * Return: None
 */
void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
			       void (*callback)(void *),
			       void *priv);
/*
 * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) is the define
 * that needs to be checked here
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/* sysfs-style show/store hooks for the CE descriptor debug history
 * (signatures match sysfs conventions; buffers are user-visible text)
 */
ssize_t hif_dump_desc_trace_buf(struct device *dev,
				struct device_attribute *attr, char *buf);
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
					const char *buf, size_t size);
ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
				const char *buf, size_t size);
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
#endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
1981 
/**
 * hif_set_ce_service_max_yield_time() - sets CE service max yield time
 * @hif: hif context
 * @ce_service_max_yield_time: CE service max yield time to set
 *
 * This API stores CE service max yield time in hif context based
 * on ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint32_t ce_service_max_yield_time);

/**
 * hif_get_ce_service_max_yield_time() - get CE service max yield time
 * @hif: hif context
 *
 * This API returns CE service max yield time.
 *
 * Return: CE service max yield time
 */
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);

/**
 * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
 * @hif: hif context
 * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
 *
 * This API stores CE service max rx ind flush in hif context based
 * on ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
					 uint8_t ce_service_max_rx_ind_flush);
2018 
#ifdef OL_ATH_SMART_LOGGING
/**
 * hif_log_dump_ce() - Copy all the CE DEST ring to buf
 * @scn: HIF handle
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy Engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and buffers pointed by them in to the given buf
 *
 * Return: Current pointer in ring buffer
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
			 uint8_t *buf_init, uint32_t buf_sz,
			 uint32_t ce, uint32_t skb_sz);
#endif /* OL_ATH_SMART_LOGGING */
2038 
/**
 * hif_softc_to_hif_opaque_softc() - Convert a hif_softc handle to
 *  a hif_opaque_softc handle
 * @hif_handle: hif_softc type
 *
 * Return: hif_opaque_softc type
 */
static inline struct hif_opaque_softc *
hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
{
	return (struct hif_opaque_softc *)hif_handle;
}
2051 
#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
/* EP (endpoint) vote-access control; only compiled for IPCI targets with
 * the delayed register write feature.
 */
QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx);
void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx);
void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx);
void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			    uint8_t type, uint8_t access);
uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			       uint8_t type);
2060 #else
/* Stubs when EP vote-access tracking is compiled out: the "prevent" check
 * always succeeds, the getter always reports access enabled, and the
 * setters are no-ops.
 */
static inline QDF_STATUS
hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type, uint8_t access)
{
}

static inline uint8_t
hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type)
{
	return HIF_EP_VOTE_ACCESS_ENABLE;
}
2089 #endif
2090 
2091 #ifdef FORCE_WAKE
/**
 * hif_srng_init_phase() - Indicate srng initialization phase
 *  to avoid force wake as UMAC power collapse is not yet
 *  enabled
 * @hif_ctx: hif opaque handle
 * @init_phase: initialization phase
 *
 * Return:  None
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase);
2103 #else
/* FORCE_WAKE disabled: nothing needs to be flagged during SRNG setup. */
static inline void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
				       bool init_phase)
{
}
2109 #endif /* FORCE_WAKE */
2110 
2111 #ifdef HIF_IPCI
/**
 * hif_shutdown_notifier_cb() - Call back for shutdown notifier
 * @ctx: hif handle
 *
 * Return:  None
 */
void hif_shutdown_notifier_cb(void *ctx);
2119 #else
/* Non-IPCI targets: shutdown notifications are ignored. */
static inline void hif_shutdown_notifier_cb(void *ctx)
{
}
2124 #endif /* HIF_IPCI */
2125 
2126 #ifdef HIF_CE_LOG_INFO
/**
 * hif_log_ce_info() - API to log ce info
 * @scn: hif handle
 * @data: hang event data buffer
 * @offset: offset at which data needs to be written; presumably advanced
 *  past the bytes written — confirm against the implementation
 *
 * Return:  None
 */
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset);
2137 #else
/* CE info logging disabled: the hang-event buffer and offset are left
 * untouched.
 */
static inline void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
				   unsigned int *offset)
{
}
2143 #endif
2144 
2145 #ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * hif_config_irq_set_perf_affinity_hint() - API to set affinity
 * @hif_ctx: hif opaque handle
 *
 * Moves the WLAN IRQs to perf cores in case of defconfig builds.
 *
 * Return:  None
 */
void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx);
2157 
2158 #else
/* Perf-core affinity hints disabled: IRQ placement is left to the OS. */
static inline void
hif_config_irq_set_perf_affinity_hint(struct hif_opaque_softc *hif_ctx)
{
}
2163 #endif
2164 
/**
 * hif_apps_grp_irqs_enable() - enable ext grp irqs
 * @hif_ctx: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_apps_grp_irqs_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_grp_irqs_disable() - disable ext grp irqs
 * @hif_ctx: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_apps_grp_irqs_disable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_disable_grp_irqs() - disable ext grp irqs
 * @scn: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_disable_grp_irqs(struct hif_opaque_softc *scn);

/**
 * hif_enable_grp_irqs() - enable ext grp irqs
 * @scn: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_enable_grp_irqs(struct hif_opaque_softc *scn);

/**
 * enum hif_credit_exchange_type - credit exchange event being recorded
 * @HIF_REQUEST_CREDIT: host requested credit
 * @HIF_PROCESS_CREDIT_REPORT: host processed a credit report
 */
enum hif_credit_exchange_type {
	HIF_REQUEST_CREDIT,
	HIF_PROCESS_CREDIT_REPORT,
};

/**
 * enum hif_detect_latency_type - source checked by latency detection
 * @HIF_DETECT_TASKLET: tasklet latency
 * @HIF_DETECT_CREDIT: credit exchange latency
 * @HIF_DETECT_UNKNOWN: unspecified
 */
enum hif_detect_latency_type {
	HIF_DETECT_TASKLET,
	HIF_DETECT_CREDIT,
	HIF_DETECT_UNKNOWN
};
2207 
#ifdef HIF_DETECTION_LATENCY_ENABLE
/* Latency detection hooks (credit exchange and tasklet scheduling). */
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx);

void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx);
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx);
void hif_tasklet_latency(struct hif_softc *scn, bool from_timer);
void hif_credit_latency(struct hif_softc *scn, bool from_timer);
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type);
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value);
2221 #else
/* Latency detection disabled: the hooks below compile to no-ops. */
static inline
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx)
{}
static inline
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{}

static inline
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{}
2244 #endif
2245 
2246 #ifdef SYSTEM_PM_CHECK
/**
 * __hif_system_pm_set_state() - Set system pm state
 * @hif: hif opaque handle
 * @state: system state
 *
 * NOTE(review): the leading double underscore makes this a reserved
 * identifier in C; kept as-is since existing callers depend on the name.
 *
 * Return:  None
 */
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state);
2256 
2257 /**
2258  * hif_system_pm_set_state_on() - Set system pm state to ON
2259  * @hif: hif opaque handle
2260  *
2261  * Return:  None
2262  */
2263 static inline
2264 void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
2265 {
2266 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_ON);
2267 }
2268 
2269 /**
2270  * hif_system_pm_set_state_resuming() - Set system pm state to resuming
2271  * @hif: hif opaque handle
2272  *
2273  * Return:  None
2274  */
2275 static inline
2276 void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
2277 {
2278 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_RESUMING);
2279 }
2280 
2281 /**
2282  * hif_system_pm_set_state_suspending() - Set system pm state to suspending
2283  * @hif: hif opaque handle
2284  *
2285  * Return:  None
2286  */
2287 static inline
2288 void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
2289 {
2290 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDING);
2291 }
2292 
2293 /**
2294  * hif_system_pm_set_state_suspended() - Set system pm state to suspended
2295  * @hif: hif opaque handle
2296  *
2297  * Return:  None
2298  */
2299 static inline
2300 void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
2301 {
2302 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDED);
2303 }
2304 
/**
 * hif_system_pm_get_state() - Get system pm state
 * @hif: hif opaque handle
 *
 * Return: current system pm state (an enum hif_system_pm_state value)
 */
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif);

/**
 * hif_system_pm_state_check() - Check system state and trigger resume
 *  if required
 * @hif: hif opaque handle
 *
 * Return: 0 if system is in on state else error code
 */
int hif_system_pm_state_check(struct hif_opaque_softc *hif);
2321 #else
/*
 * SYSTEM_PM_CHECK disabled: state tracking is compiled out; the getters
 * report the "on" state (0) and the setters are no-ops.
 */
static inline
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
}

static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
}

static inline
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline int hif_system_pm_state_check(struct hif_opaque_softc *hif)
{
	return 0;
}
2358 #endif
2359 
2360 #ifdef FEATURE_IRQ_AFFINITY
/**
 * hif_set_grp_intr_affinity() - set CPU affinity for the grp intrs in bitmap
 * @scn: hif handle
 * @grp_intr_bitmask: grp intrs for which perf affinity should be
 *  applied
 * @perf: affine to perf or non-perf cluster
 *
 * Return: None
 */
void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
			       uint32_t grp_intr_bitmask, bool perf);
2373 #else
/* FEATURE_IRQ_AFFINITY disabled: affinity requests are ignored. */
static inline void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
					     uint32_t grp_intr_bitmask,
					     bool perf)
{
}
2379 #endif
/**
 * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
 * @scn: hif opaque handle
 *
 * Description:
 *   Gets number of WMI EPs configured in target svc map. Since EP map
 *   includes IN and OUT direction pipes, count only OUT pipes to get EPs
 *   configured for WMI service.
 *
 * Return:
 *  uint8_t: count for WMI eps in target svc map
 */
uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *scn);
2393 
2394 #ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * hif_register_umac_reset_handler() - Register UMAC HW reset handler
 * @hif_scn: hif opaque handle
 * @handler: callback handler function
 * @cb_ctx: context passed to @handler
 * @irq: irq number to be used for UMAC HW reset interrupt
 *
 * Return: QDF_STATUS of operation
 */
QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
					   int (*handler)(void *cb_ctx),
					   void *cb_ctx, int irq);

/**
 * hif_unregister_umac_reset_handler() - Unregister UMAC HW reset handler
 * @hif_scn: hif opaque handle
 *
 * Return: QDF_STATUS of operation
 */
QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn);
2415 #else
/* UMAC HW reset not supported: registration and unregistration trivially
 * succeed.
 */
static inline
QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
					   int (*handler)(void *cb_ctx),
					   void *cb_ctx, int irq)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
{
	return QDF_STATUS_SUCCESS;
}
2429 
2430 #endif /* DP_UMAC_HW_RESET_SUPPORT */
2431 
2432 #endif /* _HIF_H_ */
2433