xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision ea172ef154f8781b39809b88f37271c196c4c332)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _HIF_H_
21 #define _HIF_H_
22 
23 #ifdef __cplusplus
24 extern "C" {
25 #endif /* __cplusplus */
26 
27 /* Header files */
28 #include <qdf_status.h>
29 #include "qdf_nbuf.h"
30 #include "qdf_lro.h"
31 #include "ol_if_athvar.h"
32 #include <linux/platform_device.h>
33 #ifdef HIF_PCI
34 #include <linux/pci.h>
35 #endif /* HIF_PCI */
36 #ifdef HIF_USB
37 #include <linux/usb.h>
38 #endif /* HIF_USB */
39 #ifdef IPA_OFFLOAD
40 #include <linux/ipa.h>
41 #endif
42 #include "cfg_ucfg_api.h"
43 #include "qdf_dev.h"
44 #include <wlan_init_cfg.h>
45 
46 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
47 
48 typedef void __iomem *A_target_id_t;
49 typedef void *hif_handle_t;
50 
51 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
52 #define HIF_WORK_DRAIN_WAIT_CNT 50
53 
54 #define HIF_EP_WAKE_RESET_WAIT_CNT 10
55 #endif
56 
57 #define HIF_TYPE_AR6002   2
58 #define HIF_TYPE_AR6003   3
59 #define HIF_TYPE_AR6004   5
60 #define HIF_TYPE_AR9888   6
61 #define HIF_TYPE_AR6320   7
62 #define HIF_TYPE_AR6320V2 8
63 /* For attaching Peregrine 2.0 board host_reg_tbl only */
64 #define HIF_TYPE_AR9888V2 9
65 #define HIF_TYPE_ADRASTEA 10
66 #define HIF_TYPE_AR900B 11
67 #define HIF_TYPE_QCA9984 12
68 #define HIF_TYPE_QCA9888 14
69 #define HIF_TYPE_QCA8074 15
70 #define HIF_TYPE_QCA6290 16
71 #define HIF_TYPE_QCN7605 17
72 #define HIF_TYPE_QCA6390 18
73 #define HIF_TYPE_QCA8074V2 19
74 #define HIF_TYPE_QCA6018  20
75 #define HIF_TYPE_QCN9000 21
76 #define HIF_TYPE_QCA6490 22
77 #define HIF_TYPE_QCA6750 23
78 #define HIF_TYPE_QCA5018 24
79 #define HIF_TYPE_QCN6122 25
80 #define HIF_TYPE_KIWI 26
81 #define HIF_TYPE_QCN9224 27
82 #define HIF_TYPE_QCA9574 28
83 #define HIF_TYPE_MANGO 29
84 #define HIF_TYPE_QCA5332 30
85 #define HIF_TYPE_QCN9160 31
86 
87 #define DMA_COHERENT_MASK_DEFAULT   37
88 
89 #ifdef IPA_OFFLOAD
90 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
91 #endif
92 
/* enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring4,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
135 
#ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
/*
 * enum hif_legacy_pci_irq - interrupt source indices used when legacy
 * (non per-ring MSI) PCI interrupts are enabled.
 *
 * NOTE(review): "reo2sw6_inrr" looks like a typo for "reo2sw6_intr",
 * but it is a public enumerator; renaming would break external users —
 * confirm before changing.
 */
enum hif_legacy_pci_irq {
	ce0,
	ce1,
	ce2,
	ce3,
	ce4,
	ce5,
	ce6,
	ce7,
	ce8,
	ce9,
	ce10,
	ce11,
	ce12,
	ce13,
	ce14,
	ce15,
	reo2sw8_intr2,
	reo2sw7_intr2,
	reo2sw6_intr2,
	reo2sw5_intr2,
	reo2sw4_intr2,
	reo2sw3_intr2,
	reo2sw2_intr2,
	reo2sw1_intr2,
	reo2sw0_intr2,
	reo2sw8_intr,
	reo2sw7_intr,
	reo2sw6_inrr,
	reo2sw5_intr,
	reo2sw4_intr,
	reo2sw3_intr,
	reo2sw2_intr,
	reo2sw1_intr,
	reo2sw0_intr,
	reo2status_intr2,
	reo_status,
	reo2rxdma_out_2,
	reo2rxdma_out_1,
	reo_cmd,
	sw2reo6,
	sw2reo5,
	sw2reo1,
	sw2reo,
	rxdma2reo_mlo_0_dst_ring1,
	rxdma2reo_mlo_0_dst_ring0,
	rxdma2reo_mlo_1_dst_ring1,
	rxdma2reo_mlo_1_dst_ring0,
	rxdma2reo_dst_ring1,
	rxdma2reo_dst_ring0,
	rxdma2sw_dst_ring1,
	rxdma2sw_dst_ring0,
	rxdma2release_dst_ring1,
	rxdma2release_dst_ring0,
	sw2rxdma_2_src_ring,
	sw2rxdma_1_src_ring,
	sw2rxdma_0,
	wbm2sw6_release2,
	wbm2sw5_release2,
	wbm2sw4_release2,
	wbm2sw3_release2,
	wbm2sw2_release2,
	wbm2sw1_release2,
	wbm2sw0_release2,
	wbm2sw6_release,
	wbm2sw5_release,
	wbm2sw4_release,
	wbm2sw3_release,
	wbm2sw2_release,
	wbm2sw1_release,
	wbm2sw0_release,
	wbm2sw_link,
	wbm_error_release,
	sw2txmon_src_ring,
	sw2rxmon_src_ring,
	txmon2sw_p1_intr1,
	txmon2sw_p1_intr0,
	txmon2sw_p0_dest1,
	txmon2sw_p0_dest0,
	rxmon2sw_p1_intr1,
	rxmon2sw_p1_intr0,
	rxmon2sw_p0_dest1,
	rxmon2sw_p0_dest0,
	sw_release,
	sw2tcl_credit2,
	sw2tcl_credit,
	sw2tcl4,
	sw2tcl5,
	sw2tcl3,
	sw2tcl2,
	sw2tcl1,
	sw2wbm1,
	misc_8,
	misc_7,
	misc_6,
	misc_5,
	misc_4,
	misc_3,
	misc_2,
	misc_1,
	misc_0,
};
#endif
240 
241 struct CE_state;
242 #ifdef QCA_WIFI_QCN9224
243 #define CE_COUNT_MAX 16
244 #else
245 #define CE_COUNT_MAX 12
246 #endif
247 
248 #ifndef HIF_MAX_GROUP
249 #define HIF_MAX_GROUP WLAN_CFG_INT_NUM_CONTEXTS
250 #endif
251 
252 #ifdef CONFIG_BERYLLIUM
253 #define HIF_MAX_GRP_IRQ 25
254 #else
255 #define HIF_MAX_GRP_IRQ 16
256 #endif
257 
258 #ifndef NAPI_YIELD_BUDGET_BASED
259 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
260 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
261 #endif
262 #else  /* NAPI_YIELD_BUDGET_BASED */
263 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
264 #endif /* NAPI_YIELD_BUDGET_BASED */
265 
266 #define QCA_NAPI_BUDGET    64
267 #define QCA_NAPI_DEF_SCALE  \
268 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
269 
270 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
271 /* NOTE: "napi->scale" can be changed,
272  * but this does not change the number of buckets
273  */
274 #define QCA_NAPI_NUM_BUCKETS 4
275 
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is re-enabled
 * @napi_workdone: cumulative of all work done reported by handler
 * @cpu_corrected: incremented when execution context runs on a different core
 *			than the one that its irq is affined to.
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: maximum single poll time observed (units per the
 *			timestamp source used by the recorder — confirm)
 * @poll_time_buckets: histogram of poll times for the napi
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};
304 
305 
/**
 * struct qca_napi_info - per NAPI instance data structure
 * @netdev: dummy net_dev backing this napi instance
 * @hif_ctx: hif context this instance belongs to
 * @napi: kernel NAPI instance
 * @scale: currently same on all instances
 * @id: instance identifier
 * @cpu: cpu associated with this instance
 * @irq: irq number serviced by this instance
 * @cpumask: cpu mask for this instance
 * @stats: per-cpu statistics
 * @offld_flush_cb: rx offload flush callback (data rx CEs only)
 * @rx_thread_napi: NAPI instance for the rx thread
 * @rx_thread_netdev: dummy net_dev for the rx thread NAPI
 * @lro_ctx: LRO context
 *
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device    netdev; /* dummy net_dev */
	void                 *hif_ctx;
	struct napi_struct   napi;
	uint8_t              scale;   /* currently same on all instances */
	uint8_t              id;
	uint8_t              cpu;
	int                  irq;
	cpumask_t            cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct   rx_thread_napi;
	struct net_device    rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t        lro_ctx;
};
331 
/* Throughput state driving napi irq-affinity / clock-voting decisions */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};
/* Hotplug state of a cpu as tracked in the napi cpu table */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };
341 
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state:       hotplug state of this core
 * @core_id:     physical core id of the core
 * @cluster_id:  cluster this core belongs to
 * @core_mask:   mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq:    maximum clock this core can be clocked at
 *               same for all cpus of the same core.
 * @napis:       bitmap of napi instances on this core
 * @execs:       bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int			core_id;
	int			cluster_id;
	cpumask_t		core_mask;
	cpumask_t		thread_mask;
	unsigned int		max_freq;
	uint32_t		napis;
	uint32_t		execs;
	int			cluster_nxt;  /* index, not pointer */
};
372 
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi stat machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: per-CE napi instance pointers, indexed by pipe id
 * @napi_cpu: cpu info for irq affinty
 * @lilcl_head: index of the head of the little-cluster core chain
 * @bigcl_head: index of the head of the big-cluster core chain
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: feature flags (semantics not defined in this header — see users)
 */
struct qca_napi_data {
	struct               hif_softc *hif_softc;
	qdf_spinlock_t       lock;
	uint32_t             state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t             ce_map;
	uint32_t             exec_map;
	uint32_t             user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu  napi_cpu[NR_CPUS];
	int                  lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t              flags;
};
405 
/**
 * struct hif_config_info - Place Holder for HIF configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable Runtime PM (only with FEATURE_RUNTIME_PM)
 * @runtime_pm_delay: Runtime PM Delay (only with FEATURE_RUNTIME_PM)
 * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
 *
 * Structure for holding HIF ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	uint8_t enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
	uint64_t rx_softirq_max_yield_duration_ns;
};
423 
/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: pointer to hardware name
 *           NOTE(review): string ownership/lifetime is not defined in this
 *           header — confirm against the code that sets it.
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};
441 
/* Opaque handle exposed to HIF clients; the real softc is private to HIF */
struct hif_opaque_softc {
};
444 
/**
 * enum hif_event_type - Type of DP events to be recorded
 * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
 * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event
 * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event
 * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
 * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
 * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
 * @HIF_EVENT_BH_COMPLETE: NAPI POLL completion event
 * @HIF_EVENT_BH_FORCE_BREAK: NAPI POLL force break event
 *
 * Event positions feed HIF_EVENT_HIST_ENABLE_MASK bit checks, so the
 * ordering here is significant.
 */
enum hif_event_type {
	HIF_EVENT_IRQ_TRIGGER,
	HIF_EVENT_TIMER_ENTRY,
	HIF_EVENT_TIMER_EXIT,
	HIF_EVENT_BH_SCHED,
	HIF_EVENT_SRNG_ACCESS_START,
	HIF_EVENT_SRNG_ACCESS_END,
	HIF_EVENT_BH_COMPLETE,
	HIF_EVENT_BH_FORCE_BREAK,
	/* Do check hif_hist_skip_event_record when adding new events */
};
467 
/**
 * enum hif_system_pm_state - System PM state
 * @HIF_SYSTEM_PM_STATE_ON: System in active state
 * @HIF_SYSTEM_PM_STATE_BUS_RESUMING: bus resume in progress as part of
 *  system resume
 * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDING: bus suspend in progress as part of
 *  system suspend
 * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDED: bus suspended as part of system suspend
 */
enum hif_system_pm_state {
	HIF_SYSTEM_PM_STATE_ON,
	HIF_SYSTEM_PM_STATE_BUS_RESUMING,
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDING,
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDED,
};
483 
484 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
485 #define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP
486 
487 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
488 /* HIF_EVENT_HIST_MAX should always be power of 2 */
489 #define HIF_EVENT_HIST_MAX		512
490 
491 #define HIF_EVENT_HIST_ENABLE_MASK	0xFF
492 
/**
 * hif_get_log_timestamp() - timestamp source for DP event history entries
 *
 * Return: value of qdf_get_log_timestamp() (debug-data builds)
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_get_log_timestamp();
}
497 
498 #else
499 
500 #define HIF_EVENT_HIST_MAX		32
501 /* Enable IRQ TRIGGER, NAPI SCHEDULE, SRNG ACCESS START */
502 #define HIF_EVENT_HIST_ENABLE_MASK	0x19
503 
/**
 * hif_get_log_timestamp() - timestamp source for DP event history entries
 *
 * Return: value of qdf_sched_clock() (non-debug builds)
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_sched_clock();
}
508 
509 #endif
510 
/**
 * struct hif_event_record - an entry of the DP event history
 * @hal_ring_id: ring id for which event is recorded
 * @hp: head pointer of the ring (may not be applicable for all events)
 * @tp: tail pointer of the ring (may not be applicable for all events)
 * @cpu_id: cpu id on which the event occurred
 * @timestamp: timestamp when event occurred
 * @type: type of the event
 *
 * This structure represents the information stored for every datapath
 * event which is logged in the history.
 */
struct hif_event_record {
	uint8_t hal_ring_id;
	uint32_t hp;
	uint32_t tp;
	int cpu_id;
	uint64_t timestamp;
	enum hif_event_type type;
};
531 
/**
 * struct hif_event_misc - history related misc info
 * @last_irq_index: last irq event index in history
 * @last_irq_ts: last irq timestamp
 */
struct hif_event_misc {
	int32_t last_irq_index;
	uint64_t last_irq_ts;
};
541 
/**
 * struct hif_event_history - history for one interrupt group
 * @index: index to store new event
 * @misc: history related misc info
 * @event: event entry
 *
 * This structure represents the datapath history for one
 * interrupt group.
 */
struct hif_event_history {
	qdf_atomic_t index;
	struct hif_event_misc misc;
	struct hif_event_record event[HIF_EVENT_HIST_MAX];
};
555 
556 /**
557  * hif_hist_record_event() - Record one datapath event in history
558  * @hif_ctx: HIF opaque context
559  * @event: DP event entry
560  * @intr_grp_id: interrupt group ID registered with hif
561  *
562  * Return: None
563  */
564 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
565 			   struct hif_event_record *event,
566 			   uint8_t intr_grp_id);
567 
568 /**
569  * hif_event_history_init() - Initialize SRNG event history buffers
570  * @hif_ctx: HIF opaque context
571  * @id: context group ID for which history is recorded
572  *
573  * Returns: None
574  */
575 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id);
576 
577 /**
578  * hif_event_history_deinit() - De-initialize SRNG event history buffers
579  * @hif_ctx: HIF opaque context
580  * @id: context group ID for which history is recorded
581  *
582  * Returns: None
583  */
584 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id);
585 
586 /**
587  * hif_record_event() - Wrapper function to form and record DP event
588  * @hif_ctx: HIF opaque context
589  * @intr_grp_id: interrupt group ID registered with hif
590  * @hal_ring_id: ring id for which event is recorded
591  * @hp: head pointer index of the srng
592  * @tp: tail pointer index of the srng
593  * @type: type of the event to be logged in history
594  *
595  * Return: None
596  */
597 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
598 				    uint8_t intr_grp_id,
599 				    uint8_t hal_ring_id,
600 				    uint32_t hp,
601 				    uint32_t tp,
602 				    enum hif_event_type type)
603 {
604 	struct hif_event_record event;
605 
606 	event.hal_ring_id = hal_ring_id;
607 	event.hp = hp;
608 	event.tp = tp;
609 	event.type = type;
610 
611 	hif_hist_record_event(hif_ctx, &event, intr_grp_id);
612 
613 	return;
614 }
615 
616 #else
617 
/* DP event history disabled: recording is a no-op */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}
626 
/* DP event history disabled: nothing to initialize */
static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx,
					  uint8_t id)
{
}
631 
/* DP event history disabled: nothing to tear down */
static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx,
					    uint8_t id)
{
}
636 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
637 
638 void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx);
639 
640 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
641 void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx);
642 #else
/* CE descriptor history compiled out: nothing to display */
static inline void
hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx)
{
}
645 #endif
646 
/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
 *
 * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
 *                         platform-specific measures to completely power-off
 *                         the module and associated hardware (i.e. cut power
 *                         supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};
663 
/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 * @HIF_ENABLE_TYPE_MAX: count/bound marker, not a real trigger
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};
675 
/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 * @HIF_DISABLE_TYPE_MAX: count/bound marker, not a real trigger
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get block address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
727 
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/* One register-access log entry (PCIe access debugging).
 * NOTE(review): "HID" looks like a typo for "HIF", but the tag is public —
 * confirm external users before renaming.
 */
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
#endif
736 
737 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
738 		uint32_t value);
739 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
740 
741 #define HIF_MAX_DEVICES                 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context:          context to pass to the @dsr_handler
 *                    note : @rw_compl_handler is provided the context
 *                    passed to hif_read_write
 * @rw_compl_handler: Read / write completion handler
 * @dsr_handler:      DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};
755 
/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 * @get_bandwidth_level: Query current bandwidth level for the driver
 * @prealloc_get_consistent_mem_unaligned: get prealloc unaligned consistent
 *                                         mem
 * @prealloc_put_consistent_mem_unaligned: put unaligned consistent mem to
 *                                         pool
 *
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
	int (*get_bandwidth_level)(void *context);
	void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size,
						       qdf_dma_addr_t *paddr,
						       uint32_t ring_type);
	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
};
782 
783 /* This API detaches the HTC layer from the HIF device */
784 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
785 
786 /****************************************************************/
787 /* BMI and Diag window abstraction                              */
788 /****************************************************************/
789 
790 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
791 
792 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
793 				     * handled atomically by
794 				     * DiagRead/DiagWrite
795 				     */
796 
797 #ifdef WLAN_FEATURE_BMI
798 /*
799  * API to handle HIF-specific BMI message exchanges, this API is synchronous
800  * and only allowed to be called from a context that can block (sleep)
801  */
802 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
803 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
804 				uint8_t *pSendMessage, uint32_t Length,
805 				uint8_t *pResponseMessage,
806 				uint32_t *pResponseLength, uint32_t TimeoutMS);
807 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
808 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
809 #else /* WLAN_FEATURE_BMI */
/* BMI not supported: nothing to register */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}
814 
/* BMI not supported: target never needs BMI exchange */
static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
820 #endif /* WLAN_FEATURE_BMI */
821 
822 #ifdef HIF_CPU_CLEAR_AFFINITY
823 /**
824  * hif_config_irq_clear_cpu_affinity() - Remove cpu affinity of IRQ
825  * @scn: HIF handle
826  * @intr_ctxt_id: interrupt group index
827  * @cpu: CPU core to clear
828  *
829  * Return: None
830  */
831 void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
832 				       int intr_ctxt_id, int cpu);
833 #else
/* HIF_CPU_CLEAR_AFFINITY not enabled: clearing affinity is a no-op */
static inline
void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
				       int intr_ctxt_id, int cpu)
{
}
839 #endif
840 
841 /*
842  * APIs to handle HIF specific diagnostic read accesses. These APIs are
843  * synchronous and only allowed to be called from a context that
844  * can block (sleep). They are not high performance APIs.
845  *
846  * hif_diag_read_access reads a 4 Byte aligned/length value from a
847  * Target register or memory word.
848  *
849  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
850  */
851 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
852 				uint32_t address, uint32_t *data);
853 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
854 		      uint8_t *data, int nbytes);
855 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
856 			void *ramdump_base, uint32_t address, uint32_t size);
857 /*
858  * APIs to handle HIF specific diagnostic write accesses. These APIs are
859  * synchronous and only allowed to be called from a context that
860  * can block (sleep).
861  * They are not high performance APIs.
862  *
863  * hif_diag_write_access writes a 4 Byte aligned/length value to a
864  * Target register or memory word.
865  *
866  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
867  */
868 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
869 				 uint32_t address, uint32_t data);
870 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
871 			uint32_t address, uint8_t *data, int nbytes);
872 
873 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
874 
875 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
876 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
877 
878 /*
879  * Set the FASTPATH_mode_on flag in sc, for use by data path
880  */
881 #ifdef WLAN_FEATURE_FASTPATH
882 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
883 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
884 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
885 
/**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @hif_ctx: HIF opaque context
 * @handler: Callback function
 * @context: handle for callback function
 *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
 */
893 QDF_STATUS hif_ce_fastpath_cb_register(
894 		struct hif_opaque_softc *hif_ctx,
895 		fastpath_msg_handler handler, void *context);
896 #else
/* Fastpath not compiled in: registration always fails */
static inline QDF_STATUS hif_ce_fastpath_cb_register(
		struct hif_opaque_softc *hif_ctx,
		fastpath_msg_handler handler, void *context)
{
	return QDF_STATUS_E_FAILURE;
}
903 
/* Fastpath not compiled in: no CE handle available */
static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
908 
909 #endif
910 
911 /*
912  * Enable/disable CDC max performance workaround
913  * For max-performance set this to 0
914  * To allow SoC to enter sleep set this to 1
915  */
916 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
917 
918 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
919 			     qdf_shared_mem_t **ce_sr,
920 			     uint32_t *ce_sr_ring_size,
921 			     qdf_dma_addr_t *ce_reg_paddr);
922 
/**
 * struct hif_msg_callbacks - List of callbacks - filled in by HTC.
 * @Context: context meaningful to HTC
 * @txCompletionHandler: completion for a transmitted buffer
 * @rxCompletionHandler: completion for a received buffer on @pipeID
 * @txResourceAvailHandler: tx resources available on @pipe
 * @fwEventHandler: firmware event notification
 * @update_bundle_stats: bundle statistics update
 */
struct hif_msg_callbacks {
	void *Context;
	/**< context meaningful to HTC */
	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint32_t transferID,
					uint32_t toeplitz_hash_result);
	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint8_t pipeID);
	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
	void (*fwEventHandler)(void *context, QDF_STATUS status);
	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
};
938 
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,  /* target got reset */
	TARGET_STATUS_EJECT,  /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
945 
/**
 * enum hif_attribute_flags: configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
 *  + No pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};
957 
/*
 * Helpers that build the per-packet "data attribute" word handed to the
 * copy engine: each macro ORs value @v into its bit-field position of
 * @attr.  @attr must be an lvalue; the macros never clear bits, so
 * callers are expected to start from a zeroed attribute word.
 */
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	(attr |= (v & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	(attr |= (v & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	(attr |= (v & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	(attr |= (v & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	(attr |= (v & 0x01) << 30)
972 
/**
 * struct hif_ul_pipe_info - state snapshot of an UL (upload) CE pipe
 * @nentries: number of entries in the ring
 * @nentries_mask: mask applied when wrapping ring indices
 * @sw_index: software (consumer) index
 * @write_index: cached copy of the producer index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (device) address space
 */
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
982 
/**
 * struct hif_dl_pipe_info - state snapshot of a DL (download) CE pipe
 * @nentries: number of entries in the ring
 * @nentries_mask: mask applied when wrapping ring indices
 * @sw_index: software (consumer) index
 * @write_index: cached copy of the producer index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (device) address space
 */
struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
992 
/**
 * struct hif_pipe_addl_info - additional per-pipe information for callers
 * @pci_mem: PCI memory address (assumed to be the mapped BAR base —
 *	confirm against hif_get_addl_pipe_info() implementation)
 * @ctrl_addr: CE control register address/offset for this pipe
 * @ul_pipe: upload pipe ring state
 * @dl_pipe: download pipe ring state
 */
struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
999 
/* Message flush batch size; reduced on SLUB-debug builds. */
#ifdef CONFIG_SLUB_DEBUG_ON
#define MSG_FLUSH_NUM 16
#else /* PERF build */
#define MSG_FLUSH_NUM 32
#endif /* CONFIG_SLUB_DEBUG_ON */
1005 
1006 struct hif_bus_id;
1007 
1008 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
1009 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
1010 		     int opcode, void *config, uint32_t config_len);
1011 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
1012 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
1013 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
1014 		   struct hif_msg_callbacks *callbacks);
1015 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
1016 void hif_stop(struct hif_opaque_softc *hif_ctx);
1017 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
1018 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
1019 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
1020 		      uint8_t cmd_id, bool start);
1021 
1022 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1023 				  uint32_t transferID, uint32_t nbytes,
1024 				  qdf_nbuf_t wbuf, uint32_t data_attr);
1025 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1026 			     int force);
1027 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1028 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
1029 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
1030 			  uint8_t *DLPipe);
1031 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
1032 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
1033 			int *dl_is_polled);
1034 uint16_t
1035 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1036 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
1037 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
1038 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
1039 		     bool wait_for_it);
1040 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
#ifndef HIF_PCI
/* SoC status check is only implemented for PCI; report success elsewhere. */
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
#else
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif
1049 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1050 			u32 *revision, const char **target_name);
1051 
1052 #ifdef RECEIVE_OFFLOAD
1053 /**
1054  * hif_offld_flush_cb_register() - Register the offld flush callback
1055  * @scn: HIF opaque context
1056  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
1057  *			 Or GRO/LRO flush when RxThread is not enabled. Called
1058  *			 with corresponding context for flush.
1059  * Return: None
1060  */
1061 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
1062 				 void (offld_flush_handler)(void *ol_ctx));
1063 
1064 /**
1065  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
1066  * @scn: HIF opaque context
1067  *
1068  * Return: None
1069  */
1070 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
1071 #endif
1072 
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * hif_exec_should_yield() - Check if hif napi context should yield
 * @hif_ctx: HIF opaque context
 * @grp_id: grp_id of the napi for which check needs to be done
 *
 * The function uses grp_id to look for NAPI and checks if NAPI needs to
 * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
 * yield decision.
 *
 * Return: true if NAPI needs to yield, else false
 */
bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
#else
/* Feature disabled: NAPI contexts never yield on time-limit grounds. */
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
					 uint grp_id)
{
	return false;
}
#endif
1093 
1094 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
1095 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
1096 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
1097 				      int htc_htt_tx_endpoint);
1098 
1099 /**
1100  * hif_open() - Create hif handle
1101  * @qdf_ctx: qdf context
1102  * @mode: Driver Mode
1103  * @bus_type: Bus Type
1104  * @cbk: CDS Callbacks
1105  * @psoc: psoc object manager
1106  *
1107  * API to open HIF Context
1108  *
1109  * Return: HIF Opaque Pointer
1110  */
1111 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1112 				  uint32_t mode,
1113 				  enum qdf_bus_type bus_type,
1114 				  struct hif_driver_state_callbacks *cbk,
1115 				  struct wlan_objmgr_psoc *psoc);
1116 
1117 /**
1118  * hif_init_dma_mask() - Set dma mask for the dev
1119  * @dev: dev for which DMA mask is to be set
1120  * @bus_type: bus type for the target
1121  *
1122  * This API sets the DMA mask for the device. before the datapath
1123  * memory pre-allocation is done. If the DMA mask is not set before
1124  * requesting the DMA memory, kernel defaults to a 32-bit DMA mask,
1125  * and does not utilize the full device capability.
1126  *
1127  * Return: 0 - success, non-zero on failure.
1128  */
1129 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type);
1130 void hif_close(struct hif_opaque_softc *hif_ctx);
1131 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1132 		      void *bdev, const struct hif_bus_id *bid,
1133 		      enum qdf_bus_type bus_type,
1134 		      enum hif_enable_type type);
1135 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
1136 #ifdef CE_TASKLET_DEBUG_ENABLE
1137 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
1138 				 uint8_t value);
1139 #endif
1140 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
1141 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
1142 
1143 /**
1144  * enum hif_pm_wake_irq_type - Wake interrupt type for Power Management
1145  * HIF_PM_INVALID_WAKE: Wake irq is invalid or not configured
1146  * HIF_PM_MSI_WAKE: Wake irq is MSI interrupt
1147  * HIF_PM_CE_WAKE: Wake irq is CE interrupt
1148  */
1149 typedef enum {
1150 	HIF_PM_INVALID_WAKE,
1151 	HIF_PM_MSI_WAKE,
1152 	HIF_PM_CE_WAKE,
1153 } hif_pm_wake_irq_type;
1154 
1155 /**
1156  * hif_pm_get_wake_irq_type - Get wake irq type for Power Management
1157  * @hif_ctx: HIF context
1158  *
1159  * Return: enum hif_pm_wake_irq_type
1160  */
1161 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx);
1162 
1163 /**
1164  * enum hif_ep_vote_type - hif ep vote type
1165  * HIF_EP_VOTE_DP_ACCESS: vote type is specific DP
1166  * HIF_EP_VOTE_NONDP_ACCESS: ep vote for over all access
1167  */
1168 enum hif_ep_vote_type {
1169 	HIF_EP_VOTE_DP_ACCESS,
1170 	HIF_EP_VOTE_NONDP_ACCESS
1171 };
1172 
1173 /**
1174  * enum hif_ep_vote_access - hif ep vote access
1175  * HIF_EP_VOTE_ACCESS_ENABLE: Enable ep voting
1176  * HIF_EP_VOTE_INTERMEDIATE_ACCESS: allow during transition
1177  * HIF_EP_VOTE_ACCESS_DISABLE: disable ep voting
1178  */
1179 enum hif_ep_vote_access {
1180 	HIF_EP_VOTE_ACCESS_ENABLE,
1181 	HIF_EP_VOTE_INTERMEDIATE_ACCESS,
1182 	HIF_EP_VOTE_ACCESS_DISABLE
1183 };
1184 
1185 /**
1186  * enum hif_rpm_id - modules registered with runtime pm module
1187  * @HIF_RTPM_ID_RESERVED: Reserved ID
1188  * @HIF_RTPM_ID_HAL_REO_CMD: HAL REO commands
1189  * @HIF_RTPM_ID_WMI: WMI commands Tx
1190  * @HIF_RTPM_ID_HTT: HTT commands Tx
1191  * @HIF_RTPM_ID_DP_TX: Datapath Tx path
1192  * @HIF_RTPM_ID_DP_RING_STATS: Datapath ring stats
1193  * @HIF_RTPM_ID_CE_SEND_FAST: CE Tx buffer posting
1194  * @HIF_RTPM_ID_FORCE_WAKE: Force wake request
1195  * @HIF_RTPM_ID_PREVENT_LINKDOWN: Prevent linkdown by not allowing runtime PM
1196  * @HIF_RTPM_ID_PREVENT_ALLOW_LOCK: Generic ID for runtime PM lock contexts
1197  * @HIF_RTPM_ID_MAX: Max id
1198  */
1199 enum  hif_rtpm_client_id {
1200 	HIF_RTPM_ID_RESERVED,
1201 	HIF_RTPM_ID_HAL_REO_CMD,
1202 	HIF_RTPM_ID_WMI,
1203 	HIF_RTPM_ID_HTT,
1204 	HIF_RTPM_ID_DP,
1205 	HIF_RTPM_ID_DP_RING_STATS,
1206 	HIF_RTPM_ID_CE,
1207 	HIF_RTPM_ID_FORCE_WAKE,
1208 	HIF_RTPM_ID_PM_QOS_NOTIFY,
1209 	HIF_RTPM_ID_WIPHY_SUSPEND,
1210 	HIF_RTPM_ID_MAX
1211 };
1212 
1213 /**
1214  * enum hif_rpm_type - Get and Put calls types
1215  * HIF_RTPM_GET_ASYNC: Increment usage count and when system is suspended
1216  *		      schedule resume process, return depends on pm state.
1217  * HIF_RTPM_GET_FORCE: Increment usage count and when system is suspended
1218  *		      schedule resume process, returns success irrespective of
1219  *		      pm_state.
1220  * HIF_RTPM_GET_SYNC: Increment usage count and when system is suspended,
1221  *		     wait till process is resumed.
1222  * HIF_RTPM_GET_NORESUME: Only increments usage count.
1223  * HIF_RTPM_PUT_ASYNC: Decrements usage count and puts system in idle state.
1224  * HIF_RTPM_PUT_SYNC_SUSPEND: Decrements usage count and puts system in
1225  *			     suspended state.
1226  * HIF_RTPM_PUT_NOIDLE: Decrements usage count.
1227  */
1228 enum rpm_type {
1229 	HIF_RTPM_GET_ASYNC,
1230 	HIF_RTPM_GET_FORCE,
1231 	HIF_RTPM_GET_SYNC,
1232 	HIF_RTPM_GET_NORESUME,
1233 	HIF_RTPM_PUT_ASYNC,
1234 	HIF_RTPM_PUT_SYNC_SUSPEND,
1235 	HIF_RTPM_PUT_NOIDLE,
1236 };
1237 
1238 /**
1239  * struct hif_pm_runtime_lock - data structure for preventing runtime suspend
1240  * @list - global list of runtime locks
1241  * @active - true if this lock is preventing suspend
1242  * @name - character string for tracking this lock
1243  */
1244 struct hif_pm_runtime_lock {
1245 	struct list_head list;
1246 	bool active;
1247 	const char *name;
1248 };
1249 
1250 #ifdef FEATURE_RUNTIME_PM
1251 /**
1252  * hif_rtpm_register() - Register a module with runtime PM.
1253  * @id: ID of the module which needs to be registered
1254  * @hif_rpm_cbk: callback to be called when get was called in suspended state.
1255  * @prevent_multiple_get: not allow simultaneous get calls or put calls
1256  *
1257  * Return: success status if successfully registered
1258  */
1259 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void));
1260 
1261 /**
1262  * hif_rtpm_deregister() - Deregister the module
1263  * @id: ID of the module which needs to be de-registered
1264  */
1265 QDF_STATUS hif_rtpm_deregister(uint32_t id);
1266 
1267 /**
1268  * hif_runtime_lock_init() - API to initialize Runtime PM context
1269  * @lock: QDF lock context
1270  * @name: Context name
1271  *
1272  * This API initializes the Runtime PM context of the caller and
1273  * return the pointer.
1274  *
1275  * Return: None
1276  */
1277 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
1278 
1279 /**
1280  * hif_runtime_lock_deinit() - This API frees the runtime pm context
1281  * @data: Runtime PM context
1282  *
1283  * Return: void
1284  */
1285 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data);
1286 
1287 /**
1288  * hif_rtpm_get() - Increment usage_count on the device to avoid suspend.
1289  * @type: get call types from hif_rpm_type
1290  * @id: ID of the module calling get()
1291  *
1292  * A get operation will prevent a runtime suspend until a
1293  * corresponding put is done.  This api should be used when accessing bus.
1294  *
1295  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
1296  * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!!
1297  *
1298  * return: success if a get has been issued, else error code.
1299  */
1300 QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id);
1301 
1302 /**
1303  * hif_pm_runtime_put() - do a put operation on the device
1304  * @type: put call types from hif_rpm_type
1305  * @id: ID of the module calling put()
1306  *
1307  * A put operation will allow a runtime suspend after a corresponding
1308  * get was done.  This api should be used when finished accessing bus.
1309  *
1310  * This api will return a failure if runtime pm is stopped
1311  * This api will return failure if it would decrement the usage count below 0.
1312  *
1313  * return: QDF_STATUS_SUCCESS if the put is performed
1314  */
1315 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id);
1316 
1317 /**
1318  * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend
1319  * @data: runtime PM lock
1320  *
1321  * This function will prevent runtime suspend, by incrementing
1322  * device's usage count.
1323  *
1324  * Return: status
1325  */
1326 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data);
1327 
1328 /**
1329  * hif_pm_runtime_prevent_suspend_sync() - Synchronized prevent Runtime suspend
1330  * @data: runtime PM lock
1331  *
1332  * This function will prevent runtime suspend, by incrementing
1333  * device's usage count.
1334  *
1335  * Return: status
1336  */
1337 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data);
1338 
1339 /**
1340  * hif_pm_runtime_allow_suspend() - Allow Runtime suspend
1341  * @data: runtime PM lock
1342  *
1343  * This function will allow runtime suspend, by decrementing
1344  * device's usage count.
1345  *
1346  * Return: status
1347  */
1348 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data);
1349 
1350 /**
1351  * hif_rtpm_request_resume() - Request resume if bus is suspended
1352  *
1353  * Return: None
1354  */
1355 void hif_rtpm_request_resume(void);
1356 
1357 /**
1358  * hif_rtpm_sync_resume() - Invoke synchronous runtime resume.
1359  *
1360  * This function will invoke synchronous runtime resume.
1361  *
1362  * Return: status
1363  */
1364 QDF_STATUS hif_rtpm_sync_resume(void);
1365 
1366 /**
1367  * hif_rtpm_check_and_request_resume() - check if bus is suspended and
1368  *                                       request resume.
1369  *
1370  * Return: void
1371  */
1372 void hif_rtpm_check_and_request_resume(void);
1373 
1374 /**
1375  * hif_rtpm_set_client_job() - Set job for the client.
1376  * @client_id: Client id for which job needs to be set
1377  *
1378  * If get failed due to system being in suspended state, set the client job so
1379  * when system resumes the client's job is called.
1380  *
1381  * Return: None
1382  */
1383 void hif_rtpm_set_client_job(uint32_t client_id);
1384 
1385 /**
1386  * hif_rtpm_mark_last_busy() - Mark last busy to delay retry to suspend
1387  * @id: ID marking last busy
1388  *
1389  * Return: None
1390  */
1391 void hif_rtpm_mark_last_busy(uint32_t id);
1392 
1393 /**
1394  * hif_rtpm_get_monitor_wake_intr() - API to get monitor_wake_intr
1395  *
1396  * monitor_wake_intr variable can be used to indicate if driver expects wake
1397  * MSI for runtime PM
1398  *
1399  * Return: monitor_wake_intr variable
1400  */
1401 int hif_rtpm_get_monitor_wake_intr(void);
1402 
1403 /**
1404  * hif_rtpm_set_monitor_wake_intr() - API to set monitor_wake_intr
1405  * @val: value to set
1406  *
1407  * monitor_wake_intr variable can be used to indicate if driver expects wake
1408  * MSI for runtime PM
1409  *
1410  * Return: void
1411  */
1412 void hif_rtpm_set_monitor_wake_intr(int val);
1413 
1414 /**
1415  * hif_pre_runtime_suspend() - book keeping before beginning runtime suspend.
1416  * @hif_ctx: HIF context
1417  *
1418  * Makes sure that the pci link will be taken down by the suspend operation.
1419  * If the hif layer is configured to leave the bus on, runtime suspend will
1420  * not save any power.
1421  *
1422  * Set the runtime suspend state to SUSPENDING.
1423  *
1424  * return -EINVAL if the bus won't go down.  otherwise return 0
1425  */
1426 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1427 
1428 /**
1429  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
1430  *
1431  * update the runtime pm state to RESUMING.
1432  * Return: void
1433  */
1434 void hif_pre_runtime_resume(void);
1435 
1436 /**
1437  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
1438  *
1439  * Record the success.
1440  * update the runtime_pm state to SUSPENDED
1441  * Return: void
1442  */
1443 void hif_process_runtime_suspend_success(void);
1444 
1445 /**
1446  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
1447  *
1448  * Record the failure.
1449  * mark last busy to delay a retry.
1450  * update the runtime_pm state back to ON
1451  *
1452  * Return: void
1453  */
1454 void hif_process_runtime_suspend_failure(void);
1455 
1456 /**
1457  * hif_process_runtime_suspend_failure() - bookkeeping of resuming link up
1458  *
1459  * update the runtime_pm state to RESUMING_LINKUP
1460  * Return: void
1461  */
1462 void hif_process_runtime_resume_linkup(void);
1463 
1464 /**
1465  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
1466  *
1467  * record the success.
1468  * update the runtime_pm state to SUSPENDED
1469  * Return: void
1470  */
1471 void hif_process_runtime_resume_success(void);
1472 
1473 /**
1474  * hif_rtpm_print_prevent_list() - list the clients preventing suspend.
1475  *
1476  * Return: None
1477  */
1478 void hif_rtpm_print_prevent_list(void);
1479 
1480 /**
1481  * hif_rtpm_suspend_lock() - spin_lock on marking runtime suspend
1482  *
1483  * Return: void
1484  */
1485 void hif_rtpm_suspend_lock(void);
1486 
1487 /**
1488  * hif_rtpm_suspend_unlock() - spin_unlock on marking runtime suspend
1489  *
1490  * Return: void
1491  */
1492 void hif_rtpm_suspend_unlock(void);
1493 
1494 /**
1495  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
1496  * @hif_ctx: HIF context
1497  *
1498  * Return: 0 for success and non-zero error code for failure
1499  */
1500 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1501 
1502 /**
1503  * hif_runtime_resume() - do the bus resume part of a runtime resume
1504  * @hif_ctx: HIF context
1505  *
1506  * Return: 0 for success and non-zero error code for failure
1507  */
1508 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
1509 
1510 /**
1511  * hif_fastpath_resume() - resume fastpath for runtimepm
1512  * @hif_ctx: HIF context
1513  *
1514  * ensure that the fastpath write index register is up to date
1515  * since runtime pm may cause ce_send_fast to skip the register
1516  * write.
1517  *
1518  * fastpath only applicable to legacy copy engine
1519  */
1520 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
1521 
1522 /**
1523  * hif_rtpm_get_state(): get rtpm link state
1524  *
1525  * Return: state
1526  */
1527 int hif_rtpm_get_state(void);
1528 #else
1529 static inline
1530 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void))
1531 { return QDF_STATUS_SUCCESS; }
1532 
1533 static inline
1534 QDF_STATUS hif_rtpm_deregister(uint32_t id)
1535 { return QDF_STATUS_SUCCESS; }
1536 
1537 static inline
1538 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
1539 { return 0; }
1540 
1541 static inline
1542 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data)
1543 {}
1544 
1545 static inline
1546 int hif_rtpm_get(uint8_t type, uint32_t id)
1547 { return QDF_STATUS_SUCCESS; }
1548 
1549 static inline
1550 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
1551 { return QDF_STATUS_SUCCESS; }
1552 
1553 static inline
1554 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data)
1555 { return 0; }
1556 
1557 static inline
1558 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data)
1559 { return 0; }
1560 
1561 static inline
1562 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data)
1563 { return 0; }
1564 
1565 static inline
1566 QDF_STATUS hif_rtpm_sync_resume(void)
1567 { return QDF_STATUS_SUCCESS; }
1568 
1569 static inline
1570 void hif_rtpm_request_resume(void)
1571 {}
1572 
1573 static inline
1574 void hif_rtpm_check_and_request_resume(void)
1575 {}
1576 
1577 static inline
1578 void hif_rtpm_set_client_job(uint32_t client_id)
1579 {}
1580 
1581 static inline
1582 void hif_rtpm_print_prevent_list(void)
1583 {}
1584 
1585 static inline
1586 void hif_rtpm_suspend_unlock(void)
1587 {}
1588 
1589 static inline
1590 void hif_rtpm_suspend_lock(void)
1591 {}
1592 
1593 static inline
1594 int hif_rtpm_get_monitor_wake_intr(void)
1595 { return 0; }
1596 
1597 static inline
1598 void hif_rtpm_set_monitor_wake_intr(int val)
1599 {}
1600 
1601 static inline
1602 void hif_rtpm_mark_last_busy(uint32_t id)
1603 {}
1604 #endif
1605 
1606 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
1607 				 bool is_packet_log_enabled);
1608 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
1609 
1610 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
1611 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
1612 
1613 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
1614 
1615 #ifdef IPA_OFFLOAD
1616 /**
1617  * hif_get_ipa_hw_type() - get IPA hw type
1618  *
1619  * This API return the IPA hw type.
1620  *
1621  * Return: IPA hw type
1622  */
1623 static inline
1624 enum ipa_hw_type hif_get_ipa_hw_type(void)
1625 {
1626 	return ipa_get_hw_type();
1627 }
1628 
1629 /**
1630  * hif_get_ipa_present() - get IPA hw status
1631  *
1632  * This API return the IPA hw status.
1633  *
1634  * Return: true if IPA is present or false otherwise
1635  */
1636 static inline
1637 bool hif_get_ipa_present(void)
1638 {
1639 	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
1640 		return true;
1641 	else
1642 		return false;
1643 }
1644 #endif
1645 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
1646 /**
1647  * hif_bus_ealry_suspend() - stop non wmi tx traffic
1648  * @context: hif context
1649  */
1650 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
1651 
1652 /**
1653  * hif_bus_late_resume() - resume non wmi traffic
1654  * @context: hif context
1655  */
1656 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
1657 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
1658 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
1659 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
1660 
1661 /**
1662  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
1663  * @hif_ctx: an opaque HIF handle to use
1664  *
1665  * As opposed to the standard hif_irq_enable, this function always applies to
1666  * the APPS side kernel interrupt handling.
1667  *
1668  * Return: errno
1669  */
1670 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
1671 
1672 /**
1673  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
1674  * @hif_ctx: an opaque HIF handle to use
1675  *
1676  * As opposed to the standard hif_irq_disable, this function always applies to
1677  * the APPS side kernel interrupt handling.
1678  *
1679  * Return: errno
1680  */
1681 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
1682 
1683 /**
1684  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
1685  * @hif_ctx: an opaque HIF handle to use
1686  *
1687  * As opposed to the standard hif_irq_enable, this function always applies to
1688  * the APPS side kernel interrupt handling.
1689  *
1690  * Return: errno
1691  */
1692 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
1693 
1694 /**
1695  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
1696  * @hif_ctx: an opaque HIF handle to use
1697  *
1698  * As opposed to the standard hif_irq_disable, this function always applies to
1699  * the APPS side kernel interrupt handling.
1700  *
1701  * Return: errno
1702  */
1703 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
1704 
1705 /**
1706  * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side
1707  * @hif_ctx: an opaque HIF handle to use
1708  *
1709  * This function always applies to the APPS side kernel interrupt handling
1710  * to wake the system from suspend.
1711  *
1712  * Return: errno
1713  */
1714 int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx);
1715 
1716 /**
1717  * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side
1718  * @hif_ctx: an opaque HIF handle to use
1719  *
1720  * This function always applies to the APPS side kernel interrupt handling
1721  * to disable the wake irq.
1722  *
1723  * Return: errno
1724  */
1725 int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx);
1726 
1727 /**
1728  * hif_apps_enable_irqs_except_wake_irq() - Enables all irqs except wake_irq
1729  * @hif_ctx: an opaque HIF handle to use
1730  *
1731  * As opposed to the standard hif_irq_enable, this function always applies to
1732  * the APPS side kernel interrupt handling.
1733  *
1734  * Return: errno
1735  */
1736 int hif_apps_enable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1737 
1738 /**
1739  * hif_apps_disable_irqs_except_wake_irq() - Disables all irqs except wake_irq
1740  * @hif_ctx: an opaque HIF handle to use
1741  *
1742  * As opposed to the standard hif_irq_disable, this function always applies to
1743  * the APPS side kernel interrupt handling.
1744  *
1745  * Return: errno
1746  */
1747 int hif_apps_disable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1748 
1749 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
1750 int hif_dump_registers(struct hif_opaque_softc *scn);
1751 int ol_copy_ramdump(struct hif_opaque_softc *scn);
1752 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
1753 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1754 		     u32 *revision, const char **target_name);
1755 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
1756 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
1757 						   scn);
1758 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
1759 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
1760 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
1761 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1762 			   hif_target_status);
1763 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1764 			 struct hif_config_info *cfg);
1765 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
1766 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1767 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
1768 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1769 			   uint32_t transfer_id, u_int32_t len);
1770 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
1771 	uint32_t transfer_id, uint32_t download_len);
1772 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
1773 void hif_ce_war_disable(void);
1774 void hif_ce_war_enable(void);
1775 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
1776 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
1777 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
1778 		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
1779 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
1780 		uint32_t pipe_num);
1781 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
1782 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
1783 
1784 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
1785 				int rx_bundle_cnt);
1786 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
1787 
1788 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
1789 
1790 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
1791 
1792 enum hif_exec_type {
1793 	HIF_EXEC_NAPI_TYPE,
1794 	HIF_EXEC_TASKLET_TYPE,
1795 };
1796 
1797 typedef uint32_t (*ext_intr_handler)(void *, uint32_t, int);
1798 
1799 /**
1800  * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
1801  * @softc: hif opaque context owning the exec context
1802  * @id: the id of the interrupt context
1803  *
1804  * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
1805  *         'id' registered with the OS
1806  */
1807 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
1808 				uint8_t id);
1809 
1810 /**
1811  * hif_configure_ext_group_interrupts() - Configure ext group interrupts
1812  * @hif_ctx: hif opaque context
1813  *
1814  * Return: QDF_STATUS
1815  */
1816 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1817 
1818 /**
1819  * hif_deconfigure_ext_group_interrupts() - Deconfigure ext group interrupts
1820  * @hif_ctx: hif opaque context
1821  *
1822  * Return: None
1823  */
1824 void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1825 
1826 /**
1827  * hif_register_ext_group() - API to register external group
1828  * interrupt handler.
1829  * @hif_ctx : HIF Context
1830  * @numirq: number of irq's in the group
1831  * @irq: array of irq values
1832  * @handler: callback interrupt handler function
1833  * @cb_ctx: context to passed in callback
1834  * @type: napi vs tasklet
1835  *
1836  * Return: QDF_STATUS
1837  */
1838 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
1839 				  uint32_t numirq, uint32_t irq[],
1840 				  ext_intr_handler handler,
1841 				  void *cb_ctx, const char *context_name,
1842 				  enum hif_exec_type type, uint32_t scale);
1843 
1844 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
1845 				const char *context_name);
1846 
1847 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1848 				u_int8_t pipeid,
1849 				struct hif_msg_callbacks *callbacks);
1850 
1851 /**
1852  * hif_print_napi_stats() - Display HIF NAPI stats
1853  * @hif_ctx - HIF opaque context
1854  *
1855  * Return: None
1856  */
1857 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
1858 
1859 /* hif_clear_napi_stats() - function clears the stats of the
1860  * latency when called.
1861  * @hif_ctx - the HIF context to assign the callback to
1862  *
1863  * Return: None
1864  */
1865 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
1866 
1867 #ifdef __cplusplus
1868 }
1869 #endif
1870 
#ifdef FORCE_WAKE
/**
 * hif_force_wake_request() - Function to wake from power collapse
 * @handle: HIF opaque handle
 *
 * Description: API to check if the device is awake or not before
 * read/write to BAR + 4K registers. If device is awake return
 * success otherwise write '1' to
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG which will interrupt
 * the device and does wakeup the PCI and MHI within 50ms
 * and then the device writes a value to
 * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
 * handshake process to let the host know the device is awake.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_request(struct hif_opaque_softc *handle);

/**
 * hif_force_wake_release() - API to release/reset the SOC wake register
 * from interrupting the device.
 * @handle: HIF opaque handle
 *
 * Description: API to set the
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
 * to release the interrupt line.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_release(struct hif_opaque_softc *handle);
#else
/* FORCE_WAKE disabled: no wake handshake is needed, so the stubs succeed */
static inline
int hif_force_wake_request(struct hif_opaque_softc *handle)
{
	return 0;
}

static inline
int hif_force_wake_release(struct hif_opaque_softc *handle)
{
	return 0;
}
#endif /* FORCE_WAKE */
1914 
#ifdef FEATURE_HAL_DELAYED_REG_WRITE
/**
 * hif_prevent_link_low_power_states() - Prevent from going to low power states
 * @hif: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);

/**
 * hif_allow_link_low_power_states() - Allow link to go to low power states
 * @hif: HIF opaque context
 *
 * Return: None
 */
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);

#else

/* Feature disabled: link power-state votes are no-ops that always succeed */
static inline
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
{
}
#endif
1945 
/**
 * hif_get_dev_ba() - get the device base address
 * @hif_handle: the HIF context
 *
 * Return: device base address
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);

/**
 * hif_get_dev_ba_ce() - get the CE base address
 * @hif_handle: the HIF context
 *
 * Return: CE base address
 */
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle);

/**
 * hif_get_dev_ba_cmem() - get base address of CMEM
 * @hif_handle: the HIF context
 *
 * Return: CMEM base address
 */
void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle);
1955 
1956 /**
1957  * hif_get_soc_version() - get soc major version from target info
1958  * @hif_ctx - the HIF context
1959  *
1960  * Return: version number
1961  */
1962 uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle);
1963 
1964 /**
1965  * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
1966  * @hif_ctx - the HIF context to assign the callback to
1967  * @callback - the callback to assign
1968  * @priv - the private data to pass to the callback when invoked
1969  *
1970  * Return: None
1971  */
1972 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
1973 			       void (*callback)(void *),
1974 			       void *priv);
1975 /*
1976  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
1977  * for defined here
1978  */
1979 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1980 ssize_t hif_dump_desc_trace_buf(struct device *dev,
1981 				struct device_attribute *attr, char *buf);
1982 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1983 					const char *buf, size_t size);
1984 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
1985 				const char *buf, size_t size);
1986 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
1987 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
1988 #endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
1989 
1990 /**
1991  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
1992  * @hif: hif context
1993  * @ce_service_max_yield_time: CE service max yield time to set
1994  *
1995  * This API storess CE service max yield time in hif context based
1996  * on ini value.
1997  *
1998  * Return: void
1999  */
2000 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
2001 				       uint32_t ce_service_max_yield_time);
2002 
2003 /**
2004  * hif_get_ce_service_max_yield_time() - get CE service max yield time
2005  * @hif: hif context
2006  *
2007  * This API returns CE service max yield time.
2008  *
2009  * Return: CE service max yield time
2010  */
2011 unsigned long long
2012 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
2013 
2014 /**
2015  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
2016  * @hif: hif context
2017  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
2018  *
2019  * This API stores CE service max rx ind flush in hif context based
2020  * on ini value.
2021  *
2022  * Return: void
2023  */
2024 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2025 					 uint8_t ce_service_max_rx_ind_flush);
2026 
#ifdef OL_ATH_SMART_LOGGING
/**
 * hif_log_dump_ce() - Copy all the CE DEST ring to buf
 * @scn: HIF handler
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy Engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and buffers pointed by them in to the given buf
 *
 * Return: Current pointer in ring buffer
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
			 uint8_t *buf_init, uint32_t buf_sz,
			 uint32_t ce, uint32_t skb_sz);
#endif /* OL_ATH_SMART_LOGGING */
2046 
2047 /*
2048  * hif_softc_to_hif_opaque_softc - API to convert hif_softc handle
2049  * to hif_opaque_softc handle
2050  * @hif_handle - hif_softc type
2051  *
2052  * Return: hif_opaque_softc type
2053  */
2054 static inline struct hif_opaque_softc *
2055 hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
2056 {
2057 	return (struct hif_opaque_softc *)hif_handle;
2058 }
2059 
#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
/**
 * hif_try_prevent_ep_vote_access() - try to prevent further EP vote accesses
 * @hif_ctx: HIF opaque context
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */
QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx);

/**
 * hif_set_ep_intermediate_vote_access() - set EP vote access to intermediate
 * @hif_ctx: HIF opaque context
 *
 * Return: None
 */
void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx);

/**
 * hif_allow_ep_vote_access() - re-allow EP vote accesses
 * @hif_ctx: HIF opaque context
 *
 * Return: None
 */
void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx);

/**
 * hif_set_ep_vote_access() - set the access state for the given vote type
 * @hif_ctx: HIF opaque context
 * @type: vote type
 * @access: access state to set
 *
 * Return: None
 */
void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			    uint8_t type, uint8_t access);

/**
 * hif_get_ep_vote_access() - get the access state for the given vote type
 * @hif_ctx: HIF opaque context
 * @type: vote type
 *
 * Return: current access state
 */
uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			       uint8_t type);
#else
/* Feature disabled: votes are no-ops and access is always reported enabled */
static inline QDF_STATUS
hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type, uint8_t access)
{
}

static inline uint8_t
hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type)
{
	return HIF_EP_VOTE_ACCESS_ENABLE;
}
#endif
2098 
#ifdef FORCE_WAKE
/**
 * hif_srng_init_phase(): Indicate srng initialization phase
 * to avoid force wake as UMAC power collapse is not yet
 * enabled
 * @hif_ctx: hif opaque handle
 * @init_phase: initialization phase
 *
 * Return:  None
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase);
#else
/* FORCE_WAKE disabled: the init-phase hint is unnecessary */
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
}
#endif /* FORCE_WAKE */

#ifdef HIF_IPCI
/**
 * hif_shutdown_notifier_cb() - Call back for shutdown notifier
 * @ctx: hif handle
 *
 * Return:  None
 */
void hif_shutdown_notifier_cb(void *ctx);
#else
/* Only the IPCI bus registers a shutdown notifier; stub otherwise */
static inline
void hif_shutdown_notifier_cb(void *ctx)
{
}
#endif /* HIF_IPCI */
2133 
#ifdef HIF_CE_LOG_INFO
/**
 * hif_log_ce_info() - API to log ce info
 * @scn: hif handle
 * @data: hang event data buffer
 * @offset: offset at which data needs to be written
 *
 * Return:  None
 */
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset);
#else
/* HIF_CE_LOG_INFO disabled: nothing is added to the hang event buffer */
static inline
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset)
{
}
#endif

#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * hif_config_irq_set_perf_affinity_hint() - API to set affinity
 * @hif_ctx: hif opaque handle
 *
 * This function is used to move the WLAN IRQs to perf cores in
 * case of defconfig builds.
 *
 * Return:  None
 */
void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx);

#else
/* Feature disabled: IRQ affinity hints are left untouched */
static inline void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx)
{
}
#endif
2172 
2173 /**
2174  * hif_apps_grp_irqs_enable() - enable ext grp irqs
2175  * @hif - HIF opaque context
2176  *
2177  * Return: 0 on success. Error code on failure.
2178  */
2179 int hif_apps_grp_irqs_enable(struct hif_opaque_softc *hif_ctx);
2180 
2181 /**
2182  * hif_apps_grp_irqs_disable() - disable ext grp irqs
2183  * @hif - HIF opaque context
2184  *
2185  * Return: 0 on success. Error code on failure.
2186  */
2187 int hif_apps_grp_irqs_disable(struct hif_opaque_softc *hif_ctx);
2188 
2189 /**
2190  * hif_disable_grp_irqs() - disable ext grp irqs
2191  * @hif - HIF opaque context
2192  *
2193  * Return: 0 on success. Error code on failure.
2194  */
2195 int hif_disable_grp_irqs(struct hif_opaque_softc *scn);
2196 
2197 /**
2198  * hif_enable_grp_irqs() - enable ext grp irqs
2199  * @hif - HIF opaque context
2200  *
2201  * Return: 0 on success. Error code on failure.
2202  */
2203 int hif_enable_grp_irqs(struct hif_opaque_softc *scn);
2204 
/**
 * enum hif_credit_exchange_type - kind of credit exchange event being recorded
 * @HIF_REQUEST_CREDIT: a credit request was issued
 * @HIF_PROCESS_CREDIT_REPORT: a credit report was processed
 */
enum hif_credit_exchange_type {
	HIF_REQUEST_CREDIT,
	HIF_PROCESS_CREDIT_REPORT,
};

/**
 * enum hif_detect_latency_type - source associated with a detected latency
 * @HIF_DETECT_TASKLET: latency attributed to tasklet execution
 * @HIF_DETECT_CREDIT: latency attributed to credit exchange
 * @HIF_DETECT_UNKNOWN: latency source could not be identified
 */
enum hif_detect_latency_type {
	HIF_DETECT_TASKLET,
	HIF_DETECT_CREDIT,
	HIF_DETECT_UNKNOWN
};
2215 
#ifdef HIF_DETECTION_LATENCY_ENABLE
/**
 * hif_latency_detect_credit_record_time() - record the time of a credit event
 * @type: which credit exchange event occurred (request vs report)
 * @hif_ctx: hif opaque handle
 *
 * Return: None
 */
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx);

/* start/stop the latency detection timer */
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx);
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx);
/* latency handlers; @from_timer indicates invocation from the detect timer */
void hif_tasklet_latency(struct hif_softc *scn, bool from_timer);
void hif_credit_latency(struct hif_softc *scn, bool from_timer);
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type);
/* enable/disable latency detection at runtime */
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value);
#else
/*
 * Feature disabled: no-op stubs. hif_tasklet_latency()/hif_credit_latency()
 * have no stubs here - presumably only referenced from code compiled under
 * HIF_DETECTION_LATENCY_ENABLE; verify before adding new callers.
 */
static inline
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx)
{}
static inline
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{}

static inline
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{}
#endif
2253 
#ifdef SYSTEM_PM_CHECK
/**
 * __hif_system_pm_set_state() - Set system pm state
 * @hif: hif opaque handle
 * @state: system state
 *
 * Return:  None
 */
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state);

/**
 * hif_system_pm_set_state_on() - Set system pm state to ON
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_ON);
}

/**
 * hif_system_pm_set_state_resuming() - Set system pm state to resuming
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_RESUMING);
}

/**
 * hif_system_pm_set_state_suspending() - Set system pm state to suspending
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDING);
}

/**
 * hif_system_pm_set_state_suspended() - Set system pm state to suspended
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDED);
}

/**
 * hif_system_pm_get_state() - Get system pm state
 * @hif: hif opaque handle
 *
 * Return:  system state
 */
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif);

/**
 * hif_system_pm_state_check() - Check system state and trigger resume
 *  if required
 * @hif: hif opaque handle
 *
 * Return: 0 if system is in on state else error code
 */
int hif_system_pm_state_check(struct hif_opaque_softc *hif);
#else
/* SYSTEM_PM_CHECK disabled: state tracking is a no-op, state reads as 0/on */
static inline
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
}

static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
}

static inline
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline int hif_system_pm_state_check(struct hif_opaque_softc *hif)
{
	return 0;
}
#endif

#ifdef FEATURE_IRQ_AFFINITY
/**
 * hif_set_grp_intr_affinity() - API to set affinity for grp
 *  intrs set in the bitmap
 * @scn: hif handle
 * @grp_intr_bitmask: grp intrs for which perf affinity should be
 *  applied
 * @perf: affine to perf or non-perf cluster
 *
 * Return: None
 */
void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
			       uint32_t grp_intr_bitmask, bool perf);
#else
/* FEATURE_IRQ_AFFINITY disabled: group interrupt affinity is not applied */
static inline
void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
			       uint32_t grp_intr_bitmask, bool perf)
{
}
#endif
2388 /**
2389  * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
2390  * @hif_ctx: hif opaque handle
2391  *
2392  * Description:
2393  *   Gets number of WMI EPs configured in target svc map. Since EP map
2394  *   include IN and OUT direction pipes, count only OUT pipes to get EPs
2395  *   configured for WMI service.
2396  *
2397  * Return:
2398  *  uint8_t: count for WMI eps in target svc map
2399  */
2400 uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *scn);
2401 
2402 #ifdef DP_UMAC_HW_RESET_SUPPORT
2403 /**
2404  * hif_register_umac_reset_handler() - Register UMAC HW reset handler
2405  * @hif_scn: hif opaque handle
2406  * @handler: callback handler function
2407  * @cb_ctx: context to passed to @handler
2408  * @irq: irq number to be used for UMAC HW reset interrupt
2409  *
2410  * Return: QDF_STATUS of operation
2411  */
2412 QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
2413 					   int (*handler)(void *cb_ctx),
2414 					   void *cb_ctx, int irq);
2415 
2416 /**
2417  * hif_unregister_umac_reset_handler() - Unregister UMAC HW reset handler
2418  * @hif_scn: hif opaque handle
2419  *
2420  * Return: QDF_STATUS of operation
2421  */
2422 QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn);
2423 #else
2424 static inline
2425 QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
2426 					   int (*handler)(void *cb_ctx),
2427 					   void *cb_ctx, int irq)
2428 {
2429 	return QDF_STATUS_SUCCESS;
2430 }
2431 
2432 static inline
2433 QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
2434 {
2435 	return QDF_STATUS_SUCCESS;
2436 }
2437 
2438 #endif /* DP_UMAC_HW_RESET_SUPPORT */
2439 
2440 #endif /* _HIF_H_ */
2441