xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _HIF_H_
21 #define _HIF_H_
22 
23 #ifdef __cplusplus
24 extern "C" {
25 #endif /* __cplusplus */
26 
27 /* Header files */
28 #include <qdf_status.h>
29 #include "qdf_nbuf.h"
30 #include "qdf_lro.h"
31 #include "ol_if_athvar.h"
32 #include <linux/platform_device.h>
33 #ifdef HIF_PCI
34 #include <linux/pci.h>
35 #endif /* HIF_PCI */
36 #ifdef HIF_USB
37 #include <linux/usb.h>
38 #endif /* HIF_USB */
39 #ifdef IPA_OFFLOAD
40 #include <linux/ipa.h>
41 #endif
42 #include "cfg_ucfg_api.h"
43 #include "qdf_dev.h"
44 #include <wlan_init_cfg.h>
45 
46 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
47 
48 typedef void __iomem *A_target_id_t;
49 typedef void *hif_handle_t;
50 
51 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
52 #define HIF_WORK_DRAIN_WAIT_CNT 50
53 
54 #define HIF_EP_WAKE_RESET_WAIT_CNT 10
55 #endif
56 
57 #define HIF_TYPE_AR6002   2
58 #define HIF_TYPE_AR6003   3
59 #define HIF_TYPE_AR6004   5
60 #define HIF_TYPE_AR9888   6
61 #define HIF_TYPE_AR6320   7
62 #define HIF_TYPE_AR6320V2 8
63 /* For attaching Peregrine 2.0 board host_reg_tbl only */
64 #define HIF_TYPE_AR9888V2 9
65 #define HIF_TYPE_ADRASTEA 10
66 #define HIF_TYPE_AR900B 11
67 #define HIF_TYPE_QCA9984 12
68 #define HIF_TYPE_QCA9888 14
69 #define HIF_TYPE_QCA8074 15
70 #define HIF_TYPE_QCA6290 16
71 #define HIF_TYPE_QCN7605 17
72 #define HIF_TYPE_QCA6390 18
73 #define HIF_TYPE_QCA8074V2 19
74 #define HIF_TYPE_QCA6018  20
75 #define HIF_TYPE_QCN9000 21
76 #define HIF_TYPE_QCA6490 22
77 #define HIF_TYPE_QCA6750 23
78 #define HIF_TYPE_QCA5018 24
79 #define HIF_TYPE_QCN6122 25
80 #define HIF_TYPE_KIWI 26
81 #define HIF_TYPE_QCN9224 27
82 #define HIF_TYPE_QCA9574 28
83 #define HIF_TYPE_MANGO 29
84 #define HIF_TYPE_QCA5332 30
85 #define HIF_TYPE_QCN9160 31
86 #define HIF_TYPE_PEACH 32
87 
88 #define DMA_COHERENT_MASK_DEFAULT   37
89 
90 #ifdef IPA_OFFLOAD
91 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
92 #endif
93 
/* enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	/* first entry is pinned at 16; every following enumerator is
	 * consecutive, so the whole range is dense from 16 upward
	 */
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring4,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
	txmon2host_monitor_destination_mac3,
	txmon2host_monitor_destination_mac2,
	txmon2host_monitor_destination_mac1,
	host2tx_monitor_ring1,
	umac_reset,
};
141 
142 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
/* Legacy (non-MSI) PCI interrupt sources; values are implicit and
 * sequential starting from 0 (ce0).
 */
enum hif_legacy_pci_irq {
	ce0,
	ce1,
	ce2,
	ce3,
	ce4,
	ce5,
	ce6,
	ce7,
	ce8,
	ce9,
	ce10,
	ce11,
	ce12,
	ce13,
	ce14,
	ce15,
	reo2sw8_intr2,
	reo2sw7_intr2,
	reo2sw6_intr2,
	reo2sw5_intr2,
	reo2sw4_intr2,
	reo2sw3_intr2,
	reo2sw2_intr2,
	reo2sw1_intr2,
	reo2sw0_intr2,
	reo2sw8_intr,
	reo2sw7_intr,
	reo2sw6_inrr,
	/* correctly-spelled alias for the misspelled reo2sw6_inrr above;
	 * same value, kept so existing references continue to compile
	 */
	reo2sw6_intr = reo2sw6_inrr,
	reo2sw5_intr,
	reo2sw4_intr,
	reo2sw3_intr,
	reo2sw2_intr,
	reo2sw1_intr,
	reo2sw0_intr,
	reo2status_intr2,
	reo_status,
	reo2rxdma_out_2,
	reo2rxdma_out_1,
	reo_cmd,
	sw2reo6,
	sw2reo5,
	sw2reo1,
	sw2reo,
	rxdma2reo_mlo_0_dst_ring1,
	rxdma2reo_mlo_0_dst_ring0,
	rxdma2reo_mlo_1_dst_ring1,
	rxdma2reo_mlo_1_dst_ring0,
	rxdma2reo_dst_ring1,
	rxdma2reo_dst_ring0,
	rxdma2sw_dst_ring1,
	rxdma2sw_dst_ring0,
	rxdma2release_dst_ring1,
	rxdma2release_dst_ring0,
	sw2rxdma_2_src_ring,
	sw2rxdma_1_src_ring,
	sw2rxdma_0,
	wbm2sw6_release2,
	wbm2sw5_release2,
	wbm2sw4_release2,
	wbm2sw3_release2,
	wbm2sw2_release2,
	wbm2sw1_release2,
	wbm2sw0_release2,
	wbm2sw6_release,
	wbm2sw5_release,
	wbm2sw4_release,
	wbm2sw3_release,
	wbm2sw2_release,
	wbm2sw1_release,
	wbm2sw0_release,
	wbm2sw_link,
	wbm_error_release,
	sw2txmon_src_ring,
	sw2rxmon_src_ring,
	txmon2sw_p1_intr1,
	txmon2sw_p1_intr0,
	txmon2sw_p0_dest1,
	txmon2sw_p0_dest0,
	rxmon2sw_p1_intr1,
	rxmon2sw_p1_intr0,
	rxmon2sw_p0_dest1,
	rxmon2sw_p0_dest0,
	sw_release,
	sw2tcl_credit2,
	sw2tcl_credit,
	sw2tcl4,
	sw2tcl5,
	sw2tcl3,
	sw2tcl2,
	sw2tcl1,
	sw2wbm1,
	misc_8,
	misc_7,
	misc_6,
	misc_5,
	misc_4,
	misc_3,
	misc_2,
	misc_1,
	misc_0,
};
245 #endif
246 
247 struct CE_state;
248 #ifdef QCA_WIFI_QCN9224
249 #define CE_COUNT_MAX 16
250 #else
251 #define CE_COUNT_MAX 12
252 #endif
253 
254 #ifndef HIF_MAX_GROUP
255 #define HIF_MAX_GROUP WLAN_CFG_INT_NUM_CONTEXTS
256 #endif
257 
258 #ifdef CONFIG_BERYLLIUM
259 #define HIF_MAX_GRP_IRQ 25
260 #else
261 #define HIF_MAX_GRP_IRQ 16
262 #endif
263 
264 #ifndef NAPI_YIELD_BUDGET_BASED
265 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
266 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
267 #endif
268 #else  /* NAPI_YIELD_BUDGET_BASED */
269 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
270 #endif /* NAPI_YIELD_BUDGET_BASED */
271 
272 #define QCA_NAPI_BUDGET    64
273 #define QCA_NAPI_DEF_SCALE  \
274 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
275 
276 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
277 /* NOTE: "napi->scale" can be changed,
278  * but this does not change the number of buckets
279  */
280 #define QCA_NAPI_NUM_BUCKETS 4
281 
282 /**
283  * struct qca_napi_stat - stats structure for execution contexts
284  * @napi_schedules: number of times the schedule function is called
285  * @napi_polls: number of times the execution context runs
286  * @napi_completes: number of times that the generating interrupt is re-enabled
287  * @napi_workdone: cumulative of all work done reported by handler
288  * @cpu_corrected: incremented when execution context runs on a different core
289  *			than the one that its irq is affined to.
290  * @napi_budget_uses: histogram of work done per execution run
291  * @time_limit_reached: count of yields due to time limit thresholds
292  * @rxpkt_thresh_reached: count of yields due to a work limit
293  * @napi_max_poll_time:
294  * @poll_time_buckets: histogram of poll times for the napi
295  *
296  */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	/* poll-duration histogram; only tracked when the RX softirq
	 * time-limit feature is compiled in
	 */
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};
311 
312 
313 /**
314  * struct qca_napi_info - per NAPI instance data structure
315  * @netdev: dummy net_dev
316  * @hif_ctx:
317  * @napi:
318  * @scale:
319  * @id:
320  * @cpu:
321  * @irq:
322  * @cpumask:
323  * @stats:
324  * @offld_flush_cb:
325  * @rx_thread_napi:
326  * @rx_thread_netdev:
327  * @lro_ctx:
328  *
329  * This data structure holds stuff per NAPI instance.
330  * Note that, in the current implementation, though scale is
331  * an instance variable, it is set to the same value for all
332  * instances.
333  */
struct qca_napi_info {
	struct net_device    netdev; /* dummy net_dev */
	void                 *hif_ctx;
	struct napi_struct   napi;
	uint8_t              scale;   /* currently same on all instances */
	uint8_t              id;
	uint8_t              cpu;
	int                  irq;
	cpumask_t            cpumask;
	/* per-CPU stats: indexed by the core the poll ran on */
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct   rx_thread_napi;
	struct net_device    rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t        lro_ctx;
};
352 
/* throughput classification used for NAPI irq-affinity decisions */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};
/* hotplug state of a CPU as tracked in the NAPI cpu table */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP
};
362 
363 /**
364  * struct qca_napi_cpu - an entry of the napi cpu table
365  * @state:
366  * @core_id:     physical core id of the core
367  * @cluster_id:  cluster this core belongs to
368  * @core_mask:   mask to match all core of this cluster
369  * @thread_mask: mask for this core within the cluster
370  * @max_freq:    maximum clock this core can be clocked at
371  *               same for all cpus of the same core.
372  * @napis:       bitmap of napi instances on this core
373  * @execs:       bitmap of execution contexts on this core
374  * @cluster_nxt: chain to link cores within the same cluster
375  *
376  * This structure represents a single entry in the napi cpu
377  * table. The table is part of struct qca_napi_data.
378  * This table is initialized by the init function, called while
379  * the first napi instance is being created, updated by hotplug
380  * notifier and when cpu affinity decisions are made (by throughput
381  * detection), and deleted when the last napi instance is removed.
382  */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int			core_id;
	int			cluster_id;
	cpumask_t		core_mask;
	cpumask_t		thread_mask;
	unsigned int		max_freq;
	/* bitmaps: one bit per napi/exec context hosted on this core */
	uint32_t		napis;
	uint32_t		execs;
	int			cluster_nxt;  /* index, not pointer */
};
394 
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: per-CE NAPI instance pointers, indexed by pipe id
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: index into @napi_cpu of the little-cluster chain head
 * @bigcl_head: index into @napi_cpu of the big-cluster chain head
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: NAPI feature/state flags
 */
struct qca_napi_data {
	struct               hif_softc *hif_softc;
	qdf_spinlock_t       lock;
	uint32_t             state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t             ce_map;
	uint32_t             exec_map;
	uint32_t             user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu  napi_cpu[NR_CPUS];
	/* chain heads into napi_cpu[] (indices, -1-style sentinels are
	 * presumed — confirm with napi cpu table init code)
	 */
	int                  lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t              flags;
};
429 
430 /**
431  * struct hif_config_info - Place Holder for HIF configuration
432  * @enable_self_recovery: Self Recovery
433  * @enable_runtime_pm: Enable Runtime PM
434  * @runtime_pm_delay: Runtime PM Delay
435  * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
436  *
437  * Structure for holding HIF ini parameters.
438  */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	/* runtime-PM knobs exist only when the feature is compiled in */
	uint8_t enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
	uint64_t rx_softirq_max_yield_duration_ns;
};
447 
448 /**
449  * struct hif_target_info - Target Information
450  * @target_version: Target Version
451  * @target_type: Target Type
452  * @target_revision: Target Revision
453  * @soc_version: SOC Version
454  * @hw_name: pointer to hardware name
455  *
456  * Structure to hold target information.
457  */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	/* NOTE(review): ownership of hw_name is not visible here —
	 * presumably points at a static table entry; confirm with the
	 * code that populates this struct
	 */
	char *hw_name;
};
465 
/* Opaque handle to the HIF layer; callers only ever hold pointers to it.
 * NOTE(review): an empty struct is a GNU C extension (sizeof == 0), not
 * strict ISO C — fine for gcc/clang builds this driver targets.
 */
struct hif_opaque_softc {
};
468 
469 /**
470  * struct hif_ce_ring_info - CE ring information
471  * @ring_id: ring id
472  * @ring_dir: ring direction
473  * @num_entries: number of entries in ring
474  * @entry_size: ring entry size
475  * @ring_base_paddr: srng base physical address
476  * @hp_paddr: head pointer physical address
477  * @tp_paddr: tail pointer physical address
478  */
struct hif_ce_ring_info {
	uint8_t ring_id;
	uint8_t ring_dir;
	uint32_t num_entries;
	uint32_t entry_size;
	/* all *_paddr fields below are physical (DMA) addresses */
	uint64_t ring_base_paddr;
	uint64_t hp_paddr;
	uint64_t tp_paddr;
};
488 
/**
 * struct hif_direct_link_ce_info - Direct Link CE information
 * @ce_id: CE id
 * @pipe_dir: Pipe direction
 * @ring_info: ring information
 */
struct hif_direct_link_ce_info {
	uint8_t ce_id;
	uint8_t pipe_dir;
	/* snapshot of the CE ring geometry/addresses for this pipe */
	struct hif_ce_ring_info ring_info;
};
500 
501 /**
502  * enum hif_event_type - Type of DP events to be recorded
503  * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
504  * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event
505  * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event
506  * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
507  * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
508  * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
509  * @HIF_EVENT_BH_COMPLETE: NAPI POLL completion event
510  * @HIF_EVENT_BH_FORCE_BREAK: NAPI POLL force break event
511  * @HIF_EVENT_IRQ_DISABLE_EXPIRED: IRQ disable expired event
512  */
enum hif_event_type {
	/* enumerator positions double as bit positions in
	 * HIF_EVENT_HIST_ENABLE_MASK (e.g. mask 0x19 enables
	 * IRQ_TRIGGER, BH_SCHED and SRNG_ACCESS_START)
	 */
	HIF_EVENT_IRQ_TRIGGER,
	HIF_EVENT_TIMER_ENTRY,
	HIF_EVENT_TIMER_EXIT,
	HIF_EVENT_BH_SCHED,
	HIF_EVENT_SRNG_ACCESS_START,
	HIF_EVENT_SRNG_ACCESS_END,
	HIF_EVENT_BH_COMPLETE,
	HIF_EVENT_BH_FORCE_BREAK,
	HIF_EVENT_IRQ_DISABLE_EXPIRED,
	/* Do check hif_hist_skip_event_record when adding new events */
};
525 
526 /**
527  * enum hif_system_pm_state - System PM state
528  * @HIF_SYSTEM_PM_STATE_ON: System in active state
529  * @HIF_SYSTEM_PM_STATE_BUS_RESUMING: bus resume in progress as part of
530  *  system resume
531  * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDING: bus suspend in progress as part of
532  *  system suspend
533  * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDED: bus suspended as part of system suspend
534  */
/* bus power state tracked across system suspend/resume transitions */
enum hif_system_pm_state {
	HIF_SYSTEM_PM_STATE_ON,
	HIF_SYSTEM_PM_STATE_BUS_RESUMING,
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDING,
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDED,
};
541 
542 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
543 #define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP
544 
545 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
546 /* HIF_EVENT_HIST_MAX should always be power of 2 */
547 #define HIF_EVENT_HIST_MAX		512
548 
549 #define HIF_EVENT_HIST_ENABLE_MASK	0xFF
550 
/* timestamp source for event-history entries when the full-size debug
 * history is enabled: QDF log-timestamp domain
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_get_log_timestamp();
}
555 
556 #else
557 
558 #define HIF_EVENT_HIST_MAX		32
559 /* Enable IRQ TRIGGER, NAPI SCHEDULE, SRNG ACCESS START */
560 #define HIF_EVENT_HIST_ENABLE_MASK	0x19
561 
/* timestamp source for the compact event history: scheduler clock */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_sched_clock();
}
566 
567 #endif
568 
569 /**
570  * struct hif_event_record - an entry of the DP event history
571  * @hal_ring_id: ring id for which event is recorded
572  * @hp: head pointer of the ring (may not be applicable for all events)
573  * @tp: tail pointer of the ring (may not be applicable for all events)
574  * @cpu_id: cpu id on which the event occurred
575  * @timestamp: timestamp when event occurred
576  * @type: type of the event
577  *
578  * This structure represents the information stored for every datapath
579  * event which is logged in the history.
580  */
struct hif_event_record {
	uint8_t hal_ring_id;
	uint32_t hp;
	uint32_t tp;
	/* cpu_id and timestamp are not set by hif_record_event();
	 * presumably stamped by the history layer — confirm against
	 * hif_hist_record_event()
	 */
	int cpu_id;
	uint64_t timestamp;
	enum hif_event_type type;
};
589 
590 /**
591  * struct hif_event_misc - history related misc info
592  * @last_irq_index: last irq event index in history
593  * @last_irq_ts: last irq timestamp
594  */
struct hif_event_misc {
	int32_t last_irq_index;	/* index of last irq event in history */
	uint64_t last_irq_ts;	/* timestamp of that irq event */
};
599 
600 /**
601  * struct hif_event_history - history for one interrupt group
602  * @index: index to store new event
603  * @misc: event misc information
604  * @event: event entry
605  *
606  * This structure represents the datapath history for one
607  * interrupt group.
608  */
struct hif_event_history {
	/* atomic write cursor; HIF_EVENT_HIST_MAX is required to be a
	 * power of 2 (see comment above) — presumably so the index can
	 * wrap by masking
	 */
	qdf_atomic_t index;
	struct hif_event_misc misc;
	struct hif_event_record event[HIF_EVENT_HIST_MAX];
};
614 
615 /**
616  * hif_hist_record_event() - Record one datapath event in history
617  * @hif_ctx: HIF opaque context
618  * @event: DP event entry
619  * @intr_grp_id: interrupt group ID registered with hif
620  *
621  * Return: None
622  */
623 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
624 			   struct hif_event_record *event,
625 			   uint8_t intr_grp_id);
626 
627 /**
628  * hif_event_history_init() - Initialize SRNG event history buffers
629  * @hif_ctx: HIF opaque context
630  * @id: context group ID for which history is recorded
631  *
632  * Returns: None
633  */
634 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id);
635 
636 /**
637  * hif_event_history_deinit() - De-initialize SRNG event history buffers
638  * @hif_ctx: HIF opaque context
639  * @id: context group ID for which history is recorded
640  *
641  * Returns: None
642  */
643 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id);
644 
645 /**
646  * hif_record_event() - Wrapper function to form and record DP event
647  * @hif_ctx: HIF opaque context
648  * @intr_grp_id: interrupt group ID registered with hif
649  * @hal_ring_id: ring id for which event is recorded
650  * @hp: head pointer index of the srng
651  * @tp: tail pointer index of the srng
652  * @type: type of the event to be logged in history
653  *
654  * Return: None
655  */
656 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
657 				    uint8_t intr_grp_id,
658 				    uint8_t hal_ring_id,
659 				    uint32_t hp,
660 				    uint32_t tp,
661 				    enum hif_event_type type)
662 {
663 	struct hif_event_record event;
664 
665 	event.hal_ring_id = hal_ring_id;
666 	event.hp = hp;
667 	event.tp = tp;
668 	event.type = type;
669 
670 	hif_hist_record_event(hif_ctx, &event, intr_grp_id);
671 
672 	return;
673 }
674 
675 #else
676 
/* no-op stub when WLAN_FEATURE_DP_EVENT_HISTORY is disabled */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}
685 
/* no-op stub when WLAN_FEATURE_DP_EVENT_HISTORY is disabled */
static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx,
					  uint8_t id)
{
}
690 
/* no-op stub when WLAN_FEATURE_DP_EVENT_HISTORY is disabled */
static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx,
					    uint8_t id)
{
}
695 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
696 
697 void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx);
698 
699 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
700 void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx);
701 #else
/* no-op stub when CE descriptor-history debug support is disabled */
static
inline void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx) {}
704 #endif
705 
706 /**
707  * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
708  *
709  * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
710  * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
711  *                         minimize power
712  * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
713  *                         platform-specific measures to completely power-off
714  *                         the module and associated hardware (i.e. cut power
715  *                         supplies)
716  */
/* power transitions requested of the HIF layer; see kernel-doc above */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};
722 
723 /**
724  * enum hif_enable_type: what triggered the enabling of hif
725  *
726  * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
727  * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
728  * @HIF_ENABLE_TYPE_MAX: Max value
729  */
/* reason hif is being enabled; see kernel-doc above */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};
735 
736 /**
737  * enum hif_disable_type: what triggered the disabling of hif
738  *
739  * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
740  * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
741  * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
742  * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
743  * @HIF_DISABLE_TYPE_MAX: Max value
744  */
/* reason hif is being disabled; see kernel-doc above */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
752 
753 /**
754  * enum hif_device_config_opcode: configure mode
755  *
756  * @HIF_DEVICE_POWER_STATE: device power state
757  * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
758  * @HIF_DEVICE_GET_FIFO_ADDR: get block address
759  * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
760  * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
761  * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
762  * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
763  * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
764  * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
765  * @HIF_DEVICE_GET_OS_DEVICE: get OS device
766  * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
767  * @HIF_BMI_DONE: bmi done
768  * @HIF_DEVICE_SET_TARGET_TYPE: set target type
769  * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
770  * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
771  */
enum hif_device_config_opcode {
	/* only the first opcode is pinned; the rest are sequential */
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
789 
790 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/* one logged PCIe register access (debug-only).
 * NOTE(review): "HID" looks like a historical misspelling of "HIF";
 * renaming would break existing references, so it is left as-is.
 */
struct HID_ACCESS_LOG {
	uint32_t seqnum;	/* monotonically increasing access number */
	bool is_write;		/* true for writes, false for reads */
	void *addr;		/* register address accessed */
	uint32_t value;		/* value written or read back */
};
797 #endif
798 
799 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
800 		uint32_t value);
801 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
802 
803 #define HIF_MAX_DEVICES                 1
804 /**
805  * struct htc_callbacks - Structure for HTC Callbacks methods
806  * @context:             context to pass to the @dsr_handler
807  *                       note : @rw_compl_handler is provided the context
808  *                       passed to hif_read_write
809  * @rw_compl_handler:    Read / write completion handler
810  * @dsr_handler:         DSR Handler
811  */
struct htc_callbacks {
	/* opaque context handed back to dsr_handler */
	void *context;
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};
817 
818 /**
819  * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
820  * @context: Private data context
821  * @set_recovery_in_progress: To Set Driver state for recovery in progress
822  * @is_recovery_in_progress: Query if driver state is recovery in progress
823  * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
824  * @is_driver_unloading: Query if driver is unloading.
825  * @is_target_ready:
826  * @get_bandwidth_level: Query current bandwidth level for the driver
827  * @prealloc_get_consistent_mem_unaligned: get prealloc unaligned consistent mem
828  * @prealloc_put_consistent_mem_unaligned: put unaligned consistent mem to pool
829  * This Structure provides callback pointer for HIF to query hdd for driver
830  * states.
831  */
struct hif_driver_state_callbacks {
	/* opaque context passed back on every callback below */
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
	int (*get_bandwidth_level)(void *context);
	/* pre-allocated DMA-consistent memory pool accessors */
	void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size,
						       qdf_dma_addr_t *paddr,
						       uint32_t ring_type);
	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
};
845 
846 /* This API detaches the HTC layer from the HIF device */
847 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
848 
849 /****************************************************************/
850 /* BMI and Diag window abstraction                              */
851 /****************************************************************/
852 
853 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
854 
855 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
856 				     * handled atomically by
857 				     * DiagRead/DiagWrite
858 				     */
859 
860 #ifdef WLAN_FEATURE_BMI
861 /*
862  * API to handle HIF-specific BMI message exchanges, this API is synchronous
863  * and only allowed to be called from a context that can block (sleep)
864  */
865 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
866 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
867 				uint8_t *pSendMessage, uint32_t Length,
868 				uint8_t *pResponseMessage,
869 				uint32_t *pResponseLength, uint32_t TimeoutMS);
870 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
871 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
872 #else /* WLAN_FEATURE_BMI */
/* no-op stub when BMI support is not compiled in */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}
877 
/* without BMI support no target ever needs a BMI exchange */
static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
883 #endif /* WLAN_FEATURE_BMI */
884 
885 #ifdef HIF_CPU_CLEAR_AFFINITY
886 /**
887  * hif_config_irq_clear_cpu_affinity() - Remove cpu affinity of IRQ
888  * @scn: HIF handle
889  * @intr_ctxt_id: interrupt group index
890  * @cpu: CPU core to clear
891  *
892  * Return: None
893  */
894 void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
895 				       int intr_ctxt_id, int cpu);
896 #else
/* no-op stub when HIF_CPU_CLEAR_AFFINITY is not enabled */
static inline
void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
				       int intr_ctxt_id, int cpu)
{
}
902 #endif
903 
904 /*
905  * APIs to handle HIF specific diagnostic read accesses. These APIs are
906  * synchronous and only allowed to be called from a context that
907  * can block (sleep). They are not high performance APIs.
908  *
909  * hif_diag_read_access reads a 4 Byte aligned/length value from a
910  * Target register or memory word.
911  *
912  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
913  */
914 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
915 				uint32_t address, uint32_t *data);
916 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
917 		      uint8_t *data, int nbytes);
918 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
919 			void *ramdump_base, uint32_t address, uint32_t size);
920 /*
921  * APIs to handle HIF specific diagnostic write accesses. These APIs are
922  * synchronous and only allowed to be called from a context that
923  * can block (sleep).
924  * They are not high performance APIs.
925  *
926  * hif_diag_write_access writes a 4 Byte aligned/length value to a
927  * Target register or memory word.
928  *
929  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
930  */
931 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
932 				 uint32_t address, uint32_t data);
933 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
934 			uint32_t address, uint8_t *data, int nbytes);
935 
936 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
937 
938 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
939 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
940 
941 /*
942  * Set the FASTPATH_mode_on flag in sc, for use by data path
943  */
944 #ifdef WLAN_FEATURE_FASTPATH
945 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
946 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
947 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
948 
949 /**
950  * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
951  * @hif_ctx: HIF opaque context
952  * @handler: Callback function
953  * @context: handle for callback function
954  *
955  * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
956  */
957 QDF_STATUS hif_ce_fastpath_cb_register(
958 		struct hif_opaque_softc *hif_ctx,
959 		fastpath_msg_handler handler, void *context);
960 #else
961 static inline QDF_STATUS hif_ce_fastpath_cb_register(
962 		struct hif_opaque_softc *hif_ctx,
963 		fastpath_msg_handler handler, void *context)
964 {
965 	return QDF_STATUS_E_FAILURE;
966 }
967 
/* fastpath disabled: no CE handle is ever available */
static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
972 
973 #endif
974 
975 /*
976  * Enable/disable CDC max performance workaround
977  * For max-performance set this to 0
978  * To allow SoC to enter sleep set this to 1
979  */
980 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
981 
982 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
983 			     qdf_shared_mem_t **ce_sr,
984 			     uint32_t *ce_sr_ring_size,
985 			     qdf_dma_addr_t *ce_reg_paddr);
986 
987 /**
988  * struct hif_msg_callbacks - List of callbacks - filled in by HTC.
989  * @Context: context meaningful to HTC
990  * @txCompletionHandler:
991  * @rxCompletionHandler:
992  * @txResourceAvailHandler:
993  * @fwEventHandler:
994  * @update_bundle_stats:
995  */
996 struct hif_msg_callbacks {
997 	void *Context;
998 	/**< context meaningful to HTC */
999 	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
1000 					uint32_t transferID,
1001 					uint32_t toeplitz_hash_result);
1002 	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
1003 					uint8_t pipeID);
1004 	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
1005 	void (*fwEventHandler)(void *context, QDF_STATUS status);
1006 	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
1007 };
1008 
/**
 * enum hif_target_status - current state of the target device
 * @TARGET_STATUS_CONNECTED: target connected
 * @TARGET_STATUS_RESET: target got reset
 * @TARGET_STATUS_EJECT: target got ejected
 * @TARGET_STATUS_SUSPEND: target got suspended
 */
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,  /* target got reset */
	TARGET_STATUS_EJECT,  /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
1015 
1016 /**
1017  * enum hif_attribute_flags: configure hif
1018  *
1019  * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
1020  * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
1021  *  							+ No pktlog CE
1022  */
1023 enum hif_attribute_flags {
1024 	HIF_LOWDESC_CE_CFG = 1,
1025 	HIF_LOWDESC_CE_NO_PKTLOG_CFG
1026 };
1027 
/*
 * Helpers to OR fields into a CE data-attribute word.  Both macro
 * arguments are fully parenthesized so callers may pass arbitrary
 * expressions (e.g. a | b, cond ? x : y) without operator-precedence
 * surprises against the internal '&' and '<<'.
 */
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	((attr) |= ((v) & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	((attr) |= ((v) & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	((attr) |= ((v) & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	((attr) |= ((v) & 0x01) << 30)
1042 
/**
 * struct hif_ul_pipe_info - uplink (UL) copy-engine pipe ring bookkeeping
 * @nentries: number of entries in the ring
 * @nentries_mask: mask applied to ring indices (presumably nentries - 1,
 *	i.e. nentries is a power of two -- confirm with ring setup code)
 * @sw_index: software ring index
 * @write_index: cached copy of the write index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (device) address space
 */
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
1052 
/**
 * struct hif_dl_pipe_info - downlink (DL) copy-engine pipe ring bookkeeping
 * @nentries: number of entries in the ring
 * @nentries_mask: mask applied to ring indices (presumably nentries - 1,
 *	i.e. nentries is a power of two -- confirm with ring setup code)
 * @sw_index: software ring index
 * @write_index: cached copy of the write index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (device) address space
 */
struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
1062 
/**
 * struct hif_pipe_addl_info - additional per-pipe information
 * @pci_mem: PCI memory value for this pipe (presumably a BAR offset --
 *	TODO confirm with the bus layer)
 * @ctrl_addr: CE control register address
 * @ul_pipe: uplink pipe ring info
 * @dl_pipe: downlink pipe ring info
 */
struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
1069 
/* MSG_FLUSH_NUM: message flush batch size; smaller on SLUB-debug builds */
#ifdef CONFIG_SLUB_DEBUG_ON
#define MSG_FLUSH_NUM 16
#else /* PERF build */
#define MSG_FLUSH_NUM 32
#endif /* CONFIG_SLUB_DEBUG_ON */
1075 
1076 struct hif_bus_id;
1077 
1078 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
1079 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
1080 		     int opcode, void *config, uint32_t config_len);
1081 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
1082 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
1083 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
1084 		   struct hif_msg_callbacks *callbacks);
1085 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
1086 void hif_stop(struct hif_opaque_softc *hif_ctx);
1087 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
1088 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
1089 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
1090 		      uint8_t cmd_id, bool start);
1091 
1092 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1093 				  uint32_t transferID, uint32_t nbytes,
1094 				  qdf_nbuf_t wbuf, uint32_t data_attr);
1095 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1096 			     int force);
1097 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1098 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
1099 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
1100 			  uint8_t *DLPipe);
1101 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
1102 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
1103 			int *dl_is_polled);
1104 uint16_t
1105 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1106 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
1107 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
1108 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
1109 		     bool wait_for_it);
1110 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
#ifndef HIF_PCI
/**
 * hif_check_soc_status() - check SoC status (stub for non-PCI builds)
 * @hif_ctx: HIF opaque context
 *
 * Return: 0 always when HIF_PCI is not compiled in
 */
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
#else
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif
1119 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1120 			u32 *revision, const char **target_name);
1121 
1122 #ifdef RECEIVE_OFFLOAD
1123 /**
1124  * hif_offld_flush_cb_register() - Register the offld flush callback
1125  * @scn: HIF opaque context
1126  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
1127  *			 Or GRO/LRO flush when RxThread is not enabled. Called
1128  *			 with corresponding context for flush.
1129  * Return: None
1130  */
1131 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
1132 				 void (offld_flush_handler)(void *ol_ctx));
1133 
1134 /**
1135  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
1136  * @scn: HIF opaque context
1137  *
1138  * Return: None
1139  */
1140 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
1141 #endif
1142 
1143 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
1144 /**
1145  * hif_exec_should_yield() - Check if hif napi context should yield
1146  * @hif_ctx: HIF opaque context
1147  * @grp_id: grp_id of the napi for which check needs to be done
1148  *
1149  * The function uses grp_id to look for NAPI and checks if NAPI needs to
1150  * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
1151  * yield decision.
1152  *
1153  * Return: true if NAPI needs to yield, else false
1154  */
1155 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
1156 #else
1157 static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
1158 					 uint grp_id)
1159 {
1160 	return false;
1161 }
1162 #endif
1163 
1164 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
1165 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
1166 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
1167 				      int htc_htt_tx_endpoint);
1168 
1169 /**
1170  * hif_open() - Create hif handle
1171  * @qdf_ctx: qdf context
1172  * @mode: Driver Mode
1173  * @bus_type: Bus Type
1174  * @cbk: CDS Callbacks
1175  * @psoc: psoc object manager
1176  *
1177  * API to open HIF Context
1178  *
1179  * Return: HIF Opaque Pointer
1180  */
1181 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1182 				  uint32_t mode,
1183 				  enum qdf_bus_type bus_type,
1184 				  struct hif_driver_state_callbacks *cbk,
1185 				  struct wlan_objmgr_psoc *psoc);
1186 
1187 /**
1188  * hif_init_dma_mask() - Set dma mask for the dev
1189  * @dev: dev for which DMA mask is to be set
1190  * @bus_type: bus type for the target
1191  *
1192  * This API sets the DMA mask for the device. before the datapath
1193  * memory pre-allocation is done. If the DMA mask is not set before
1194  * requesting the DMA memory, kernel defaults to a 32-bit DMA mask,
1195  * and does not utilize the full device capability.
1196  *
1197  * Return: 0 - success, non-zero on failure.
1198  */
1199 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type);
1200 void hif_close(struct hif_opaque_softc *hif_ctx);
1201 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1202 		      void *bdev, const struct hif_bus_id *bid,
1203 		      enum qdf_bus_type bus_type,
1204 		      enum hif_enable_type type);
1205 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
1206 #ifdef CE_TASKLET_DEBUG_ENABLE
1207 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
1208 				 uint8_t value);
1209 #endif
1210 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
1211 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
1212 
1213 /**
1214  * enum hif_pm_wake_irq_type - Wake interrupt type for Power Management
1215  * @HIF_PM_INVALID_WAKE: Wake irq is invalid or not configured
1216  * @HIF_PM_MSI_WAKE: Wake irq is MSI interrupt
1217  * @HIF_PM_CE_WAKE: Wake irq is CE interrupt
1218  */
1219 typedef enum {
1220 	HIF_PM_INVALID_WAKE,
1221 	HIF_PM_MSI_WAKE,
1222 	HIF_PM_CE_WAKE,
1223 } hif_pm_wake_irq_type;
1224 
1225 /**
1226  * hif_pm_get_wake_irq_type - Get wake irq type for Power Management
1227  * @hif_ctx: HIF context
1228  *
1229  * Return: enum hif_pm_wake_irq_type
1230  */
1231 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx);
1232 
1233 /**
1234  * enum hif_ep_vote_type - hif ep vote type
1235  * @HIF_EP_VOTE_DP_ACCESS: vote type is specific DP
1236  * @HIF_EP_VOTE_NONDP_ACCESS: ep vote for over all access
1237  */
1238 enum hif_ep_vote_type {
1239 	HIF_EP_VOTE_DP_ACCESS,
1240 	HIF_EP_VOTE_NONDP_ACCESS
1241 };
1242 
1243 /**
1244  * enum hif_ep_vote_access - hif ep vote access
1245  * @HIF_EP_VOTE_ACCESS_ENABLE: Enable ep voting
1246  * @HIF_EP_VOTE_INTERMEDIATE_ACCESS: allow during transition
1247  * @HIF_EP_VOTE_ACCESS_DISABLE: disable ep voting
1248  */
1249 enum hif_ep_vote_access {
1250 	HIF_EP_VOTE_ACCESS_ENABLE,
1251 	HIF_EP_VOTE_INTERMEDIATE_ACCESS,
1252 	HIF_EP_VOTE_ACCESS_DISABLE
1253 };
1254 
1255 /**
1256  * enum hif_rtpm_client_id - modules registered with runtime pm module
1257  * @HIF_RTPM_ID_RESERVED: Reserved ID
1258  * @HIF_RTPM_ID_HAL_REO_CMD: HAL REO commands
1259  * @HIF_RTPM_ID_WMI: WMI commands Tx
1260  * @HIF_RTPM_ID_HTT: HTT commands Tx
1261  * @HIF_RTPM_ID_DP: Datapath Tx path
1262  * @HIF_RTPM_ID_DP_RING_STATS: Datapath ring stats
1263  * @HIF_RTPM_ID_CE: CE Tx buffer posting
1264  * @HIF_RTPM_ID_FORCE_WAKE: Force wake request
 * @HIF_RTPM_ID_PM_QOS_NOTIFY: PM QOS notification
 * @HIF_RTPM_ID_WIPHY_SUSPEND: wiphy suspend
1267  * @HIF_RTPM_ID_MAX: Max id
1268  */
1269 enum  hif_rtpm_client_id {
1270 	HIF_RTPM_ID_RESERVED,
1271 	HIF_RTPM_ID_HAL_REO_CMD,
1272 	HIF_RTPM_ID_WMI,
1273 	HIF_RTPM_ID_HTT,
1274 	HIF_RTPM_ID_DP,
1275 	HIF_RTPM_ID_DP_RING_STATS,
1276 	HIF_RTPM_ID_CE,
1277 	HIF_RTPM_ID_FORCE_WAKE,
1278 	HIF_RTPM_ID_PM_QOS_NOTIFY,
1279 	HIF_RTPM_ID_WIPHY_SUSPEND,
1280 	HIF_RTPM_ID_MAX
1281 };
1282 
1283 /**
1284  * enum rpm_type - Get and Put calls types
1285  * @HIF_RTPM_GET_ASYNC: Increment usage count and when system is suspended
1286  *		      schedule resume process, return depends on pm state.
1287  * @HIF_RTPM_GET_FORCE: Increment usage count and when system is suspended
1288  *		      schedule resume process, returns success irrespective of
1289  *		      pm_state.
1290  * @HIF_RTPM_GET_SYNC: Increment usage count and when system is suspended,
1291  *		     wait till process is resumed.
1292  * @HIF_RTPM_GET_NORESUME: Only increments usage count.
1293  * @HIF_RTPM_PUT_ASYNC: Decrements usage count and puts system in idle state.
1294  * @HIF_RTPM_PUT_SYNC_SUSPEND: Decrements usage count and puts system in
1295  *			     suspended state.
1296  * @HIF_RTPM_PUT_NOIDLE: Decrements usage count.
1297  */
1298 enum rpm_type {
1299 	HIF_RTPM_GET_ASYNC,
1300 	HIF_RTPM_GET_FORCE,
1301 	HIF_RTPM_GET_SYNC,
1302 	HIF_RTPM_GET_NORESUME,
1303 	HIF_RTPM_PUT_ASYNC,
1304 	HIF_RTPM_PUT_SYNC_SUSPEND,
1305 	HIF_RTPM_PUT_NOIDLE,
1306 };
1307 
1308 /**
1309  * struct hif_pm_runtime_lock - data structure for preventing runtime suspend
1310  * @list: global list of runtime locks
1311  * @active: true if this lock is preventing suspend
1312  * @name: character string for tracking this lock
1313  */
1314 struct hif_pm_runtime_lock {
1315 	struct list_head list;
1316 	bool active;
1317 	const char *name;
1318 };
1319 
1320 #ifdef FEATURE_RUNTIME_PM
1321 /**
1322  * hif_rtpm_register() - Register a module with runtime PM.
1323  * @id: ID of the module which needs to be registered
1324  * @hif_rpm_cbk: callback to be called when get was called in suspended state.
1325  *
1326  * Return: success status if successfully registered
1327  */
1328 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void));
1329 
1330 /**
1331  * hif_rtpm_deregister() - Deregister the module
1332  * @id: ID of the module which needs to be de-registered
1333  */
1334 QDF_STATUS hif_rtpm_deregister(uint32_t id);
1335 
1336 /**
1337  * hif_rtpm_set_autosuspend_delay() - Set delay to trigger RTPM suspend
1338  * @delay: delay in ms to be set
1339  *
1340  * Return: Success if delay is set successfully
1341  */
1342 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay);
1343 
1344 /**
1345  * hif_rtpm_restore_autosuspend_delay() - Restore delay value to default value
1346  *
1347  * Return: Success if reset done. E_ALREADY if delay same as config value
1348  */
1349 QDF_STATUS hif_rtpm_restore_autosuspend_delay(void);
1350 
1351 /**
1352  * hif_rtpm_get_autosuspend_delay() -Get delay to trigger RTPM suspend
1353  *
1354  * Return: Delay in ms
1355  */
1356 int hif_rtpm_get_autosuspend_delay(void);
1357 
1358 /**
1359  * hif_runtime_lock_init() - API to initialize Runtime PM context
1360  * @lock: QDF lock context
1361  * @name: Context name
1362  *
 * This API initializes the Runtime PM context of the caller.
 *
 * Return: status (0 on success)
1367  */
1368 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
1369 
1370 /**
1371  * hif_runtime_lock_deinit() - This API frees the runtime pm context
1372  * @data: Runtime PM context
1373  *
1374  * Return: void
1375  */
1376 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data);
1377 
1378 /**
1379  * hif_rtpm_get() - Increment usage_count on the device to avoid suspend.
1380  * @type: get call types from hif_rpm_type
1381  * @id: ID of the module calling get()
1382  *
1383  * A get operation will prevent a runtime suspend until a
1384  * corresponding put is done.  This api should be used when accessing bus.
1385  *
1386  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
1387  * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!!
1388  *
1389  * return: success if a get has been issued, else error code.
1390  */
1391 QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id);
1392 
1393 /**
1394  * hif_rtpm_put() - do a put operation on the device
1395  * @type: put call types from hif_rpm_type
1396  * @id: ID of the module calling put()
1397  *
1398  * A put operation will allow a runtime suspend after a corresponding
1399  * get was done.  This api should be used when finished accessing bus.
1400  *
1401  * This api will return a failure if runtime pm is stopped
1402  * This api will return failure if it would decrement the usage count below 0.
1403  *
1404  * return: QDF_STATUS_SUCCESS if the put is performed
1405  */
1406 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id);
1407 
1408 /**
1409  * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend
1410  * @data: runtime PM lock
1411  *
1412  * This function will prevent runtime suspend, by incrementing
1413  * device's usage count.
1414  *
1415  * Return: status
1416  */
1417 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data);
1418 
1419 /**
1420  * hif_pm_runtime_prevent_suspend_sync() - Synchronized prevent Runtime suspend
1421  * @data: runtime PM lock
1422  *
1423  * This function will prevent runtime suspend, by incrementing
1424  * device's usage count.
1425  *
1426  * Return: status
1427  */
1428 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data);
1429 
1430 /**
1431  * hif_pm_runtime_allow_suspend() - Allow Runtime suspend
1432  * @data: runtime PM lock
1433  *
1434  * This function will allow runtime suspend, by decrementing
1435  * device's usage count.
1436  *
1437  * Return: status
1438  */
1439 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data);
1440 
1441 /**
1442  * hif_rtpm_request_resume() - Request resume if bus is suspended
1443  *
1444  * Return: None
1445  */
1446 void hif_rtpm_request_resume(void);
1447 
1448 /**
1449  * hif_rtpm_sync_resume() - Invoke synchronous runtime resume.
1450  *
1451  * This function will invoke synchronous runtime resume.
1452  *
1453  * Return: status
1454  */
1455 QDF_STATUS hif_rtpm_sync_resume(void);
1456 
1457 /**
1458  * hif_rtpm_check_and_request_resume() - check if bus is suspended and
1459  *                                       request resume.
1460  *
1461  * Return: void
1462  */
1463 void hif_rtpm_check_and_request_resume(void);
1464 
1465 /**
1466  * hif_rtpm_set_client_job() - Set job for the client.
1467  * @client_id: Client id for which job needs to be set
1468  *
1469  * If get failed due to system being in suspended state, set the client job so
1470  * when system resumes the client's job is called.
1471  *
1472  * Return: None
1473  */
1474 void hif_rtpm_set_client_job(uint32_t client_id);
1475 
1476 /**
1477  * hif_rtpm_mark_last_busy() - Mark last busy to delay retry to suspend
1478  * @id: ID marking last busy
1479  *
1480  * Return: None
1481  */
1482 void hif_rtpm_mark_last_busy(uint32_t id);
1483 
1484 /**
1485  * hif_rtpm_get_monitor_wake_intr() - API to get monitor_wake_intr
1486  *
1487  * monitor_wake_intr variable can be used to indicate if driver expects wake
1488  * MSI for runtime PM
1489  *
1490  * Return: monitor_wake_intr variable
1491  */
1492 int hif_rtpm_get_monitor_wake_intr(void);
1493 
1494 /**
1495  * hif_rtpm_set_monitor_wake_intr() - API to set monitor_wake_intr
1496  * @val: value to set
1497  *
1498  * monitor_wake_intr variable can be used to indicate if driver expects wake
1499  * MSI for runtime PM
1500  *
1501  * Return: void
1502  */
1503 void hif_rtpm_set_monitor_wake_intr(int val);
1504 
1505 /**
1506  * hif_pre_runtime_suspend() - book keeping before beginning runtime suspend.
1507  * @hif_ctx: HIF context
1508  *
1509  * Makes sure that the pci link will be taken down by the suspend operation.
1510  * If the hif layer is configured to leave the bus on, runtime suspend will
1511  * not save any power.
1512  *
1513  * Set the runtime suspend state to SUSPENDING.
1514  *
1515  * return -EINVAL if the bus won't go down.  otherwise return 0
1516  */
1517 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1518 
1519 /**
1520  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
1521  *
1522  * update the runtime pm state to RESUMING.
1523  * Return: void
1524  */
1525 void hif_pre_runtime_resume(void);
1526 
1527 /**
1528  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
1529  *
1530  * Record the success.
1531  * update the runtime_pm state to SUSPENDED
1532  * Return: void
1533  */
1534 void hif_process_runtime_suspend_success(void);
1535 
1536 /**
1537  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
1538  *
1539  * Record the failure.
1540  * mark last busy to delay a retry.
1541  * update the runtime_pm state back to ON
1542  *
1543  * Return: void
1544  */
1545 void hif_process_runtime_suspend_failure(void);
1546 
1547 /**
1548  * hif_process_runtime_resume_linkup() - bookkeeping of resuming link up
1549  *
1550  * update the runtime_pm state to RESUMING_LINKUP
1551  * Return: void
1552  */
1553 void hif_process_runtime_resume_linkup(void);
1554 
1555 /**
1556  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
1557  *
1558  * record the success.
1559  * update the runtime_pm state to SUSPENDED
1560  * Return: void
1561  */
1562 void hif_process_runtime_resume_success(void);
1563 
1564 /**
1565  * hif_rtpm_print_prevent_list() - list the clients preventing suspend.
1566  *
1567  * Return: None
1568  */
1569 void hif_rtpm_print_prevent_list(void);
1570 
1571 /**
1572  * hif_rtpm_suspend_lock() - spin_lock on marking runtime suspend
1573  *
1574  * Return: void
1575  */
1576 void hif_rtpm_suspend_lock(void);
1577 
1578 /**
1579  * hif_rtpm_suspend_unlock() - spin_unlock on marking runtime suspend
1580  *
1581  * Return: void
1582  */
1583 void hif_rtpm_suspend_unlock(void);
1584 
1585 /**
1586  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
1587  * @hif_ctx: HIF context
1588  *
1589  * Return: 0 for success and non-zero error code for failure
1590  */
1591 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1592 
1593 /**
1594  * hif_runtime_resume() - do the bus resume part of a runtime resume
1595  * @hif_ctx: HIF context
1596  *
1597  * Return: 0 for success and non-zero error code for failure
1598  */
1599 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
1600 
1601 /**
1602  * hif_fastpath_resume() - resume fastpath for runtimepm
1603  * @hif_ctx: HIF context
1604  *
1605  * ensure that the fastpath write index register is up to date
1606  * since runtime pm may cause ce_send_fast to skip the register
1607  * write.
1608  *
1609  * fastpath only applicable to legacy copy engine
1610  */
1611 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
1612 
1613 /**
1614  * hif_rtpm_get_state(): get rtpm link state
1615  *
1616  * Return: state
1617  */
1618 int hif_rtpm_get_state(void);
1619 
1620 /**
1621  * hif_rtpm_display_last_busy_hist() - Display runtimepm last busy history
1622  * @hif_ctx: HIF context
1623  *
1624  * Return: None
1625  */
1626 void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx);
1627 
1628 /**
1629  * hif_rtpm_record_ce_last_busy_evt() - Record CE runtimepm last busy event
1630  * @scn: HIF context
1631  * @ce_id: CE id
1632  *
1633  * Return: None
1634  */
1635 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1636 				      unsigned long ce_id);
1637 #else
1638 
1639 /**
1640  * hif_rtpm_display_last_busy_hist() - Display runtimepm last busy history
1641  * @hif_ctx: HIF context
1642  *
1643  * Return: None
1644  */
1645 static inline
1646 void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx) { }
1647 
1648 /**
1649  * hif_rtpm_record_ce_last_busy_evt() - Record CE runtimepm last busy event
1650  * @scn: HIF context
1651  * @ce_id: CE id
1652  *
1653  * Return: None
1654  */
1655 static inline
1656 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1657 				      unsigned long ce_id)
1658 { }
1659 
1660 static inline
1661 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void))
1662 { return QDF_STATUS_SUCCESS; }
1663 
1664 static inline
1665 QDF_STATUS hif_rtpm_deregister(uint32_t id)
1666 { return QDF_STATUS_SUCCESS; }
1667 
1668 static inline
1669 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay)
1670 { return QDF_STATUS_SUCCESS; }
1671 
1672 static inline QDF_STATUS hif_rtpm_restore_autosuspend_delay(void)
1673 { return QDF_STATUS_SUCCESS; }
1674 
1675 static inline int hif_rtpm_get_autosuspend_delay(void)
1676 { return 0; }
1677 
1678 static inline
1679 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
1680 { return 0; }
1681 
1682 static inline
1683 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data)
1684 {}
1685 
1686 static inline
1687 int hif_rtpm_get(uint8_t type, uint32_t id)
1688 { return QDF_STATUS_SUCCESS; }
1689 
1690 static inline
1691 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
1692 { return QDF_STATUS_SUCCESS; }
1693 
1694 static inline
1695 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data)
1696 { return 0; }
1697 
1698 static inline
1699 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data)
1700 { return 0; }
1701 
1702 static inline
1703 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data)
1704 { return 0; }
1705 
1706 static inline
1707 QDF_STATUS hif_rtpm_sync_resume(void)
1708 { return QDF_STATUS_SUCCESS; }
1709 
1710 static inline
1711 void hif_rtpm_request_resume(void)
1712 {}
1713 
1714 static inline
1715 void hif_rtpm_check_and_request_resume(void)
1716 {}
1717 
1718 static inline
1719 void hif_rtpm_set_client_job(uint32_t client_id)
1720 {}
1721 
1722 static inline
1723 void hif_rtpm_print_prevent_list(void)
1724 {}
1725 
1726 static inline
1727 void hif_rtpm_suspend_unlock(void)
1728 {}
1729 
1730 static inline
1731 void hif_rtpm_suspend_lock(void)
1732 {}
1733 
1734 static inline
1735 int hif_rtpm_get_monitor_wake_intr(void)
1736 { return 0; }
1737 
1738 static inline
1739 void hif_rtpm_set_monitor_wake_intr(int val)
1740 {}
1741 
1742 static inline
1743 void hif_rtpm_mark_last_busy(uint32_t id)
1744 {}
1745 #endif
1746 
1747 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
1748 				 bool is_packet_log_enabled);
1749 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
1750 
1751 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
1752 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
1753 
1754 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
1755 
1756 #ifdef IPA_OFFLOAD
1757 /**
1758  * hif_get_ipa_hw_type() - get IPA hw type
1759  *
1760  * This API return the IPA hw type.
1761  *
1762  * Return: IPA hw type
1763  */
1764 static inline
1765 enum ipa_hw_type hif_get_ipa_hw_type(void)
1766 {
1767 	return ipa_get_hw_type();
1768 }
1769 
1770 /**
1771  * hif_get_ipa_present() - get IPA hw status
1772  *
1773  * This API return the IPA hw status.
1774  *
1775  * Return: true if IPA is present or false otherwise
1776  */
1777 static inline
1778 bool hif_get_ipa_present(void)
1779 {
1780 	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
1781 		return true;
1782 	else
1783 		return false;
1784 }
1785 #endif
1786 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
1787 
1788 /**
1789  * hif_bus_early_suspend() - stop non wmi tx traffic
1790  * @hif_ctx: hif context
1791  */
1792 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
1793 
1794 /**
1795  * hif_bus_late_resume() - resume non wmi traffic
1796  * @hif_ctx: hif context
1797  */
1798 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
1799 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
1800 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
1801 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
1802 
1803 /**
1804  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
1805  * @hif_ctx: an opaque HIF handle to use
1806  *
1807  * As opposed to the standard hif_irq_enable, this function always applies to
1808  * the APPS side kernel interrupt handling.
1809  *
1810  * Return: errno
1811  */
1812 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
1813 
1814 /**
1815  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
1816  * @hif_ctx: an opaque HIF handle to use
1817  *
1818  * As opposed to the standard hif_irq_disable, this function always applies to
1819  * the APPS side kernel interrupt handling.
1820  *
1821  * Return: errno
1822  */
1823 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
1824 
1825 /**
1826  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
1827  * @hif_ctx: an opaque HIF handle to use
1828  *
1829  * As opposed to the standard hif_irq_enable, this function always applies to
1830  * the APPS side kernel interrupt handling.
1831  *
1832  * Return: errno
1833  */
1834 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
1835 
1836 /**
1837  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
1838  * @hif_ctx: an opaque HIF handle to use
1839  *
1840  * As opposed to the standard hif_irq_disable, this function always applies to
1841  * the APPS side kernel interrupt handling.
1842  *
1843  * Return: errno
1844  */
1845 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
1846 
1847 /**
1848  * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side
1849  * @hif_ctx: an opaque HIF handle to use
1850  *
1851  * This function always applies to the APPS side kernel interrupt handling
1852  * to wake the system from suspend.
1853  *
1854  * Return: errno
1855  */
1856 int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx);
1857 
1858 /**
1859  * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side
1860  * @hif_ctx: an opaque HIF handle to use
1861  *
1862  * This function always applies to the APPS side kernel interrupt handling
1863  * to disable the wake irq.
1864  *
1865  * Return: errno
1866  */
1867 int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx);
1868 
1869 /**
1870  * hif_apps_enable_irqs_except_wake_irq() - Enables all irqs except wake_irq
1871  * @hif_ctx: an opaque HIF handle to use
1872  *
1873  * As opposed to the standard hif_irq_enable, this function always applies to
1874  * the APPS side kernel interrupt handling.
1875  *
1876  * Return: errno
1877  */
1878 int hif_apps_enable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1879 
1880 /**
1881  * hif_apps_disable_irqs_except_wake_irq() - Disables all irqs except wake_irq
1882  * @hif_ctx: an opaque HIF handle to use
1883  *
1884  * As opposed to the standard hif_irq_disable, this function always applies to
1885  * the APPS side kernel interrupt handling.
1886  *
1887  * Return: errno
1888  */
1889 int hif_apps_disable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1890 
1891 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
1892 int hif_dump_registers(struct hif_opaque_softc *scn);
1893 int ol_copy_ramdump(struct hif_opaque_softc *scn);
1894 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
1895 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1896 		     u32 *revision, const char **target_name);
1897 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
1898 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
1899 						   scn);
1900 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
1901 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
1902 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
1903 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1904 			   hif_target_status);
1905 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1906 			 struct hif_config_info *cfg);
1907 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
1908 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1909 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
1910 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1911 			   uint32_t transfer_id, u_int32_t len);
1912 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
1913 	uint32_t transfer_id, uint32_t download_len);
1914 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
1915 void hif_ce_war_disable(void);
1916 void hif_ce_war_enable(void);
1917 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
1918 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
1919 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
1920 		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
1921 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
1922 		uint32_t pipe_num);
1923 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
1924 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
1925 
1926 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
1927 				int rx_bundle_cnt);
1928 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
1929 
1930 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
1931 
1932 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
1933 
1934 enum hif_exec_type {
1935 	HIF_EXEC_NAPI_TYPE,
1936 	HIF_EXEC_TASKLET_TYPE,
1937 };
1938 
1939 typedef uint32_t (*ext_intr_handler)(void *, uint32_t, int);
1940 
1941 /**
1942  * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
1943  * @softc: hif opaque context owning the exec context
1944  * @id: the id of the interrupt context
1945  *
1946  * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
1947  *         'id' registered with the OS
1948  */
1949 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
1950 				uint8_t id);
1951 
1952 /**
1953  * hif_configure_ext_group_interrupts() - Configure ext group interrupts
1954  * @hif_ctx: hif opaque context
1955  *
1956  * Return: QDF_STATUS
1957  */
1958 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1959 
1960 /**
1961  * hif_deconfigure_ext_group_interrupts() - Deconfigure ext group interrupts
1962  * @hif_ctx: hif opaque context
1963  *
1964  * Return: None
1965  */
1966 void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1967 
1968 /**
1969  * hif_register_ext_group() - API to register external group
1970  * interrupt handler.
1971  * @hif_ctx : HIF Context
1972  * @numirq: number of irq's in the group
1973  * @irq: array of irq values
1974  * @handler: callback interrupt handler function
 * @cb_ctx: context to be passed to the callback
1976  * @context_name: text name of the context
1977  * @type: napi vs tasklet
1978  * @scale:
1979  *
1980  * Return: QDF_STATUS
1981  */
1982 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
1983 				  uint32_t numirq, uint32_t irq[],
1984 				  ext_intr_handler handler,
1985 				  void *cb_ctx, const char *context_name,
1986 				  enum hif_exec_type type, uint32_t scale);
1987 
1988 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
1989 				const char *context_name);
1990 
1991 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1992 				u_int8_t pipeid,
1993 				struct hif_msg_callbacks *callbacks);
1994 
1995 /**
1996  * hif_print_napi_stats() - Display HIF NAPI stats
1997  * @hif_ctx: HIF opaque context
1998  *
1999  * Return: None
2000  */
2001 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
2002 
2003 /**
2004  * hif_clear_napi_stats() - function clears the stats of the
2005  * latency when called.
2006  * @hif_ctx: the HIF context to assign the callback to
2007  *
2008  * Return: None
2009  */
2010 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
2011 
2012 #ifdef __cplusplus
2013 }
2014 #endif
2015 
2016 #ifdef FORCE_WAKE
2017 /**
2018  * hif_force_wake_request() - Function to wake from power collapse
2019  * @handle: HIF opaque handle
2020  *
2021  * Description: API to check if the device is awake or not before
2022  * read/write to BAR + 4K registers. If device is awake return
2023  * success otherwise write '1' to
2024  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG which will interrupt
 * the device and wakes up the PCI and MHI within 50ms
2026  * and then the device writes a value to
2027  * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
2028  * handshake process to let the host know the device is awake.
2029  *
2030  * Return: zero - success/non-zero - failure
2031  */
2032 int hif_force_wake_request(struct hif_opaque_softc *handle);
2033 
2034 /**
2035  * hif_force_wake_release() - API to release/reset the SOC wake register
2036  * from interrupting the device.
2037  * @handle: HIF opaque handle
2038  *
2039  * Description: API to set the
2040  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
2041  * to release the interrupt line.
2042  *
2043  * Return: zero - success/non-zero - failure
2044  */
2045 int hif_force_wake_release(struct hif_opaque_softc *handle);
2046 #else
/* Stub used when FORCE_WAKE is not enabled: no wake handshake is
 * performed and the call always reports success.
 */
static inline
int hif_force_wake_request(struct hif_opaque_softc *handle)
{
	return 0;
}

/* Stub used when FORCE_WAKE is not enabled: nothing to release,
 * always reports success.
 */
static inline
int hif_force_wake_release(struct hif_opaque_softc *handle)
{
	return 0;
}
2058 #endif /* FORCE_WAKE */
2059 
2060 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
2061 /**
2062  * hif_prevent_link_low_power_states() - Prevent from going to low power states
2063  * @hif: HIF opaque context
2064  *
2065  * Return: 0 on success. Error code on failure.
2066  */
2067 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);
2068 
2069 /**
2070  * hif_allow_link_low_power_states() - Allow link to go to low power states
2071  * @hif: HIF opaque context
2072  *
2073  * Return: None
2074  */
2075 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);
2076 
2077 #else
2078 
/* Stub used when FEATURE_HAL_DELAYED_REG_WRITE is not enabled:
 * low-power-state prevention is not needed, always reports success.
 */
static inline
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	return 0;
}

/* Stub used when FEATURE_HAL_DELAYED_REG_WRITE is not enabled: no-op. */
static inline
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
{
}
2089 #endif
2090 
2091 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
2092 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle);
2093 void *hif_get_dev_ba_pmm(struct hif_opaque_softc *hif_handle);
2094 
2095 /**
2096  * hif_get_dev_ba_cmem() - get base address of CMEM
2097  * @hif_handle: the HIF context
2098  *
2099  */
2100 void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle);
2101 
2102 /**
2103  * hif_get_soc_version() - get soc major version from target info
2104  * @hif_handle: the HIF context
2105  *
2106  * Return: version number
2107  */
2108 uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle);
2109 
2110 /**
2111  * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
2112  * @hif_ctx: the HIF context to assign the callback to
2113  * @callback: the callback to assign
2114  * @priv: the private data to pass to the callback when invoked
2115  *
2116  * Return: None
2117  */
2118 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
2119 			       void (*callback)(void *),
2120 			       void *priv);
2121 /*
2122  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
2123  * for defined here
2124  */
2125 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2126 ssize_t hif_dump_desc_trace_buf(struct device *dev,
2127 				struct device_attribute *attr, char *buf);
2128 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
2129 					const char *buf, size_t size);
2130 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
2131 				const char *buf, size_t size);
2132 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
2133 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
2134 #endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
2135 
2136 /**
2137  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
2138  * @hif: hif context
2139  * @ce_service_max_yield_time: CE service max yield time to set
2140  *
 * This API stores CE service max yield time in hif context based
2142  * on ini value.
2143  *
2144  * Return: void
2145  */
2146 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
2147 				       uint32_t ce_service_max_yield_time);
2148 
2149 /**
2150  * hif_get_ce_service_max_yield_time() - get CE service max yield time
2151  * @hif: hif context
2152  *
2153  * This API returns CE service max yield time.
2154  *
2155  * Return: CE service max yield time
2156  */
2157 unsigned long long
2158 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
2159 
2160 /**
2161  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
2162  * @hif: hif context
2163  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
2164  *
2165  * This API stores CE service max rx ind flush in hif context based
2166  * on ini value.
2167  *
2168  * Return: void
2169  */
2170 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2171 					 uint8_t ce_service_max_rx_ind_flush);
2172 
2173 #ifdef OL_ATH_SMART_LOGGING
2174 /**
2175  * hif_log_dump_ce() - Copy all the CE DEST ring to buf
2176  * @scn: HIF handler
2177  * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
2179  * @buf_sz: Size of the ring buffer
2180  * @ce: Copy Engine id
2181  * @skb_sz: Max size of the SKB buffer to be copied
2182  *
2183  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
2184  * and buffers pointed by them in to the given buf
2185  *
2186  * Return: Current pointer in ring buffer
2187  */
2188 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
2189 			 uint8_t *buf_init, uint32_t buf_sz,
2190 			 uint32_t ce, uint32_t skb_sz);
2191 #endif /* OL_ATH_SMART_LOGGING */
2192 
2193 /**
2194  * hif_softc_to_hif_opaque_softc() - API to convert hif_softc handle
2195  * to hif_opaque_softc handle
2196  * @hif_handle: hif_softc type
2197  *
2198  * Return: hif_opaque_softc type
2199  */
static inline struct hif_opaque_softc *
hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
{
	/* Both handle types name the same underlying object; the opaque
	 * form merely hides the internal layout from callers.
	 */
	struct hif_opaque_softc *opaque_handle;

	opaque_handle = (struct hif_opaque_softc *)hif_handle;

	return opaque_handle;
}
2205 
2206 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
2207 QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx);
2208 void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx);
2209 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx);
2210 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
2211 			    uint8_t type, uint8_t access);
2212 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
2213 			       uint8_t type);
2214 #else
/* Stub implementations used when EP vote-access tracking
 * (HIF_IPCI + FEATURE_HAL_DELAYED_REG_WRITE) is not compiled in:
 * access is always granted and the setters do nothing.
 */
static inline QDF_STATUS
hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type, uint8_t access)
{
}

static inline uint8_t
hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type)
{
	/* With tracking disabled, vote access is unconditionally enabled */
	return HIF_EP_VOTE_ACCESS_ENABLE;
}
2243 #endif
2244 
2245 #ifdef FORCE_WAKE
2246 /**
2247  * hif_srng_init_phase(): Indicate srng initialization phase
2248  * to avoid force wake as UMAC power collapse is not yet
2249  * enabled
2250  * @hif_ctx: hif opaque handle
2251  * @init_phase: initialization phase
2252  *
2253  * Return:  None
2254  */
2255 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
2256 			 bool init_phase);
2257 #else
/* Stub used when FORCE_WAKE is not enabled: the srng init phase needs
 * no tracking, so this is a no-op.
 */
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
}
2263 #endif /* FORCE_WAKE */
2264 
2265 #ifdef HIF_IPCI
2266 /**
 * hif_shutdown_notifier_cb() - Callback for shutdown notifier
2268  * @ctx: hif handle
2269  *
2270  * Return:  None
2271  */
2272 void hif_shutdown_notifier_cb(void *ctx);
2273 #else
/* Stub used when HIF_IPCI is not enabled: no shutdown notifier, no-op. */
static inline
void hif_shutdown_notifier_cb(void *ctx)
{
}
2278 #endif /* HIF_IPCI */
2279 
2280 #ifdef HIF_CE_LOG_INFO
2281 /**
2282  * hif_log_ce_info() - API to log ce info
2283  * @scn: hif handle
2284  * @data: hang event data buffer
2285  * @offset: offset at which data needs to be written
2286  *
2287  * Return:  None
2288  */
2289 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
2290 		     unsigned int *offset);
2291 #else
/* Stub used when HIF_CE_LOG_INFO is not enabled: nothing is written to
 * the hang-event buffer and @offset is left untouched.
 */
static inline
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset)
{
}
2297 #endif
2298 
2299 #ifdef HIF_CPU_PERF_AFFINE_MASK
2300 /**
2301  * hif_config_irq_set_perf_affinity_hint() - API to set affinity
2302  * @hif_ctx: hif opaque handle
2303  *
2304  * This function is used to move the WLAN IRQs to perf cores in
2305  * case of defconfig builds.
2306  *
2307  * Return:  None
2308  */
2309 void hif_config_irq_set_perf_affinity_hint(
2310 	struct hif_opaque_softc *hif_ctx);
2311 
2312 #else
/* Stub used when HIF_CPU_PERF_AFFINE_MASK is not enabled: IRQs keep
 * their default affinity.
 */
static inline void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx)
{
}
2317 #endif
2318 
2319 /**
2320  * hif_apps_grp_irqs_enable() - enable ext grp irqs
2321  * @hif_ctx: HIF opaque context
2322  *
2323  * Return: 0 on success. Error code on failure.
2324  */
2325 int hif_apps_grp_irqs_enable(struct hif_opaque_softc *hif_ctx);
2326 
2327 /**
2328  * hif_apps_grp_irqs_disable() - disable ext grp irqs
2329  * @hif_ctx: HIF opaque context
2330  *
2331  * Return: 0 on success. Error code on failure.
2332  */
2333 int hif_apps_grp_irqs_disable(struct hif_opaque_softc *hif_ctx);
2334 
2335 /**
2336  * hif_disable_grp_irqs() - disable ext grp irqs
2337  * @scn: HIF opaque context
2338  *
2339  * Return: 0 on success. Error code on failure.
2340  */
2341 int hif_disable_grp_irqs(struct hif_opaque_softc *scn);
2342 
2343 /**
2344  * hif_enable_grp_irqs() - enable ext grp irqs
2345  * @scn: HIF opaque context
2346  *
2347  * Return: 0 on success. Error code on failure.
2348  */
2349 int hif_enable_grp_irqs(struct hif_opaque_softc *scn);
2350 
2351 enum hif_credit_exchange_type {
2352 	HIF_REQUEST_CREDIT,
2353 	HIF_PROCESS_CREDIT_REPORT,
2354 };
2355 
2356 enum hif_detect_latency_type {
2357 	HIF_DETECT_TASKLET,
2358 	HIF_DETECT_CREDIT,
2359 	HIF_DETECT_UNKNOWN
2360 };
2361 
2362 #ifdef HIF_DETECTION_LATENCY_ENABLE
2363 void hif_latency_detect_credit_record_time(
2364 	enum hif_credit_exchange_type type,
2365 	struct hif_opaque_softc *hif_ctx);
2366 
2367 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx);
2368 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx);
2369 void hif_tasklet_latency(struct hif_softc *scn, bool from_timer);
2370 void hif_credit_latency(struct hif_softc *scn, bool from_timer);
2371 void hif_check_detection_latency(struct hif_softc *scn,
2372 				 bool from_timer,
2373 				 uint32_t bitmap_type);
2374 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value);
2375 #else
/* Stub implementations used when HIF_DETECTION_LATENCY_ENABLE is not
 * compiled in: latency detection is unavailable, so all entry points
 * are no-ops.
 */
static inline
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx)
{}
static inline
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{}

static inline
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{}
2398 #endif
2399 
2400 #ifdef SYSTEM_PM_CHECK
2401 /**
2402  * __hif_system_pm_set_state() - Set system pm state
2403  * @hif: hif opaque handle
2404  * @state: system state
2405  *
2406  * Return:  None
2407  */
2408 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
2409 			       enum hif_system_pm_state state);
2410 
2411 /**
2412  * hif_system_pm_set_state_on() - Set system pm state to ON
2413  * @hif: hif opaque handle
2414  *
2415  * Return:  None
2416  */
2417 static inline
2418 void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
2419 {
2420 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_ON);
2421 }
2422 
2423 /**
2424  * hif_system_pm_set_state_resuming() - Set system pm state to resuming
2425  * @hif: hif opaque handle
2426  *
2427  * Return:  None
2428  */
2429 static inline
2430 void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
2431 {
2432 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_RESUMING);
2433 }
2434 
2435 /**
2436  * hif_system_pm_set_state_suspending() - Set system pm state to suspending
2437  * @hif: hif opaque handle
2438  *
2439  * Return:  None
2440  */
2441 static inline
2442 void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
2443 {
2444 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDING);
2445 }
2446 
2447 /**
2448  * hif_system_pm_set_state_suspended() - Set system pm state to suspended
2449  * @hif: hif opaque handle
2450  *
2451  * Return:  None
2452  */
2453 static inline
2454 void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
2455 {
2456 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDED);
2457 }
2458 
2459 /**
2460  * hif_system_pm_get_state() - Get system pm state
2461  * @hif: hif opaque handle
2462  *
2463  * Return:  system state
2464  */
2465 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif);
2466 
2467 /**
2468  * hif_system_pm_state_check() - Check system state and trigger resume
2469  *  if required
2470  * @hif: hif opaque handle
2471  *
2472  * Return: 0 if system is in on state else error code
2473  */
2474 int hif_system_pm_state_check(struct hif_opaque_softc *hif);
2475 #else
/* Stub implementations used when SYSTEM_PM_CHECK is not compiled in:
 * state setters do nothing, and the state queries report "on"/success
 * (0) so callers proceed without a resume trigger.
 */
static inline
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
}

static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
}

static inline
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline int hif_system_pm_state_check(struct hif_opaque_softc *hif)
{
	return 0;
}
2512 #endif
2513 
2514 #ifdef FEATURE_IRQ_AFFINITY
2515 /**
2516  * hif_set_grp_intr_affinity() - API to set affinity for grp
2517  *  intrs set in the bitmap
2518  * @scn: hif handle
2519  * @grp_intr_bitmask: grp intrs for which perf affinity should be
2520  *  applied
2521  * @perf: affine to perf or non-perf cluster
2522  *
2523  * Return: None
2524  */
2525 void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
2526 			       uint32_t grp_intr_bitmask, bool perf);
2527 #else
/* Stub used when FEATURE_IRQ_AFFINITY is not enabled: group interrupt
 * affinity cannot be changed, no-op.
 */
static inline
void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
			       uint32_t grp_intr_bitmask, bool perf)
{
}
2533 #endif
2534 /**
2535  * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
2536  * @scn: hif opaque handle
2537  *
2538  * Description:
2539  *   Gets number of WMI EPs configured in target svc map. Since EP map
2540  *   include IN and OUT direction pipes, count only OUT pipes to get EPs
2541  *   configured for WMI service.
2542  *
2543  * Return:
2544  *  uint8_t: count for WMI eps in target svc map
2545  */
2546 uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *scn);
2547 
2548 #ifdef DP_UMAC_HW_RESET_SUPPORT
2549 /**
2550  * hif_register_umac_reset_handler() - Register UMAC HW reset handler
2551  * @hif_scn: hif opaque handle
2552  * @handler: callback handler function
 * @cb_ctx: context to be passed to @handler
2554  * @irq: irq number to be used for UMAC HW reset interrupt
2555  *
2556  * Return: QDF_STATUS of operation
2557  */
2558 QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
2559 					   int (*handler)(void *cb_ctx),
2560 					   void *cb_ctx, int irq);
2561 
2562 /**
2563  * hif_unregister_umac_reset_handler() - Unregister UMAC HW reset handler
2564  * @hif_scn: hif opaque handle
2565  *
2566  * Return: QDF_STATUS of operation
2567  */
2568 QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn);
2569 #else
/* Stub implementations used when DP_UMAC_HW_RESET_SUPPORT is not
 * compiled in: no handler is registered, both calls report success.
 */
static inline
QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
					   int (*handler)(void *cb_ctx),
					   void *cb_ctx, int irq)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
{
	return QDF_STATUS_SUCCESS;
}
2583 
2584 #endif /* DP_UMAC_HW_RESET_SUPPORT */
2585 
2586 #ifdef FEATURE_DIRECT_LINK
2587 /**
2588  * hif_set_irq_config_by_ceid() - Set irq configuration for CE given by id
2589  * @scn: hif opaque handle
2590  * @ce_id: CE id
2591  * @addr: irq trigger address
2592  * @data: irq trigger data
2593  *
2594  * Return: QDF status
2595  */
2596 QDF_STATUS
2597 hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
2598 			   uint64_t addr, uint32_t data);
2599 
2600 /**
2601  * hif_get_direct_link_ce_dest_srng_buffers() - Get Direct Link ce dest srng
2602  *  buffer information
2603  * @scn: hif opaque handle
2604  * @dma_addr: pointer to array of dma addresses
2605  * @buf_size: ce dest ring buffer size
2606  *
2607  * Return: Number of buffers attached to the dest srng.
2608  */
2609 uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn,
2610 						  uint64_t **dma_addr,
2611 						  uint32_t *buf_size);
2612 
2613 /**
2614  * hif_get_direct_link_ce_srng_info() - Get Direct Link CE srng information
2615  * @scn: hif opaque handle
2616  * @info: Direct Link CEs information
2617  * @max_ce_info_len: max array size of ce info
2618  *
2619  * Return: QDF status
2620  */
2621 QDF_STATUS
2622 hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
2623 				 struct hif_direct_link_ce_info *info,
2624 				 uint8_t max_ce_info_len);
2625 #else
/* Stub implementations used when FEATURE_DIRECT_LINK is not compiled
 * in: configuration calls report success without doing anything, and
 * no dest srng buffers are reported.
 */
static inline QDF_STATUS
hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
			   uint64_t addr, uint32_t data)
{
	return QDF_STATUS_SUCCESS;
}

static inline
uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn,
						  uint64_t **dma_addr,
						  uint32_t *buf_size)
{
	/* No Direct Link CEs exist, so zero buffers are attached */
	return 0;
}

static inline QDF_STATUS
hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
				 struct hif_direct_link_ce_info *info,
				 uint8_t max_ce_info_len)
{
	return QDF_STATUS_SUCCESS;
}
2648 #endif
2649 #endif /* _HIF_H_ */
2650