xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision b62151f8dd0743da724a4533988c78d2c7385d4f)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _HIF_H_
21 #define _HIF_H_
22 
23 #ifdef __cplusplus
24 extern "C" {
25 #endif /* __cplusplus */
26 
27 /* Header files */
28 #include <qdf_status.h>
29 #include "qdf_ipa.h"
30 #include "qdf_nbuf.h"
31 #include "qdf_lro.h"
32 #include "ol_if_athvar.h"
33 #include <linux/platform_device.h>
34 #ifdef HIF_PCI
35 #include <linux/pci.h>
36 #endif /* HIF_PCI */
37 #ifdef HIF_USB
38 #include <linux/usb.h>
39 #endif /* HIF_USB */
40 #ifdef IPA_OFFLOAD
41 #include <linux/ipa.h>
42 #endif
43 #include "cfg_ucfg_api.h"
44 #include "qdf_dev.h"
45 #include <wlan_init_cfg.h>
46 
/* Enable the dummy-space workaround for mailbox transfers (always on) */
#define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1

/* Opaque handle types used throughout the HIF layer */
typedef void __iomem *A_target_id_t;	/* mapped target register space */
typedef void *hif_handle_t;		/* opaque HIF instance handle */

#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
/* Wait-loop iteration bound while draining delayed register-write work
 * (NOTE(review): presumed retry count — confirm at the usage sites)
 */
#define HIF_WORK_DRAIN_WAIT_CNT 50

/* Wait-loop iteration bound for EP wake request reset
 * (NOTE(review): presumed retry count — confirm at the usage sites)
 */
#define HIF_EP_WAKE_RESET_WAIT_CNT 10
#endif
57 
/* HIF target (chipset) type identifiers */
#define HIF_TYPE_AR6002   2
#define HIF_TYPE_AR6003   3
#define HIF_TYPE_AR6004   5
#define HIF_TYPE_AR9888   6
#define HIF_TYPE_AR6320   7
#define HIF_TYPE_AR6320V2 8
/* For attaching Peregrine 2.0 board host_reg_tbl only */
#define HIF_TYPE_AR9888V2 9
#define HIF_TYPE_ADRASTEA 10
#define HIF_TYPE_AR900B 11
#define HIF_TYPE_QCA9984 12
#define HIF_TYPE_QCA9888 14
#define HIF_TYPE_QCA8074 15
#define HIF_TYPE_QCA6290 16
#define HIF_TYPE_QCN7605 17
#define HIF_TYPE_QCA6390 18
#define HIF_TYPE_QCA8074V2 19
#define HIF_TYPE_QCA6018  20
#define HIF_TYPE_QCN9000 21
#define HIF_TYPE_QCA6490 22
#define HIF_TYPE_QCA6750 23
#define HIF_TYPE_QCA5018 24
#define HIF_TYPE_QCN6122 25
#define HIF_TYPE_KIWI 26
#define HIF_TYPE_QCN9224 27
#define HIF_TYPE_QCA9574 28
#define HIF_TYPE_MANGO 29
#define HIF_TYPE_QCA5332 30
#define HIF_TYPE_QCN9160 31
#define HIF_TYPE_PEACH 32
#define HIF_TYPE_WCN6450 33
#define HIF_TYPE_QCN6432 34

/* Default DMA coherent mask: 37 address bits */
#define DMA_COHERENT_MASK_DEFAULT   37

#ifdef IPA_OFFLOAD
/* Narrower 32-bit mask required for IPA hardware older than v3 */
#define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
#endif
96 
/* enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring4,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
	txmon2host_monitor_destination_mac3,
	txmon2host_monitor_destination_mac2,
	txmon2host_monitor_destination_mac1,
	host2tx_monitor_ring1,
};
143 
#ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
/* enum hif_legacy_pci_irq - irq source indices for legacy (INTx) PCI
 * interrupts; values are implicit, starting from 0 (ce0).
 *
 * NOTE(review): 'reo2sw6_inrr' looks like a typo for 'reo2sw6_intr';
 * kept as-is because renaming the enumerator would break existing
 * references elsewhere in the driver.
 */
enum hif_legacy_pci_irq {
	ce0,
	ce1,
	ce2,
	ce3,
	ce4,
	ce5,
	ce6,
	ce7,
	ce8,
	ce9,
	ce10,
	ce11,
	ce12,
	ce13,
	ce14,
	ce15,
	reo2sw8_intr2,
	reo2sw7_intr2,
	reo2sw6_intr2,
	reo2sw5_intr2,
	reo2sw4_intr2,
	reo2sw3_intr2,
	reo2sw2_intr2,
	reo2sw1_intr2,
	reo2sw0_intr2,
	reo2sw8_intr,
	reo2sw7_intr,
	reo2sw6_inrr,
	reo2sw5_intr,
	reo2sw4_intr,
	reo2sw3_intr,
	reo2sw2_intr,
	reo2sw1_intr,
	reo2sw0_intr,
	reo2status_intr2,
	reo_status,
	reo2rxdma_out_2,
	reo2rxdma_out_1,
	reo_cmd,
	sw2reo6,
	sw2reo5,
	sw2reo1,
	sw2reo,
	rxdma2reo_mlo_0_dst_ring1,
	rxdma2reo_mlo_0_dst_ring0,
	rxdma2reo_mlo_1_dst_ring1,
	rxdma2reo_mlo_1_dst_ring0,
	rxdma2reo_dst_ring1,
	rxdma2reo_dst_ring0,
	rxdma2sw_dst_ring1,
	rxdma2sw_dst_ring0,
	rxdma2release_dst_ring1,
	rxdma2release_dst_ring0,
	sw2rxdma_2_src_ring,
	sw2rxdma_1_src_ring,
	sw2rxdma_0,
	wbm2sw6_release2,
	wbm2sw5_release2,
	wbm2sw4_release2,
	wbm2sw3_release2,
	wbm2sw2_release2,
	wbm2sw1_release2,
	wbm2sw0_release2,
	wbm2sw6_release,
	wbm2sw5_release,
	wbm2sw4_release,
	wbm2sw3_release,
	wbm2sw2_release,
	wbm2sw1_release,
	wbm2sw0_release,
	wbm2sw_link,
	wbm_error_release,
	sw2txmon_src_ring,
	sw2rxmon_src_ring,
	txmon2sw_p1_intr1,
	txmon2sw_p1_intr0,
	txmon2sw_p0_dest1,
	txmon2sw_p0_dest0,
	rxmon2sw_p1_intr1,
	rxmon2sw_p1_intr0,
	rxmon2sw_p0_dest1,
	rxmon2sw_p0_dest0,
	sw_release,
	sw2tcl_credit2,
	sw2tcl_credit,
	sw2tcl4,
	sw2tcl5,
	sw2tcl3,
	sw2tcl2,
	sw2tcl1,
	sw2wbm1,
	misc_8,
	misc_7,
	misc_6,
	misc_5,
	misc_4,
	misc_3,
	misc_2,
	misc_1,
	misc_0,
};
#endif
248 
/* Forward declaration; full definition lives in the CE layer */
struct CE_state;
/* Maximum number of copy-engine (CE) instances supported */
#ifdef QCA_WIFI_QCN9224
#define CE_COUNT_MAX 16
#else
#define CE_COUNT_MAX 12
#endif

/* Maximum number of interrupt/execution groups */
#ifndef HIF_MAX_GROUP
#define HIF_MAX_GROUP WLAN_CFG_INT_NUM_CONTEXTS
#endif

/* Maximum number of irqs per interrupt group */
#ifdef CONFIG_BERYLLIUM
#define HIF_MAX_GRP_IRQ 25
#else
#define HIF_MAX_GRP_IRQ 16
#endif
265 
/* NAPI budget scaling: scale = 1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT */
#ifndef NAPI_YIELD_BUDGET_BASED
#ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
#endif
#else  /* NAPI_YIELD_BUDGET_BASED */
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
#endif /* NAPI_YIELD_BUDGET_BASED */

#define QCA_NAPI_BUDGET    64
#define QCA_NAPI_DEF_SCALE  \
	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)

/* Upper bound on frames processed in one poll: budget * scale */
#define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
/* NOTE: "napi->scale" can be changed,
 * but this does not change the number of buckets
 */
#define QCA_NAPI_NUM_BUCKETS 4
283 
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is re-enabled
 * @napi_workdone: cumulative of all work done reported by handler
 * @cpu_corrected: incremented when execution context runs on a different core
 *			than the one that its irq is affined to.
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: longest single poll duration observed
 *	(units per hif_get_log_timestamp() source — confirm)
 * @poll_time_buckets: histogram of poll times for the napi
 *
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};
313 
314 
/**
 * struct qca_napi_info - per NAPI instance data structure
 * @netdev: dummy net_dev (NAPI requires a backing net_device)
 * @hif_ctx: opaque pointer back to the owning HIF context
 * @napi: kernel NAPI instance
 * @scale: budget scale factor (currently same value on all instances)
 * @id: instance identifier
 * @cpu: cpu this instance is associated with — TODO confirm semantics
 * @irq: irq number serviced by this instance
 * @cpumask: cpu mask for this instance (presumably irq affinity — verify)
 * @stats: per-cpu statistics, indexed by cpu id
 * @offld_flush_cb: rx offload flush callback (data rx CEs only)
 * @rx_thread_napi: NAPI instance used by the rx thread
 * @rx_thread_netdev: dummy net_device backing @rx_thread_napi
 * @lro_ctx: LRO (large receive offload) context
 *
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device    netdev; /* dummy net_dev */
	void                 *hif_ctx;
	struct napi_struct   napi;
	uint8_t              scale;   /* currently same on all instances */
	uint8_t              id;
	uint8_t              cpu;
	int                  irq;
	cpumask_t            cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct   rx_thread_napi;
	struct net_device    rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t        lro_ctx;
};
354 
/* Throughput state; drives the irq affinity & clock voting mode
 * (see qca_napi_data.napi_mode)
 */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};
/* Hotplug state of a cpu as tracked in the napi cpu table */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };
364 
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state: hotplug state of this cpu (enum qca_napi_cpu_state)
 * @core_id:     physical core id of the core
 * @cluster_id:  cluster this core belongs to
 * @core_mask:   mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq:    maximum clock this core can be clocked at
 *               same for all cpus of the same core.
 * @napis:       bitmap of napi instances on this core
 * @execs:       bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int			core_id;
	int			cluster_id;
	cpumask_t		core_mask;
	cpumask_t		thread_mask;
	unsigned int		max_freq;
	uint32_t		napis;
	uint32_t		execs;
	int			cluster_nxt;  /* index, not pointer */
};
396 
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi stat machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: created napi instances, indexed by CE id (see @ce_map note)
 * @napi_cpu: cpu info for irq affinty
 * @lilcl_head: head index into @napi_cpu (presumably the little-cluster
 *	chain via cluster_nxt — confirm)
 * @bigcl_head: head index into @napi_cpu (presumably the big-cluster
 *	chain via cluster_nxt — confirm)
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: NAPI feature/state flags — TODO confirm bit meanings
 */
struct qca_napi_data {
	struct               hif_softc *hif_softc;
	qdf_spinlock_t       lock;
	uint32_t             state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t             ce_map;
	uint32_t             exec_map;
	uint32_t             user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu  napi_cpu[NR_CPUS];
	int                  lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t              flags;
};
431 
/**
 * struct hif_config_info - Place Holder for HIF configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable Runtime PM (only with FEATURE_RUNTIME_PM)
 * @runtime_pm_delay: Runtime PM Delay (only with FEATURE_RUNTIME_PM)
 * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
 *
 * Structure for holding HIF ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	uint8_t enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
	uint64_t rx_softirq_max_yield_duration_ns;
};
449 
/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type (one of the HIF_TYPE_* values)
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: pointer to hardware name
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};

/* Empty tag type: opaque handle handed to HIF clients; the real context
 * lives behind it inside the HIF implementation.
 */
struct hif_opaque_softc {
};
470 
/**
 * struct hif_ce_ring_info - CE ring information
 * @ring_id: ring id
 * @ring_dir: ring direction
 * @num_entries: number of entries in ring
 * @entry_size: ring entry size
 * @ring_base_paddr: srng base physical address
 * @hp_paddr: head pointer physical address
 * @tp_paddr: tail pointer physical address
 */
struct hif_ce_ring_info {
	uint8_t ring_id;
	uint8_t ring_dir;
	uint32_t num_entries;
	uint32_t entry_size;
	uint64_t ring_base_paddr;
	uint64_t hp_paddr;
	uint64_t tp_paddr;
};

/**
 * struct hif_direct_link_ce_info - Direct Link CE information
 * @ce_id: CE id
 * @pipe_dir: Pipe direction
 * @ring_info: ring information
 */
struct hif_direct_link_ce_info {
	uint8_t ce_id;
	uint8_t pipe_dir;
	struct hif_ce_ring_info ring_info;
};
502 
/**
 * enum hif_event_type - Type of DP events to be recorded
 * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
 * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event
 * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event
 * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
 * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
 * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
 * @HIF_EVENT_BH_COMPLETE: NAPI POLL completion event
 * @HIF_EVENT_BH_FORCE_BREAK: NAPI POLL force break event
 * @HIF_EVENT_IRQ_DISABLE_EXPIRED: IRQ disable expired event
 *
 * Enumerator values double as bit positions in
 * HIF_EVENT_HIST_ENABLE_MASK (e.g. 0x19 = bits 0/3/4 = IRQ trigger,
 * BH sched, SRNG access start).
 */
enum hif_event_type {
	HIF_EVENT_IRQ_TRIGGER,
	HIF_EVENT_TIMER_ENTRY,
	HIF_EVENT_TIMER_EXIT,
	HIF_EVENT_BH_SCHED,
	HIF_EVENT_SRNG_ACCESS_START,
	HIF_EVENT_SRNG_ACCESS_END,
	HIF_EVENT_BH_COMPLETE,
	HIF_EVENT_BH_FORCE_BREAK,
	HIF_EVENT_IRQ_DISABLE_EXPIRED,
	/* Do check hif_hist_skip_event_record when adding new events */
};

/**
 * enum hif_system_pm_state - System PM state
 * @HIF_SYSTEM_PM_STATE_ON: System in active state
 * @HIF_SYSTEM_PM_STATE_BUS_RESUMING: bus resume in progress as part of
 *  system resume
 * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDING: bus suspend in progress as part of
 *  system suspend
 * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDED: bus suspended as part of system suspend
 */
enum hif_system_pm_state {
	HIF_SYSTEM_PM_STATE_ON,
	HIF_SYSTEM_PM_STATE_BUS_RESUMING,
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDING,
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDED,
};
543 
#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/* One event history per interrupt context/group */
#define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/* HIF_EVENT_HIST_MAX should always be power of 2 */
#define HIF_EVENT_HIST_MAX		512

/* Debug builds: enable recording of all event-type bits */
#define HIF_EVENT_HIST_ENABLE_MASK	0xFF

/**
 * hif_get_log_timestamp() - timestamp source for event-history entries
 *
 * Return: current time per qdf_get_log_timestamp()
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_get_log_timestamp();
}

#else

#define HIF_EVENT_HIST_MAX		32
/* Enable IRQ TRIGGER, NAPI SCHEDULE, SRNG ACCESS START */
#define HIF_EVENT_HIST_ENABLE_MASK	0x19

/**
 * hif_get_log_timestamp() - timestamp source for event-history entries
 *
 * Return: current time per qdf_sched_clock()
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_sched_clock();
}

#endif
570 
/**
 * struct hif_event_record - an entry of the DP event history
 * @hal_ring_id: ring id for which event is recorded
 * @hp: head pointer of the ring (may not be applicable for all events)
 * @tp: tail pointer of the ring (may not be applicable for all events)
 * @cpu_id: cpu id on which the event occurred
 * @timestamp: timestamp when event occurred
 * @type: type of the event
 *
 * This structure represents the information stored for every datapath
 * event which is logged in the history.
 */
struct hif_event_record {
	uint8_t hal_ring_id;
	uint32_t hp;
	uint32_t tp;
	int cpu_id;
	uint64_t timestamp;
	enum hif_event_type type;
};

/**
 * struct hif_event_misc - history related misc info
 * @last_irq_index: last irq event index in history
 * @last_irq_ts: last irq timestamp
 */
struct hif_event_misc {
	int32_t last_irq_index;
	uint64_t last_irq_ts;
};

/**
 * struct hif_event_history - history for one interrupt group
 * @index: index to store new event (ring buffer of HIF_EVENT_HIST_MAX)
 * @misc: event misc information
 * @event: event entry
 *
 * This structure represents the datapath history for one
 * interrupt group.
 */
struct hif_event_history {
	qdf_atomic_t index;
	struct hif_event_misc misc;
	struct hif_event_record event[HIF_EVENT_HIST_MAX];
};
616 
/**
 * hif_hist_record_event() - Record one datapath event in history
 * @hif_ctx: HIF opaque context
 * @event: DP event entry
 * @intr_grp_id: interrupt group ID registered with hif
 *
 * Return: None
 */
void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
			   struct hif_event_record *event,
			   uint8_t intr_grp_id);

/**
 * hif_event_history_init() - Initialize SRNG event history buffers
 * @hif_ctx: HIF opaque context
 * @id: context group ID for which history is recorded
 *
 * Return: None
 */
void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id);

/**
 * hif_event_history_deinit() - De-initialize SRNG event history buffers
 * @hif_ctx: HIF opaque context
 * @id: context group ID for which history is recorded
 *
 * Return: None
 */
void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id);
646 
647 /**
648  * hif_record_event() - Wrapper function to form and record DP event
649  * @hif_ctx: HIF opaque context
650  * @intr_grp_id: interrupt group ID registered with hif
651  * @hal_ring_id: ring id for which event is recorded
652  * @hp: head pointer index of the srng
653  * @tp: tail pointer index of the srng
654  * @type: type of the event to be logged in history
655  *
656  * Return: None
657  */
658 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
659 				    uint8_t intr_grp_id,
660 				    uint8_t hal_ring_id,
661 				    uint32_t hp,
662 				    uint32_t tp,
663 				    enum hif_event_type type)
664 {
665 	struct hif_event_record event;
666 
667 	event.hal_ring_id = hal_ring_id;
668 	event.hp = hp;
669 	event.tp = tp;
670 	event.type = type;
671 
672 	hif_hist_record_event(hif_ctx, &event, intr_grp_id);
673 
674 	return;
675 }
676 
#else

/* Stub: event history disabled; recording is a no-op */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}

/* Stub: event history disabled; nothing to initialize */
static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx,
					  uint8_t id)
{
}

/* Stub: event history disabled; nothing to tear down */
static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx,
					    uint8_t id)
{
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
698 
/* Debug dump of the control-traffic pipes state */
void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx);

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/* Debug dump of the latest CE descriptor history (debug builds only) */
void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx);
#else
static
inline void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx) {}
#endif
707 
/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
 *
 * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
 *                         platform-specific measures to completely power-off
 *                         the module and associated hardware (i.e. cut power
 *                         supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};

/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 * @HIF_ENABLE_TYPE_MAX: Max value
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};

/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 * @HIF_DISABLE_TYPE_MAX: Max value
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
754 
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get FIFO address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
791 
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/* One logged PCIe register access (debug builds).
 * NOTE(review): 'HID' looks like a typo for 'HIF'; kept as-is since
 * renaming the tag would break existing references.
 */
struct HID_ACCESS_LOG {
	uint32_t seqnum;	/* monotonically increasing access sequence */
	bool is_write;		/* true for writes, false for reads */
	void *addr;		/* register address accessed */
	uint32_t value;		/* value written or read */
};
#endif

/* Raw register accessors; offset is relative to the mapped target space */
void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
		uint32_t value);
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
804 
/* Number of HIF devices supported per system */
#define HIF_MAX_DEVICES                 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context:             context to pass to the @dsr_handler
 *                       note : @rw_compl_handler is provided the context
 *                       passed to hif_read_write
 * @rw_compl_handler:    Read / write completion handler
 * @dsr_handler:         DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};
819 
/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 * @get_bandwidth_level: Query current bandwidth level for the driver
 * @prealloc_get_consistent_mem_unaligned: get prealloc unaligned consistent mem
 * @prealloc_put_consistent_mem_unaligned: put unaligned consistent mem to pool
 *
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
	int (*get_bandwidth_level)(void *context);
	void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size,
						       qdf_dma_addr_t *paddr,
						       uint32_t ring_type);
	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
};
847 
/* This API detaches the HTC layer from the HIF device */
void hif_detach_htc(struct hif_opaque_softc *hif_ctx);

/****************************************************************/
/* BMI and Diag window abstraction                              */
/****************************************************************/

/* TimeoutMS value for hif_exchange_bmi_msg() meaning "no timeout"
 * (NOTE(review): presumed from the name — confirm in the BMI layer)
 */
#define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))

#define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
				     * handled atomically by
				     * DiagRead/DiagWrite
				     */
861 
#ifdef WLAN_FEATURE_BMI
/*
 * API to handle HIF-specific BMI message exchanges, this API is synchronous
 * and only allowed to be called from a context that can block (sleep)
 */
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
				uint8_t *pSendMessage, uint32_t Length,
				uint8_t *pResponseMessage,
				uint32_t *pResponseLength, uint32_t TimeoutMS);
/* Register BMI-related callbacks with the HIF layer */
void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
/* Query whether this target still requires the BMI phase */
bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
#else /* WLAN_FEATURE_BMI */
/* Stub: BMI support compiled out; nothing to register */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}

/* Stub: BMI support compiled out; target never needs BMI */
static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
#endif /* WLAN_FEATURE_BMI */
886 
#ifdef HIF_CPU_CLEAR_AFFINITY
/**
 * hif_config_irq_clear_cpu_affinity() - Remove cpu affinity of IRQ
 * @scn: HIF handle
 * @intr_ctxt_id: interrupt group index
 * @cpu: CPU core to clear
 *
 * Return: None
 */
void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
				       int intr_ctxt_id, int cpu);
#else
/* Stub: affinity clearing support compiled out; no-op */
static inline
void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
				       int intr_ctxt_id, int cpu)
{
}
#endif
905 
/*
 * APIs to handle HIF specific diagnostic read accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep). They are not high performance APIs.
 *
 * hif_diag_read_access reads a 4 Byte aligned/length value from a
 * Target register or memory word.
 *
 * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
				uint32_t address, uint32_t *data);
QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
		      uint8_t *data, int nbytes);
/* Copy @size bytes of target memory at @address into @ramdump_base */
void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
			void *ramdump_base, uint32_t address, uint32_t size);
/*
 * APIs to handle HIF specific diagnostic write accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep).
 * They are not high performance APIs.
 *
 * hif_diag_write_access writes a 4 Byte aligned/length value to a
 * Target register or memory word.
 *
 * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
				 uint32_t address, uint32_t data);
QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
			uint32_t address, uint8_t *data, int nbytes);
937 
/* Fastpath message handler: (context, nbuf array, number of nbufs) */
typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);

/* Enable / query polled (non-interrupt) CE processing mode */
void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);

/*
 * Set the FASTPATH_mode_on flag in sc, for use by data path
 */
#ifdef WLAN_FEATURE_FASTPATH
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);

/**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @hif_ctx: HIF opaque context
 * @handler: Callback function
 * @context: handle for callback function
 *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
 */
QDF_STATUS hif_ce_fastpath_cb_register(
		struct hif_opaque_softc *hif_ctx,
		fastpath_msg_handler handler, void *context);
#else
/* Stub: fastpath compiled out; registration always fails */
static inline QDF_STATUS hif_ce_fastpath_cb_register(
		struct hif_opaque_softc *hif_ctx,
		fastpath_msg_handler handler, void *context)
{
	return QDF_STATUS_E_FAILURE;
}

/* Stub: fastpath compiled out; no CE handle available */
static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}

#endif
976 
/*
 * Enable/disable CDC max performance workaround
 * For max-performance set this to 0
 * To allow SoC to enter sleep set this to 1
 */
#define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0

/* Export the CE source ring resources (ring mem, size, register address)
 * needed by the IPA offload path.
 */
void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
			     qdf_shared_mem_t **ce_sr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr);
988 
/**
 * struct hif_msg_callbacks - List of callbacks - filled in by HTC.
 * @Context: context meaningful to HTC
 * @txCompletionHandler: called on completion of a transmitted netbuf;
 *	receives the netbuf, its transfer ID and the toeplitz hash result
 * @rxCompletionHandler: called on reception of a netbuf on pipe @pipeID
 * @txResourceAvailHandler: called when tx resources become available on @pipe
 * @fwEventHandler: called to report a firmware event/status to HTC
 * @update_bundle_stats: called to record the number of packets in a bundle
 */
struct hif_msg_callbacks {
	void *Context;
	/**< context meaningful to HTC */
	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint32_t transferID,
					uint32_t toeplitz_hash_result);
	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint8_t pipeID);
	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
	void (*fwEventHandler)(void *context, QDF_STATUS status);
	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
};
1010 
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,  /* target got reset */
	TARGET_STATUS_EJECT,  /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
1017 
/**
 * enum hif_attribute_flags: configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor CE
 *	and no pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};
1029 
/*
 * HIF_DATA_ATTR_SET_* - OR a field into a CE data-attribute word.
 * Each helper masks @v to the field width and shifts it into its bit
 * position (bits 5..30) within @attr.
 */
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	(attr |= (v & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	(attr |= (v & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	(attr |= (v & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	(attr |= (v & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	(attr |= (v & 0x01) << 30)
1044 
/**
 * struct hif_ul_pipe_info - ring state for an uplink (UL) copy-engine pipe
 * @nentries: number of entries in the ring
 * @nentries_mask: mask used for wrapping ring indices
 * @sw_index: software ring index
 * @write_index: cached copy of the write index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (device) address space
 */
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
1054 
/**
 * struct hif_dl_pipe_info - ring state for a downlink (DL) copy-engine pipe
 * @nentries: number of entries in the ring
 * @nentries_mask: mask used for wrapping ring indices
 * @sw_index: software ring index
 * @write_index: cached copy of the write index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (device) address space
 */
struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
1064 
/**
 * struct hif_pipe_addl_info - additional information for a copy-engine pipe
 * @pci_mem: PCI memory address (NOTE(review): confirm exact semantics at use)
 * @ctrl_addr: CE control register address
 * @ul_pipe: uplink pipe ring state
 * @dl_pipe: downlink pipe ring state
 */
struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
1071 
#ifdef CONFIG_SLUB_DEBUG_ON
/* message flush batch size: smaller on SLUB debug builds than perf builds */
#define MSG_FLUSH_NUM 16
#else /* PERF build */
#define MSG_FLUSH_NUM 32
#endif /* SLUB_DEBUG_ON */
1077 
1078 struct hif_bus_id;
1079 
1080 #ifdef CUSTOM_CB_SCHEDULER_SUPPORT
1081 /**
1082  * hif_register_ce_custom_cb() - Helper API to register the custom callback
1083  * @hif_ctx: HIF opaque context
1084  * @pipe: Pipe number
1085  * @custom_cb: Custom call back function pointer
1086  * @custom_cb_context: Custom callback context
1087  *
1088  * return: QDF_STATUS
1089  */
1090 QDF_STATUS
1091 hif_register_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
1092 			  void (*custom_cb)(void *), void *custom_cb_context);
1093 
1094 /**
1095  * hif_unregister_ce_custom_cb() - Helper API to unregister the custom callback
1096  * @hif_ctx: HIF opaque context
1097  * @pipe: Pipe number
1098  *
1099  * return: QDF_STATUS
1100  */
1101 QDF_STATUS
1102 hif_unregister_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe);
1103 
1104 /**
1105  * hif_enable_ce_custom_cb() - Helper API to enable the custom callback
1106  * @hif_ctx: HIF opaque context
1107  * @pipe: Pipe number
1108  *
1109  * return: QDF_STATUS
1110  */
1111 QDF_STATUS
1112 hif_enable_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe);
1113 
1114 /**
1115  * hif_disable_ce_custom_cb() - Helper API to disable the custom callback
1116  * @hif_ctx: HIF opaque context
1117  * @pipe: Pipe number
1118  *
1119  * return: QDF_STATUS
1120  */
1121 QDF_STATUS
1122 hif_disable_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe);
1123 #endif /* CUSTOM_CB_SCHEDULER_SUPPORT */
1124 
1125 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
1126 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
1127 		     int opcode, void *config, uint32_t config_len);
1128 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
1129 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
1130 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
1131 		   struct hif_msg_callbacks *callbacks);
1132 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
1133 void hif_stop(struct hif_opaque_softc *hif_ctx);
1134 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
1135 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
1136 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
1137 		      uint8_t cmd_id, bool start);
1138 
1139 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1140 				  uint32_t transferID, uint32_t nbytes,
1141 				  qdf_nbuf_t wbuf, uint32_t data_attr);
1142 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1143 			     int force);
1144 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1145 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
1146 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
1147 			  uint8_t *DLPipe);
1148 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
1149 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
1150 			int *dl_is_polled);
1151 uint16_t
1152 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1153 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
1154 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
1155 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
1156 		     bool wait_for_it);
1157 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
1158 #ifndef HIF_PCI
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	/* No SoC status check on non-PCI builds; report success. */
	return 0;
}
1163 #else
1164 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
1165 #endif
1166 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1167 			u32 *revision, const char **target_name);
1168 
1169 #ifdef RECEIVE_OFFLOAD
1170 /**
1171  * hif_offld_flush_cb_register() - Register the offld flush callback
1172  * @scn: HIF opaque context
1173  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
1174  *			 Or GRO/LRO flush when RxThread is not enabled. Called
1175  *			 with corresponding context for flush.
1176  * Return: None
1177  */
1178 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
1179 				 void (offld_flush_handler)(void *ol_ctx));
1180 
1181 /**
1182  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
1183  * @scn: HIF opaque context
1184  *
1185  * Return: None
1186  */
1187 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
1188 #endif
1189 
1190 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
1191 /**
1192  * hif_exec_should_yield() - Check if hif napi context should yield
1193  * @hif_ctx: HIF opaque context
1194  * @grp_id: grp_id of the napi for which check needs to be done
1195  *
1196  * The function uses grp_id to look for NAPI and checks if NAPI needs to
1197  * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
1198  * yield decision.
1199  *
1200  * Return: true if NAPI needs to yield, else false
1201  */
1202 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
1203 #else
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
					 uint grp_id)
{
	/* Without WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT, NAPI never yields. */
	return false;
}
1209 #endif
1210 
1211 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
1212 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
1213 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
1214 				      int htc_htt_tx_endpoint);
1215 
1216 /**
1217  * hif_open() - Create hif handle
1218  * @qdf_ctx: qdf context
1219  * @mode: Driver Mode
1220  * @bus_type: Bus Type
1221  * @cbk: CDS Callbacks
1222  * @psoc: psoc object manager
1223  *
1224  * API to open HIF Context
1225  *
1226  * Return: HIF Opaque Pointer
1227  */
1228 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1229 				  uint32_t mode,
1230 				  enum qdf_bus_type bus_type,
1231 				  struct hif_driver_state_callbacks *cbk,
1232 				  struct wlan_objmgr_psoc *psoc);
1233 
1234 /**
1235  * hif_init_dma_mask() - Set dma mask for the dev
1236  * @dev: dev for which DMA mask is to be set
1237  * @bus_type: bus type for the target
1238  *
1239  * This API sets the DMA mask for the device. before the datapath
1240  * memory pre-allocation is done. If the DMA mask is not set before
1241  * requesting the DMA memory, kernel defaults to a 32-bit DMA mask,
1242  * and does not utilize the full device capability.
1243  *
1244  * Return: 0 - success, non-zero on failure.
1245  */
1246 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type);
1247 void hif_close(struct hif_opaque_softc *hif_ctx);
1248 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1249 		      void *bdev, const struct hif_bus_id *bid,
1250 		      enum qdf_bus_type bus_type,
1251 		      enum hif_enable_type type);
1252 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
1253 #ifdef CE_TASKLET_DEBUG_ENABLE
1254 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
1255 				 uint8_t value);
1256 #endif
1257 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
1258 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
1259 
1260 /**
1261  * enum hif_pm_wake_irq_type - Wake interrupt type for Power Management
1262  * @HIF_PM_INVALID_WAKE: Wake irq is invalid or not configured
1263  * @HIF_PM_MSI_WAKE: Wake irq is MSI interrupt
1264  * @HIF_PM_CE_WAKE: Wake irq is CE interrupt
1265  */
typedef enum {
	HIF_PM_INVALID_WAKE,	/* wake irq is invalid or not configured */
	HIF_PM_MSI_WAKE,	/* wake irq is an MSI interrupt */
	HIF_PM_CE_WAKE,		/* wake irq is a CE interrupt */
} hif_pm_wake_irq_type;
1271 
1272 /**
1273  * hif_pm_get_wake_irq_type - Get wake irq type for Power Management
1274  * @hif_ctx: HIF context
1275  *
1276  * Return: enum hif_pm_wake_irq_type
1277  */
1278 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx);
1279 
1280 /**
1281  * enum hif_ep_vote_type - hif ep vote type
1282  * @HIF_EP_VOTE_DP_ACCESS: vote type is specific DP
1283  * @HIF_EP_VOTE_NONDP_ACCESS: ep vote for over all access
1284  */
1285 enum hif_ep_vote_type {
1286 	HIF_EP_VOTE_DP_ACCESS,
1287 	HIF_EP_VOTE_NONDP_ACCESS
1288 };
1289 
1290 /**
1291  * enum hif_ep_vote_access - hif ep vote access
1292  * @HIF_EP_VOTE_ACCESS_ENABLE: Enable ep voting
1293  * @HIF_EP_VOTE_INTERMEDIATE_ACCESS: allow during transition
1294  * @HIF_EP_VOTE_ACCESS_DISABLE: disable ep voting
1295  */
1296 enum hif_ep_vote_access {
1297 	HIF_EP_VOTE_ACCESS_ENABLE,
1298 	HIF_EP_VOTE_INTERMEDIATE_ACCESS,
1299 	HIF_EP_VOTE_ACCESS_DISABLE
1300 };
1301 
/**
 * enum hif_rtpm_client_id - modules registered with runtime pm module
 * @HIF_RTPM_ID_RESERVED: Reserved ID
 * @HIF_RTPM_ID_HAL_REO_CMD: HAL REO commands
 * @HIF_RTPM_ID_WMI: WMI commands Tx
 * @HIF_RTPM_ID_HTT: HTT commands Tx
 * @HIF_RTPM_ID_DP: Datapath Tx path
 * @HIF_RTPM_ID_DP_RING_STATS: Datapath ring stats
 * @HIF_RTPM_ID_CE: CE Tx buffer posting
 * @HIF_RTPM_ID_FORCE_WAKE: Force wake request
 * @HIF_RTPM_ID_PM_QOS_NOTIFY: PM QoS notification
 * @HIF_RTPM_ID_WIPHY_SUSPEND: wiphy suspend/resume
 * @HIF_RTPM_ID_MAX: Max id
 */
enum  hif_rtpm_client_id {
	HIF_RTPM_ID_RESERVED,
	HIF_RTPM_ID_HAL_REO_CMD,
	HIF_RTPM_ID_WMI,
	HIF_RTPM_ID_HTT,
	HIF_RTPM_ID_DP,
	HIF_RTPM_ID_DP_RING_STATS,
	HIF_RTPM_ID_CE,
	HIF_RTPM_ID_FORCE_WAKE,
	HIF_RTPM_ID_PM_QOS_NOTIFY,
	HIF_RTPM_ID_WIPHY_SUSPEND,
	HIF_RTPM_ID_MAX
};
1329 
1330 /**
1331  * enum rpm_type - Get and Put calls types
1332  * @HIF_RTPM_GET_ASYNC: Increment usage count and when system is suspended
1333  *		      schedule resume process, return depends on pm state.
1334  * @HIF_RTPM_GET_FORCE: Increment usage count and when system is suspended
1335  *		      schedule resume process, returns success irrespective of
1336  *		      pm_state.
1337  * @HIF_RTPM_GET_SYNC: Increment usage count and when system is suspended,
1338  *		     wait till process is resumed.
1339  * @HIF_RTPM_GET_NORESUME: Only increments usage count.
1340  * @HIF_RTPM_PUT_ASYNC: Decrements usage count and puts system in idle state.
1341  * @HIF_RTPM_PUT_SYNC_SUSPEND: Decrements usage count and puts system in
1342  *			     suspended state.
1343  * @HIF_RTPM_PUT_NOIDLE: Decrements usage count.
1344  */
1345 enum rpm_type {
1346 	HIF_RTPM_GET_ASYNC,
1347 	HIF_RTPM_GET_FORCE,
1348 	HIF_RTPM_GET_SYNC,
1349 	HIF_RTPM_GET_NORESUME,
1350 	HIF_RTPM_PUT_ASYNC,
1351 	HIF_RTPM_PUT_SYNC_SUSPEND,
1352 	HIF_RTPM_PUT_NOIDLE,
1353 };
1354 
1355 /**
1356  * struct hif_pm_runtime_lock - data structure for preventing runtime suspend
1357  * @list: global list of runtime locks
1358  * @active: true if this lock is preventing suspend
1359  * @name: character string for tracking this lock
1360  */
1361 struct hif_pm_runtime_lock {
1362 	struct list_head list;
1363 	bool active;
1364 	const char *name;
1365 };
1366 
1367 #ifdef FEATURE_RUNTIME_PM
1368 /**
1369  * hif_rtpm_register() - Register a module with runtime PM.
1370  * @id: ID of the module which needs to be registered
1371  * @hif_rpm_cbk: callback to be called when get was called in suspended state.
1372  *
1373  * Return: success status if successfully registered
1374  */
1375 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void));
1376 
/**
 * hif_rtpm_deregister() - Deregister the module
 * @id: ID of the module which needs to be de-registered
 *
 * Return: success status if successfully de-registered
 */
QDF_STATUS hif_rtpm_deregister(uint32_t id);
1382 
1383 /**
1384  * hif_rtpm_set_autosuspend_delay() - Set delay to trigger RTPM suspend
1385  * @delay: delay in ms to be set
1386  *
1387  * Return: Success if delay is set successfully
1388  */
1389 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay);
1390 
1391 /**
1392  * hif_rtpm_restore_autosuspend_delay() - Restore delay value to default value
1393  *
1394  * Return: Success if reset done. E_ALREADY if delay same as config value
1395  */
1396 QDF_STATUS hif_rtpm_restore_autosuspend_delay(void);
1397 
1398 /**
1399  * hif_rtpm_get_autosuspend_delay() -Get delay to trigger RTPM suspend
1400  *
1401  * Return: Delay in ms
1402  */
1403 int hif_rtpm_get_autosuspend_delay(void);
1404 
/**
 * hif_runtime_lock_init() - API to initialize Runtime PM context
 * @lock: QDF lock context
 * @name: Context name
 *
 * This API initializes the Runtime PM context of the caller.
 *
 * Return: 0 on success, error code on failure
 */
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
1416 
1417 /**
1418  * hif_runtime_lock_deinit() - This API frees the runtime pm context
1419  * @data: Runtime PM context
1420  *
1421  * Return: void
1422  */
1423 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data);
1424 
1425 /**
1426  * hif_rtpm_get() - Increment usage_count on the device to avoid suspend.
1427  * @type: get call types from hif_rpm_type
1428  * @id: ID of the module calling get()
1429  *
1430  * A get operation will prevent a runtime suspend until a
1431  * corresponding put is done.  This api should be used when accessing bus.
1432  *
1433  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
1434  * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!!
1435  *
1436  * return: success if a get has been issued, else error code.
1437  */
1438 QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id);
1439 
1440 /**
1441  * hif_rtpm_put() - do a put operation on the device
1442  * @type: put call types from hif_rpm_type
1443  * @id: ID of the module calling put()
1444  *
1445  * A put operation will allow a runtime suspend after a corresponding
1446  * get was done.  This api should be used when finished accessing bus.
1447  *
1448  * This api will return a failure if runtime pm is stopped
1449  * This api will return failure if it would decrement the usage count below 0.
1450  *
1451  * return: QDF_STATUS_SUCCESS if the put is performed
1452  */
1453 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id);
1454 
1455 /**
1456  * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend
1457  * @data: runtime PM lock
1458  *
1459  * This function will prevent runtime suspend, by incrementing
1460  * device's usage count.
1461  *
1462  * Return: status
1463  */
1464 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data);
1465 
1466 /**
1467  * hif_pm_runtime_prevent_suspend_sync() - Synchronized prevent Runtime suspend
1468  * @data: runtime PM lock
1469  *
1470  * This function will prevent runtime suspend, by incrementing
1471  * device's usage count.
1472  *
1473  * Return: status
1474  */
1475 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data);
1476 
1477 /**
1478  * hif_pm_runtime_allow_suspend() - Allow Runtime suspend
1479  * @data: runtime PM lock
1480  *
1481  * This function will allow runtime suspend, by decrementing
1482  * device's usage count.
1483  *
1484  * Return: status
1485  */
1486 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data);
1487 
1488 /**
1489  * hif_rtpm_request_resume() - Request resume if bus is suspended
1490  *
1491  * Return: None
1492  */
1493 void hif_rtpm_request_resume(void);
1494 
1495 /**
1496  * hif_rtpm_sync_resume() - Invoke synchronous runtime resume.
1497  *
1498  * This function will invoke synchronous runtime resume.
1499  *
1500  * Return: status
1501  */
1502 QDF_STATUS hif_rtpm_sync_resume(void);
1503 
1504 /**
1505  * hif_rtpm_check_and_request_resume() - check if bus is suspended and
1506  *                                       request resume.
1507  *
1508  * Return: void
1509  */
1510 void hif_rtpm_check_and_request_resume(void);
1511 
1512 /**
1513  * hif_rtpm_set_client_job() - Set job for the client.
1514  * @client_id: Client id for which job needs to be set
1515  *
1516  * If get failed due to system being in suspended state, set the client job so
1517  * when system resumes the client's job is called.
1518  *
1519  * Return: None
1520  */
1521 void hif_rtpm_set_client_job(uint32_t client_id);
1522 
1523 /**
1524  * hif_rtpm_mark_last_busy() - Mark last busy to delay retry to suspend
1525  * @id: ID marking last busy
1526  *
1527  * Return: None
1528  */
1529 void hif_rtpm_mark_last_busy(uint32_t id);
1530 
1531 /**
1532  * hif_rtpm_get_monitor_wake_intr() - API to get monitor_wake_intr
1533  *
1534  * monitor_wake_intr variable can be used to indicate if driver expects wake
1535  * MSI for runtime PM
1536  *
1537  * Return: monitor_wake_intr variable
1538  */
1539 int hif_rtpm_get_monitor_wake_intr(void);
1540 
1541 /**
1542  * hif_rtpm_set_monitor_wake_intr() - API to set monitor_wake_intr
1543  * @val: value to set
1544  *
1545  * monitor_wake_intr variable can be used to indicate if driver expects wake
1546  * MSI for runtime PM
1547  *
1548  * Return: void
1549  */
1550 void hif_rtpm_set_monitor_wake_intr(int val);
1551 
1552 /**
1553  * hif_pre_runtime_suspend() - book keeping before beginning runtime suspend.
1554  * @hif_ctx: HIF context
1555  *
1556  * Makes sure that the pci link will be taken down by the suspend operation.
1557  * If the hif layer is configured to leave the bus on, runtime suspend will
1558  * not save any power.
1559  *
1560  * Set the runtime suspend state to SUSPENDING.
1561  *
1562  * return -EINVAL if the bus won't go down.  otherwise return 0
1563  */
1564 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1565 
1566 /**
1567  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
1568  *
1569  * update the runtime pm state to RESUMING.
1570  * Return: void
1571  */
1572 void hif_pre_runtime_resume(void);
1573 
1574 /**
1575  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
1576  *
1577  * Record the success.
1578  * update the runtime_pm state to SUSPENDED
1579  * Return: void
1580  */
1581 void hif_process_runtime_suspend_success(void);
1582 
1583 /**
1584  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
1585  *
1586  * Record the failure.
1587  * mark last busy to delay a retry.
1588  * update the runtime_pm state back to ON
1589  *
1590  * Return: void
1591  */
1592 void hif_process_runtime_suspend_failure(void);
1593 
1594 /**
1595  * hif_process_runtime_resume_linkup() - bookkeeping of resuming link up
1596  *
1597  * update the runtime_pm state to RESUMING_LINKUP
1598  * Return: void
1599  */
1600 void hif_process_runtime_resume_linkup(void);
1601 
1602 /**
1603  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
1604  *
1605  * record the success.
1606  * update the runtime_pm state to SUSPENDED
1607  * Return: void
1608  */
1609 void hif_process_runtime_resume_success(void);
1610 
1611 /**
1612  * hif_rtpm_print_prevent_list() - list the clients preventing suspend.
1613  *
1614  * Return: None
1615  */
1616 void hif_rtpm_print_prevent_list(void);
1617 
1618 /**
1619  * hif_rtpm_suspend_lock() - spin_lock on marking runtime suspend
1620  *
1621  * Return: void
1622  */
1623 void hif_rtpm_suspend_lock(void);
1624 
1625 /**
1626  * hif_rtpm_suspend_unlock() - spin_unlock on marking runtime suspend
1627  *
1628  * Return: void
1629  */
1630 void hif_rtpm_suspend_unlock(void);
1631 
1632 /**
1633  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
1634  * @hif_ctx: HIF context
1635  *
1636  * Return: 0 for success and non-zero error code for failure
1637  */
1638 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1639 
1640 /**
1641  * hif_runtime_resume() - do the bus resume part of a runtime resume
1642  * @hif_ctx: HIF context
1643  *
1644  * Return: 0 for success and non-zero error code for failure
1645  */
1646 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
1647 
1648 /**
1649  * hif_fastpath_resume() - resume fastpath for runtimepm
1650  * @hif_ctx: HIF context
1651  *
1652  * ensure that the fastpath write index register is up to date
1653  * since runtime pm may cause ce_send_fast to skip the register
1654  * write.
1655  *
1656  * fastpath only applicable to legacy copy engine
1657  */
1658 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
1659 
1660 /**
1661  * hif_rtpm_get_state(): get rtpm link state
1662  *
1663  * Return: state
1664  */
1665 int hif_rtpm_get_state(void);
1666 
1667 /**
1668  * hif_rtpm_display_last_busy_hist() - Display runtimepm last busy history
1669  * @hif_ctx: HIF context
1670  *
1671  * Return: None
1672  */
1673 void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx);
1674 
1675 /**
1676  * hif_rtpm_record_ce_last_busy_evt() - Record CE runtimepm last busy event
1677  * @scn: HIF context
1678  * @ce_id: CE id
1679  *
1680  * Return: None
1681  */
1682 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1683 				      unsigned long ce_id);
1684 #else
1685 
1686 /**
1687  * hif_rtpm_display_last_busy_hist() - Display runtimepm last busy history
1688  * @hif_ctx: HIF context
1689  *
1690  * Return: None
1691  */
1692 static inline
1693 void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx) { }
1694 
1695 /**
1696  * hif_rtpm_record_ce_last_busy_evt() - Record CE runtimepm last busy event
1697  * @scn: HIF context
1698  * @ce_id: CE id
1699  *
1700  * Return: None
1701  */
1702 static inline
1703 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1704 				      unsigned long ce_id)
1705 { }
1706 
1707 static inline
1708 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void))
1709 { return QDF_STATUS_SUCCESS; }
1710 
1711 static inline
1712 QDF_STATUS hif_rtpm_deregister(uint32_t id)
1713 { return QDF_STATUS_SUCCESS; }
1714 
1715 static inline
1716 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay)
1717 { return QDF_STATUS_SUCCESS; }
1718 
1719 static inline QDF_STATUS hif_rtpm_restore_autosuspend_delay(void)
1720 { return QDF_STATUS_SUCCESS; }
1721 
1722 static inline int hif_rtpm_get_autosuspend_delay(void)
1723 { return 0; }
1724 
1725 static inline
1726 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
1727 { return 0; }
1728 
1729 static inline
1730 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data)
1731 {}
1732 
1733 static inline
1734 int hif_rtpm_get(uint8_t type, uint32_t id)
1735 { return QDF_STATUS_SUCCESS; }
1736 
1737 static inline
1738 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
1739 { return QDF_STATUS_SUCCESS; }
1740 
1741 static inline
1742 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data)
1743 { return 0; }
1744 
1745 static inline
1746 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data)
1747 { return 0; }
1748 
1749 static inline
1750 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data)
1751 { return 0; }
1752 
1753 static inline
1754 QDF_STATUS hif_rtpm_sync_resume(void)
1755 { return QDF_STATUS_SUCCESS; }
1756 
1757 static inline
1758 void hif_rtpm_request_resume(void)
1759 {}
1760 
1761 static inline
1762 void hif_rtpm_check_and_request_resume(void)
1763 {}
1764 
1765 static inline
1766 void hif_rtpm_set_client_job(uint32_t client_id)
1767 {}
1768 
1769 static inline
1770 void hif_rtpm_print_prevent_list(void)
1771 {}
1772 
1773 static inline
1774 void hif_rtpm_suspend_unlock(void)
1775 {}
1776 
1777 static inline
1778 void hif_rtpm_suspend_lock(void)
1779 {}
1780 
1781 static inline
1782 int hif_rtpm_get_monitor_wake_intr(void)
1783 { return 0; }
1784 
1785 static inline
1786 void hif_rtpm_set_monitor_wake_intr(int val)
1787 {}
1788 
1789 static inline
1790 void hif_rtpm_mark_last_busy(uint32_t id)
1791 {}
1792 #endif
1793 
1794 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
1795 				 bool is_packet_log_enabled);
1796 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
1797 
1798 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
1799 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
1800 
1801 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
1802 
1803 #ifdef IPA_OFFLOAD
1804 /**
1805  * hif_get_ipa_hw_type() - get IPA hw type
1806  *
1807  * This API return the IPA hw type.
1808  *
1809  * Return: IPA hw type
1810  */
1811 static inline
1812 enum ipa_hw_type hif_get_ipa_hw_type(void)
1813 {
1814 	return ipa_get_hw_type();
1815 }
1816 
1817 /**
1818  * hif_get_ipa_present() - get IPA hw status
1819  *
1820  * This API return the IPA hw status.
1821  *
1822  * Return: true if IPA is present or false otherwise
1823  */
1824 static inline
1825 bool hif_get_ipa_present(void)
1826 {
1827 	if (qdf_ipa_uc_reg_rdyCB(NULL) != -EPERM)
1828 		return true;
1829 	else
1830 		return false;
1831 }
1832 #endif
1833 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
1834 
1835 /**
1836  * hif_bus_early_suspend() - stop non wmi tx traffic
1837  * @hif_ctx: hif context
1838  */
1839 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
1840 
1841 /**
1842  * hif_bus_late_resume() - resume non wmi traffic
1843  * @hif_ctx: hif context
1844  */
1845 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
1846 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
1847 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
1848 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
1849 
1850 /**
1851  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
1852  * @hif_ctx: an opaque HIF handle to use
1853  *
1854  * As opposed to the standard hif_irq_enable, this function always applies to
1855  * the APPS side kernel interrupt handling.
1856  *
1857  * Return: errno
1858  */
1859 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
1860 
1861 /**
1862  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
1863  * @hif_ctx: an opaque HIF handle to use
1864  *
1865  * As opposed to the standard hif_irq_disable, this function always applies to
1866  * the APPS side kernel interrupt handling.
1867  *
1868  * Return: errno
1869  */
1870 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
1871 
1872 /**
1873  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
1874  * @hif_ctx: an opaque HIF handle to use
1875  *
1876  * As opposed to the standard hif_irq_enable, this function always applies to
1877  * the APPS side kernel interrupt handling.
1878  *
1879  * Return: errno
1880  */
1881 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
1882 
1883 /**
1884  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
1885  * @hif_ctx: an opaque HIF handle to use
1886  *
1887  * As opposed to the standard hif_irq_disable, this function always applies to
1888  * the APPS side kernel interrupt handling.
1889  *
1890  * Return: errno
1891  */
1892 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
1893 
1894 /**
1895  * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side
1896  * @hif_ctx: an opaque HIF handle to use
1897  *
1898  * This function always applies to the APPS side kernel interrupt handling
1899  * to wake the system from suspend.
1900  *
1901  * Return: errno
1902  */
1903 int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx);
1904 
1905 /**
1906  * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side
1907  * @hif_ctx: an opaque HIF handle to use
1908  *
1909  * This function always applies to the APPS side kernel interrupt handling
1910  * to disable the wake irq.
1911  *
1912  * Return: errno
1913  */
1914 int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx);
1915 
1916 /**
1917  * hif_apps_enable_irqs_except_wake_irq() - Enables all irqs except wake_irq
1918  * @hif_ctx: an opaque HIF handle to use
1919  *
1920  * As opposed to the standard hif_irq_enable, this function always applies to
1921  * the APPS side kernel interrupt handling.
1922  *
1923  * Return: errno
1924  */
1925 int hif_apps_enable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1926 
1927 /**
1928  * hif_apps_disable_irqs_except_wake_irq() - Disables all irqs except wake_irq
1929  * @hif_ctx: an opaque HIF handle to use
1930  *
1931  * As opposed to the standard hif_irq_disable, this function always applies to
1932  * the APPS side kernel interrupt handling.
1933  *
1934  * Return: errno
1935  */
1936 int hif_apps_disable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1937 
/* Fill @irq with up to @size HIF-owned OS IRQ numbers — return semantics
 * (count vs errno) live with the per-bus implementation; confirm there.
 */
int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
/* Dump bus/target registers for debugging. */
int hif_dump_registers(struct hif_opaque_softc *scn);
/* Copy firmware ramdump contents (see hif_get_ramdump_ctx() below). */
int ol_copy_ramdump(struct hif_opaque_softc *scn);
/* Crash-time shutdown path for the HIF layer. */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
/* Retrieve target hardware version/revision and a printable target name. */
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);
/* Bus type (PCI/USB/...) backing this HIF instance. */
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
/* Accessors for sub-structures carried by the HIF context. */
struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
						   scn);
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
/* Get/set the recorded target status (enum hif_target_status). */
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
			   hif_target_status);
/* Store INI-derived configuration into the HIF context. */
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg);
/* Tx-path entry points; exact semantics live with the bus implementations. */
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			   uint32_t transfer_id, u_int32_t len);
int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
	uint32_t transfer_id, uint32_t download_len);
/* Set the per-packet download length used by the fast-send path. */
void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
/* Global enable/disable of the copy-engine workaround — confirm scope. */
void hif_ce_war_disable(void);
void hif_ce_war_enable(void);
/* Mask interrupts for the given copy-engine pipe. */
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
/* NSS wifi offload: additional pipe info and wifiol mode controls. */
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
		uint32_t pipe_num);
int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */

/* Enable/disable rx bundling with the given bundle count. */
void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
				int rx_bundle_cnt);
/* Reset and resume the underlying bus. */
int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);

/* Record HIF attribute flags in the context. */
void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);

/* LRO (large receive offload) state for interrupt context @ctx_id. */
void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
1980 
/* Execution model for an ext interrupt group's bottom half. */
enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,	/* serviced in a NAPI poll context */
	HIF_EXEC_TASKLET_TYPE,	/* serviced in a tasklet */
};

/*
 * Handler invoked for an ext interrupt group; the callback and its
 * context are supplied through hif_register_ext_group().
 */
typedef uint32_t (*ext_intr_handler)(void *, uint32_t, int);
1987 
1988 /**
1989  * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
1990  * @softc: hif opaque context owning the exec context
1991  * @id: the id of the interrupt context
1992  *
1993  * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
1994  *         'id' registered with the OS
1995  */
1996 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
1997 				uint8_t id);
1998 
1999 /**
2000  * hif_configure_ext_group_interrupts() - Configure ext group interrupts
2001  * @hif_ctx: hif opaque context
2002  *
2003  * Return: QDF_STATUS
2004  */
2005 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
2006 
2007 /**
2008  * hif_deconfigure_ext_group_interrupts() - Deconfigure ext group interrupts
2009  * @hif_ctx: hif opaque context
2010  *
2011  * Return: None
2012  */
2013 void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
2014 
2015 /**
2016  * hif_register_ext_group() - API to register external group
2017  * interrupt handler.
2018  * @hif_ctx : HIF Context
2019  * @numirq: number of irq's in the group
2020  * @irq: array of irq values
2021  * @handler: callback interrupt handler function
2022  * @cb_ctx: context to passed in callback
2023  * @context_name: text name of the context
2024  * @type: napi vs tasklet
2025  * @scale:
2026  *
2027  * Return: QDF_STATUS
2028  */
2029 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
2030 				  uint32_t numirq, uint32_t irq[],
2031 				  ext_intr_handler handler,
2032 				  void *cb_ctx, const char *context_name,
2033 				  enum hif_exec_type type, uint32_t scale);
2034 
/**
 * hif_deregister_exec_group() - Deregister the exec group previously
 * registered under @context_name
 * @hif_ctx: HIF Context
 * @context_name: text name the group was registered with
 *
 * Return: None
 */
void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
				const char *context_name);

/**
 * hif_update_pipe_callback() - Update the message callbacks for a pipe
 * @osc: HIF opaque context
 * @pipeid: pipe whose callbacks are replaced
 * @callbacks: new callback table for the pipe
 *
 * Return: None
 */
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
				u_int8_t pipeid,
				struct hif_msg_callbacks *callbacks);
2041 
2042 /**
2043  * hif_print_napi_stats() - Display HIF NAPI stats
2044  * @hif_ctx: HIF opaque context
2045  *
2046  * Return: None
2047  */
2048 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
2049 
2050 /**
2051  * hif_clear_napi_stats() - function clears the stats of the
2052  * latency when called.
2053  * @hif_ctx: the HIF context to assign the callback to
2054  *
2055  * Return: None
2056  */
2057 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
2058 
2059 #ifdef __cplusplus
2060 }
2061 #endif
2062 
#ifdef FORCE_WAKE
/**
 * hif_force_wake_request() - Function to wake from power collapse
 * @handle: HIF opaque handle
 *
 * Description: API to check if the device is awake or not before
 * read/write to BAR + 4K registers. If device is awake return
 * success otherwise write '1' to
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG which will interrupt
 * the device and does wakeup the PCI and MHI within 50ms
 * and then the device writes a value to
 * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
 * handshake process to let the host know the device is awake.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_request(struct hif_opaque_softc *handle);

/**
 * hif_force_wake_release() - API to release/reset the SOC wake register
 * from interrupting the device.
 * @handle: HIF opaque handle
 *
 * Description: API to set the
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
 * to release the interrupt line.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_release(struct hif_opaque_softc *handle);
#else
/* FORCE_WAKE disabled: no wake handshake is needed; both calls succeed. */
static inline
int hif_force_wake_request(struct hif_opaque_softc *handle)
{
	return 0;
}

static inline
int hif_force_wake_release(struct hif_opaque_softc *handle)
{
	return 0;
}
#endif /* FORCE_WAKE */
2106 
#ifdef FEATURE_HAL_DELAYED_REG_WRITE
/**
 * hif_prevent_link_low_power_states() - Prevent from going to low power states
 * @hif: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);

/**
 * hif_allow_link_low_power_states() - Allow link to go to low power states
 * @hif: HIF opaque context
 *
 * Return: None
 */
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);

#else

/* Delayed register write disabled: low-power vote hooks are no-ops. */
static inline
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
{
}
#endif

#ifdef IPA_OPT_WIFI_DP
/**
 * hif_prevent_l1() - Prevent from going to low power states
 * @hif: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_prevent_l1(struct hif_opaque_softc *hif);

/**
 * hif_allow_l1() - Allow link to go to low power states
 * @hif: HIF opaque context
 *
 * Return: None
 */
void hif_allow_l1(struct hif_opaque_softc *hif);

#else

/* IPA opt wifi datapath disabled: L1 vote hooks are no-ops. */
static inline
int hif_prevent_l1(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline
void hif_allow_l1(struct hif_opaque_softc *hif)
{
}
#endif
2168 
/* Device base address for this HIF instance. */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
/* Copy-engine register base address. */
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle);
/* PMM base address — confirm the exact register window. */
void *hif_get_dev_ba_pmm(struct hif_opaque_softc *hif_handle);

/**
 * hif_get_dev_ba_cmem() - get base address of CMEM
 * @hif_handle: the HIF context
 *
 * Return: base address of CMEM
 */
void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle);
2179 
2180 /**
2181  * hif_get_soc_version() - get soc major version from target info
2182  * @hif_handle: the HIF context
2183  *
2184  * Return: version number
2185  */
2186 uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle);
2187 
2188 /**
2189  * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
2190  * @hif_ctx: the HIF context to assign the callback to
2191  * @callback: the callback to assign
2192  * @priv: the private data to pass to the callback when invoked
2193  *
2194  * Return: None
2195  */
2196 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
2197 			       void (*callback)(void *),
2198 			       void *priv);
2199 /*
2200  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
2201  * for defined here
2202  */
2203 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2204 ssize_t hif_dump_desc_trace_buf(struct device *dev,
2205 				struct device_attribute *attr, char *buf);
2206 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
2207 					const char *buf, size_t size);
2208 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
2209 				const char *buf, size_t size);
2210 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
2211 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
2212 #endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
2213 
2214 /**
2215  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
2216  * @hif: hif context
2217  * @ce_service_max_yield_time: CE service max yield time to set
2218  *
2219  * This API storess CE service max yield time in hif context based
2220  * on ini value.
2221  *
2222  * Return: void
2223  */
2224 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
2225 				       uint32_t ce_service_max_yield_time);
2226 
2227 /**
2228  * hif_get_ce_service_max_yield_time() - get CE service max yield time
2229  * @hif: hif context
2230  *
2231  * This API returns CE service max yield time.
2232  *
2233  * Return: CE service max yield time
2234  */
2235 unsigned long long
2236 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
2237 
2238 /**
2239  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
2240  * @hif: hif context
2241  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
2242  *
2243  * This API stores CE service max rx ind flush in hif context based
2244  * on ini value.
2245  *
2246  * Return: void
2247  */
2248 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2249 					 uint8_t ce_service_max_rx_ind_flush);
2250 
2251 #ifdef OL_ATH_SMART_LOGGING
2252 /**
2253  * hif_log_dump_ce() - Copy all the CE DEST ring to buf
2254  * @scn: HIF handler
2255  * @buf_cur: Current pointer in ring buffer
2256  * @buf_init:Start of the ring buffer
2257  * @buf_sz: Size of the ring buffer
2258  * @ce: Copy Engine id
2259  * @skb_sz: Max size of the SKB buffer to be copied
2260  *
2261  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
2262  * and buffers pointed by them in to the given buf
2263  *
2264  * Return: Current pointer in ring buffer
2265  */
2266 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
2267 			 uint8_t *buf_init, uint32_t buf_sz,
2268 			 uint32_t ce, uint32_t skb_sz);
2269 #endif /* OL_ATH_SMART_LOGGING */
2270 
2271 /**
2272  * hif_softc_to_hif_opaque_softc() - API to convert hif_softc handle
2273  * to hif_opaque_softc handle
2274  * @hif_handle: hif_softc type
2275  *
2276  * Return: hif_opaque_softc type
2277  */
2278 static inline struct hif_opaque_softc *
2279 hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
2280 {
2281 	return (struct hif_opaque_softc *)hif_handle;
2282 }
2283 
#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
/*
 * Endpoint vote-access control (IPCI + delayed register write only).
 * Semantics of @type/@access values live with the implementation —
 * confirm there before use.
 */
QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx);
void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx);
void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx);
void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			    uint8_t type, uint8_t access);
uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			       uint8_t type);
#else
/* Feature disabled: access is always treated as granted/enabled. */
static inline QDF_STATUS
hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type, uint8_t access)
{
}

static inline uint8_t
hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type)
{
	return HIF_EP_VOTE_ACCESS_ENABLE;
}
#endif
2322 
#ifdef FORCE_WAKE
/**
 * hif_srng_init_phase(): Indicate srng initialization phase
 * to avoid force wake as UMAC power collapse is not yet
 * enabled
 * @hif_ctx: hif opaque handle
 * @init_phase: initialization phase
 *
 * Return:  None
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase);
#else
/* FORCE_WAKE disabled: the init-phase hint is not needed. */
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
}
#endif /* FORCE_WAKE */
2342 
#ifdef HIF_IPCI
/**
 * hif_shutdown_notifier_cb() - Call back for shutdown notifier
 * @ctx: hif handle
 *
 * Return:  None
 */
void hif_shutdown_notifier_cb(void *ctx);
#else
/* Non-IPCI builds: shutdown notification is a no-op. */
static inline
void hif_shutdown_notifier_cb(void *ctx)
{
}
#endif /* HIF_IPCI */
2357 
#ifdef HIF_CE_LOG_INFO
/**
 * hif_log_ce_info() - API to log ce info
 * @scn: hif handle
 * @data: hang event data buffer
 * @offset: offset at which data needs to be written
 *
 * Return:  None
 */
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset);
#else
/* CE info logging compiled out: nothing is written to @data/@offset. */
static inline
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset)
{
}
#endif
2376 
#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * hif_config_irq_set_perf_affinity_hint() - API to set affinity
 * @hif_ctx: hif opaque handle
 *
 * This function is used to move the WLAN IRQs to perf cores in
 * case of defconfig builds.
 *
 * Return:  None
 */
void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx);

#else
/* Perf affinity masking disabled: IRQ placement is left to the OS. */
static inline void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx)
{
}
#endif
2396 
2397 /**
2398  * hif_apps_grp_irqs_enable() - enable ext grp irqs
2399  * @hif_ctx: HIF opaque context
2400  *
2401  * Return: 0 on success. Error code on failure.
2402  */
2403 int hif_apps_grp_irqs_enable(struct hif_opaque_softc *hif_ctx);
2404 
2405 /**
2406  * hif_apps_grp_irqs_disable() - disable ext grp irqs
2407  * @hif_ctx: HIF opaque context
2408  *
2409  * Return: 0 on success. Error code on failure.
2410  */
2411 int hif_apps_grp_irqs_disable(struct hif_opaque_softc *hif_ctx);
2412 
2413 /**
2414  * hif_disable_grp_irqs() - disable ext grp irqs
2415  * @scn: HIF opaque context
2416  *
2417  * Return: 0 on success. Error code on failure.
2418  */
2419 int hif_disable_grp_irqs(struct hif_opaque_softc *scn);
2420 
2421 /**
2422  * hif_enable_grp_irqs() - enable ext grp irqs
2423  * @scn: HIF opaque context
2424  *
2425  * Return: 0 on success. Error code on failure.
2426  */
2427 int hif_enable_grp_irqs(struct hif_opaque_softc *scn);
2428 
/* Direction of a credit exchange, recorded by the latency detector. */
enum hif_credit_exchange_type {
	HIF_REQUEST_CREDIT,
	HIF_PROCESS_CREDIT_REPORT,
};

/* Which latency source a detection event refers to. */
enum hif_detect_latency_type {
	HIF_DETECT_TASKLET,
	HIF_DETECT_CREDIT,
	HIF_DETECT_UNKNOWN
};

#ifdef HIF_DETECTION_LATENCY_ENABLE
/* Record the time of a credit exchange of the given @type. */
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx);

/* Start/stop the periodic latency-detection timer. */
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx);
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx);
/*
 * Per-source latency checks; @from_timer distinguishes the timer path
 * from inline checks.
 * NOTE(review): hif_tasklet_latency()/hif_credit_latency() have no
 * stubs in the #else branch below — presumably only referenced from
 * code compiled under the same flag; confirm.
 */
void hif_tasklet_latency(struct hif_softc *scn, bool from_timer);
void hif_credit_latency(struct hif_softc *scn, bool from_timer);
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type);
/* Enable/disable latency detection at runtime. */
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value);
#else
/* Latency detection compiled out: all hooks are no-ops. */
static inline
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx)
{}
static inline
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{}

static inline
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{}
#endif
2477 
#ifdef SYSTEM_PM_CHECK
/**
 * __hif_system_pm_set_state() - Set system pm state
 * @hif: hif opaque handle
 * @state: system state
 *
 * Return:  None
 */
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state);

/**
 * hif_system_pm_set_state_on() - Set system pm state to ON
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_ON);
}

/**
 * hif_system_pm_set_state_resuming() - Set system pm state to resuming
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_RESUMING);
}

/**
 * hif_system_pm_set_state_suspending() - Set system pm state to suspending
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDING);
}

/**
 * hif_system_pm_set_state_suspended() - Set system pm state to suspended
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDED);
}

/**
 * hif_system_pm_get_state() - Get system pm state
 * @hif: hif opaque handle
 *
 * Return:  system state
 */
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif);

/**
 * hif_system_pm_state_check() - Check system state and trigger resume
 *  if required
 * @hif: hif opaque handle
 *
 * Return: 0 if system is in on state else error code
 */
int hif_system_pm_state_check(struct hif_opaque_softc *hif);
#else
/* SYSTEM_PM_CHECK disabled: state tracking is a no-op and checks pass. */
static inline
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
}

static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
}

static inline
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline int hif_system_pm_state_check(struct hif_opaque_softc *hif)
{
	return 0;
}
#endif
2591 
2592 #ifdef FEATURE_IRQ_AFFINITY
2593 /**
2594  * hif_set_grp_intr_affinity() - API to set affinity for grp
2595  *  intrs set in the bitmap
2596  * @scn: hif handle
2597  * @grp_intr_bitmask: grp intrs for which perf affinity should be
2598  *  applied
2599  * @perf: affine to perf or non-perf cluster
2600  *
2601  * Return: None
2602  */
2603 void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
2604 			       uint32_t grp_intr_bitmask, bool perf);
2605 #else
2606 static inline
2607 void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
2608 			       uint32_t grp_intr_bitmask, bool perf)
2609 {
2610 }
2611 #endif
2612 /**
2613  * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
2614  * @scn: hif opaque handle
2615  *
2616  * Description:
2617  *   Gets number of WMI EPs configured in target svc map. Since EP map
2618  *   include IN and OUT direction pipes, count only OUT pipes to get EPs
2619  *   configured for WMI service.
2620  *
2621  * Return:
2622  *  uint8_t: count for WMI eps in target svc map
2623  */
2624 uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *scn);
2625 
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * hif_register_umac_reset_handler() - Register UMAC HW reset handler
 * @hif_scn: hif opaque handle
 * @irq_handler: irq callback handler function
 * @tl_handler: tasklet callback handler function
 * @cb_ctx: context to be passed to @irq_handler and @tl_handler
 * @irq: irq number to be used for UMAC HW reset interrupt
 *
 * Return: QDF_STATUS of operation
 */
QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
					   bool (*irq_handler)(void *cb_ctx),
					   int (*tl_handler)(void *cb_ctx),
					   void *cb_ctx, int irq);

/**
 * hif_unregister_umac_reset_handler() - Unregister UMAC HW reset handler
 * @hif_scn: hif opaque handle
 *
 * Return: QDF_STATUS of operation
 */
QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn);

/**
 * hif_get_umac_reset_irq() - Get the irq used for UMAC HW reset
 * @hif_scn: hif opaque handle
 * @umac_reset_irq: output; filled with the UMAC reset irq number
 *
 * Return: QDF_STATUS of operation
 */
QDF_STATUS hif_get_umac_reset_irq(struct hif_opaque_softc *hif_scn,
				  int *umac_reset_irq);
#else
/* UMAC HW reset not supported: all operations succeed without effect. */
static inline
QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
					   bool (*irq_handler)(void *cb_ctx),
					   int (*tl_handler)(void *cb_ctx),
					   void *cb_ctx, int irq)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_get_umac_reset_irq(struct hif_opaque_softc *hif_scn,
				  int *umac_reset_irq)
{
	return QDF_STATUS_SUCCESS;
}

#endif /* DP_UMAC_HW_RESET_SUPPORT */
2675 
#ifdef FEATURE_DIRECT_LINK
/**
 * hif_set_irq_config_by_ceid() - Set irq configuration for CE given by id
 * @scn: hif opaque handle
 * @ce_id: CE id
 * @addr: irq trigger address
 * @data: irq trigger data
 *
 * Return: QDF status
 */
QDF_STATUS
hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
			   uint64_t addr, uint32_t data);

/**
 * hif_get_direct_link_ce_dest_srng_buffers() - Get Direct Link ce dest srng
 *  buffer information
 * @scn: hif opaque handle
 * @dma_addr: pointer to array of dma addresses
 * @buf_size: ce dest ring buffer size
 *
 * Return: Number of buffers attached to the dest srng.
 */
uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn,
						  uint64_t **dma_addr,
						  uint32_t *buf_size);

/**
 * hif_get_direct_link_ce_srng_info() - Get Direct Link CE srng information
 * @scn: hif opaque handle
 * @info: Direct Link CEs information
 * @max_ce_info_len: max array size of ce info
 *
 * Return: QDF status
 */
QDF_STATUS
hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
				 struct hif_direct_link_ce_info *info,
				 uint8_t max_ce_info_len);
#else
/* Direct Link unsupported: stubs succeed and report zero buffers. */
static inline QDF_STATUS
hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
			   uint64_t addr, uint32_t data)
{
	return QDF_STATUS_SUCCESS;
}

static inline
uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn,
						  uint64_t **dma_addr,
						  uint32_t *buf_size)
{
	return 0;
}

static inline QDF_STATUS
hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
				 struct hif_direct_link_ce_info *info,
				 uint8_t max_ce_info_len)
{
	return QDF_STATUS_SUCCESS;
}
#endif
2739 #endif /* _HIF_H_ */
2740