xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision 70a19e16789e308182f63b15c75decec7bf0b342)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _HIF_H_
21 #define _HIF_H_
22 
23 #ifdef __cplusplus
24 extern "C" {
25 #endif /* __cplusplus */
26 
27 /* Header files */
28 #include <qdf_status.h>
29 #include "qdf_nbuf.h"
30 #include "qdf_lro.h"
31 #include "ol_if_athvar.h"
32 #include <linux/platform_device.h>
33 #ifdef HIF_PCI
34 #include <linux/pci.h>
35 #endif /* HIF_PCI */
36 #ifdef HIF_USB
37 #include <linux/usb.h>
38 #endif /* HIF_USB */
39 #ifdef IPA_OFFLOAD
40 #include <linux/ipa.h>
41 #endif
42 #include "cfg_ucfg_api.h"
43 #include "qdf_dev.h"
44 #include <wlan_init_cfg.h>
45 
46 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
47 
48 typedef void __iomem *A_target_id_t;
49 typedef void *hif_handle_t;
50 
51 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
52 #define HIF_WORK_DRAIN_WAIT_CNT 50
53 
54 #define HIF_EP_WAKE_RESET_WAIT_CNT 10
55 #endif
56 
57 #define HIF_TYPE_AR6002   2
58 #define HIF_TYPE_AR6003   3
59 #define HIF_TYPE_AR6004   5
60 #define HIF_TYPE_AR9888   6
61 #define HIF_TYPE_AR6320   7
62 #define HIF_TYPE_AR6320V2 8
63 /* For attaching Peregrine 2.0 board host_reg_tbl only */
64 #define HIF_TYPE_AR9888V2 9
65 #define HIF_TYPE_ADRASTEA 10
66 #define HIF_TYPE_AR900B 11
67 #define HIF_TYPE_QCA9984 12
68 #define HIF_TYPE_QCA9888 14
69 #define HIF_TYPE_QCA8074 15
70 #define HIF_TYPE_QCA6290 16
71 #define HIF_TYPE_QCN7605 17
72 #define HIF_TYPE_QCA6390 18
73 #define HIF_TYPE_QCA8074V2 19
74 #define HIF_TYPE_QCA6018  20
75 #define HIF_TYPE_QCN9000 21
76 #define HIF_TYPE_QCA6490 22
77 #define HIF_TYPE_QCA6750 23
78 #define HIF_TYPE_QCA5018 24
79 #define HIF_TYPE_QCN6122 25
80 #define HIF_TYPE_KIWI 26
81 #define HIF_TYPE_QCN9224 27
82 #define HIF_TYPE_QCA9574 28
83 #define HIF_TYPE_MANGO 29
84 #define HIF_TYPE_QCA5332 30
85 #define HIF_TYPE_QCN9160 31
86 
87 #define DMA_COHERENT_MASK_DEFAULT   37
88 
89 #ifdef IPA_OFFLOAD
90 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
91 #endif
92 
/**
 * enum hif_ic_irq - integrated chip irq numbers
 *
 * Defines irq numbers that can be used by external modules like datapath.
 * Only the first enumerator carries an explicit value (16); the remaining
 * entries follow consecutively.
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring4,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
	txmon2host_monitor_destination_mac3,
	txmon2host_monitor_destination_mac2,
	txmon2host_monitor_destination_mac1,
	host2tx_monitor_ring1,
	umac_reset,
};
140 
141 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
/**
 * enum hif_legacy_pci_irq - irq numbers used with legacy PCI interrupts
 *
 * Values start at 0 (ce0) and increment consecutively.  The misspelled
 * enumerator reo2sw6_inrr is retained for source compatibility; the
 * correctly spelled alias reo2sw6_intr maps to the same value, so no
 * subsequent enumerator value shifts.
 */
enum hif_legacy_pci_irq {
	ce0,
	ce1,
	ce2,
	ce3,
	ce4,
	ce5,
	ce6,
	ce7,
	ce8,
	ce9,
	ce10,
	ce11,
	ce12,
	ce13,
	ce14,
	ce15,
	reo2sw8_intr2,
	reo2sw7_intr2,
	reo2sw6_intr2,
	reo2sw5_intr2,
	reo2sw4_intr2,
	reo2sw3_intr2,
	reo2sw2_intr2,
	reo2sw1_intr2,
	reo2sw0_intr2,
	reo2sw8_intr,
	reo2sw7_intr,
	reo2sw6_inrr,			/* historical typo, kept for compatibility */
	reo2sw6_intr = reo2sw6_inrr,	/* correctly-spelled alias, same value */
	reo2sw5_intr,
	reo2sw4_intr,
	reo2sw3_intr,
	reo2sw2_intr,
	reo2sw1_intr,
	reo2sw0_intr,
	reo2status_intr2,
	reo_status,
	reo2rxdma_out_2,
	reo2rxdma_out_1,
	reo_cmd,
	sw2reo6,
	sw2reo5,
	sw2reo1,
	sw2reo,
	rxdma2reo_mlo_0_dst_ring1,
	rxdma2reo_mlo_0_dst_ring0,
	rxdma2reo_mlo_1_dst_ring1,
	rxdma2reo_mlo_1_dst_ring0,
	rxdma2reo_dst_ring1,
	rxdma2reo_dst_ring0,
	rxdma2sw_dst_ring1,
	rxdma2sw_dst_ring0,
	rxdma2release_dst_ring1,
	rxdma2release_dst_ring0,
	sw2rxdma_2_src_ring,
	sw2rxdma_1_src_ring,
	sw2rxdma_0,
	wbm2sw6_release2,
	wbm2sw5_release2,
	wbm2sw4_release2,
	wbm2sw3_release2,
	wbm2sw2_release2,
	wbm2sw1_release2,
	wbm2sw0_release2,
	wbm2sw6_release,
	wbm2sw5_release,
	wbm2sw4_release,
	wbm2sw3_release,
	wbm2sw2_release,
	wbm2sw1_release,
	wbm2sw0_release,
	wbm2sw_link,
	wbm_error_release,
	sw2txmon_src_ring,
	sw2rxmon_src_ring,
	txmon2sw_p1_intr1,
	txmon2sw_p1_intr0,
	txmon2sw_p0_dest1,
	txmon2sw_p0_dest0,
	rxmon2sw_p1_intr1,
	rxmon2sw_p1_intr0,
	rxmon2sw_p0_dest1,
	rxmon2sw_p0_dest0,
	sw_release,
	sw2tcl_credit2,
	sw2tcl_credit,
	sw2tcl4,
	sw2tcl5,
	sw2tcl3,
	sw2tcl2,
	sw2tcl1,
	sw2wbm1,
	misc_8,
	misc_7,
	misc_6,
	misc_5,
	misc_4,
	misc_3,
	misc_2,
	misc_1,
	misc_0,
};
244 #endif
245 
246 struct CE_state;
247 #ifdef QCA_WIFI_QCN9224
248 #define CE_COUNT_MAX 16
249 #else
250 #define CE_COUNT_MAX 12
251 #endif
252 
253 #ifndef HIF_MAX_GROUP
254 #define HIF_MAX_GROUP WLAN_CFG_INT_NUM_CONTEXTS
255 #endif
256 
257 #ifdef CONFIG_BERYLLIUM
258 #define HIF_MAX_GRP_IRQ 25
259 #else
260 #define HIF_MAX_GRP_IRQ 16
261 #endif
262 
263 #ifndef NAPI_YIELD_BUDGET_BASED
264 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
265 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
266 #endif
267 #else  /* NAPI_YIELD_BUDGET_BASED */
268 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
269 #endif /* NAPI_YIELD_BUDGET_BASED */
270 
271 #define QCA_NAPI_BUDGET    64
272 #define QCA_NAPI_DEF_SCALE  \
273 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
274 
275 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
276 /* NOTE: "napi->scale" can be changed,
277  * but this does not change the number of buckets
278  */
279 #define QCA_NAPI_NUM_BUCKETS 4
280 
281 /**
282  * struct qca_napi_stat - stats structure for execution contexts
283  * @napi_schedules: number of times the schedule function is called
284  * @napi_polls: number of times the execution context runs
285  * @napi_completes: number of times that the generating interrupt is re-enabled
286  * @napi_workdone: cumulative of all work done reported by handler
287  * @cpu_corrected: incremented when execution context runs on a different core
288  *			than the one that its irq is affined to.
289  * @napi_budget_uses: histogram of work done per execution run
290  * @time_limit_reached: count of yields due to time limit thresholds
291  * @rxpkt_thresh_reached: count of yields due to a work limit
292  * @napi_max_poll_time:
293  * @poll_time_buckets: histogram of poll times for the napi
294  *
295  */
/* Per-CPU execution-context statistics; field semantics are documented in
 * the kernel-doc block above.
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];	/* work-done histogram */
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	/* poll-time histogram, only when softirq time limiting is compiled in */
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};
310 
311 
312 /**
313  * struct qca_napi_info - per NAPI instance data structure
314  * @netdev: dummy net_dev
315  * @hif_ctx:
316  * @napi:
317  * @scale:
318  * @id:
319  * @cpu:
320  * @irq:
321  * @cpumask:
322  * @stats:
323  * @offld_flush_cb:
324  * @rx_thread_napi:
325  * @rx_thread_netdev:
326  * @lro_ctx:
327  *
328  * This data structure holds stuff per NAPI instance.
329  * Note that, in the current implementation, though scale is
330  * an instance variable, it is set to the same value for all
331  * instances.
332  */
struct qca_napi_info {
	struct net_device    netdev; /* dummy net_dev, only to host the napi */
	void                 *hif_ctx;	/* owning HIF context */
	struct napi_struct   napi;
	uint8_t              scale;   /* currently same on all instances */
	uint8_t              id;	/* instance id returned at create time */
	uint8_t              cpu;	/* cpu this napi last ran on */
	int                  irq;	/* irq line backing this instance */
	cpumask_t            cpumask;
	struct qca_napi_stat stats[NR_CPUS];	/* per-cpu stats for this napi */
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct   rx_thread_napi;
	struct net_device    rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t        lro_ctx;	/* LRO context for this instance */
};
351 
/**
 * enum qca_napi_tput_state - throughput state used for napi mode decisions
 * @QCA_NAPI_TPUT_UNINITIALIZED: throughput detection not initialized yet
 * @QCA_NAPI_TPUT_LO: low-throughput state
 * @QCA_NAPI_TPUT_HI: high-throughput state
 */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};
/**
 * enum qca_napi_cpu_state - hotplug state of a cpu in the napi cpu table
 * @QCA_NAPI_CPU_UNINITIALIZED: table entry not yet populated
 * @QCA_NAPI_CPU_DOWN: cpu is offline
 * @QCA_NAPI_CPU_UP: cpu is online
 */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };
361 
362 /**
363  * struct qca_napi_cpu - an entry of the napi cpu table
364  * @state:
365  * @core_id:     physical core id of the core
366  * @cluster_id:  cluster this core belongs to
367  * @core_mask:   mask to match all core of this cluster
368  * @thread_mask: mask for this core within the cluster
369  * @max_freq:    maximum clock this core can be clocked at
370  *               same for all cpus of the same core.
371  * @napis:       bitmap of napi instances on this core
372  * @execs:       bitmap of execution contexts on this core
373  * @cluster_nxt: chain to link cores within the same cluster
374  *
375  * This structure represents a single entry in the napi cpu
376  * table. The table is part of struct qca_napi_data.
377  * This table is initialized by the init function, called while
378  * the first napi instance is being created, updated by hotplug
379  * notifier and when cpu affinity decisions are made (by throughput
380  * detection), and deleted when the last napi instance is removed.
381  */
/* One row of the napi cpu table; see the kernel-doc block above. */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int			core_id;
	int			cluster_id;
	cpumask_t		core_mask;
	cpumask_t		thread_mask;
	unsigned int		max_freq;
	uint32_t		napis;	/* bitmap of napi instances on this core */
	uint32_t		execs;	/* bitmap of exec contexts on this core */
	int			cluster_nxt;  /* index, not pointer */
};
393 
394 /**
395  * struct qca_napi_data - collection of napi data for a single hif context
396  * @hif_softc: pointer to the hif context
397  * @lock: spinlock used in the event state machine
398  * @state: state variable used in the napi stat machine
399  * @ce_map: bit map indicating which ce's have napis running
400  * @exec_map: bit map of instantiated exec contexts
401  * @user_cpu_affin_mask: CPU affinity mask from INI config.
402  * @napis:
403  * @napi_cpu: cpu info for irq affinty
404  * @lilcl_head:
405  * @bigcl_head:
406  * @napi_mode: irq affinity & clock voting mode
407  * @cpuhp_handler: CPU hotplug event registration handle
408  * @flags:
409  */
struct qca_napi_data {
	struct               hif_softc *hif_softc;
	qdf_spinlock_t       lock;	/* protects the napi state machine */
	uint32_t             state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t             ce_map;
	uint32_t             exec_map;
	uint32_t             user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu  napi_cpu[NR_CPUS];
	/* heads of per-cluster chains built via qca_napi_cpu.cluster_nxt;
	 * presumably little/big cluster list heads - confirm with napi cpu code
	 */
	int                  lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t              flags;
};
428 
429 /**
430  * struct hif_config_info - Place Holder for HIF configuration
431  * @enable_self_recovery: Self Recovery
432  * @enable_runtime_pm: Enable Runtime PM
433  * @runtime_pm_delay: Runtime PM Delay
434  * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
435  *
436  * Structure for holding HIF ini parameters.
437  */
/* HIF ini-driven configuration; see kernel-doc above for field meaning. */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	uint8_t enable_runtime_pm;	/* only compiled with runtime PM support */
	u_int32_t runtime_pm_delay;
#endif
	uint64_t rx_softirq_max_yield_duration_ns;
};
446 
447 /**
448  * struct hif_target_info - Target Information
449  * @target_version: Target Version
450  * @target_type: Target Type
451  * @target_revision: Target Revision
452  * @soc_version: SOC Version
453  * @hw_name: pointer to hardware name
454  *
455  * Structure to hold target information.
456  */
/* Identification data for the attached target; see kernel-doc above. */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;	/* hardware name string; ownership not shown here */
};
464 
/* Opaque handle type passed across the HIF API boundary; intentionally
 * empty so callers cannot poke at HIF-internal state.
 */
struct hif_opaque_softc {
};
467 
468 /**
469  * struct hif_ce_ring_info - CE ring information
470  * @ring_id: ring id
471  * @ring_dir: ring direction
472  * @num_entries: number of entries in ring
473  * @entry_size: ring entry size
474  * @ring_base_paddr: srng base physical address
475  * @hp_paddr: head pointer physical address
476  * @tp_paddr: tail pointer physical address
477  */
/* CE ring description exported to clients; see kernel-doc above. */
struct hif_ce_ring_info {
	uint8_t ring_id;
	uint8_t ring_dir;
	uint32_t num_entries;
	uint32_t entry_size;
	uint64_t ring_base_paddr;	/* srng base physical address */
	uint64_t hp_paddr;		/* head pointer physical address */
	uint64_t tp_paddr;		/* tail pointer physical address */
};
487 
/**
 * struct hif_direct_link_ce_info - Direct Link CE information
 * @ce_id: CE id
 * @pipe_dir: Pipe direction
 * @ring_info: ring information
 */
struct hif_direct_link_ce_info {
	uint8_t ce_id;
	uint8_t pipe_dir;
	struct hif_ce_ring_info ring_info;
};
499 
500 /**
501  * enum hif_event_type - Type of DP events to be recorded
502  * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
503  * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event
504  * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event
505  * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
506  * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
507  * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
508  * @HIF_EVENT_BH_COMPLETE: NAPI POLL completion event
509  * @HIF_EVENT_BH_FORCE_BREAK: NAPI POLL force break event
510  * @HIF_EVENT_IRQ_DISABLE_EXPIRED: IRQ disable expired event
511  */
/* Enumerator order matters: each event's position is its bit in
 * HIF_EVENT_HIST_ENABLE_MASK (e.g. 0x19 enables IRQ_TRIGGER, BH_SCHED and
 * SRNG_ACCESS_START).
 */
enum hif_event_type {
	HIF_EVENT_IRQ_TRIGGER,
	HIF_EVENT_TIMER_ENTRY,
	HIF_EVENT_TIMER_EXIT,
	HIF_EVENT_BH_SCHED,
	HIF_EVENT_SRNG_ACCESS_START,
	HIF_EVENT_SRNG_ACCESS_END,
	HIF_EVENT_BH_COMPLETE,
	HIF_EVENT_BH_FORCE_BREAK,
	HIF_EVENT_IRQ_DISABLE_EXPIRED,
	/* Do check hif_hist_skip_event_record when adding new events */
};
524 
525 /**
526  * enum hif_system_pm_state - System PM state
527  * @HIF_SYSTEM_PM_STATE_ON: System in active state
528  * @HIF_SYSTEM_PM_STATE_BUS_RESUMING: bus resume in progress as part of
529  *  system resume
530  * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDING: bus suspend in progress as part of
531  *  system suspend
532  * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDED: bus suspended as part of system suspend
533  */
/* System PM state machine values; see kernel-doc above. */
enum hif_system_pm_state {
	HIF_SYSTEM_PM_STATE_ON,
	HIF_SYSTEM_PM_STATE_BUS_RESUMING,
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDING,
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDED,
};
540 
541 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
542 #define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP
543 
544 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
545 /* HIF_EVENT_HIST_MAX should always be power of 2 */
546 #define HIF_EVENT_HIST_MAX		512
547 
548 #define HIF_EVENT_HIST_ENABLE_MASK	0xFF
549 
/**
 * hif_get_log_timestamp() - timestamp source for the DP event history
 *
 * Debug-data build variant: uses the qdf log timestamp.
 *
 * Return: current qdf log timestamp
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_get_log_timestamp();
}
554 
555 #else
556 
557 #define HIF_EVENT_HIST_MAX		32
558 /* Enable IRQ TRIGGER, NAPI SCHEDULE, SRNG ACCESS START */
559 #define HIF_EVENT_HIST_ENABLE_MASK	0x19
560 
/**
 * hif_get_log_timestamp() - timestamp source for the DP event history
 *
 * Non-debug build variant: uses the (cheaper) qdf scheduler clock.
 *
 * Return: current qdf scheduler clock value
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_sched_clock();
}
565 
566 #endif
567 
568 /**
569  * struct hif_event_record - an entry of the DP event history
570  * @hal_ring_id: ring id for which event is recorded
571  * @hp: head pointer of the ring (may not be applicable for all events)
572  * @tp: tail pointer of the ring (may not be applicable for all events)
573  * @cpu_id: cpu id on which the event occurred
574  * @timestamp: timestamp when event occurred
575  * @type: type of the event
576  *
577  * This structure represents the information stored for every datapath
578  * event which is logged in the history.
579  */
/* One DP history entry; see kernel-doc above for field meaning. */
struct hif_event_record {
	uint8_t hal_ring_id;
	uint32_t hp;	/* may not be applicable for all events */
	uint32_t tp;	/* may not be applicable for all events */
	int cpu_id;
	uint64_t timestamp;
	enum hif_event_type type;
};
588 
589 /**
590  * struct hif_event_misc - history related misc info
591  * @last_irq_index: last irq event index in history
592  * @last_irq_ts: last irq timestamp
593  */
/* Misc bookkeeping for one event history; see kernel-doc above. */
struct hif_event_misc {
	int32_t last_irq_index;	/* index of last irq event in the history */
	uint64_t last_irq_ts;	/* timestamp of that irq event */
};
598 
599 /**
600  * struct hif_event_history - history for one interrupt group
601  * @index: index to store new event
602  * @misc: event misc information
603  * @event: event entry
604  *
605  * This structure represents the datapath history for one
606  * interrupt group.
607  */
/* Per-interrupt-group DP event history ring; see kernel-doc above. */
struct hif_event_history {
	qdf_atomic_t index;	/* next slot to write (atomic for concurrent producers) */
	struct hif_event_misc misc;
	struct hif_event_record event[HIF_EVENT_HIST_MAX];
};
613 
614 /**
615  * hif_hist_record_event() - Record one datapath event in history
616  * @hif_ctx: HIF opaque context
617  * @event: DP event entry
618  * @intr_grp_id: interrupt group ID registered with hif
619  *
620  * Return: None
621  */
622 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
623 			   struct hif_event_record *event,
624 			   uint8_t intr_grp_id);
625 
626 /**
627  * hif_event_history_init() - Initialize SRNG event history buffers
628  * @hif_ctx: HIF opaque context
629  * @id: context group ID for which history is recorded
630  *
631  * Returns: None
632  */
633 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id);
634 
635 /**
636  * hif_event_history_deinit() - De-initialize SRNG event history buffers
637  * @hif_ctx: HIF opaque context
638  * @id: context group ID for which history is recorded
639  *
640  * Returns: None
641  */
642 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id);
643 
644 /**
645  * hif_record_event() - Wrapper function to form and record DP event
646  * @hif_ctx: HIF opaque context
647  * @intr_grp_id: interrupt group ID registered with hif
648  * @hal_ring_id: ring id for which event is recorded
649  * @hp: head pointer index of the srng
650  * @tp: tail pointer index of the srng
651  * @type: type of the event to be logged in history
652  *
653  * Return: None
654  */
655 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
656 				    uint8_t intr_grp_id,
657 				    uint8_t hal_ring_id,
658 				    uint32_t hp,
659 				    uint32_t tp,
660 				    enum hif_event_type type)
661 {
662 	struct hif_event_record event;
663 
664 	event.hal_ring_id = hal_ring_id;
665 	event.hp = hp;
666 	event.tp = tp;
667 	event.type = type;
668 
669 	hif_hist_record_event(hif_ctx, &event, intr_grp_id);
670 
671 	return;
672 }
673 
674 #else
675 
/* No-op stub: event recording compiles away when DP event history
 * is disabled.
 */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}

/* No-op stub: nothing to allocate without DP event history. */
static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx,
					  uint8_t id)
{
}

/* No-op stub: nothing to free without DP event history. */
static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx,
					    uint8_t id)
{
}
694 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
695 
696 void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx);
697 
698 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
699 void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx);
700 #else
/* No-op stub when CE descriptor history debug is not compiled in. */
static
inline void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx) {}
703 #endif
704 
/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
 *
 * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
 *                         platform-specific measures to completely power-off
 *                         the module and associated hardware (i.e. cut power
 *                         supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};
721 
/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 * @HIF_ENABLE_TYPE_MAX: Max value
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};
734 
/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 * @HIF_DISABLE_TYPE_MAX: Max value
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
751 
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get block address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
788 
789 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/* One PCIe register access trace entry (debug-only). */
struct HID_ACCESS_LOG {
	uint32_t seqnum;	/* monotonically increasing sequence number */
	bool is_write;		/* true for a write access, false for a read */
	void *addr;		/* register address accessed */
	uint32_t value;		/* value written or read back */
};
796 #endif
797 
798 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
799 		uint32_t value);
800 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
801 
802 #define HIF_MAX_DEVICES                 1
803 /**
804  * struct htc_callbacks - Structure for HTC Callbacks methods
805  * @context:             context to pass to the @dsr_handler
806  *                       note : @rw_compl_handler is provided the context
807  *                       passed to hif_read_write
808  * @rw_compl_handler:    Read / write completion handler
809  * @dsr_handler:         DSR Handler
810  */
/* Callback table registered by HTC; see kernel-doc above. */
struct htc_callbacks {
	void *context;	/* passed back to dsr_handler */
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};
816 
817 /**
818  * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
819  * @context: Private data context
820  * @set_recovery_in_progress: To Set Driver state for recovery in progress
821  * @is_recovery_in_progress: Query if driver state is recovery in progress
822  * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
823  * @is_driver_unloading: Query if driver is unloading.
824  * @is_target_ready:
825  * @get_bandwidth_level: Query current bandwidth level for the driver
826  * @prealloc_get_consistent_mem_unaligned: get prealloc unaligned consistent mem
827  * @prealloc_put_consistent_mem_unaligned: put unaligned consistent mem to pool
828  * This Structure provides callback pointer for HIF to query hdd for driver
829  * states.
830  */
/* Callback table HIF uses to query driver/hdd state; see kernel-doc above. */
struct hif_driver_state_callbacks {
	void *context;	/* private context passed to each callback */
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
	int (*get_bandwidth_level)(void *context);
	/* prealloc pool accessors for unaligned consistent memory */
	void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size,
						       qdf_dma_addr_t *paddr,
						       uint32_t ring_type);
	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
};
844 
845 /* This API detaches the HTC layer from the HIF device */
846 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
847 
848 /****************************************************************/
849 /* BMI and Diag window abstraction                              */
850 /****************************************************************/
851 
852 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
853 
854 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
855 				     * handled atomically by
856 				     * DiagRead/DiagWrite
857 				     */
858 
859 #ifdef WLAN_FEATURE_BMI
860 /*
861  * API to handle HIF-specific BMI message exchanges, this API is synchronous
862  * and only allowed to be called from a context that can block (sleep)
863  */
864 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
865 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
866 				uint8_t *pSendMessage, uint32_t Length,
867 				uint8_t *pResponseMessage,
868 				uint32_t *pResponseLength, uint32_t TimeoutMS);
869 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
870 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
871 #else /* WLAN_FEATURE_BMI */
/* No-op stub: nothing to register when BMI support is compiled out. */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}

/* BMI is compiled out, so it is never needed. */
static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
882 #endif /* WLAN_FEATURE_BMI */
883 
884 #ifdef HIF_CPU_CLEAR_AFFINITY
885 /**
886  * hif_config_irq_clear_cpu_affinity() - Remove cpu affinity of IRQ
887  * @scn: HIF handle
888  * @intr_ctxt_id: interrupt group index
889  * @cpu: CPU core to clear
890  *
891  * Return: None
892  */
893 void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
894 				       int intr_ctxt_id, int cpu);
895 #else
/* No-op stub when HIF_CPU_CLEAR_AFFINITY is not enabled. */
static inline
void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
				       int intr_ctxt_id, int cpu)
{
}
901 #endif
902 
903 /*
904  * APIs to handle HIF specific diagnostic read accesses. These APIs are
905  * synchronous and only allowed to be called from a context that
906  * can block (sleep). They are not high performance APIs.
907  *
908  * hif_diag_read_access reads a 4 Byte aligned/length value from a
909  * Target register or memory word.
910  *
911  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
912  */
913 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
914 				uint32_t address, uint32_t *data);
915 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
916 		      uint8_t *data, int nbytes);
917 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
918 			void *ramdump_base, uint32_t address, uint32_t size);
919 /*
920  * APIs to handle HIF specific diagnostic write accesses. These APIs are
921  * synchronous and only allowed to be called from a context that
922  * can block (sleep).
923  * They are not high performance APIs.
924  *
925  * hif_diag_write_access writes a 4 Byte aligned/length value to a
926  * Target register or memory word.
927  *
928  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
929  */
930 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
931 				 uint32_t address, uint32_t data);
932 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
933 			uint32_t address, uint8_t *data, int nbytes);
934 
935 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
936 
937 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
938 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
939 
940 /*
941  * Set the FASTPATH_mode_on flag in sc, for use by data path
942  */
943 #ifdef WLAN_FEATURE_FASTPATH
944 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
945 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
946 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
947 
948 /**
949  * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
950  * @hif_ctx: HIF opaque context
951  * @handler: Callback function
952  * @context: handle for callback function
953  *
954  * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
955  */
956 QDF_STATUS hif_ce_fastpath_cb_register(
957 		struct hif_opaque_softc *hif_ctx,
958 		fastpath_msg_handler handler, void *context);
959 #else
/* Fastpath compiled out: registration always fails so callers fall back
 * to the regular message path.
 */
static inline QDF_STATUS hif_ce_fastpath_cb_register(
		struct hif_opaque_softc *hif_ctx,
		fastpath_msg_handler handler, void *context)
{
	return QDF_STATUS_E_FAILURE;
}

/* Fastpath compiled out: no CE handle to return. */
static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
971 
972 #endif
973 
974 /*
975  * Enable/disable CDC max performance workaround
976  * For max-performance set this to 0
977  * To allow SoC to enter sleep set this to 1
978  */
979 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
980 
981 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
982 			     qdf_shared_mem_t **ce_sr,
983 			     uint32_t *ce_sr_ring_size,
984 			     qdf_dma_addr_t *ce_reg_paddr);
985 
986 /**
987  * struct hif_msg_callbacks - List of callbacks - filled in by HTC.
988  * @Context: context meaningful to HTC
989  * @txCompletionHandler:
990  * @rxCompletionHandler:
991  * @txResourceAvailHandler:
992  * @fwEventHandler:
993  * @update_bundle_stats:
994  */
995 struct hif_msg_callbacks {
996 	void *Context;
997 	/**< context meaningful to HTC */
998 	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
999 					uint32_t transferID,
1000 					uint32_t toeplitz_hash_result);
1001 	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
1002 					uint8_t pipeID);
1003 	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
1004 	void (*fwEventHandler)(void *context, QDF_STATUS status);
1005 	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
1006 };
1007 
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,  /* target got reset */
	TARGET_STATUS_EJECT,  /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
1014 
1015 /**
1016  * enum hif_attribute_flags: configure hif
1017  *
1018  * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
1019  * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
1020  *  							+ No pktlog CE
1021  */
1022 enum hif_attribute_flags {
1023 	HIF_LOWDESC_CE_CFG = 1,
1024 	HIF_LOWDESC_CE_NO_PKTLOG_CFG
1025 };
1026 
/*
 * Helpers to encode per-field CE transmit attributes into the data_attr
 * word (see hif_send_head()/hif_send_fast()).  Each helper ORs a masked
 * value into the caller's attribute word.  Both macro parameters are
 * fully parenthesized so that an expression argument such as (a | b) is
 * masked as a whole instead of being split by operator precedence
 * (e.g. "a | b & 0x01" would mask only b).
 */
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	((attr) |= ((v) & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	((attr) |= ((v) & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	((attr) |= ((v) & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	((attr) |= ((v) & 0x01) << 30)
1041 
/**
 * struct hif_ul_pipe_info - snapshot of an uplink (host->target) pipe's
 * copy-engine ring state, reported via struct hif_pipe_addl_info
 * @nentries: number of descriptor entries in the ring
 * @nentries_mask: mask applied for ring index wrap-around
 * @sw_index: software (host-maintained) ring index
 * @write_index: cached copy of the ring write index
 * @hw_index: cached copy of the hardware ring index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (device) address space
 */
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
1051 
/**
 * struct hif_dl_pipe_info - snapshot of a downlink (target->host) pipe's
 * copy-engine ring state, reported via struct hif_pipe_addl_info
 * @nentries: number of descriptor entries in the ring
 * @nentries_mask: mask applied for ring index wrap-around
 * @sw_index: software (host-maintained) ring index
 * @write_index: cached copy of the ring write index
 * @hw_index: cached copy of the hardware ring index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (device) address space
 */
struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
1061 
/**
 * struct hif_pipe_addl_info - additional per-pipe information exposed to
 * callers such as hif_get_addl_pipe_info()
 * @pci_mem: PCI memory value for the pipe (NOTE(review): looks like the
 *           mapped BAR base — confirm at the fill site)
 * @ctrl_addr: control address/offset of the pipe's copy engine
 * @ul_pipe: uplink (host->target) ring state
 * @dl_pipe: downlink (target->host) ring state
 */
struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
1068 
1069 #ifdef CONFIG_SLUB_DEBUG_ON
1070 #define MSG_FLUSH_NUM 16
1071 #else /* PERF build */
1072 #define MSG_FLUSH_NUM 32
#endif /* CONFIG_SLUB_DEBUG_ON */
1074 
1075 struct hif_bus_id;
1076 
1077 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
1078 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
1079 		     int opcode, void *config, uint32_t config_len);
1080 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
1081 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
1082 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
1083 		   struct hif_msg_callbacks *callbacks);
1084 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
1085 void hif_stop(struct hif_opaque_softc *hif_ctx);
1086 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
1087 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
1088 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
1089 		      uint8_t cmd_id, bool start);
1090 
1091 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1092 				  uint32_t transferID, uint32_t nbytes,
1093 				  qdf_nbuf_t wbuf, uint32_t data_attr);
1094 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1095 			     int force);
1096 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1097 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
1098 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
1099 			  uint8_t *DLPipe);
1100 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
1101 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
1102 			int *dl_is_polled);
1103 uint16_t
1104 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1105 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
1106 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
1107 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
1108 		     bool wait_for_it);
1109 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
#ifndef HIF_PCI
/*
 * Non-PCI builds have no SoC register space to probe here; report
 * success (0) unconditionally.
 */
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
#else
/* PCI builds: real implementation is provided by the PCI HIF layer. */
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif
1118 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1119 			u32 *revision, const char **target_name);
1120 
1121 #ifdef RECEIVE_OFFLOAD
1122 /**
1123  * hif_offld_flush_cb_register() - Register the offld flush callback
1124  * @scn: HIF opaque context
1125  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
1126  *			 Or GRO/LRO flush when RxThread is not enabled. Called
1127  *			 with corresponding context for flush.
1128  * Return: None
1129  */
1130 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
1131 				 void (offld_flush_handler)(void *ol_ctx));
1132 
1133 /**
1134  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
1135  * @scn: HIF opaque context
1136  *
1137  * Return: None
1138  */
1139 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
1140 #endif
1141 
1142 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
1143 /**
1144  * hif_exec_should_yield() - Check if hif napi context should yield
1145  * @hif_ctx: HIF opaque context
1146  * @grp_id: grp_id of the napi for which check needs to be done
1147  *
1148  * The function uses grp_id to look for NAPI and checks if NAPI needs to
1149  * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
1150  * yield decision.
1151  *
1152  * Return: true if NAPI needs to yield, else false
1153  */
1154 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
#else
/* RX softirq time limit disabled: NAPI contexts never request a yield. */
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
					 uint grp_id)
{
	return false;
}
#endif
1162 
1163 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
1164 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
1165 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
1166 				      int htc_htt_tx_endpoint);
1167 
1168 /**
1169  * hif_open() - Create hif handle
1170  * @qdf_ctx: qdf context
1171  * @mode: Driver Mode
1172  * @bus_type: Bus Type
1173  * @cbk: CDS Callbacks
1174  * @psoc: psoc object manager
1175  *
1176  * API to open HIF Context
1177  *
1178  * Return: HIF Opaque Pointer
1179  */
1180 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1181 				  uint32_t mode,
1182 				  enum qdf_bus_type bus_type,
1183 				  struct hif_driver_state_callbacks *cbk,
1184 				  struct wlan_objmgr_psoc *psoc);
1185 
1186 /**
1187  * hif_init_dma_mask() - Set dma mask for the dev
1188  * @dev: dev for which DMA mask is to be set
1189  * @bus_type: bus type for the target
1190  *
1191  * This API sets the DMA mask for the device. before the datapath
1192  * memory pre-allocation is done. If the DMA mask is not set before
1193  * requesting the DMA memory, kernel defaults to a 32-bit DMA mask,
1194  * and does not utilize the full device capability.
1195  *
1196  * Return: 0 - success, non-zero on failure.
1197  */
1198 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type);
1199 void hif_close(struct hif_opaque_softc *hif_ctx);
1200 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1201 		      void *bdev, const struct hif_bus_id *bid,
1202 		      enum qdf_bus_type bus_type,
1203 		      enum hif_enable_type type);
1204 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
1205 #ifdef CE_TASKLET_DEBUG_ENABLE
1206 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
1207 				 uint8_t value);
1208 #endif
1209 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
1210 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
1211 
1212 /**
1213  * enum hif_pm_wake_irq_type - Wake interrupt type for Power Management
1214  * @HIF_PM_INVALID_WAKE: Wake irq is invalid or not configured
1215  * @HIF_PM_MSI_WAKE: Wake irq is MSI interrupt
1216  * @HIF_PM_CE_WAKE: Wake irq is CE interrupt
1217  */
1218 typedef enum {
1219 	HIF_PM_INVALID_WAKE,
1220 	HIF_PM_MSI_WAKE,
1221 	HIF_PM_CE_WAKE,
1222 } hif_pm_wake_irq_type;
1223 
1224 /**
1225  * hif_pm_get_wake_irq_type - Get wake irq type for Power Management
1226  * @hif_ctx: HIF context
1227  *
1228  * Return: enum hif_pm_wake_irq_type
1229  */
1230 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx);
1231 
1232 /**
1233  * enum hif_ep_vote_type - hif ep vote type
1234  * @HIF_EP_VOTE_DP_ACCESS: vote type is specific DP
1235  * @HIF_EP_VOTE_NONDP_ACCESS: ep vote for over all access
1236  */
1237 enum hif_ep_vote_type {
1238 	HIF_EP_VOTE_DP_ACCESS,
1239 	HIF_EP_VOTE_NONDP_ACCESS
1240 };
1241 
1242 /**
1243  * enum hif_ep_vote_access - hif ep vote access
1244  * @HIF_EP_VOTE_ACCESS_ENABLE: Enable ep voting
1245  * @HIF_EP_VOTE_INTERMEDIATE_ACCESS: allow during transition
1246  * @HIF_EP_VOTE_ACCESS_DISABLE: disable ep voting
1247  */
1248 enum hif_ep_vote_access {
1249 	HIF_EP_VOTE_ACCESS_ENABLE,
1250 	HIF_EP_VOTE_INTERMEDIATE_ACCESS,
1251 	HIF_EP_VOTE_ACCESS_DISABLE
1252 };
1253 
1254 /**
1255  * enum hif_rtpm_client_id - modules registered with runtime pm module
1256  * @HIF_RTPM_ID_RESERVED: Reserved ID
1257  * @HIF_RTPM_ID_HAL_REO_CMD: HAL REO commands
1258  * @HIF_RTPM_ID_WMI: WMI commands Tx
1259  * @HIF_RTPM_ID_HTT: HTT commands Tx
1260  * @HIF_RTPM_ID_DP: Datapath Tx path
1261  * @HIF_RTPM_ID_DP_RING_STATS: Datapath ring stats
1262  * @HIF_RTPM_ID_CE: CE Tx buffer posting
1263  * @HIF_RTPM_ID_FORCE_WAKE: Force wake request
1264  * @HIF_RTPM_ID_PM_QOS_NOTIFY:
1265  * @HIF_RTPM_ID_WIPHY_SUSPEND:
1266  * @HIF_RTPM_ID_MAX: Max id
1267  */
1268 enum  hif_rtpm_client_id {
1269 	HIF_RTPM_ID_RESERVED,
1270 	HIF_RTPM_ID_HAL_REO_CMD,
1271 	HIF_RTPM_ID_WMI,
1272 	HIF_RTPM_ID_HTT,
1273 	HIF_RTPM_ID_DP,
1274 	HIF_RTPM_ID_DP_RING_STATS,
1275 	HIF_RTPM_ID_CE,
1276 	HIF_RTPM_ID_FORCE_WAKE,
1277 	HIF_RTPM_ID_PM_QOS_NOTIFY,
1278 	HIF_RTPM_ID_WIPHY_SUSPEND,
1279 	HIF_RTPM_ID_MAX
1280 };
1281 
1282 /**
1283  * enum rpm_type - Get and Put calls types
1284  * @HIF_RTPM_GET_ASYNC: Increment usage count and when system is suspended
1285  *		      schedule resume process, return depends on pm state.
1286  * @HIF_RTPM_GET_FORCE: Increment usage count and when system is suspended
1287  *		      schedule resume process, returns success irrespective of
1288  *		      pm_state.
1289  * @HIF_RTPM_GET_SYNC: Increment usage count and when system is suspended,
1290  *		     wait till process is resumed.
1291  * @HIF_RTPM_GET_NORESUME: Only increments usage count.
1292  * @HIF_RTPM_PUT_ASYNC: Decrements usage count and puts system in idle state.
1293  * @HIF_RTPM_PUT_SYNC_SUSPEND: Decrements usage count and puts system in
1294  *			     suspended state.
1295  * @HIF_RTPM_PUT_NOIDLE: Decrements usage count.
1296  */
1297 enum rpm_type {
1298 	HIF_RTPM_GET_ASYNC,
1299 	HIF_RTPM_GET_FORCE,
1300 	HIF_RTPM_GET_SYNC,
1301 	HIF_RTPM_GET_NORESUME,
1302 	HIF_RTPM_PUT_ASYNC,
1303 	HIF_RTPM_PUT_SYNC_SUSPEND,
1304 	HIF_RTPM_PUT_NOIDLE,
1305 };
1306 
1307 /**
1308  * struct hif_pm_runtime_lock - data structure for preventing runtime suspend
1309  * @list: global list of runtime locks
1310  * @active: true if this lock is preventing suspend
1311  * @name: character string for tracking this lock
1312  */
1313 struct hif_pm_runtime_lock {
1314 	struct list_head list;
1315 	bool active;
1316 	const char *name;
1317 };
1318 
1319 #ifdef FEATURE_RUNTIME_PM
1320 /**
1321  * hif_rtpm_register() - Register a module with runtime PM.
1322  * @id: ID of the module which needs to be registered
1323  * @hif_rpm_cbk: callback to be called when get was called in suspended state.
1324  *
1325  * Return: success status if successfully registered
1326  */
1327 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void));
1328 
1329 /**
1330  * hif_rtpm_deregister() - Deregister the module
1331  * @id: ID of the module which needs to be de-registered
1332  */
1333 QDF_STATUS hif_rtpm_deregister(uint32_t id);
1334 
1335 /**
1336  * hif_rtpm_set_autosuspend_delay() - Set delay to trigger RTPM suspend
1337  * @delay: delay in ms to be set
1338  *
1339  * Return: Success if delay is set successfully
1340  */
1341 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay);
1342 
1343 /**
1344  * hif_rtpm_restore_autosuspend_delay() - Restore delay value to default value
1345  *
1346  * Return: Success if reset done. E_ALREADY if delay same as config value
1347  */
1348 QDF_STATUS hif_rtpm_restore_autosuspend_delay(void);
1349 
1350 /**
1351  * hif_rtpm_get_autosuspend_delay() -Get delay to trigger RTPM suspend
1352  *
1353  * Return: Delay in ms
1354  */
1355 int hif_rtpm_get_autosuspend_delay(void);
1356 
1357 /**
1358  * hif_runtime_lock_init() - API to initialize Runtime PM context
1359  * @lock: QDF lock context
1360  * @name: Context name
1361  *
1362  * This API initializes the Runtime PM context of the caller and
1363  * return the pointer.
1364  *
1365  * Return: None
1366  */
1367 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
1368 
1369 /**
1370  * hif_runtime_lock_deinit() - This API frees the runtime pm context
1371  * @data: Runtime PM context
1372  *
1373  * Return: void
1374  */
1375 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data);
1376 
1377 /**
1378  * hif_rtpm_get() - Increment usage_count on the device to avoid suspend.
1379  * @type: get call types from hif_rpm_type
1380  * @id: ID of the module calling get()
1381  *
1382  * A get operation will prevent a runtime suspend until a
1383  * corresponding put is done.  This api should be used when accessing bus.
1384  *
1385  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
1386  * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!!
1387  *
1388  * return: success if a get has been issued, else error code.
1389  */
1390 QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id);
1391 
1392 /**
1393  * hif_rtpm_put() - do a put operation on the device
1394  * @type: put call types from hif_rpm_type
1395  * @id: ID of the module calling put()
1396  *
1397  * A put operation will allow a runtime suspend after a corresponding
1398  * get was done.  This api should be used when finished accessing bus.
1399  *
1400  * This api will return a failure if runtime pm is stopped
1401  * This api will return failure if it would decrement the usage count below 0.
1402  *
1403  * return: QDF_STATUS_SUCCESS if the put is performed
1404  */
1405 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id);
1406 
1407 /**
1408  * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend
1409  * @data: runtime PM lock
1410  *
1411  * This function will prevent runtime suspend, by incrementing
1412  * device's usage count.
1413  *
1414  * Return: status
1415  */
1416 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data);
1417 
1418 /**
1419  * hif_pm_runtime_prevent_suspend_sync() - Synchronized prevent Runtime suspend
1420  * @data: runtime PM lock
1421  *
1422  * This function will prevent runtime suspend, by incrementing
1423  * device's usage count.
1424  *
1425  * Return: status
1426  */
1427 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data);
1428 
1429 /**
1430  * hif_pm_runtime_allow_suspend() - Allow Runtime suspend
1431  * @data: runtime PM lock
1432  *
1433  * This function will allow runtime suspend, by decrementing
1434  * device's usage count.
1435  *
1436  * Return: status
1437  */
1438 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data);
1439 
1440 /**
1441  * hif_rtpm_request_resume() - Request resume if bus is suspended
1442  *
1443  * Return: None
1444  */
1445 void hif_rtpm_request_resume(void);
1446 
1447 /**
1448  * hif_rtpm_sync_resume() - Invoke synchronous runtime resume.
1449  *
1450  * This function will invoke synchronous runtime resume.
1451  *
1452  * Return: status
1453  */
1454 QDF_STATUS hif_rtpm_sync_resume(void);
1455 
1456 /**
1457  * hif_rtpm_check_and_request_resume() - check if bus is suspended and
1458  *                                       request resume.
1459  *
1460  * Return: void
1461  */
1462 void hif_rtpm_check_and_request_resume(void);
1463 
1464 /**
1465  * hif_rtpm_set_client_job() - Set job for the client.
1466  * @client_id: Client id for which job needs to be set
1467  *
1468  * If get failed due to system being in suspended state, set the client job so
1469  * when system resumes the client's job is called.
1470  *
1471  * Return: None
1472  */
1473 void hif_rtpm_set_client_job(uint32_t client_id);
1474 
1475 /**
1476  * hif_rtpm_mark_last_busy() - Mark last busy to delay retry to suspend
1477  * @id: ID marking last busy
1478  *
1479  * Return: None
1480  */
1481 void hif_rtpm_mark_last_busy(uint32_t id);
1482 
1483 /**
1484  * hif_rtpm_get_monitor_wake_intr() - API to get monitor_wake_intr
1485  *
1486  * monitor_wake_intr variable can be used to indicate if driver expects wake
1487  * MSI for runtime PM
1488  *
1489  * Return: monitor_wake_intr variable
1490  */
1491 int hif_rtpm_get_monitor_wake_intr(void);
1492 
1493 /**
1494  * hif_rtpm_set_monitor_wake_intr() - API to set monitor_wake_intr
1495  * @val: value to set
1496  *
1497  * monitor_wake_intr variable can be used to indicate if driver expects wake
1498  * MSI for runtime PM
1499  *
1500  * Return: void
1501  */
1502 void hif_rtpm_set_monitor_wake_intr(int val);
1503 
1504 /**
1505  * hif_pre_runtime_suspend() - book keeping before beginning runtime suspend.
1506  * @hif_ctx: HIF context
1507  *
1508  * Makes sure that the pci link will be taken down by the suspend operation.
1509  * If the hif layer is configured to leave the bus on, runtime suspend will
1510  * not save any power.
1511  *
1512  * Set the runtime suspend state to SUSPENDING.
1513  *
1514  * return -EINVAL if the bus won't go down.  otherwise return 0
1515  */
1516 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1517 
1518 /**
1519  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
1520  *
1521  * update the runtime pm state to RESUMING.
1522  * Return: void
1523  */
1524 void hif_pre_runtime_resume(void);
1525 
1526 /**
1527  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
1528  *
1529  * Record the success.
1530  * update the runtime_pm state to SUSPENDED
1531  * Return: void
1532  */
1533 void hif_process_runtime_suspend_success(void);
1534 
1535 /**
1536  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
1537  *
1538  * Record the failure.
1539  * mark last busy to delay a retry.
1540  * update the runtime_pm state back to ON
1541  *
1542  * Return: void
1543  */
1544 void hif_process_runtime_suspend_failure(void);
1545 
1546 /**
1547  * hif_process_runtime_resume_linkup() - bookkeeping of resuming link up
1548  *
1549  * update the runtime_pm state to RESUMING_LINKUP
1550  * Return: void
1551  */
1552 void hif_process_runtime_resume_linkup(void);
1553 
1554 /**
1555  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
1556  *
1557  * record the success.
1558  * update the runtime_pm state to SUSPENDED
1559  * Return: void
1560  */
1561 void hif_process_runtime_resume_success(void);
1562 
1563 /**
1564  * hif_rtpm_print_prevent_list() - list the clients preventing suspend.
1565  *
1566  * Return: None
1567  */
1568 void hif_rtpm_print_prevent_list(void);
1569 
1570 /**
1571  * hif_rtpm_suspend_lock() - spin_lock on marking runtime suspend
1572  *
1573  * Return: void
1574  */
1575 void hif_rtpm_suspend_lock(void);
1576 
1577 /**
1578  * hif_rtpm_suspend_unlock() - spin_unlock on marking runtime suspend
1579  *
1580  * Return: void
1581  */
1582 void hif_rtpm_suspend_unlock(void);
1583 
1584 /**
1585  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
1586  * @hif_ctx: HIF context
1587  *
1588  * Return: 0 for success and non-zero error code for failure
1589  */
1590 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1591 
1592 /**
1593  * hif_runtime_resume() - do the bus resume part of a runtime resume
1594  * @hif_ctx: HIF context
1595  *
1596  * Return: 0 for success and non-zero error code for failure
1597  */
1598 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
1599 
1600 /**
1601  * hif_fastpath_resume() - resume fastpath for runtimepm
1602  * @hif_ctx: HIF context
1603  *
1604  * ensure that the fastpath write index register is up to date
1605  * since runtime pm may cause ce_send_fast to skip the register
1606  * write.
1607  *
1608  * fastpath only applicable to legacy copy engine
1609  */
1610 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
1611 
1612 /**
1613  * hif_rtpm_get_state(): get rtpm link state
1614  *
1615  * Return: state
1616  */
1617 int hif_rtpm_get_state(void);
1618 
1619 /**
1620  * hif_rtpm_display_last_busy_hist() - Display runtimepm last busy history
1621  * @hif_ctx: HIF context
1622  *
1623  * Return: None
1624  */
1625 void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx);
1626 
1627 /**
1628  * hif_rtpm_record_ce_last_busy_evt() - Record CE runtimepm last busy event
1629  * @scn: HIF context
1630  * @ce_id: CE id
1631  *
1632  * Return: None
1633  */
1634 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1635 				      unsigned long ce_id);
1636 #else
1637 
1638 /**
1639  * hif_rtpm_display_last_busy_hist() - Display runtimepm last busy history
1640  * @hif_ctx: HIF context
1641  *
1642  * Return: None
1643  */
1644 static inline
1645 void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx) { }
1646 
1647 /**
1648  * hif_rtpm_record_ce_last_busy_evt() - Record CE runtimepm last busy event
1649  * @scn: HIF context
1650  * @ce_id: CE id
1651  *
1652  * Return: None
1653  */
1654 static inline
1655 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1656 				      unsigned long ce_id)
1657 { }
1658 
1659 static inline
1660 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void))
1661 { return QDF_STATUS_SUCCESS; }
1662 
1663 static inline
1664 QDF_STATUS hif_rtpm_deregister(uint32_t id)
1665 { return QDF_STATUS_SUCCESS; }
1666 
1667 static inline
1668 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay)
1669 { return QDF_STATUS_SUCCESS; }
1670 
1671 static inline QDF_STATUS hif_rtpm_restore_autosuspend_delay(void)
1672 { return QDF_STATUS_SUCCESS; }
1673 
1674 static inline int hif_rtpm_get_autosuspend_delay(void)
1675 { return 0; }
1676 
1677 static inline
1678 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
1679 { return 0; }
1680 
1681 static inline
1682 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data)
1683 {}
1684 
1685 static inline
1686 int hif_rtpm_get(uint8_t type, uint32_t id)
1687 { return QDF_STATUS_SUCCESS; }
1688 
1689 static inline
1690 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
1691 { return QDF_STATUS_SUCCESS; }
1692 
1693 static inline
1694 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data)
1695 { return 0; }
1696 
1697 static inline
1698 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data)
1699 { return 0; }
1700 
1701 static inline
1702 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data)
1703 { return 0; }
1704 
1705 static inline
1706 QDF_STATUS hif_rtpm_sync_resume(void)
1707 { return QDF_STATUS_SUCCESS; }
1708 
1709 static inline
1710 void hif_rtpm_request_resume(void)
1711 {}
1712 
1713 static inline
1714 void hif_rtpm_check_and_request_resume(void)
1715 {}
1716 
1717 static inline
1718 void hif_rtpm_set_client_job(uint32_t client_id)
1719 {}
1720 
1721 static inline
1722 void hif_rtpm_print_prevent_list(void)
1723 {}
1724 
1725 static inline
1726 void hif_rtpm_suspend_unlock(void)
1727 {}
1728 
1729 static inline
1730 void hif_rtpm_suspend_lock(void)
1731 {}
1732 
1733 static inline
1734 int hif_rtpm_get_monitor_wake_intr(void)
1735 { return 0; }
1736 
1737 static inline
1738 void hif_rtpm_set_monitor_wake_intr(int val)
1739 {}
1740 
1741 static inline
1742 void hif_rtpm_mark_last_busy(uint32_t id)
1743 {}
1744 #endif
1745 
1746 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
1747 				 bool is_packet_log_enabled);
1748 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
1749 
1750 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
1751 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
1752 
1753 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
1754 
1755 #ifdef IPA_OFFLOAD
1756 /**
1757  * hif_get_ipa_hw_type() - get IPA hw type
1758  *
1759  * This API return the IPA hw type.
1760  *
1761  * Return: IPA hw type
1762  */
1763 static inline
1764 enum ipa_hw_type hif_get_ipa_hw_type(void)
1765 {
1766 	return ipa_get_hw_type();
1767 }
1768 
1769 /**
1770  * hif_get_ipa_present() - get IPA hw status
1771  *
1772  * This API return the IPA hw status.
1773  *
1774  * Return: true if IPA is present or false otherwise
1775  */
1776 static inline
1777 bool hif_get_ipa_present(void)
1778 {
1779 	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
1780 		return true;
1781 	else
1782 		return false;
1783 }
1784 #endif
1785 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
1786 
1787 /**
1788  * hif_bus_early_suspend() - stop non wmi tx traffic
1789  * @hif_ctx: hif context
1790  */
1791 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
1792 
1793 /**
1794  * hif_bus_late_resume() - resume non wmi traffic
1795  * @hif_ctx: hif context
1796  */
1797 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
1798 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
1799 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
1800 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
1801 
1802 /**
1803  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
1804  * @hif_ctx: an opaque HIF handle to use
1805  *
1806  * As opposed to the standard hif_irq_enable, this function always applies to
1807  * the APPS side kernel interrupt handling.
1808  *
1809  * Return: errno
1810  */
1811 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
1812 
1813 /**
1814  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
1815  * @hif_ctx: an opaque HIF handle to use
1816  *
1817  * As opposed to the standard hif_irq_disable, this function always applies to
1818  * the APPS side kernel interrupt handling.
1819  *
1820  * Return: errno
1821  */
1822 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
1823 
1824 /**
1825  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
1826  * @hif_ctx: an opaque HIF handle to use
1827  *
1828  * As opposed to the standard hif_irq_enable, this function always applies to
1829  * the APPS side kernel interrupt handling.
1830  *
1831  * Return: errno
1832  */
1833 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
1834 
1835 /**
1836  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
1837  * @hif_ctx: an opaque HIF handle to use
1838  *
1839  * As opposed to the standard hif_irq_disable, this function always applies to
1840  * the APPS side kernel interrupt handling.
1841  *
1842  * Return: errno
1843  */
1844 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
1845 
1846 /**
1847  * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side
1848  * @hif_ctx: an opaque HIF handle to use
1849  *
1850  * This function always applies to the APPS side kernel interrupt handling
1851  * to wake the system from suspend.
1852  *
1853  * Return: errno
1854  */
1855 int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx);
1856 
1857 /**
1858  * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side
1859  * @hif_ctx: an opaque HIF handle to use
1860  *
1861  * This function always applies to the APPS side kernel interrupt handling
1862  * to disable the wake irq.
1863  *
1864  * Return: errno
1865  */
1866 int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx);
1867 
1868 /**
1869  * hif_apps_enable_irqs_except_wake_irq() - Enables all irqs except wake_irq
1870  * @hif_ctx: an opaque HIF handle to use
1871  *
1872  * As opposed to the standard hif_irq_enable, this function always applies to
1873  * the APPS side kernel interrupt handling.
1874  *
1875  * Return: errno
1876  */
1877 int hif_apps_enable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1878 
1879 /**
1880  * hif_apps_disable_irqs_except_wake_irq() - Disables all irqs except wake_irq
1881  * @hif_ctx: an opaque HIF handle to use
1882  *
1883  * As opposed to the standard hif_irq_disable, this function always applies to
1884  * the APPS side kernel interrupt handling.
1885  *
1886  * Return: errno
1887  */
1888 int hif_apps_disable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1889 
1890 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
1891 int hif_dump_registers(struct hif_opaque_softc *scn);
1892 int ol_copy_ramdump(struct hif_opaque_softc *scn);
1893 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
1894 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1895 		     u32 *revision, const char **target_name);
1896 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
1897 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
1898 						   scn);
1899 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
1900 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
1901 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
1902 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1903 			   hif_target_status);
1904 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1905 			 struct hif_config_info *cfg);
1906 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
1907 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1908 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
1909 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1910 			   uint32_t transfer_id, u_int32_t len);
1911 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
1912 	uint32_t transfer_id, uint32_t download_len);
1913 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
1914 void hif_ce_war_disable(void);
1915 void hif_ce_war_enable(void);
1916 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
1917 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
1918 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
1919 		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
1920 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
1921 		uint32_t pipe_num);
1922 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
1923 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
1924 
1925 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
1926 				int rx_bundle_cnt);
1927 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
1928 
1929 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
1930 
1931 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
1932 
/**
 * enum hif_exec_type - bottom-half type used to service an ext interrupt group
 * @HIF_EXEC_NAPI_TYPE: interrupts serviced from NAPI poll
 * @HIF_EXEC_TASKLET_TYPE: interrupts serviced from a tasklet
 */
enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,
	HIF_EXEC_TASKLET_TYPE,
};
1937 
/*
 * ext_intr_handler - callback invoked to service an external interrupt
 * group; registered through hif_register_ext_group()
 */
typedef uint32_t (*ext_intr_handler)(void *, uint32_t, int);
1939 
1940 /**
1941  * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
1942  * @softc: hif opaque context owning the exec context
1943  * @id: the id of the interrupt context
1944  *
1945  * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
1946  *         'id' registered with the OS
1947  */
1948 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
1949 				uint8_t id);
1950 
1951 /**
1952  * hif_configure_ext_group_interrupts() - Configure ext group interrupts
1953  * @hif_ctx: hif opaque context
1954  *
1955  * Return: QDF_STATUS
1956  */
1957 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1958 
1959 /**
1960  * hif_deconfigure_ext_group_interrupts() - Deconfigure ext group interrupts
1961  * @hif_ctx: hif opaque context
1962  *
1963  * Return: None
1964  */
1965 void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1966 
1967 /**
1968  * hif_register_ext_group() - API to register external group
1969  * interrupt handler.
1970  * @hif_ctx : HIF Context
1971  * @numirq: number of irq's in the group
1972  * @irq: array of irq values
1973  * @handler: callback interrupt handler function
 * @cb_ctx: context to be passed to the callback
1975  * @context_name: text name of the context
1976  * @type: napi vs tasklet
1977  * @scale:
1978  *
1979  * Return: QDF_STATUS
1980  */
1981 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
1982 				  uint32_t numirq, uint32_t irq[],
1983 				  ext_intr_handler handler,
1984 				  void *cb_ctx, const char *context_name,
1985 				  enum hif_exec_type type, uint32_t scale);
1986 
1987 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
1988 				const char *context_name);
1989 
1990 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1991 				u_int8_t pipeid,
1992 				struct hif_msg_callbacks *callbacks);
1993 
1994 /**
1995  * hif_print_napi_stats() - Display HIF NAPI stats
1996  * @hif_ctx: HIF opaque context
1997  *
1998  * Return: None
1999  */
2000 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
2001 
2002 /**
2003  * hif_clear_napi_stats() - function clears the stats of the
2004  * latency when called.
2005  * @hif_ctx: the HIF context to assign the callback to
2006  *
2007  * Return: None
2008  */
2009 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
2010 
2011 #ifdef __cplusplus
2012 }
2013 #endif
2014 
2015 #ifdef FORCE_WAKE
2016 /**
2017  * hif_force_wake_request() - Function to wake from power collapse
2018  * @handle: HIF opaque handle
2019  *
2020  * Description: API to check if the device is awake or not before
2021  * read/write to BAR + 4K registers. If device is awake return
2022  * success otherwise write '1' to
2023  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG which will interrupt
 * the device and wakes up the PCI and MHI within 50ms
2025  * and then the device writes a value to
2026  * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
2027  * handshake process to let the host know the device is awake.
2028  *
2029  * Return: zero - success/non-zero - failure
2030  */
2031 int hif_force_wake_request(struct hif_opaque_softc *handle);
2032 
2033 /**
2034  * hif_force_wake_release() - API to release/reset the SOC wake register
2035  * from interrupting the device.
2036  * @handle: HIF opaque handle
2037  *
2038  * Description: API to set the
2039  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
2040  * to release the interrupt line.
2041  *
2042  * Return: zero - success/non-zero - failure
2043  */
2044 int hif_force_wake_release(struct hif_opaque_softc *handle);
2045 #else
/* FORCE_WAKE disabled: there is no wake handshake to perform, so both
 * the request and the release trivially report success.
 */
static inline int hif_force_wake_request(struct hif_opaque_softc *handle)
{
	return 0;
}

static inline int hif_force_wake_release(struct hif_opaque_softc *handle)
{
	return 0;
}
2057 #endif /* FORCE_WAKE */
2058 
2059 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
2060 /**
2061  * hif_prevent_link_low_power_states() - Prevent from going to low power states
2062  * @hif: HIF opaque context
2063  *
2064  * Return: 0 on success. Error code on failure.
2065  */
2066 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);
2067 
2068 /**
2069  * hif_allow_link_low_power_states() - Allow link to go to low power states
2070  * @hif: HIF opaque context
2071  *
2072  * Return: None
2073  */
2074 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);
2075 
2076 #else
2077 
/* FEATURE_HAL_DELAYED_REG_WRITE disabled: link power-state voting is
 * compiled out, so prevention trivially succeeds and release is a no-op.
 */
static inline int
hif_prevent_link_low_power_states(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}

static inline void
hif_allow_link_low_power_states(struct hif_opaque_softc *hif_ctx)
{
}
2088 #endif
2089 
2090 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
2091 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle);
2092 void *hif_get_dev_ba_pmm(struct hif_opaque_softc *hif_handle);
2093 
2094 /**
2095  * hif_get_dev_ba_cmem() - get base address of CMEM
2096  * @hif_handle: the HIF context
2097  *
2098  */
2099 void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle);
2100 
2101 /**
2102  * hif_get_soc_version() - get soc major version from target info
2103  * @hif_handle: the HIF context
2104  *
2105  * Return: version number
2106  */
2107 uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle);
2108 
2109 /**
2110  * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
2111  * @hif_ctx: the HIF context to assign the callback to
2112  * @callback: the callback to assign
2113  * @priv: the private data to pass to the callback when invoked
2114  *
2115  * Return: None
2116  */
2117 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
2118 			       void (*callback)(void *),
2119 			       void *priv);
2120 /*
2121  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
2122  * for defined here
2123  */
2124 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2125 ssize_t hif_dump_desc_trace_buf(struct device *dev,
2126 				struct device_attribute *attr, char *buf);
2127 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
2128 					const char *buf, size_t size);
2129 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
2130 				const char *buf, size_t size);
2131 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
2132 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
2133 #endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
2134 
2135 /**
2136  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
2137  * @hif: hif context
2138  * @ce_service_max_yield_time: CE service max yield time to set
2139  *
 * This API stores CE service max yield time in hif context based
2141  * on ini value.
2142  *
2143  * Return: void
2144  */
2145 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
2146 				       uint32_t ce_service_max_yield_time);
2147 
2148 /**
2149  * hif_get_ce_service_max_yield_time() - get CE service max yield time
2150  * @hif: hif context
2151  *
2152  * This API returns CE service max yield time.
2153  *
2154  * Return: CE service max yield time
2155  */
2156 unsigned long long
2157 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
2158 
2159 /**
2160  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
2161  * @hif: hif context
2162  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
2163  *
2164  * This API stores CE service max rx ind flush in hif context based
2165  * on ini value.
2166  *
2167  * Return: void
2168  */
2169 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2170 					 uint8_t ce_service_max_rx_ind_flush);
2171 
2172 #ifdef OL_ATH_SMART_LOGGING
2173 /**
2174  * hif_log_dump_ce() - Copy all the CE DEST ring to buf
2175  * @scn: HIF handler
2176  * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
2178  * @buf_sz: Size of the ring buffer
2179  * @ce: Copy Engine id
2180  * @skb_sz: Max size of the SKB buffer to be copied
2181  *
2182  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
2183  * and buffers pointed by them in to the given buf
2184  *
2185  * Return: Current pointer in ring buffer
2186  */
2187 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
2188 			 uint8_t *buf_init, uint32_t buf_sz,
2189 			 uint32_t ce, uint32_t skb_sz);
2190 #endif /* OL_ATH_SMART_LOGGING */
2191 
2192 /**
2193  * hif_softc_to_hif_opaque_softc() - API to convert hif_softc handle
2194  * to hif_opaque_softc handle
2195  * @hif_handle: hif_softc type
2196  *
2197  * Return: hif_opaque_softc type
2198  */
2199 static inline struct hif_opaque_softc *
2200 hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
2201 {
2202 	return (struct hif_opaque_softc *)hif_handle;
2203 }
2204 
2205 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
2206 QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx);
2207 void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx);
2208 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx);
2209 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
2210 			    uint8_t type, uint8_t access);
2211 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
2212 			       uint8_t type);
2213 #else
2214 static inline QDF_STATUS
2215 hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
2216 {
2217 	return QDF_STATUS_SUCCESS;
2218 }
2219 
2220 static inline void
2221 hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
2222 {
2223 }
2224 
2225 static inline void
2226 hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
2227 {
2228 }
2229 
2230 static inline void
2231 hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
2232 		       uint8_t type, uint8_t access)
2233 {
2234 }
2235 
2236 static inline uint8_t
2237 hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
2238 		       uint8_t type)
2239 {
2240 	return HIF_EP_VOTE_ACCESS_ENABLE;
2241 }
2242 #endif
2243 
2244 #ifdef FORCE_WAKE
2245 /**
2246  * hif_srng_init_phase(): Indicate srng initialization phase
2247  * to avoid force wake as UMAC power collapse is not yet
2248  * enabled
2249  * @hif_ctx: hif opaque handle
2250  * @init_phase: initialization phase
2251  *
2252  * Return:  None
2253  */
2254 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
2255 			 bool init_phase);
2256 #else
/* Without FORCE_WAKE there is no wake handshake to suppress during srng
 * initialization, so the phase indication is a no-op.
 */
static inline void hif_srng_init_phase(struct hif_opaque_softc *scn,
				       bool init_phase)
{
}
2262 #endif /* FORCE_WAKE */
2263 
2264 #ifdef HIF_IPCI
2265 /**
2266  * hif_shutdown_notifier_cb - Call back for shutdown notifier
2267  * @ctx: hif handle
2268  *
2269  * Return:  None
2270  */
2271 void hif_shutdown_notifier_cb(void *ctx);
2272 #else
/* Non-IPCI builds register no shutdown notifier; nothing to do here. */
static inline void hif_shutdown_notifier_cb(void *ctx)
{
}
2277 #endif /* HIF_IPCI */
2278 
2279 #ifdef HIF_CE_LOG_INFO
2280 /**
2281  * hif_log_ce_info() - API to log ce info
2282  * @scn: hif handle
2283  * @data: hang event data buffer
2284  * @offset: offset at which data needs to be written
2285  *
2286  * Return:  None
2287  */
2288 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
2289 		     unsigned int *offset);
2290 #else
/* HIF_CE_LOG_INFO disabled: leave the hang event buffer and write
 * offset untouched.
 */
static inline void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
				   unsigned int *offset)
{
}
2296 #endif
2297 
2298 #ifdef HIF_CPU_PERF_AFFINE_MASK
2299 /**
2300  * hif_config_irq_set_perf_affinity_hint() - API to set affinity
2301  * @hif_ctx: hif opaque handle
2302  *
2303  * This function is used to move the WLAN IRQs to perf cores in
2304  * case of defconfig builds.
2305  *
2306  * Return:  None
2307  */
2308 void hif_config_irq_set_perf_affinity_hint(
2309 	struct hif_opaque_softc *hif_ctx);
2310 
2311 #else
/* Perf-core IRQ affinity support is compiled out; nothing to configure. */
static inline void
hif_config_irq_set_perf_affinity_hint(struct hif_opaque_softc *hif_ctx)
{
}
2316 #endif
2317 
2318 /**
2319  * hif_apps_grp_irqs_enable() - enable ext grp irqs
2320  * @hif_ctx: HIF opaque context
2321  *
2322  * Return: 0 on success. Error code on failure.
2323  */
2324 int hif_apps_grp_irqs_enable(struct hif_opaque_softc *hif_ctx);
2325 
2326 /**
2327  * hif_apps_grp_irqs_disable() - disable ext grp irqs
2328  * @hif_ctx: HIF opaque context
2329  *
2330  * Return: 0 on success. Error code on failure.
2331  */
2332 int hif_apps_grp_irqs_disable(struct hif_opaque_softc *hif_ctx);
2333 
2334 /**
2335  * hif_disable_grp_irqs() - disable ext grp irqs
2336  * @scn: HIF opaque context
2337  *
2338  * Return: 0 on success. Error code on failure.
2339  */
2340 int hif_disable_grp_irqs(struct hif_opaque_softc *scn);
2341 
2342 /**
2343  * hif_enable_grp_irqs() - enable ext grp irqs
2344  * @scn: HIF opaque context
2345  *
2346  * Return: 0 on success. Error code on failure.
2347  */
2348 int hif_enable_grp_irqs(struct hif_opaque_softc *scn);
2349 
/**
 * enum hif_credit_exchange_type - credit exchange event being recorded
 * @HIF_REQUEST_CREDIT: a credit request event
 * @HIF_PROCESS_CREDIT_REPORT: a credit report processing event
 */
enum hif_credit_exchange_type {
	HIF_REQUEST_CREDIT,
	HIF_PROCESS_CREDIT_REPORT,
};
2354 
/**
 * enum hif_detect_latency_type - source of a detected latency event
 * @HIF_DETECT_TASKLET: latency detected on the tasklet path
 * @HIF_DETECT_CREDIT: latency detected on the credit exchange path
 * @HIF_DETECT_UNKNOWN: detection source not identified
 */
enum hif_detect_latency_type {
	HIF_DETECT_TASKLET,
	HIF_DETECT_CREDIT,
	HIF_DETECT_UNKNOWN
};
2360 
2361 #ifdef HIF_DETECTION_LATENCY_ENABLE
2362 void hif_latency_detect_credit_record_time(
2363 	enum hif_credit_exchange_type type,
2364 	struct hif_opaque_softc *hif_ctx);
2365 
2366 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx);
2367 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx);
2368 void hif_tasklet_latency(struct hif_softc *scn, bool from_timer);
2369 void hif_credit_latency(struct hif_softc *scn, bool from_timer);
2370 void hif_check_detection_latency(struct hif_softc *scn,
2371 				 bool from_timer,
2372 				 uint32_t bitmap_type);
2373 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value);
2374 #else
2375 static inline
2376 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
2377 {}
2378 
2379 static inline
2380 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
2381 {}
2382 
2383 static inline
2384 void hif_latency_detect_credit_record_time(
2385 	enum hif_credit_exchange_type type,
2386 	struct hif_opaque_softc *hif_ctx)
2387 {}
2388 static inline
2389 void hif_check_detection_latency(struct hif_softc *scn,
2390 				 bool from_timer,
2391 				 uint32_t bitmap_type)
2392 {}
2393 
2394 static inline
2395 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
2396 {}
2397 #endif
2398 
2399 #ifdef SYSTEM_PM_CHECK
2400 /**
2401  * __hif_system_pm_set_state() - Set system pm state
2402  * @hif: hif opaque handle
2403  * @state: system state
2404  *
2405  * Return:  None
2406  */
2407 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
2408 			       enum hif_system_pm_state state);
2409 
2410 /**
2411  * hif_system_pm_set_state_on() - Set system pm state to ON
2412  * @hif: hif opaque handle
2413  *
2414  * Return:  None
2415  */
2416 static inline
2417 void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
2418 {
2419 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_ON);
2420 }
2421 
2422 /**
2423  * hif_system_pm_set_state_resuming() - Set system pm state to resuming
2424  * @hif: hif opaque handle
2425  *
2426  * Return:  None
2427  */
2428 static inline
2429 void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
2430 {
2431 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_RESUMING);
2432 }
2433 
2434 /**
2435  * hif_system_pm_set_state_suspending() - Set system pm state to suspending
2436  * @hif: hif opaque handle
2437  *
2438  * Return:  None
2439  */
2440 static inline
2441 void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
2442 {
2443 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDING);
2444 }
2445 
2446 /**
2447  * hif_system_pm_set_state_suspended() - Set system pm state to suspended
2448  * @hif: hif opaque handle
2449  *
2450  * Return:  None
2451  */
2452 static inline
2453 void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
2454 {
2455 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDED);
2456 }
2457 
2458 /**
2459  * hif_system_pm_get_state() - Get system pm state
2460  * @hif: hif opaque handle
2461  *
2462  * Return:  system state
2463  */
2464 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif);
2465 
2466 /**
2467  * hif_system_pm_state_check() - Check system state and trigger resume
2468  *  if required
2469  * @hif: hif opaque handle
2470  *
2471  * Return: 0 if system is in on state else error code
2472  */
2473 int hif_system_pm_state_check(struct hif_opaque_softc *hif);
2474 #else
2475 static inline
2476 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
2477 			       enum hif_system_pm_state state)
2478 {
2479 }
2480 
2481 static inline
2482 void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
2483 {
2484 }
2485 
2486 static inline
2487 void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
2488 {
2489 }
2490 
2491 static inline
2492 void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
2493 {
2494 }
2495 
2496 static inline
2497 void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
2498 {
2499 }
2500 
2501 static inline
2502 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
2503 {
2504 	return 0;
2505 }
2506 
2507 static inline int hif_system_pm_state_check(struct hif_opaque_softc *hif)
2508 {
2509 	return 0;
2510 }
2511 #endif
2512 
2513 #ifdef FEATURE_IRQ_AFFINITY
2514 /**
2515  * hif_set_grp_intr_affinity() - API to set affinity for grp
2516  *  intrs set in the bitmap
2517  * @scn: hif handle
2518  * @grp_intr_bitmask: grp intrs for which perf affinity should be
2519  *  applied
2520  * @perf: affine to perf or non-perf cluster
2521  *
2522  * Return: None
2523  */
2524 void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
2525 			       uint32_t grp_intr_bitmask, bool perf);
2526 #else
/* FEATURE_IRQ_AFFINITY disabled: group interrupt affinity cannot be
 * applied, so this is a no-op.
 */
static inline
void hif_set_grp_intr_affinity(struct hif_opaque_softc *hif_ctx,
			       uint32_t grp_intr_bitmask, bool perf)
{
}
2532 #endif
2533 /**
2534  * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
2535  * @scn: hif opaque handle
2536  *
2537  * Description:
2538  *   Gets number of WMI EPs configured in target svc map. Since EP map
2539  *   include IN and OUT direction pipes, count only OUT pipes to get EPs
2540  *   configured for WMI service.
2541  *
2542  * Return:
2543  *  uint8_t: count for WMI eps in target svc map
2544  */
2545 uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *scn);
2546 
2547 #ifdef DP_UMAC_HW_RESET_SUPPORT
2548 /**
2549  * hif_register_umac_reset_handler() - Register UMAC HW reset handler
2550  * @hif_scn: hif opaque handle
2551  * @handler: callback handler function
 * @cb_ctx: context to be passed to @handler
2553  * @irq: irq number to be used for UMAC HW reset interrupt
2554  *
2555  * Return: QDF_STATUS of operation
2556  */
2557 QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
2558 					   int (*handler)(void *cb_ctx),
2559 					   void *cb_ctx, int irq);
2560 
2561 /**
2562  * hif_unregister_umac_reset_handler() - Unregister UMAC HW reset handler
2563  * @hif_scn: hif opaque handle
2564  *
2565  * Return: QDF_STATUS of operation
2566  */
2567 QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn);
2568 #else
2569 static inline
2570 QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
2571 					   int (*handler)(void *cb_ctx),
2572 					   void *cb_ctx, int irq)
2573 {
2574 	return QDF_STATUS_SUCCESS;
2575 }
2576 
2577 static inline
2578 QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
2579 {
2580 	return QDF_STATUS_SUCCESS;
2581 }
2582 
2583 #endif /* DP_UMAC_HW_RESET_SUPPORT */
2584 
2585 #ifdef FEATURE_DIRECT_LINK
2586 /**
2587  * hif_set_irq_config_by_ceid() - Set irq configuration for CE given by id
2588  * @scn: hif opaque handle
2589  * @ce_id: CE id
2590  * @addr: irq trigger address
2591  * @data: irq trigger data
2592  *
2593  * Return: QDF status
2594  */
2595 QDF_STATUS
2596 hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
2597 			   uint64_t addr, uint32_t data);
2598 
2599 /**
2600  * hif_get_direct_link_ce_dest_srng_buffers() - Get Direct Link ce dest srng
2601  *  buffer information
2602  * @scn: hif opaque handle
2603  * @dma_addr: pointer to array of dma addresses
2604  * @buf_size: ce dest ring buffer size
2605  *
2606  * Return: Number of buffers attached to the dest srng.
2607  */
2608 uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn,
2609 						  uint64_t **dma_addr,
2610 						  uint32_t *buf_size);
2611 
2612 /**
2613  * hif_get_direct_link_ce_srng_info() - Get Direct Link CE srng information
2614  * @scn: hif opaque handle
2615  * @info: Direct Link CEs information
2616  * @max_ce_info_len: max array size of ce info
2617  *
2618  * Return: QDF status
2619  */
2620 QDF_STATUS
2621 hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
2622 				 struct hif_direct_link_ce_info *info,
2623 				 uint8_t max_ce_info_len);
2624 #else
2625 static inline QDF_STATUS
2626 hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
2627 			   uint64_t addr, uint32_t data)
2628 {
2629 	return QDF_STATUS_SUCCESS;
2630 }
2631 
2632 static inline
2633 uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn,
2634 						  uint64_t **dma_addr,
2635 						  uint32_t *buf_size)
2636 {
2637 	return 0;
2638 }
2639 
2640 static inline QDF_STATUS
2641 hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
2642 				 struct hif_direct_link_ce_info *info,
2643 				 uint8_t max_ce_info_len)
2644 {
2645 	return QDF_STATUS_SUCCESS;
2646 }
2647 #endif
2648 #endif /* _HIF_H_ */
2649