xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision 7d007034fb2934d81e3012120bee9be0e32e9d63)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _HIF_H_
21 #define _HIF_H_
22 
23 #ifdef __cplusplus
24 extern "C" {
25 #endif /* __cplusplus */
26 
27 /* Header files */
28 #include <qdf_status.h>
29 #include "qdf_nbuf.h"
30 #include "qdf_lro.h"
31 #include "ol_if_athvar.h"
32 #include <linux/platform_device.h>
33 #ifdef HIF_PCI
34 #include <linux/pci.h>
35 #endif /* HIF_PCI */
36 #ifdef HIF_USB
37 #include <linux/usb.h>
38 #endif /* HIF_USB */
39 #ifdef IPA_OFFLOAD
40 #include <linux/ipa.h>
41 #endif
42 #include "cfg_ucfg_api.h"
43 #include "qdf_dev.h"
44 #include <wlan_init_cfg.h>
45 
/* enable the mbox dummy-space feature (always on in this build) */
#define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1

/* opaque handle to the target's register address space (iomem-mapped) */
typedef void __iomem *A_target_id_t;
/* opaque handle to a HIF instance */
typedef void *hif_handle_t;

#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
/* retry count while waiting for delayed-reg-write work to drain */
#define HIF_WORK_DRAIN_WAIT_CNT 50

/* retry count while waiting for the EP wake reset */
#define HIF_EP_WAKE_RESET_WAIT_CNT 10
#endif

/* HIF_TYPE_* - target/board identifiers used to select per-chip handling */
#define HIF_TYPE_AR6002   2
#define HIF_TYPE_AR6003   3
#define HIF_TYPE_AR6004   5
#define HIF_TYPE_AR9888   6
#define HIF_TYPE_AR6320   7
#define HIF_TYPE_AR6320V2 8
/* For attaching Peregrine 2.0 board host_reg_tbl only */
#define HIF_TYPE_AR9888V2 9
#define HIF_TYPE_ADRASTEA 10
#define HIF_TYPE_AR900B 11
#define HIF_TYPE_QCA9984 12
#define HIF_TYPE_QCA9888 14
#define HIF_TYPE_QCA8074 15
#define HIF_TYPE_QCA6290 16
#define HIF_TYPE_QCN7605 17
#define HIF_TYPE_QCA6390 18
#define HIF_TYPE_QCA8074V2 19
#define HIF_TYPE_QCA6018  20
#define HIF_TYPE_QCN9000 21
#define HIF_TYPE_QCA6490 22
#define HIF_TYPE_QCA6750 23
#define HIF_TYPE_QCA5018 24
#define HIF_TYPE_QCN6122 25
#define HIF_TYPE_KIWI 26
#define HIF_TYPE_QCN9224 27
#define HIF_TYPE_QCA9574 28
#define HIF_TYPE_MANGO 29
#define HIF_TYPE_QCA5332 30
#define HIF_TYPE_QCN9160 31

/* default DMA coherent mask: 37-bit addressing */
#define DMA_COHERENT_MASK_DEFAULT   37

#ifdef IPA_OFFLOAD
/* 32-bit DMA coherent mask for IPA hardware versions below 3 */
#define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
#endif
92 
/**
 * enum hif_ic_irq - integrated-chip irq numbers
 *
 * Defines irq numbers that can be used by external modules like datapath.
 * Numbering starts at 16 with @host2wbm_desc_feed; the remaining
 * enumerators take consecutive values.
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring4,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
135 
#ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
/**
 * enum hif_legacy_pci_irq - interrupt source indices for legacy (INTx) PCI
 *
 * Sequential indices starting at 0 with @ce0, used to identify the
 * interrupt source when legacy PCI interrupts are supported.
 *
 * NOTE(review): @reo2sw6_inrr looks like a typo for "reo2sw6_intr", but the
 * name is part of the visible interface so it is intentionally left as-is.
 */
enum hif_legacy_pci_irq {
	ce0,
	ce1,
	ce2,
	ce3,
	ce4,
	ce5,
	ce6,
	ce7,
	ce8,
	ce9,
	ce10,
	ce11,
	ce12,
	ce13,
	ce14,
	ce15,
	reo2sw8_intr2,
	reo2sw7_intr2,
	reo2sw6_intr2,
	reo2sw5_intr2,
	reo2sw4_intr2,
	reo2sw3_intr2,
	reo2sw2_intr2,
	reo2sw1_intr2,
	reo2sw0_intr2,
	reo2sw8_intr,
	reo2sw7_intr,
	reo2sw6_inrr,
	reo2sw5_intr,
	reo2sw4_intr,
	reo2sw3_intr,
	reo2sw2_intr,
	reo2sw1_intr,
	reo2sw0_intr,
	reo2status_intr2,
	reo_status,
	reo2rxdma_out_2,
	reo2rxdma_out_1,
	reo_cmd,
	sw2reo6,
	sw2reo5,
	sw2reo1,
	sw2reo,
	rxdma2reo_mlo_0_dst_ring1,
	rxdma2reo_mlo_0_dst_ring0,
	rxdma2reo_mlo_1_dst_ring1,
	rxdma2reo_mlo_1_dst_ring0,
	rxdma2reo_dst_ring1,
	rxdma2reo_dst_ring0,
	rxdma2sw_dst_ring1,
	rxdma2sw_dst_ring0,
	rxdma2release_dst_ring1,
	rxdma2release_dst_ring0,
	sw2rxdma_2_src_ring,
	sw2rxdma_1_src_ring,
	sw2rxdma_0,
	wbm2sw6_release2,
	wbm2sw5_release2,
	wbm2sw4_release2,
	wbm2sw3_release2,
	wbm2sw2_release2,
	wbm2sw1_release2,
	wbm2sw0_release2,
	wbm2sw6_release,
	wbm2sw5_release,
	wbm2sw4_release,
	wbm2sw3_release,
	wbm2sw2_release,
	wbm2sw1_release,
	wbm2sw0_release,
	wbm2sw_link,
	wbm_error_release,
	sw2txmon_src_ring,
	sw2rxmon_src_ring,
	txmon2sw_p1_intr1,
	txmon2sw_p1_intr0,
	txmon2sw_p0_dest1,
	txmon2sw_p0_dest0,
	rxmon2sw_p1_intr1,
	rxmon2sw_p1_intr0,
	rxmon2sw_p0_dest1,
	rxmon2sw_p0_dest0,
	sw_release,
	sw2tcl_credit2,
	sw2tcl_credit,
	sw2tcl4,
	sw2tcl5,
	sw2tcl3,
	sw2tcl2,
	sw2tcl1,
	sw2wbm1,
	misc_8,
	misc_7,
	misc_6,
	misc_5,
	misc_4,
	misc_3,
	misc_2,
	misc_1,
	misc_0,
};
#endif
240 
/* forward declaration; Copy Engine state is defined in the CE layer */
struct CE_state;
/* max number of copy engines supported by the target */
#ifdef QCA_WIFI_QCN9224
#define CE_COUNT_MAX 16
#else
#define CE_COUNT_MAX 12
#endif

/* max number of interrupt (ext group) contexts */
#ifndef HIF_MAX_GROUP
#define HIF_MAX_GROUP WLAN_CFG_INT_NUM_CONTEXTS
#endif

/* max number of IRQs per interrupt group */
#ifdef CONFIG_BERYLLIUM
#define HIF_MAX_GRP_IRQ 25
#else
#define HIF_MAX_GRP_IRQ 16
#endif

/* scale shift applied to the napi budget; see QCA_NAPI_DEF_SCALE below */
#ifndef NAPI_YIELD_BUDGET_BASED
#ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
#endif
#else  /* NAPI_YIELD_BUDGET_BASED */
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
#endif /* NAPI_YIELD_BUDGET_BASED */

#define QCA_NAPI_BUDGET    64
#define QCA_NAPI_DEF_SCALE  \
	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)

/* upper bound of frames handled in one poll: budget * scale */
#define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
/* NOTE: "napi->scale" can be changed,
 * but this does not change the number of buckets
 */
#define QCA_NAPI_NUM_BUCKETS 4
275 
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is re-enabled
 * @napi_workdone: cumulative of all work done reported by handler
 * @cpu_corrected: incremented when execution context runs on a different core
 *			than the one that its irq is affined to.
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: maximum time spent in a single poll (per field name;
 *			units per the qdf timestamp source - confirm)
 * @poll_time_buckets: histogram of poll times for the napi
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};
304 
305 
/**
 * struct qca_napi_info - per NAPI instance data structure
 * @netdev: dummy net_dev required by the kernel NAPI framework
 * @hif_ctx: back-pointer to the owning HIF context
 * @napi: kernel napi_struct for this instance
 * @scale: yield scale; currently set to the same value on all instances
 * @id: instance id
 * @cpu: cpu associated with this instance
 * @irq: irq number serviced by this instance
 * @cpumask: cpu affinity mask for this instance
 * @stats: per-cpu statistics
 * @offld_flush_cb: offload flush callback; only present for data rx CE's
 * @rx_thread_napi: napi_struct used by the rx thread
 * @rx_thread_netdev: dummy net_dev for the rx thread napi
 * @lro_ctx: LRO context handle
 *
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device    netdev; /* dummy net_dev */
	void                 *hif_ctx;
	struct napi_struct   napi;
	uint8_t              scale;   /* currently same on all instances */
	uint8_t              id;
	uint8_t              cpu;
	int                  irq;
	cpumask_t            cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct   rx_thread_napi;
	struct net_device    rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t        lro_ctx;
};
331 
/**
 * enum qca_napi_tput_state - throughput state tracked by the napi logic
 * @QCA_NAPI_TPUT_UNINITIALIZED: state not yet initialized
 * @QCA_NAPI_TPUT_LO: low throughput
 * @QCA_NAPI_TPUT_HI: high throughput
 */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};
/**
 * enum qca_napi_cpu_state - state of a CPU entry in the napi cpu table
 * @QCA_NAPI_CPU_UNINITIALIZED: entry has not been initialized
 * @QCA_NAPI_CPU_DOWN: cpu is down
 * @QCA_NAPI_CPU_UP: cpu is up
 */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP,
};
341 
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state:       hotplug state of this core (see enum qca_napi_cpu_state)
 * @core_id:     physical core id of the core
 * @cluster_id:  cluster this core belongs to
 * @core_mask:   mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq:    maximum clock this core can be clocked at
 *               same for all cpus of the same core.
 * @napis:       bitmap of napi instances on this core
 * @execs:       bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster (index, not
 *               pointer)
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int			core_id;
	int			cluster_id;
	cpumask_t		core_mask;
	cpumask_t		thread_mask;
	unsigned int		max_freq;
	uint32_t		napis;
	uint32_t		execs;
	int			cluster_nxt;  /* index, not pointer */
};
372 
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi stat machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: per-CE napi instance pointers, indexed by pipe id
 * @napi_cpu: cpu info for irq affinty
 * @lilcl_head: index of the head of the little-cluster core chain
 * @bigcl_head: index of the head of the big-cluster core chain
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: flag bits (semantics defined by the napi implementation - confirm)
 */
struct qca_napi_data {
	struct               hif_softc *hif_softc;
	qdf_spinlock_t       lock;
	uint32_t             state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t             ce_map;
	uint32_t             exec_map;
	uint32_t             user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu  napi_cpu[NR_CPUS];
	int                  lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t              flags;
};
405 
/**
 * struct hif_config_info - Place Holder for HIF configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable Runtime PM (only when FEATURE_RUNTIME_PM is set)
 * @runtime_pm_delay: Runtime PM Delay (only when FEATURE_RUNTIME_PM is set)
 * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
 *
 * Structure for holding HIF ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	uint8_t enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
	uint64_t rx_softirq_max_yield_duration_ns;
};

/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: pointer to hardware name
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};

/* Opaque HIF handle handed out to clients; concrete state lives elsewhere */
struct hif_opaque_softc {
};
444 
/**
 * struct hif_ce_ring_info - CE ring information
 * @ring_id: ring id
 * @ring_dir: ring direction
 * @num_entries: number of entries in ring
 * @entry_size: ring entry size
 * @ring_base_paddr: srng base physical address
 * @hp_paddr: head pointer physical address
 * @tp_paddr: tail pointer physical address
 */
struct hif_ce_ring_info {
	uint8_t ring_id;
	uint8_t ring_dir;
	uint32_t num_entries;
	uint32_t entry_size;
	uint64_t ring_base_paddr;
	uint64_t hp_paddr;
	uint64_t tp_paddr;
};

/**
 * struct hif_direct_link_ce_info - Direct Link CE information
 * @ce_id: CE id
 * @pipe_dir: Pipe direction
 * @ring_info: ring information
 */
struct hif_direct_link_ce_info {
	uint8_t ce_id;
	uint8_t pipe_dir;
	struct hif_ce_ring_info ring_info;
};
476 
/**
 * enum hif_event_type - Type of DP events to be recorded
 * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
 * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event
 * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event
 * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
 * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
 * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
 * @HIF_EVENT_BH_COMPLETE: NAPI POLL completion event
 * @HIF_EVENT_BH_FORCE_BREAK: NAPI POLL force break event
 */
enum hif_event_type {
	HIF_EVENT_IRQ_TRIGGER,
	HIF_EVENT_TIMER_ENTRY,
	HIF_EVENT_TIMER_EXIT,
	HIF_EVENT_BH_SCHED,
	HIF_EVENT_SRNG_ACCESS_START,
	HIF_EVENT_SRNG_ACCESS_END,
	HIF_EVENT_BH_COMPLETE,
	HIF_EVENT_BH_FORCE_BREAK,
	/* Do check hif_hist_skip_event_record when adding new events */
};

/**
 * enum hif_system_pm_state - System PM state
 * @HIF_SYSTEM_PM_STATE_ON: System in active state
 * @HIF_SYSTEM_PM_STATE_BUS_RESUMING: bus resume in progress as part of
 *  system resume
 * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDING: bus suspend in progress as part of
 *  system suspend
 * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDED: bus suspended as part of system suspend
 */
enum hif_system_pm_state {
	HIF_SYSTEM_PM_STATE_ON,
	HIF_SYSTEM_PM_STATE_BUS_RESUMING,
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDING,
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDED,
};
515 
#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/* one event history is kept per interrupt context */
#define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/* HIF_EVENT_HIST_MAX should always be power of 2 */
#define HIF_EVENT_HIST_MAX		512

/* debug builds: enable recording of all eight event types */
#define HIF_EVENT_HIST_ENABLE_MASK	0xFF

/**
 * hif_get_log_timestamp() - timestamp for history entries (debug builds)
 *
 * Return: qdf log timestamp
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_get_log_timestamp();
}

#else

#define HIF_EVENT_HIST_MAX		32
/* Enable IRQ TRIGGER, NAPI SCHEDULE, SRNG ACCESS START */
#define HIF_EVENT_HIST_ENABLE_MASK	0x19

/**
 * hif_get_log_timestamp() - timestamp for history entries (perf builds)
 *
 * Return: qdf scheduler clock value
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_sched_clock();
}

#endif
542 
/**
 * struct hif_event_record - an entry of the DP event history
 * @hal_ring_id: ring id for which event is recorded
 * @hp: head pointer of the ring (may not be applicable for all events)
 * @tp: tail pointer of the ring (may not be applicable for all events)
 * @cpu_id: cpu id on which the event occurred
 * @timestamp: timestamp when event occurred
 * @type: type of the event
 *
 * This structure represents the information stored for every datapath
 * event which is logged in the history.
 */
struct hif_event_record {
	uint8_t hal_ring_id;
	uint32_t hp;
	uint32_t tp;
	int cpu_id;
	uint64_t timestamp;
	enum hif_event_type type;
};

/**
 * struct hif_event_misc - history related misc info
 * @last_irq_index: last irq event index in history
 * @last_irq_ts: last irq timestamp
 */
struct hif_event_misc {
	int32_t last_irq_index;
	uint64_t last_irq_ts;
};

/**
 * struct hif_event_history - history for one interrupt group
 * @index: index to store new event
 * @misc: last-irq tracking info for this group
 * @event: event entry
 *
 * This structure represents the datapath history for one
 * interrupt group.
 */
struct hif_event_history {
	qdf_atomic_t index;
	struct hif_event_misc misc;
	struct hif_event_record event[HIF_EVENT_HIST_MAX];
};
587 
/**
 * hif_hist_record_event() - Record one datapath event in history
 * @hif_ctx: HIF opaque context
 * @event: DP event entry
 * @intr_grp_id: interrupt group ID registered with hif
 *
 * Return: None
 */
void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
			   struct hif_event_record *event,
			   uint8_t intr_grp_id);

/**
 * hif_event_history_init() - Initialize SRNG event history buffers
 * @hif_ctx: HIF opaque context
 * @id: context group ID for which history is recorded
 *
 * Return: None
 */
void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id);

/**
 * hif_event_history_deinit() - De-initialize SRNG event history buffers
 * @hif_ctx: HIF opaque context
 * @id: context group ID for which history is recorded
 *
 * Return: None
 */
void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id);
617 
618 /**
619  * hif_record_event() - Wrapper function to form and record DP event
620  * @hif_ctx: HIF opaque context
621  * @intr_grp_id: interrupt group ID registered with hif
622  * @hal_ring_id: ring id for which event is recorded
623  * @hp: head pointer index of the srng
624  * @tp: tail pointer index of the srng
625  * @type: type of the event to be logged in history
626  *
627  * Return: None
628  */
629 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
630 				    uint8_t intr_grp_id,
631 				    uint8_t hal_ring_id,
632 				    uint32_t hp,
633 				    uint32_t tp,
634 				    enum hif_event_type type)
635 {
636 	struct hif_event_record event;
637 
638 	event.hal_ring_id = hal_ring_id;
639 	event.hp = hp;
640 	event.tp = tp;
641 	event.type = type;
642 
643 	hif_hist_record_event(hif_ctx, &event, intr_grp_id);
644 
645 	return;
646 }
647 
#else

/* Event history disabled: recording is a no-op */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}

/* Event history disabled: init is a no-op */
static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx,
					  uint8_t id)
{
}

/* Event history disabled: deinit is a no-op */
static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx,
					    uint8_t id)
{
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx);

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx);
#else
/* Descriptor history disabled: display is a no-op */
static
inline void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx) {}
#endif
678 
/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
 *
 * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
 *                         platform-specific measures to completely power-off
 *                         the module and associated hardware (i.e. cut power
 *                         supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};

/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 * @HIF_ENABLE_TYPE_MAX: number of enable types
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};

/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 * @HIF_DISABLE_TYPE_MAX: number of disable types
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get block address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
759 
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/**
 * struct HID_ACCESS_LOG - records one debug register access
 * @seqnum: sequence number of the access
 * @is_write: true for a write access, false for a read
 * @addr: address accessed
 * @value: value written or read
 */
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
#endif

void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
		uint32_t value);
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);

#define HIF_MAX_DEVICES                 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context:             context to pass to the dsr handler
 *                       note : rw_compl_handler is provided the context
 *                       passed to hif_read_write
 * @rw_compl_handler:    Read / write completion handler
 * @dsr_handler:         DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};
787 
/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 * @get_bandwidth_level: Query current bandwidth level for the driver
 * @prealloc_get_consistent_mem_unaligned: get prealloc unaligned consistent
 *                                         mem
 * @prealloc_put_consistent_mem_unaligned: put unaligned consistent mem to
 *                                         pool
 *
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
	int (*get_bandwidth_level)(void *context);
	void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size,
						       qdf_dma_addr_t *paddr,
						       uint32_t ring_type);
	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
};

/* This API detaches the HTC layer from the HIF device */
void hif_detach_htc(struct hif_opaque_softc *hif_ctx);

/****************************************************************/
/* BMI and Diag window abstraction                              */
/****************************************************************/

#define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))

#define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
				     * handled atomically by
				     * DiagRead/DiagWrite
				     */
828 
#ifdef WLAN_FEATURE_BMI
/*
 * API to handle HIF-specific BMI message exchanges, this API is synchronous
 * and only allowed to be called from a context that can block (sleep)
 */
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
				uint8_t *pSendMessage, uint32_t Length,
				uint8_t *pResponseMessage,
				uint32_t *pResponseLength, uint32_t TimeoutMS);
void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
#else /* WLAN_FEATURE_BMI */
/* BMI disabled: callback registration is a no-op */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}

/* BMI disabled: the target never needs BMI */
static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
#endif /* WLAN_FEATURE_BMI */
853 
#ifdef HIF_CPU_CLEAR_AFFINITY
/**
 * hif_config_irq_clear_cpu_affinity() - Remove cpu affinity of IRQ
 * @scn: HIF handle
 * @intr_ctxt_id: interrupt group index
 * @cpu: CPU core to clear
 *
 * Return: None
 */
void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
				       int intr_ctxt_id, int cpu);
#else
/* Feature disabled: clearing cpu affinity is a no-op */
static inline
void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
				       int intr_ctxt_id, int cpu)
{
}
#endif
872 
/*
 * APIs to handle HIF specific diagnostic read accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep). They are not high performance APIs.
 *
 * hif_diag_read_access reads a 4 Byte aligned/length value from a
 * Target register or memory word.
 *
 * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
				uint32_t address, uint32_t *data);
QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
		      uint8_t *data, int nbytes);
void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
			void *ramdump_base, uint32_t address, uint32_t size);
/*
 * APIs to handle HIF specific diagnostic write accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep).
 * They are not high performance APIs.
 *
 * hif_diag_write_access writes a 4 Byte aligned/length value to a
 * Target register or memory word.
 *
 * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
				 uint32_t address, uint32_t data);
QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
			uint32_t address, uint8_t *data, int nbytes);

/* fastpath rx handler; args appear to be (context, nbuf array, count) -
 * confirm against the registering caller
 */
typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);

void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
909 
/*
 * Set the FASTPATH_mode_on flag in sc, for use by data path
 */
#ifdef WLAN_FEATURE_FASTPATH
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);

/**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @hif_ctx: HIF opaque context
 * @handler: Callback function
 * @context: handle for callback function
 *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
 */
QDF_STATUS hif_ce_fastpath_cb_register(
		struct hif_opaque_softc *hif_ctx,
		fastpath_msg_handler handler, void *context);
#else
/* Fastpath disabled: registration always fails */
static inline QDF_STATUS hif_ce_fastpath_cb_register(
		struct hif_opaque_softc *hif_ctx,
		fastpath_msg_handler handler, void *context)
{
	return QDF_STATUS_E_FAILURE;
}

/* Fastpath disabled: no CE handle is available */
static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}

#endif

/*
 * Enable/disable CDC max performance workaround
 * For max-performance set this to 0
 * To allow SoC to enter sleep set this to 1
 */
#define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0

void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
			     qdf_shared_mem_t **ce_sr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr);
954 
/**
 * struct hif_msg_callbacks - List of callbacks - filled in by HTC.
 * @Context: context meaningful to HTC
 * @txCompletionHandler: tx completion callback
 * @rxCompletionHandler: rx completion callback
 * @txResourceAvailHandler: tx resource available callback
 * @fwEventHandler: firmware event callback
 * @update_bundle_stats: bundle stats update callback
 */
struct hif_msg_callbacks {
	void *Context;
	/**< context meaningful to HTC */
	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint32_t transferID,
					uint32_t toeplitz_hash_result);
	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint8_t pipeID);
	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
	void (*fwEventHandler)(void *context, QDF_STATUS status);
	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
};

enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,  /* target got reset */
	TARGET_STATUS_EJECT,  /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
977 
978 /**
979  * enum hif_attribute_flags: configure hif
980  *
981  * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
982  * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
983  *  							+ No pktlog CE
984  */
985 enum hif_attribute_flags {
986 	HIF_LOWDESC_CE_CFG = 1,
987 	HIF_LOWDESC_CE_NO_PKTLOG_CFG
988 };
989 
/*
 * Helpers that pack per-packet metadata into a CE data-attribute word:
 * each macro masks the value v to its field width and ORs it into attr
 * at the field's bit offset.  Note they only set bits; callers are
 * expected to start from a cleared attr word.
 */
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	(attr |= (v & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	(attr |= (v & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	(attr |= (v & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	(attr |= (v & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	(attr |= (v & 0x01) << 30)
1004 
/**
 * struct hif_ul_pipe_info - uplink (host->target) copy-engine ring state
 * @nentries: number of entries in the ring
 * @nentries_mask: nentries - 1, used for ring index wrap-around
 * @sw_index: software consumer index
 * @write_index: cached copy of the producer index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (device) address space
 */
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

/**
 * struct hif_dl_pipe_info - downlink (target->host) copy-engine ring state;
 * fields mirror struct hif_ul_pipe_info
 */
struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

/**
 * struct hif_pipe_addl_info - additional info for a pipe pair
 * @pci_mem: PCI memory address
 * @ctrl_addr: control register address of the copy engine
 * @ul_pipe: uplink pipe ring state
 * @dl_pipe: downlink pipe ring state
 */
struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
1031 
1032 #ifdef CONFIG_SLUB_DEBUG_ON
1033 #define MSG_FLUSH_NUM 16
1034 #else /* PERF build */
1035 #define MSG_FLUSH_NUM 32
1036 #endif /* SLUB_DEBUG_ON */
1037 
1038 struct hif_bus_id;
1039 
1040 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
1041 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
1042 		     int opcode, void *config, uint32_t config_len);
1043 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
1044 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
1045 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
1046 		   struct hif_msg_callbacks *callbacks);
1047 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
1048 void hif_stop(struct hif_opaque_softc *hif_ctx);
1049 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
1050 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
1051 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
1052 		      uint8_t cmd_id, bool start);
1053 
1054 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1055 				  uint32_t transferID, uint32_t nbytes,
1056 				  qdf_nbuf_t wbuf, uint32_t data_attr);
1057 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1058 			     int force);
1059 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1060 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
1061 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
1062 			  uint8_t *DLPipe);
1063 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
1064 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
1065 			int *dl_is_polled);
1066 uint16_t
1067 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1068 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
1069 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
1070 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
1071 		     bool wait_for_it);
1072 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
#ifndef HIF_PCI
/* Non-PCI builds: SoC status check is a no-op that reports success. */
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
#else
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif
1081 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1082 			u32 *revision, const char **target_name);
1083 
1084 #ifdef RECEIVE_OFFLOAD
1085 /**
1086  * hif_offld_flush_cb_register() - Register the offld flush callback
1087  * @scn: HIF opaque context
1088  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
1089  *			 Or GRO/LRO flush when RxThread is not enabled. Called
1090  *			 with corresponding context for flush.
1091  * Return: None
1092  */
1093 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
1094 				 void (offld_flush_handler)(void *ol_ctx));
1095 
1096 /**
1097  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
1098  * @scn: HIF opaque context
1099  *
1100  * Return: None
1101  */
1102 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
1103 #endif
1104 
1105 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
1106 /**
1107  * hif_exec_should_yield() - Check if hif napi context should yield
1108  * @hif_ctx - HIF opaque context
1109  * @grp_id - grp_id of the napi for which check needs to be done
1110  *
1111  * The function uses grp_id to look for NAPI and checks if NAPI needs to
1112  * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
1113  * yield decision.
1114  *
1115  * Return: true if NAPI needs to yield, else false
1116  */
1117 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
1118 #else
/* Rx softirq time-limit feature compiled out: NAPI never needs to yield. */
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
					 uint grp_id)
{
	return false;
}
1124 #endif
1125 
1126 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
1127 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
1128 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
1129 				      int htc_htt_tx_endpoint);
1130 
1131 /**
1132  * hif_open() - Create hif handle
1133  * @qdf_ctx: qdf context
1134  * @mode: Driver Mode
1135  * @bus_type: Bus Type
1136  * @cbk: CDS Callbacks
1137  * @psoc: psoc object manager
1138  *
1139  * API to open HIF Context
1140  *
1141  * Return: HIF Opaque Pointer
1142  */
1143 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1144 				  uint32_t mode,
1145 				  enum qdf_bus_type bus_type,
1146 				  struct hif_driver_state_callbacks *cbk,
1147 				  struct wlan_objmgr_psoc *psoc);
1148 
1149 /**
1150  * hif_init_dma_mask() - Set dma mask for the dev
1151  * @dev: dev for which DMA mask is to be set
1152  * @bus_type: bus type for the target
1153  *
1154  * This API sets the DMA mask for the device. before the datapath
1155  * memory pre-allocation is done. If the DMA mask is not set before
1156  * requesting the DMA memory, kernel defaults to a 32-bit DMA mask,
1157  * and does not utilize the full device capability.
1158  *
1159  * Return: 0 - success, non-zero on failure.
1160  */
1161 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type);
1162 void hif_close(struct hif_opaque_softc *hif_ctx);
1163 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1164 		      void *bdev, const struct hif_bus_id *bid,
1165 		      enum qdf_bus_type bus_type,
1166 		      enum hif_enable_type type);
1167 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
1168 #ifdef CE_TASKLET_DEBUG_ENABLE
1169 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
1170 				 uint8_t value);
1171 #endif
1172 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
1173 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
1174 
1175 /**
1176  * enum hif_pm_wake_irq_type - Wake interrupt type for Power Management
1177  * HIF_PM_INVALID_WAKE: Wake irq is invalid or not configured
1178  * HIF_PM_MSI_WAKE: Wake irq is MSI interrupt
1179  * HIF_PM_CE_WAKE: Wake irq is CE interrupt
1180  */
1181 typedef enum {
1182 	HIF_PM_INVALID_WAKE,
1183 	HIF_PM_MSI_WAKE,
1184 	HIF_PM_CE_WAKE,
1185 } hif_pm_wake_irq_type;
1186 
1187 /**
1188  * hif_pm_get_wake_irq_type - Get wake irq type for Power Management
1189  * @hif_ctx: HIF context
1190  *
1191  * Return: enum hif_pm_wake_irq_type
1192  */
1193 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx);
1194 
1195 /**
1196  * enum hif_ep_vote_type - hif ep vote type
1197  * HIF_EP_VOTE_DP_ACCESS: vote type is specific DP
1198  * HIF_EP_VOTE_NONDP_ACCESS: ep vote for over all access
1199  */
1200 enum hif_ep_vote_type {
1201 	HIF_EP_VOTE_DP_ACCESS,
1202 	HIF_EP_VOTE_NONDP_ACCESS
1203 };
1204 
1205 /**
1206  * enum hif_ep_vote_access - hif ep vote access
1207  * HIF_EP_VOTE_ACCESS_ENABLE: Enable ep voting
1208  * HIF_EP_VOTE_INTERMEDIATE_ACCESS: allow during transition
1209  * HIF_EP_VOTE_ACCESS_DISABLE: disable ep voting
1210  */
1211 enum hif_ep_vote_access {
1212 	HIF_EP_VOTE_ACCESS_ENABLE,
1213 	HIF_EP_VOTE_INTERMEDIATE_ACCESS,
1214 	HIF_EP_VOTE_ACCESS_DISABLE
1215 };
1216 
1217 /**
1218  * enum hif_rpm_id - modules registered with runtime pm module
1219  * @HIF_RTPM_ID_RESERVED: Reserved ID
1220  * @HIF_RTPM_ID_HAL_REO_CMD: HAL REO commands
1221  * @HIF_RTPM_ID_WMI: WMI commands Tx
1222  * @HIF_RTPM_ID_HTT: HTT commands Tx
1223  * @HIF_RTPM_ID_DP_TX: Datapath Tx path
1224  * @HIF_RTPM_ID_DP_RING_STATS: Datapath ring stats
1225  * @HIF_RTPM_ID_CE_SEND_FAST: CE Tx buffer posting
1226  * @HIF_RTPM_ID_FORCE_WAKE: Force wake request
1227  * @HIF_RTPM_ID_PREVENT_LINKDOWN: Prevent linkdown by not allowing runtime PM
1228  * @HIF_RTPM_ID_PREVENT_ALLOW_LOCK: Generic ID for runtime PM lock contexts
1229  * @HIF_RTPM_ID_MAX: Max id
1230  */
1231 enum  hif_rtpm_client_id {
1232 	HIF_RTPM_ID_RESERVED,
1233 	HIF_RTPM_ID_HAL_REO_CMD,
1234 	HIF_RTPM_ID_WMI,
1235 	HIF_RTPM_ID_HTT,
1236 	HIF_RTPM_ID_DP,
1237 	HIF_RTPM_ID_DP_RING_STATS,
1238 	HIF_RTPM_ID_CE,
1239 	HIF_RTPM_ID_FORCE_WAKE,
1240 	HIF_RTPM_ID_PM_QOS_NOTIFY,
1241 	HIF_RTPM_ID_WIPHY_SUSPEND,
1242 	HIF_RTPM_ID_MAX
1243 };
1244 
1245 /**
1246  * enum hif_rpm_type - Get and Put calls types
1247  * HIF_RTPM_GET_ASYNC: Increment usage count and when system is suspended
1248  *		      schedule resume process, return depends on pm state.
1249  * HIF_RTPM_GET_FORCE: Increment usage count and when system is suspended
1250  *		      schedule resume process, returns success irrespective of
1251  *		      pm_state.
1252  * HIF_RTPM_GET_SYNC: Increment usage count and when system is suspended,
1253  *		     wait till process is resumed.
1254  * HIF_RTPM_GET_NORESUME: Only increments usage count.
1255  * HIF_RTPM_PUT_ASYNC: Decrements usage count and puts system in idle state.
1256  * HIF_RTPM_PUT_SYNC_SUSPEND: Decrements usage count and puts system in
1257  *			     suspended state.
1258  * HIF_RTPM_PUT_NOIDLE: Decrements usage count.
1259  */
1260 enum rpm_type {
1261 	HIF_RTPM_GET_ASYNC,
1262 	HIF_RTPM_GET_FORCE,
1263 	HIF_RTPM_GET_SYNC,
1264 	HIF_RTPM_GET_NORESUME,
1265 	HIF_RTPM_PUT_ASYNC,
1266 	HIF_RTPM_PUT_SYNC_SUSPEND,
1267 	HIF_RTPM_PUT_NOIDLE,
1268 };
1269 
1270 /**
1271  * struct hif_pm_runtime_lock - data structure for preventing runtime suspend
1272  * @list - global list of runtime locks
1273  * @active - true if this lock is preventing suspend
1274  * @name - character string for tracking this lock
1275  */
1276 struct hif_pm_runtime_lock {
1277 	struct list_head list;
1278 	bool active;
1279 	const char *name;
1280 };
1281 
1282 #ifdef FEATURE_RUNTIME_PM
1283 /**
1284  * hif_rtpm_register() - Register a module with runtime PM.
1285  * @id: ID of the module which needs to be registered
1286  * @hif_rpm_cbk: callback to be called when get was called in suspended state.
1287  * @prevent_multiple_get: not allow simultaneous get calls or put calls
1288  *
1289  * Return: success status if successfully registered
1290  */
1291 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void));
1292 
1293 /**
1294  * hif_rtpm_deregister() - Deregister the module
1295  * @id: ID of the module which needs to be de-registered
1296  */
1297 QDF_STATUS hif_rtpm_deregister(uint32_t id);
1298 
1299 /**
1300  * hif_rtpm_set_autosuspend_delay() - Set delay to trigger RTPM suspend
1301  * @delay: delay in ms to be set
1302  *
1303  * Return: Success if delay is set successfully
1304  */
1305 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay);
1306 
1307 /**
1308  * hif_rtpm_restore_autosuspend_delay() - Restore delay value to default value
1309  *
1310  * Return: Success if reset done. E_ALREADY if delay same as config value
1311  */
1312 QDF_STATUS hif_rtpm_restore_autosuspend_delay(void);
1313 
1314 /**
1315  * hif_rtpm_get_autosuspend_delay() -Get delay to trigger RTPM suspend
1316  *
1317  * Return: Delay in ms
1318  */
1319 int hif_rtpm_get_autosuspend_delay(void);
1320 
1321 /**
1322  * hif_runtime_lock_init() - API to initialize Runtime PM context
1323  * @lock: QDF lock context
1324  * @name: Context name
1325  *
1326  * This API initializes the Runtime PM context of the caller and
1327  * return the pointer.
1328  *
1329  * Return: None
1330  */
1331 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
1332 
1333 /**
1334  * hif_runtime_lock_deinit() - This API frees the runtime pm context
1335  * @data: Runtime PM context
1336  *
1337  * Return: void
1338  */
1339 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data);
1340 
1341 /**
1342  * hif_rtpm_get() - Increment usage_count on the device to avoid suspend.
1343  * @type: get call types from hif_rpm_type
1344  * @id: ID of the module calling get()
1345  *
1346  * A get operation will prevent a runtime suspend until a
1347  * corresponding put is done.  This api should be used when accessing bus.
1348  *
1349  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
1350  * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!!
1351  *
1352  * return: success if a get has been issued, else error code.
1353  */
1354 QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id);
1355 
1356 /**
1357  * hif_pm_runtime_put() - do a put operation on the device
1358  * @type: put call types from hif_rpm_type
1359  * @id: ID of the module calling put()
1360  *
1361  * A put operation will allow a runtime suspend after a corresponding
1362  * get was done.  This api should be used when finished accessing bus.
1363  *
1364  * This api will return a failure if runtime pm is stopped
1365  * This api will return failure if it would decrement the usage count below 0.
1366  *
1367  * return: QDF_STATUS_SUCCESS if the put is performed
1368  */
1369 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id);
1370 
1371 /**
1372  * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend
1373  * @data: runtime PM lock
1374  *
1375  * This function will prevent runtime suspend, by incrementing
1376  * device's usage count.
1377  *
1378  * Return: status
1379  */
1380 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data);
1381 
1382 /**
1383  * hif_pm_runtime_prevent_suspend_sync() - Synchronized prevent Runtime suspend
1384  * @data: runtime PM lock
1385  *
1386  * This function will prevent runtime suspend, by incrementing
1387  * device's usage count.
1388  *
1389  * Return: status
1390  */
1391 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data);
1392 
1393 /**
1394  * hif_pm_runtime_allow_suspend() - Allow Runtime suspend
1395  * @data: runtime PM lock
1396  *
1397  * This function will allow runtime suspend, by decrementing
1398  * device's usage count.
1399  *
1400  * Return: status
1401  */
1402 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data);
1403 
1404 /**
1405  * hif_rtpm_request_resume() - Request resume if bus is suspended
1406  *
1407  * Return: None
1408  */
1409 void hif_rtpm_request_resume(void);
1410 
1411 /**
1412  * hif_rtpm_sync_resume() - Invoke synchronous runtime resume.
1413  *
1414  * This function will invoke synchronous runtime resume.
1415  *
1416  * Return: status
1417  */
1418 QDF_STATUS hif_rtpm_sync_resume(void);
1419 
1420 /**
1421  * hif_rtpm_check_and_request_resume() - check if bus is suspended and
1422  *                                       request resume.
1423  *
1424  * Return: void
1425  */
1426 void hif_rtpm_check_and_request_resume(void);
1427 
1428 /**
1429  * hif_rtpm_set_client_job() - Set job for the client.
1430  * @client_id: Client id for which job needs to be set
1431  *
1432  * If get failed due to system being in suspended state, set the client job so
1433  * when system resumes the client's job is called.
1434  *
1435  * Return: None
1436  */
1437 void hif_rtpm_set_client_job(uint32_t client_id);
1438 
1439 /**
1440  * hif_rtpm_mark_last_busy() - Mark last busy to delay retry to suspend
1441  * @id: ID marking last busy
1442  *
1443  * Return: None
1444  */
1445 void hif_rtpm_mark_last_busy(uint32_t id);
1446 
1447 /**
1448  * hif_rtpm_get_monitor_wake_intr() - API to get monitor_wake_intr
1449  *
1450  * monitor_wake_intr variable can be used to indicate if driver expects wake
1451  * MSI for runtime PM
1452  *
1453  * Return: monitor_wake_intr variable
1454  */
1455 int hif_rtpm_get_monitor_wake_intr(void);
1456 
1457 /**
1458  * hif_rtpm_set_monitor_wake_intr() - API to set monitor_wake_intr
1459  * @val: value to set
1460  *
1461  * monitor_wake_intr variable can be used to indicate if driver expects wake
1462  * MSI for runtime PM
1463  *
1464  * Return: void
1465  */
1466 void hif_rtpm_set_monitor_wake_intr(int val);
1467 
1468 /**
1469  * hif_pre_runtime_suspend() - book keeping before beginning runtime suspend.
1470  * @hif_ctx: HIF context
1471  *
1472  * Makes sure that the pci link will be taken down by the suspend operation.
1473  * If the hif layer is configured to leave the bus on, runtime suspend will
1474  * not save any power.
1475  *
1476  * Set the runtime suspend state to SUSPENDING.
1477  *
1478  * return -EINVAL if the bus won't go down.  otherwise return 0
1479  */
1480 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1481 
1482 /**
1483  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
1484  *
1485  * update the runtime pm state to RESUMING.
1486  * Return: void
1487  */
1488 void hif_pre_runtime_resume(void);
1489 
1490 /**
1491  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
1492  *
1493  * Record the success.
1494  * update the runtime_pm state to SUSPENDED
1495  * Return: void
1496  */
1497 void hif_process_runtime_suspend_success(void);
1498 
1499 /**
1500  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
1501  *
1502  * Record the failure.
1503  * mark last busy to delay a retry.
1504  * update the runtime_pm state back to ON
1505  *
1506  * Return: void
1507  */
1508 void hif_process_runtime_suspend_failure(void);
1509 
1510 /**
1511  * hif_process_runtime_suspend_failure() - bookkeeping of resuming link up
1512  *
1513  * update the runtime_pm state to RESUMING_LINKUP
1514  * Return: void
1515  */
1516 void hif_process_runtime_resume_linkup(void);
1517 
1518 /**
1519  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
1520  *
1521  * record the success.
1522  * update the runtime_pm state to SUSPENDED
1523  * Return: void
1524  */
1525 void hif_process_runtime_resume_success(void);
1526 
1527 /**
1528  * hif_rtpm_print_prevent_list() - list the clients preventing suspend.
1529  *
1530  * Return: None
1531  */
1532 void hif_rtpm_print_prevent_list(void);
1533 
1534 /**
1535  * hif_rtpm_suspend_lock() - spin_lock on marking runtime suspend
1536  *
1537  * Return: void
1538  */
1539 void hif_rtpm_suspend_lock(void);
1540 
1541 /**
1542  * hif_rtpm_suspend_unlock() - spin_unlock on marking runtime suspend
1543  *
1544  * Return: void
1545  */
1546 void hif_rtpm_suspend_unlock(void);
1547 
1548 /**
1549  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
1550  * @hif_ctx: HIF context
1551  *
1552  * Return: 0 for success and non-zero error code for failure
1553  */
1554 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1555 
1556 /**
1557  * hif_runtime_resume() - do the bus resume part of a runtime resume
1558  * @hif_ctx: HIF context
1559  *
1560  * Return: 0 for success and non-zero error code for failure
1561  */
1562 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
1563 
1564 /**
1565  * hif_fastpath_resume() - resume fastpath for runtimepm
1566  * @hif_ctx: HIF context
1567  *
1568  * ensure that the fastpath write index register is up to date
1569  * since runtime pm may cause ce_send_fast to skip the register
1570  * write.
1571  *
1572  * fastpath only applicable to legacy copy engine
1573  */
1574 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
1575 
1576 /**
1577  * hif_rtpm_get_state(): get rtpm link state
1578  *
1579  * Return: state
1580  */
1581 int hif_rtpm_get_state(void);
1582 
1583 /**
1584  * hif_rtpm_display_last_busy_hist() - Display runtimepm last busy history
1585  * @hif_ctx: HIF context
1586  *
1587  * Return: None
1588  */
1589 void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx);
1590 
1591 /**
1592  * hif_rtpm_record_ce_last_busy_evt() - Record CE runtimepm last busy event
1593  * @hif_ctx: HIF context
1594  *
1595  * Return: None
1596  */
1597 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1598 				      unsigned long ce_id);
1599 #else
1600 
1601 /**
1602  * hif_rtpm_display_last_busy_hist() - Display runtimepm last busy history
1603  * @hif_ctx: HIF context
1604  *
1605  * Return: None
1606  */
1607 static inline
1608 void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx) { }
1609 
1610 /**
1611  * hif_rtpm_record_ce_last_busy_evt() - Record CE runtimepm last busy event
1612  * @hif_ctx: HIF context
1613  *
1614  * Return: None
1615  */
1616 static inline
1617 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1618 				      unsigned long ce_id)
1619 { }
1620 
1621 static inline
1622 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void))
1623 { return QDF_STATUS_SUCCESS; }
1624 
1625 static inline
1626 QDF_STATUS hif_rtpm_deregister(uint32_t id)
1627 { return QDF_STATUS_SUCCESS; }
1628 
1629 static inline
1630 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay)
1631 { return QDF_STATUS_SUCCESS; }
1632 
1633 static inline QDF_STATUS hif_rtpm_restore_autosuspend_delay(void)
1634 { return QDF_STATUS_SUCCESS; }
1635 
1636 static inline int hif_rtpm_get_autosuspend_delay(void)
1637 { return 0; }
1638 
1639 static inline
1640 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
1641 { return 0; }
1642 
1643 static inline
1644 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data)
1645 {}
1646 
1647 static inline
1648 int hif_rtpm_get(uint8_t type, uint32_t id)
1649 { return QDF_STATUS_SUCCESS; }
1650 
/* Remaining FEATURE_RUNTIME_PM no-op stubs: puts succeed, prevent/allow
 * report success, and state/lock helpers do nothing.
 */
static inline
QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
{ return QDF_STATUS_SUCCESS; }

static inline
int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data)
{ return 0; }

static inline
int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data)
{ return 0; }

static inline
int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data)
{ return 0; }

static inline
QDF_STATUS hif_rtpm_sync_resume(void)
{ return QDF_STATUS_SUCCESS; }

static inline
void hif_rtpm_request_resume(void)
{}

static inline
void hif_rtpm_check_and_request_resume(void)
{}

static inline
void hif_rtpm_set_client_job(uint32_t client_id)
{}

static inline
void hif_rtpm_print_prevent_list(void)
{}

static inline
void hif_rtpm_suspend_unlock(void)
{}

static inline
void hif_rtpm_suspend_lock(void)
{}

static inline
int hif_rtpm_get_monitor_wake_intr(void)
{ return 0; }

static inline
void hif_rtpm_set_monitor_wake_intr(int val)
{}

static inline
void hif_rtpm_mark_last_busy(uint32_t id)
{}
1706 #endif
1707 
1708 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
1709 				 bool is_packet_log_enabled);
1710 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
1711 
1712 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
1713 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
1714 
1715 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
1716 
1717 #ifdef IPA_OFFLOAD
1718 /**
1719  * hif_get_ipa_hw_type() - get IPA hw type
1720  *
1721  * This API return the IPA hw type.
1722  *
1723  * Return: IPA hw type
1724  */
1725 static inline
1726 enum ipa_hw_type hif_get_ipa_hw_type(void)
1727 {
1728 	return ipa_get_hw_type();
1729 }
1730 
1731 /**
1732  * hif_get_ipa_present() - get IPA hw status
1733  *
1734  * This API return the IPA hw status.
1735  *
1736  * Return: true if IPA is present or false otherwise
1737  */
1738 static inline
1739 bool hif_get_ipa_present(void)
1740 {
1741 	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
1742 		return true;
1743 	else
1744 		return false;
1745 }
1746 #endif
1747 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
1748 /**
1749  * hif_bus_ealry_suspend() - stop non wmi tx traffic
1750  * @context: hif context
1751  */
1752 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
1753 
1754 /**
1755  * hif_bus_late_resume() - resume non wmi traffic
1756  * @context: hif context
1757  */
1758 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
1759 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
1760 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
1761 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
1762 
1763 /**
1764  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
1765  * @hif_ctx: an opaque HIF handle to use
1766  *
1767  * As opposed to the standard hif_irq_enable, this function always applies to
1768  * the APPS side kernel interrupt handling.
1769  *
1770  * Return: errno
1771  */
1772 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
1773 
1774 /**
1775  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
1776  * @hif_ctx: an opaque HIF handle to use
1777  *
1778  * As opposed to the standard hif_irq_disable, this function always applies to
1779  * the APPS side kernel interrupt handling.
1780  *
1781  * Return: errno
1782  */
1783 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
1784 
1785 /**
1786  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
1787  * @hif_ctx: an opaque HIF handle to use
1788  *
1789  * As opposed to the standard hif_irq_enable, this function always applies to
1790  * the APPS side kernel interrupt handling.
1791  *
1792  * Return: errno
1793  */
1794 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
1795 
1796 /**
1797  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
1798  * @hif_ctx: an opaque HIF handle to use
1799  *
1800  * As opposed to the standard hif_irq_disable, this function always applies to
1801  * the APPS side kernel interrupt handling.
1802  *
1803  * Return: errno
1804  */
1805 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
1806 
1807 /**
1808  * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side
1809  * @hif_ctx: an opaque HIF handle to use
1810  *
1811  * This function always applies to the APPS side kernel interrupt handling
1812  * to wake the system from suspend.
1813  *
1814  * Return: errno
1815  */
1816 int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx);
1817 
1818 /**
1819  * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side
1820  * @hif_ctx: an opaque HIF handle to use
1821  *
1822  * This function always applies to the APPS side kernel interrupt handling
1823  * to disable the wake irq.
1824  *
1825  * Return: errno
1826  */
1827 int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx);
1828 
1829 /**
1830  * hif_apps_enable_irqs_except_wake_irq() - Enables all irqs except wake_irq
1831  * @hif_ctx: an opaque HIF handle to use
1832  *
1833  * As opposed to the standard hif_irq_enable, this function always applies to
1834  * the APPS side kernel interrupt handling.
1835  *
1836  * Return: errno
1837  */
1838 int hif_apps_enable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1839 
1840 /**
1841  * hif_apps_disable_irqs_except_wake_irq() - Disables all irqs except wake_irq
1842  * @hif_ctx: an opaque HIF handle to use
1843  *
1844  * As opposed to the standard hif_irq_disable, this function always applies to
1845  * the APPS side kernel interrupt handling.
1846  *
1847  * Return: errno
1848  */
1849 int hif_apps_disable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1850 
1851 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
1852 int hif_dump_registers(struct hif_opaque_softc *scn);
1853 int ol_copy_ramdump(struct hif_opaque_softc *scn);
1854 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
1855 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1856 		     u32 *revision, const char **target_name);
1857 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
1858 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
1859 						   scn);
1860 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
1861 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
1862 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
1863 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1864 			   hif_target_status);
1865 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1866 			 struct hif_config_info *cfg);
1867 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
1868 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1869 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
1870 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1871 			   uint32_t transfer_id, u_int32_t len);
1872 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
1873 	uint32_t transfer_id, uint32_t download_len);
1874 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
1875 void hif_ce_war_disable(void);
1876 void hif_ce_war_enable(void);
1877 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
1878 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
1879 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
1880 		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
1881 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
1882 		uint32_t pipe_num);
1883 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
1884 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
1885 
1886 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
1887 				int rx_bundle_cnt);
1888 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
1889 
1890 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
1891 
1892 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
1893 
/**
 * enum hif_exec_type - scheduling context for an ext group handler
 * @HIF_EXEC_NAPI_TYPE: handler is driven from a NAPI context
 * @HIF_EXEC_TASKLET_TYPE: handler is driven from a tasklet
 */
enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,
	HIF_EXEC_TASKLET_TYPE,
};
1898 
1899 typedef uint32_t (*ext_intr_handler)(void *, uint32_t, int);
1900 
1901 /**
1902  * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
1903  * @softc: hif opaque context owning the exec context
1904  * @id: the id of the interrupt context
1905  *
1906  * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
1907  *         'id' registered with the OS
1908  */
1909 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
1910 				uint8_t id);
1911 
1912 /**
1913  * hif_configure_ext_group_interrupts() - Configure ext group interrupts
1914  * @hif_ctx: hif opaque context
1915  *
1916  * Return: QDF_STATUS
1917  */
1918 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1919 
1920 /**
1921  * hif_deconfigure_ext_group_interrupts() - Deconfigure ext group interrupts
1922  * @hif_ctx: hif opaque context
1923  *
1924  * Return: None
1925  */
1926 void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1927 
1928 /**
1929  * hif_register_ext_group() - API to register external group
1930  * interrupt handler.
1931  * @hif_ctx : HIF Context
1932  * @numirq: number of irq's in the group
1933  * @irq: array of irq values
1934  * @handler: callback interrupt handler function
1935  * @cb_ctx: context to passed in callback
1936  * @type: napi vs tasklet
1937  *
1938  * Return: QDF_STATUS
1939  */
1940 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
1941 				  uint32_t numirq, uint32_t irq[],
1942 				  ext_intr_handler handler,
1943 				  void *cb_ctx, const char *context_name,
1944 				  enum hif_exec_type type, uint32_t scale);
1945 
1946 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
1947 				const char *context_name);
1948 
1949 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1950 				u_int8_t pipeid,
1951 				struct hif_msg_callbacks *callbacks);
1952 
1953 /**
1954  * hif_print_napi_stats() - Display HIF NAPI stats
1955  * @hif_ctx - HIF opaque context
1956  *
1957  * Return: None
1958  */
1959 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
1960 
1961 /* hif_clear_napi_stats() - function clears the stats of the
1962  * latency when called.
1963  * @hif_ctx - the HIF context to assign the callback to
1964  *
1965  * Return: None
1966  */
1967 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
1968 
1969 #ifdef __cplusplus
1970 }
1971 #endif
1972 
#ifdef FORCE_WAKE
/**
 * hif_force_wake_request() - Function to wake from power collapse
 * @handle: HIF opaque handle
 *
 * Description: API to check if the device is awake or not before
 * read/write to BAR + 4K registers. If device is awake return
 * success otherwise write '1' to
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG which will interrupt
 * the device and does wakeup the PCI and MHI within 50ms
 * and then the device writes a value to
 * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
 * handshake process to let the host know the device is awake.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_request(struct hif_opaque_softc *handle);

/**
 * hif_force_wake_release() - API to release/reset the SOC wake register
 * from interrupting the device.
 * @handle: HIF opaque handle
 *
 * Description: API to set the
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
 * to release the interrupt line.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_release(struct hif_opaque_softc *handle);
#else
/* FORCE_WAKE disabled: no wake handshake needed, stubs always succeed */
static inline
int hif_force_wake_request(struct hif_opaque_softc *handle)
{
	return 0;
}

static inline
int hif_force_wake_release(struct hif_opaque_softc *handle)
{
	return 0;
}
#endif /* FORCE_WAKE */
2016 
#ifdef FEATURE_HAL_DELAYED_REG_WRITE
/**
 * hif_prevent_link_low_power_states() - Prevent from going to low power states
 * @hif: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);

/**
 * hif_allow_link_low_power_states() - Allow link to go to low power states
 * @hif: HIF opaque context
 *
 * Return: None
 */
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);

#else

/* Delayed register write disabled: link power-state voting is a no-op */
static inline
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
{
}
#endif
2047 
/* Base address accessors for the device BAR and the CE register space */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle);

/**
 * hif_get_dev_ba_cmem() - get base address of CMEM
 * @hif_handle: the HIF context
 *
 * Return: base address of CMEM
 */
void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle);

/**
 * hif_get_soc_version() - get soc major version from target info
 * @hif_handle: the HIF context
 *
 * Return: version number
 */
uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle);

/**
 * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
 * @hif_ctx: the HIF context to assign the callback to
 * @callback: the callback to assign
 * @priv: the private data to pass to the callback when invoked
 *
 * Return: None
 */
void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
			       void (*callback)(void *),
			       void *priv);
/*
 * Note: for MCL builds, HIF_CONFIG_SLUB_DEBUG_ON is the flag expected
 * to be defined to enable the declarations below.
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/* sysfs-style helpers for dumping and controlling the CE descriptor history */
ssize_t hif_dump_desc_trace_buf(struct device *dev,
				struct device_attribute *attr, char *buf);
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
					const char *buf, size_t size);
ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
				const char *buf, size_t size);
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
#endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
2091 
2092 /**
2093  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
2094  * @hif: hif context
2095  * @ce_service_max_yield_time: CE service max yield time to set
2096  *
2097  * This API storess CE service max yield time in hif context based
2098  * on ini value.
2099  *
2100  * Return: void
2101  */
2102 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
2103 				       uint32_t ce_service_max_yield_time);
2104 
2105 /**
2106  * hif_get_ce_service_max_yield_time() - get CE service max yield time
2107  * @hif: hif context
2108  *
2109  * This API returns CE service max yield time.
2110  *
2111  * Return: CE service max yield time
2112  */
2113 unsigned long long
2114 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
2115 
2116 /**
2117  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
2118  * @hif: hif context
2119  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
2120  *
2121  * This API stores CE service max rx ind flush in hif context based
2122  * on ini value.
2123  *
2124  * Return: void
2125  */
2126 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2127 					 uint8_t ce_service_max_rx_ind_flush);
2128 
#ifdef OL_ATH_SMART_LOGGING
/**
 * hif_log_dump_ce() - Copy all the CE DEST ring to buf
 * @scn: HIF handler
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy Engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and buffers pointed by them in to the given buf
 *
 * Return: Current pointer in ring buffer
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
			 uint8_t *buf_init, uint32_t buf_sz,
			 uint32_t ce, uint32_t skb_sz);
#endif /* OL_ATH_SMART_LOGGING */
2148 
2149 /*
2150  * hif_softc_to_hif_opaque_softc - API to convert hif_softc handle
2151  * to hif_opaque_softc handle
2152  * @hif_handle - hif_softc type
2153  *
2154  * Return: hif_opaque_softc type
2155  */
2156 static inline struct hif_opaque_softc *
2157 hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
2158 {
2159 	return (struct hif_opaque_softc *)hif_handle;
2160 }
2161 
#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
/*
 * Endpoint (EP) vote-access accessors used with the delayed register
 * write feature; exact semantics of @type/@access live in the
 * implementation (HIF_EP_VOTE_ACCESS_* values).
 */
QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx);
void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx);
void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx);
void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			    uint8_t type, uint8_t access);
uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			       uint8_t type);
#else
/* Feature disabled: voting always succeeds and access is always enabled */
static inline QDF_STATUS
hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type, uint8_t access)
{
}

static inline uint8_t
hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type)
{
	return HIF_EP_VOTE_ACCESS_ENABLE;
}
#endif
2200 
#ifdef FORCE_WAKE
/**
 * hif_srng_init_phase() - Indicate srng initialization phase
 * to avoid force wake as UMAC power collapse is not yet
 * enabled
 * @hif_ctx: hif opaque handle
 * @init_phase: initialization phase
 *
 * Return:  None
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase);
#else
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
}
#endif /* FORCE_WAKE */

#ifdef HIF_IPCI
/**
 * hif_shutdown_notifier_cb() - Call back for shutdown notifier
 * @ctx: hif handle
 *
 * Return:  None
 */
void hif_shutdown_notifier_cb(void *ctx);
#else
static inline
void hif_shutdown_notifier_cb(void *ctx)
{
}
#endif /* HIF_IPCI */

#ifdef HIF_CE_LOG_INFO
/**
 * hif_log_ce_info() - API to log ce info
 * @scn: hif handle
 * @data: hang event data buffer
 * @offset: offset at which data needs to be written
 *
 * Return:  None
 */
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset);
#else
static inline
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset)
{
}
#endif

#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * hif_config_irq_set_perf_affinity_hint() - API to set affinity
 * @hif_ctx: hif opaque handle
 *
 * This function is used to move the WLAN IRQs to perf cores in
 * case of defconfig builds.
 *
 * Return:  None
 */
void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx);

#else
static inline void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx)
{
}
#endif
2274 
2275 /**
2276  * hif_apps_grp_irqs_enable() - enable ext grp irqs
2277  * @hif - HIF opaque context
2278  *
2279  * Return: 0 on success. Error code on failure.
2280  */
2281 int hif_apps_grp_irqs_enable(struct hif_opaque_softc *hif_ctx);
2282 
2283 /**
2284  * hif_apps_grp_irqs_disable() - disable ext grp irqs
2285  * @hif - HIF opaque context
2286  *
2287  * Return: 0 on success. Error code on failure.
2288  */
2289 int hif_apps_grp_irqs_disable(struct hif_opaque_softc *hif_ctx);
2290 
2291 /**
2292  * hif_disable_grp_irqs() - disable ext grp irqs
2293  * @hif - HIF opaque context
2294  *
2295  * Return: 0 on success. Error code on failure.
2296  */
2297 int hif_disable_grp_irqs(struct hif_opaque_softc *scn);
2298 
2299 /**
2300  * hif_enable_grp_irqs() - enable ext grp irqs
2301  * @hif - HIF opaque context
2302  *
2303  * Return: 0 on success. Error code on failure.
2304  */
2305 int hif_enable_grp_irqs(struct hif_opaque_softc *scn);
2306 
/**
 * enum hif_credit_exchange_type - credit exchange event being recorded
 * for latency detection (see hif_latency_detect_credit_record_time())
 * @HIF_REQUEST_CREDIT: a credit request event
 * @HIF_PROCESS_CREDIT_REPORT: a credit report processing event
 */
enum hif_credit_exchange_type {
	HIF_REQUEST_CREDIT,
	HIF_PROCESS_CREDIT_REPORT,
};

/**
 * enum hif_detect_latency_type - source monitored by latency detection
 * @HIF_DETECT_TASKLET: tasklet execution latency
 * @HIF_DETECT_CREDIT: credit exchange latency
 * @HIF_DETECT_UNKNOWN: unknown/unspecified source
 */
enum hif_detect_latency_type {
	HIF_DETECT_TASKLET,
	HIF_DETECT_CREDIT,
	HIF_DETECT_UNKNOWN
};
2317 
#ifdef HIF_DETECTION_LATENCY_ENABLE
/* Record the time of a credit exchange event for latency detection */
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx);

/* Start/stop the latency detection timer */
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx);
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx);
/* Handle detected tasklet/credit latency; @from_timer marks timer context */
void hif_tasklet_latency(struct hif_softc *scn, bool from_timer);
void hif_credit_latency(struct hif_softc *scn, bool from_timer);
/* Check the detection sources selected by @bitmap_type for latency */
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type);
/* Enable/disable latency detection at runtime */
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value);
#else
/*
 * Latency detection disabled: all hooks compile away to no-ops.
 * NOTE(review): hif_tasklet_latency()/hif_credit_latency() have no stubs
 * here - presumably only referenced when the feature is enabled; verify.
 */
static inline
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx)
{}
static inline
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{}

static inline
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{}
#endif
2355 
#ifdef SYSTEM_PM_CHECK
/**
 * __hif_system_pm_set_state() - Set system pm state
 * @hif: hif opaque handle
 * @state: system state
 *
 * Return:  None
 */
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state);

/**
 * hif_system_pm_set_state_on() - Set system pm state to ON
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_ON);
}

/**
 * hif_system_pm_set_state_resuming() - Set system pm state to resuming
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_RESUMING);
}

/**
 * hif_system_pm_set_state_suspending() - Set system pm state to suspending
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDING);
}

/**
 * hif_system_pm_set_state_suspended() - Set system pm state to suspended
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDED);
}

/**
 * hif_system_pm_get_state() - Get system pm state
 * @hif: hif opaque handle
 *
 * Return:  system state
 */
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif);

/**
 * hif_system_pm_state_check() - Check system state and trigger resume
 *  if required
 * @hif: hif opaque handle
 *
 * Return: 0 if system is in on state else error code
 */
int hif_system_pm_state_check(struct hif_opaque_softc *hif);
#else
/* SYSTEM_PM_CHECK disabled: PM state tracking is a no-op, state reads as 0 */
static inline
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
}

static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
}

static inline
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline int hif_system_pm_state_check(struct hif_opaque_softc *hif)
{
	return 0;
}
#endif
2469 
#ifdef FEATURE_IRQ_AFFINITY
/**
 * hif_set_grp_intr_affinity() - API to set affinity for grp
 *  intrs set in the bitmap
 * @scn: hif handle
 * @grp_intr_bitmask: grp intrs for which perf affinity should be
 *  applied
 * @perf: affine to perf or non-perf cluster
 *
 * Return: None
 */
void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
			       uint32_t grp_intr_bitmask, bool perf);
#else
static inline
void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
			       uint32_t grp_intr_bitmask, bool perf)
{
}
#endif
/**
 * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
 * @scn: hif opaque handle
 *
 * Description:
 *   Gets number of WMI EPs configured in target svc map. Since EP map
 *   include IN and OUT direction pipes, count only OUT pipes to get EPs
 *   configured for WMI service.
 *
 * Return:
 *  uint8_t: count for WMI eps in target svc map
 */
uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *scn);
2503 
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * hif_register_umac_reset_handler() - Register UMAC HW reset handler
 * @hif_scn: hif opaque handle
 * @handler: callback handler function
 * @cb_ctx: context to be passed to @handler
 * @irq: irq number to be used for UMAC HW reset interrupt
 *
 * Return: QDF_STATUS of operation
 */
QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
					   int (*handler)(void *cb_ctx),
					   void *cb_ctx, int irq);

/**
 * hif_unregister_umac_reset_handler() - Unregister UMAC HW reset handler
 * @hif_scn: hif opaque handle
 *
 * Return: QDF_STATUS of operation
 */
QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn);
#else
/* UMAC HW reset not supported: (un)registration succeeds as a no-op */
static inline
QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
					   int (*handler)(void *cb_ctx),
					   void *cb_ctx, int irq)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
{
	return QDF_STATUS_SUCCESS;
}

#endif /* DP_UMAC_HW_RESET_SUPPORT */
2541 
2542 #ifdef FEATURE_DIRECT_LINK
2543 /**
2544  * hif_set_irq_config_by_ceid() - Set irq configuration for CE given by id
2545  * @scn: hif opaque handle
2546  * @ce_id: CE id
2547  * @addr: irq trigger address
2548  * @data: irq trigger data
2549  *
2550  * Return: QDF status
2551  */
2552 QDF_STATUS
2553 hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
2554 			   uint64_t addr, uint32_t data);
2555 
2556 /**
2557  * hif_get_direct_link_ce_dest_srng_buffers() - Get Direct Link ce dest srng
2558  *  buffer information
2559  * @hif_ctx: hif opaque handle
2560  * @dma_addr: pointer to array of dma addresses
2561  *
2562  * Return: Number of buffers attached to the dest srng.
2563  */
2564 uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn,
2565 						  uint64_t **dma_addr);
2566 
2567 /**
2568  * hif_get_direct_link_ce_srng_info() - Get Direct Link CE srng information
2569  * @hif_ctx: hif opaque handle
2570  * @info: Direct Link CEs information
2571  * @max_ce_info_len: max array size of ce info
2572  *
2573  * Return: QDF status
2574  */
2575 QDF_STATUS
2576 hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
2577 				 struct hif_direct_link_ce_info *info,
2578 				 uint8_t max_ce_info_len);
2579 #else
2580 static inline QDF_STATUS
2581 hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
2582 			   uint64_t addr, uint32_t data)
2583 {
2584 	return QDF_STATUS_SUCCESS;
2585 }
2586 
2587 static inline
2588 uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn)
2589 {
2590 	return 0;
2591 }
2592 
2593 static inline QDF_STATUS
2594 hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
2595 				 struct hif_direct_link_ce_info *info,
2596 				 uint8_t max_ce_info_len)
2597 {
2598 	return QDF_STATUS_SUCCESS;
2599 }
2600 #endif
2601 #endif /* _HIF_H_ */
2602