xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision 7630cc90f02e8e853426e72adcbd746fb48d2d89)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _HIF_H_
21 #define _HIF_H_
22 
23 #ifdef __cplusplus
24 extern "C" {
25 #endif /* __cplusplus */
26 
27 /* Header files */
28 #include <qdf_status.h>
29 #include "qdf_nbuf.h"
30 #include "qdf_lro.h"
31 #include "ol_if_athvar.h"
32 #include <linux/platform_device.h>
33 #ifdef HIF_PCI
34 #include <linux/pci.h>
35 #endif /* HIF_PCI */
36 #ifdef HIF_USB
37 #include <linux/usb.h>
38 #endif /* HIF_USB */
39 #ifdef IPA_OFFLOAD
40 #include <linux/ipa.h>
41 #endif
42 #include "cfg_ucfg_api.h"
43 #include "qdf_dev.h"
44 #include <wlan_init_cfg.h>
45 
46 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
47 
48 typedef void __iomem *A_target_id_t;
49 typedef void *hif_handle_t;
50 
51 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
52 #define HIF_WORK_DRAIN_WAIT_CNT 50
53 
54 #define HIF_EP_WAKE_RESET_WAIT_CNT 10
55 #endif
56 
57 #define HIF_TYPE_AR6002   2
58 #define HIF_TYPE_AR6003   3
59 #define HIF_TYPE_AR6004   5
60 #define HIF_TYPE_AR9888   6
61 #define HIF_TYPE_AR6320   7
62 #define HIF_TYPE_AR6320V2 8
63 /* For attaching Peregrine 2.0 board host_reg_tbl only */
64 #define HIF_TYPE_AR9888V2 9
65 #define HIF_TYPE_ADRASTEA 10
66 #define HIF_TYPE_AR900B 11
67 #define HIF_TYPE_QCA9984 12
68 #define HIF_TYPE_QCA9888 14
69 #define HIF_TYPE_QCA8074 15
70 #define HIF_TYPE_QCA6290 16
71 #define HIF_TYPE_QCN7605 17
72 #define HIF_TYPE_QCA6390 18
73 #define HIF_TYPE_QCA8074V2 19
74 #define HIF_TYPE_QCA6018  20
75 #define HIF_TYPE_QCN9000 21
76 #define HIF_TYPE_QCA6490 22
77 #define HIF_TYPE_QCA6750 23
78 #define HIF_TYPE_QCA5018 24
79 #define HIF_TYPE_QCN6122 25
80 #define HIF_TYPE_KIWI 26
81 #define HIF_TYPE_QCN9224 27
82 #define HIF_TYPE_QCA9574 28
83 #define HIF_TYPE_MANGO 29
84 
85 #define DMA_COHERENT_MASK_DEFAULT   37
86 
87 #ifdef IPA_OFFLOAD
88 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
89 #endif
90 
/**
 * enum hif_ic_irq - integrated chip irq numbers
 *
 * Defines irq numbers that can be used by external modules like datapath.
 * Values are sequential, starting from host2wbm_desc_feed = 16.
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring4,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
133 
134 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
/**
 * enum hif_legacy_pci_irq - interrupt source indices for legacy PCI interrupts
 *
 * Used when QCA_SUPPORT_LEGACY_INTERRUPTS is enabled. Values are
 * sequential starting from ce0 = 0, so the position of every entry
 * is significant — do not reorder.
 */
enum hif_legacy_pci_irq {
	ce0,
	ce1,
	ce2,
	ce3,
	ce4,
	ce5,
	ce6,
	ce7,
	ce8,
	ce9,
	ce10,
	ce11,
	ce12,
	ce13,
	ce14,
	ce15,
	reo2sw8_intr2,
	reo2sw7_intr2,
	reo2sw6_intr2,
	reo2sw5_intr2,
	reo2sw4_intr2,
	reo2sw3_intr2,
	reo2sw2_intr2,
	reo2sw1_intr2,
	reo2sw0_intr2,
	reo2sw8_intr,
	reo2sw7_intr,
	/* NOTE(review): "inrr" is likely a typo for "intr"; the name is kept
	 * as-is because renaming would break any out-of-file references.
	 */
	reo2sw6_inrr,
	reo2sw5_intr,
	reo2sw4_intr,
	reo2sw3_intr,
	reo2sw2_intr,
	reo2sw1_intr,
	reo2sw0_intr,
	reo2status_intr2,
	reo_status,
	reo2rxdma_out_2,
	reo2rxdma_out_1,
	reo_cmd,
	sw2reo6,
	sw2reo5,
	sw2reo1,
	sw2reo,
	rxdma2reo_mlo_0_dst_ring1,
	rxdma2reo_mlo_0_dst_ring0,
	rxdma2reo_mlo_1_dst_ring1,
	rxdma2reo_mlo_1_dst_ring0,
	rxdma2reo_dst_ring1,
	rxdma2reo_dst_ring0,
	rxdma2sw_dst_ring1,
	rxdma2sw_dst_ring0,
	rxdma2release_dst_ring1,
	rxdma2release_dst_ring0,
	sw2rxdma_2_src_ring,
	sw2rxdma_1_src_ring,
	sw2rxdma_0,
	wbm2sw6_release2,
	wbm2sw5_release2,
	wbm2sw4_release2,
	wbm2sw3_release2,
	wbm2sw2_release2,
	wbm2sw1_release2,
	wbm2sw0_release2,
	wbm2sw6_release,
	wbm2sw5_release,
	wbm2sw4_release,
	wbm2sw3_release,
	wbm2sw2_release,
	wbm2sw1_release,
	wbm2sw0_release,
	wbm2sw_link,
	wbm_error_release,
	sw2txmon_src_ring,
	sw2rxmon_src_ring,
	txmon2sw_p1_intr1,
	txmon2sw_p1_intr0,
	txmon2sw_p0_dest1,
	txmon2sw_p0_dest0,
	rxmon2sw_p1_intr1,
	rxmon2sw_p1_intr0,
	rxmon2sw_p0_dest1,
	rxmon2sw_p0_dest0,
	sw_release,
	sw2tcl_credit2,
	sw2tcl_credit,
	sw2tcl4,
	sw2tcl5,
	sw2tcl3,
	sw2tcl2,
	sw2tcl1,
	sw2wbm1,
	misc_8,
	misc_7,
	misc_6,
	misc_5,
	misc_4,
	misc_3,
	misc_2,
	misc_1,
	misc_0,
};
237 #endif
238 
239 struct CE_state;
240 #ifdef QCA_WIFI_QCN9224
241 #define CE_COUNT_MAX 16
242 #else
243 #define CE_COUNT_MAX 12
244 #endif
245 
246 #ifndef HIF_MAX_GROUP
247 #define HIF_MAX_GROUP WLAN_CFG_INT_NUM_CONTEXTS
248 #endif
249 
250 #ifdef CONFIG_BERYLLIUM
251 #define HIF_MAX_GRP_IRQ 25
252 #else
253 #define HIF_MAX_GRP_IRQ 16
254 #endif
255 
256 #ifndef NAPI_YIELD_BUDGET_BASED
257 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
258 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
259 #endif
260 #else  /* NAPI_YIELD_BUDGET_BASED */
261 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
262 #endif /* NAPI_YIELD_BUDGET_BASED */
263 
264 #define QCA_NAPI_BUDGET    64
265 #define QCA_NAPI_DEF_SCALE  \
266 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
267 
268 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
269 /* NOTE: "napi->scale" can be changed,
270  * but this does not change the number of buckets
271  */
272 #define QCA_NAPI_NUM_BUCKETS 4
273 
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is reenabled
 * @napi_workdone: cumulative of all work done reported by handler
 * @cpu_corrected: incremented when execution context runs on a different core
 *			than the one that its irq is affined to.
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: longest single poll duration observed
 * @poll_time_buckets: histogram of poll times for the napi
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};
302 
303 
/**
 * struct qca_napi_info - per NAPI instance data structure
 * @netdev: dummy net_dev (a NAPI instance must attach to a net_device)
 * @hif_ctx: HIF context this instance belongs to
 * @napi: kernel NAPI instance
 * @scale: budget scale factor; currently same on all instances
 * @id: instance identifier
 * @cpu: cpu associated with this instance
 * @irq: irq line for this instance
 * @cpumask: cpu affinity mask for this instance
 * @stats: per-cpu execution statistics
 * @offld_flush_cb: rx offload flush callback; only present for data rx CEs
 * @rx_thread_napi: NAPI instance used by the rx thread
 * @rx_thread_netdev: dummy net_dev for @rx_thread_napi
 * @lro_ctx: LRO context
 *
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device    netdev; /* dummy net_dev */
	void                 *hif_ctx;
	struct napi_struct   napi;
	uint8_t              scale;   /* currently same on all instances */
	uint8_t              id;
	uint8_t              cpu;
	int                  irq;
	cpumask_t            cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct   rx_thread_napi;
	struct net_device    rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t        lro_ctx;
};
329 
/**
 * enum qca_napi_tput_state - throughput state used for irq affinity decisions
 * @QCA_NAPI_TPUT_UNINITIALIZED: throughput detection not yet initialized
 * @QCA_NAPI_TPUT_LO: low throughput mode
 * @QCA_NAPI_TPUT_HI: high throughput mode
 */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};
/**
 * enum qca_napi_cpu_state - hotplug state of a cpu in the napi cpu table
 * @QCA_NAPI_CPU_UNINITIALIZED: table entry not yet initialized
 * @QCA_NAPI_CPU_DOWN: cpu is offline
 * @QCA_NAPI_CPU_UP: cpu is online
 */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };
339 
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state:       hotplug state of this core (up/down/uninitialized)
 * @core_id:     physical core id of the core
 * @cluster_id:  cluster this core belongs to
 * @core_mask:   mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq:    maximum clock this core can be clocked at
 *               same for all cpus of the same core.
 * @napis:       bitmap of napi instances on this core
 * @execs:       bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *               (table index, not a pointer)
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int			core_id;
	int			cluster_id;
	cpumask_t		core_mask;
	cpumask_t		thread_mask;
	unsigned int		max_freq;
	uint32_t		napis;
	uint32_t		execs;
	int			cluster_nxt;  /* index, not pointer */
};
370 
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: per-CE NAPI instance pointers, indexed by pipe_id
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: presumably head index of the little-cluster core chain —
 *  confirm against the napi cpu table init code
 * @bigcl_head: presumably head index of the big-cluster core chain —
 *  confirm against the napi cpu table init code
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: napi feature flags
 */
struct qca_napi_data {
	struct               hif_softc *hif_softc;
	qdf_spinlock_t       lock;
	uint32_t             state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t             ce_map;
	uint32_t             exec_map;
	uint32_t             user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu  napi_cpu[NR_CPUS];
	int                  lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t              flags;
};
403 
/**
 * struct hif_config_info - Place Holder for HIF configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable Runtime PM (only when FEATURE_RUNTIME_PM is set)
 * @runtime_pm_delay: Runtime PM Delay (only when FEATURE_RUNTIME_PM is set)
 * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
 *
 * Structure for holding HIF ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	uint8_t enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
	uint64_t rx_softirq_max_yield_duration_ns;
};
421 
/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: pointer to hardware name string
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};
439 
/* Empty tag type used as an opaque HIF handle by external clients */
struct hif_opaque_softc {
};
442 
/**
 * enum hif_event_type - Type of DP events to be recorded
 * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
 * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event
 * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event
 * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
 * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
 * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
 * @HIF_EVENT_BH_COMPLETE: NAPI POLL completion event
 * @HIF_EVENT_BH_FORCE_BREAK: NAPI POLL force break event
 */
enum hif_event_type {
	HIF_EVENT_IRQ_TRIGGER,
	HIF_EVENT_TIMER_ENTRY,
	HIF_EVENT_TIMER_EXIT,
	HIF_EVENT_BH_SCHED,
	HIF_EVENT_SRNG_ACCESS_START,
	HIF_EVENT_SRNG_ACCESS_END,
	HIF_EVENT_BH_COMPLETE,
	HIF_EVENT_BH_FORCE_BREAK,
	/* Check hif_hist_skip_event_record when adding new events */
};
465 
/**
 * enum hif_system_pm_state - System PM state
 * @HIF_SYSTEM_PM_STATE_ON: System in active state
 * @HIF_SYSTEM_PM_STATE_BUS_RESUMING: bus resume in progress as part of
 *  system resume
 * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDING: bus suspend in progress as part of
 *  system suspend
 * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDED: bus suspended as part of system suspend
 */
enum hif_system_pm_state {
	HIF_SYSTEM_PM_STATE_ON,
	HIF_SYSTEM_PM_STATE_BUS_RESUMING,
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDING,
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDED,
};
481 
482 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
483 #define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP
484 
485 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
486 /* HIF_EVENT_HIST_MAX should always be power of 2 */
487 #define HIF_EVENT_HIST_MAX		512
488 
489 #define HIF_EVENT_HIST_ENABLE_MASK	0xFF
490 
/* Debug builds: use the QDF log timestamp domain for event history */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_get_log_timestamp();
}
495 
496 #else
497 
498 #define HIF_EVENT_HIST_MAX		32
499 /* Enable IRQ TRIGGER, NAPI SCHEDULE, SRNG ACCESS START */
500 #define HIF_EVENT_HIST_ENABLE_MASK	0x19
501 
/* Perf builds: use the cheaper scheduler clock for event history */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_sched_clock();
}
506 
507 #endif
508 
/**
 * struct hif_event_record - an entry of the DP event history
 * @hal_ring_id: ring id for which event is recorded
 * @hp: head pointer of the ring (may not be applicable for all events)
 * @tp: tail pointer of the ring (may not be applicable for all events)
 * @cpu_id: cpu id on which the event occurred
 * @timestamp: timestamp when event occurred
 * @type: type of the event
 *
 * This structure represents the information stored for every datapath
 * event which is logged in the history.
 */
struct hif_event_record {
	uint8_t hal_ring_id;
	uint32_t hp;
	uint32_t tp;
	int cpu_id;
	uint64_t timestamp;
	enum hif_event_type type;
};
529 
/**
 * struct hif_event_misc - history related misc info
 * @last_irq_index: last irq event index in history
 * @last_irq_ts: last irq timestamp
 */
struct hif_event_misc {
	int32_t last_irq_index;
	uint64_t last_irq_ts;
};
539 
/**
 * struct hif_event_history - history for one interrupt group
 * @index: index to store new event
 * @misc: history related misc info (last irq bookkeeping)
 * @event: event entries, a power-of-2 sized circular buffer
 *
 * This structure represents the datapath history for one
 * interrupt group.
 */
struct hif_event_history {
	qdf_atomic_t index;
	struct hif_event_misc misc;
	struct hif_event_record event[HIF_EVENT_HIST_MAX];
};
553 
554 /**
555  * hif_hist_record_event() - Record one datapath event in history
556  * @hif_ctx: HIF opaque context
557  * @event: DP event entry
558  * @intr_grp_id: interrupt group ID registered with hif
559  *
560  * Return: None
561  */
562 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
563 			   struct hif_event_record *event,
564 			   uint8_t intr_grp_id);
565 
566 /**
567  * hif_event_history_init() - Initialize SRNG event history buffers
568  * @hif_ctx: HIF opaque context
569  * @id: context group ID for which history is recorded
570  *
571  * Returns: None
572  */
573 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id);
574 
575 /**
576  * hif_event_history_deinit() - De-initialize SRNG event history buffers
577  * @hif_ctx: HIF opaque context
578  * @id: context group ID for which history is recorded
579  *
580  * Returns: None
581  */
582 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id);
583 
584 /**
585  * hif_record_event() - Wrapper function to form and record DP event
586  * @hif_ctx: HIF opaque context
587  * @intr_grp_id: interrupt group ID registered with hif
588  * @hal_ring_id: ring id for which event is recorded
589  * @hp: head pointer index of the srng
590  * @tp: tail pointer index of the srng
591  * @type: type of the event to be logged in history
592  *
593  * Return: None
594  */
595 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
596 				    uint8_t intr_grp_id,
597 				    uint8_t hal_ring_id,
598 				    uint32_t hp,
599 				    uint32_t tp,
600 				    enum hif_event_type type)
601 {
602 	struct hif_event_record event;
603 
604 	event.hal_ring_id = hal_ring_id;
605 	event.hp = hp;
606 	event.tp = tp;
607 	event.type = type;
608 
609 	hif_hist_record_event(hif_ctx, &event, intr_grp_id);
610 
611 	return;
612 }
613 
614 #else
615 
/* No-op stub when WLAN_FEATURE_DP_EVENT_HISTORY is disabled */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}
624 
/* No-op stub when WLAN_FEATURE_DP_EVENT_HISTORY is disabled */
static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx,
					  uint8_t id)
{
}
629 
/* No-op stub when WLAN_FEATURE_DP_EVENT_HISTORY is disabled */
static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx,
					    uint8_t id)
{
}
634 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
635 
636 void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx);
637 
638 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
639 void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx);
640 #else
/* No-op stub when CE descriptor history debug support is compiled out */
static
inline void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx) {}
643 #endif
644 
/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE - Device Power change type
 *
 * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
 *                         platform-specific measures to completely power-off
 *                         the module and associated hardware (i.e. cut power
 *                         supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};
661 
/**
 * enum hif_enable_type - what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 * @HIF_ENABLE_TYPE_MAX: upper bound marker, not a real trigger
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};
673 
/**
 * enum hif_disable_type - what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 * @HIF_DISABLE_TYPE_MAX: upper bound marker, not a real trigger
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
/**
 * enum hif_device_config_opcode - configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get FIFO address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
725 
726 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/**
 * struct HID_ACCESS_LOG - one entry of the PCIe register access debug log
 * @seqnum: sequence number of the access
 * @is_write: true for a write access, false for a read
 * @addr: register address accessed
 * @value: value written or read
 */
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
733 #endif
734 
735 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
736 		uint32_t value);
737 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
738 
739 #define HIF_MAX_DEVICES                 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context:          context to pass to the @dsr_handler
 *                    note : @rw_compl_handler is provided the context
 *                    passed to hif_read_write
 * @rw_compl_handler: Read / write completion handler
 * @dsr_handler:      DSR Handler (invoked with @context)
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};
753 
/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 * @get_bandwidth_level: Query current bandwidth level for the driver
 * @prealloc_get_consistent_mem_unaligned: get prealloc unaligned consistent mem
 * @prealloc_put_consistent_mem_unaligned: put unaligned consistent mem to pool
 *
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
	int (*get_bandwidth_level)(void *context);
	void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size,
						       qdf_dma_addr_t *paddr,
						       uint32_t ring_type);
	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
};
780 
781 /* This API detaches the HTC layer from the HIF device */
782 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
783 
784 /****************************************************************/
785 /* BMI and Diag window abstraction                              */
786 /****************************************************************/
787 
788 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
789 
790 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
791 				     * handled atomically by
792 				     * DiagRead/DiagWrite
793 				     */
794 
795 #ifdef WLAN_FEATURE_BMI
796 /*
797  * API to handle HIF-specific BMI message exchanges, this API is synchronous
798  * and only allowed to be called from a context that can block (sleep)
799  */
800 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
801 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
802 				uint8_t *pSendMessage, uint32_t Length,
803 				uint8_t *pResponseMessage,
804 				uint32_t *pResponseLength, uint32_t TimeoutMS);
805 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
806 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
807 #else /* WLAN_FEATURE_BMI */
/* No-op stub when WLAN_FEATURE_BMI is disabled */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}
812 
/* BMI is never needed when WLAN_FEATURE_BMI is disabled */
static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
818 #endif /* WLAN_FEATURE_BMI */
819 
820 #ifdef HIF_CPU_CLEAR_AFFINITY
821 /**
822  * hif_config_irq_clear_cpu_affinity() - Remove cpu affinity of IRQ
823  * @scn: HIF handle
824  * @intr_ctxt_id: interrupt group index
825  * @cpu: CPU core to clear
826  *
827  * Return: None
828  */
829 void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
830 				       int intr_ctxt_id, int cpu);
831 #else
/* No-op stub when HIF_CPU_CLEAR_AFFINITY is disabled */
static inline
void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
				       int intr_ctxt_id, int cpu)
{
}
837 #endif
838 
839 /*
840  * APIs to handle HIF specific diagnostic read accesses. These APIs are
841  * synchronous and only allowed to be called from a context that
842  * can block (sleep). They are not high performance APIs.
843  *
844  * hif_diag_read_access reads a 4 Byte aligned/length value from a
845  * Target register or memory word.
846  *
847  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
848  */
849 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
850 				uint32_t address, uint32_t *data);
851 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
852 		      uint8_t *data, int nbytes);
853 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
854 			void *ramdump_base, uint32_t address, uint32_t size);
855 /*
856  * APIs to handle HIF specific diagnostic write accesses. These APIs are
857  * synchronous and only allowed to be called from a context that
858  * can block (sleep).
859  * They are not high performance APIs.
860  *
861  * hif_diag_write_access writes a 4 Byte aligned/length value to a
862  * Target register or memory word.
863  *
864  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
865  */
866 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
867 				 uint32_t address, uint32_t data);
868 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
869 			uint32_t address, uint8_t *data, int nbytes);
870 
871 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
872 
873 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
874 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
875 
876 /*
877  * Set the FASTPATH_mode_on flag in sc, for use by data path
878  */
879 #ifdef WLAN_FEATURE_FASTPATH
880 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
881 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
882 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
883 
/**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @hif_ctx: HIF opaque context
 * @handler: Callback function
 * @context: handle for callback function
 *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
 */
891 QDF_STATUS hif_ce_fastpath_cb_register(
892 		struct hif_opaque_softc *hif_ctx,
893 		fastpath_msg_handler handler, void *context);
894 #else
/* Fastpath registration always fails when WLAN_FEATURE_FASTPATH is off */
static inline QDF_STATUS hif_ce_fastpath_cb_register(
		struct hif_opaque_softc *hif_ctx,
		fastpath_msg_handler handler, void *context)
{
	return QDF_STATUS_E_FAILURE;
}
901 
/* No CE handle available when WLAN_FEATURE_FASTPATH is off */
static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
906 
907 #endif
908 
909 /*
910  * Enable/disable CDC max performance workaround
911  * For max-performace set this to 0
912  * To allow SoC to enter sleep set this to 1
913  */
914 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
915 
916 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
917 			     qdf_shared_mem_t **ce_sr,
918 			     uint32_t *ce_sr_ring_size,
919 			     qdf_dma_addr_t *ce_reg_paddr);
920 
/**
 * struct hif_msg_callbacks - list of callbacks, filled in by HTC
 * @Context: context meaningful to HTC
 * @txCompletionHandler: tx send-complete callback
 * @rxCompletionHandler: rx receive-complete callback
 * @txResourceAvailHandler: called when tx resources become available on a pipe
 * @fwEventHandler: firmware event notification callback
 * @update_bundle_stats: per-bundle packet count stats callback
 */
struct hif_msg_callbacks {
	void *Context;
	/**< context meaningful to HTC */
	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint32_t transferID,
					uint32_t toeplitz_hash_result);
	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint8_t pipeID);
	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
	void (*fwEventHandler)(void *context, QDF_STATUS status);
	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
};
936 
/**
 * enum hif_target_status - connection state of the target
 * @TARGET_STATUS_CONNECTED: target connected
 * @TARGET_STATUS_RESET: target got reset
 * @TARGET_STATUS_EJECT: target got ejected
 * @TARGET_STATUS_SUSPEND: target got suspended
 */
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,  /* target got reset */
	TARGET_STATUS_EJECT,  /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
943 
/**
 * enum hif_attribute_flags - configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor CE and
 *  no pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};
955 
/*
 * HIF_DATA_ATTR_SET_* - OR a field value into a CE data-attribute word.
 * Each macro masks @v to its field width and shifts it into the field's
 * bit position within @attr. @attr must be a modifiable lvalue; bits
 * already set in @attr are not cleared first.
 */
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	(attr |= (v & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	(attr |= (v & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	(attr |= (v & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	(attr |= (v & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	(attr |= (v & 0x01) << 30)
970 
/**
 * struct hif_ul_pipe_info - state snapshot of an uplink copy-engine pipe
 * @nentries: number of ring entries
 * @nentries_mask: mask applied for ring-index wrap-around
 * @sw_index: software ring index
 * @write_index: write index (cached copy)
 * @hw_index: hardware index (cached copy)
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (device) address space
 */
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
980 
/**
 * struct hif_dl_pipe_info - state snapshot of a downlink copy-engine pipe
 * @nentries: number of ring entries
 * @nentries_mask: mask applied for ring-index wrap-around
 * @sw_index: software ring index
 * @write_index: write index (cached copy)
 * @hw_index: hardware index (cached copy)
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (device) address space
 */
struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
990 
/**
 * struct hif_pipe_addl_info - additional per-pipe information
 * @pci_mem: PCI memory base (NOTE(review): presumably the BAR base —
 *	confirm against callers)
 * @ctrl_addr: CE control register address/offset
 * @ul_pipe: uplink pipe state
 * @dl_pipe: downlink pipe state
 */
struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
997 
998 #ifdef CONFIG_SLUB_DEBUG_ON
999 #define MSG_FLUSH_NUM 16
1000 #else /* PERF build */
1001 #define MSG_FLUSH_NUM 32
1002 #endif /* SLUB_DEBUG_ON */
1003 
1004 struct hif_bus_id;
1005 
1006 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
1007 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
1008 		     int opcode, void *config, uint32_t config_len);
1009 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
1010 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
1011 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
1012 		   struct hif_msg_callbacks *callbacks);
1013 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
1014 void hif_stop(struct hif_opaque_softc *hif_ctx);
1015 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
1016 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
1017 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
1018 		      uint8_t cmd_id, bool start);
1019 
1020 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1021 				  uint32_t transferID, uint32_t nbytes,
1022 				  qdf_nbuf_t wbuf, uint32_t data_attr);
1023 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1024 			     int force);
1025 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1026 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
1027 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
1028 			  uint8_t *DLPipe);
1029 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
1030 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
1031 			int *dl_is_polled);
1032 uint16_t
1033 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1034 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
1035 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
1036 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
1037 		     bool wait_for_it);
1038 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
#ifndef HIF_PCI
/* Non-PCI builds have no SoC status to check; always report success. */
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
#else
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif
1047 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1048 			u32 *revision, const char **target_name);
1049 
1050 #ifdef RECEIVE_OFFLOAD
1051 /**
1052  * hif_offld_flush_cb_register() - Register the offld flush callback
1053  * @scn: HIF opaque context
1054  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
1055  *			 Or GRO/LRO flush when RxThread is not enabled. Called
1056  *			 with corresponding context for flush.
1057  * Return: None
1058  */
1059 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
1060 				 void (offld_flush_handler)(void *ol_ctx));
1061 
1062 /**
1063  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
1064  * @scn: HIF opaque context
1065  *
1066  * Return: None
1067  */
1068 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
1069 #endif
1070 
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * hif_exec_should_yield() - Check if hif napi context should yield
 * @hif_ctx: HIF opaque context
 * @grp_id: grp_id of the napi for which check needs to be done
 *
 * The function uses grp_id to look for NAPI and checks if NAPI needs to
 * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
 * yield decision.
 *
 * Return: true if NAPI needs to yield, else false
 */
bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
#else
/* Without the rx softirq time limit feature, a NAPI context never yields. */
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
					 uint grp_id)
{
	return false;
}
#endif
1091 
1092 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
1093 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
1094 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
1095 				      int htc_htt_tx_endpoint);
1096 
1097 /**
1098  * hif_open() - Create hif handle
1099  * @qdf_ctx: qdf context
1100  * @mode: Driver Mode
1101  * @bus_type: Bus Type
1102  * @cbk: CDS Callbacks
1103  * @psoc: psoc object manager
1104  *
1105  * API to open HIF Context
1106  *
1107  * Return: HIF Opaque Pointer
1108  */
1109 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1110 				  uint32_t mode,
1111 				  enum qdf_bus_type bus_type,
1112 				  struct hif_driver_state_callbacks *cbk,
1113 				  struct wlan_objmgr_psoc *psoc);
1114 
1115 /**
1116  * hif_init_dma_mask() - Set dma mask for the dev
1117  * @dev: dev for which DMA mask is to be set
1118  * @bus_type: bus type for the target
1119  *
1120  * This API sets the DMA mask for the device. before the datapath
1121  * memory pre-allocation is done. If the DMA mask is not set before
1122  * requesting the DMA memory, kernel defaults to a 32-bit DMA mask,
1123  * and does not utilize the full device capability.
1124  *
1125  * Return: 0 - success, non-zero on failure.
1126  */
1127 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type);
1128 void hif_close(struct hif_opaque_softc *hif_ctx);
1129 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1130 		      void *bdev, const struct hif_bus_id *bid,
1131 		      enum qdf_bus_type bus_type,
1132 		      enum hif_enable_type type);
1133 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
1134 #ifdef CE_TASKLET_DEBUG_ENABLE
1135 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
1136 				 uint8_t value);
1137 #endif
1138 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
1139 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
1140 
1141 /**
1142  * enum hif_pm_wake_irq_type - Wake interrupt type for Power Management
1143  * HIF_PM_INVALID_WAKE: Wake irq is invalid or not configured
1144  * HIF_PM_MSI_WAKE: Wake irq is MSI interrupt
1145  * HIF_PM_CE_WAKE: Wake irq is CE interrupt
1146  */
1147 typedef enum {
1148 	HIF_PM_INVALID_WAKE,
1149 	HIF_PM_MSI_WAKE,
1150 	HIF_PM_CE_WAKE,
1151 } hif_pm_wake_irq_type;
1152 
1153 /**
1154  * hif_pm_get_wake_irq_type - Get wake irq type for Power Management
1155  * @hif_ctx: HIF context
1156  *
1157  * Return: enum hif_pm_wake_irq_type
1158  */
1159 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx);
1160 
1161 /**
1162  * enum hif_ep_vote_type - hif ep vote type
1163  * HIF_EP_VOTE_DP_ACCESS: vote type is specific DP
1164  * HIF_EP_VOTE_NONDP_ACCESS: ep vote for over all access
1165  */
1166 enum hif_ep_vote_type {
1167 	HIF_EP_VOTE_DP_ACCESS,
1168 	HIF_EP_VOTE_NONDP_ACCESS
1169 };
1170 
1171 /**
1172  * enum hif_ep_vote_access - hif ep vote access
1173  * HIF_EP_VOTE_ACCESS_ENABLE: Enable ep voting
1174  * HIF_EP_VOTE_INTERMEDIATE_ACCESS: allow during transistion
1175  * HIF_EP_VOTE_ACCESS_DISABLE: disable ep voting
1176  */
1177 enum hif_ep_vote_access {
1178 	HIF_EP_VOTE_ACCESS_ENABLE,
1179 	HIF_EP_VOTE_INTERMEDIATE_ACCESS,
1180 	HIF_EP_VOTE_ACCESS_DISABLE
1181 };
1182 
1183 /**
1184  * enum hif_rpm_id - modules registered with runtime pm module
1185  * @HIF_RTPM_ID_RESERVED: Reserved ID
1186  * @HIF_RTPM_ID_HAL_REO_CMD: HAL REO commands
1187  * @HIF_RTPM_ID_WMI: WMI commands Tx
1188  * @HIF_RTPM_ID_HTT: HTT commands Tx
1189  * @HIF_RTPM_ID_DP_TX: Datapath Tx path
1190  * @HIF_RTPM_ID_DP_RING_STATS: Datapath ring stats
1191  * @HIF_RTPM_ID_CE_SEND_FAST: CE Tx buffer posting
1192  * @HIF_RTPM_ID_FORCE_WAKE: Force wake request
1193  * @HIF_RTPM_ID_PREVENT_LINKDOWN: Prevent linkdown by not allowing runtime PM
1194  * @HIF_RTPM_ID_PREVENT_ALLOW_LOCK: Generic ID for runtime PM lock contexts
1195  * @HIF_RTPM_ID_MAX: Max id
1196  */
1197 enum  hif_rtpm_client_id {
1198 	HIF_RTPM_ID_RESERVED,
1199 	HIF_RTPM_ID_HAL_REO_CMD,
1200 	HIF_RTPM_ID_WMI,
1201 	HIF_RTPM_ID_HTT,
1202 	HIF_RTPM_ID_DP,
1203 	HIF_RTPM_ID_DP_RING_STATS,
1204 	HIF_RTPM_ID_CE,
1205 	HIF_RTPM_ID_FORCE_WAKE,
1206 	HIF_RTPM_ID_PM_QOS_NOTIFY,
1207 	HIF_RTPM_ID_WIPHY_SUSPEND,
1208 	HIF_RTPM_ID_MAX
1209 };
1210 
1211 /**
1212  * enum hif_rpm_type - Get and Put calls types
1213  * HIF_RTPM_GET_ASYNC: Increment usage count and when system is suspended
1214  *		      schedule resume process, return depends on pm state.
1215  * HIF_RTPM_GET_FORCE: Increment usage count and when system is suspended
1216  *		      shedule resume process, returns success irrespective of
1217  *		      pm_state.
1218  * HIF_RTPM_GET_SYNC: Increment usage count and when system is suspended,
1219  *		     wait till process is resumed.
1220  * HIF_RTPM_GET_NORESUME: Only increments usage count.
1221  * HIF_RTPM_PUT_ASYNC: Decrements usage count and puts system in idle state.
1222  * HIF_RTPM_PUT_SYNC_SUSPEND: Decrements usage count and puts system in
1223  *			     suspended state.
1224  * HIF_RTPM_PUT_NOIDLE: Decrements usage count.
1225  */
1226 enum rpm_type {
1227 	HIF_RTPM_GET_ASYNC,
1228 	HIF_RTPM_GET_FORCE,
1229 	HIF_RTPM_GET_SYNC,
1230 	HIF_RTPM_GET_NORESUME,
1231 	HIF_RTPM_PUT_ASYNC,
1232 	HIF_RTPM_PUT_SYNC_SUSPEND,
1233 	HIF_RTPM_PUT_NOIDLE,
1234 };
1235 
1236 /**
1237  * struct hif_pm_runtime_lock - data structure for preventing runtime suspend
1238  * @list - global list of runtime locks
1239  * @active - true if this lock is preventing suspend
1240  * @name - character string for tracking this lock
1241  */
1242 struct hif_pm_runtime_lock {
1243 	struct list_head list;
1244 	bool active;
1245 	const char *name;
1246 };
1247 
1248 #ifdef FEATURE_RUNTIME_PM
1249 /**
1250  * hif_rtpm_register() - Register a module with runtime PM.
1251  * @id: ID of the module which needs to be registered
1252  * @hif_rpm_cbk: callback to be called when get was called in suspended state.
1253  * @prevent_multiple_get: not allow simultaneous get calls or put calls
1254  *
1255  * Return: success status if successfully registered
1256  */
1257 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void));
1258 
1259 /**
1260  * hif_rtpm_deregister() - Deregister the module
1261  * @id: ID of the module which needs to be de-registered
1262  */
1263 QDF_STATUS hif_rtpm_deregister(uint32_t id);
1264 
1265 /**
1266  * hif_runtime_lock_init() - API to initialize Runtime PM context
1267  * @lock: QDF lock context
1268  * @name: Context name
1269  *
1270  * This API initializes the Runtime PM context of the caller and
1271  * return the pointer.
1272  *
1273  * Return: None
1274  */
1275 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
1276 
1277 /**
1278  * hif_runtime_lock_deinit() - This API frees the runtime pm context
1279  * @data: Runtime PM context
1280  *
1281  * Return: void
1282  */
1283 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data);
1284 
1285 /**
1286  * hif_rtpm_get() - Increment usage_count on the device to avoid suspend.
1287  * @type: get call types from hif_rpm_type
1288  * @id: ID of the module calling get()
1289  *
1290  * A get operation will prevent a runtime suspend until a
1291  * corresponding put is done.  This api should be used when accessing bus.
1292  *
1293  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
1294  * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!!
1295  *
1296  * return: success if a get has been issued, else error code.
1297  */
1298 QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id);
1299 
1300 /**
1301  * hif_pm_runtime_put() - do a put operation on the device
1302  * @type: put call types from hif_rpm_type
1303  * @id: ID of the module calling put()
1304  *
1305  * A put operation will allow a runtime suspend after a corresponding
1306  * get was done.  This api should be used when finished accessing bus.
1307  *
1308  * This api will return a failure if runtime pm is stopped
1309  * This api will return failure if it would decrement the usage count below 0.
1310  *
1311  * return: QDF_STATUS_SUCCESS if the put is performed
1312  */
1313 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id);
1314 
1315 /**
1316  * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend
1317  * @data: runtime PM lock
1318  *
1319  * This function will prevent runtime suspend, by incrementing
1320  * device's usage count.
1321  *
1322  * Return: status
1323  */
1324 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data);
1325 
1326 /**
1327  * hif_pm_runtime_prevent_suspend_sync() - Synchronized prevent Runtime suspend
1328  * @data: runtime PM lock
1329  *
1330  * This function will prevent runtime suspend, by incrementing
1331  * device's usage count.
1332  *
1333  * Return: status
1334  */
1335 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data);
1336 
1337 /**
1338  * hif_pm_runtime_allow_suspend() - Allow Runtime suspend
1339  * @data: runtime PM lock
1340  *
1341  * This function will allow runtime suspend, by decrementing
1342  * device's usage count.
1343  *
1344  * Return: status
1345  */
1346 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data);
1347 
1348 /**
1349  * hif_rtpm_request_resume() - Request resume if bus is suspended
1350  *
1351  * Return: None
1352  */
1353 void hif_rtpm_request_resume(void);
1354 
1355 /**
1356  * hif_rtpm_sync_resume() - Invoke synchronous runtime resume.
1357  *
1358  * This function will invoke synchronous runtime resume.
1359  *
1360  * Return: status
1361  */
1362 QDF_STATUS hif_rtpm_sync_resume(void);
1363 
1364 /**
1365  * hif_rtpm_check_and_request_resume() - check if bus is suspended and
1366  *                                       request resume.
1367  *
1368  * Return: void
1369  */
1370 void hif_rtpm_check_and_request_resume(void);
1371 
1372 /**
1373  * hif_rtpm_set_client_job() - Set job for the client.
1374  * @client_id: Client id for which job needs to be set
1375  *
1376  * If get failed due to system being in suspended state, set the client job so
1377  * when system resumes the client's job is called.
1378  *
1379  * Return: None
1380  */
1381 void hif_rtpm_set_client_job(uint32_t client_id);
1382 
1383 /**
1384  * hif_rtpm_mark_last_busy() - Mark last busy to delay retry to suspend
1385  * @id: ID marking last busy
1386  *
1387  * Return: None
1388  */
1389 void hif_rtpm_mark_last_busy(uint32_t id);
1390 
1391 /**
1392  * hif_rtpm_get_monitor_wake_intr() - API to get monitor_wake_intr
1393  *
1394  * monitor_wake_intr variable can be used to indicate if driver expects wake
1395  * MSI for runtime PM
1396  *
1397  * Return: monitor_wake_intr variable
1398  */
1399 int hif_rtpm_get_monitor_wake_intr(void);
1400 
1401 /**
1402  * hif_rtpm_set_monitor_wake_intr() - API to set monitor_wake_intr
1403  * @val: value to set
1404  *
1405  * monitor_wake_intr variable can be used to indicate if driver expects wake
1406  * MSI for runtime PM
1407  *
1408  * Return: void
1409  */
1410 void hif_rtpm_set_monitor_wake_intr(int val);
1411 
1412 /**
1413  * hif_pre_runtime_suspend() - book keeping before beginning runtime suspend.
1414  * @hif_ctx: HIF context
1415  *
1416  * Makes sure that the pci link will be taken down by the suspend opperation.
1417  * If the hif layer is configured to leave the bus on, runtime suspend will
1418  * not save any power.
1419  *
1420  * Set the runtime suspend state to SUSPENDING.
1421  *
1422  * return -EINVAL if the bus won't go down.  otherwise return 0
1423  */
1424 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1425 
1426 /**
1427  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
1428  *
1429  * update the runtime pm state to RESUMING.
1430  * Return: void
1431  */
1432 void hif_pre_runtime_resume(void);
1433 
1434 /**
1435  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
1436  *
1437  * Record the success.
1438  * update the runtime_pm state to SUSPENDED
1439  * Return: void
1440  */
1441 void hif_process_runtime_suspend_success(void);
1442 
1443 /**
1444  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
1445  *
1446  * Record the failure.
1447  * mark last busy to delay a retry.
1448  * update the runtime_pm state back to ON
1449  *
1450  * Return: void
1451  */
1452 void hif_process_runtime_suspend_failure(void);
1453 
1454 /**
1455  * hif_process_runtime_suspend_failure() - bookkeeping of resuming link up
1456  *
1457  * update the runtime_pm state to RESUMING_LINKUP
1458  * Return: void
1459  */
1460 void hif_process_runtime_resume_linkup(void);
1461 
1462 /**
1463  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
1464  *
1465  * record the success.
1466  * update the runtime_pm state to SUSPENDED
1467  * Return: void
1468  */
1469 void hif_process_runtime_resume_success(void);
1470 
1471 /**
1472  * hif_rtpm_print_prevent_list() - list the clients preventing suspend.
1473  *
1474  * Return: None
1475  */
1476 void hif_rtpm_print_prevent_list(void);
1477 
1478 /**
1479  * hif_rtpm_suspend_lock() - spin_lock on marking runtime suspend
1480  *
1481  * Return: void
1482  */
1483 void hif_rtpm_suspend_lock(void);
1484 
1485 /**
1486  * hif_rtpm_suspend_unlock() - spin_unlock on marking runtime suspend
1487  *
1488  * Return: void
1489  */
1490 void hif_rtpm_suspend_unlock(void);
1491 
1492 /**
1493  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
1494  * @hif_ctx: HIF context
1495  *
1496  * Return: 0 for success and non-zero error code for failure
1497  */
1498 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1499 
1500 /**
1501  * hif_runtime_resume() - do the bus resume part of a runtime resume
1502  * @hif_ctx: HIF context
1503  *
1504  * Return: 0 for success and non-zero error code for failure
1505  */
1506 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
1507 
1508 /**
1509  * hif_fastpath_resume() - resume fastpath for runtimepm
1510  * @hif_ctx: HIF context
1511  *
1512  * ensure that the fastpath write index register is up to date
1513  * since runtime pm may cause ce_send_fast to skip the register
1514  * write.
1515  *
1516  * fastpath only applicable to legacy copy engine
1517  */
1518 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
1519 
1520 /**
1521  * hif_rtpm_get_state(): get rtpm link state
1522  *
1523  * Return: state
1524  */
1525 int hif_rtpm_get_state(void);
1526 #else
/* Stubs used when FEATURE_RUNTIME_PM is not compiled in:
 * registration and lock init/deinit trivially succeed.
 */
static inline
QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void))
{ return QDF_STATUS_SUCCESS; }

static inline
QDF_STATUS hif_rtpm_deregister(uint32_t id)
{ return QDF_STATUS_SUCCESS; }

static inline
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
{ return 0; }

static inline
void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data)
{}
1542 
1543 static inline
1544 int hif_rtpm_get(uint8_t type, uint32_t id)
1545 { return QDF_STATUS_SUCCESS; }
1546 
/* Remaining runtime-PM entry points collapse to no-ops / success when
 * FEATURE_RUNTIME_PM is not compiled in.
 */
static inline
QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
{ return QDF_STATUS_SUCCESS; }

static inline
int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data)
{ return 0; }

static inline
int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data)
{ return 0; }

static inline
int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data)
{ return 0; }

static inline
QDF_STATUS hif_rtpm_sync_resume(void)
{ return QDF_STATUS_SUCCESS; }

static inline
void hif_rtpm_request_resume(void)
{}

static inline
void hif_rtpm_check_and_request_resume(void)
{}

static inline
void hif_rtpm_set_client_job(uint32_t client_id)
{}

static inline
void hif_rtpm_print_prevent_list(void)
{}

static inline
void hif_rtpm_suspend_unlock(void)
{}

static inline
void hif_rtpm_suspend_lock(void)
{}

static inline
int hif_rtpm_get_monitor_wake_intr(void)
{ return 0; }

static inline
void hif_rtpm_set_monitor_wake_intr(int val)
{}

static inline
void hif_rtpm_mark_last_busy(uint32_t id)
{}
1602 #endif
1603 
1604 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
1605 				 bool is_packet_log_enabled);
1606 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
1607 
1608 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
1609 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
1610 
1611 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
1612 
1613 #ifdef IPA_OFFLOAD
1614 /**
1615  * hif_get_ipa_hw_type() - get IPA hw type
1616  *
1617  * This API return the IPA hw type.
1618  *
1619  * Return: IPA hw type
1620  */
1621 static inline
1622 enum ipa_hw_type hif_get_ipa_hw_type(void)
1623 {
1624 	return ipa_get_hw_type();
1625 }
1626 
1627 /**
1628  * hif_get_ipa_present() - get IPA hw status
1629  *
1630  * This API return the IPA hw status.
1631  *
1632  * Return: true if IPA is present or false otherwise
1633  */
1634 static inline
1635 bool hif_get_ipa_present(void)
1636 {
1637 	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
1638 		return true;
1639 	else
1640 		return false;
1641 }
1642 #endif
1643 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
1644 /**
1645  * hif_bus_ealry_suspend() - stop non wmi tx traffic
1646  * @context: hif context
1647  */
1648 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
1649 
1650 /**
1651  * hif_bus_late_resume() - resume non wmi traffic
1652  * @context: hif context
1653  */
1654 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
1655 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
1656 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
1657 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
1658 
1659 /**
1660  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
1661  * @hif_ctx: an opaque HIF handle to use
1662  *
1663  * As opposed to the standard hif_irq_enable, this function always applies to
1664  * the APPS side kernel interrupt handling.
1665  *
1666  * Return: errno
1667  */
1668 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
1669 
1670 /**
1671  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
1672  * @hif_ctx: an opaque HIF handle to use
1673  *
1674  * As opposed to the standard hif_irq_disable, this function always applies to
1675  * the APPS side kernel interrupt handling.
1676  *
1677  * Return: errno
1678  */
1679 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
1680 
1681 /**
1682  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
1683  * @hif_ctx: an opaque HIF handle to use
1684  *
1685  * As opposed to the standard hif_irq_enable, this function always applies to
1686  * the APPS side kernel interrupt handling.
1687  *
1688  * Return: errno
1689  */
1690 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
1691 
1692 /**
1693  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
1694  * @hif_ctx: an opaque HIF handle to use
1695  *
1696  * As opposed to the standard hif_irq_disable, this function always applies to
1697  * the APPS side kernel interrupt handling.
1698  *
1699  * Return: errno
1700  */
1701 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
1702 
1703 /**
1704  * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side
1705  * @hif_ctx: an opaque HIF handle to use
1706  *
1707  * This function always applies to the APPS side kernel interrupt handling
1708  * to wake the system from suspend.
1709  *
1710  * Return: errno
1711  */
1712 int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx);
1713 
1714 /**
1715  * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side
1716  * @hif_ctx: an opaque HIF handle to use
1717  *
1718  * This function always applies to the APPS side kernel interrupt handling
1719  * to disable the wake irq.
1720  *
1721  * Return: errno
1722  */
1723 int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx);
1724 
1725 /**
1726  * hif_apps_enable_irqs_except_wake_irq() - Enables all irqs except wake_irq
1727  * @hif_ctx: an opaque HIF handle to use
1728  *
1729  * As opposed to the standard hif_irq_enable, this function always applies to
1730  * the APPS side kernel interrupt handling.
1731  *
1732  * Return: errno
1733  */
1734 int hif_apps_enable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1735 
1736 /**
1737  * hif_apps_disable_irqs_except_wake_irq() - Disables all irqs except wake_irq
1738  * @hif_ctx: an opaque HIF handle to use
1739  *
1740  * As opposed to the standard hif_irq_disable, this function always applies to
1741  * the APPS side kernel interrupt handling.
1742  *
1743  * Return: errno
1744  */
1745 int hif_apps_disable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1746 
1747 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
1748 int hif_dump_registers(struct hif_opaque_softc *scn);
1749 int ol_copy_ramdump(struct hif_opaque_softc *scn);
1750 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
1751 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1752 		     u32 *revision, const char **target_name);
1753 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
1754 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
1755 						   scn);
1756 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
1757 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
1758 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
1759 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1760 			   hif_target_status);
1761 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1762 			 struct hif_config_info *cfg);
1763 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
1764 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1765 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
1766 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1767 			   uint32_t transfer_id, u_int32_t len);
1768 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
1769 	uint32_t transfer_id, uint32_t download_len);
1770 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
1771 void hif_ce_war_disable(void);
1772 void hif_ce_war_enable(void);
1773 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
1774 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
1775 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
1776 		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
1777 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
1778 		uint32_t pipe_num);
1779 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
1780 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
1781 
1782 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
1783 				int rx_bundle_cnt);
1784 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
1785 
1786 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
1787 
1788 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
1789 
1790 enum hif_exec_type {
1791 	HIF_EXEC_NAPI_TYPE,
1792 	HIF_EXEC_TASKLET_TYPE,
1793 };
1794 
1795 typedef uint32_t (*ext_intr_handler)(void *, uint32_t, int);
1796 
1797 /**
1798  * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
1799  * @softc: hif opaque context owning the exec context
1800  * @id: the id of the interrupt context
1801  *
1802  * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
1803  *         'id' registered with the OS
1804  */
1805 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
1806 				uint8_t id);
1807 
1808 /**
1809  * hif_configure_ext_group_interrupts() - Congigure ext group intrrupts
1810  * @hif_ctx: hif opaque context
1811  *
1812  * Return: QDF_STATUS
1813  */
1814 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1815 
1816 /**
1817  * hif_deconfigure_ext_group_interrupts() - Deconfigure ext group intrrupts
1818  * @hif_ctx: hif opaque context
1819  *
1820  * Return: None
1821  */
1822 void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1823 
1824 /**
1825  * hif_register_ext_group() - API to register external group
1826  * interrupt handler.
1827  * @hif_ctx : HIF Context
1828  * @numirq: number of irq's in the group
1829  * @irq: array of irq values
1830  * @handler: callback interrupt handler function
1831  * @cb_ctx: context to passed in callback
1832  * @type: napi vs tasklet
1833  *
1834  * Return: QDF_STATUS
1835  */
1836 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
1837 				  uint32_t numirq, uint32_t irq[],
1838 				  ext_intr_handler handler,
1839 				  void *cb_ctx, const char *context_name,
1840 				  enum hif_exec_type type, uint32_t scale);
1841 
1842 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
1843 				const char *context_name);
1844 
1845 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1846 				u_int8_t pipeid,
1847 				struct hif_msg_callbacks *callbacks);
1848 
1849 /**
1850  * hif_print_napi_stats() - Display HIF NAPI stats
1851  * @hif_ctx - HIF opaque context
1852  *
1853  * Return: None
1854  */
1855 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
1856 
/**
 * hif_clear_napi_stats() - clear the accumulated HIF NAPI latency stats
 * @hif_ctx: the HIF context whose NAPI stats are to be cleared
 *
 * Return: None
 */
1863 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
1864 
1865 #ifdef __cplusplus
1866 }
1867 #endif
1868 
#ifdef FORCE_WAKE
/**
 * hif_force_wake_request() - Function to wake from power collapse
 * @handle: HIF opaque handle
 *
 * Description: API to check if the device is awake or not before
 * read/write to BAR + 4K registers. If device is awake return
 * success otherwise write '1' to
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG which will interrupt
 * the device and does wakeup the PCI and MHI within 50ms
 * and then the device writes a value to
 * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
 * handshake process to let the host know the device is awake.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_request(struct hif_opaque_softc *handle);

/**
 * hif_force_wake_release() - API to release/reset the SOC wake register
 * from interrupting the device.
 * @handle: HIF opaque handle
 *
 * Description: API to set the
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
 * to release the interrupt line.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_release(struct hif_opaque_softc *handle);
#else
/* FORCE_WAKE disabled: both hooks are no-op stubs that always report
 * success (0), so callers need no conditional compilation of their own.
 */
static inline
int hif_force_wake_request(struct hif_opaque_softc *handle)
{
	return 0;
}

static inline
int hif_force_wake_release(struct hif_opaque_softc *handle)
{
	return 0;
}
#endif /* FORCE_WAKE */
1912 
#ifdef FEATURE_HAL_DELAYED_REG_WRITE
/**
 * hif_prevent_link_low_power_states() - Prevent from going to low power states
 * @hif: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);

/**
 * hif_allow_link_low_power_states() - Allow link to go to low power states
 * @hif: HIF opaque context
 *
 * Return: None
 */
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);

#else

/* Delayed register writes disabled: prevention is a no-op that reports
 * success, and the release counterpart does nothing.
 */
static inline
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
{
}
#endif
1943 
/* Accessors for device base-address mappings ("ba" presumably = base
 * address, with the _ce variant covering the copy-engine register
 * space) — confirm against the bus-layer implementations.
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle);

/**
 * hif_get_soc_version() - get soc major version from target info
 * @hif_handle: the HIF context
 *
 * Return: version number
 */
uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle);
1954 
1955 /**
1956  * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
1957  * @hif_ctx - the HIF context to assign the callback to
1958  * @callback - the callback to assign
1959  * @priv - the private data to pass to the callback when invoked
1960  *
1961  * Return: None
1962  */
1963 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
1964 			       void (*callback)(void *),
1965 			       void *priv);
1966 /*
1967  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
1968  * for defined here
1969  */
1970 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1971 ssize_t hif_dump_desc_trace_buf(struct device *dev,
1972 				struct device_attribute *attr, char *buf);
1973 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1974 					const char *buf, size_t size);
1975 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
1976 				const char *buf, size_t size);
1977 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
1978 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
1979 #endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
1980 
1981 /**
1982  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
1983  * @hif: hif context
1984  * @ce_service_max_yield_time: CE service max yield time to set
1985  *
1986  * This API storess CE service max yield time in hif context based
1987  * on ini value.
1988  *
1989  * Return: void
1990  */
1991 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
1992 				       uint32_t ce_service_max_yield_time);
1993 
1994 /**
1995  * hif_get_ce_service_max_yield_time() - get CE service max yield time
1996  * @hif: hif context
1997  *
1998  * This API returns CE service max yield time.
1999  *
2000  * Return: CE service max yield time
2001  */
2002 unsigned long long
2003 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
2004 
2005 /**
2006  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
2007  * @hif: hif context
2008  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
2009  *
2010  * This API stores CE service max rx ind flush in hif context based
2011  * on ini value.
2012  *
2013  * Return: void
2014  */
2015 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2016 					 uint8_t ce_service_max_rx_ind_flush);
2017 
#ifdef OL_ATH_SMART_LOGGING
/**
 * hif_log_dump_ce() - Copy all the CE DEST ring to buf
 * @scn: HIF handler
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy Engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and buffers pointed by them in to the given buf
 *
 * Return: Current pointer in ring buffer
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
			 uint8_t *buf_init, uint32_t buf_sz,
			 uint32_t ce, uint32_t skb_sz);
#endif /* OL_ATH_SMART_LOGGING */
2037 
2038 /*
2039  * hif_softc_to_hif_opaque_softc - API to convert hif_softc handle
2040  * to hif_opaque_softc handle
2041  * @hif_handle - hif_softc type
2042  *
2043  * Return: hif_opaque_softc type
2044  */
2045 static inline struct hif_opaque_softc *
2046 hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
2047 {
2048 	return (struct hif_opaque_softc *)hif_handle;
2049 }
2050 
#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
/**
 * hif_try_prevent_ep_vote_access() - try to disallow EP vote access
 * @hif_ctx: HIF opaque context
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx);

/**
 * hif_set_ep_intermediate_vote_access() - set EP vote access to the
 *  intermediate state
 * @hif_ctx: HIF opaque context
 *
 * Return: None
 */
void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx);

/**
 * hif_allow_ep_vote_access() - re-allow EP vote access
 * @hif_ctx: HIF opaque context
 *
 * Return: None
 */
void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx);

/**
 * hif_set_ep_vote_access() - record the EP vote access state for a type
 * @hif_ctx: HIF opaque context
 * @type: vote type to set
 * @access: access state to record for @type
 *
 * Return: None
 */
void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			    uint8_t type, uint8_t access);

/**
 * hif_get_ep_vote_access() - get the EP vote access state for a type
 * @hif_ctx: HIF opaque context
 * @type: vote type to query
 *
 * Return: current access state recorded for @type
 */
uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			       uint8_t type);
#else
/* EP vote tracking applies only to IPCI with delayed register writes;
 * otherwise access is unconditionally granted and setters are no-ops.
 */
static inline QDF_STATUS
hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type, uint8_t access)
{
}

static inline uint8_t
hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type)
{
	return HIF_EP_VOTE_ACCESS_ENABLE;
}
#endif
2089 
#ifdef FORCE_WAKE
/**
 * hif_srng_init_phase() - Indicate srng initialization phase
 * to avoid force wake as UMAC power collapse is not yet
 * enabled
 * @hif_ctx: hif opaque handle
 * @init_phase: initialization phase
 *
 * Return:  None
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase);
#else
/* FORCE_WAKE disabled: nothing to record, stub is a no-op. */
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
}
#endif /* FORCE_WAKE */
2109 
#ifdef HIF_IPCI
/**
 * hif_shutdown_notifier_cb() - Call back for shutdown notifier
 * @ctx: hif handle
 *
 * Return:  None
 */
void hif_shutdown_notifier_cb(void *ctx);
#else
/* Shutdown notification is only wired up for IPCI; otherwise a no-op. */
static inline
void hif_shutdown_notifier_cb(void *ctx)
{
}
#endif /* HIF_IPCI */
2124 
#ifdef HIF_CE_LOG_INFO
/**
 * hif_log_ce_info() - API to log ce info
 * @scn: hif handle
 * @data: hang event data buffer
 * @offset: offset at which data needs to be written
 *
 * Return:  None
 */
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset);
#else
/* CE info logging disabled: no data is written and @offset is untouched. */
static inline
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset)
{
}
#endif
2143 
#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * hif_config_irq_set_perf_affinity_hint() - API to set affinity
 * @hif_ctx: hif opaque handle
 *
 * This function is used to move the WLAN IRQs to perf cores in
 * case of defconfig builds.
 *
 * Return:  None
 */
void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx);

#else
/* Perf-core affinity masking disabled: stub leaves IRQ affinity alone. */
static inline void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx)
{
}
#endif
2163 
2164 /**
2165  * hif_apps_grp_irqs_enable() - enable ext grp irqs
2166  * @hif - HIF opaque context
2167  *
2168  * Return: 0 on success. Error code on failure.
2169  */
2170 int hif_apps_grp_irqs_enable(struct hif_opaque_softc *hif_ctx);
2171 
2172 /**
2173  * hif_apps_grp_irqs_disable() - disable ext grp irqs
2174  * @hif - HIF opaque context
2175  *
2176  * Return: 0 on success. Error code on failure.
2177  */
2178 int hif_apps_grp_irqs_disable(struct hif_opaque_softc *hif_ctx);
2179 
2180 /**
2181  * hif_disable_grp_irqs() - disable ext grp irqs
2182  * @hif - HIF opaque context
2183  *
2184  * Return: 0 on success. Error code on failure.
2185  */
2186 int hif_disable_grp_irqs(struct hif_opaque_softc *scn);
2187 
2188 /**
2189  * hif_enable_grp_irqs() - enable ext grp irqs
2190  * @hif - HIF opaque context
2191  *
2192  * Return: 0 on success. Error code on failure.
2193  */
2194 int hif_enable_grp_irqs(struct hif_opaque_softc *scn);
2195 
/**
 * enum hif_credit_exchange_type - kind of credit event being recorded
 * @HIF_REQUEST_CREDIT: a credit request
 * @HIF_PROCESS_CREDIT_REPORT: processing of a credit report
 */
enum hif_credit_exchange_type {
	HIF_REQUEST_CREDIT,
	HIF_PROCESS_CREDIT_REPORT,
};

/**
 * enum hif_detect_latency_type - subsystem monitored by latency detection
 * @HIF_DETECT_TASKLET: tasklet latency
 * @HIF_DETECT_CREDIT: credit latency
 * @HIF_DETECT_UNKNOWN: unclassified
 */
enum hif_detect_latency_type {
	HIF_DETECT_TASKLET,
	HIF_DETECT_CREDIT,
	HIF_DETECT_UNKNOWN
};
2206 
#ifdef HIF_DETECTION_LATENCY_ENABLE
/**
 * hif_latency_detect_credit_record_time() - record timestamp of a credit event
 * @type: which credit exchange event occurred
 * @hif_ctx: HIF opaque context
 *
 * Return: None
 */
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx);

/* Start/stop the latency-detection timer for @hif_ctx. */
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx);
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx);
/* NOTE(review): no stub versions exist for the two hif_softc-based
 * checkers below; presumably their callers are themselves compiled only
 * when HIF_DETECTION_LATENCY_ENABLE is set — confirm.
 */
void hif_tasklet_latency(struct hif_softc *scn, bool from_timer);
void hif_credit_latency(struct hif_softc *scn, bool from_timer);
/**
 * hif_check_detection_latency() - run the latency check
 * @scn: HIF context
 * @from_timer: true when invoked from the detection timer
 * @bitmap_type: bitmap of detection types to check
 *
 * Return: None
 */
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type);
/* Enable or disable latency detection at runtime. */
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value);
#else
/* Latency detection compiled out: all hooks below are no-ops. */
static inline
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx)
{}
static inline
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{}

static inline
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{}
#endif
2244 
#ifdef SYSTEM_PM_CHECK
/**
 * __hif_system_pm_set_state() - Set system pm state
 * @hif: hif opaque handle
 * @state: system state
 *
 * Return:  None
 */
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state);

/**
 * hif_system_pm_set_state_on() - Set system pm state to ON
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_ON);
}

/**
 * hif_system_pm_set_state_resuming() - Set system pm state to resuming
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_RESUMING);
}

/**
 * hif_system_pm_set_state_suspending() - Set system pm state to suspending
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDING);
}

/**
 * hif_system_pm_set_state_suspended() - Set system pm state to suspended
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDED);
}

/**
 * hif_system_pm_get_state() - Get system pm state
 * @hif: hif opaque handle
 *
 * Return:  system state
 */
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif);

/**
 * hif_system_pm_state_check() - Check system state and trigger resume
 *  if required
 * @hif: hif opaque handle
 *
 * Return: 0 if system is in on state else error code
 */
int hif_system_pm_state_check(struct hif_opaque_softc *hif);
#else
/* SYSTEM_PM_CHECK disabled: no state is tracked. Setters are no-ops,
 * getters report state 0 and "system on" (0) respectively.
 */
static inline
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
}

static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
}

static inline
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline int hif_system_pm_state_check(struct hif_opaque_softc *hif)
{
	return 0;
}
#endif
2358 
#ifdef FEATURE_IRQ_AFFINITY
/**
 * hif_set_grp_intr_affinity() - API to set affinity for grp
 *  intrs set in the bitmap
 * @scn: hif handle
 * @grp_intr_bitmask: grp intrs for which perf affinity should be
 *  applied
 * @perf: affine to perf or non-perf cluster
 *
 * Return: None
 */
void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
			       uint32_t grp_intr_bitmask, bool perf);
#else
/* IRQ affinity feature disabled: stub leaves affinity unchanged. */
static inline
void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
			       uint32_t grp_intr_bitmask, bool perf)
{
}
#endif
2379 /**
2380  * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
2381  * @hif_ctx: hif opaque handle
2382  *
2383  * Description:
2384  *   Gets number of WMI EPs configured in target svc map. Since EP map
2385  *   include IN and OUT direction pipes, count only OUT pipes to get EPs
2386  *   configured for WMI service.
2387  *
2388  * Return:
2389  *  uint8_t: count for WMI eps in target svc map
2390  */
2391 uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *scn);
2392 
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * hif_register_umac_reset_handler() - Register UMAC HW reset handler
 * @hif_scn: hif opaque handle
 * @handler: callback handler function
 * @cb_ctx: context to passed to @handler
 * @irq: irq number to be used for UMAC HW reset interrupt
 *
 * Return: QDF_STATUS of operation
 */
QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
					   int (*handler)(void *cb_ctx),
					   void *cb_ctx, int irq);

/**
 * hif_unregister_umac_reset_handler() - Unregister UMAC HW reset handler
 * @hif_scn: hif opaque handle
 *
 * Return: QDF_STATUS of operation
 */
QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn);
#else
/* UMAC HW reset unsupported: both registration hooks succeed trivially
 * without registering anything, so common callers need no guards.
 */
static inline
QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
					   int (*handler)(void *cb_ctx),
					   void *cb_ctx, int irq)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
{
	return QDF_STATUS_SUCCESS;
}

#endif /* DP_UMAC_HW_RESET_SUPPORT */
2430 
2431 #endif /* _HIF_H_ */
2432