xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision 87ce989ee0b2172d08cfc0e040cdb9ca3fcce1e8)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _HIF_H_
21 #define _HIF_H_
22 
23 #ifdef __cplusplus
24 extern "C" {
25 #endif /* __cplusplus */
26 
27 /* Header files */
28 #include <qdf_status.h>
29 #include "qdf_nbuf.h"
30 #include "qdf_lro.h"
31 #include "ol_if_athvar.h"
32 #include <linux/platform_device.h>
33 #ifdef HIF_PCI
34 #include <linux/pci.h>
35 #endif /* HIF_PCI */
36 #ifdef HIF_USB
37 #include <linux/usb.h>
38 #endif /* HIF_USB */
39 #ifdef IPA_OFFLOAD
40 #include <linux/ipa.h>
41 #endif
42 #include "cfg_ucfg_api.h"
43 #include "qdf_dev.h"
44 #include <wlan_init_cfg.h>
45 
46 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
47 
48 typedef void __iomem *A_target_id_t;
49 typedef void *hif_handle_t;
50 
51 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
52 #define HIF_WORK_DRAIN_WAIT_CNT 50
53 
54 #define HIF_EP_WAKE_RESET_WAIT_CNT 10
55 #endif
56 
57 #define HIF_TYPE_AR6002   2
58 #define HIF_TYPE_AR6003   3
59 #define HIF_TYPE_AR6004   5
60 #define HIF_TYPE_AR9888   6
61 #define HIF_TYPE_AR6320   7
62 #define HIF_TYPE_AR6320V2 8
63 /* For attaching Peregrine 2.0 board host_reg_tbl only */
64 #define HIF_TYPE_AR9888V2 9
65 #define HIF_TYPE_ADRASTEA 10
66 #define HIF_TYPE_AR900B 11
67 #define HIF_TYPE_QCA9984 12
68 #define HIF_TYPE_QCA9888 14
69 #define HIF_TYPE_QCA8074 15
70 #define HIF_TYPE_QCA6290 16
71 #define HIF_TYPE_QCN7605 17
72 #define HIF_TYPE_QCA6390 18
73 #define HIF_TYPE_QCA8074V2 19
74 #define HIF_TYPE_QCA6018  20
75 #define HIF_TYPE_QCN9000 21
76 #define HIF_TYPE_QCA6490 22
77 #define HIF_TYPE_QCA6750 23
78 #define HIF_TYPE_QCA5018 24
79 #define HIF_TYPE_QCN6122 25
80 #define HIF_TYPE_KIWI 26
81 #define HIF_TYPE_QCN9224 27
82 #define HIF_TYPE_QCA9574 28
83 
84 #define DMA_COHERENT_MASK_DEFAULT   37
85 
86 #ifdef IPA_OFFLOAD
87 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
88 #endif
89 
/* enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring4,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
132 
133 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
/* enum hif_legacy_pci_irq - enum defining legacy (non-MSI) PCI irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_legacy_pci_irq {
	ce0,
	ce1,
	ce2,
	ce3,
	ce4,
	ce5,
	ce6,
	ce7,
	ce8,
	ce9,
	ce10,
	ce11,
	ce12,
	ce13,
	ce14,
	ce15,
	reo2sw8_intr2,
	reo2sw7_intr2,
	reo2sw6_intr2,
	reo2sw5_intr2,
	reo2sw4_intr2,
	reo2sw3_intr2,
	reo2sw2_intr2,
	reo2sw1_intr2,
	reo2sw0_intr2,
	reo2sw8_intr,
	reo2sw7_intr,
	reo2sw6_inrr, /* NOTE(review): likely a typo of "intr"; identifier kept
		       * as-is since external code may reference it.
		       */
	reo2sw5_intr,
	reo2sw4_intr,
	reo2sw3_intr,
	reo2sw2_intr,
	reo2sw1_intr,
	reo2sw0_intr,
	reo2status_intr2,
	reo_status,
	reo2rxdma_out_2,
	reo2rxdma_out_1,
	reo_cmd,
	sw2reo6,
	sw2reo5,
	sw2reo1,
	sw2reo,
	rxdma2reo_mlo_0_dst_ring1,
	rxdma2reo_mlo_0_dst_ring0,
	rxdma2reo_mlo_1_dst_ring1,
	rxdma2reo_mlo_1_dst_ring0,
	rxdma2reo_dst_ring1,
	rxdma2reo_dst_ring0,
	rxdma2sw_dst_ring1,
	rxdma2sw_dst_ring0,
	rxdma2release_dst_ring1,
	rxdma2release_dst_ring0,
	sw2rxdma_2_src_ring,
	sw2rxdma_1_src_ring,
	sw2rxdma_0,
	wbm2sw6_release2,
	wbm2sw5_release2,
	wbm2sw4_release2,
	wbm2sw3_release2,
	wbm2sw2_release2,
	wbm2sw1_release2,
	wbm2sw0_release2,
	wbm2sw6_release,
	wbm2sw5_release,
	wbm2sw4_release,
	wbm2sw3_release,
	wbm2sw2_release,
	wbm2sw1_release,
	wbm2sw0_release,
	wbm2sw_link,
	wbm_error_release,
	sw2txmon_src_ring,
	sw2rxmon_src_ring,
	txmon2sw_p1_intr1,
	txmon2sw_p1_intr0,
	txmon2sw_p0_dest1,
	txmon2sw_p0_dest0,
	rxmon2sw_p1_intr1,
	rxmon2sw_p1_intr0,
	rxmon2sw_p0_dest1,
	rxmon2sw_p0_dest0,
	sw_release,
	sw2tcl_credit2,
	sw2tcl_credit,
	sw2tcl4,
	sw2tcl5,
	sw2tcl3,
	sw2tcl2,
	sw2tcl1,
	sw2wbm1,
	misc_8,
	misc_7,
	misc_6,
	misc_5,
	misc_4,
	misc_3,
	misc_2,
	misc_1,
	misc_0,
};
236 #endif
237 
238 struct CE_state;
239 #ifdef QCA_WIFI_QCN9224
240 #define CE_COUNT_MAX 16
241 #else
242 #define CE_COUNT_MAX 12
243 #endif
244 
245 #ifndef HIF_MAX_GROUP
246 #define HIF_MAX_GROUP WLAN_CFG_INT_NUM_CONTEXTS
247 #endif
248 
249 #ifdef CONFIG_BERYLLIUM
250 #define HIF_MAX_GRP_IRQ 25
251 #else
252 #define HIF_MAX_GRP_IRQ 16
253 #endif
254 
255 #ifndef NAPI_YIELD_BUDGET_BASED
256 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
257 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
258 #endif
259 #else  /* NAPI_YIELD_BUDGET_BASED */
260 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
261 #endif /* NAPI_YIELD_BUDGET_BASED */
262 
263 #define QCA_NAPI_BUDGET    64
264 #define QCA_NAPI_DEF_SCALE  \
265 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
266 
267 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
268 /* NOTE: "napi->scale" can be changed,
269  * but this does not change the number of buckets
270  */
271 #define QCA_NAPI_NUM_BUCKETS 4
272 
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is reenabled
 * @napi_workdone: cumulative of all work done reported by handler
 * @cpu_corrected: incremented when execution context runs on a different core
 *			than the one that its irq is affined to.
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: longest single poll time observed
 * @poll_time_buckets: histogram of poll times for the napi
 *
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};
301 
302 
/**
 * struct qca_napi_info - per NAPI instance data structure
 * @netdev: dummy net_dev the NAPI instance is attached to
 * @hif_ctx: HIF context owning this instance
 * @napi: the Linux NAPI instance
 * @scale: work scale; currently set to the same value for all instances
 * @id: instance id
 * @cpu: cpu associated with this instance
 * @irq: irq line serviced by this instance
 * @cpumask: cpu mask for this instance
 * @stats: per-cpu execution statistics
 * @offld_flush_cb: rx offload flush callback; only present for data rx CEs
 * @rx_thread_napi: NAPI instance used by the rx thread
 * @rx_thread_netdev: dummy net_dev for @rx_thread_napi
 * @lro_ctx: LRO context
 *
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device    netdev; /* dummy net_dev */
	void                 *hif_ctx;
	struct napi_struct   napi;
	uint8_t              scale;   /* currently same on all instances */
	uint8_t              id;
	uint8_t              cpu;
	int                  irq;
	cpumask_t            cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct   rx_thread_napi;
	struct net_device    rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t        lro_ctx;
};
328 
/* Throughput state used for NAPI irq-affinity / clock-voting decisions */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};
/* Hotplug state of a CPU as tracked in the NAPI cpu table */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP
};
338 
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state:       hotplug state of this core
 * @core_id:     physical core id of the core
 * @cluster_id:  cluster this core belongs to
 * @core_mask:   mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq:    maximum clock this core can be clocked at
 *               same for all cpus of the same core.
 * @napis:       bitmap of napi instances on this core
 * @execs:       bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *               (table index, not a pointer)
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int			core_id;
	int			cluster_id;
	cpumask_t		core_mask;
	cpumask_t		thread_mask;
	unsigned int		max_freq;
	uint32_t		napis;
	uint32_t		execs;
	int			cluster_nxt;  /* index, not pointer */
};
369 
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: per-CE NAPI instance pointers, indexed by pipe id
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: head index of the "little" cluster chain (see @cluster_nxt)
 * @bigcl_head: head index of the "big" cluster chain (see @cluster_nxt)
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: feature flags
 */
struct qca_napi_data {
	struct               hif_softc *hif_softc;
	qdf_spinlock_t       lock;
	uint32_t             state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t             ce_map;
	uint32_t             exec_map;
	uint32_t             user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu  napi_cpu[NR_CPUS];
	int                  lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t              flags;
};
402 
/**
 * struct hif_config_info - Place Holder for HIF configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable Runtime PM (only with FEATURE_RUNTIME_PM)
 * @runtime_pm_delay: Runtime PM Delay (only with FEATURE_RUNTIME_PM)
 * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
 *
 * Structure for holding HIF ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	uint8_t enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
	uint64_t rx_softirq_max_yield_duration_ns;
};
420 
/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: pointer to hardware name string
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};
438 
/* Opaque handle to a HIF instance; the real softc layout is private to HIF */
struct hif_opaque_softc {
};
441 
/**
 * enum hif_event_type - Type of DP events to be recorded
 * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
 * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event
 * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event
 * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
 * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
 * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
 * @HIF_EVENT_BH_COMPLETE: NAPI POLL completion event
 * @HIF_EVENT_BH_FORCE_BREAK: NAPI POLL force break event
 *
 * Event types recorded into the per-group DP event history
 * (struct hif_event_record).
 */
enum hif_event_type {
	HIF_EVENT_IRQ_TRIGGER,
	HIF_EVENT_TIMER_ENTRY,
	HIF_EVENT_TIMER_EXIT,
	HIF_EVENT_BH_SCHED,
	HIF_EVENT_SRNG_ACCESS_START,
	HIF_EVENT_SRNG_ACCESS_END,
	HIF_EVENT_BH_COMPLETE,
	HIF_EVENT_BH_FORCE_BREAK,
	/* Do check hif_hist_skip_event_record when adding new events */
};
464 
/**
 * enum hif_system_pm_state - System PM state
 * @HIF_SYSTEM_PM_STATE_ON: System in active state
 * @HIF_SYSTEM_PM_STATE_BUS_RESUMING: bus resume in progress as part of
 *  system resume
 * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDING: bus suspend in progress as part of
 *  system suspend
 * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDED: bus suspended as part of system suspend
 */
enum hif_system_pm_state {
	HIF_SYSTEM_PM_STATE_ON,
	HIF_SYSTEM_PM_STATE_BUS_RESUMING,
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDING,
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDED,
};
480 
481 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
482 #define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP
483 
484 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
485 /* HIF_EVENT_HIST_MAX should always be power of 2 */
486 #define HIF_EVENT_HIST_MAX		512
487 
488 #define HIF_EVENT_HIST_ENABLE_MASK	0xFF
489 
/**
 * hif_get_log_timestamp() - timestamp source for DP event history entries
 *
 * Return: QDF log timestamp
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_get_log_timestamp();
}
494 
495 #else
496 
497 #define HIF_EVENT_HIST_MAX		32
498 /* Enable IRQ TRIGGER, NAPI SCHEDULE, SRNG ACCESS START */
499 #define HIF_EVENT_HIST_ENABLE_MASK	0x19
500 
/**
 * hif_get_log_timestamp() - timestamp source for DP event history entries
 *
 * Return: scheduler clock value
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_sched_clock();
}
505 
506 #endif
507 
/**
 * struct hif_event_record - an entry of the DP event history
 * @hal_ring_id: ring id for which event is recorded
 * @hp: head pointer of the ring (may not be applicable for all events)
 * @tp: tail pointer of the ring (may not be applicable for all events)
 * @cpu_id: cpu id on which the event occurred
 * @timestamp: timestamp when event occurred
 * @type: type of the event
 *
 * This structure represents the information stored for every datapath
 * event which is logged in the history.
 *
 * NOTE(review): hif_record_event() does not set @cpu_id/@timestamp;
 * presumably hif_hist_record_event() fills them in — confirm.
 */
struct hif_event_record {
	uint8_t hal_ring_id;
	uint32_t hp;
	uint32_t tp;
	int cpu_id;
	uint64_t timestamp;
	enum hif_event_type type;
};
528 
/**
 * struct hif_event_misc - history related misc info
 * @last_irq_index: last irq event index in history
 * @last_irq_ts: last irq timestamp
 */
struct hif_event_misc {
	int32_t last_irq_index;
	uint64_t last_irq_ts;
};
538 
/**
 * struct hif_event_history - history for one interrupt group
 * @index: index to store new event
 * @misc: history related misc info (last irq index and timestamp)
 * @event: event entries
 *
 * This structure represents the datapath history for one
 * interrupt group.
 */
struct hif_event_history {
	qdf_atomic_t index;
	struct hif_event_misc misc;
	struct hif_event_record event[HIF_EVENT_HIST_MAX];
};
552 
553 /**
554  * hif_hist_record_event() - Record one datapath event in history
555  * @hif_ctx: HIF opaque context
556  * @event: DP event entry
557  * @intr_grp_id: interrupt group ID registered with hif
558  *
559  * Return: None
560  */
561 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
562 			   struct hif_event_record *event,
563 			   uint8_t intr_grp_id);
564 
565 /**
566  * hif_event_history_init() - Initialize SRNG event history buffers
567  * @hif_ctx: HIF opaque context
568  * @id: context group ID for which history is recorded
569  *
570  * Returns: None
571  */
572 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id);
573 
574 /**
575  * hif_event_history_deinit() - De-initialize SRNG event history buffers
576  * @hif_ctx: HIF opaque context
577  * @id: context group ID for which history is recorded
578  *
579  * Returns: None
580  */
581 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id);
582 
583 /**
584  * hif_record_event() - Wrapper function to form and record DP event
585  * @hif_ctx: HIF opaque context
586  * @intr_grp_id: interrupt group ID registered with hif
587  * @hal_ring_id: ring id for which event is recorded
588  * @hp: head pointer index of the srng
589  * @tp: tail pointer index of the srng
590  * @type: type of the event to be logged in history
591  *
592  * Return: None
593  */
594 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
595 				    uint8_t intr_grp_id,
596 				    uint8_t hal_ring_id,
597 				    uint32_t hp,
598 				    uint32_t tp,
599 				    enum hif_event_type type)
600 {
601 	struct hif_event_record event;
602 
603 	event.hal_ring_id = hal_ring_id;
604 	event.hp = hp;
605 	event.tp = tp;
606 	event.type = type;
607 
608 	hif_hist_record_event(hif_ctx, &event, intr_grp_id);
609 
610 	return;
611 }
612 
613 #else
614 
/* Stub: WLAN_FEATURE_DP_EVENT_HISTORY disabled, event recording is a no-op */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}
623 
/* Stub: WLAN_FEATURE_DP_EVENT_HISTORY disabled, nothing to initialize */
static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx,
					  uint8_t id)
{
}
628 
/* Stub: WLAN_FEATURE_DP_EVENT_HISTORY disabled, nothing to de-initialize */
static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx,
					    uint8_t id)
{
}
633 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
634 
/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE - Device Power change type
 *
 * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
 *                         platform-specific measures to completely power-off
 *                         the module and associated hardware (i.e. cut power
 *                         supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};
651 
/**
 * enum hif_enable_type - what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 * @HIF_ENABLE_TYPE_MAX: number of enable types
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};
663 
/**
 * enum hif_disable_type - what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 * @HIF_DISABLE_TYPE_MAX: number of disable types
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
/**
 * enum hif_device_config_opcode - configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get fifo address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
715 
716 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/* One entry of the PCIe register access debug log.
 * NOTE(review): "HID" looks like a typo for "HIF"; name kept for
 * compatibility with existing users.
 */
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
723 #endif
724 
725 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
726 		uint32_t value);
727 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
728 
729 #define HIF_MAX_DEVICES                 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context:          context to pass to the @dsr_handler
 *                    note : @rw_compl_handler is provided the context
 *                    passed to hif_read_write
 * @rw_compl_handler: Read / write completion handler
 * @dsr_handler:      DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};
743 
/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 * @get_bandwidth_level: Query current bandwidth level for the driver
 * @prealloc_get_consistent_mem_unaligned: get prealloc unaligned consistent
 *  mem
 * @prealloc_put_consistent_mem_unaligned: put unaligned consistent mem to
 *  pool
 *
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
	int (*get_bandwidth_level)(void *context);
	void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size,
						       qdf_dma_addr_t *paddr,
						       uint32_t ring_type);
	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
};
770 
771 /* This API detaches the HTC layer from the HIF device */
772 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
773 
774 /****************************************************************/
775 /* BMI and Diag window abstraction                              */
776 /****************************************************************/
777 
778 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
779 
780 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
781 				     * handled atomically by
782 				     * DiagRead/DiagWrite
783 				     */
784 
785 #ifdef WLAN_FEATURE_BMI
786 /*
787  * API to handle HIF-specific BMI message exchanges, this API is synchronous
788  * and only allowed to be called from a context that can block (sleep)
789  */
790 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
791 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
792 				uint8_t *pSendMessage, uint32_t Length,
793 				uint8_t *pResponseMessage,
794 				uint32_t *pResponseLength, uint32_t TimeoutMS);
795 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
796 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
797 #else /* WLAN_FEATURE_BMI */
/* Stub: WLAN_FEATURE_BMI disabled, no BMI callbacks to register */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}
802 
/* BMI support is compiled out, so BMI is never needed */
static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	(void)hif_ctx;

	return false;
}
808 #endif /* WLAN_FEATURE_BMI */
809 
810 #ifdef HIF_CPU_CLEAR_AFFINITY
811 /**
812  * hif_config_irq_clear_cpu_affinity() - Remove cpu affinity of IRQ
813  * @scn: HIF handle
814  * @intr_ctxt_id: interrupt group index
815  * @cpu: CPU core to clear
816  *
817  * Return: None
818  */
819 void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
820 				       int intr_ctxt_id, int cpu);
821 #else
822 static inline
823 void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
824 				       int intr_ctxt_id, int cpu)
825 {
826 }
827 #endif
828 
829 /*
830  * APIs to handle HIF specific diagnostic read accesses. These APIs are
831  * synchronous and only allowed to be called from a context that
832  * can block (sleep). They are not high performance APIs.
833  *
834  * hif_diag_read_access reads a 4 Byte aligned/length value from a
835  * Target register or memory word.
836  *
837  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
838  */
839 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
840 				uint32_t address, uint32_t *data);
841 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
842 		      uint8_t *data, int nbytes);
843 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
844 			void *ramdump_base, uint32_t address, uint32_t size);
845 /*
846  * APIs to handle HIF specific diagnostic write accesses. These APIs are
847  * synchronous and only allowed to be called from a context that
848  * can block (sleep).
849  * They are not high performance APIs.
850  *
851  * hif_diag_write_access writes a 4 Byte aligned/length value to a
852  * Target register or memory word.
853  *
854  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
855  */
856 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
857 				 uint32_t address, uint32_t data);
858 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
859 			uint32_t address, uint8_t *data, int nbytes);
860 
861 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
862 
863 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
864 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
865 
866 /*
867  * Set the FASTPATH_mode_on flag in sc, for use by data path
868  */
869 #ifdef WLAN_FEATURE_FASTPATH
870 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
871 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
872 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
873 
874 /**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @hif_ctx: HIF opaque context
 * @handler: Callback function
 * @context: handle for callback function
878  *
879  * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
880  */
881 QDF_STATUS hif_ce_fastpath_cb_register(
882 		struct hif_opaque_softc *hif_ctx,
883 		fastpath_msg_handler handler, void *context);
884 #else
/* Stub: WLAN_FEATURE_FASTPATH disabled, registration always fails */
static inline QDF_STATUS hif_ce_fastpath_cb_register(
		struct hif_opaque_softc *hif_ctx,
		fastpath_msg_handler handler, void *context)
{
	return QDF_STATUS_E_FAILURE;
}
891 
/* Fastpath disabled: there is never a CE handle to return */
static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	(void)hif_ctx;
	(void)ret;

	return NULL;
}
896 
897 #endif
898 
899 /*
900  * Enable/disable CDC max performance workaround
901  * For max-performace set this to 0
902  * To allow SoC to enter sleep set this to 1
903  */
904 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
905 
906 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
907 			     qdf_shared_mem_t **ce_sr,
908 			     uint32_t *ce_sr_ring_size,
909 			     qdf_dma_addr_t *ce_reg_paddr);
910 
/**
 * struct hif_msg_callbacks - List of callbacks - filled in by HTC.
 * @Context: context meaningful to HTC
 * @txCompletionHandler: tx completion handler
 * @rxCompletionHandler: rx completion handler, per pipe
 * @txResourceAvailHandler: tx resource available handler, per pipe
 * @fwEventHandler: firmware event/status handler
 * @update_bundle_stats: bundle statistics update handler
 */
struct hif_msg_callbacks {
	void *Context;
	/**< context meaningful to HTC */
	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint32_t transferID,
					uint32_t toeplitz_hash_result);
	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint8_t pipeID);
	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
	void (*fwEventHandler)(void *context, QDF_STATUS status);
	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
};
926 
/* Target lifecycle status values reported through HIF */
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,  /* target got reset */
	TARGET_STATUS_EJECT,  /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
933 
/**
 * enum hif_attribute_flags - configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
 *                                + No pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};
945 
946 #define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
947 	(attr |= (v & 0x01) << 5)
948 #define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
949 	(attr |= (v & 0x03) << 6)
950 #define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
951 	(attr |= (v & 0x01) << 13)
952 #define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
953 	(attr |= (v & 0x01) << 14)
954 #define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
955 	(attr |= (v & 0x01) << 15)
956 #define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
957 	(attr |= (v & 0x0FFF) << 16)
958 #define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
959 	(attr |= (v & 0x01) << 30)
960 
/* Snapshot of a UL pipe's copy-engine ring state
 * (presumably UL = uplink, host->target — confirm against HIF bus layer)
 */
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
970 
/**
 * struct hif_dl_pipe_info - ring bookkeeping for a DL (download) pipe
 * @nentries: number of entries in the ring
 * @nentries_mask: mask applied to ring indices for wrap-around
 * @sw_index: software ring index
 * @write_index: cached copy of the write index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (device) address space
 */
struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
980 
/**
 * struct hif_pipe_addl_info - additional per-pipe information
 * @pci_mem: PCI memory value for this pipe
 * @ctrl_addr: control address for the pipe
 * @ul_pipe: UL pipe ring info
 * @dl_pipe: DL pipe ring info
 */
struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
987 
988 #ifdef CONFIG_SLUB_DEBUG_ON
989 #define MSG_FLUSH_NUM 16
990 #else /* PERF build */
991 #define MSG_FLUSH_NUM 32
#endif /* CONFIG_SLUB_DEBUG_ON */
993 
994 struct hif_bus_id;
995 
996 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
997 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
998 		     int opcode, void *config, uint32_t config_len);
999 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
1000 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
1001 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
1002 		   struct hif_msg_callbacks *callbacks);
1003 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
1004 void hif_stop(struct hif_opaque_softc *hif_ctx);
1005 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
1006 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
1007 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
1008 		      uint8_t cmd_id, bool start);
1009 
1010 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1011 				  uint32_t transferID, uint32_t nbytes,
1012 				  qdf_nbuf_t wbuf, uint32_t data_attr);
1013 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1014 			     int force);
1015 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
1016 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
1017 			  uint8_t *DLPipe);
1018 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
1019 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
1020 			int *dl_is_polled);
1021 uint16_t
1022 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1023 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
1024 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
1025 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
1026 		     bool wait_for_it);
1027 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
1028 #ifndef HIF_PCI
/* Without HIF_PCI there is no SOC status to verify; report success. */
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
1033 #else
1034 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
1035 #endif
1036 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1037 			u32 *revision, const char **target_name);
1038 
1039 #ifdef RECEIVE_OFFLOAD
1040 /**
1041  * hif_offld_flush_cb_register() - Register the offld flush callback
1042  * @scn: HIF opaque context
1043  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
1044  *			 Or GRO/LRO flush when RxThread is not enabled. Called
1045  *			 with corresponding context for flush.
1046  * Return: None
1047  */
1048 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
1049 				 void (offld_flush_handler)(void *ol_ctx));
1050 
1051 /**
1052  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
1053  * @scn: HIF opaque context
1054  *
1055  * Return: None
1056  */
1057 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
1058 #endif
1059 
1060 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
1061 /**
1062  * hif_exec_should_yield() - Check if hif napi context should yield
1063  * @hif_ctx - HIF opaque context
1064  * @grp_id - grp_id of the napi for which check needs to be done
1065  *
1066  * The function uses grp_id to look for NAPI and checks if NAPI needs to
1067  * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
1068  * yield decision.
1069  *
1070  * Return: true if NAPI needs to yield, else false
1071  */
1072 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
1073 #else
/*
 * Stub when WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT is disabled: no time-limit
 * accounting is done, so the NAPI context is never asked to yield.
 */
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
					 uint grp_id)
{
	return false;
}
1079 #endif
1080 
1081 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
1082 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
1083 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
1084 				      int htc_htt_tx_endpoint);
1085 
1086 /**
1087  * hif_open() - Create hif handle
1088  * @qdf_ctx: qdf context
1089  * @mode: Driver Mode
1090  * @bus_type: Bus Type
1091  * @cbk: CDS Callbacks
1092  * @psoc: psoc object manager
1093  *
1094  * API to open HIF Context
1095  *
1096  * Return: HIF Opaque Pointer
1097  */
1098 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1099 				  uint32_t mode,
1100 				  enum qdf_bus_type bus_type,
1101 				  struct hif_driver_state_callbacks *cbk,
1102 				  struct wlan_objmgr_psoc *psoc);
1103 
1104 /**
1105  * hif_init_dma_mask() - Set dma mask for the dev
1106  * @dev: dev for which DMA mask is to be set
1107  * @bus_type: bus type for the target
1108  *
1109  * This API sets the DMA mask for the device. before the datapath
1110  * memory pre-allocation is done. If the DMA mask is not set before
1111  * requesting the DMA memory, kernel defaults to a 32-bit DMA mask,
1112  * and does not utilize the full device capability.
1113  *
1114  * Return: 0 - success, non-zero on failure.
1115  */
1116 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type);
1117 void hif_close(struct hif_opaque_softc *hif_ctx);
1118 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1119 		      void *bdev, const struct hif_bus_id *bid,
1120 		      enum qdf_bus_type bus_type,
1121 		      enum hif_enable_type type);
1122 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
1123 #ifdef CE_TASKLET_DEBUG_ENABLE
1124 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
1125 				 uint8_t value);
1126 #endif
1127 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
1128 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
1129 
1130 /**
1131  * enum hif_pm_wake_irq_type - Wake interrupt type for Power Management
 * @HIF_PM_INVALID_WAKE: Wake irq is invalid or not configured
 * @HIF_PM_MSI_WAKE: Wake irq is MSI interrupt
 * @HIF_PM_CE_WAKE: Wake irq is CE interrupt
1135  */
1136 typedef enum {
1137 	HIF_PM_INVALID_WAKE,
1138 	HIF_PM_MSI_WAKE,
1139 	HIF_PM_CE_WAKE,
1140 } hif_pm_wake_irq_type;
1141 
1142 /**
1143  * hif_pm_get_wake_irq_type - Get wake irq type for Power Management
1144  * @hif_ctx: HIF context
1145  *
1146  * Return: enum hif_pm_wake_irq_type
1147  */
1148 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx);
1149 
1150 /**
1151  * enum wlan_rtpm_dbgid - runtime pm put/get debug id
1152  * @RTPM_ID_RESVERD:       Reserved
1153  * @RTPM_ID_WMI:           WMI sending msg, expect put happen at
1154  *                         tx completion from CE level directly.
1155  * @RTPM_ID_HTC:           pkt sending by HTT_DATA_MSG_SVC, expect
1156  *                         put from fw response or just in
1157  *                         htc_issue_packets
1158  * @RTPM_ID_QOS_NOTIFY:    pm qos notifer
1159  * @RTPM_ID_DP_TX_DESC_ALLOC_FREE:      tx desc alloc/free
1160  * @RTPM_ID_CE_SEND_FAST:  operation in ce_send_fast, not include
1161  *                         the pkt put happens outside this function
1162  * @RTPM_ID_SUSPEND_RESUME:     suspend/resume in hdd
1163  * @RTPM_ID_DW_TX_HW_ENQUEUE:   operation in functin dp_tx_hw_enqueue
1164  * @RTPM_ID_HAL_REO_CMD:        HAL_REO_CMD operation
1165  * @RTPM_ID_DP_PRINT_RING_STATS:  operation in dp_print_ring_stats
1166  * @RTPM_ID_PM_STOP:        operation in hif_pm_runtime_stop
1167  * @RTPM_ID_CONN_DISCONNECT:operation when issue disconnect
1168  * @RTPM_ID_SOC_REMOVE: operation in soc remove
1169  * @RTPM_ID_DRIVER_UNLOAD: operation in driver unload
1170  * @RTPM_ID_CE_INTR_HANDLER: operation from ce interrupt handler
1171  * @RTPM_ID_WAKE_INTR_HANDLER: operation from wake interrupt handler
1172  * @RTPM_ID_SOC_IDLE_SHUTDOWN: operation in soc idle shutdown
1173  * @RTPM_ID_HIF_FORCE_WAKE: operation in hif force wake
1174  */
1175 /* New value added to the enum must also be reflected in function
1176  *  rtpm_string_from_dbgid()
1177  */
1178 typedef enum {
1179 	RTPM_ID_RESVERD   = 0,
1180 	RTPM_ID_WMI,
1181 	RTPM_ID_HTC,
1182 	RTPM_ID_QOS_NOTIFY,
1183 	RTPM_ID_DP_TX_DESC_ALLOC_FREE,
1184 	RTPM_ID_CE_SEND_FAST,
1185 	RTPM_ID_SUSPEND_RESUME,
1186 	RTPM_ID_DW_TX_HW_ENQUEUE,
1187 	RTPM_ID_HAL_REO_CMD,
1188 	RTPM_ID_DP_PRINT_RING_STATS,
1189 	RTPM_ID_PM_STOP,
1190 	RTPM_ID_CONN_DISCONNECT,
1191 	RTPM_ID_SOC_REMOVE,
1192 	RTPM_ID_DRIVER_UNLOAD,
1193 	RTPM_ID_CE_INTR_HANDLER,
1194 	RTPM_ID_WAKE_INTR_HANDLER,
1195 	RTPM_ID_SOC_IDLE_SHUTDOWN,
1196 	RTPM_ID_HIF_FORCE_WAKE,
1197 
1198 	RTPM_ID_MAX,
1199 } wlan_rtpm_dbgid;
1200 
1201 /**
1202  * rtpm_string_from_dbgid() - Convert dbgid to respective string
1203  * @id -  debug id
1204  *
1205  * Debug support function to convert  dbgid to string.
1206  * Please note to add new string in the array at index equal to
1207  * its enum value in wlan_rtpm_dbgid.
1208  */
1209 static inline char *rtpm_string_from_dbgid(wlan_rtpm_dbgid id)
1210 {
1211 	static const char *strings[] = { "RTPM_ID_RESVERD",
1212 					"RTPM_ID_WMI",
1213 					"RTPM_ID_HTC",
1214 					"RTPM_ID_QOS_NOTIFY",
1215 					"RTPM_ID_DP_TX_DESC_ALLOC_FREE",
1216 					"RTPM_ID_CE_SEND_FAST",
1217 					"RTPM_ID_SUSPEND_RESUME",
1218 					"RTPM_ID_DW_TX_HW_ENQUEUE",
1219 					"RTPM_ID_HAL_REO_CMD",
1220 					"RTPM_ID_DP_PRINT_RING_STATS",
1221 					"RTPM_ID_PM_STOP",
1222 					"RTPM_ID_CONN_DISCONNECT",
1223 					"RTPM_ID_SOC_REMOVE",
1224 					"RTPM_ID_DRIVER_UNLOAD",
1225 					"RTPM_ID_CE_INTR_HANDLER",
1226 					"RTPM_ID_WAKE_INTR_HANDLER",
1227 					"RTPM_ID_SOC_IDLE_SHUTDOWN",
1228 					"RTPM_ID_MAX"};
1229 
1230 	return (char *)strings[id];
1231 }
1232 
1233 /**
1234  * enum hif_ep_vote_type - hif ep vote type
 * @HIF_EP_VOTE_DP_ACCESS: vote type is specific DP
 * @HIF_EP_VOTE_NONDP_ACCESS: ep vote for over all access
1237  */
1238 enum hif_ep_vote_type {
1239 	HIF_EP_VOTE_DP_ACCESS,
1240 	HIF_EP_VOTE_NONDP_ACCESS
1241 };
1242 
1243 /**
1244  * enum hif_ep_vote_access - hif ep vote access
 * @HIF_EP_VOTE_ACCESS_ENABLE: Enable ep voting
 * @HIF_EP_VOTE_INTERMEDIATE_ACCESS: allow during transition
 * @HIF_EP_VOTE_ACCESS_DISABLE: disable ep voting
1248  */
1249 enum hif_ep_vote_access {
1250 	HIF_EP_VOTE_ACCESS_ENABLE,
1251 	HIF_EP_VOTE_INTERMEDIATE_ACCESS,
1252 	HIF_EP_VOTE_ACCESS_DISABLE
1253 };
1254 
1255 /**
1256  * enum hif_pm_link_state - hif link state
 * @HIF_PM_LINK_STATE_DOWN: hif link state is down
 * @HIF_PM_LINK_STATE_UP: hif link state is up
1259  */
1260 enum hif_pm_link_state {
1261 	HIF_PM_LINK_STATE_DOWN,
1262 	HIF_PM_LINK_STATE_UP
1263 };
1264 
1265 /**
1266  * enum hif_pm_htc_stats - hif runtime PM stats for HTC layer
1267  * HIF_PM_HTC_STATS_GET_HTT_RESPONSE: PM stats for RTPM GET for HTT packets
1268 				      with response
1269  * HIF_PM_HTC_STATS_GET_HTT_NO_RESPONSE: PM stats for RTPM GET for HTT packets
1270 					 with no response
1271  * HIF_PM_HTC_STATS_PUT_HTT_RESPONSE: PM stats for RTPM PUT for HTT packets
1272 				      with response
1273  * HIF_PM_HTC_STATS_PUT_HTT_NO_RESPONSE: PM stats for RTPM PUT for HTT packets
1274 					 with no response
1275  * HIF_PM_HTC_STATS_PUT_HTT_ERROR: PM stats for RTPM PUT for failed HTT packets
1276  * HIF_PM_HTC_STATS_PUT_HTC_CLEANUP: PM stats for RTPM PUT during HTC cleanup
1277  * HIF_PM_HTC_STATS_GET_HTC_KICK_QUEUES: PM stats for RTPM GET done during
1278  *                                       htc_kick_queues()
1279  * HIF_PM_HTC_STATS_PUT_HTC_KICK_QUEUES: PM stats for RTPM PUT done during
1280  *                                       htc_kick_queues()
1281  * HIF_PM_HTC_STATS_GET_HTT_FETCH_PKTS: PM stats for RTPM GET while fetching
1282  *                                      HTT packets from endpoint TX queue
1283  * HIF_PM_HTC_STATS_PUT_HTT_FETCH_PKTS: PM stats for RTPM PUT while fetching
1284  *                                      HTT packets from endpoint TX queue
1285  */
1286 enum hif_pm_htc_stats {
1287 	HIF_PM_HTC_STATS_GET_HTT_RESPONSE,
1288 	HIF_PM_HTC_STATS_GET_HTT_NO_RESPONSE,
1289 	HIF_PM_HTC_STATS_PUT_HTT_RESPONSE,
1290 	HIF_PM_HTC_STATS_PUT_HTT_NO_RESPONSE,
1291 	HIF_PM_HTC_STATS_PUT_HTT_ERROR,
1292 	HIF_PM_HTC_STATS_PUT_HTC_CLEANUP,
1293 	HIF_PM_HTC_STATS_GET_HTC_KICK_QUEUES,
1294 	HIF_PM_HTC_STATS_PUT_HTC_KICK_QUEUES,
1295 	HIF_PM_HTC_STATS_GET_HTT_FETCH_PKTS,
1296 	HIF_PM_HTC_STATS_PUT_HTT_FETCH_PKTS,
1297 };
1298 
1299 #ifdef FEATURE_RUNTIME_PM
1300 struct hif_pm_runtime_lock;
1301 
1302 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
1303 int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
1304 			    wlan_rtpm_dbgid rtpm_dbgid);
1305 int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
1306 				    wlan_rtpm_dbgid rtpm_dbgid);
1307 int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx,
1308 				  wlan_rtpm_dbgid rtpm_dbgid);
1309 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx,
1310 		       wlan_rtpm_dbgid rtpm_dbgid,
1311 		       bool is_critical_ctx);
1312 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
1313 				 wlan_rtpm_dbgid rtpm_dbgid);
1314 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx,
1315 		       wlan_rtpm_dbgid rtpm_dbgid);
1316 int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
1317 			      wlan_rtpm_dbgid rtpm_dbgid);
1318 void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx);
1319 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
1320 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
1321 			struct hif_pm_runtime_lock *lock);
1322 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
1323 		struct hif_pm_runtime_lock *lock);
1324 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
1325 		struct hif_pm_runtime_lock *lock);
1326 bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx);
1327 void hif_pm_runtime_suspend_lock(struct hif_opaque_softc *hif_ctx);
1328 void hif_pm_runtime_suspend_unlock(struct hif_opaque_softc *hif_ctx);
1329 int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx);
1330 void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
1331 					  int val);
1332 void hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx);
1333 void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
1334 int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
1335 qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx);
1336 int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx,
1337 			       wlan_rtpm_dbgid rtpm_dbgid);
1338 void hif_pm_runtime_update_stats(struct hif_opaque_softc *hif_ctx,
1339 				 wlan_rtpm_dbgid rtpm_dbgid,
1340 				 enum hif_pm_htc_stats stats);
1341 
1342 /**
1343  * hif_pm_set_link_state() - set link state during RTPM
1344  * @hif_sc: HIF Context
1345  *
1346  * Return: None
1347  */
1348 void hif_pm_set_link_state(struct hif_opaque_softc *hif_handle, uint8_t val);
1349 
1350 /**
1351  * hif_is_link_state_up() - Is link state up
1352  * @hif_sc: HIF Context
1353  *
1354  * Return: 1 link is up, 0 link is down
1355  */
1356 uint8_t hif_pm_get_link_state(struct hif_opaque_softc *hif_handle);
1357 #else
1358 struct hif_pm_runtime_lock {
1359 	const char *name;
1360 };
1361 static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
1362 static inline int
1363 hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
1364 			wlan_rtpm_dbgid rtpm_dbgid)
1365 { return 0; }
1366 static inline int
1367 hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
1368 				wlan_rtpm_dbgid rtpm_dbgid)
1369 { return 0; }
1370 static inline int
1371 hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx,
1372 			      wlan_rtpm_dbgid rtpm_dbgid)
1373 { return 0; }
1374 static inline void
1375 hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
1376 			    wlan_rtpm_dbgid rtpm_dbgid)
1377 {}
1378 
1379 static inline int
1380 hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx, wlan_rtpm_dbgid rtpm_dbgid,
1381 		   bool is_critical_ctx)
1382 { return 0; }
1383 static inline int
1384 hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx, wlan_rtpm_dbgid rtpm_dbgid)
1385 { return 0; }
1386 static inline int
1387 hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
1388 			  wlan_rtpm_dbgid rtpm_dbgid)
1389 { return 0; }
/* Runtime PM disabled: marking last-busy is a no-op.  The stray ';' after
 * the empty body was an invalid file-scope declaration; dropped.
 */
static inline void
hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx) {}
1392 static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
1393 					const char *name)
1394 { return 0; }
1395 static inline void
1396 hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
1397 			struct hif_pm_runtime_lock *lock) {}
1398 
1399 static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
1400 		struct hif_pm_runtime_lock *lock)
1401 { return 0; }
1402 static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
1403 		struct hif_pm_runtime_lock *lock)
1404 { return 0; }
1405 static inline bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
1406 { return false; }
1407 static inline void
1408 hif_pm_runtime_suspend_lock(struct hif_opaque_softc *hif_ctx)
1409 { return; }
1410 static inline void
1411 hif_pm_runtime_suspend_unlock(struct hif_opaque_softc *hif_ctx)
1412 { return; }
1413 static inline int
1414 hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
1415 { return 0; }
1416 static inline void
1417 hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx, int val)
1418 { return; }
1419 static inline void
1420 hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx)
1421 { return; }
/* Runtime PM disabled: marking DP RX busy is a no-op.  The stray ';' after
 * the empty body was an invalid file-scope declaration; dropped.
 */
static inline void
hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx) {}
1424 static inline int
1425 hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
1426 { return 0; }
1427 static inline qdf_time_t
1428 hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
1429 { return 0; }
1430 static inline int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx,
1431 					     wlan_rtpm_dbgid rtpm_dbgid)
1432 { return 0; }
1433 static inline
1434 void hif_pm_set_link_state(struct hif_opaque_softc *hif_handle, uint8_t val)
1435 {}
1436 
1437 static inline
1438 void hif_pm_runtime_update_stats(struct hif_opaque_softc *hif_ctx,
1439 				 wlan_rtpm_dbgid rtpm_dbgid,
1440 				 enum hif_pm_htc_stats stats)
1441 {}
1442 #endif
1443 
1444 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
1445 				 bool is_packet_log_enabled);
1446 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
1447 
1448 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
1449 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
1450 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
1451 
1452 #ifdef IPA_OFFLOAD
1453 /**
1454  * hif_get_ipa_hw_type() - get IPA hw type
1455  *
1456  * This API return the IPA hw type.
1457  *
1458  * Return: IPA hw type
1459  */
static inline
enum ipa_hw_type hif_get_ipa_hw_type(void)
{
	/* Delegate directly to the IPA driver query */
	return ipa_get_hw_type();
}
1465 
1466 /**
1467  * hif_get_ipa_present() - get IPA hw status
1468  *
1469  * This API return the IPA hw status.
1470  *
1471  * Return: true if IPA is present or false otherwise
1472  */
1473 static inline
1474 bool hif_get_ipa_present(void)
1475 {
1476 	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
1477 		return true;
1478 	else
1479 		return false;
1480 }
1481 #endif
1482 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
1483 /**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
1486  */
1487 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
1488 
1489 /**
1490  * hif_bus_late_resume() - resume non wmi traffic
1491  * @context: hif context
1492  */
1493 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
1494 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
1495 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
1496 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
1497 
1498 /**
1499  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
1500  * @hif_ctx: an opaque HIF handle to use
1501  *
1502  * As opposed to the standard hif_irq_enable, this function always applies to
1503  * the APPS side kernel interrupt handling.
1504  *
1505  * Return: errno
1506  */
1507 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
1508 
1509 /**
1510  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
1511  * @hif_ctx: an opaque HIF handle to use
1512  *
1513  * As opposed to the standard hif_irq_disable, this function always applies to
1514  * the APPS side kernel interrupt handling.
1515  *
1516  * Return: errno
1517  */
1518 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
1519 
1520 /**
1521  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
1522  * @hif_ctx: an opaque HIF handle to use
1523  *
1524  * As opposed to the standard hif_irq_enable, this function always applies to
1525  * the APPS side kernel interrupt handling.
1526  *
1527  * Return: errno
1528  */
1529 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
1530 
1531 /**
1532  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
1533  * @hif_ctx: an opaque HIF handle to use
1534  *
1535  * As opposed to the standard hif_irq_disable, this function always applies to
1536  * the APPS side kernel interrupt handling.
1537  *
1538  * Return: errno
1539  */
1540 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
1541 
1542 /**
1543  * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side
1544  * @hif_ctx: an opaque HIF handle to use
1545  *
1546  * This function always applies to the APPS side kernel interrupt handling
1547  * to wake the system from suspend.
1548  *
1549  * Return: errno
1550  */
1551 int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx);
1552 
1553 /**
1554  * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side
1555  * @hif_ctx: an opaque HIF handle to use
1556  *
1557  * This function always applies to the APPS side kernel interrupt handling
1558  * to disable the wake irq.
1559  *
1560  * Return: errno
1561  */
1562 int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx);
1563 
1564 /**
1565  * hif_apps_enable_irqs_except_wake_irq() - Enables all irqs except wake_irq
1566  * @hif_ctx: an opaque HIF handle to use
1567  *
1568  * As opposed to the standard hif_irq_enable, this function always applies to
1569  * the APPS side kernel interrupt handling.
1570  *
1571  * Return: errno
1572  */
1573 int hif_apps_enable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1574 
1575 /**
1576  * hif_apps_disable_irqs_except_wake_irq() - Disables all irqs except wake_irq
1577  * @hif_ctx: an opaque HIF handle to use
1578  *
1579  * As opposed to the standard hif_irq_disable, this function always applies to
1580  * the APPS side kernel interrupt handling.
1581  *
1582  * Return: errno
1583  */
1584 int hif_apps_disable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1585 
1586 #ifdef FEATURE_RUNTIME_PM
1587 void hif_print_runtime_pm_prevent_list(struct hif_opaque_softc *hif_ctx);
1588 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1589 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
1590 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1591 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
1592 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
1593 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
1594 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
1595 #else
1596 static inline void
1597 hif_print_runtime_pm_prevent_list(struct hif_opaque_softc *hif_ctx)
1598 {}
1599 #endif
1600 
1601 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
1602 int hif_dump_registers(struct hif_opaque_softc *scn);
1603 int ol_copy_ramdump(struct hif_opaque_softc *scn);
1604 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
1605 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1606 		     u32 *revision, const char **target_name);
1607 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
1608 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
1609 						   scn);
1610 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
1611 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
1612 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
1613 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1614 			   hif_target_status);
1615 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1616 			 struct hif_config_info *cfg);
1617 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
1618 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1619 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
1620 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1621 			   uint32_t transfer_id, u_int32_t len);
1622 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
1623 	uint32_t transfer_id, uint32_t download_len);
1624 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
1625 void hif_ce_war_disable(void);
1626 void hif_ce_war_enable(void);
1627 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
1628 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
1629 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
1630 		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
1631 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
1632 		uint32_t pipe_num);
1633 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
1634 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
1635 
1636 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
1637 				int rx_bundle_cnt);
1638 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
1639 
1640 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
1641 
1642 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
1643 
1644 enum hif_exec_type {
1645 	HIF_EXEC_NAPI_TYPE,
1646 	HIF_EXEC_TASKLET_TYPE,
1647 };
1648 
1649 typedef uint32_t (*ext_intr_handler)(void *, uint32_t);
1650 
1651 /**
1652  * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
1653  * @softc: hif opaque context owning the exec context
1654  * @id: the id of the interrupt context
1655  *
1656  * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
1657  *         'id' registered with the OS
1658  */
1659 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
1660 				uint8_t id);
1661 
1662 /**
 * hif_configure_ext_group_interrupts() - Configure ext group interrupts
1664  * @hif_ctx: hif opaque context
1665  *
1666  * Return: QDF_STATUS
1667  */
1668 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1669 
1670 /**
 * hif_deconfigure_ext_group_interrupts() - Deconfigure ext group interrupts
1672  * @hif_ctx: hif opaque context
1673  *
1674  * Return: None
1675  */
1676 void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1677 
1678 /**
1679  * hif_register_ext_group() - API to register external group
1680  * interrupt handler.
1681  * @hif_ctx : HIF Context
1682  * @numirq: number of irq's in the group
1683  * @irq: array of irq values
1684  * @handler: callback interrupt handler function
1685  * @cb_ctx: context to passed in callback
1686  * @type: napi vs tasklet
1687  *
1688  * Return: QDF_STATUS
1689  */
1690 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
1691 				  uint32_t numirq, uint32_t irq[],
1692 				  ext_intr_handler handler,
1693 				  void *cb_ctx, const char *context_name,
1694 				  enum hif_exec_type type, uint32_t scale);
1695 
1696 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
1697 				const char *context_name);
1698 
1699 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1700 				u_int8_t pipeid,
1701 				struct hif_msg_callbacks *callbacks);
1702 
1703 /**
1704  * hif_print_napi_stats() - Display HIF NAPI stats
1705  * @hif_ctx - HIF opaque context
1706  *
1707  * Return: None
1708  */
1709 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
1710 
1711 /* hif_clear_napi_stats() - function clears the stats of the
1712  * latency when called.
1713  * @hif_ctx - the HIF context to assign the callback to
1714  *
1715  * Return: None
1716  */
1717 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
1718 
1719 #ifdef __cplusplus
1720 }
1721 #endif
1722 
1723 #ifdef FORCE_WAKE
1724 /**
1725  * hif_force_wake_request() - Function to wake from power collapse
1726  * @handle: HIF opaque handle
1727  *
1728  * Description: API to check if the device is awake or not before
1729  * read/write to BAR + 4K registers. If device is awake return
1730  * success otherwise write '1' to
1731  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG which will interrupt
1732  * the device and does wakeup the PCI and MHI within 50ms
1733  * and then the device writes a value to
1734  * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
1735  * handshake process to let the host know the device is awake.
1736  *
1737  * Return: zero - success/non-zero - failure
1738  */
1739 int hif_force_wake_request(struct hif_opaque_softc *handle);
1740 
1741 /**
1742  * hif_force_wake_release() - API to release/reset the SOC wake register
1743  * from interrupting the device.
1744  * @handle: HIF opaque handle
1745  *
1746  * Description: API to set the
1747  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
1748  * to release the interrupt line.
1749  *
1750  * Return: zero - success/non-zero - failure
1751  */
1752 int hif_force_wake_release(struct hif_opaque_softc *handle);
1753 #else
/* FORCE_WAKE not enabled: nothing to do, report success. */
static inline
int hif_force_wake_request(struct hif_opaque_softc *handle)
{
	return 0;
}
1759 
/* FORCE_WAKE not enabled: nothing to release, report success. */
static inline
int hif_force_wake_release(struct hif_opaque_softc *handle)
{
	return 0;
}
1765 #endif /* FORCE_WAKE */
1766 
1767 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
1768 /**
1769  * hif_prevent_link_low_power_states() - Prevent from going to low power states
1770  * @hif - HIF opaque context
1771  *
1772  * Return: 0 on success. Error code on failure.
1773  */
1774 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);
1775 
1776 /**
1777  * hif_allow_link_low_power_states() - Allow link to go to low power states
1778  * @hif - HIF opaque context
1779  *
1780  * Return: None
1781  */
1782 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);
1783 
1784 #else
1785 
/* FEATURE_HAL_DELAYED_REG_WRITE disabled: nothing to prevent, succeed. */
static inline
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	return 0;
}
1791 
/* FEATURE_HAL_DELAYED_REG_WRITE disabled: no-op. */
static inline
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
{
}
1796 #endif
1797 
1798 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
1799 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle);
1800 
1801 /**
1802  * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
 * @hif_ctx: the HIF context to assign the callback to
 * @callback: the callback to assign
 * @priv: the private data to pass to the callback when invoked
1806  *
1807  * Return: None
1808  */
1809 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
1810 			       void (*callback)(void *),
1811 			       void *priv);
1812 /*
1813  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
1814  * for defined here
1815  */
1816 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1817 ssize_t hif_dump_desc_trace_buf(struct device *dev,
1818 				struct device_attribute *attr, char *buf);
1819 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1820 					const char *buf, size_t size);
1821 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
1822 				const char *buf, size_t size);
1823 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
1824 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
1825 #endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
1826 
1827 /**
1828  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
1829  * @hif: hif context
1830  * @ce_service_max_yield_time: CE service max yield time to set
1831  *
 * This API stores CE service max yield time in hif context based
 * on ini value.
1834  *
1835  * Return: void
1836  */
1837 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
1838 				       uint32_t ce_service_max_yield_time);
1839 
1840 /**
1841  * hif_get_ce_service_max_yield_time() - get CE service max yield time
1842  * @hif: hif context
1843  *
1844  * This API returns CE service max yield time.
1845  *
1846  * Return: CE service max yield time
1847  */
1848 unsigned long long
1849 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
1850 
1851 /**
1852  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
1853  * @hif: hif context
1854  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
1855  *
1856  * This API stores CE service max rx ind flush in hif context based
1857  * on ini value.
1858  *
1859  * Return: void
1860  */
1861 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
1862 					 uint8_t ce_service_max_rx_ind_flush);
1863 
1864 #ifdef OL_ATH_SMART_LOGGING
1865 /*
1866  * hif_log_ce_dump() - Copy all the CE DEST ring to buf
1867  * @scn : HIF handler
1868  * @buf_cur: Current pointer in ring buffer
1869  * @buf_init:Start of the ring buffer
1870  * @buf_sz: Size of the ring buffer
1871  * @ce: Copy Engine id
1872  * @skb_sz: Max size of the SKB buffer to be copied
1873  *
1874  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
1875  * and buffers pointed by them in to the given buf
1876  *
1877  * Return: Current pointer in ring buffer
1878  */
1879 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
1880 			 uint8_t *buf_init, uint32_t buf_sz,
1881 			 uint32_t ce, uint32_t skb_sz);
1882 #endif /* OL_ATH_SMART_LOGGING */
1883 
1884 /*
1885  * hif_softc_to_hif_opaque_softc - API to convert hif_softc handle
1886  * to hif_opaque_softc handle
1887  * @hif_handle - hif_softc type
1888  *
1889  * Return: hif_opaque_softc type
1890  */
1891 static inline struct hif_opaque_softc *
1892 hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
1893 {
1894 	return (struct hif_opaque_softc *)hif_handle;
1895 }
1896 
1897 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
1898 QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx);
1899 void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx);
1900 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx);
1901 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1902 			    uint8_t type, uint8_t access);
1903 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1904 			       uint8_t type);
1905 #else
/* Stubs used when EP vote access tracking (HIF_IPCI +
 * FEATURE_HAL_DELAYED_REG_WRITE) is compiled out: access is never
 * restricted, so prevention succeeds and the set/allow calls are no-ops.
 */
static inline QDF_STATUS
hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type, uint8_t access)
{
}

static inline uint8_t
hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type)
{
	/* access is always granted when vote tracking is compiled out */
	return HIF_EP_VOTE_ACCESS_ENABLE;
}
1934 #endif
1935 
1936 #ifdef FORCE_WAKE
1937 /**
1938  * hif_srng_init_phase(): Indicate srng initialization phase
1939  * to avoid force wake as UMAC power collapse is not yet
1940  * enabled
1941  * @hif_ctx: hif opaque handle
1942  * @init_phase: initialization phase
1943  *
1944  * Return:  None
1945  */
1946 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
1947 			 bool init_phase);
1948 #else
/* FORCE_WAKE disabled: no init-phase bookkeeping is required */
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
}
1954 #endif /* FORCE_WAKE */
1955 
1956 #ifdef HIF_IPCI
1957 /**
1958  * hif_shutdown_notifier_cb - Call back for shutdown notifier
1959  * @ctx: hif handle
1960  *
1961  * Return:  None
1962  */
1963 void hif_shutdown_notifier_cb(void *ctx);
1964 #else
/* HIF_IPCI disabled: no shutdown notifier is registered, stub it out */
static inline
void hif_shutdown_notifier_cb(void *ctx)
{
}
1969 #endif /* HIF_IPCI */
1970 
1971 #ifdef HIF_CE_LOG_INFO
1972 /**
1973  * hif_log_ce_info() - API to log ce info
1974  * @scn: hif handle
1975  * @data: hang event data buffer
1976  * @offset: offset at which data needs to be written
1977  *
1978  * Return:  None
1979  */
1980 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
1981 		     unsigned int *offset);
1982 #else
/* HIF_CE_LOG_INFO disabled: nothing is appended to the hang event buffer */
static inline
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset)
{
}
1988 #endif
1989 
1990 #ifdef HIF_CPU_PERF_AFFINE_MASK
1991 /**
1992  * hif_config_irq_set_perf_affinity_hint() - API to set affinity
1993  * @hif_ctx: hif opaque handle
1994  *
1995  * This function is used to move the WLAN IRQs to perf cores in
1996  * case of defconfig builds.
1997  *
1998  * Return:  None
1999  */
2000 void hif_config_irq_set_perf_affinity_hint(
2001 	struct hif_opaque_softc *hif_ctx);
2002 
2003 #else
/* HIF_CPU_PERF_AFFINE_MASK disabled: IRQs keep their default affinity */
static inline void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx)
{
}
2008 #endif
2009 
2010 /**
2011  * hif_apps_grp_irqs_enable() - enable ext grp irqs
 * @hif_ctx: HIF opaque context
2013  *
2014  * Return: 0 on success. Error code on failure.
2015  */
2016 int hif_apps_grp_irqs_enable(struct hif_opaque_softc *hif_ctx);
2017 
2018 /**
2019  * hif_apps_grp_irqs_disable() - disable ext grp irqs
 * @hif_ctx: HIF opaque context
2021  *
2022  * Return: 0 on success. Error code on failure.
2023  */
2024 int hif_apps_grp_irqs_disable(struct hif_opaque_softc *hif_ctx);
2025 
2026 /**
2027  * hif_disable_grp_irqs() - disable ext grp irqs
 * @scn: HIF opaque context
2029  *
2030  * Return: 0 on success. Error code on failure.
2031  */
2032 int hif_disable_grp_irqs(struct hif_opaque_softc *scn);
2033 
2034 /**
2035  * hif_enable_grp_irqs() - enable ext grp irqs
 * @scn: HIF opaque context
2037  *
2038  * Return: 0 on success. Error code on failure.
2039  */
2040 int hif_enable_grp_irqs(struct hif_opaque_softc *scn);
2041 
2042 enum hif_credit_exchange_type {
2043 	HIF_REQUEST_CREDIT,
2044 	HIF_PROCESS_CREDIT_REPORT,
2045 };
2046 
2047 enum hif_detect_latency_type {
2048 	HIF_DETECT_TASKLET,
2049 	HIF_DETECT_CREDIT,
2050 	HIF_DETECT_UNKNOWN
2051 };
2052 
2053 #ifdef HIF_DETECTION_LATENCY_ENABLE
2054 void hif_latency_detect_credit_record_time(
2055 	enum hif_credit_exchange_type type,
2056 	struct hif_opaque_softc *hif_ctx);
2057 
2058 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx);
2059 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx);
2060 void hif_tasklet_latency(struct hif_softc *scn, bool from_timer);
2061 void hif_credit_latency(struct hif_softc *scn, bool from_timer);
2062 void hif_check_detection_latency(struct hif_softc *scn,
2063 				 bool from_timer,
2064 				 uint32_t bitmap_type);
2065 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value);
2066 #else
/* Stubs used when HIF_DETECTION_LATENCY_ENABLE is compiled out: latency
 * detection timers and credit/tasklet bookkeeping become no-ops.
 */
static inline
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx)
{}
static inline
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{}

static inline
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{}
2089 #endif
2090 
2091 #ifdef SYSTEM_PM_CHECK
2092 /**
2093  * __hif_system_pm_set_state() - Set system pm state
2094  * @hif: hif opaque handle
2095  * @state: system state
2096  *
2097  * Return:  None
2098  */
2099 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
2100 			       enum hif_system_pm_state state);
2101 
2102 /**
2103  * hif_system_pm_set_state_on() - Set system pm state to ON
2104  * @hif: hif opaque handle
2105  *
2106  * Return:  None
2107  */
2108 static inline
2109 void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
2110 {
2111 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_ON);
2112 }
2113 
2114 /**
2115  * hif_system_pm_set_state_resuming() - Set system pm state to resuming
2116  * @hif: hif opaque handle
2117  *
2118  * Return:  None
2119  */
2120 static inline
2121 void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
2122 {
2123 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_RESUMING);
2124 }
2125 
2126 /**
2127  * hif_system_pm_set_state_suspending() - Set system pm state to suspending
2128  * @hif: hif opaque handle
2129  *
2130  * Return:  None
2131  */
2132 static inline
2133 void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
2134 {
2135 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDING);
2136 }
2137 
2138 /**
2139  * hif_system_pm_set_state_suspended() - Set system pm state to suspended
2140  * @hif: hif opaque handle
2141  *
2142  * Return:  None
2143  */
2144 static inline
2145 void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
2146 {
2147 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDED);
2148 }
2149 
2150 /**
2151  * hif_system_pm_get_state() - Get system pm state
2152  * @hif: hif opaque handle
2153  *
2154  * Return:  system state
2155  */
2156 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif);
2157 
2158 /**
2159  * hif_system_pm_state_check() - Check system state and trigger resume
2160  *  if required
2161  * @hif: hif opaque handle
2162  *
2163  * Return: 0 if system is in on state else error code
2164  */
2165 int hif_system_pm_state_check(struct hif_opaque_softc *hif);
2166 #else
/* Stubs used when SYSTEM_PM_CHECK is compiled out: state setters are
 * no-ops and state queries report "on"/success (0).
 */
static inline
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
}

static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
}

static inline
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline int hif_system_pm_state_check(struct hif_opaque_softc *hif)
{
	return 0;
}
2203 #endif
2204 
2205 #ifdef FEATURE_IRQ_AFFINITY
2206 /**
2207  * hif_set_grp_intr_affinity() - API to set affinity for grp
2208  *  intrs set in the bitmap
2209  * @scn: hif handle
2210  * @grp_intr_bitmask: grp intrs for which perf affinity should be
2211  *  applied
2212  * @perf: affine to perf or non-perf cluster
2213  *
2214  * Return: None
2215  */
2216 void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
2217 			       uint32_t grp_intr_bitmask, bool perf);
2218 #else
/* FEATURE_IRQ_AFFINITY disabled: group interrupts keep default affinity */
static inline
void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
			       uint32_t grp_intr_bitmask, bool perf)
{
}
2224 #endif
2225 /**
2226  * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
 * @scn: hif opaque handle
2228  *
2229  * Description:
2230  *   Gets number of WMI EPs configured in target svc map. Since EP map
2231  *   include IN and OUT direction pipes, count only OUT pipes to get EPs
2232  *   configured for WMI service.
2233  *
2234  * Return:
2235  *  uint8_t: count for WMI eps in target svc map
2236  */
2237 uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *scn);
2238 #endif /* _HIF_H_ */
2239