xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision cfe92008b4a1e9b187ef1ed3edcb3527f2eb9664)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _HIF_H_
21 #define _HIF_H_
22 
23 #ifdef __cplusplus
24 extern "C" {
25 #endif /* __cplusplus */
26 
27 /* Header files */
28 #include <qdf_status.h>
29 #include "qdf_ipa.h"
30 #include "qdf_nbuf.h"
31 #include "qdf_lro.h"
32 #include "ol_if_athvar.h"
33 #include <linux/platform_device.h>
34 #ifdef HIF_PCI
35 #include <linux/pci.h>
36 #endif /* HIF_PCI */
37 #ifdef HIF_USB
38 #include <linux/usb.h>
39 #endif /* HIF_USB */
40 #ifdef IPA_OFFLOAD
41 #include <linux/ipa.h>
42 #endif
43 #include "cfg_ucfg_api.h"
44 #include "qdf_dev.h"
45 #include <wlan_init_cfg.h>
46 
47 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
48 
49 typedef void __iomem *A_target_id_t;
50 typedef void *hif_handle_t;
51 
52 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
53 #define HIF_WORK_DRAIN_WAIT_CNT 50
54 
55 #define HIF_EP_WAKE_RESET_WAIT_CNT 10
56 #endif
57 
58 #define HIF_TYPE_AR6002   2
59 #define HIF_TYPE_AR6003   3
60 #define HIF_TYPE_AR6004   5
61 #define HIF_TYPE_AR9888   6
62 #define HIF_TYPE_AR6320   7
63 #define HIF_TYPE_AR6320V2 8
64 /* For attaching Peregrine 2.0 board host_reg_tbl only */
65 #define HIF_TYPE_AR9888V2 9
66 #define HIF_TYPE_ADRASTEA 10
67 #define HIF_TYPE_AR900B 11
68 #define HIF_TYPE_QCA9984 12
69 #define HIF_TYPE_QCA9888 14
70 #define HIF_TYPE_QCA8074 15
71 #define HIF_TYPE_QCA6290 16
72 #define HIF_TYPE_QCN7605 17
73 #define HIF_TYPE_QCA6390 18
74 #define HIF_TYPE_QCA8074V2 19
75 #define HIF_TYPE_QCA6018  20
76 #define HIF_TYPE_QCN9000 21
77 #define HIF_TYPE_QCA6490 22
78 #define HIF_TYPE_QCA6750 23
79 #define HIF_TYPE_QCA5018 24
80 #define HIF_TYPE_QCN6122 25
81 #define HIF_TYPE_KIWI 26
82 #define HIF_TYPE_QCN9224 27
83 #define HIF_TYPE_QCA9574 28
84 #define HIF_TYPE_MANGO 29
85 #define HIF_TYPE_QCA5332 30
86 #define HIF_TYPE_QCN9160 31
87 #define HIF_TYPE_PEACH 32
88 #define HIF_TYPE_WCN6450 33
89 #define HIF_TYPE_QCN6432 34
90 
91 #define DMA_COHERENT_MASK_DEFAULT   37
92 
93 #ifdef IPA_OFFLOAD
94 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
95 #endif
96 
/* enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 *
 * Values are sequential starting at host2wbm_desc_feed = 16; the order of
 * enumerators is therefore significant and must not be changed.
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring4,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
	txmon2host_monitor_destination_mac3,
	txmon2host_monitor_destination_mac2,
	txmon2host_monitor_destination_mac1,
	host2tx_monitor_ring1,
};
143 
144 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
/* enum hif_legacy_pci_irq - interrupt sources used with legacy PCI
 * (line-based) interrupts.
 *
 * Values are sequential starting at ce0 = 0; the order of enumerators is
 * therefore significant and must not be changed.
 */
enum hif_legacy_pci_irq {
	ce0,
	ce1,
	ce2,
	ce3,
	ce4,
	ce5,
	ce6,
	ce7,
	ce8,
	ce9,
	ce10,
	ce11,
	ce12,
	ce13,
	ce14,
	ce15,
	reo2sw8_intr2,
	reo2sw7_intr2,
	reo2sw6_intr2,
	reo2sw5_intr2,
	reo2sw4_intr2,
	reo2sw3_intr2,
	reo2sw2_intr2,
	reo2sw1_intr2,
	reo2sw0_intr2,
	reo2sw8_intr,
	reo2sw7_intr,
	reo2sw6_inrr,
	/* Historical misspelling of "reo2sw6_intr" above; kept so existing
	 * users still compile. The correctly spelled alias below has the
	 * same value and does not shift any subsequent enumerator.
	 */
	reo2sw6_intr = reo2sw6_inrr,
	reo2sw5_intr,
	reo2sw4_intr,
	reo2sw3_intr,
	reo2sw2_intr,
	reo2sw1_intr,
	reo2sw0_intr,
	reo2status_intr2,
	reo_status,
	reo2rxdma_out_2,
	reo2rxdma_out_1,
	reo_cmd,
	sw2reo6,
	sw2reo5,
	sw2reo1,
	sw2reo,
	rxdma2reo_mlo_0_dst_ring1,
	rxdma2reo_mlo_0_dst_ring0,
	rxdma2reo_mlo_1_dst_ring1,
	rxdma2reo_mlo_1_dst_ring0,
	rxdma2reo_dst_ring1,
	rxdma2reo_dst_ring0,
	rxdma2sw_dst_ring1,
	rxdma2sw_dst_ring0,
	rxdma2release_dst_ring1,
	rxdma2release_dst_ring0,
	sw2rxdma_2_src_ring,
	sw2rxdma_1_src_ring,
	sw2rxdma_0,
	wbm2sw6_release2,
	wbm2sw5_release2,
	wbm2sw4_release2,
	wbm2sw3_release2,
	wbm2sw2_release2,
	wbm2sw1_release2,
	wbm2sw0_release2,
	wbm2sw6_release,
	wbm2sw5_release,
	wbm2sw4_release,
	wbm2sw3_release,
	wbm2sw2_release,
	wbm2sw1_release,
	wbm2sw0_release,
	wbm2sw_link,
	wbm_error_release,
	sw2txmon_src_ring,
	sw2rxmon_src_ring,
	txmon2sw_p1_intr1,
	txmon2sw_p1_intr0,
	txmon2sw_p0_dest1,
	txmon2sw_p0_dest0,
	rxmon2sw_p1_intr1,
	rxmon2sw_p1_intr0,
	rxmon2sw_p0_dest1,
	rxmon2sw_p0_dest0,
	sw_release,
	sw2tcl_credit2,
	sw2tcl_credit,
	sw2tcl4,
	sw2tcl5,
	sw2tcl3,
	sw2tcl2,
	sw2tcl1,
	sw2wbm1,
	misc_8,
	misc_7,
	misc_6,
	misc_5,
	misc_4,
	misc_3,
	misc_2,
	misc_1,
	misc_0,
};
247 #endif
248 
249 struct CE_state;
250 #ifdef QCA_WIFI_QCN9224
251 #define CE_COUNT_MAX 16
252 #else
253 #define CE_COUNT_MAX 12
254 #endif
255 
256 #ifndef HIF_MAX_GROUP
257 #define HIF_MAX_GROUP WLAN_CFG_INT_NUM_CONTEXTS
258 #endif
259 
260 #ifdef CONFIG_BERYLLIUM
261 #define HIF_MAX_GRP_IRQ 25
262 #else
263 #define HIF_MAX_GRP_IRQ 16
264 #endif
265 
266 #ifndef NAPI_YIELD_BUDGET_BASED
267 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
268 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
269 #endif
270 #else  /* NAPI_YIELD_BUDGET_BASED */
271 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
272 #endif /* NAPI_YIELD_BUDGET_BASED */
273 
274 #define QCA_NAPI_BUDGET    64
275 #define QCA_NAPI_DEF_SCALE  \
276 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
277 
278 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
279 /* NOTE: "napi->scale" can be changed,
280  * but this does not change the number of buckets
281  */
282 #define QCA_NAPI_NUM_BUCKETS 4
283 
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is re-enabled
 * @napi_workdone: cumulative of all work done reported by handler
 * @cpu_corrected: incremented when execution context runs on a different core
 *			than the one that its irq is affined to.
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: longest single poll service time observed
 *			(units follow the recording clock -- confirm at the
 *			point where this field is updated)
 * @poll_time_buckets: histogram of poll times for the napi
 *			(WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT builds only)
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};
313 
/* Number of buckets for the NAPI scheduling latency histogram */
#define HIF_SCHED_LATENCY_BUCKETS 8

/* Bucket for latency from 0 to 2 ms */
#define HIF_SCHED_LATENCY_BUCKET_0_2 2
/* Bucket for latency from 3 to 10 ms */
#define HIF_SCHED_LATENCY_BUCKET_3_10 10
/* Bucket for latency from 11 to 20 ms */
#define HIF_SCHED_LATENCY_BUCKET_11_20 20
/* Bucket for latency from 21 to 50 ms */
#define HIF_SCHED_LATENCY_BUCKET_21_50 50
/* Bucket for latency from 51 to 100 ms */
#define HIF_SCHED_LATENCY_BUCKET_51_100 100
/* Bucket for latency from 101 to 250 ms */
#define HIF_SCHED_LATENCY_BUCKET_101_250 250
/* Bucket for latency from 251 to 500 ms */
#define HIF_SCHED_LATENCY_BUCKET_251_500 500
331 
/**
 * struct qca_napi_info - per NAPI instance data structure
 * @netdev: dummy net_dev (not a real registered network device)
 * @hif_ctx: opaque HIF context this NAPI instance belongs to
 * @napi: kernel NAPI instance
 * @scale: work scale factor; currently set to the same value on all instances
 * @id: id of this NAPI instance
 * @cpu: cpu associated with this instance -- presumably the core the irq is
 *       affined to; confirm against the napi cpu management code
 * @irq: irq number serviced by this instance
 * @cpumask: cpu mask for this instance
 * @stats: statistics, one entry per possible cpu (NR_CPUS)
 * @offld_flush_cb: receive-offload flush callback; only present for data
 *                  rx CEs (RECEIVE_OFFLOAD builds only)
 * @rx_thread_napi: NAPI instance used from the rx thread
 *                  (RECEIVE_OFFLOAD builds only)
 * @rx_thread_netdev: dummy net_dev backing @rx_thread_napi
 *                    (RECEIVE_OFFLOAD builds only)
 * @lro_ctx: LRO context
 * @poll_start_time: napi poll service start time
 * @sched_latency_stats: napi schedule latency stats
 * @tstamp: napi schedule start timestamp
 *
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device    netdev; /* dummy net_dev */
	void                 *hif_ctx;
	struct napi_struct   napi;
	uint8_t              scale;   /* currently same on all instances */
	uint8_t              id;
	uint8_t              cpu;
	int                  irq;
	cpumask_t            cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct   rx_thread_napi;
	struct net_device    rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t        lro_ctx;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	unsigned long long poll_start_time;
#endif
#ifdef HIF_LATENCY_PROFILE_ENABLE
	uint64_t sched_latency_stats[HIF_SCHED_LATENCY_BUCKETS];
	uint64_t tstamp;
#endif
};
381 
/* Throughput classification of the napi subsystem; drives the irq
 * affinity & clock voting mode (see napi_mode in struct qca_napi_data).
 */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};

/* Hotplug state of a CPU core as tracked in the napi cpu table */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };
391 
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state:       up/down state of this core (enum qca_napi_cpu_state)
 * @core_id:     physical core id of the core
 * @cluster_id:  cluster this core belongs to
 * @core_mask:   mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq:    maximum clock this core can be clocked at
 *               same for all cpus of the same core.
 * @napis:       bitmap of napi instances on this core
 * @execs:       bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int			core_id;
	int			cluster_id;
	cpumask_t		core_mask;
	cpumask_t		thread_mask;
	unsigned int		max_freq;
	uint32_t		napis;
	uint32_t		execs;
	int			cluster_nxt;  /* index, not pointer */
};
423 
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: per-CE napi instance pointers, indexed by pipe/CE id
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: head of the little-cluster core chain -- presumably an index
 *              into @napi_cpu (cluster_nxt chaining); confirm
 * @bigcl_head: head of the big-cluster core chain -- presumably an index
 *              into @napi_cpu; confirm
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: napi feature flags -- semantics defined at the point of use; confirm
 */
struct qca_napi_data {
	struct               hif_softc *hif_softc;
	qdf_spinlock_t       lock;
	uint32_t             state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t             ce_map;
	uint32_t             exec_map;
	uint32_t             user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu  napi_cpu[NR_CPUS];
	int                  lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t              flags;
};
458 
/**
 * struct hif_config_info - Place Holder for HIF configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable Runtime PM (FEATURE_RUNTIME_PM builds only)
 * @runtime_pm_delay: Runtime PM Delay (FEATURE_RUNTIME_PM builds only;
 *                    units not visible here -- confirm at point of use)
 * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq,
 *                                    in nanoseconds
 *
 * Structure for holding HIF ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	uint8_t enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
	uint64_t rx_softirq_max_yield_duration_ns;
};
476 
477 /**
478  * struct hif_target_info - Target Information
479  * @target_version: Target Version
480  * @target_type: Target Type
481  * @target_revision: Target Revision
482  * @soc_version: SOC Version
483  * @hw_name: pointer to hardware name
484  *
485  * Structure to hold target information.
486  */
487 struct hif_target_info {
488 	uint32_t target_version;
489 	uint32_t target_type;
490 	uint32_t target_revision;
491 	uint32_t soc_version;
492 	char *hw_name;
493 };
494 
/* Opaque handle type exposed to HIF clients; internals live elsewhere.
 * NOTE(review): a struct with no members is a GNU C extension, not
 * standard C -- fine for this codebase's toolchain, but not portable.
 */
struct hif_opaque_softc {
};
497 
498 /**
499  * struct hif_ce_ring_info - CE ring information
500  * @ring_id: ring id
501  * @ring_dir: ring direction
502  * @num_entries: number of entries in ring
503  * @entry_size: ring entry size
504  * @ring_base_paddr: srng base physical address
505  * @hp_paddr: head pointer physical address
506  * @tp_paddr: tail pointer physical address
507  */
508 struct hif_ce_ring_info {
509 	uint8_t ring_id;
510 	uint8_t ring_dir;
511 	uint32_t num_entries;
512 	uint32_t entry_size;
513 	uint64_t ring_base_paddr;
514 	uint64_t hp_paddr;
515 	uint64_t tp_paddr;
516 };
517 
/**
 * struct hif_direct_link_ce_info - Direct Link CE information
 * @ce_id: CE id
 * @pipe_dir: Pipe direction
 * @ring_info: ring information
 */
struct hif_direct_link_ce_info {
	uint8_t ce_id;
	uint8_t pipe_dir;
	struct hif_ce_ring_info ring_info;
};
529 
530 /**
531  * enum hif_event_type - Type of DP events to be recorded
532  * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
533  * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event
534  * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event
535  * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
536  * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
537  * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
538  * @HIF_EVENT_BH_COMPLETE: NAPI POLL completion event
539  * @HIF_EVENT_BH_FORCE_BREAK: NAPI POLL force break event
540  * @HIF_EVENT_IRQ_DISABLE_EXPIRED: IRQ disable expired event
541  */
542 enum hif_event_type {
543 	HIF_EVENT_IRQ_TRIGGER,
544 	HIF_EVENT_TIMER_ENTRY,
545 	HIF_EVENT_TIMER_EXIT,
546 	HIF_EVENT_BH_SCHED,
547 	HIF_EVENT_SRNG_ACCESS_START,
548 	HIF_EVENT_SRNG_ACCESS_END,
549 	HIF_EVENT_BH_COMPLETE,
550 	HIF_EVENT_BH_FORCE_BREAK,
551 	HIF_EVENT_IRQ_DISABLE_EXPIRED,
552 	/* Do check hif_hist_skip_event_record when adding new events */
553 };
554 
555 /**
556  * enum hif_system_pm_state - System PM state
557  * @HIF_SYSTEM_PM_STATE_ON: System in active state
558  * @HIF_SYSTEM_PM_STATE_BUS_RESUMING: bus resume in progress as part of
559  *  system resume
560  * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDING: bus suspend in progress as part of
561  *  system suspend
562  * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDED: bus suspended as part of system suspend
563  */
564 enum hif_system_pm_state {
565 	HIF_SYSTEM_PM_STATE_ON,
566 	HIF_SYSTEM_PM_STATE_BUS_RESUMING,
567 	HIF_SYSTEM_PM_STATE_BUS_SUSPENDING,
568 	HIF_SYSTEM_PM_STATE_BUS_SUSPENDED,
569 };
570 
571 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
572 #define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP
573 
574 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
575 /* HIF_EVENT_HIST_MAX should always be power of 2 */
576 #define HIF_EVENT_HIST_MAX		512
577 
578 #define HIF_EVENT_HIST_ENABLE_MASK	0xFF
579 
/**
 * hif_get_log_timestamp() - timestamp source for DP event history records
 *
 * Debug-history builds (HIF_CONFIG_SLUB_DEBUG_ON/HIF_CE_DEBUG_DATA_BUF)
 * use the qdf log timestamp.
 *
 * Return: current qdf log timestamp
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_get_log_timestamp();
}
584 
585 #else
586 
587 #define HIF_EVENT_HIST_MAX		32
588 /* Enable IRQ TRIGGER, NAPI SCHEDULE, SRNG ACCESS START */
589 #define HIF_EVENT_HIST_ENABLE_MASK	0x19
590 
/**
 * hif_get_log_timestamp() - timestamp source for DP event history records
 *
 * Non-debug builds use the (cheaper) scheduler clock instead of the qdf
 * log timestamp.
 *
 * Return: current qdf sched clock value
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_sched_clock();
}
595 
596 #endif
597 
598 /**
599  * struct hif_event_record - an entry of the DP event history
600  * @hal_ring_id: ring id for which event is recorded
601  * @hp: head pointer of the ring (may not be applicable for all events)
602  * @tp: tail pointer of the ring (may not be applicable for all events)
603  * @cpu_id: cpu id on which the event occurred
604  * @timestamp: timestamp when event occurred
605  * @type: type of the event
606  *
607  * This structure represents the information stored for every datapath
608  * event which is logged in the history.
609  */
610 struct hif_event_record {
611 	uint8_t hal_ring_id;
612 	uint32_t hp;
613 	uint32_t tp;
614 	int cpu_id;
615 	uint64_t timestamp;
616 	enum hif_event_type type;
617 };
618 
619 /**
620  * struct hif_event_misc - history related misc info
621  * @last_irq_index: last irq event index in history
622  * @last_irq_ts: last irq timestamp
623  */
624 struct hif_event_misc {
625 	int32_t last_irq_index;
626 	uint64_t last_irq_ts;
627 };
628 
629 #ifdef WLAN_FEATURE_AFFINITY_MGR
630 /**
631  * struct hif_cpu_affinity - CPU affinity mask info for IRQ
632  *
633  * @current_irq_mask: Current CPU mask set for IRQ
634  * @wlan_requested_mask: CPU mask requested by WLAN
635  * @walt_taken_mask: Current CPU taken by Audio
636  * @last_updated: Last time IRQ CPU affinity was updated
637  * @last_affined_away: Last time when IRQ was affined away
638  * @update_requested: IRQ affinity hint set requested by WLAN
639  * @irq: IRQ number
640  */
641 struct hif_cpu_affinity {
642 	qdf_cpu_mask current_irq_mask;
643 	qdf_cpu_mask wlan_requested_mask;
644 	qdf_cpu_mask walt_taken_mask;
645 	uint64_t last_updated;
646 	uint64_t last_affined_away;
647 	bool update_requested;
648 	int irq;
649 };
650 #endif
651 
652 /**
653  * struct hif_event_history - history for one interrupt group
654  * @index: index to store new event
655  * @misc: event misc information
656  * @event: event entry
657  *
658  * This structure represents the datapath history for one
659  * interrupt group.
660  */
661 struct hif_event_history {
662 	qdf_atomic_t index;
663 	struct hif_event_misc misc;
664 	struct hif_event_record event[HIF_EVENT_HIST_MAX];
665 };
666 
667 /**
668  * hif_hist_record_event() - Record one datapath event in history
669  * @hif_ctx: HIF opaque context
670  * @event: DP event entry
671  * @intr_grp_id: interrupt group ID registered with hif
672  *
673  * Return: None
674  */
675 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
676 			   struct hif_event_record *event,
677 			   uint8_t intr_grp_id);
678 
679 /**
680  * hif_event_history_init() - Initialize SRNG event history buffers
681  * @hif_ctx: HIF opaque context
682  * @id: context group ID for which history is recorded
683  *
684  * Returns: None
685  */
686 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id);
687 
688 /**
689  * hif_event_history_deinit() - De-initialize SRNG event history buffers
690  * @hif_ctx: HIF opaque context
691  * @id: context group ID for which history is recorded
692  *
693  * Returns: None
694  */
695 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id);
696 
697 /**
698  * hif_record_event() - Wrapper function to form and record DP event
699  * @hif_ctx: HIF opaque context
700  * @intr_grp_id: interrupt group ID registered with hif
701  * @hal_ring_id: ring id for which event is recorded
702  * @hp: head pointer index of the srng
703  * @tp: tail pointer index of the srng
704  * @type: type of the event to be logged in history
705  *
706  * Return: None
707  */
708 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
709 				    uint8_t intr_grp_id,
710 				    uint8_t hal_ring_id,
711 				    uint32_t hp,
712 				    uint32_t tp,
713 				    enum hif_event_type type)
714 {
715 	struct hif_event_record event;
716 
717 	event.hal_ring_id = hal_ring_id;
718 	event.hp = hp;
719 	event.tp = tp;
720 	event.type = type;
721 
722 	hif_hist_record_event(hif_ctx, &event, intr_grp_id);
723 
724 	return;
725 }
726 
727 #else
728 
/* No-op stubs used when WLAN_FEATURE_DP_EVENT_HISTORY is disabled:
 * DP events are simply not recorded and no history buffers exist.
 */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}

static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx,
					  uint8_t id)
{
}

static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx,
					    uint8_t id)
{
}
747 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
748 
749 void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx);
750 
751 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
752 void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx);
753 #else
754 static
755 inline void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx) {}
756 #endif
757 
758 /**
759  * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
760  *
761  * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
762  * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
763  *                         minimize power
764  * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
765  *                         platform-specific measures to completely power-off
766  *                         the module and associated hardware (i.e. cut power
767  *                         supplies)
768  */
769 enum HIF_DEVICE_POWER_CHANGE_TYPE {
770 	HIF_DEVICE_POWER_UP,
771 	HIF_DEVICE_POWER_DOWN,
772 	HIF_DEVICE_POWER_CUT
773 };
774 
775 /**
776  * enum hif_enable_type: what triggered the enabling of hif
777  *
778  * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
779  * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
780  * @HIF_ENABLE_TYPE_MAX: Max value
781  */
782 enum hif_enable_type {
783 	HIF_ENABLE_TYPE_PROBE,
784 	HIF_ENABLE_TYPE_REINIT,
785 	HIF_ENABLE_TYPE_MAX
786 };
787 
788 /**
789  * enum hif_disable_type: what triggered the disabling of hif
790  *
791  * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
792  * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
793  * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
794  * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
795  * @HIF_DISABLE_TYPE_MAX: Max value
796  */
797 enum hif_disable_type {
798 	HIF_DISABLE_TYPE_PROBE_ERROR,
799 	HIF_DISABLE_TYPE_REINIT_ERROR,
800 	HIF_DISABLE_TYPE_REMOVE,
801 	HIF_DISABLE_TYPE_SHUTDOWN,
802 	HIF_DISABLE_TYPE_MAX
803 };
804 
805 /**
806  * enum hif_device_config_opcode: configure mode
807  *
808  * @HIF_DEVICE_POWER_STATE: device power state
809  * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
810  * @HIF_DEVICE_GET_FIFO_ADDR: get block address
811  * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
812  * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
813  * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
814  * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
815  * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
816  * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
817  * @HIF_DEVICE_GET_OS_DEVICE: get OS device
818  * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
819  * @HIF_BMI_DONE: bmi done
820  * @HIF_DEVICE_SET_TARGET_TYPE: set target type
821  * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
822  * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
823  */
824 enum hif_device_config_opcode {
825 	HIF_DEVICE_POWER_STATE = 0,
826 	HIF_DEVICE_GET_BLOCK_SIZE,
827 	HIF_DEVICE_GET_FIFO_ADDR,
828 	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
829 	HIF_DEVICE_GET_IRQ_PROC_MODE,
830 	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
831 	HIF_DEVICE_POWER_STATE_CHANGE,
832 	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
833 	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
834 	HIF_DEVICE_GET_OS_DEVICE,
835 	HIF_DEVICE_DEBUG_BUS_STATE,
836 	HIF_BMI_DONE,
837 	HIF_DEVICE_SET_TARGET_TYPE,
838 	HIF_DEVICE_SET_HTC_CONTEXT,
839 	HIF_DEVICE_GET_HTC_CONTEXT,
840 };
841 
842 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
843 struct HID_ACCESS_LOG {
844 	uint32_t seqnum;
845 	bool is_write;
846 	void *addr;
847 	uint32_t value;
848 };
849 #endif
850 
851 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
852 		uint32_t value);
853 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
854 
855 #define HIF_MAX_DEVICES                 1
856 /**
857  * struct htc_callbacks - Structure for HTC Callbacks methods
858  * @context:             context to pass to the @dsr_handler
859  *                       note : @rw_compl_handler is provided the context
860  *                       passed to hif_read_write
861  * @rw_compl_handler:    Read / write completion handler
862  * @dsr_handler:         DSR Handler
863  */
864 struct htc_callbacks {
865 	void *context;
866 	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
867 	QDF_STATUS(*dsr_handler)(void *context);
868 };
869 
870 /**
871  * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
872  * @context: Private data context
873  * @set_recovery_in_progress: To Set Driver state for recovery in progress
874  * @is_recovery_in_progress: Query if driver state is recovery in progress
875  * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
876  * @is_driver_unloading: Query if driver is unloading.
877  * @is_target_ready:
878  * @get_bandwidth_level: Query current bandwidth level for the driver
879  * @prealloc_get_consistent_mem_unaligned: get prealloc unaligned consistent mem
880  * @prealloc_put_consistent_mem_unaligned: put unaligned consistent mem to pool
881  * @prealloc_get_multi_pages: get prealloc multi pages memory
882  * @prealloc_put_multi_pages: put prealloc multi pages memory back to pool
883  * This Structure provides callback pointer for HIF to query hdd for driver
884  * states.
885  */
886 struct hif_driver_state_callbacks {
887 	void *context;
888 	void (*set_recovery_in_progress)(void *context, uint8_t val);
889 	bool (*is_recovery_in_progress)(void *context);
890 	bool (*is_load_unload_in_progress)(void *context);
891 	bool (*is_driver_unloading)(void *context);
892 	bool (*is_target_ready)(void *context);
893 	int (*get_bandwidth_level)(void *context);
894 	void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size,
895 						       qdf_dma_addr_t *paddr,
896 						       uint32_t ring_type);
897 	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
898 	void (*prealloc_get_multi_pages)(uint32_t desc_type,
899 					 qdf_size_t elem_size,
900 					 uint16_t elem_num,
901 					 struct qdf_mem_multi_page_t *pages,
902 					 bool cacheable);
903 	void (*prealloc_put_multi_pages)(uint32_t desc_type,
904 					 struct qdf_mem_multi_page_t *pages);
905 };
906 
907 /* This API detaches the HTC layer from the HIF device */
908 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
909 
910 /****************************************************************/
911 /* BMI and Diag window abstraction                              */
912 /****************************************************************/
913 
914 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
915 
916 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
917 				     * handled atomically by
918 				     * DiagRead/DiagWrite
919 				     */
920 
921 #ifdef WLAN_FEATURE_BMI
922 /*
923  * API to handle HIF-specific BMI message exchanges, this API is synchronous
924  * and only allowed to be called from a context that can block (sleep)
925  */
926 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
927 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
928 				uint8_t *pSendMessage, uint32_t Length,
929 				uint8_t *pResponseMessage,
930 				uint32_t *pResponseLength, uint32_t TimeoutMS);
931 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
932 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
933 #else /* WLAN_FEATURE_BMI */
/* Stubs used when WLAN_FEATURE_BMI is disabled: registration is a no-op
 * and the target never requires BMI.
 */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}

static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
944 #endif /* WLAN_FEATURE_BMI */
945 
946 #ifdef HIF_CPU_CLEAR_AFFINITY
947 /**
948  * hif_config_irq_clear_cpu_affinity() - Remove cpu affinity of IRQ
949  * @scn: HIF handle
950  * @intr_ctxt_id: interrupt group index
951  * @cpu: CPU core to clear
952  *
953  * Return: None
954  */
955 void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
956 				       int intr_ctxt_id, int cpu);
957 #else
/* Stub used when HIF_CPU_CLEAR_AFFINITY is disabled: no affinity to clear */
static inline
void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
				       int intr_ctxt_id, int cpu)
{
}
963 #endif
964 
965 /*
966  * APIs to handle HIF specific diagnostic read accesses. These APIs are
967  * synchronous and only allowed to be called from a context that
968  * can block (sleep). They are not high performance APIs.
969  *
970  * hif_diag_read_access reads a 4 Byte aligned/length value from a
971  * Target register or memory word.
972  *
973  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
974  */
975 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
976 				uint32_t address, uint32_t *data);
977 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
978 		      uint8_t *data, int nbytes);
979 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
980 			void *ramdump_base, uint32_t address, uint32_t size);
981 /*
982  * APIs to handle HIF specific diagnostic write accesses. These APIs are
983  * synchronous and only allowed to be called from a context that
984  * can block (sleep).
985  * They are not high performance APIs.
986  *
987  * hif_diag_write_access writes a 4 Byte aligned/length value to a
988  * Target register or memory word.
989  *
990  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
991  */
992 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
993 				 uint32_t address, uint32_t data);
994 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
995 			uint32_t address, uint8_t *data, int nbytes);
996 
/*
 * typedef fastpath_msg_handler - fastpath message callback
 * Arguments: opaque client context, array of nbufs, and a uint32_t that
 * presumably is the number of entries in the array - confirm with callers.
 */
typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);

/* enable/query polled (non-interrupt) operation of the HIF layer */
void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
1001 
1002 /*
1003  * Set the FASTPATH_mode_on flag in sc, for use by data path
1004  */
1005 #ifdef WLAN_FEATURE_FASTPATH
1006 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
1007 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
1008 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
1009 
1010 /**
1011  * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
1012  * @hif_ctx: HIF opaque context
1013  * @handler: Callback function
1014  * @context: handle for callback function
1015  *
1016  * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
1017  */
1018 QDF_STATUS hif_ce_fastpath_cb_register(
1019 		struct hif_opaque_softc *hif_ctx,
1020 		fastpath_msg_handler handler, void *context);
1021 #else
/* stub: fastpath disabled, registration always fails */
static inline QDF_STATUS hif_ce_fastpath_cb_register(
		struct hif_opaque_softc *hif_ctx,
		fastpath_msg_handler handler, void *context)
{
	return QDF_STATUS_E_FAILURE;
}

/* stub: fastpath disabled, no CE handle available */
static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
1033 
1034 #endif
1035 
1036 /*
1037  * Enable/disable CDC max performance workaround
1038  * For max-performance set this to 0
1039  * To allow SoC to enter sleep set this to 1
1040  */
1041 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
1042 
1043 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
1044 			     qdf_shared_mem_t **ce_sr,
1045 			     uint32_t *ce_sr_ring_size,
1046 			     qdf_dma_addr_t *ce_reg_paddr);
1047 
1048 /**
1049  * struct hif_msg_callbacks - List of callbacks - filled in by HTC.
1050  * @Context: context meaningful to HTC
1051  * @txCompletionHandler:
1052  * @rxCompletionHandler:
1053  * @txResourceAvailHandler:
1054  * @fwEventHandler:
1055  * @update_bundle_stats:
1056  */
1057 struct hif_msg_callbacks {
1058 	void *Context;
1059 	/**< context meaningful to HTC */
1060 	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
1061 					uint32_t transferID,
1062 					uint32_t toeplitz_hash_result);
1063 	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
1064 					uint8_t pipeID);
1065 	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
1066 	void (*fwEventHandler)(void *context, QDF_STATUS status);
1067 	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
1068 };
1069 
/**
 * enum hif_target_status - target state as seen by the HIF layer
 * @TARGET_STATUS_CONNECTED: target connected
 * @TARGET_STATUS_RESET: target got reset
 * @TARGET_STATUS_EJECT: target got ejected
 * @TARGET_STATUS_SUSPEND: target got suspended
 */
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,  /* target got reset */
	TARGET_STATUS_EJECT,  /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
1076 
1077 /**
1078  * enum hif_attribute_flags: configure hif
1079  *
1080  * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
1081  * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
1082  *  							+ No pktlog CE
1083  */
1084 enum hif_attribute_flags {
1085 	HIF_LOWDESC_CE_CFG = 1,
1086 	HIF_LOWDESC_CE_NO_PKTLOG_CFG
1087 };
1088 
/*
 * HIF_DATA_ATTR_SET_* - OR a field into the per-packet data attribute word.
 * @attr is modified in place; @v is masked to the field width and shifted
 * into its bit position. Each argument is evaluated exactly once.
 */
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	(attr |= (v & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	(attr |= (v & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	(attr |= (v & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	(attr |= (v & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	(attr |= (v & 0x01) << 30)
1103 
/**
 * struct hif_ul_pipe_info - uplink (host->target) pipe ring snapshot
 * @nentries: number of entries in the ring
 * @nentries_mask: ring index mask (presumably nentries - 1; confirm)
 * @sw_index: software ring index
 * @write_index: cached copy of the write index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE address space
 */
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
1113 
/**
 * struct hif_dl_pipe_info - downlink (target->host) pipe ring snapshot
 * @nentries: number of entries in the ring
 * @nentries_mask: ring index mask (presumably nentries - 1; confirm)
 * @sw_index: software ring index
 * @write_index: cached copy of the write index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE address space
 */
struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
1123 
/**
 * struct hif_pipe_addl_info - additional per-pipe diagnostic information
 * @pci_mem: PCI memory base - confirm exact meaning/units with users
 * @ctrl_addr: CE control register address
 * @ul_pipe: uplink pipe ring snapshot
 * @dl_pipe: downlink pipe ring snapshot
 */
struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
1130 
/* number of messages to flush per pass; smaller on debug (SLUB) builds */
#ifdef CONFIG_SLUB_DEBUG_ON
#define MSG_FLUSH_NUM 16
#else /* PERF build */
#define MSG_FLUSH_NUM 32
#endif /* CONFIG_SLUB_DEBUG_ON */
1136 
1137 struct hif_bus_id;
1138 
#ifdef CUSTOM_CB_SCHEDULER_SUPPORT
/**
 * hif_register_ce_custom_cb() - Helper API to register the custom callback
 * @hif_ctx: HIF opaque context
 * @pipe: Pipe number
 * @custom_cb: Custom call back function pointer
 * @custom_cb_context: Custom callback context
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
hif_register_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
			  void (*custom_cb)(void *), void *custom_cb_context);

/**
 * hif_unregister_ce_custom_cb() - Helper API to unregister the custom callback
 * @hif_ctx: HIF opaque context
 * @pipe: Pipe number
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
hif_unregister_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe);

/**
 * hif_enable_ce_custom_cb() - Helper API to enable the custom callback
 * @hif_ctx: HIF opaque context
 * @pipe: Pipe number
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
hif_enable_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe);

/**
 * hif_disable_ce_custom_cb() - Helper API to disable the custom callback
 * @hif_ctx: HIF opaque context
 * @pipe: Pipe number
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
hif_disable_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe);
#endif /* CUSTOM_CB_SCHEDULER_SUPPORT */
1183 
1184 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
1185 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
1186 		     int opcode, void *config, uint32_t config_len);
1187 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
1188 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
1189 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
1190 		   struct hif_msg_callbacks *callbacks);
1191 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
1192 void hif_stop(struct hif_opaque_softc *hif_ctx);
1193 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
1194 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
1195 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
1196 		      uint8_t cmd_id, bool start);
1197 
1198 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1199 				  uint32_t transferID, uint32_t nbytes,
1200 				  qdf_nbuf_t wbuf, uint32_t data_attr);
1201 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1202 			     int force);
1203 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1204 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
1205 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
1206 			  uint8_t *DLPipe);
1207 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
1208 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
1209 			int *dl_is_polled);
1210 uint16_t
1211 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1212 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
1213 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
1214 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
1215 		     bool wait_for_it);
1216 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
1217 #ifndef HIF_PCI
1218 static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
1219 {
1220 	return 0;
1221 }
1222 #else
1223 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
1224 #endif
1225 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1226 			u32 *revision, const char **target_name);
1227 
1228 #ifdef RECEIVE_OFFLOAD
1229 /**
1230  * hif_offld_flush_cb_register() - Register the offld flush callback
1231  * @scn: HIF opaque context
1232  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
1233  *			 Or GRO/LRO flush when RxThread is not enabled. Called
1234  *			 with corresponding context for flush.
1235  * Return: None
1236  */
1237 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
1238 				 void (offld_flush_handler)(void *ol_ctx));
1239 
1240 /**
1241  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
1242  * @scn: HIF opaque context
1243  *
1244  * Return: None
1245  */
1246 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
1247 #endif
1248 
1249 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
1250 /**
1251  * hif_exec_should_yield() - Check if hif napi context should yield
1252  * @hif_ctx: HIF opaque context
1253  * @grp_id: grp_id of the napi for which check needs to be done
1254  *
1255  * The function uses grp_id to look for NAPI and checks if NAPI needs to
1256  * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
1257  * yield decision.
1258  *
1259  * Return: true if NAPI needs to yield, else false
1260  */
1261 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
1262 #else
1263 static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
1264 					 uint grp_id)
1265 {
1266 	return false;
1267 }
1268 #endif
1269 
1270 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
1271 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
1272 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
1273 				      int htc_htt_tx_endpoint);
1274 
1275 /**
1276  * hif_open() - Create hif handle
1277  * @qdf_ctx: qdf context
1278  * @mode: Driver Mode
1279  * @bus_type: Bus Type
1280  * @cbk: CDS Callbacks
1281  * @psoc: psoc object manager
1282  *
1283  * API to open HIF Context
1284  *
1285  * Return: HIF Opaque Pointer
1286  */
1287 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1288 				  uint32_t mode,
1289 				  enum qdf_bus_type bus_type,
1290 				  struct hif_driver_state_callbacks *cbk,
1291 				  struct wlan_objmgr_psoc *psoc);
1292 
1293 /**
1294  * hif_init_dma_mask() - Set dma mask for the dev
1295  * @dev: dev for which DMA mask is to be set
1296  * @bus_type: bus type for the target
1297  *
1298  * This API sets the DMA mask for the device. before the datapath
1299  * memory pre-allocation is done. If the DMA mask is not set before
1300  * requesting the DMA memory, kernel defaults to a 32-bit DMA mask,
1301  * and does not utilize the full device capability.
1302  *
1303  * Return: 0 - success, non-zero on failure.
1304  */
1305 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type);
1306 void hif_close(struct hif_opaque_softc *hif_ctx);
1307 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1308 		      void *bdev, const struct hif_bus_id *bid,
1309 		      enum qdf_bus_type bus_type,
1310 		      enum hif_enable_type type);
1311 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
1312 #ifdef CE_TASKLET_DEBUG_ENABLE
1313 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
1314 				 uint8_t value);
1315 #endif
1316 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
1317 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
1318 
1319 /**
1320  * enum hif_pm_wake_irq_type - Wake interrupt type for Power Management
1321  * @HIF_PM_INVALID_WAKE: Wake irq is invalid or not configured
1322  * @HIF_PM_MSI_WAKE: Wake irq is MSI interrupt
1323  * @HIF_PM_CE_WAKE: Wake irq is CE interrupt
1324  */
1325 typedef enum {
1326 	HIF_PM_INVALID_WAKE,
1327 	HIF_PM_MSI_WAKE,
1328 	HIF_PM_CE_WAKE,
1329 } hif_pm_wake_irq_type;
1330 
1331 /**
1332  * hif_pm_get_wake_irq_type - Get wake irq type for Power Management
1333  * @hif_ctx: HIF context
1334  *
1335  * Return: enum hif_pm_wake_irq_type
1336  */
1337 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx);
1338 
1339 /**
1340  * enum hif_ep_vote_type - hif ep vote type
1341  * @HIF_EP_VOTE_DP_ACCESS: vote type is specific DP
1342  * @HIF_EP_VOTE_NONDP_ACCESS: ep vote for over all access
1343  */
1344 enum hif_ep_vote_type {
1345 	HIF_EP_VOTE_DP_ACCESS,
1346 	HIF_EP_VOTE_NONDP_ACCESS
1347 };
1348 
1349 /**
1350  * enum hif_ep_vote_access - hif ep vote access
1351  * @HIF_EP_VOTE_ACCESS_ENABLE: Enable ep voting
1352  * @HIF_EP_VOTE_INTERMEDIATE_ACCESS: allow during transition
1353  * @HIF_EP_VOTE_ACCESS_DISABLE: disable ep voting
1354  */
1355 enum hif_ep_vote_access {
1356 	HIF_EP_VOTE_ACCESS_ENABLE,
1357 	HIF_EP_VOTE_INTERMEDIATE_ACCESS,
1358 	HIF_EP_VOTE_ACCESS_DISABLE
1359 };
1360 
1361 /**
1362  * enum hif_rtpm_client_id - modules registered with runtime pm module
1363  * @HIF_RTPM_ID_RESERVED: Reserved ID
1364  * @HIF_RTPM_ID_HAL_REO_CMD: HAL REO commands
1365  * @HIF_RTPM_ID_WMI: WMI commands Tx
1366  * @HIF_RTPM_ID_HTT: HTT commands Tx
1367  * @HIF_RTPM_ID_DP: Datapath Tx path
1368  * @HIF_RTPM_ID_DP_RING_STATS: Datapath ring stats
1369  * @HIF_RTPM_ID_CE: CE Tx buffer posting
1370  * @HIF_RTPM_ID_FORCE_WAKE: Force wake request
1371  * @HIF_RTPM_ID_PM_QOS_NOTIFY:
1372  * @HIF_RTPM_ID_WIPHY_SUSPEND:
1373  * @HIF_RTPM_ID_MAX: Max id
1374  */
1375 enum  hif_rtpm_client_id {
1376 	HIF_RTPM_ID_RESERVED,
1377 	HIF_RTPM_ID_HAL_REO_CMD,
1378 	HIF_RTPM_ID_WMI,
1379 	HIF_RTPM_ID_HTT,
1380 	HIF_RTPM_ID_DP,
1381 	HIF_RTPM_ID_DP_RING_STATS,
1382 	HIF_RTPM_ID_CE,
1383 	HIF_RTPM_ID_FORCE_WAKE,
1384 	HIF_RTPM_ID_PM_QOS_NOTIFY,
1385 	HIF_RTPM_ID_WIPHY_SUSPEND,
1386 	HIF_RTPM_ID_MAX
1387 };
1388 
1389 /**
1390  * enum rpm_type - Get and Put calls types
1391  * @HIF_RTPM_GET_ASYNC: Increment usage count and when system is suspended
1392  *		      schedule resume process, return depends on pm state.
1393  * @HIF_RTPM_GET_FORCE: Increment usage count and when system is suspended
1394  *		      schedule resume process, returns success irrespective of
1395  *		      pm_state.
1396  * @HIF_RTPM_GET_SYNC: Increment usage count and when system is suspended,
1397  *		     wait till process is resumed.
1398  * @HIF_RTPM_GET_NORESUME: Only increments usage count.
1399  * @HIF_RTPM_PUT_ASYNC: Decrements usage count and puts system in idle state.
1400  * @HIF_RTPM_PUT_SYNC_SUSPEND: Decrements usage count and puts system in
1401  *			     suspended state.
1402  * @HIF_RTPM_PUT_NOIDLE: Decrements usage count.
1403  */
1404 enum rpm_type {
1405 	HIF_RTPM_GET_ASYNC,
1406 	HIF_RTPM_GET_FORCE,
1407 	HIF_RTPM_GET_SYNC,
1408 	HIF_RTPM_GET_NORESUME,
1409 	HIF_RTPM_PUT_ASYNC,
1410 	HIF_RTPM_PUT_SYNC_SUSPEND,
1411 	HIF_RTPM_PUT_NOIDLE,
1412 };
1413 
1414 /**
1415  * struct hif_pm_runtime_lock - data structure for preventing runtime suspend
1416  * @list: global list of runtime locks
1417  * @active: true if this lock is preventing suspend
1418  * @name: character string for tracking this lock
1419  */
1420 struct hif_pm_runtime_lock {
1421 	struct list_head list;
1422 	bool active;
1423 	const char *name;
1424 };
1425 
1426 #ifdef FEATURE_RUNTIME_PM
1427 /**
1428  * hif_rtpm_register() - Register a module with runtime PM.
1429  * @id: ID of the module which needs to be registered
1430  * @hif_rpm_cbk: callback to be called when get was called in suspended state.
1431  *
1432  * Return: success status if successfully registered
1433  */
1434 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void));
1435 
1436 /**
1437  * hif_rtpm_deregister() - Deregister the module
1438  * @id: ID of the module which needs to be de-registered
1439  */
1440 QDF_STATUS hif_rtpm_deregister(uint32_t id);
1441 
1442 /**
1443  * hif_rtpm_set_autosuspend_delay() - Set delay to trigger RTPM suspend
1444  * @delay: delay in ms to be set
1445  *
1446  * Return: Success if delay is set successfully
1447  */
1448 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay);
1449 
1450 /**
1451  * hif_rtpm_restore_autosuspend_delay() - Restore delay value to default value
1452  *
1453  * Return: Success if reset done. E_ALREADY if delay same as config value
1454  */
1455 QDF_STATUS hif_rtpm_restore_autosuspend_delay(void);
1456 
1457 /**
1458  * hif_rtpm_get_autosuspend_delay() -Get delay to trigger RTPM suspend
1459  *
1460  * Return: Delay in ms
1461  */
1462 int hif_rtpm_get_autosuspend_delay(void);
1463 
1464 /**
1465  * hif_runtime_lock_init() - API to initialize Runtime PM context
1466  * @lock: QDF lock context
1467  * @name: Context name
1468  *
1469  * This API initializes the Runtime PM context of the caller and
1470  * return the pointer.
1471  *
1472  * Return: None
1473  */
1474 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
1475 
1476 /**
1477  * hif_runtime_lock_deinit() - This API frees the runtime pm context
1478  * @data: Runtime PM context
1479  *
1480  * Return: void
1481  */
1482 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data);
1483 
1484 /**
1485  * hif_rtpm_get() - Increment usage_count on the device to avoid suspend.
1486  * @type: get call types from hif_rpm_type
1487  * @id: ID of the module calling get()
1488  *
1489  * A get operation will prevent a runtime suspend until a
1490  * corresponding put is done.  This api should be used when accessing bus.
1491  *
1492  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
1493  * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!!
1494  *
1495  * return: success if a get has been issued, else error code.
1496  */
1497 QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id);
1498 
1499 /**
1500  * hif_rtpm_put() - do a put operation on the device
1501  * @type: put call types from hif_rpm_type
1502  * @id: ID of the module calling put()
1503  *
1504  * A put operation will allow a runtime suspend after a corresponding
1505  * get was done.  This api should be used when finished accessing bus.
1506  *
1507  * This api will return a failure if runtime pm is stopped
1508  * This api will return failure if it would decrement the usage count below 0.
1509  *
1510  * return: QDF_STATUS_SUCCESS if the put is performed
1511  */
1512 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id);
1513 
1514 /**
1515  * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend
1516  * @data: runtime PM lock
1517  *
1518  * This function will prevent runtime suspend, by incrementing
1519  * device's usage count.
1520  *
1521  * Return: status
1522  */
1523 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data);
1524 
1525 /**
1526  * hif_pm_runtime_prevent_suspend_sync() - Synchronized prevent Runtime suspend
1527  * @data: runtime PM lock
1528  *
1529  * This function will prevent runtime suspend, by incrementing
1530  * device's usage count.
1531  *
1532  * Return: status
1533  */
1534 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data);
1535 
1536 /**
1537  * hif_pm_runtime_allow_suspend() - Allow Runtime suspend
1538  * @data: runtime PM lock
1539  *
1540  * This function will allow runtime suspend, by decrementing
1541  * device's usage count.
1542  *
1543  * Return: status
1544  */
1545 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data);
1546 
1547 /**
1548  * hif_rtpm_request_resume() - Request resume if bus is suspended
1549  *
1550  * Return: None
1551  */
1552 void hif_rtpm_request_resume(void);
1553 
1554 /**
1555  * hif_rtpm_sync_resume() - Invoke synchronous runtime resume.
1556  *
1557  * This function will invoke synchronous runtime resume.
1558  *
1559  * Return: status
1560  */
1561 QDF_STATUS hif_rtpm_sync_resume(void);
1562 
1563 /**
1564  * hif_rtpm_check_and_request_resume() - check if bus is suspended and
1565  *                                       request resume.
1566  *
1567  * Return: void
1568  */
1569 void hif_rtpm_check_and_request_resume(void);
1570 
1571 /**
1572  * hif_rtpm_set_client_job() - Set job for the client.
1573  * @client_id: Client id for which job needs to be set
1574  *
1575  * If get failed due to system being in suspended state, set the client job so
1576  * when system resumes the client's job is called.
1577  *
1578  * Return: None
1579  */
1580 void hif_rtpm_set_client_job(uint32_t client_id);
1581 
1582 /**
1583  * hif_rtpm_mark_last_busy() - Mark last busy to delay retry to suspend
1584  * @id: ID marking last busy
1585  *
1586  * Return: None
1587  */
1588 void hif_rtpm_mark_last_busy(uint32_t id);
1589 
1590 /**
1591  * hif_rtpm_get_monitor_wake_intr() - API to get monitor_wake_intr
1592  *
1593  * monitor_wake_intr variable can be used to indicate if driver expects wake
1594  * MSI for runtime PM
1595  *
1596  * Return: monitor_wake_intr variable
1597  */
1598 int hif_rtpm_get_monitor_wake_intr(void);
1599 
1600 /**
1601  * hif_rtpm_set_monitor_wake_intr() - API to set monitor_wake_intr
1602  * @val: value to set
1603  *
1604  * monitor_wake_intr variable can be used to indicate if driver expects wake
1605  * MSI for runtime PM
1606  *
1607  * Return: void
1608  */
1609 void hif_rtpm_set_monitor_wake_intr(int val);
1610 
1611 /**
1612  * hif_pre_runtime_suspend() - book keeping before beginning runtime suspend.
1613  * @hif_ctx: HIF context
1614  *
1615  * Makes sure that the pci link will be taken down by the suspend operation.
1616  * If the hif layer is configured to leave the bus on, runtime suspend will
1617  * not save any power.
1618  *
1619  * Set the runtime suspend state to SUSPENDING.
1620  *
1621  * return -EINVAL if the bus won't go down.  otherwise return 0
1622  */
1623 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1624 
1625 /**
1626  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
1627  *
1628  * update the runtime pm state to RESUMING.
1629  * Return: void
1630  */
1631 void hif_pre_runtime_resume(void);
1632 
1633 /**
1634  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
1635  *
1636  * Record the success.
1637  * update the runtime_pm state to SUSPENDED
1638  * Return: void
1639  */
1640 void hif_process_runtime_suspend_success(void);
1641 
1642 /**
1643  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
1644  *
1645  * Record the failure.
1646  * mark last busy to delay a retry.
1647  * update the runtime_pm state back to ON
1648  *
1649  * Return: void
1650  */
1651 void hif_process_runtime_suspend_failure(void);
1652 
1653 /**
1654  * hif_process_runtime_resume_linkup() - bookkeeping of resuming link up
1655  *
1656  * update the runtime_pm state to RESUMING_LINKUP
1657  * Return: void
1658  */
1659 void hif_process_runtime_resume_linkup(void);
1660 
1661 /**
1662  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
1663  *
1664  * record the success.
1665  * update the runtime_pm state to SUSPENDED
1666  * Return: void
1667  */
1668 void hif_process_runtime_resume_success(void);
1669 
1670 /**
1671  * hif_rtpm_print_prevent_list() - list the clients preventing suspend.
1672  *
1673  * Return: None
1674  */
1675 void hif_rtpm_print_prevent_list(void);
1676 
1677 /**
1678  * hif_rtpm_suspend_lock() - spin_lock on marking runtime suspend
1679  *
1680  * Return: void
1681  */
1682 void hif_rtpm_suspend_lock(void);
1683 
1684 /**
1685  * hif_rtpm_suspend_unlock() - spin_unlock on marking runtime suspend
1686  *
1687  * Return: void
1688  */
1689 void hif_rtpm_suspend_unlock(void);
1690 
1691 /**
1692  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
1693  * @hif_ctx: HIF context
1694  *
1695  * Return: 0 for success and non-zero error code for failure
1696  */
1697 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1698 
1699 /**
1700  * hif_runtime_resume() - do the bus resume part of a runtime resume
1701  * @hif_ctx: HIF context
1702  *
1703  * Return: 0 for success and non-zero error code for failure
1704  */
1705 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
1706 
1707 /**
1708  * hif_fastpath_resume() - resume fastpath for runtimepm
1709  * @hif_ctx: HIF context
1710  *
1711  * ensure that the fastpath write index register is up to date
1712  * since runtime pm may cause ce_send_fast to skip the register
1713  * write.
1714  *
1715  * fastpath only applicable to legacy copy engine
1716  */
1717 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
1718 
1719 /**
1720  * hif_rtpm_get_state(): get rtpm link state
1721  *
1722  * Return: state
1723  */
1724 int hif_rtpm_get_state(void);
1725 
1726 /**
1727  * hif_rtpm_display_last_busy_hist() - Display runtimepm last busy history
1728  * @hif_ctx: HIF context
1729  *
1730  * Return: None
1731  */
1732 void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx);
1733 
1734 /**
1735  * hif_rtpm_record_ce_last_busy_evt() - Record CE runtimepm last busy event
1736  * @scn: HIF context
1737  * @ce_id: CE id
1738  *
1739  * Return: None
1740  */
1741 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1742 				      unsigned long ce_id);
1743 #else
1744 
1745 /**
1746  * hif_rtpm_display_last_busy_hist() - Display runtimepm last busy history
1747  * @hif_ctx: HIF context
1748  *
1749  * Return: None
1750  */
1751 static inline
1752 void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx) { }
1753 
1754 /**
1755  * hif_rtpm_record_ce_last_busy_evt() - Record CE runtimepm last busy event
1756  * @scn: HIF context
1757  * @ce_id: CE id
1758  *
1759  * Return: None
1760  */
1761 static inline
1762 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1763 				      unsigned long ce_id)
1764 { }
1765 
/* Stubs for builds without FEATURE_RUNTIME_PM: registration trivially
 * succeeds and no runtime-PM state is tracked.
 */
static inline
QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void))
{ return QDF_STATUS_SUCCESS; }

static inline
QDF_STATUS hif_rtpm_deregister(uint32_t id)
{ return QDF_STATUS_SUCCESS; }
1773 
1774 static inline
1775 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay)
1776 { return QDF_STATUS_SUCCESS; }
1777 
1778 static inline QDF_STATUS hif_rtpm_restore_autosuspend_delay(void)
1779 { return QDF_STATUS_SUCCESS; }
1780 
1781 static inline int hif_rtpm_get_autosuspend_delay(void)
1782 { return 0; }
1783 
1784 static inline
1785 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
1786 { return 0; }
1787 
1788 static inline
1789 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data)
1790 {}
1791 
1792 static inline
1793 int hif_rtpm_get(uint8_t type, uint32_t id)
1794 { return QDF_STATUS_SUCCESS; }
1795 
1796 static inline
1797 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
1798 { return QDF_STATUS_SUCCESS; }
1799 
1800 static inline
1801 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data)
1802 { return 0; }
1803 
1804 static inline
1805 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data)
1806 { return 0; }
1807 
1808 static inline
1809 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data)
1810 { return 0; }
1811 
1812 static inline
1813 QDF_STATUS hif_rtpm_sync_resume(void)
1814 { return QDF_STATUS_SUCCESS; }
1815 
1816 static inline
1817 void hif_rtpm_request_resume(void)
1818 {}
1819 
1820 static inline
1821 void hif_rtpm_check_and_request_resume(void)
1822 {}
1823 
1824 static inline
1825 void hif_rtpm_set_client_job(uint32_t client_id)
1826 {}
1827 
1828 static inline
1829 void hif_rtpm_print_prevent_list(void)
1830 {}
1831 
1832 static inline
1833 void hif_rtpm_suspend_unlock(void)
1834 {}
1835 
1836 static inline
1837 void hif_rtpm_suspend_lock(void)
1838 {}
1839 
1840 static inline
1841 int hif_rtpm_get_monitor_wake_intr(void)
1842 { return 0; }
1843 
1844 static inline
1845 void hif_rtpm_set_monitor_wake_intr(int val)
1846 {}
1847 
1848 static inline
1849 void hif_rtpm_mark_last_busy(uint32_t id)
1850 {}
1851 #endif
1852 
1853 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
1854 				 bool is_packet_log_enabled);
1855 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
1856 
1857 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
1858 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
1859 
1860 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
1861 
1862 #ifdef IPA_OFFLOAD
1863 /**
1864  * hif_get_ipa_hw_type() - get IPA hw type
1865  *
1866  * This API return the IPA hw type.
1867  *
1868  * Return: IPA hw type
1869  */
1870 static inline
1871 enum ipa_hw_type hif_get_ipa_hw_type(void)
1872 {
1873 	return ipa_get_hw_type();
1874 }
1875 
1876 /**
1877  * hif_get_ipa_present() - get IPA hw status
1878  *
1879  * This API return the IPA hw status.
1880  *
1881  * Return: true if IPA is present or false otherwise
1882  */
1883 static inline
1884 bool hif_get_ipa_present(void)
1885 {
1886 	if (qdf_ipa_uc_reg_rdyCB(NULL) != -EPERM)
1887 		return true;
1888 	else
1889 		return false;
1890 }
1891 #endif
/* Bus-level suspend/resume hooks, driven by the platform PM callbacks */
int hif_bus_resume(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
 */
int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_late_resume() - resume non wmi traffic
 * @hif_ctx: hif context
 */
int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
/* _noirq variants: presumably called with interrupts disabled during
 * system PM - confirm against the bus layer implementations.
 */
int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
1908 
1909 /**
1910  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
1911  * @hif_ctx: an opaque HIF handle to use
1912  *
1913  * As opposed to the standard hif_irq_enable, this function always applies to
1914  * the APPS side kernel interrupt handling.
1915  *
1916  * Return: errno
1917  */
1918 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
1919 
1920 /**
1921  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
1922  * @hif_ctx: an opaque HIF handle to use
1923  *
1924  * As opposed to the standard hif_irq_disable, this function always applies to
1925  * the APPS side kernel interrupt handling.
1926  *
1927  * Return: errno
1928  */
1929 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
1930 
1931 /**
1932  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
1933  * @hif_ctx: an opaque HIF handle to use
1934  *
1935  * As opposed to the standard hif_irq_enable, this function always applies to
1936  * the APPS side kernel interrupt handling.
1937  *
1938  * Return: errno
1939  */
1940 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
1941 
1942 /**
1943  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
1944  * @hif_ctx: an opaque HIF handle to use
1945  *
1946  * As opposed to the standard hif_irq_disable, this function always applies to
1947  * the APPS side kernel interrupt handling.
1948  *
1949  * Return: errno
1950  */
1951 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
1952 
1953 /**
1954  * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side
1955  * @hif_ctx: an opaque HIF handle to use
1956  *
1957  * This function always applies to the APPS side kernel interrupt handling
1958  * to wake the system from suspend.
1959  *
1960  * Return: errno
1961  */
1962 int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx);
1963 
1964 /**
1965  * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side
1966  * @hif_ctx: an opaque HIF handle to use
1967  *
1968  * This function always applies to the APPS side kernel interrupt handling
1969  * to disable the wake irq.
1970  *
1971  * Return: errno
1972  */
1973 int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx);
1974 
1975 /**
1976  * hif_apps_enable_irqs_except_wake_irq() - Enables all irqs except wake_irq
1977  * @hif_ctx: an opaque HIF handle to use
1978  *
1979  * As opposed to the standard hif_irq_enable, this function always applies to
1980  * the APPS side kernel interrupt handling.
1981  *
1982  * Return: errno
1983  */
1984 int hif_apps_enable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1985 
1986 /**
1987  * hif_apps_disable_irqs_except_wake_irq() - Disables all irqs except wake_irq
1988  * @hif_ctx: an opaque HIF handle to use
1989  *
1990  * As opposed to the standard hif_irq_disable, this function always applies to
1991  * the APPS side kernel interrupt handling.
1992  *
1993  * Return: errno
1994  */
1995 int hif_apps_disable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1996 
1997 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
1998 int hif_dump_registers(struct hif_opaque_softc *scn);
1999 int ol_copy_ramdump(struct hif_opaque_softc *scn);
2000 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
2001 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
2002 		     u32 *revision, const char **target_name);
2003 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
2004 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
2005 						   scn);
2006 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
2007 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
2008 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
2009 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
2010 			   hif_target_status);
2011 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
2012 			 struct hif_config_info *cfg);
2013 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
2014 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2015 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
2016 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2017 			   uint32_t transfer_id, u_int32_t len);
2018 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
2019 	uint32_t transfer_id, uint32_t download_len);
2020 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
2021 void hif_ce_war_disable(void);
2022 void hif_ce_war_enable(void);
2023 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
2024 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
2025 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
2026 		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
2027 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
2028 		uint32_t pipe_num);
2029 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
2030 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
2031 
2032 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
2033 				int rx_bundle_cnt);
2034 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
2035 
2036 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
2037 
2038 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
2039 
2040 enum hif_exec_type {
2041 	HIF_EXEC_NAPI_TYPE,
2042 	HIF_EXEC_TASKLET_TYPE,
2043 };
2044 
2045 typedef uint32_t (*ext_intr_handler)(void *, uint32_t, int);
2046 
2047 /**
2048  * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
2049  * @softc: hif opaque context owning the exec context
2050  * @id: the id of the interrupt context
2051  *
2052  * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
2053  *         'id' registered with the OS
2054  */
2055 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
2056 				uint8_t id);
2057 
2058 /**
2059  * hif_configure_ext_group_interrupts() - Configure ext group interrupts
2060  * @hif_ctx: hif opaque context
2061  *
2062  * Return: QDF_STATUS
2063  */
2064 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
2065 
2066 /**
2067  * hif_deconfigure_ext_group_interrupts() - Deconfigure ext group interrupts
2068  * @hif_ctx: hif opaque context
2069  *
2070  * Return: None
2071  */
2072 void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
2073 
2074 /**
2075  * hif_register_ext_group() - API to register external group
2076  * interrupt handler.
2077  * @hif_ctx : HIF Context
2078  * @numirq: number of irq's in the group
2079  * @irq: array of irq values
2080  * @handler: callback interrupt handler function
 * @cb_ctx: context to be passed to the callback
2082  * @context_name: text name of the context
2083  * @type: napi vs tasklet
2084  * @scale:
2085  *
2086  * Return: QDF_STATUS
2087  */
2088 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
2089 				  uint32_t numirq, uint32_t irq[],
2090 				  ext_intr_handler handler,
2091 				  void *cb_ctx, const char *context_name,
2092 				  enum hif_exec_type type, uint32_t scale);
2093 
2094 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
2095 				const char *context_name);
2096 
2097 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
2098 				u_int8_t pipeid,
2099 				struct hif_msg_callbacks *callbacks);
2100 
2101 /**
2102  * hif_print_napi_stats() - Display HIF NAPI stats
2103  * @hif_ctx: HIF opaque context
2104  *
2105  * Return: None
2106  */
2107 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
2108 
2109 /**
2110  * hif_clear_napi_stats() - function clears the stats of the
2111  * latency when called.
2112  * @hif_ctx: the HIF context to assign the callback to
2113  *
2114  * Return: None
2115  */
2116 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
2117 
2118 #ifdef __cplusplus
2119 }
2120 #endif
2121 
2122 #ifdef FORCE_WAKE
2123 /**
2124  * hif_force_wake_request() - Function to wake from power collapse
2125  * @handle: HIF opaque handle
2126  *
2127  * Description: API to check if the device is awake or not before
 * read/write to BAR + 4K registers. If the device is awake, return
 * success; otherwise write '1' to
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG, which interrupts
 * the device and wakes up the PCI and MHI within 50ms,
2132  * and then the device writes a value to
2133  * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
2134  * handshake process to let the host know the device is awake.
2135  *
2136  * Return: zero - success/non-zero - failure
2137  */
2138 int hif_force_wake_request(struct hif_opaque_softc *handle);
2139 
2140 /**
2141  * hif_force_wake_release() - API to release/reset the SOC wake register
2142  * from interrupting the device.
2143  * @handle: HIF opaque handle
2144  *
2145  * Description: API to set the
2146  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
2147  * to release the interrupt line.
2148  *
2149  * Return: zero - success/non-zero - failure
2150  */
2151 int hif_force_wake_release(struct hif_opaque_softc *handle);
2152 #else
/* Stubs used when FORCE_WAKE is not defined: no wake handshake is
 * required, so both calls are no-ops that always report success.
 */
static inline
int hif_force_wake_request(struct hif_opaque_softc *handle)
{
	return 0;
}

static inline
int hif_force_wake_release(struct hif_opaque_softc *handle)
{
	return 0;
}
2164 #endif /* FORCE_WAKE */
2165 
2166 #if defined(FEATURE_HAL_DELAYED_REG_WRITE) || \
2167 	defined(FEATURE_HIF_DELAYED_REG_WRITE)
2168 /**
2169  * hif_prevent_link_low_power_states() - Prevent from going to low power states
2170  * @hif: HIF opaque context
2171  *
2172  * Return: 0 on success. Error code on failure.
2173  */
2174 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);
2175 
2176 /**
2177  * hif_allow_link_low_power_states() - Allow link to go to low power states
2178  * @hif: HIF opaque context
2179  *
2180  * Return: None
2181  */
2182 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);
2183 
2184 #else
2185 
/* Stubs for builds without delayed register write support: link
 * low-power state control is not needed, so prevent always succeeds
 * and allow is a no-op.
 */
static inline
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
{
}
2196 #endif
2197 
2198 #ifdef IPA_OPT_WIFI_DP
2199 /**
2200  * hif_prevent_l1() - Prevent from going to low power states
2201  * @hif: HIF opaque context
2202  *
2203  * Return: 0 on success. Error code on failure.
2204  */
2205 int hif_prevent_l1(struct hif_opaque_softc *hif);
2206 
2207 /**
2208  * hif_allow_l1() - Allow link to go to low power states
2209  * @hif: HIF opaque context
2210  *
2211  * Return: None
2212  */
2213 void hif_allow_l1(struct hif_opaque_softc *hif);
2214 
2215 #else
2216 
/* Stubs used when IPA_OPT_WIFI_DP is not enabled: L1 control is not
 * required, so prevent always succeeds and allow is a no-op.
 */
static inline
int hif_prevent_l1(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline
void hif_allow_l1(struct hif_opaque_softc *hif)
{
}
2227 #endif
2228 
2229 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
2230 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle);
2231 void *hif_get_dev_ba_pmm(struct hif_opaque_softc *hif_handle);
2232 
2233 /**
2234  * hif_get_dev_ba_cmem() - get base address of CMEM
2235  * @hif_handle: the HIF context
2236  *
2237  */
2238 void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle);
2239 
2240 /**
2241  * hif_get_soc_version() - get soc major version from target info
2242  * @hif_handle: the HIF context
2243  *
2244  * Return: version number
2245  */
2246 uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle);
2247 
2248 /**
2249  * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
2250  * @hif_ctx: the HIF context to assign the callback to
2251  * @callback: the callback to assign
2252  * @priv: the private data to pass to the callback when invoked
2253  *
2254  * Return: None
2255  */
2256 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
2257 			       void (*callback)(void *),
2258 			       void *priv);
2259 /*
2260  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
2261  * for defined here
2262  */
2263 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2264 ssize_t hif_dump_desc_trace_buf(struct device *dev,
2265 				struct device_attribute *attr, char *buf);
2266 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
2267 					const char *buf, size_t size);
2268 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
2269 				const char *buf, size_t size);
2270 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
2271 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
2272 #endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
2273 
2274 /**
2275  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
2276  * @hif: hif context
2277  * @ce_service_max_yield_time: CE service max yield time to set
2278  *
 * This API stores CE service max yield time in hif context based
2280  * on ini value.
2281  *
2282  * Return: void
2283  */
2284 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
2285 				       uint32_t ce_service_max_yield_time);
2286 
2287 /**
2288  * hif_get_ce_service_max_yield_time() - get CE service max yield time
2289  * @hif: hif context
2290  *
2291  * This API returns CE service max yield time.
2292  *
2293  * Return: CE service max yield time
2294  */
2295 unsigned long long
2296 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
2297 
2298 /**
2299  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
2300  * @hif: hif context
2301  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
2302  *
2303  * This API stores CE service max rx ind flush in hif context based
2304  * on ini value.
2305  *
2306  * Return: void
2307  */
2308 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2309 					 uint8_t ce_service_max_rx_ind_flush);
2310 
2311 #ifdef OL_ATH_SMART_LOGGING
2312 /**
2313  * hif_log_dump_ce() - Copy all the CE DEST ring to buf
2314  * @scn: HIF handler
2315  * @buf_cur: Current pointer in ring buffer
2316  * @buf_init:Start of the ring buffer
2317  * @buf_sz: Size of the ring buffer
2318  * @ce: Copy Engine id
2319  * @skb_sz: Max size of the SKB buffer to be copied
2320  *
2321  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
2322  * and buffers pointed by them in to the given buf
2323  *
2324  * Return: Current pointer in ring buffer
2325  */
2326 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
2327 			 uint8_t *buf_init, uint32_t buf_sz,
2328 			 uint32_t ce, uint32_t skb_sz);
2329 #endif /* OL_ATH_SMART_LOGGING */
2330 
2331 /**
2332  * hif_softc_to_hif_opaque_softc() - API to convert hif_softc handle
2333  * to hif_opaque_softc handle
2334  * @hif_handle: hif_softc type
2335  *
2336  * Return: hif_opaque_softc type
2337  */
static inline struct hif_opaque_softc *
hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
{
	/* The opaque handle refers to the same underlying object;
	 * this is a pure pointer cast with no runtime cost.
	 */
	return (struct hif_opaque_softc *)hif_handle;
}
2343 
2344 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
2345 QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx);
2346 void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx);
2347 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx);
2348 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
2349 			    uint8_t type, uint8_t access);
2350 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
2351 			       uint8_t type);
2352 #else
/* EP vote access stubs for configurations without
 * HIF_IPCI + FEATURE_HAL_DELAYED_REG_WRITE: access is always
 * permitted (HIF_EP_VOTE_ACCESS_ENABLE) and the state setters
 * compile out to no-ops.
 */
static inline QDF_STATUS
hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type, uint8_t access)
{
}

static inline uint8_t
hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type)
{
	return HIF_EP_VOTE_ACCESS_ENABLE;
}
2381 #endif
2382 
2383 #ifdef FORCE_WAKE
2384 /**
2385  * hif_srng_init_phase(): Indicate srng initialization phase
2386  * to avoid force wake as UMAC power collapse is not yet
2387  * enabled
2388  * @hif_ctx: hif opaque handle
2389  * @init_phase: initialization phase
2390  *
2391  * Return:  None
2392  */
2393 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
2394 			 bool init_phase);
2395 #else
/* No-op when FORCE_WAKE is not defined */
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
}
2401 #endif /* FORCE_WAKE */
2402 
2403 #ifdef HIF_IPCI
2404 /**
 * hif_shutdown_notifier_cb() - Callback for shutdown notifier
2406  * @ctx: hif handle
2407  *
2408  * Return:  None
2409  */
2410 void hif_shutdown_notifier_cb(void *ctx);
2411 #else
/* No-op for non-IPCI builds */
static inline
void hif_shutdown_notifier_cb(void *ctx)
{
}
2416 #endif /* HIF_IPCI */
2417 
2418 #ifdef HIF_CE_LOG_INFO
2419 /**
2420  * hif_log_ce_info() - API to log ce info
2421  * @scn: hif handle
2422  * @data: hang event data buffer
2423  * @offset: offset at which data needs to be written
2424  *
2425  * Return:  None
2426  */
2427 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
2428 		     unsigned int *offset);
2429 #else
/* No-op when HIF_CE_LOG_INFO is not enabled; hang event data is
 * left untouched.
 */
static inline
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset)
{
}
2435 #endif
2436 
2437 #ifdef HIF_CPU_PERF_AFFINE_MASK
2438 /**
2439  * hif_config_irq_set_perf_affinity_hint() - API to set affinity
2440  * @hif_ctx: hif opaque handle
2441  *
2442  * This function is used to move the WLAN IRQs to perf cores in
2443  * case of defconfig builds.
2444  *
2445  * Return:  None
2446  */
2447 void hif_config_irq_set_perf_affinity_hint(
2448 	struct hif_opaque_softc *hif_ctx);
2449 
2450 #else
/* No-op when HIF_CPU_PERF_AFFINE_MASK is not enabled */
static inline void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx)
{
}
2455 #endif
2456 
2457 /**
2458  * hif_apps_grp_irqs_enable() - enable ext grp irqs
2459  * @hif_ctx: HIF opaque context
2460  *
2461  * Return: 0 on success. Error code on failure.
2462  */
2463 int hif_apps_grp_irqs_enable(struct hif_opaque_softc *hif_ctx);
2464 
2465 /**
2466  * hif_apps_grp_irqs_disable() - disable ext grp irqs
2467  * @hif_ctx: HIF opaque context
2468  *
2469  * Return: 0 on success. Error code on failure.
2470  */
2471 int hif_apps_grp_irqs_disable(struct hif_opaque_softc *hif_ctx);
2472 
2473 /**
2474  * hif_disable_grp_irqs() - disable ext grp irqs
2475  * @scn: HIF opaque context
2476  *
2477  * Return: 0 on success. Error code on failure.
2478  */
2479 int hif_disable_grp_irqs(struct hif_opaque_softc *scn);
2480 
2481 /**
2482  * hif_enable_grp_irqs() - enable ext grp irqs
2483  * @scn: HIF opaque context
2484  *
2485  * Return: 0 on success. Error code on failure.
2486  */
2487 int hif_enable_grp_irqs(struct hif_opaque_softc *scn);
2488 
2489 enum hif_credit_exchange_type {
2490 	HIF_REQUEST_CREDIT,
2491 	HIF_PROCESS_CREDIT_REPORT,
2492 };
2493 
2494 enum hif_detect_latency_type {
2495 	HIF_DETECT_TASKLET,
2496 	HIF_DETECT_CREDIT,
2497 	HIF_DETECT_UNKNOWN
2498 };
2499 
2500 #ifdef HIF_DETECTION_LATENCY_ENABLE
2501 void hif_latency_detect_credit_record_time(
2502 	enum hif_credit_exchange_type type,
2503 	struct hif_opaque_softc *hif_ctx);
2504 
2505 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx);
2506 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx);
2507 void hif_check_detection_latency(struct hif_softc *scn,
2508 				 bool from_timer,
2509 				 uint32_t bitmap_type);
2510 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value);
2511 
2512 /**
2513  * hif_tasklet_latency_record_exec() - record execute time and
2514  * check the latency
2515  * @scn: HIF opaque context
2516  * @idx: CE id
2517  *
2518  * Return: None
2519  */
2520 void hif_tasklet_latency_record_exec(struct hif_softc *scn, int idx);
2521 
2522 /**
2523  * hif_tasklet_latency_record_sched() - record schedule time of a tasklet
2524  * @scn: HIF opaque context
2525  * @idx: CE id
2526  *
2527  * Return: None
2528  */
2529 void hif_tasklet_latency_record_sched(struct hif_softc *scn, int idx);
2530 #else
/* Latency-detection stubs: when HIF_DETECTION_LATENCY_ENABLE is not
 * set, all hooks compile out to no-ops.
 */
static inline
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx)
{}
static inline
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{}

static inline
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{}

static inline
void hif_tasklet_latency_record_exec(struct hif_softc *scn, int idx)
{}

static inline
void hif_tasklet_latency_record_sched(struct hif_softc *scn, int idx)
{}
2561 #endif
2562 
2563 #ifdef SYSTEM_PM_CHECK
2564 /**
2565  * __hif_system_pm_set_state() - Set system pm state
2566  * @hif: hif opaque handle
2567  * @state: system state
2568  *
2569  * Return:  None
2570  */
2571 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
2572 			       enum hif_system_pm_state state);
2573 
/**
 * hif_system_pm_set_state_on() - Set system pm state to ON
 * @hif: hif opaque handle
 *
 * Thin wrapper around __hif_system_pm_set_state().
 *
 * Return: None
 */
static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_ON);
}

/**
 * hif_system_pm_set_state_resuming() - Set system pm state to resuming
 * @hif: hif opaque handle
 *
 * Thin wrapper around __hif_system_pm_set_state().
 *
 * Return: None
 */
static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_RESUMING);
}

/**
 * hif_system_pm_set_state_suspending() - Set system pm state to suspending
 * @hif: hif opaque handle
 *
 * Thin wrapper around __hif_system_pm_set_state().
 *
 * Return: None
 */
static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDING);
}

/**
 * hif_system_pm_set_state_suspended() - Set system pm state to suspended
 * @hif: hif opaque handle
 *
 * Thin wrapper around __hif_system_pm_set_state().
 *
 * Return: None
 */
static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDED);
}
2621 
2622 /**
2623  * hif_system_pm_get_state() - Get system pm state
2624  * @hif: hif opaque handle
2625  *
2626  * Return:  system state
2627  */
2628 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif);
2629 
2630 /**
2631  * hif_system_pm_state_check() - Check system state and trigger resume
2632  *  if required
2633  * @hif: hif opaque handle
2634  *
2635  * Return: 0 if system is in on state else error code
2636  */
2637 int hif_system_pm_state_check(struct hif_opaque_softc *hif);
2638 #else
/* SYSTEM_PM_CHECK disabled: state tracking compiles out. Setters are
 * no-ops and both getters report 0 (system treated as on/ready).
 */
static inline
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
}

static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
}

static inline
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline int hif_system_pm_state_check(struct hif_opaque_softc *hif)
{
	return 0;
}
2675 #endif
2676 
2677 #ifdef FEATURE_IRQ_AFFINITY
2678 /**
2679  * hif_set_grp_intr_affinity() - API to set affinity for grp
2680  *  intrs set in the bitmap
2681  * @scn: hif handle
2682  * @grp_intr_bitmask: grp intrs for which perf affinity should be
2683  *  applied
2684  * @perf: affine to perf or non-perf cluster
2685  *
2686  * Return: None
2687  */
2688 void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
2689 			       uint32_t grp_intr_bitmask, bool perf);
2690 #else
/* No-op when FEATURE_IRQ_AFFINITY is not enabled */
static inline
void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
			       uint32_t grp_intr_bitmask, bool perf)
{
}
2696 #endif
2697 /**
2698  * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
2699  * @scn: hif opaque handle
2700  *
2701  * Description:
2702  *   Gets number of WMI EPs configured in target svc map. Since EP map
2703  *   include IN and OUT direction pipes, count only OUT pipes to get EPs
2704  *   configured for WMI service.
2705  *
2706  * Return:
2707  *  uint8_t: count for WMI eps in target svc map
2708  */
2709 uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *scn);
2710 
2711 #ifdef DP_UMAC_HW_RESET_SUPPORT
2712 /**
2713  * hif_register_umac_reset_handler() - Register UMAC HW reset handler
2714  * @hif_scn: hif opaque handle
2715  * @irq_handler: irq callback handler function
2716  * @tl_handler: tasklet callback handler function
 * @cb_ctx: context to be passed to @handler
2718  * @irq: irq number to be used for UMAC HW reset interrupt
2719  *
2720  * Return: QDF_STATUS of operation
2721  */
2722 QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
2723 					   bool (*irq_handler)(void *cb_ctx),
2724 					   int (*tl_handler)(void *cb_ctx),
2725 					   void *cb_ctx, int irq);
2726 
2727 /**
2728  * hif_unregister_umac_reset_handler() - Unregister UMAC HW reset handler
2729  * @hif_scn: hif opaque handle
2730  *
2731  * Return: QDF_STATUS of operation
2732  */
2733 QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn);
2734 QDF_STATUS hif_get_umac_reset_irq(struct hif_opaque_softc *hif_scn,
2735 				  int *umac_reset_irq);
2736 #else
/* UMAC HW reset stubs used when DP_UMAC_HW_RESET_SUPPORT is not
 * enabled: registration/unregistration succeed without doing anything
 * and no irq is returned through @umac_reset_irq.
 */
static inline
QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
					   bool (*irq_handler)(void *cb_ctx),
					   int (*tl_handler)(void *cb_ctx),
					   void *cb_ctx, int irq)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_get_umac_reset_irq(struct hif_opaque_softc *hif_scn,
				  int *umac_reset_irq)
{
	return QDF_STATUS_SUCCESS;
}
2758 
2759 #endif /* DP_UMAC_HW_RESET_SUPPORT */
2760 
2761 #ifdef FEATURE_DIRECT_LINK
2762 /**
2763  * hif_set_irq_config_by_ceid() - Set irq configuration for CE given by id
2764  * @scn: hif opaque handle
2765  * @ce_id: CE id
2766  * @addr: irq trigger address
2767  * @data: irq trigger data
2768  *
2769  * Return: QDF status
2770  */
2771 QDF_STATUS
2772 hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
2773 			   uint64_t addr, uint32_t data);
2774 
2775 /**
2776  * hif_get_direct_link_ce_dest_srng_buffers() - Get Direct Link ce dest srng
2777  *  buffer information
2778  * @scn: hif opaque handle
2779  * @dma_addr: pointer to array of dma addresses
2780  * @buf_size: ce dest ring buffer size
2781  *
2782  * Return: Number of buffers attached to the dest srng.
2783  */
2784 uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn,
2785 						  uint64_t **dma_addr,
2786 						  uint32_t *buf_size);
2787 
2788 /**
2789  * hif_get_direct_link_ce_srng_info() - Get Direct Link CE srng information
2790  * @scn: hif opaque handle
2791  * @info: Direct Link CEs information
2792  * @max_ce_info_len: max array size of ce info
2793  *
2794  * Return: QDF status
2795  */
2796 QDF_STATUS
2797 hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
2798 				 struct hif_direct_link_ce_info *info,
2799 				 uint8_t max_ce_info_len);
2800 #else
/* Direct Link stubs used when FEATURE_DIRECT_LINK is not enabled:
 * irq config setup trivially succeeds, no dest srng buffers are
 * reported, and the CE srng info query succeeds without filling @info.
 */
static inline QDF_STATUS
hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
			   uint64_t addr, uint32_t data)
{
	return QDF_STATUS_SUCCESS;
}

static inline
uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn,
						  uint64_t **dma_addr,
						  uint32_t *buf_size)
{
	return 0;
}

static inline QDF_STATUS
hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
				 struct hif_direct_link_ce_info *info,
				 uint8_t max_ce_info_len)
{
	return QDF_STATUS_SUCCESS;
}
2823 #endif
2824 
/**
 * hif_irq_set_affinity_hint() - apply a cpu affinity mask to an IRQ
 * @irq_num: IRQ number
 * @cpu_mask: requested cpu mask for the IRQ
 *
 * Toggles the IRQ_NO_BALANCING status around the affinity update:
 * the flag is cleared before applying @cpu_mask and restored
 * afterwards. NOTE(review): presumably the flag must be cleared for
 * the affinity change to take effect while still keeping the irq
 * balancer away from this IRQ long-term — confirm against
 * qdf_dev_set_irq_affinity()/genirq semantics.
 *
 * Return: QDF_STATUS of the affinity update
 */
static inline QDF_STATUS
hif_irq_set_affinity_hint(int irq_num, qdf_cpu_mask *cpu_mask)
{
	QDF_STATUS status;

	qdf_dev_modify_irq_status(irq_num, IRQ_NO_BALANCING, 0);
	status = qdf_dev_set_irq_affinity(irq_num,
					  (struct qdf_cpu_mask *)cpu_mask);
	qdf_dev_modify_irq_status(irq_num, 0, IRQ_NO_BALANCING);

	return status;
}
2837 
2838 #ifdef WLAN_FEATURE_AFFINITY_MGR
2839 /**
2840  * hif_affinity_mgr_init_ce_irq() - Init for CE IRQ
2841  * @scn: hif opaque handle
2842  * @id: CE ID
2843  * @irq: IRQ assigned
2844  *
2845  * Return: None
2846  */
2847 void
2848 hif_affinity_mgr_init_ce_irq(struct hif_softc *scn, int id, int irq);
2849 
2850 /**
2851  * hif_affinity_mgr_init_grp_irq() - Init for group IRQ
2852  * @scn: hif opaque handle
2853  * @grp_id: GRP ID
2854  * @irq_num: IRQ number of hif ext group
2855  * @irq: IRQ number assigned
2856  *
2857  * Return: None
2858  */
2859 void
2860 hif_affinity_mgr_init_grp_irq(struct hif_softc *scn, int grp_id,
2861 			      int irq_num, int irq);
2862 
2863 /**
2864  * hif_affinity_mgr_set_qrg_irq_affinity() - Set affinity for group IRQ
2865  * @scn: hif opaque handle
2866  * @irq: IRQ assigned
2867  * @grp_id: GRP ID
2868  * @irq_index: IRQ number of hif ext group
 * @cpu_mask: requested cpu_mask for IRQ
2870  *
2871  * Return: status
2872  */
2873 QDF_STATUS
2874 hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc *scn, uint32_t irq,
2875 				      uint32_t grp_id, uint32_t irq_index,
2876 				      qdf_cpu_mask *cpu_mask);
2877 
2878 /**
2879  * hif_affinity_mgr_set_ce_irq_affinity() - Set affinity for CE IRQ
2880  * @scn: hif opaque handle
2881  * @irq: IRQ assigned
2882  * @ce_id: CE ID
 * @cpu_mask: requested cpu_mask for IRQ
2884  *
2885  * Return: status
2886  */
2887 QDF_STATUS
2888 hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc *scn, uint32_t irq,
2889 				     uint32_t ce_id, qdf_cpu_mask *cpu_mask);
2890 
2891 /**
2892  * hif_affinity_mgr_affine_irq() - Affine CE and GRP IRQs
2893  * @scn: hif opaque handle
2894  *
2895  * Return: None
2896  */
2897 void hif_affinity_mgr_affine_irq(struct hif_softc *scn);
2898 #else
/* Affinity manager stubs used when WLAN_FEATURE_AFFINITY_MGR is not
 * enabled: init/affine hooks are no-ops, and the set-affinity calls
 * fall straight through to hif_irq_set_affinity_hint().
 */
static inline void
hif_affinity_mgr_init_ce_irq(struct hif_softc *scn, int id, int irq)
{
}

static inline void
hif_affinity_mgr_init_grp_irq(struct hif_softc *scn, int grp_id, int irq_num,
			      int irq)
{
}

static inline QDF_STATUS
hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc *scn, uint32_t irq,
				      uint32_t grp_id, uint32_t irq_index,
				      qdf_cpu_mask *cpu_mask)
{
	return hif_irq_set_affinity_hint(irq, cpu_mask);
}

static inline QDF_STATUS
hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc *scn, uint32_t irq,
				     uint32_t ce_id, qdf_cpu_mask *cpu_mask)
{
	return hif_irq_set_affinity_hint(irq, cpu_mask);
}

static inline
void hif_affinity_mgr_affine_irq(struct hif_softc *scn)
{
}
2929 #endif
2930 
2931 /**
2932  * hif_affinity_mgr_set_affinity() - Affine CE and GRP IRQs
2933  * @scn: hif opaque handle
2934  *
2935  * Return: None
2936  */
2937 void hif_affinity_mgr_set_affinity(struct hif_opaque_softc *scn);
2938 
2939 #ifdef FEATURE_HIF_DELAYED_REG_WRITE
2940 /**
2941  * hif_print_reg_write_stats() - Print hif delayed reg write stats
2942  * @hif_ctx: hif opaque handle
2943  *
2944  * Return: None
2945  */
2946 void hif_print_reg_write_stats(struct hif_opaque_softc *hif_ctx);
2947 #else
/* No-op when FEATURE_HIF_DELAYED_REG_WRITE is not enabled */
static inline void hif_print_reg_write_stats(struct hif_opaque_softc *hif_ctx)
{
}
2951 #endif
2952 void hif_ce_print_ring_stats(struct hif_opaque_softc *hif_ctx);
2953 #endif /* _HIF_H_ */
2954