xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision c0d6f0176e7b1b012c9eef4d3ee54c109352040e)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _HIF_H_
21 #define _HIF_H_
22 
23 #ifdef __cplusplus
24 extern "C" {
25 #endif /* __cplusplus */
26 
27 /* Header files */
28 #include <qdf_status.h>
29 #include "qdf_ipa.h"
30 #include "qdf_nbuf.h"
31 #include "qdf_lro.h"
32 #include "ol_if_athvar.h"
33 #include <linux/platform_device.h>
34 #ifdef HIF_PCI
35 #include <linux/pci.h>
36 #endif /* HIF_PCI */
37 #ifdef HIF_USB
38 #include <linux/usb.h>
39 #endif /* HIF_USB */
40 #ifdef IPA_OFFLOAD
41 #include <linux/ipa.h>
42 #endif
43 #include "cfg_ucfg_api.h"
44 #include "qdf_dev.h"
45 #include <wlan_init_cfg.h>
46 
47 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
48 
49 typedef void __iomem *A_target_id_t;
50 typedef void *hif_handle_t;
51 
52 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
53 #define HIF_WORK_DRAIN_WAIT_CNT 50
54 
55 #define HIF_EP_WAKE_RESET_WAIT_CNT 10
56 #endif
57 
58 #define HIF_TYPE_AR6002   2
59 #define HIF_TYPE_AR6003   3
60 #define HIF_TYPE_AR6004   5
61 #define HIF_TYPE_AR9888   6
62 #define HIF_TYPE_AR6320   7
63 #define HIF_TYPE_AR6320V2 8
64 /* For attaching Peregrine 2.0 board host_reg_tbl only */
65 #define HIF_TYPE_AR9888V2 9
66 #define HIF_TYPE_ADRASTEA 10
67 #define HIF_TYPE_AR900B 11
68 #define HIF_TYPE_QCA9984 12
69 #define HIF_TYPE_QCA9888 14
70 #define HIF_TYPE_QCA8074 15
71 #define HIF_TYPE_QCA6290 16
72 #define HIF_TYPE_QCN7605 17
73 #define HIF_TYPE_QCA6390 18
74 #define HIF_TYPE_QCA8074V2 19
75 #define HIF_TYPE_QCA6018  20
76 #define HIF_TYPE_QCN9000 21
77 #define HIF_TYPE_QCA6490 22
78 #define HIF_TYPE_QCA6750 23
79 #define HIF_TYPE_QCA5018 24
80 #define HIF_TYPE_QCN6122 25
81 #define HIF_TYPE_KIWI 26
82 #define HIF_TYPE_QCN9224 27
83 #define HIF_TYPE_QCA9574 28
84 #define HIF_TYPE_MANGO 29
85 #define HIF_TYPE_QCA5332 30
86 #define HIF_TYPE_QCN9160 31
87 #define HIF_TYPE_PEACH 32
88 #define HIF_TYPE_WCN6450 33
89 #define HIF_TYPE_QCN6432 34
90 
91 #define DMA_COHERENT_MASK_DEFAULT   37
92 
93 #ifdef IPA_OFFLOAD
94 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
95 #endif
96 
/**
 * enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	/* first entry is pinned to 16; all following values auto-increment */
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring4,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
	txmon2host_monitor_destination_mac3,
	txmon2host_monitor_destination_mac2,
	txmon2host_monitor_destination_mac1,
	host2tx_monitor_ring1,
};
143 
144 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
/*
 * enum hif_legacy_pci_irq - IRQ sources when legacy (INTx) PCI interrupts
 * are used; values start at 0 (ce0) and auto-increment in declaration order,
 * so reordering entries would change existing interrupt numbers.
 */
enum hif_legacy_pci_irq {
	ce0,
	ce1,
	ce2,
	ce3,
	ce4,
	ce5,
	ce6,
	ce7,
	ce8,
	ce9,
	ce10,
	ce11,
	ce12,
	ce13,
	ce14,
	ce15,
	reo2sw8_intr2,
	reo2sw7_intr2,
	reo2sw6_intr2,
	reo2sw5_intr2,
	reo2sw4_intr2,
	reo2sw3_intr2,
	reo2sw2_intr2,
	reo2sw1_intr2,
	reo2sw0_intr2,
	reo2sw8_intr,
	reo2sw7_intr,
	/* NOTE(review): "inrr" looks like a typo for "intr"; renaming would
	 * break any external references to this enumerator — confirm usage
	 * before fixing.
	 */
	reo2sw6_inrr,
	reo2sw5_intr,
	reo2sw4_intr,
	reo2sw3_intr,
	reo2sw2_intr,
	reo2sw1_intr,
	reo2sw0_intr,
	reo2status_intr2,
	reo_status,
	reo2rxdma_out_2,
	reo2rxdma_out_1,
	reo_cmd,
	sw2reo6,
	sw2reo5,
	sw2reo1,
	sw2reo,
	rxdma2reo_mlo_0_dst_ring1,
	rxdma2reo_mlo_0_dst_ring0,
	rxdma2reo_mlo_1_dst_ring1,
	rxdma2reo_mlo_1_dst_ring0,
	rxdma2reo_dst_ring1,
	rxdma2reo_dst_ring0,
	rxdma2sw_dst_ring1,
	rxdma2sw_dst_ring0,
	rxdma2release_dst_ring1,
	rxdma2release_dst_ring0,
	sw2rxdma_2_src_ring,
	sw2rxdma_1_src_ring,
	sw2rxdma_0,
	wbm2sw6_release2,
	wbm2sw5_release2,
	wbm2sw4_release2,
	wbm2sw3_release2,
	wbm2sw2_release2,
	wbm2sw1_release2,
	wbm2sw0_release2,
	wbm2sw6_release,
	wbm2sw5_release,
	wbm2sw4_release,
	wbm2sw3_release,
	wbm2sw2_release,
	wbm2sw1_release,
	wbm2sw0_release,
	wbm2sw_link,
	wbm_error_release,
	sw2txmon_src_ring,
	sw2rxmon_src_ring,
	txmon2sw_p1_intr1,
	txmon2sw_p1_intr0,
	txmon2sw_p0_dest1,
	txmon2sw_p0_dest0,
	rxmon2sw_p1_intr1,
	rxmon2sw_p1_intr0,
	rxmon2sw_p0_dest1,
	rxmon2sw_p0_dest0,
	sw_release,
	sw2tcl_credit2,
	sw2tcl_credit,
	/* NOTE(review): sw2tcl4 is declared before sw2tcl5 — order looks
	 * intentional (values are position-dependent); do not "fix" by
	 * sorting.
	 */
	sw2tcl4,
	sw2tcl5,
	sw2tcl3,
	sw2tcl2,
	sw2tcl1,
	sw2wbm1,
	misc_8,
	misc_7,
	misc_6,
	misc_5,
	misc_4,
	misc_3,
	misc_2,
	misc_1,
	misc_0,
};
247 #endif
248 
249 struct CE_state;
250 #ifdef QCA_WIFI_QCN9224
251 #define CE_COUNT_MAX 16
252 #else
253 #define CE_COUNT_MAX 12
254 #endif
255 
256 #ifndef HIF_MAX_GROUP
257 #define HIF_MAX_GROUP WLAN_CFG_INT_NUM_CONTEXTS
258 #endif
259 
260 #ifdef CONFIG_BERYLLIUM
261 #define HIF_MAX_GRP_IRQ 25
262 #else
263 #define HIF_MAX_GRP_IRQ 16
264 #endif
265 
266 #ifndef NAPI_YIELD_BUDGET_BASED
267 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
268 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
269 #endif
270 #else  /* NAPI_YIELD_BUDGET_BASED */
271 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
272 #endif /* NAPI_YIELD_BUDGET_BASED */
273 
274 #define QCA_NAPI_BUDGET    64
275 #define QCA_NAPI_DEF_SCALE  \
276 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
277 
278 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
279 /* NOTE: "napi->scale" can be changed,
280  * but this does not change the number of buckets
281  */
282 #define QCA_NAPI_NUM_BUCKETS 4
283 
284 /**
285  * struct qca_napi_stat - stats structure for execution contexts
286  * @napi_schedules: number of times the schedule function is called
287  * @napi_polls: number of times the execution context runs
288  * @napi_completes: number of times that the generating interrupt is re-enabled
289  * @napi_workdone: cumulative of all work done reported by handler
290  * @cpu_corrected: incremented when execution context runs on a different core
291  *			than the one that its irq is affined to.
292  * @napi_budget_uses: histogram of work done per execution run
293  * @time_limit_reached: count of yields due to time limit thresholds
294  * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: maximum single napi poll duration observed
296  * @poll_time_buckets: histogram of poll times for the napi
297  *
298  */
struct qca_napi_stat {
	uint32_t napi_schedules;	/* times the schedule function was called */
	uint32_t napi_polls;		/* times the execution context ran */
	uint32_t napi_completes;	/* times the generating irq was re-enabled */
	uint32_t napi_workdone;		/* cumulative work reported by the handler */
	uint32_t cpu_corrected;		/* runs on a core other than the affined one */
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];	/* work-done histogram */
	uint32_t time_limit_reached;	/* yields due to time-limit thresholds */
	uint32_t rxpkt_thresh_reached;	/* yields due to the work limit */
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];	/* poll-time histogram */
#endif
};
313 
314 /*Number of buckets for latency*/
315 #define HIF_SCHED_LATENCY_BUCKETS 8
316 
317 /*Buckets for latency between 0 to 2 ms*/
318 #define HIF_SCHED_LATENCY_BUCKET_0_2 2
319 /*Buckets for latency between 3 to 10 ms*/
320 #define HIF_SCHED_LATENCY_BUCKET_3_10 10
321 /*Buckets for latency between 11 to 20 ms*/
322 #define HIF_SCHED_LATENCY_BUCKET_11_20 20
323 /*Buckets for latency between 21 to 50 ms*/
324 #define HIF_SCHED_LATENCY_BUCKET_21_50 50
325 /*Buckets for latency between 50 to 100 ms*/
326 #define HIF_SCHED_LATENCY_BUCKET_51_100 100
327 /*Buckets for latency between 100 to 250 ms*/
328 #define HIF_SCHED_LATENCY_BUCKET_101_250 250
329 /*Buckets for latency between 250 to 500 ms*/
330 #define HIF_SCHED_LATENCY_BUCKET_251_500 500
331 
332 /**
333  * struct qca_napi_info - per NAPI instance data structure
334  * @netdev: dummy net_dev
 * @hif_ctx: opaque HIF context this napi instance belongs to
 * @napi: kernel napi_struct for this instance
 * @scale: work scale factor (currently same on all instances)
 * @id: napi instance id
 * @cpu: cpu associated with this instance
 * @irq: irq number for this instance
 * @cpumask: cpu affinity mask for this instance
 * @stats: per-cpu statistics for this instance
 * @offld_flush_cb: rx offload flush callback (data rx CEs only)
 * @rx_thread_napi: napi_struct used by the rx thread
 * @rx_thread_netdev: dummy net_dev for the rx thread napi
 * @lro_ctx: LRO context
346  * @lro_ctx:
347  * @poll_start_time: napi poll service start time
348  * @sched_latency_stats: napi schedule latency stats
349  * @tstamp: napi schedule start timestamp
350  *
351  * This data structure holds stuff per NAPI instance.
352  * Note that, in the current implementation, though scale is
353  * an instance variable, it is set to the same value for all
354  * instances.
355  */
struct qca_napi_info {
	struct net_device    netdev; /* dummy net_dev */
	void                 *hif_ctx;
	struct napi_struct   napi;
	uint8_t              scale;   /* currently same on all instances */
	uint8_t              id;
	uint8_t              cpu;
	int                  irq;
	cpumask_t            cpumask;
	struct qca_napi_stat stats[NR_CPUS];	/* one stats entry per cpu */
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct   rx_thread_napi;
	struct net_device    rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t        lro_ctx;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	unsigned long long poll_start_time;	/* napi poll service start time */
#endif
#ifdef HIF_LATENCY_PROFILE_ENABLE
	uint64_t sched_latency_stats[HIF_SCHED_LATENCY_BUCKETS];	/* schedule-latency histogram */
	uint64_t tstamp;	/* napi schedule start timestamp */
#endif
};
381 
/**
 * enum qca_napi_tput_state - throughput state used for affinity/voting mode
 * @QCA_NAPI_TPUT_UNINITIALIZED: state not yet determined
 * @QCA_NAPI_TPUT_LO: low throughput
 * @QCA_NAPI_TPUT_HI: high throughput
 */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};

/**
 * enum qca_napi_cpu_state - hotplug state of an entry in the napi cpu table
 * @QCA_NAPI_CPU_UNINITIALIZED: entry not yet initialized
 * @QCA_NAPI_CPU_DOWN: cpu is offline
 * @QCA_NAPI_CPU_UP: cpu is online
 */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };
391 
392 /**
393  * struct qca_napi_cpu - an entry of the napi cpu table
 * @state:       up/down state of this cpu (enum qca_napi_cpu_state)
395  * @core_id:     physical core id of the core
396  * @cluster_id:  cluster this core belongs to
397  * @core_mask:   mask to match all core of this cluster
398  * @thread_mask: mask for this core within the cluster
399  * @max_freq:    maximum clock this core can be clocked at
400  *               same for all cpus of the same core.
401  * @napis:       bitmap of napi instances on this core
402  * @execs:       bitmap of execution contexts on this core
403  * @cluster_nxt: chain to link cores within the same cluster
404  *
405  * This structure represents a single entry in the napi cpu
406  * table. The table is part of struct qca_napi_data.
407  * This table is initialized by the init function, called while
408  * the first napi instance is being created, updated by hotplug
409  * notifier and when cpu affinity decisions are made (by throughput
410  * detection), and deleted when the last napi instance is removed.
411  */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int			core_id;	/* physical core id */
	int			cluster_id;	/* cluster this core belongs to */
	cpumask_t		core_mask;	/* all cores of this cluster */
	cpumask_t		thread_mask;	/* this core within the cluster */
	unsigned int		max_freq;	/* max clock of this core */
	uint32_t		napis;		/* bitmap of napis on this core */
	uint32_t		execs;		/* bitmap of exec ctxs on this core */
	int			cluster_nxt;  /* index, not pointer */
};
423 
424 /**
425  * struct qca_napi_data - collection of napi data for a single hif context
426  * @hif_softc: pointer to the hif context
427  * @lock: spinlock used in the event state machine
428  * @state: state variable used in the napi stat machine
429  * @ce_map: bit map indicating which ce's have napis running
430  * @exec_map: bit map of instantiated exec contexts
431  * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: per-CE pointers to the created napi instances
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: head index of the little-cluster core list (chained
 *              via qca_napi_cpu.cluster_nxt)
 * @bigcl_head: head index of the big-cluster core list (chained
 *              via qca_napi_cpu.cluster_nxt)
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: napi feature flags
439  */
struct qca_napi_data {
	struct               hif_softc *hif_softc;
	qdf_spinlock_t       lock;	/* protects the napi state machine */
	uint32_t             state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t             ce_map;
	uint32_t             exec_map;	/* bitmap of instantiated exec contexts */
	uint32_t             user_cpu_affin_mask;	/* CPU affinity from INI */
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu  napi_cpu[NR_CPUS];	/* cpu info for irq affinity */
	int                  lilcl_head, bigcl_head;	/* cluster list heads */
	enum qca_napi_tput_state napi_mode;	/* affinity & clock voting mode */
	struct qdf_cpuhp_handler *cpuhp_handler;	/* hotplug registration */
	uint8_t              flags;
};
458 
459 /**
460  * struct hif_config_info - Place Holder for HIF configuration
461  * @enable_self_recovery: Self Recovery
462  * @enable_runtime_pm: Enable Runtime PM
463  * @runtime_pm_delay: Runtime PM Delay
464  * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
465  *
466  * Structure for holding HIF ini parameters.
467  */
struct hif_config_info {
	bool enable_self_recovery;	/* allow self recovery */
#ifdef FEATURE_RUNTIME_PM
	uint8_t enable_runtime_pm;	/* runtime PM enablement mode */
	u_int32_t runtime_pm_delay;	/* runtime PM inactivity delay */
#endif
	uint64_t rx_softirq_max_yield_duration_ns;	/* max RX softirq yield time (ns) */
};
476 
477 /**
478  * struct hif_target_info - Target Information
479  * @target_version: Target Version
480  * @target_type: Target Type
481  * @target_revision: Target Revision
482  * @soc_version: SOC Version
483  * @hw_name: pointer to hardware name
484  *
485  * Structure to hold target information.
486  */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;	/* pointer to hardware name string */
};
494 
/* Opaque handle to a HIF instance; the real definition is private to HIF */
struct hif_opaque_softc {
};
497 
498 /**
499  * struct hif_ce_ring_info - CE ring information
500  * @ring_id: ring id
501  * @ring_dir: ring direction
502  * @num_entries: number of entries in ring
503  * @entry_size: ring entry size
504  * @ring_base_paddr: srng base physical address
505  * @hp_paddr: head pointer physical address
506  * @tp_paddr: tail pointer physical address
507  */
struct hif_ce_ring_info {
	uint8_t ring_id;
	uint8_t ring_dir;	/* ring direction */
	uint32_t num_entries;
	uint32_t entry_size;
	uint64_t ring_base_paddr;	/* srng base physical address */
	uint64_t hp_paddr;	/* head pointer physical address */
	uint64_t tp_paddr;	/* tail pointer physical address */
};
517 
518 /**
519  * struct hif_direct_link_ce_info - Direct Link CE information
 * @ce_id: CE id
521  * @pipe_dir: Pipe direction
522  * @ring_info: ring information
523  */
struct hif_direct_link_ce_info {
	uint8_t ce_id;		/* CE id */
	uint8_t pipe_dir;	/* pipe direction */
	struct hif_ce_ring_info ring_info;
};
529 
530 /**
531  * enum hif_event_type - Type of DP events to be recorded
532  * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
533  * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event
534  * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event
535  * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
536  * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
537  * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
538  * @HIF_EVENT_BH_COMPLETE: NAPI POLL completion event
539  * @HIF_EVENT_BH_FORCE_BREAK: NAPI POLL force break event
540  * @HIF_EVENT_IRQ_DISABLE_EXPIRED: IRQ disable expired event
541  */
enum hif_event_type {
	/* Each value is also used as a bit position in
	 * HIF_EVENT_HIST_ENABLE_MASK (e.g. 0x19 enables IRQ_TRIGGER,
	 * BH_SCHED and SRNG_ACCESS_START), so order matters.
	 */
	HIF_EVENT_IRQ_TRIGGER,
	HIF_EVENT_TIMER_ENTRY,
	HIF_EVENT_TIMER_EXIT,
	HIF_EVENT_BH_SCHED,
	HIF_EVENT_SRNG_ACCESS_START,
	HIF_EVENT_SRNG_ACCESS_END,
	HIF_EVENT_BH_COMPLETE,
	HIF_EVENT_BH_FORCE_BREAK,
	HIF_EVENT_IRQ_DISABLE_EXPIRED,
	/* Do check hif_hist_skip_event_record when adding new events */
};
554 
555 /**
556  * enum hif_system_pm_state - System PM state
557  * @HIF_SYSTEM_PM_STATE_ON: System in active state
558  * @HIF_SYSTEM_PM_STATE_BUS_RESUMING: bus resume in progress as part of
559  *  system resume
560  * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDING: bus suspend in progress as part of
561  *  system suspend
562  * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDED: bus suspended as part of system suspend
563  */
enum hif_system_pm_state {
	HIF_SYSTEM_PM_STATE_ON,			/* system active */
	HIF_SYSTEM_PM_STATE_BUS_RESUMING,	/* bus resume in progress */
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDING,	/* bus suspend in progress */
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDED,	/* bus suspended */
};
570 
571 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
572 #define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP
573 
574 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
575 /* HIF_EVENT_HIST_MAX should always be power of 2 */
576 #define HIF_EVENT_HIST_MAX		512
577 
578 #define HIF_EVENT_HIST_ENABLE_MASK	0xFF
579 
/* With SLUB debug / CE debug data enabled, event-history timestamps
 * come from the qdf log timestamp source.
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_get_log_timestamp();
}
584 
585 #else
586 
587 #define HIF_EVENT_HIST_MAX		32
588 /* Enable IRQ TRIGGER, NAPI SCHEDULE, SRNG ACCESS START */
589 #define HIF_EVENT_HIST_ENABLE_MASK	0x19
590 
/* Without CE debug, event-history timestamps come from the scheduler clock */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_sched_clock();
}
595 
596 #endif
597 
598 /**
599  * struct hif_event_record - an entry of the DP event history
600  * @hal_ring_id: ring id for which event is recorded
601  * @hp: head pointer of the ring (may not be applicable for all events)
602  * @tp: tail pointer of the ring (may not be applicable for all events)
603  * @cpu_id: cpu id on which the event occurred
604  * @timestamp: timestamp when event occurred
605  * @type: type of the event
606  *
607  * This structure represents the information stored for every datapath
608  * event which is logged in the history.
609  */
struct hif_event_record {
	uint8_t hal_ring_id;	/* ring id for which the event is recorded */
	uint32_t hp;		/* ring head pointer (if applicable) */
	uint32_t tp;		/* ring tail pointer (if applicable) */
	int cpu_id;		/* cpu on which the event occurred */
	uint64_t timestamp;	/* time the event occurred */
	enum hif_event_type type;
};
618 
619 /**
620  * struct hif_event_misc - history related misc info
621  * @last_irq_index: last irq event index in history
622  * @last_irq_ts: last irq timestamp
623  */
struct hif_event_misc {
	int32_t last_irq_index;	/* index of the last irq event in history */
	uint64_t last_irq_ts;	/* timestamp of the last irq event */
};
628 
629 #ifdef WLAN_FEATURE_AFFINITY_MGR
630 /**
631  * struct hif_cpu_affinity - CPU affinity mask info for IRQ
632  *
633  * @current_irq_mask: Current CPU mask set for IRQ
634  * @wlan_requested_mask: CPU mask requested by WLAN
635  * @walt_taken_mask: Current CPU taken by Audio
636  * @last_updated: Last time IRQ CPU affinity was updated
637  * @last_affined_away: Last time when IRQ was affined away
638  * @update_requested: IRQ affinity hint set requested by WLAN
639  * @irq: IRQ number
640  */
struct hif_cpu_affinity {
	qdf_cpu_mask current_irq_mask;		/* current CPU mask set for IRQ */
	qdf_cpu_mask wlan_requested_mask;	/* CPU mask requested by WLAN */
	qdf_cpu_mask walt_taken_mask;		/* CPUs currently taken by audio */
	uint64_t last_updated;		/* last time affinity was updated */
	uint64_t last_affined_away;	/* last time IRQ was affined away */
	bool update_requested;		/* affinity hint set requested by WLAN */
	int irq;			/* IRQ number */
};
650 #endif
651 
652 /**
653  * struct hif_event_history - history for one interrupt group
654  * @index: index to store new event
655  * @misc: event misc information
656  * @event: event entry
657  *
658  * This structure represents the datapath history for one
659  * interrupt group.
660  */
struct hif_event_history {
	qdf_atomic_t index;	/* index to store the next event */
	struct hif_event_misc misc;
	struct hif_event_record event[HIF_EVENT_HIST_MAX];	/* circular buffer */
};
666 
667 /**
668  * hif_desc_history_log_register() - Register hif_event_desc_history buffers
669  *
670  * Return: None
671  */
672 void hif_desc_history_log_register(void);
673 
674 /**
675  * hif_desc_history_log_unregister() - Unregister hif_event_desc_history
676  *
677  * Return: None
678  */
679 void hif_desc_history_log_unregister(void);
680 
681 /**
682  * hif_hist_record_event() - Record one datapath event in history
683  * @hif_ctx: HIF opaque context
684  * @event: DP event entry
685  * @intr_grp_id: interrupt group ID registered with hif
686  *
687  * Return: None
688  */
689 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
690 			   struct hif_event_record *event,
691 			   uint8_t intr_grp_id);
692 
693 /**
694  * hif_event_history_init() - Initialize SRNG event history buffers
695  * @hif_ctx: HIF opaque context
696  * @id: context group ID for which history is recorded
697  *
698  * Returns: None
699  */
700 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id);
701 
702 /**
703  * hif_event_history_deinit() - De-initialize SRNG event history buffers
704  * @hif_ctx: HIF opaque context
705  * @id: context group ID for which history is recorded
706  *
707  * Returns: None
708  */
709 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id);
710 
711 /**
712  * hif_record_event() - Wrapper function to form and record DP event
713  * @hif_ctx: HIF opaque context
714  * @intr_grp_id: interrupt group ID registered with hif
715  * @hal_ring_id: ring id for which event is recorded
716  * @hp: head pointer index of the srng
717  * @tp: tail pointer index of the srng
718  * @type: type of the event to be logged in history
719  *
720  * Return: None
721  */
722 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
723 				    uint8_t intr_grp_id,
724 				    uint8_t hal_ring_id,
725 				    uint32_t hp,
726 				    uint32_t tp,
727 				    enum hif_event_type type)
728 {
729 	struct hif_event_record event;
730 
731 	event.hal_ring_id = hal_ring_id;
732 	event.hp = hp;
733 	event.tp = tp;
734 	event.type = type;
735 
736 	hif_hist_record_event(hif_ctx, &event, intr_grp_id);
737 
738 	return;
739 }
740 
741 #else
/* WLAN_FEATURE_DP_EVENT_HISTORY disabled: all history APIs are no-ops */
static inline void hif_desc_history_log_register(void)
{
}

static inline void hif_desc_history_log_unregister(void)
{
}

static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}

static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx,
					  uint8_t id)
{
}

static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx,
					    uint8_t id)
{
}
768 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
769 
770 void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx);
771 
772 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
773 void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx);
774 #else
/* no-op when CE descriptor history is not compiled in */
static
inline void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx) {}
777 #endif
778 
779 /**
780  * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
781  *
782  * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
783  * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
784  *                         minimize power
785  * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
786  *                         platform-specific measures to completely power-off
787  *                         the module and associated hardware (i.e. cut power
788  *                         supplies)
789  */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,	/* power up interface and/or module */
	HIF_DEVICE_POWER_DOWN,	/* bus-specific measures to minimize power */
	HIF_DEVICE_POWER_CUT	/* completely power off module and hardware */
};
795 
796 /**
797  * enum hif_enable_type: what triggered the enabling of hif
798  *
799  * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
800  * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
801  * @HIF_ENABLE_TYPE_MAX: Max value
802  */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,	/* probe triggered enable */
	HIF_ENABLE_TYPE_REINIT,	/* reinit triggered enable */
	HIF_ENABLE_TYPE_MAX
};
808 
809 /**
810  * enum hif_disable_type: what triggered the disabling of hif
811  *
812  * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
813  * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
814  * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
815  * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
816  * @HIF_DISABLE_TYPE_MAX: Max value
817  */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,	/* probe error triggered disable */
	HIF_DISABLE_TYPE_REINIT_ERROR,	/* reinit error triggered disable */
	HIF_DISABLE_TYPE_REMOVE,	/* remove triggered disable */
	HIF_DISABLE_TYPE_SHUTDOWN,	/* shutdown triggered disable */
	HIF_DISABLE_TYPE_MAX
};
825 
826 /**
827  * enum hif_device_config_opcode: configure mode
828  *
829  * @HIF_DEVICE_POWER_STATE: device power state
830  * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
831  * @HIF_DEVICE_GET_FIFO_ADDR: get block address
832  * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
833  * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
834  * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
835  * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
836  * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
837  * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
838  * @HIF_DEVICE_GET_OS_DEVICE: get OS device
839  * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
840  * @HIF_BMI_DONE: bmi done
841  * @HIF_DEVICE_SET_TARGET_TYPE: set target type
842  * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
843  * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
844  */
enum hif_device_config_opcode {
	/* only the first value is pinned; the rest auto-increment, so
	 * new opcodes must be appended to keep existing values stable
	 */
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
862 
863 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/**
 * struct HID_ACCESS_LOG - one entry of the PCIe access debug log
 * @seqnum: sequence number of the access
 * @is_write: true for a write access, false for a read
 * @addr: address that was accessed
 * @value: value written or read
 *
 * NOTE(review): "HID" looks like a typo for "HIF", but the tag may be
 * referenced elsewhere — confirm before renaming.
 */
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
870 #endif
871 
872 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
873 		uint32_t value);
874 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
875 
876 #define HIF_MAX_DEVICES                 1
877 /**
878  * struct htc_callbacks - Structure for HTC Callbacks methods
879  * @context:             context to pass to the @dsr_handler
880  *                       note : @rw_compl_handler is provided the context
881  *                       passed to hif_read_write
882  * @rw_compl_handler:    Read / write completion handler
883  * @dsr_handler:         DSR Handler
884  */
struct htc_callbacks {
	void *context;	/* passed to dsr_handler; rw_compl_handler gets the
			 * context passed to hif_read_write instead
			 */
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};
890 
891 /**
892  * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
893  * @context: Private data context
894  * @set_recovery_in_progress: To Set Driver state for recovery in progress
895  * @is_recovery_in_progress: Query if driver state is recovery in progress
896  * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
897  * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
899  * @get_bandwidth_level: Query current bandwidth level for the driver
900  * @prealloc_get_consistent_mem_unaligned: get prealloc unaligned consistent mem
901  * @prealloc_put_consistent_mem_unaligned: put unaligned consistent mem to pool
902  * @prealloc_get_multi_pages: get prealloc multi pages memory
903  * @prealloc_put_multi_pages: put prealloc multi pages memory back to pool
904  * This Structure provides callback pointer for HIF to query hdd for driver
905  * states.
906  */
struct hif_driver_state_callbacks {
	void *context;	/* private data passed to each callback */
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
	int (*get_bandwidth_level)(void *context);
	/* prealloc hooks: get/put unaligned consistent memory and
	 * multi-page memory from/to a preallocated pool
	 */
	void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size,
						       qdf_dma_addr_t *paddr,
						       uint32_t ring_type);
	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
	void (*prealloc_get_multi_pages)(uint32_t desc_type,
					 qdf_size_t elem_size,
					 uint16_t elem_num,
					 struct qdf_mem_multi_page_t *pages,
					 bool cacheable);
	void (*prealloc_put_multi_pages)(uint32_t desc_type,
					 struct qdf_mem_multi_page_t *pages);
};
927 
928 /* This API detaches the HTC layer from the HIF device */
929 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
930 
931 /****************************************************************/
932 /* BMI and Diag window abstraction                              */
933 /****************************************************************/
934 
935 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
936 
937 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
938 				     * handled atomically by
939 				     * DiagRead/DiagWrite
940 				     */
941 
942 #ifdef WLAN_FEATURE_BMI
943 /*
944  * API to handle HIF-specific BMI message exchanges, this API is synchronous
945  * and only allowed to be called from a context that can block (sleep)
946  */
947 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
948 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
949 				uint8_t *pSendMessage, uint32_t Length,
950 				uint8_t *pResponseMessage,
951 				uint32_t *pResponseLength, uint32_t TimeoutMS);
952 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
953 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
954 #else /* WLAN_FEATURE_BMI */
/* WLAN_FEATURE_BMI disabled: BMI hooks compile to no-ops */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}

static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	/* no BMI exchange phase is required */
	return false;
}
965 #endif /* WLAN_FEATURE_BMI */
966 
967 #ifdef HIF_CPU_CLEAR_AFFINITY
968 /**
969  * hif_config_irq_clear_cpu_affinity() - Remove cpu affinity of IRQ
970  * @scn: HIF handle
971  * @intr_ctxt_id: interrupt group index
972  * @cpu: CPU core to clear
973  *
974  * Return: None
975  */
976 void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
977 				       int intr_ctxt_id, int cpu);
978 #else
/* HIF_CPU_CLEAR_AFFINITY disabled: clearing irq cpu affinity is a no-op */
static inline
void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
				       int intr_ctxt_id, int cpu)
{
}
984 #endif
985 
986 /*
987  * APIs to handle HIF specific diagnostic read accesses. These APIs are
988  * synchronous and only allowed to be called from a context that
989  * can block (sleep). They are not high performance APIs.
990  *
991  * hif_diag_read_access reads a 4 Byte aligned/length value from a
992  * Target register or memory word.
993  *
994  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
995  */
996 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
997 				uint32_t address, uint32_t *data);
998 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
999 		      uint8_t *data, int nbytes);
1000 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
1001 			void *ramdump_base, uint32_t address, uint32_t size);
1002 /*
1003  * APIs to handle HIF specific diagnostic write accesses. These APIs are
1004  * synchronous and only allowed to be called from a context that
1005  * can block (sleep).
1006  * They are not high performance APIs.
1007  *
1008  * hif_diag_write_access writes a 4 Byte aligned/length value to a
1009  * Target register or memory word.
1010  *
1011  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
1012  */
1013 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
1014 				 uint32_t address, uint32_t data);
1015 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
1016 			uint32_t address, uint8_t *data, int nbytes);
1017 
1018 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
1019 
1020 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
1021 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
1022 
1023 /*
1024  * Set the FASTPATH_mode_on flag in sc, for use by data path
1025  */
1026 #ifdef WLAN_FEATURE_FASTPATH
1027 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
1028 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
1029 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
1030 
1031 /**
1032  * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
1033  * @hif_ctx: HIF opaque context
1034  * @handler: Callback function
1035  * @context: handle for callback function
1036  *
1037  * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
1038  */
1039 QDF_STATUS hif_ce_fastpath_cb_register(
1040 		struct hif_opaque_softc *hif_ctx,
1041 		fastpath_msg_handler handler, void *context);
1042 #else
/* Fastpath compiled out (WLAN_FEATURE_FASTPATH undefined): callback
 * registration always fails and no CE handle is available.
 */
static inline QDF_STATUS hif_ce_fastpath_cb_register(
		struct hif_opaque_softc *hif_ctx,
		fastpath_msg_handler handler, void *context)
{
	return QDF_STATUS_E_FAILURE;
}

static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
1054 
1055 #endif
1056 
1057 /*
1058  * Enable/disable CDC max performance workaround
1059  * For max-performance set this to 0
1060  * To allow SoC to enter sleep set this to 1
1061  */
1062 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
1063 
1064 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
1065 			     qdf_shared_mem_t **ce_sr,
1066 			     uint32_t *ce_sr_ring_size,
1067 			     qdf_dma_addr_t *ce_reg_paddr);
1068 
1069 /**
1070  * struct hif_msg_callbacks - List of callbacks - filled in by HTC.
1071  * @Context: context meaningful to HTC
1072  * @txCompletionHandler:
1073  * @rxCompletionHandler:
1074  * @txResourceAvailHandler:
1075  * @fwEventHandler:
1076  * @update_bundle_stats:
1077  */
1078 struct hif_msg_callbacks {
1079 	void *Context;
1080 	/**< context meaningful to HTC */
1081 	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
1082 					uint32_t transferID,
1083 					uint32_t toeplitz_hash_result);
1084 	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
1085 					uint8_t pipeID);
1086 	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
1087 	void (*fwEventHandler)(void *context, QDF_STATUS status);
1088 	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
1089 };
1090 
/**
 * enum hif_target_status - current state of the target device
 * @TARGET_STATUS_CONNECTED: target connected
 * @TARGET_STATUS_RESET: target got reset
 * @TARGET_STATUS_EJECT: target got ejected
 * @TARGET_STATUS_SUSPEND: target got suspended
 */
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,  /* target got reset */
	TARGET_STATUS_EJECT,  /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
1097 
1098 /**
1099  * enum hif_attribute_flags: configure hif
1100  *
1101  * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
1102  * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
1103  *  							+ No pktlog CE
1104  */
1105 enum hif_attribute_flags {
1106 	HIF_LOWDESC_CE_CFG = 1,
1107 	HIF_LOWDESC_CE_NO_PKTLOG_CFG
1108 };
1109 
/*
 * Helpers that OR a masked field into the caller-supplied 32-bit CE
 * "data attribute" word. Each parameter is fully parenthesized so that
 * expression arguments (e.g. "a | b") are masked as a whole instead of
 * binding against the '&' inside the expansion (CERT PRE01-C).
 */
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	((attr) |= ((v) & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	((attr) |= ((v) & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	((attr) |= ((v) & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	((attr) |= ((v) & 0x01) << 30)
1124 
/**
 * struct hif_ul_pipe_info - ring state snapshot for an UL (uplink) pipe
 * @nentries: number of entries in the ring
 * @nentries_mask: mask used for ring index wrap-around arithmetic
 * @sw_index: software (host) ring index
 * @write_index: cached copy of the write index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (device) address space
 */
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
1134 
/**
 * struct hif_dl_pipe_info - ring state snapshot for a DL (downlink) pipe
 * @nentries: number of entries in the ring
 * @nentries_mask: mask used for ring index wrap-around arithmetic
 * @sw_index: software (host) ring index
 * @write_index: cached copy of the write index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (device) address space
 */
struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
1144 
/**
 * struct hif_pipe_addl_info - additional per-pipe information
 * @pci_mem: PCI memory value for the pipe
 * @ctrl_addr: control address of the pipe's copy engine
 * @ul_pipe: uplink pipe ring state
 * @dl_pipe: downlink pipe ring state
 */
struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
1151 
1152 #ifdef CONFIG_SLUB_DEBUG_ON
1153 #define MSG_FLUSH_NUM 16
1154 #else /* PERF build */
1155 #define MSG_FLUSH_NUM 32
1156 #endif /* SLUB_DEBUG_ON */
1157 
1158 struct hif_bus_id;
1159 
1160 #ifdef CUSTOM_CB_SCHEDULER_SUPPORT
1161 /**
1162  * hif_register_ce_custom_cb() - Helper API to register the custom callback
1163  * @hif_ctx: HIF opaque context
1164  * @pipe: Pipe number
1165  * @custom_cb: Custom call back function pointer
1166  * @custom_cb_context: Custom callback context
1167  *
1168  * return: QDF_STATUS
1169  */
1170 QDF_STATUS
1171 hif_register_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
1172 			  void (*custom_cb)(void *), void *custom_cb_context);
1173 
1174 /**
1175  * hif_unregister_ce_custom_cb() - Helper API to unregister the custom callback
1176  * @hif_ctx: HIF opaque context
1177  * @pipe: Pipe number
1178  *
1179  * return: QDF_STATUS
1180  */
1181 QDF_STATUS
1182 hif_unregister_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe);
1183 
1184 /**
1185  * hif_enable_ce_custom_cb() - Helper API to enable the custom callback
1186  * @hif_ctx: HIF opaque context
1187  * @pipe: Pipe number
1188  *
1189  * return: QDF_STATUS
1190  */
1191 QDF_STATUS
1192 hif_enable_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe);
1193 
1194 /**
1195  * hif_disable_ce_custom_cb() - Helper API to disable the custom callback
1196  * @hif_ctx: HIF opaque context
1197  * @pipe: Pipe number
1198  *
1199  * return: QDF_STATUS
1200  */
1201 QDF_STATUS
1202 hif_disable_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe);
1203 #endif /* CUSTOM_CB_SCHEDULER_SUPPORT */
1204 
1205 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
1206 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
1207 		     int opcode, void *config, uint32_t config_len);
1208 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
1209 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
1210 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
1211 		   struct hif_msg_callbacks *callbacks);
1212 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
1213 void hif_stop(struct hif_opaque_softc *hif_ctx);
1214 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
1215 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
1216 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
1217 		      uint8_t cmd_id, bool start);
1218 
1219 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1220 				  uint32_t transferID, uint32_t nbytes,
1221 				  qdf_nbuf_t wbuf, uint32_t data_attr);
1222 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1223 			     int force);
1224 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1225 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
1226 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
1227 			  uint8_t *DLPipe);
1228 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
1229 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
1230 			int *dl_is_polled);
1231 uint16_t
1232 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1233 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
1234 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
1235 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
1236 		     bool wait_for_it);
1237 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
1238 #ifndef HIF_PCI
1239 static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
1240 {
1241 	return 0;
1242 }
1243 #else
1244 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
1245 #endif
1246 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1247 			u32 *revision, const char **target_name);
1248 
1249 #ifdef RECEIVE_OFFLOAD
1250 /**
1251  * hif_offld_flush_cb_register() - Register the offld flush callback
1252  * @scn: HIF opaque context
1253  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
1254  *			 Or GRO/LRO flush when RxThread is not enabled. Called
1255  *			 with corresponding context for flush.
1256  * Return: None
1257  */
1258 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
1259 				 void (offld_flush_handler)(void *ol_ctx));
1260 
1261 /**
1262  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
1263  * @scn: HIF opaque context
1264  *
1265  * Return: None
1266  */
1267 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
1268 #endif
1269 
1270 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
1271 /**
1272  * hif_exec_should_yield() - Check if hif napi context should yield
1273  * @hif_ctx: HIF opaque context
1274  * @grp_id: grp_id of the napi for which check needs to be done
1275  *
1276  * The function uses grp_id to look for NAPI and checks if NAPI needs to
1277  * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
1278  * yield decision.
1279  *
1280  * Return: true if NAPI needs to yield, else false
1281  */
1282 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
1283 #else
/* Without WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT, NAPI never yields early */
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
					 uint grp_id)
{
	return false;
}
1289 #endif
1290 
1291 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
1292 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
1293 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
1294 				      int htc_htt_tx_endpoint);
1295 
1296 /**
1297  * hif_open() - Create hif handle
1298  * @qdf_ctx: qdf context
1299  * @mode: Driver Mode
1300  * @bus_type: Bus Type
1301  * @cbk: CDS Callbacks
1302  * @psoc: psoc object manager
1303  *
1304  * API to open HIF Context
1305  *
1306  * Return: HIF Opaque Pointer
1307  */
1308 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1309 				  uint32_t mode,
1310 				  enum qdf_bus_type bus_type,
1311 				  struct hif_driver_state_callbacks *cbk,
1312 				  struct wlan_objmgr_psoc *psoc);
1313 
1314 /**
1315  * hif_init_dma_mask() - Set dma mask for the dev
1316  * @dev: dev for which DMA mask is to be set
1317  * @bus_type: bus type for the target
1318  *
1319  * This API sets the DMA mask for the device. before the datapath
1320  * memory pre-allocation is done. If the DMA mask is not set before
1321  * requesting the DMA memory, kernel defaults to a 32-bit DMA mask,
1322  * and does not utilize the full device capability.
1323  *
1324  * Return: 0 - success, non-zero on failure.
1325  */
1326 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type);
1327 void hif_close(struct hif_opaque_softc *hif_ctx);
1328 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1329 		      void *bdev, const struct hif_bus_id *bid,
1330 		      enum qdf_bus_type bus_type,
1331 		      enum hif_enable_type type);
1332 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
1333 #ifdef CE_TASKLET_DEBUG_ENABLE
1334 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
1335 				 uint8_t value);
1336 #endif
1337 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
1338 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
1339 
1340 /**
1341  * enum hif_pm_wake_irq_type - Wake interrupt type for Power Management
1342  * @HIF_PM_INVALID_WAKE: Wake irq is invalid or not configured
1343  * @HIF_PM_MSI_WAKE: Wake irq is MSI interrupt
1344  * @HIF_PM_CE_WAKE: Wake irq is CE interrupt
1345  */
1346 typedef enum {
1347 	HIF_PM_INVALID_WAKE,
1348 	HIF_PM_MSI_WAKE,
1349 	HIF_PM_CE_WAKE,
1350 } hif_pm_wake_irq_type;
1351 
1352 /**
1353  * hif_pm_get_wake_irq_type - Get wake irq type for Power Management
1354  * @hif_ctx: HIF context
1355  *
1356  * Return: enum hif_pm_wake_irq_type
1357  */
1358 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx);
1359 
1360 /**
1361  * enum hif_ep_vote_type - hif ep vote type
1362  * @HIF_EP_VOTE_DP_ACCESS: vote type is specific DP
1363  * @HIF_EP_VOTE_NONDP_ACCESS: ep vote for over all access
1364  */
1365 enum hif_ep_vote_type {
1366 	HIF_EP_VOTE_DP_ACCESS,
1367 	HIF_EP_VOTE_NONDP_ACCESS
1368 };
1369 
1370 /**
1371  * enum hif_ep_vote_access - hif ep vote access
1372  * @HIF_EP_VOTE_ACCESS_ENABLE: Enable ep voting
1373  * @HIF_EP_VOTE_INTERMEDIATE_ACCESS: allow during transition
1374  * @HIF_EP_VOTE_ACCESS_DISABLE: disable ep voting
1375  */
1376 enum hif_ep_vote_access {
1377 	HIF_EP_VOTE_ACCESS_ENABLE,
1378 	HIF_EP_VOTE_INTERMEDIATE_ACCESS,
1379 	HIF_EP_VOTE_ACCESS_DISABLE
1380 };
1381 
1382 /**
1383  * enum hif_rtpm_client_id - modules registered with runtime pm module
1384  * @HIF_RTPM_ID_RESERVED: Reserved ID
1385  * @HIF_RTPM_ID_HAL_REO_CMD: HAL REO commands
1386  * @HIF_RTPM_ID_WMI: WMI commands Tx
1387  * @HIF_RTPM_ID_HTT: HTT commands Tx
1388  * @HIF_RTPM_ID_DP: Datapath Tx path
1389  * @HIF_RTPM_ID_DP_RING_STATS: Datapath ring stats
1390  * @HIF_RTPM_ID_CE: CE Tx buffer posting
1391  * @HIF_RTPM_ID_FORCE_WAKE: Force wake request
1392  * @HIF_RTPM_ID_PM_QOS_NOTIFY:
1393  * @HIF_RTPM_ID_WIPHY_SUSPEND:
1394  * @HIF_RTPM_ID_MAX: Max id
1395  */
1396 enum  hif_rtpm_client_id {
1397 	HIF_RTPM_ID_RESERVED,
1398 	HIF_RTPM_ID_HAL_REO_CMD,
1399 	HIF_RTPM_ID_WMI,
1400 	HIF_RTPM_ID_HTT,
1401 	HIF_RTPM_ID_DP,
1402 	HIF_RTPM_ID_DP_RING_STATS,
1403 	HIF_RTPM_ID_CE,
1404 	HIF_RTPM_ID_FORCE_WAKE,
1405 	HIF_RTPM_ID_PM_QOS_NOTIFY,
1406 	HIF_RTPM_ID_WIPHY_SUSPEND,
1407 	HIF_RTPM_ID_MAX
1408 };
1409 
1410 /**
1411  * enum rpm_type - Get and Put calls types
1412  * @HIF_RTPM_GET_ASYNC: Increment usage count and when system is suspended
1413  *		      schedule resume process, return depends on pm state.
1414  * @HIF_RTPM_GET_FORCE: Increment usage count and when system is suspended
1415  *		      schedule resume process, returns success irrespective of
1416  *		      pm_state.
1417  * @HIF_RTPM_GET_SYNC: Increment usage count and when system is suspended,
1418  *		     wait till process is resumed.
1419  * @HIF_RTPM_GET_NORESUME: Only increments usage count.
1420  * @HIF_RTPM_PUT_ASYNC: Decrements usage count and puts system in idle state.
1421  * @HIF_RTPM_PUT_SYNC_SUSPEND: Decrements usage count and puts system in
1422  *			     suspended state.
1423  * @HIF_RTPM_PUT_NOIDLE: Decrements usage count.
1424  */
1425 enum rpm_type {
1426 	HIF_RTPM_GET_ASYNC,
1427 	HIF_RTPM_GET_FORCE,
1428 	HIF_RTPM_GET_SYNC,
1429 	HIF_RTPM_GET_NORESUME,
1430 	HIF_RTPM_PUT_ASYNC,
1431 	HIF_RTPM_PUT_SYNC_SUSPEND,
1432 	HIF_RTPM_PUT_NOIDLE,
1433 };
1434 
1435 /**
1436  * struct hif_pm_runtime_lock - data structure for preventing runtime suspend
1437  * @list: global list of runtime locks
1438  * @active: true if this lock is preventing suspend
1439  * @name: character string for tracking this lock
1440  */
1441 struct hif_pm_runtime_lock {
1442 	struct list_head list;
1443 	bool active;
1444 	const char *name;
1445 };
1446 
1447 #ifdef FEATURE_RUNTIME_PM
1448 /**
1449  * hif_rtpm_register() - Register a module with runtime PM.
1450  * @id: ID of the module which needs to be registered
1451  * @hif_rpm_cbk: callback to be called when get was called in suspended state.
1452  *
1453  * Return: success status if successfully registered
1454  */
1455 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void));
1456 
1457 /**
1458  * hif_rtpm_deregister() - Deregister the module
1459  * @id: ID of the module which needs to be de-registered
1460  */
1461 QDF_STATUS hif_rtpm_deregister(uint32_t id);
1462 
1463 /**
1464  * hif_rtpm_set_autosuspend_delay() - Set delay to trigger RTPM suspend
1465  * @delay: delay in ms to be set
1466  *
1467  * Return: Success if delay is set successfully
1468  */
1469 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay);
1470 
1471 /**
1472  * hif_rtpm_restore_autosuspend_delay() - Restore delay value to default value
1473  *
1474  * Return: Success if reset done. E_ALREADY if delay same as config value
1475  */
1476 QDF_STATUS hif_rtpm_restore_autosuspend_delay(void);
1477 
1478 /**
1479  * hif_rtpm_get_autosuspend_delay() -Get delay to trigger RTPM suspend
1480  *
1481  * Return: Delay in ms
1482  */
1483 int hif_rtpm_get_autosuspend_delay(void);
1484 
1485 /**
1486  * hif_runtime_lock_init() - API to initialize Runtime PM context
1487  * @lock: QDF lock context
1488  * @name: Context name
1489  *
1490  * This API initializes the Runtime PM context of the caller and
1491  * return the pointer.
1492  *
1493  * Return: None
1494  */
1495 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
1496 
1497 /**
1498  * hif_runtime_lock_deinit() - This API frees the runtime pm context
1499  * @data: Runtime PM context
1500  *
1501  * Return: void
1502  */
1503 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data);
1504 
1505 /**
1506  * hif_rtpm_get() - Increment usage_count on the device to avoid suspend.
 * @type: get call type, one of the enum rpm_type values
1508  * @id: ID of the module calling get()
1509  *
1510  * A get operation will prevent a runtime suspend until a
1511  * corresponding put is done.  This api should be used when accessing bus.
1512  *
1513  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
1514  * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!!
1515  *
1516  * return: success if a get has been issued, else error code.
1517  */
1518 QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id);
1519 
1520 /**
1521  * hif_rtpm_put() - do a put operation on the device
 * @type: put call type, one of the enum rpm_type values
1523  * @id: ID of the module calling put()
1524  *
1525  * A put operation will allow a runtime suspend after a corresponding
1526  * get was done.  This api should be used when finished accessing bus.
1527  *
1528  * This api will return a failure if runtime pm is stopped
1529  * This api will return failure if it would decrement the usage count below 0.
1530  *
1531  * return: QDF_STATUS_SUCCESS if the put is performed
1532  */
1533 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id);
1534 
1535 /**
1536  * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend
1537  * @data: runtime PM lock
1538  *
1539  * This function will prevent runtime suspend, by incrementing
1540  * device's usage count.
1541  *
1542  * Return: status
1543  */
1544 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data);
1545 
1546 /**
1547  * hif_pm_runtime_prevent_suspend_sync() - Synchronized prevent Runtime suspend
1548  * @data: runtime PM lock
1549  *
1550  * This function will prevent runtime suspend, by incrementing
1551  * device's usage count.
1552  *
1553  * Return: status
1554  */
1555 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data);
1556 
1557 /**
1558  * hif_pm_runtime_allow_suspend() - Allow Runtime suspend
1559  * @data: runtime PM lock
1560  *
1561  * This function will allow runtime suspend, by decrementing
1562  * device's usage count.
1563  *
1564  * Return: status
1565  */
1566 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data);
1567 
1568 /**
1569  * hif_rtpm_request_resume() - Request resume if bus is suspended
1570  *
1571  * Return: None
1572  */
1573 void hif_rtpm_request_resume(void);
1574 
1575 /**
1576  * hif_rtpm_sync_resume() - Invoke synchronous runtime resume.
1577  *
1578  * This function will invoke synchronous runtime resume.
1579  *
1580  * Return: status
1581  */
1582 QDF_STATUS hif_rtpm_sync_resume(void);
1583 
1584 /**
1585  * hif_rtpm_check_and_request_resume() - check if bus is suspended and
1586  *                                       request resume.
1587  *
1588  * Return: void
1589  */
1590 void hif_rtpm_check_and_request_resume(void);
1591 
1592 /**
1593  * hif_rtpm_set_client_job() - Set job for the client.
1594  * @client_id: Client id for which job needs to be set
1595  *
1596  * If get failed due to system being in suspended state, set the client job so
1597  * when system resumes the client's job is called.
1598  *
1599  * Return: None
1600  */
1601 void hif_rtpm_set_client_job(uint32_t client_id);
1602 
1603 /**
1604  * hif_rtpm_mark_last_busy() - Mark last busy to delay retry to suspend
1605  * @id: ID marking last busy
1606  *
1607  * Return: None
1608  */
1609 void hif_rtpm_mark_last_busy(uint32_t id);
1610 
1611 /**
1612  * hif_rtpm_get_monitor_wake_intr() - API to get monitor_wake_intr
1613  *
1614  * monitor_wake_intr variable can be used to indicate if driver expects wake
1615  * MSI for runtime PM
1616  *
1617  * Return: monitor_wake_intr variable
1618  */
1619 int hif_rtpm_get_monitor_wake_intr(void);
1620 
1621 /**
1622  * hif_rtpm_set_monitor_wake_intr() - API to set monitor_wake_intr
1623  * @val: value to set
1624  *
1625  * monitor_wake_intr variable can be used to indicate if driver expects wake
1626  * MSI for runtime PM
1627  *
1628  * Return: void
1629  */
1630 void hif_rtpm_set_monitor_wake_intr(int val);
1631 
1632 /**
1633  * hif_pre_runtime_suspend() - book keeping before beginning runtime suspend.
1634  * @hif_ctx: HIF context
1635  *
1636  * Makes sure that the pci link will be taken down by the suspend operation.
1637  * If the hif layer is configured to leave the bus on, runtime suspend will
1638  * not save any power.
1639  *
1640  * Set the runtime suspend state to SUSPENDING.
1641  *
1642  * return -EINVAL if the bus won't go down.  otherwise return 0
1643  */
1644 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1645 
1646 /**
1647  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
1648  *
1649  * update the runtime pm state to RESUMING.
1650  * Return: void
1651  */
1652 void hif_pre_runtime_resume(void);
1653 
1654 /**
1655  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
1656  *
1657  * Record the success.
1658  * update the runtime_pm state to SUSPENDED
1659  * Return: void
1660  */
1661 void hif_process_runtime_suspend_success(void);
1662 
1663 /**
1664  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
1665  *
1666  * Record the failure.
1667  * mark last busy to delay a retry.
1668  * update the runtime_pm state back to ON
1669  *
1670  * Return: void
1671  */
1672 void hif_process_runtime_suspend_failure(void);
1673 
1674 /**
1675  * hif_process_runtime_resume_linkup() - bookkeeping of resuming link up
1676  *
1677  * update the runtime_pm state to RESUMING_LINKUP
1678  * Return: void
1679  */
1680 void hif_process_runtime_resume_linkup(void);
1681 
1682 /**
1683  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
1684  *
1685  * record the success.
1686  * update the runtime_pm state to SUSPENDED
1687  * Return: void
1688  */
1689 void hif_process_runtime_resume_success(void);
1690 
1691 /**
1692  * hif_rtpm_print_prevent_list() - list the clients preventing suspend.
1693  *
1694  * Return: None
1695  */
1696 void hif_rtpm_print_prevent_list(void);
1697 
1698 /**
1699  * hif_rtpm_suspend_lock() - spin_lock on marking runtime suspend
1700  *
1701  * Return: void
1702  */
1703 void hif_rtpm_suspend_lock(void);
1704 
1705 /**
1706  * hif_rtpm_suspend_unlock() - spin_unlock on marking runtime suspend
1707  *
1708  * Return: void
1709  */
1710 void hif_rtpm_suspend_unlock(void);
1711 
1712 /**
1713  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
1714  * @hif_ctx: HIF context
1715  *
1716  * Return: 0 for success and non-zero error code for failure
1717  */
1718 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1719 
1720 /**
1721  * hif_runtime_resume() - do the bus resume part of a runtime resume
1722  * @hif_ctx: HIF context
1723  *
1724  * Return: 0 for success and non-zero error code for failure
1725  */
1726 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
1727 
1728 /**
1729  * hif_fastpath_resume() - resume fastpath for runtimepm
1730  * @hif_ctx: HIF context
1731  *
1732  * ensure that the fastpath write index register is up to date
1733  * since runtime pm may cause ce_send_fast to skip the register
1734  * write.
1735  *
1736  * fastpath only applicable to legacy copy engine
1737  */
1738 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
1739 
1740 /**
1741  * hif_rtpm_get_state(): get rtpm link state
1742  *
1743  * Return: state
1744  */
1745 int hif_rtpm_get_state(void);
1746 
1747 /**
1748  * hif_rtpm_display_last_busy_hist() - Display runtimepm last busy history
1749  * @hif_ctx: HIF context
1750  *
1751  * Return: None
1752  */
1753 void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx);
1754 
1755 /**
1756  * hif_rtpm_record_ce_last_busy_evt() - Record CE runtimepm last busy event
1757  * @scn: HIF context
1758  * @ce_id: CE id
1759  *
1760  * Return: None
1761  */
1762 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1763 				      unsigned long ce_id);
1764 
1765 /**
1766  * hif_set_enable_rpm() - Set enable_rpm value
1767  * @hif_hdl: hif opaque handle
1768  *
1769  *  Return: None
1770  */
1771 void hif_set_enable_rpm(struct hif_opaque_softc *hif_hdl);
1772 
1773 #else
1774 
1775 /**
1776  * hif_rtpm_display_last_busy_hist() - Display runtimepm last busy history
1777  * @hif_ctx: HIF context
1778  *
1779  * Return: None
1780  */
1781 static inline
1782 void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx) { }
1783 
1784 /**
1785  * hif_rtpm_record_ce_last_busy_evt() - Record CE runtimepm last busy event
1786  * @scn: HIF context
1787  * @ce_id: CE id
1788  *
1789  * Return: None
1790  */
1791 static inline
1792 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1793 				      unsigned long ce_id)
1794 { }
1795 
1796 static inline
1797 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void))
1798 { return QDF_STATUS_SUCCESS; }
1799 
1800 static inline
1801 QDF_STATUS hif_rtpm_deregister(uint32_t id)
1802 { return QDF_STATUS_SUCCESS; }
1803 
1804 static inline
1805 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay)
1806 { return QDF_STATUS_SUCCESS; }
1807 
1808 static inline QDF_STATUS hif_rtpm_restore_autosuspend_delay(void)
1809 { return QDF_STATUS_SUCCESS; }
1810 
1811 static inline int hif_rtpm_get_autosuspend_delay(void)
1812 { return 0; }
1813 
1814 static inline
1815 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
1816 { return 0; }
1817 
1818 static inline
1819 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data)
1820 {}
1821 
1822 static inline
1823 int hif_rtpm_get(uint8_t type, uint32_t id)
1824 { return QDF_STATUS_SUCCESS; }
1825 
1826 static inline
1827 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
1828 { return QDF_STATUS_SUCCESS; }
1829 
1830 static inline
1831 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data)
1832 { return 0; }
1833 
1834 static inline
1835 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data)
1836 { return 0; }
1837 
1838 static inline
1839 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data)
1840 { return 0; }
1841 
1842 static inline
1843 QDF_STATUS hif_rtpm_sync_resume(void)
1844 { return QDF_STATUS_SUCCESS; }
1845 
1846 static inline
1847 void hif_rtpm_request_resume(void)
1848 {}
1849 
1850 static inline
1851 void hif_rtpm_check_and_request_resume(void)
1852 {}
1853 
1854 static inline
1855 void hif_rtpm_set_client_job(uint32_t client_id)
1856 {}
1857 
1858 static inline
1859 void hif_rtpm_print_prevent_list(void)
1860 {}
1861 
1862 static inline
1863 void hif_rtpm_suspend_unlock(void)
1864 {}
1865 
1866 static inline
1867 void hif_rtpm_suspend_lock(void)
1868 {}
1869 
1870 static inline
1871 int hif_rtpm_get_monitor_wake_intr(void)
1872 { return 0; }
1873 
1874 static inline
1875 void hif_rtpm_set_monitor_wake_intr(int val)
1876 {}
1877 
1878 static inline
1879 void hif_rtpm_mark_last_busy(uint32_t id)
1880 {}
1881 
1882 static inline
1883 void hif_set_enable_rpm(struct hif_opaque_softc *hif_hdl)
1884 {
1885 }
1886 #endif
1887 
1888 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
1889 				 bool is_packet_log_enabled);
1890 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
1891 
1892 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
1893 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
1894 
1895 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
1896 
1897 #ifdef IPA_OFFLOAD
1898 /**
1899  * hif_get_ipa_hw_type() - get IPA hw type
1900  *
1901  * This API return the IPA hw type.
1902  *
1903  * Return: IPA hw type
1904  */
1905 static inline
1906 enum ipa_hw_type hif_get_ipa_hw_type(void)
1907 {
1908 	return ipa_get_hw_type();
1909 }
1910 
1911 /**
1912  * hif_get_ipa_present() - get IPA hw status
1913  *
1914  * This API return the IPA hw status.
1915  *
1916  * Return: true if IPA is present or false otherwise
1917  */
1918 static inline
1919 bool hif_get_ipa_present(void)
1920 {
1921 	if (qdf_ipa_uc_reg_rdyCB(NULL) != -EPERM)
1922 		return true;
1923 	else
1924 		return false;
1925 }
1926 #endif
1927 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
1928 
1929 /**
1930  * hif_bus_early_suspend() - stop non wmi tx traffic
1931  * @hif_ctx: hif context
1932  */
1933 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
1934 
1935 /**
1936  * hif_bus_late_resume() - resume non wmi traffic
1937  * @hif_ctx: hif context
1938  */
1939 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
1940 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
1941 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
1942 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
1943 
1944 /**
1945  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
1946  * @hif_ctx: an opaque HIF handle to use
1947  *
1948  * As opposed to the standard hif_irq_enable, this function always applies to
1949  * the APPS side kernel interrupt handling.
1950  *
1951  * Return: errno
1952  */
1953 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
1954 
1955 /**
1956  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
1957  * @hif_ctx: an opaque HIF handle to use
1958  *
1959  * As opposed to the standard hif_irq_disable, this function always applies to
1960  * the APPS side kernel interrupt handling.
1961  *
1962  * Return: errno
1963  */
1964 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
1965 
1966 /**
1967  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
1968  * @hif_ctx: an opaque HIF handle to use
1969  *
1970  * As opposed to the standard hif_irq_enable, this function always applies to
1971  * the APPS side kernel interrupt handling.
1972  *
1973  * Return: errno
1974  */
1975 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
1976 
1977 /**
1978  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
1979  * @hif_ctx: an opaque HIF handle to use
1980  *
1981  * As opposed to the standard hif_irq_disable, this function always applies to
1982  * the APPS side kernel interrupt handling.
1983  *
1984  * Return: errno
1985  */
1986 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
1987 
1988 /**
1989  * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side
1990  * @hif_ctx: an opaque HIF handle to use
1991  *
1992  * This function always applies to the APPS side kernel interrupt handling
1993  * to wake the system from suspend.
1994  *
1995  * Return: errno
1996  */
1997 int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx);
1998 
1999 /**
2000  * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side
2001  * @hif_ctx: an opaque HIF handle to use
2002  *
2003  * This function always applies to the APPS side kernel interrupt handling
2004  * to disable the wake irq.
2005  *
2006  * Return: errno
2007  */
2008 int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx);
2009 
2010 /**
2011  * hif_apps_enable_irqs_except_wake_irq() - Enables all irqs except wake_irq
2012  * @hif_ctx: an opaque HIF handle to use
2013  *
2014  * As opposed to the standard hif_irq_enable, this function always applies to
2015  * the APPS side kernel interrupt handling.
2016  *
2017  * Return: errno
2018  */
2019 int hif_apps_enable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
2020 
2021 /**
2022  * hif_apps_disable_irqs_except_wake_irq() - Disables all irqs except wake_irq
2023  * @hif_ctx: an opaque HIF handle to use
2024  *
2025  * As opposed to the standard hif_irq_disable, this function always applies to
2026  * the APPS side kernel interrupt handling.
2027  *
2028  * Return: errno
2029  */
2030 int hif_apps_disable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
2031 
2032 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
2033 int hif_dump_registers(struct hif_opaque_softc *scn);
2034 int ol_copy_ramdump(struct hif_opaque_softc *scn);
2035 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
2036 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
2037 		     u32 *revision, const char **target_name);
2038 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
2039 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
2040 						   scn);
2041 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
2042 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
2043 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
2044 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
2045 			   hif_target_status);
2046 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
2047 			 struct hif_config_info *cfg);
2048 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
2049 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2050 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
2051 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2052 			   uint32_t transfer_id, u_int32_t len);
2053 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
2054 	uint32_t transfer_id, uint32_t download_len);
2055 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
2056 void hif_ce_war_disable(void);
2057 void hif_ce_war_enable(void);
2058 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
2059 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
2060 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
2061 		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
2062 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
2063 		uint32_t pipe_num);
2064 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
2065 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
2066 
2067 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
2068 				int rx_bundle_cnt);
2069 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
2070 
2071 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
2072 
2073 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
2074 
/**
 * enum hif_exec_type - bottom-half execution model for an ext group context
 * @HIF_EXEC_NAPI_TYPE: interrupts serviced via NAPI
 * @HIF_EXEC_TASKLET_TYPE: interrupts serviced via tasklet
 */
enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,
	HIF_EXEC_TASKLET_TYPE,
};
2079 
2080 typedef uint32_t (*ext_intr_handler)(void *, uint32_t, int);
2081 
2082 /**
2083  * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
2084  * @softc: hif opaque context owning the exec context
2085  * @id: the id of the interrupt context
2086  *
2087  * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
2088  *         'id' registered with the OS
2089  */
2090 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
2091 				uint8_t id);
2092 
2093 /**
2094  * hif_configure_ext_group_interrupts() - Configure ext group interrupts
2095  * @hif_ctx: hif opaque context
2096  *
2097  * Return: QDF_STATUS
2098  */
2099 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
2100 
2101 /**
2102  * hif_deconfigure_ext_group_interrupts() - Deconfigure ext group interrupts
2103  * @hif_ctx: hif opaque context
2104  *
2105  * Return: None
2106  */
2107 void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
2108 
2109 /**
2110  * hif_register_ext_group() - API to register external group
2111  * interrupt handler.
2112  * @hif_ctx : HIF Context
2113  * @numirq: number of irq's in the group
2114  * @irq: array of irq values
2115  * @handler: callback interrupt handler function
 * @cb_ctx: context to be passed to the callback
2117  * @context_name: text name of the context
2118  * @type: napi vs tasklet
2119  * @scale:
2120  *
2121  * Return: QDF_STATUS
2122  */
2123 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
2124 				  uint32_t numirq, uint32_t irq[],
2125 				  ext_intr_handler handler,
2126 				  void *cb_ctx, const char *context_name,
2127 				  enum hif_exec_type type, uint32_t scale);
2128 
2129 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
2130 				const char *context_name);
2131 
2132 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
2133 				u_int8_t pipeid,
2134 				struct hif_msg_callbacks *callbacks);
2135 
2136 /**
2137  * hif_print_napi_stats() - Display HIF NAPI stats
2138  * @hif_ctx: HIF opaque context
2139  *
2140  * Return: None
2141  */
2142 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
2143 
2144 /**
2145  * hif_clear_napi_stats() - function clears the stats of the
2146  * latency when called.
2147  * @hif_ctx: the HIF context to assign the callback to
2148  *
2149  * Return: None
2150  */
2151 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
2152 
2153 #ifdef __cplusplus
2154 }
2155 #endif
2156 
2157 #ifdef FORCE_WAKE
2158 /**
2159  * hif_force_wake_request() - Function to wake from power collapse
2160  * @handle: HIF opaque handle
2161  *
2162  * Description: API to check if the device is awake or not before
2163  * read/write to BAR + 4K registers. If device is awake return
2164  * success otherwise write '1' to
2165  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG which will interrupt
2166  * the device and does wakeup the PCI and MHI within 50ms
2167  * and then the device writes a value to
2168  * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
2169  * handshake process to let the host know the device is awake.
2170  *
2171  * Return: zero - success/non-zero - failure
2172  */
2173 int hif_force_wake_request(struct hif_opaque_softc *handle);
2174 
2175 /**
2176  * hif_force_wake_release() - API to release/reset the SOC wake register
2177  * from interrupting the device.
2178  * @handle: HIF opaque handle
2179  *
2180  * Description: API to set the
2181  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
2182  * to release the interrupt line.
2183  *
2184  * Return: zero - success/non-zero - failure
2185  */
2186 int hif_force_wake_release(struct hif_opaque_softc *handle);
2187 #else
/* FORCE_WAKE disabled: no wake handshake is needed, so always succeed */
static inline
int hif_force_wake_request(struct hif_opaque_softc *handle)
{
	return 0;
}

/* FORCE_WAKE disabled: nothing to release, so always succeed */
static inline
int hif_force_wake_release(struct hif_opaque_softc *handle)
{
	return 0;
}
2199 #endif /* FORCE_WAKE */
2200 
2201 #if defined(FEATURE_HAL_DELAYED_REG_WRITE) || \
2202 	defined(FEATURE_HIF_DELAYED_REG_WRITE)
2203 /**
2204  * hif_prevent_link_low_power_states() - Prevent from going to low power states
2205  * @hif: HIF opaque context
2206  *
2207  * Return: 0 on success. Error code on failure.
2208  */
2209 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);
2210 
2211 /**
2212  * hif_allow_link_low_power_states() - Allow link to go to low power states
2213  * @hif: HIF opaque context
2214  *
2215  * Return: None
2216  */
2217 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);
2218 
2219 #else
2220 
/* delayed reg write disabled: link voting is a no-op, report success */
static inline
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	return 0;
}

/* delayed reg write disabled: nothing to allow */
static inline
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
{
}
2231 #endif
2232 
2233 #ifdef IPA_OPT_WIFI_DP
2234 /**
2235  * hif_prevent_l1() - Prevent from going to low power states
2236  * @hif: HIF opaque context
2237  *
2238  * Return: 0 on success. Error code on failure.
2239  */
2240 int hif_prevent_l1(struct hif_opaque_softc *hif);
2241 
2242 /**
2243  * hif_allow_l1() - Allow link to go to low power states
2244  * @hif: HIF opaque context
2245  *
2246  * Return: None
2247  */
2248 void hif_allow_l1(struct hif_opaque_softc *hif);
2249 
2250 #else
2251 
/* IPA_OPT_WIFI_DP disabled: L1 voting is a no-op, report success */
static inline
int hif_prevent_l1(struct hif_opaque_softc *hif)
{
	return 0;
}

/* IPA_OPT_WIFI_DP disabled: nothing to allow */
static inline
void hif_allow_l1(struct hif_opaque_softc *hif)
{
}
2262 #endif
2263 
2264 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
2265 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle);
2266 void *hif_get_dev_ba_pmm(struct hif_opaque_softc *hif_handle);
2267 
2268 /**
2269  * hif_get_dev_ba_cmem() - get base address of CMEM
2270  * @hif_handle: the HIF context
2271  *
2272  */
2273 void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle);
2274 
2275 /**
2276  * hif_get_soc_version() - get soc major version from target info
2277  * @hif_handle: the HIF context
2278  *
2279  * Return: version number
2280  */
2281 uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle);
2282 
2283 /**
2284  * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
2285  * @hif_ctx: the HIF context to assign the callback to
2286  * @callback: the callback to assign
2287  * @priv: the private data to pass to the callback when invoked
2288  *
2289  * Return: None
2290  */
2291 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
2292 			       void (*callback)(void *),
2293 			       void *priv);
2294 /*
2295  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
2296  * for defined here
2297  */
2298 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2299 ssize_t hif_dump_desc_trace_buf(struct device *dev,
2300 				struct device_attribute *attr, char *buf);
2301 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
2302 					const char *buf, size_t size);
2303 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
2304 				const char *buf, size_t size);
2305 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
2306 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
2307 #endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
2308 
2309 /**
2310  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
2311  * @hif: hif context
2312  * @ce_service_max_yield_time: CE service max yield time to set
2313  *
 * This API stores CE service max yield time in hif context based
2315  * on ini value.
2316  *
2317  * Return: void
2318  */
2319 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
2320 				       uint32_t ce_service_max_yield_time);
2321 
2322 /**
2323  * hif_get_ce_service_max_yield_time() - get CE service max yield time
2324  * @hif: hif context
2325  *
2326  * This API returns CE service max yield time.
2327  *
2328  * Return: CE service max yield time
2329  */
2330 unsigned long long
2331 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
2332 
2333 /**
2334  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
2335  * @hif: hif context
2336  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
2337  *
2338  * This API stores CE service max rx ind flush in hif context based
2339  * on ini value.
2340  *
2341  * Return: void
2342  */
2343 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2344 					 uint8_t ce_service_max_rx_ind_flush);
2345 
2346 #ifdef OL_ATH_SMART_LOGGING
2347 /**
2348  * hif_log_dump_ce() - Copy all the CE DEST ring to buf
2349  * @scn: HIF handler
2350  * @buf_cur: Current pointer in ring buffer
2351  * @buf_init:Start of the ring buffer
2352  * @buf_sz: Size of the ring buffer
2353  * @ce: Copy Engine id
2354  * @skb_sz: Max size of the SKB buffer to be copied
2355  *
2356  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
2357  * and buffers pointed by them in to the given buf
2358  *
2359  * Return: Current pointer in ring buffer
2360  */
2361 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
2362 			 uint8_t *buf_init, uint32_t buf_sz,
2363 			 uint32_t ce, uint32_t skb_sz);
2364 #endif /* OL_ATH_SMART_LOGGING */
2365 
2366 /**
2367  * hif_softc_to_hif_opaque_softc() - API to convert hif_softc handle
2368  * to hif_opaque_softc handle
2369  * @hif_handle: hif_softc type
2370  *
2371  * Return: hif_opaque_softc type
2372  */
static inline struct hif_opaque_softc *
hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
{
	struct hif_opaque_softc *opaque_handle;

	/* the two handle types alias the same object; only the view differs */
	opaque_handle = (struct hif_opaque_softc *)hif_handle;

	return opaque_handle;
}
2378 
2379 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
2380 QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx);
2381 void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx);
2382 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx);
2383 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
2384 			    uint8_t type, uint8_t access);
2385 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
2386 			       uint8_t type);
2387 #else
/* EP vote access stubs: feature compiled out, access is always granted */
static inline QDF_STATUS
hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type, uint8_t access)
{
}

static inline uint8_t
hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type)
{
	/* without the feature, access is unconditionally enabled */
	return HIF_EP_VOTE_ACCESS_ENABLE;
}
2416 #endif
2417 
2418 #ifdef FORCE_WAKE
2419 /**
2420  * hif_srng_init_phase(): Indicate srng initialization phase
2421  * to avoid force wake as UMAC power collapse is not yet
2422  * enabled
2423  * @hif_ctx: hif opaque handle
2424  * @init_phase: initialization phase
2425  *
2426  * Return:  None
2427  */
2428 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
2429 			 bool init_phase);
2430 #else
/* FORCE_WAKE disabled: srng init phase tracking is not needed */
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
}
2436 #endif /* FORCE_WAKE */
2437 
2438 #ifdef HIF_IPCI
2439 /**
 * hif_shutdown_notifier_cb() - Callback for shutdown notifier
2441  * @ctx: hif handle
2442  *
2443  * Return:  None
2444  */
2445 void hif_shutdown_notifier_cb(void *ctx);
2446 #else
/* non-IPCI builds: no shutdown notifier is registered */
static inline
void hif_shutdown_notifier_cb(void *ctx)
{
}
2451 #endif /* HIF_IPCI */
2452 
2453 #ifdef HIF_CE_LOG_INFO
2454 /**
2455  * hif_log_ce_info() - API to log ce info
2456  * @scn: hif handle
2457  * @data: hang event data buffer
2458  * @offset: offset at which data needs to be written
2459  *
2460  * Return:  None
2461  */
2462 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
2463 		     unsigned int *offset);
2464 #else
/* HIF_CE_LOG_INFO disabled: no CE info is added to the hang event data */
static inline
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset)
{
}
2470 #endif
2471 
2472 #ifdef HIF_CPU_PERF_AFFINE_MASK
2473 /**
2474  * hif_config_irq_set_perf_affinity_hint() - API to set affinity
2475  * @hif_ctx: hif opaque handle
2476  *
2477  * This function is used to move the WLAN IRQs to perf cores in
2478  * case of defconfig builds.
2479  *
2480  * Return:  None
2481  */
2482 void hif_config_irq_set_perf_affinity_hint(
2483 	struct hif_opaque_softc *hif_ctx);
2484 
2485 #else
/* HIF_CPU_PERF_AFFINE_MASK disabled: IRQs keep their default affinity */
static inline void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx)
{
}
2490 #endif
2491 
2492 /**
2493  * hif_apps_grp_irqs_enable() - enable ext grp irqs
2494  * @hif_ctx: HIF opaque context
2495  *
2496  * Return: 0 on success. Error code on failure.
2497  */
2498 int hif_apps_grp_irqs_enable(struct hif_opaque_softc *hif_ctx);
2499 
2500 /**
2501  * hif_apps_grp_irqs_disable() - disable ext grp irqs
2502  * @hif_ctx: HIF opaque context
2503  *
2504  * Return: 0 on success. Error code on failure.
2505  */
2506 int hif_apps_grp_irqs_disable(struct hif_opaque_softc *hif_ctx);
2507 
2508 /**
2509  * hif_disable_grp_irqs() - disable ext grp irqs
2510  * @scn: HIF opaque context
2511  *
2512  * Return: 0 on success. Error code on failure.
2513  */
2514 int hif_disable_grp_irqs(struct hif_opaque_softc *scn);
2515 
2516 /**
2517  * hif_enable_grp_irqs() - enable ext grp irqs
2518  * @scn: HIF opaque context
2519  *
2520  * Return: 0 on success. Error code on failure.
2521  */
2522 int hif_enable_grp_irqs(struct hif_opaque_softc *scn);
2523 
/**
 * enum hif_credit_exchange_type - credit exchange event being recorded
 * @HIF_REQUEST_CREDIT: a credit request event
 * @HIF_PROCESS_CREDIT_REPORT: a credit report processing event
 */
enum hif_credit_exchange_type {
	HIF_REQUEST_CREDIT,
	HIF_PROCESS_CREDIT_REPORT,
};
2528 
/**
 * enum hif_detect_latency_type - source monitored by latency detection
 * @HIF_DETECT_TASKLET: latency detected on tasklet scheduling/execution
 * @HIF_DETECT_CREDIT: latency detected on the credit exchange path
 * @HIF_DETECT_UNKNOWN: detection source not identified
 */
enum hif_detect_latency_type {
	HIF_DETECT_TASKLET,
	HIF_DETECT_CREDIT,
	HIF_DETECT_UNKNOWN
};
2534 
2535 #ifdef HIF_DETECTION_LATENCY_ENABLE
2536 void hif_latency_detect_credit_record_time(
2537 	enum hif_credit_exchange_type type,
2538 	struct hif_opaque_softc *hif_ctx);
2539 
2540 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx);
2541 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx);
2542 void hif_check_detection_latency(struct hif_softc *scn,
2543 				 bool from_timer,
2544 				 uint32_t bitmap_type);
2545 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value);
2546 
2547 /**
2548  * hif_tasklet_latency_record_exec() - record execute time and
2549  * check the latency
2550  * @scn: HIF opaque context
2551  * @idx: CE id
2552  *
2553  * Return: None
2554  */
2555 void hif_tasklet_latency_record_exec(struct hif_softc *scn, int idx);
2556 
2557 /**
2558  * hif_tasklet_latency_record_sched() - record schedule time of a tasklet
2559  * @scn: HIF opaque context
2560  * @idx: CE id
2561  *
2562  * Return: None
2563  */
2564 void hif_tasklet_latency_record_sched(struct hif_softc *scn, int idx);
2565 #else
/* HIF_DETECTION_LATENCY_ENABLE disabled: all latency hooks are no-ops */
static inline
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx)
{}
static inline
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{}

static inline
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{}

static inline
void hif_tasklet_latency_record_exec(struct hif_softc *scn, int idx)
{}

static inline
void hif_tasklet_latency_record_sched(struct hif_softc *scn, int idx)
{}
2596 #endif
2597 
2598 #ifdef SYSTEM_PM_CHECK
2599 /**
2600  * __hif_system_pm_set_state() - Set system pm state
2601  * @hif: hif opaque handle
2602  * @state: system state
2603  *
2604  * Return:  None
2605  */
2606 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
2607 			       enum hif_system_pm_state state);
2608 
2609 /**
2610  * hif_system_pm_set_state_on() - Set system pm state to ON
2611  * @hif: hif opaque handle
2612  *
2613  * Return:  None
2614  */
static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_ON);
}

/**
 * hif_system_pm_set_state_resuming() - Set system pm state to
 *  HIF_SYSTEM_PM_STATE_BUS_RESUMING
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_RESUMING);
}

/**
 * hif_system_pm_set_state_suspending() - Set system pm state to
 *  HIF_SYSTEM_PM_STATE_BUS_SUSPENDING
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDING);
}

/**
 * hif_system_pm_set_state_suspended() - Set system pm state to
 *  HIF_SYSTEM_PM_STATE_BUS_SUSPENDED
 * @hif: hif opaque handle
 *
 * Return:  None
 */
static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDED);
}
2656 
2657 /**
2658  * hif_system_pm_get_state() - Get system pm state
2659  * @hif: hif opaque handle
2660  *
2661  * Return:  system state
2662  */
2663 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif);
2664 
2665 /**
2666  * hif_system_pm_state_check() - Check system state and trigger resume
2667  *  if required
2668  * @hif: hif opaque handle
2669  *
2670  * Return: 0 if system is in on state else error code
2671  */
2672 int hif_system_pm_state_check(struct hif_opaque_softc *hif);
2673 #else
/* SYSTEM_PM_CHECK disabled: pm state tracking stubs; state reads as 0 */
static inline
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
}

static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
}

static inline
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline int hif_system_pm_state_check(struct hif_opaque_softc *hif)
{
	return 0;
}
2710 #endif
2711 
2712 #ifdef FEATURE_IRQ_AFFINITY
2713 /**
2714  * hif_set_grp_intr_affinity() - API to set affinity for grp
2715  *  intrs set in the bitmap
2716  * @scn: hif handle
2717  * @grp_intr_bitmask: grp intrs for which perf affinity should be
2718  *  applied
2719  * @perf: affine to perf or non-perf cluster
2720  *
2721  * Return: None
2722  */
2723 void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
2724 			       uint32_t grp_intr_bitmask, bool perf);
2725 #else
/* FEATURE_IRQ_AFFINITY disabled: group interrupt affinity is not applied */
static inline
void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
			       uint32_t grp_intr_bitmask, bool perf)
{
}
2731 #endif
2732 /**
2733  * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
2734  * @scn: hif opaque handle
2735  *
2736  * Description:
2737  *   Gets number of WMI EPs configured in target svc map. Since EP map
2738  *   include IN and OUT direction pipes, count only OUT pipes to get EPs
2739  *   configured for WMI service.
2740  *
2741  * Return:
2742  *  uint8_t: count for WMI eps in target svc map
2743  */
2744 uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *scn);
2745 
2746 #ifdef DP_UMAC_HW_RESET_SUPPORT
2747 /**
2748  * hif_register_umac_reset_handler() - Register UMAC HW reset handler
2749  * @hif_scn: hif opaque handle
2750  * @irq_handler: irq callback handler function
2751  * @tl_handler: tasklet callback handler function
 * @cb_ctx: context to be passed to @handler
2753  * @irq: irq number to be used for UMAC HW reset interrupt
2754  *
2755  * Return: QDF_STATUS of operation
2756  */
2757 QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
2758 					   bool (*irq_handler)(void *cb_ctx),
2759 					   int (*tl_handler)(void *cb_ctx),
2760 					   void *cb_ctx, int irq);
2761 
2762 /**
2763  * hif_unregister_umac_reset_handler() - Unregister UMAC HW reset handler
2764  * @hif_scn: hif opaque handle
2765  *
2766  * Return: QDF_STATUS of operation
2767  */
2768 QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn);
2769 QDF_STATUS hif_get_umac_reset_irq(struct hif_opaque_softc *hif_scn,
2770 				  int *umac_reset_irq);
2771 #else
2772 static inline
2773 QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
2774 					   bool (*irq_handler)(void *cb_ctx),
2775 					   int (*tl_handler)(void *cb_ctx),
2776 					   void *cb_ctx, int irq)
2777 {
2778 	return QDF_STATUS_SUCCESS;
2779 }
2780 
2781 static inline
2782 QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
2783 {
2784 	return QDF_STATUS_SUCCESS;
2785 }
2786 
2787 static inline
2788 QDF_STATUS hif_get_umac_reset_irq(struct hif_opaque_softc *hif_scn,
2789 				  int *umac_reset_irq)
2790 {
2791 	return QDF_STATUS_SUCCESS;
2792 }
2793 
2794 #endif /* DP_UMAC_HW_RESET_SUPPORT */
2795 
2796 #ifdef FEATURE_DIRECT_LINK
2797 /**
2798  * hif_set_irq_config_by_ceid() - Set irq configuration for CE given by id
2799  * @scn: hif opaque handle
2800  * @ce_id: CE id
2801  * @addr: irq trigger address
2802  * @data: irq trigger data
2803  *
2804  * Return: QDF status
2805  */
2806 QDF_STATUS
2807 hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
2808 			   uint64_t addr, uint32_t data);
2809 
2810 /**
2811  * hif_get_direct_link_ce_dest_srng_buffers() - Get Direct Link ce dest srng
2812  *  buffer information
2813  * @scn: hif opaque handle
2814  * @dma_addr: pointer to array of dma addresses
2815  * @buf_size: ce dest ring buffer size
2816  *
2817  * Return: Number of buffers attached to the dest srng.
2818  */
2819 uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn,
2820 						  uint64_t **dma_addr,
2821 						  uint32_t *buf_size);
2822 
2823 /**
2824  * hif_get_direct_link_ce_srng_info() - Get Direct Link CE srng information
2825  * @scn: hif opaque handle
2826  * @info: Direct Link CEs information
2827  * @max_ce_info_len: max array size of ce info
2828  *
2829  * Return: QDF status
2830  */
2831 QDF_STATUS
2832 hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
2833 				 struct hif_direct_link_ce_info *info,
2834 				 uint8_t max_ce_info_len);
2835 #else
/* FEATURE_DIRECT_LINK disabled: direct-link CE helpers are no-op stubs */
static inline QDF_STATUS
hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
			   uint64_t addr, uint32_t data)
{
	return QDF_STATUS_SUCCESS;
}

/* no direct link support: zero buffers attached */
static inline
uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn,
						  uint64_t **dma_addr,
						  uint32_t *buf_size)
{
	return 0;
}

static inline QDF_STATUS
hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
				 struct hif_direct_link_ce_info *info,
				 uint8_t max_ce_info_len)
{
	return QDF_STATUS_SUCCESS;
}
2858 #endif
2859 
/**
 * hif_irq_set_affinity_hint() - pin an IRQ to the given cpu mask
 * @irq_num: OS IRQ number
 * @cpu_mask: requested cpu mask for the IRQ
 *
 * IRQ_NO_BALANCING is cleared so the affinity update is accepted, and
 * re-set afterwards so the kernel's IRQ balancing leaves the requested
 * affinity in place.
 *
 * Return: QDF_STATUS of the affinity update
 */
static inline QDF_STATUS
hif_irq_set_affinity_hint(int irq_num, qdf_cpu_mask *cpu_mask)
{
	QDF_STATUS status;

	/* temporarily allow affinity changes on this IRQ */
	qdf_dev_modify_irq_status(irq_num, IRQ_NO_BALANCING, 0);
	status = qdf_dev_set_irq_affinity(irq_num,
					  (struct qdf_cpu_mask *)cpu_mask);
	/* restore the no-balancing flag to keep the chosen affinity */
	qdf_dev_modify_irq_status(irq_num, 0, IRQ_NO_BALANCING);

	return status;
}
2872 
2873 #ifdef WLAN_FEATURE_AFFINITY_MGR
2874 /**
2875  * hif_affinity_mgr_init_ce_irq() - Init for CE IRQ
2876  * @scn: hif opaque handle
2877  * @id: CE ID
2878  * @irq: IRQ assigned
2879  *
2880  * Return: None
2881  */
2882 void
2883 hif_affinity_mgr_init_ce_irq(struct hif_softc *scn, int id, int irq);
2884 
2885 /**
2886  * hif_affinity_mgr_init_grp_irq() - Init for group IRQ
2887  * @scn: hif opaque handle
2888  * @grp_id: GRP ID
2889  * @irq_num: IRQ number of hif ext group
2890  * @irq: IRQ number assigned
2891  *
2892  * Return: None
2893  */
2894 void
2895 hif_affinity_mgr_init_grp_irq(struct hif_softc *scn, int grp_id,
2896 			      int irq_num, int irq);
2897 
2898 /**
2899  * hif_affinity_mgr_set_qrg_irq_affinity() - Set affinity for group IRQ
2900  * @scn: hif opaque handle
2901  * @irq: IRQ assigned
2902  * @grp_id: GRP ID
2903  * @irq_index: IRQ number of hif ext group
 * @cpu_mask: requested cpu_mask for IRQ
2905  *
2906  * Return: status
2907  */
2908 QDF_STATUS
2909 hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc *scn, uint32_t irq,
2910 				      uint32_t grp_id, uint32_t irq_index,
2911 				      qdf_cpu_mask *cpu_mask);
2912 
2913 /**
2914  * hif_affinity_mgr_set_ce_irq_affinity() - Set affinity for CE IRQ
2915  * @scn: hif opaque handle
2916  * @irq: IRQ assigned
2917  * @ce_id: CE ID
 * @cpu_mask: requested cpu_mask for IRQ
2919  *
2920  * Return: status
2921  */
2922 QDF_STATUS
2923 hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc *scn, uint32_t irq,
2924 				     uint32_t ce_id, qdf_cpu_mask *cpu_mask);
2925 
2926 /**
2927  * hif_affinity_mgr_affine_irq() - Affine CE and GRP IRQs
2928  * @scn: hif opaque handle
2929  *
2930  * Return: None
2931  */
2932 void hif_affinity_mgr_affine_irq(struct hif_softc *scn);
2933 #else
/* WLAN_FEATURE_AFFINITY_MGR disabled: init hooks are no-ops and the
 * set-affinity variants fall through to hif_irq_set_affinity_hint()
 */
static inline void
hif_affinity_mgr_init_ce_irq(struct hif_softc *scn, int id, int irq)
{
}

static inline void
hif_affinity_mgr_init_grp_irq(struct hif_softc *scn, int grp_id, int irq_num,
			      int irq)
{
}

static inline QDF_STATUS
hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc *scn, uint32_t irq,
				      uint32_t grp_id, uint32_t irq_index,
				      qdf_cpu_mask *cpu_mask)
{
	return hif_irq_set_affinity_hint(irq, cpu_mask);
}

static inline QDF_STATUS
hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc *scn, uint32_t irq,
				     uint32_t ce_id, qdf_cpu_mask *cpu_mask)
{
	return hif_irq_set_affinity_hint(irq, cpu_mask);
}

static inline
void hif_affinity_mgr_affine_irq(struct hif_softc *scn)
{
}
2964 #endif
2965 
2966 /**
2967  * hif_affinity_mgr_set_affinity() - Affine CE and GRP IRQs
2968  * @scn: hif opaque handle
2969  *
2970  * Return: None
2971  */
2972 void hif_affinity_mgr_set_affinity(struct hif_opaque_softc *scn);
2973 
2974 #ifdef FEATURE_HIF_DELAYED_REG_WRITE
2975 /**
2976  * hif_print_reg_write_stats() - Print hif delayed reg write stats
2977  * @hif_ctx: hif opaque handle
2978  *
2979  * Return: None
2980  */
2981 void hif_print_reg_write_stats(struct hif_opaque_softc *hif_ctx);
2982 #else
/* FEATURE_HIF_DELAYED_REG_WRITE disabled: no stats to print */
static inline void hif_print_reg_write_stats(struct hif_opaque_softc *hif_ctx)
{
}
2986 #endif
2987 void hif_ce_print_ring_stats(struct hif_opaque_softc *hif_ctx);
2988 #endif /* _HIF_H_ */
2989