xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision 2ea97ac98512848a8d721c76dddf82576e7c417e)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _HIF_H_
20 #define _HIF_H_
21 
22 #ifdef __cplusplus
23 extern "C" {
24 #endif /* __cplusplus */
25 
26 /* Header files */
27 #include <qdf_status.h>
28 #include "qdf_nbuf.h"
29 #include "qdf_lro.h"
30 #include "ol_if_athvar.h"
31 #include <linux/platform_device.h>
32 #ifdef HIF_PCI
33 #include <linux/pci.h>
34 #endif /* HIF_PCI */
35 #ifdef HIF_USB
36 #include <linux/usb.h>
37 #endif /* HIF_USB */
38 #ifdef IPA_OFFLOAD
39 #include <linux/ipa.h>
40 #endif
41 #include "cfg_ucfg_api.h"
42 #include "qdf_dev.h"
43 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
44 
45 typedef void __iomem *A_target_id_t;
46 typedef void *hif_handle_t;
47 
48 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
49 #define HIF_WORK_DRAIN_WAIT_CNT 10
50 #endif
51 
52 #define HIF_TYPE_AR6002   2
53 #define HIF_TYPE_AR6003   3
54 #define HIF_TYPE_AR6004   5
55 #define HIF_TYPE_AR9888   6
56 #define HIF_TYPE_AR6320   7
57 #define HIF_TYPE_AR6320V2 8
58 /* For attaching Peregrine 2.0 board host_reg_tbl only */
59 #define HIF_TYPE_AR9888V2 9
60 #define HIF_TYPE_ADRASTEA 10
61 #define HIF_TYPE_AR900B 11
62 #define HIF_TYPE_QCA9984 12
63 #define HIF_TYPE_IPQ4019 13
64 #define HIF_TYPE_QCA9888 14
65 #define HIF_TYPE_QCA8074 15
66 #define HIF_TYPE_QCA6290 16
67 #define HIF_TYPE_QCN7605 17
68 #define HIF_TYPE_QCA6390 18
69 #define HIF_TYPE_QCA8074V2 19
70 #define HIF_TYPE_QCA6018  20
71 #define HIF_TYPE_QCN9000 21
72 #define HIF_TYPE_QCA6490 22
73 #define HIF_TYPE_QCA6750 23
74 #define HIF_TYPE_QCA5018 24
75 #define HIF_TYPE_QCN6122 25
76 #define HIF_TYPE_WCN7850 26
77 #define HIF_TYPE_QCN9224 27
78 
79 #define DMA_COHERENT_MASK_DEFAULT   37
80 
81 #ifdef IPA_OFFLOAD
82 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
83 #endif
84 
85 /* enum hif_ic_irq - enum defining integrated chip irq numbers
86  * defining irq numbers that can be used by external modules like datapath
87  */
88 enum hif_ic_irq {
89 	host2wbm_desc_feed = 16,
90 	host2reo_re_injection,
91 	host2reo_command,
92 	host2rxdma_monitor_ring3,
93 	host2rxdma_monitor_ring2,
94 	host2rxdma_monitor_ring1,
95 	reo2host_exception,
96 	wbm2host_rx_release,
97 	reo2host_status,
98 	reo2host_destination_ring4,
99 	reo2host_destination_ring3,
100 	reo2host_destination_ring2,
101 	reo2host_destination_ring1,
102 	rxdma2host_monitor_destination_mac3,
103 	rxdma2host_monitor_destination_mac2,
104 	rxdma2host_monitor_destination_mac1,
105 	ppdu_end_interrupts_mac3,
106 	ppdu_end_interrupts_mac2,
107 	ppdu_end_interrupts_mac1,
108 	rxdma2host_monitor_status_ring_mac3,
109 	rxdma2host_monitor_status_ring_mac2,
110 	rxdma2host_monitor_status_ring_mac1,
111 	host2rxdma_host_buf_ring_mac3,
112 	host2rxdma_host_buf_ring_mac2,
113 	host2rxdma_host_buf_ring_mac1,
114 	rxdma2host_destination_ring_mac3,
115 	rxdma2host_destination_ring_mac2,
116 	rxdma2host_destination_ring_mac1,
117 	host2tcl_input_ring4,
118 	host2tcl_input_ring3,
119 	host2tcl_input_ring2,
120 	host2tcl_input_ring1,
121 	wbm2host_tx_completions_ring3,
122 	wbm2host_tx_completions_ring2,
123 	wbm2host_tx_completions_ring1,
124 	tcl2host_status_ring,
125 };
126 
127 struct CE_state;
128 #ifdef QCA_WIFI_QCN9224
129 #define CE_COUNT_MAX 16
130 #else
131 #define CE_COUNT_MAX 12
132 #endif
133 #define HIF_MAX_GRP_IRQ 16
134 
135 #ifndef HIF_MAX_GROUP
136 #define HIF_MAX_GROUP 7
137 #endif
138 
139 #ifndef NAPI_YIELD_BUDGET_BASED
140 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
141 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
142 #endif
143 #else  /* NAPI_YIELD_BUDGET_BASED */
144 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
145 #endif /* NAPI_YIELD_BUDGET_BASED */
146 
147 #define QCA_NAPI_BUDGET    64
148 #define QCA_NAPI_DEF_SCALE  \
149 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
150 
151 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
152 /* NOTE: "napi->scale" can be changed,
153  * but this does not change the number of buckets
154  */
155 #define QCA_NAPI_NUM_BUCKETS 4
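/*
 * Illustrative arithmetic, assuming the defaults above: with
 * QCA_NAPI_DEF_SCALE_BIN_SHIFT = 4, QCA_NAPI_DEF_SCALE = 1 << 4 = 16,
 * so HIF_NAPI_MAX_RECEIVES = QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE
 * = 64 * 16 = 1024.
 */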
156 
157 /**
158  * struct qca_napi_stat - stats structure for execution contexts
159  * @napi_schedules: number of times the schedule function is called
160  * @napi_polls: number of times the execution context runs
161  * @napi_completes: number of times that the generating interrupt is re-enabled
162  * @napi_workdone: cumulative work done reported by the handler
163  * @cpu_corrected: incremented when the execution context runs on a different
164  *			core than the one its irq is affined to
165  * @napi_budget_uses: histogram of work done per execution run
166  * @time_limit_reached: count of yields due to time limit thresholds
167  * @rxpkt_thresh_reached: count of yields due to a work limit
168  * @napi_max_poll_time: longest single poll time observed
169  * @poll_time_buckets: histogram of poll times for the napi
170  */
171 struct qca_napi_stat {
172 	uint32_t napi_schedules;
173 	uint32_t napi_polls;
174 	uint32_t napi_completes;
175 	uint32_t napi_workdone;
176 	uint32_t cpu_corrected;
177 	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
178 	uint32_t time_limit_reached;
179 	uint32_t rxpkt_thresh_reached;
180 	unsigned long long napi_max_poll_time;
181 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
182 	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
183 #endif
184 };
185 
186 
187 /**
188  * struct qca_napi_info - per NAPI instance data structure
189  *
190  * This data structure holds per-NAPI-instance state. Note that, in the
191  * current implementation, though scale is an instance variable, it is
192  * set to the same value for all instances.
193  */
194 struct qca_napi_info {
195 	struct net_device    netdev; /* dummy net_dev */
196 	void                 *hif_ctx;
197 	struct napi_struct   napi;
198 	uint8_t              scale;   /* currently same on all instances */
199 	uint8_t              id;
200 	uint8_t              cpu;
201 	int                  irq;
202 	cpumask_t            cpumask;
203 	struct qca_napi_stat stats[NR_CPUS];
204 #ifdef RECEIVE_OFFLOAD
205 	/* will only be present for data rx CE's */
206 	void (*offld_flush_cb)(void *);
207 	struct napi_struct   rx_thread_napi;
208 	struct net_device    rx_thread_netdev;
209 #endif /* RECEIVE_OFFLOAD */
210 	qdf_lro_ctx_t        lro_ctx;
211 };
212 
213 enum qca_napi_tput_state {
214 	QCA_NAPI_TPUT_UNINITIALIZED,
215 	QCA_NAPI_TPUT_LO,
216 	QCA_NAPI_TPUT_HI
217 };
218 enum qca_napi_cpu_state {
219 	QCA_NAPI_CPU_UNINITIALIZED,
220 	QCA_NAPI_CPU_DOWN,
221 	QCA_NAPI_CPU_UP };
222 
223 /**
224  * struct qca_napi_cpu - an entry of the napi cpu table
225  * @state:       up/down state of this core
226  * @core_id:     physical core id of the core
227  * @cluster_id:  cluster this core belongs to
228  * @core_mask:   mask matching all cores of this cluster
229  * @thread_mask: mask for this core within the cluster
230  * @max_freq:    maximum clock this core can be clocked at;
231  *               same for all cpus of the same cluster
232  * @napis:       bitmap of napi instances on this core
233  * @execs:       bitmap of execution contexts on this core
234  * @cluster_nxt: index (not a pointer) linking cores of the same cluster
235  *
236  * This structure represents a single entry in the napi cpu table, which
237  * is part of struct qca_napi_data. The table is initialized by the init
238  * function while the first napi instance is being created, updated by the
239  * hotplug notifier and when cpu affinity decisions are made (by throughput
240  * detection), and deleted when the last napi instance is removed.
241  */
242 struct qca_napi_cpu {
243 	enum qca_napi_cpu_state state;
244 	int			core_id;
245 	int			cluster_id;
246 	cpumask_t		core_mask;
247 	cpumask_t		thread_mask;
248 	unsigned int		max_freq;
249 	uint32_t		napis;
250 	uint32_t		execs;
251 	int			cluster_nxt;  /* index, not pointer */
252 };
253 
254 /**
255  * struct qca_napi_data - collection of napi data for a single hif context
256  * @hif_softc: pointer to the hif context
257  * @lock: spinlock used in the event state machine
258  * @state: state variable used in the napi state machine
259  * @ce_map: bit map indicating which ce's have napis running
260  * @exec_map: bit map of instantiated exec contexts
261  * @user_cpu_affin_mask: CPU affinity mask from INI config
262  * @napi_cpu: cpu info for irq affinity
263  * @lilcl_head: head of the little-cluster core list
264  * @bigcl_head: head of the big-cluster core list
265  * @napi_mode: irq affinity & clock voting mode
266  * @cpuhp_handler: CPU hotplug event registration handle
267  */
268 struct qca_napi_data {
269 	struct               hif_softc *hif_softc;
270 	qdf_spinlock_t       lock;
271 	uint32_t             state;
272 
273 	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
274 	 * not used by clients (clients use an id returned by create)
275 	 */
276 	uint32_t             ce_map;
277 	uint32_t             exec_map;
278 	uint32_t             user_cpu_affin_mask;
279 	struct qca_napi_info *napis[CE_COUNT_MAX];
280 	struct qca_napi_cpu  napi_cpu[NR_CPUS];
281 	int                  lilcl_head, bigcl_head;
282 	enum qca_napi_tput_state napi_mode;
283 	struct qdf_cpuhp_handler *cpuhp_handler;
284 	uint8_t              flags;
285 };
286 
287 /**
288  * struct hif_config_info - Place Holder for HIF configuration
289  * @enable_self_recovery: Self Recovery
290  * @enable_runtime_pm: Enable Runtime PM
291  * @runtime_pm_delay: Runtime PM Delay
292  * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
293  *
294  * Structure for holding HIF ini parameters.
295  */
296 struct hif_config_info {
297 	bool enable_self_recovery;
298 #ifdef FEATURE_RUNTIME_PM
299 	uint8_t enable_runtime_pm;
300 	u_int32_t runtime_pm_delay;
301 #endif
302 	uint64_t rx_softirq_max_yield_duration_ns;
303 };
304 
305 /**
306  * struct hif_target_info - Target Information
307  * @target_version: Target Version
308  * @target_type: Target Type
309  * @target_revision: Target Revision
310  * @soc_version: SOC Version
311  * @hw_name: pointer to hardware name
312  *
313  * Structure to hold target information.
314  */
315 struct hif_target_info {
316 	uint32_t target_version;
317 	uint32_t target_type;
318 	uint32_t target_revision;
319 	uint32_t soc_version;
320 	char *hw_name;
321 };
322 
323 struct hif_opaque_softc {
324 };
325 
326 /**
327  * enum hif_event_type - Type of DP events to be recorded
328  * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
329  * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event
330  * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event
331  * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
332  * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
333  * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
334  */
335 enum hif_event_type {
336 	HIF_EVENT_IRQ_TRIGGER,
337 	HIF_EVENT_TIMER_ENTRY,
338 	HIF_EVENT_TIMER_EXIT,
339 	HIF_EVENT_BH_SCHED,
340 	HIF_EVENT_SRNG_ACCESS_START,
341 	HIF_EVENT_SRNG_ACCESS_END,
342 	/* Do check hif_hist_skip_event_record when adding new events */
343 };
344 
345 /**
346  * enum hif_system_pm_state - System PM state
347  * HIF_SYSTEM_PM_STATE_ON: System in active state
348  * HIF_SYSTEM_PM_STATE_BUS_RESUMING: bus resume in progress as part of
349  *  system resume
350  * HIF_SYSTEM_PM_STATE_BUS_SUSPENDING: bus suspend in progress as part of
351  *  system suspend
352  * HIF_SYSTEM_PM_STATE_BUS_SUSPENDED: bus suspended as part of system suspend
353  */
354 enum hif_system_pm_state {
355 	HIF_SYSTEM_PM_STATE_ON,
356 	HIF_SYSTEM_PM_STATE_BUS_RESUMING,
357 	HIF_SYSTEM_PM_STATE_BUS_SUSPENDING,
358 	HIF_SYSTEM_PM_STATE_BUS_SUSPENDED,
359 };
360 
361 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
362 
363 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
364 /* HIF_EVENT_HIST_MAX should always be power of 2 */
365 #define HIF_EVENT_HIST_MAX		512
366 #define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP
367 #define HIF_EVENT_HIST_ENABLE_MASK	0x3F
368 
369 static inline uint64_t hif_get_log_timestamp(void)
370 {
371 	return qdf_get_log_timestamp();
372 }
373 
374 #else
375 
376 #define HIF_EVENT_HIST_MAX		32
377 #define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP
378 /* Enable IRQ TRIGGER, NAPI SCHEDULE, SRNG ACCESS START */
379 #define HIF_EVENT_HIST_ENABLE_MASK	0x19
380 
381 static inline uint64_t hif_get_log_timestamp(void)
382 {
383 	return qdf_sched_clock();
384 }
385 
386 #endif
387 
388 /**
389  * struct hif_event_record - an entry of the DP event history
390  * @hal_ring_id: ring id for which event is recorded
391  * @hp: head pointer of the ring (may not be applicable for all events)
392  * @tp: tail pointer of the ring (may not be applicable for all events)
393  * @cpu_id: cpu id on which the event occurred
394  * @timestamp: timestamp when event occurred
395  * @type: type of the event
396  *
397  * This structure represents the information stored for every datapath
398  * event which is logged in the history.
399  */
400 struct hif_event_record {
401 	uint8_t hal_ring_id;
402 	uint32_t hp;
403 	uint32_t tp;
404 	int cpu_id;
405 	uint64_t timestamp;
406 	enum hif_event_type type;
407 };
408 
409 /**
410  * struct hif_event_misc - history related misc info
411  * @last_irq_index: last irq event index in history
412  * @last_irq_ts: last irq timestamp
413  */
414 struct hif_event_misc {
415 	int32_t last_irq_index;
416 	uint64_t last_irq_ts;
417 };
418 
419 /**
420  * struct hif_event_history - history for one interrupt group
421  * @index: index to store new event
422  * @event: event entry
423  *
424  * This structure represents the datapath history for one
425  * interrupt group.
426  */
427 struct hif_event_history {
428 	qdf_atomic_t index;
429 	struct hif_event_misc misc;
430 	struct hif_event_record event[HIF_EVENT_HIST_MAX];
431 };
432 
433 /**
434  * hif_hist_record_event() - Record one datapath event in history
435  * @hif_ctx: HIF opaque context
436  * @event: DP event entry
437  * @intr_grp_id: interrupt group ID registered with hif
438  *
439  * Return: None
440  */
441 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
442 			   struct hif_event_record *event,
443 			   uint8_t intr_grp_id);
444 
445 /**
446  * hif_event_history_init() - Initialize SRNG event history buffers
447  * @hif_ctx: HIF opaque context
448  * @id: context group ID for which history is recorded
449  *
450  * Returns: None
451  */
452 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id);
453 
454 /**
455  * hif_event_history_deinit() - De-initialize SRNG event history buffers
456  * @hif_ctx: HIF opaque context
457  * @id: context group ID for which history is recorded
458  *
459  * Returns: None
460  */
461 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id);
462 
463 /**
464  * hif_record_event() - Wrapper function to form and record DP event
465  * @hif_ctx: HIF opaque context
466  * @intr_grp_id: interrupt group ID registered with hif
467  * @hal_ring_id: ring id for which event is recorded
468  * @hp: head pointer index of the srng
469  * @tp: tail pointer index of the srng
470  * @type: type of the event to be logged in history
471  *
472  * Return: None
473  */
474 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
475 				    uint8_t intr_grp_id,
476 				    uint8_t hal_ring_id,
477 				    uint32_t hp,
478 				    uint32_t tp,
479 				    enum hif_event_type type)
480 {
481 	struct hif_event_record event;
482 
483 	event.hal_ring_id = hal_ring_id;
484 	event.hp = hp;
485 	event.tp = tp;
486 	event.type = type;
487 
488 	hif_hist_record_event(hif_ctx, &event, intr_grp_id);
489 
490 	return;
491 }
492 
493 #else
494 
495 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
496 				    uint8_t intr_grp_id,
497 				    uint8_t hal_ring_id,
498 				    uint32_t hp,
499 				    uint32_t tp,
500 				    enum hif_event_type type)
501 {
502 }
503 
504 static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx,
505 					  uint8_t id)
506 {
507 }
508 
509 static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx,
510 					    uint8_t id)
511 {
512 }
513 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
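/*
 * Example (illustrative only): a datapath handler serving interrupt group
 * 'grp_id' could bracket its ring processing with start/end events so the
 * history captures head/tail pointer movement. The grp_id, ring_id, hp and
 * tp names below are placeholders for values owned by the caller.
 *
 *	hif_record_event(hif_ctx, grp_id, ring_id, hp, tp,
 *			 HIF_EVENT_SRNG_ACCESS_START);
 *	... process ring entries ...
 *	hif_record_event(hif_ctx, grp_id, ring_id, hp, tp,
 *			 HIF_EVENT_SRNG_ACCESS_END);
 */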
514 
515 /**
516  * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
517  *
518  * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
519  * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
520  *                         minimize power
521  * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
522  *                         platform-specific measures to completely power-off
523  *                         the module and associated hardware (i.e. cut power
524  *                         supplies)
525  */
526 enum HIF_DEVICE_POWER_CHANGE_TYPE {
527 	HIF_DEVICE_POWER_UP,
528 	HIF_DEVICE_POWER_DOWN,
529 	HIF_DEVICE_POWER_CUT
530 };
531 
532 /**
533  * enum hif_enable_type: what triggered the enabling of hif
534  *
535  * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
536  * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
537  */
538 enum hif_enable_type {
539 	HIF_ENABLE_TYPE_PROBE,
540 	HIF_ENABLE_TYPE_REINIT,
541 	HIF_ENABLE_TYPE_MAX
542 };
543 
544 /**
545  * enum hif_disable_type: what triggered the disabling of hif
546  *
547  * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
548  * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
549  * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
550  * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
551  */
552 enum hif_disable_type {
553 	HIF_DISABLE_TYPE_PROBE_ERROR,
554 	HIF_DISABLE_TYPE_REINIT_ERROR,
555 	HIF_DISABLE_TYPE_REMOVE,
556 	HIF_DISABLE_TYPE_SHUTDOWN,
557 	HIF_DISABLE_TYPE_MAX
558 };
559 /**
560  * enum hif_device_config_opcode: configure mode
561  *
562  * @HIF_DEVICE_POWER_STATE: device power state
563  * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
564  * @HIF_DEVICE_GET_FIFO_ADDR: get FIFO address
565  * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
566  * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
567  * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
568  * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
569  * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
570  * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
571  * @HIF_DEVICE_GET_OS_DEVICE: get OS device
572  * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
573  * @HIF_BMI_DONE: bmi done
574  * @HIF_DEVICE_SET_TARGET_TYPE: set target type
575  * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
576  * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
577  */
578 enum hif_device_config_opcode {
579 	HIF_DEVICE_POWER_STATE = 0,
580 	HIF_DEVICE_GET_BLOCK_SIZE,
581 	HIF_DEVICE_GET_FIFO_ADDR,
582 	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
583 	HIF_DEVICE_GET_IRQ_PROC_MODE,
584 	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
585 	HIF_DEVICE_POWER_STATE_CHANGE,
586 	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
587 	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
588 	HIF_DEVICE_GET_OS_DEVICE,
589 	HIF_DEVICE_DEBUG_BUS_STATE,
590 	HIF_BMI_DONE,
591 	HIF_DEVICE_SET_TARGET_TYPE,
592 	HIF_DEVICE_SET_HTC_CONTEXT,
593 	HIF_DEVICE_GET_HTC_CONTEXT,
594 };
595 
596 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
597 struct HID_ACCESS_LOG {
598 	uint32_t seqnum;
599 	bool is_write;
600 	void *addr;
601 	uint32_t value;
602 };
603 #endif
604 
605 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
606 		uint32_t value);
607 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
608 
609 #define HIF_MAX_DEVICES                 1
610 /**
611  * struct htc_callbacks - Structure for HTC Callbacks methods
612  * @context:             context to pass to the dsr_handler
613  *                       note: rw_compl_handler is provided the context
614  *                       passed to hif_read_write
615  * @rw_compl_handler:    Read / write completion handler
616  * @dsr_handler:         DSR Handler
617  */
618 struct htc_callbacks {
619 	void *context;
620 	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
621 	QDF_STATUS(*dsr_handler)(void *context);
622 };
623 
624 /**
625  * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
626  * @context: Private data context
627  * @set_recovery_in_progress: To Set Driver state for recovery in progress
628  * @is_recovery_in_progress: Query if driver state is recovery in progress
629  * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
630  * @is_driver_unloading: Query if driver is unloading
631  * @is_target_ready: Query if the target is ready
632  * @get_bandwidth_level: Query current bandwidth level for the driver
633  * @prealloc_get_consistent_mem_unaligned: get prealloc unaligned consistent mem
634  * @prealloc_put_consistent_mem_unaligned: put unaligned consistent mem to pool
635  *
636  * This structure provides callback pointers for HIF to query hdd for
637  * driver states.
636  */
637 struct hif_driver_state_callbacks {
638 	void *context;
639 	void (*set_recovery_in_progress)(void *context, uint8_t val);
640 	bool (*is_recovery_in_progress)(void *context);
641 	bool (*is_load_unload_in_progress)(void *context);
642 	bool (*is_driver_unloading)(void *context);
643 	bool (*is_target_ready)(void *context);
644 	int (*get_bandwidth_level)(void *context);
645 	void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size,
646 						       qdf_dma_addr_t *paddr,
647 						       uint32_t ring_type);
648 	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
649 };
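/*
 * Example (illustrative only): the driver layer typically fills this
 * structure with its own handlers and passes it to hif_open(). The
 * my_* names below are placeholders, not symbols from this code base.
 *
 *	static struct hif_driver_state_callbacks cbk = {
 *		.context = my_driver_ctx,
 *		.is_recovery_in_progress = my_is_recovery_in_progress,
 *		.is_driver_unloading = my_is_driver_unloading,
 *	};
 *
 *	hif_ctx = hif_open(qdf_ctx, mode, bus_type, &cbk, psoc);
 */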
650 
651 /* This API detaches the HTC layer from the HIF device */
652 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
653 
654 /****************************************************************/
655 /* BMI and Diag window abstraction                              */
656 /****************************************************************/
657 
658 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
659 
660 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
661 				     * handled atomically by
662 				     * DiagRead/DiagWrite
663 				     */
664 
665 #ifdef WLAN_FEATURE_BMI
666 /*
667  * API to handle HIF-specific BMI message exchanges, this API is synchronous
668  * and only allowed to be called from a context that can block (sleep)
669  */
670 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
671 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
672 				uint8_t *pSendMessage, uint32_t Length,
673 				uint8_t *pResponseMessage,
674 				uint32_t *pResponseLength, uint32_t TimeoutMS);
675 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
676 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
677 #else /* WLAN_FEATURE_BMI */
678 static inline void
679 hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
680 {
681 }
682 
683 static inline bool
684 hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
685 {
686 	return false;
687 }
688 #endif /* WLAN_FEATURE_BMI */
689 
690 /*
691  * APIs to handle HIF specific diagnostic read accesses. These APIs are
692  * synchronous and only allowed to be called from a context that
693  * can block (sleep). They are not high performance APIs.
694  *
695  * hif_diag_read_access reads a 4 Byte aligned/length value from a
696  * Target register or memory word.
697  *
698  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
699  */
700 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
701 				uint32_t address, uint32_t *data);
702 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
703 		      uint8_t *data, int nbytes);
704 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
705 			void *ramdump_base, uint32_t address, uint32_t size);
706 /*
707  * APIs to handle HIF specific diagnostic write accesses. These APIs are
708  * synchronous and only allowed to be called from a context that
709  * can block (sleep).
710  * They are not high performance APIs.
711  *
712  * hif_diag_write_access writes a 4 Byte aligned/length value to a
713  * Target register or memory word.
714  *
715  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
716  */
717 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
718 				 uint32_t address, uint32_t data);
719 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
720 			uint32_t address, uint8_t *data, int nbytes);
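/*
 * Example (illustrative only): the diag accessors are blocking helpers, so
 * they may only be used from a sleepable context. 'target_addr' is a
 * placeholder target address.
 *
 *	uint32_t val;
 *
 *	if (hif_diag_read_access(hif_ctx, target_addr, &val) ==
 *	    QDF_STATUS_SUCCESS)
 *		hif_diag_write_access(hif_ctx, target_addr, val | 0x1);
 */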
721 
722 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
723 
724 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
725 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
726 
727 /*
728  * Set the FASTPATH_mode_on flag in sc, for use by data path
729  */
730 #ifdef WLAN_FEATURE_FASTPATH
731 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
732 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
733 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
734 
735 /**
736  * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
737  * @hif_ctx: HIF opaque context
738  * @handler: Callback function
739  * @context: handle for callback function
739  *
740  * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
741  */
742 QDF_STATUS hif_ce_fastpath_cb_register(
743 		struct hif_opaque_softc *hif_ctx,
744 		fastpath_msg_handler handler, void *context);
745 #else
746 static inline QDF_STATUS hif_ce_fastpath_cb_register(
747 		struct hif_opaque_softc *hif_ctx,
748 		fastpath_msg_handler handler, void *context)
749 {
750 	return QDF_STATUS_E_FAILURE;
751 }
752 
753 static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
754 {
755 	return NULL;
756 }
757 
758 #endif
759 
760 /*
761  * Enable/disable CDC max performance workaround
762  * For max-performance set this to 0
763  * To allow SoC to enter sleep set this to 1
764  */
765 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
766 
767 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
768 			     qdf_shared_mem_t **ce_sr,
769 			     uint32_t *ce_sr_ring_size,
770 			     qdf_dma_addr_t *ce_reg_paddr);
771 
772 /**
773  * struct hif_msg_callbacks - list of callbacks, filled in by HTC.
774  */
775 struct hif_msg_callbacks {
776 	void *Context;
777 	/**< context meaningful to HTC */
778 	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
779 					uint32_t transferID,
780 					uint32_t toeplitz_hash_result);
781 	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
782 					uint8_t pipeID);
783 	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
784 	void (*fwEventHandler)(void *context, QDF_STATUS status);
785 	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
786 };
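/*
 * Example (illustrative only): HTC fills this structure with its completion
 * handlers and registers it with HIF during initialization. The htc_*
 * handler names below are placeholders.
 *
 *	struct hif_msg_callbacks cbs = {
 *		.Context = htc_handle,
 *		.txCompletionHandler = htc_tx_completion_handler,
 *		.rxCompletionHandler = htc_rx_completion_handler,
 *	};
 *
 *	hif_post_init(hif_ctx, htc_handle, &cbs);
 */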
787 
788 enum hif_target_status {
789 	TARGET_STATUS_CONNECTED = 0,  /* target connected */
790 	TARGET_STATUS_RESET,  /* target got reset */
791 	TARGET_STATUS_EJECT,  /* target got ejected */
792 	TARGET_STATUS_SUSPEND /* target got suspended */
793 };
794 
795 /**
796  * enum hif_attribute_flags: configure hif
797  *
798  * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
799  * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor +
800  *				  no pktlog CE
801  */
802 enum hif_attribute_flags {
803 	HIF_LOWDESC_CE_CFG = 1,
804 	HIF_LOWDESC_CE_NO_PKTLOG_CFG
805 };
806 
807 #define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
808 	(attr |= (v & 0x01) << 5)
809 #define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
810 	(attr |= (v & 0x03) << 6)
811 #define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
812 	(attr |= (v & 0x01) << 13)
813 #define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
814 	(attr |= (v & 0x01) << 14)
815 #define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
816 	(attr |= (v & 0x01) << 15)
817 #define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
818 	(attr |= (v & 0x0FFF) << 16)
819 #define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
820 	(attr |= (v & 0x01) << 30)
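/*
 * Example (illustrative only): the setter macros above OR fields into a
 * single data_attr word which is then passed to hif_send_head(). The
 * pipe_id/transfer_id/nbytes/wbuf values are placeholders.
 *
 *	uint32_t data_attr = 0;
 *
 *	HIF_DATA_ATTR_SET_TX_CLASSIFY(data_attr, 1);
 *	HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(data_attr, 1);
 *	hif_send_head(hif_ctx, pipe_id, transfer_id, nbytes, wbuf, data_attr);
 */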
821 
822 struct hif_ul_pipe_info {
823 	unsigned int nentries;
824 	unsigned int nentries_mask;
825 	unsigned int sw_index;
826 	unsigned int write_index; /* cached copy */
827 	unsigned int hw_index;    /* cached copy */
828 	void *base_addr_owner_space; /* Host address space */
829 	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
830 };
831 
832 struct hif_dl_pipe_info {
833 	unsigned int nentries;
834 	unsigned int nentries_mask;
835 	unsigned int sw_index;
836 	unsigned int write_index; /* cached copy */
837 	unsigned int hw_index;    /* cached copy */
838 	void *base_addr_owner_space; /* Host address space */
839 	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
840 };
841 
842 struct hif_pipe_addl_info {
843 	uint32_t pci_mem;
844 	uint32_t ctrl_addr;
845 	struct hif_ul_pipe_info ul_pipe;
846 	struct hif_dl_pipe_info dl_pipe;
847 };
848 
849 #ifdef CONFIG_SLUB_DEBUG_ON
850 #define MSG_FLUSH_NUM 16
851 #else /* PERF build */
852 #define MSG_FLUSH_NUM 32
853 #endif /* SLUB_DEBUG_ON */
854 
855 struct hif_bus_id;
856 
857 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
858 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
859 		     int opcode, void *config, uint32_t config_len);
860 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
861 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
862 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
863 		   struct hif_msg_callbacks *callbacks);
864 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
865 void hif_stop(struct hif_opaque_softc *hif_ctx);
866 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
867 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
868 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
869 		      uint8_t cmd_id, bool start);
870 
871 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
872 				  uint32_t transferID, uint32_t nbytes,
873 				  qdf_nbuf_t wbuf, uint32_t data_attr);
874 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
875 			     int force);
876 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
877 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
878 			  uint8_t *DLPipe);
879 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
880 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
881 			int *dl_is_polled);
882 uint16_t
883 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
884 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
885 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
886 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
887 		     bool wait_for_it);
888 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
889 #ifndef HIF_PCI
890 static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
891 {
892 	return 0;
893 }
894 #else
895 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
896 #endif
897 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
898 			u32 *revision, const char **target_name);
899 
900 #ifdef RECEIVE_OFFLOAD
901 /**
902  * hif_offld_flush_cb_register() - Register the offld flush callback
903  * @scn: HIF opaque context
904  * @offld_flush_handler: Flush callback: either ol_flush, in case of
905  *			 rx_thread, or GRO/LRO flush when RxThread is not
906  *			 enabled. Called with the corresponding context.
907  * Return: None
908  */
909 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
910 				 void (offld_flush_handler)(void *ol_ctx));
911 
912 /**
913  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
914  * @scn: HIF opaque context
915  *
916  * Return: None
917  */
918 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
919 #endif
920 
921 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
922 /**
923  * hif_exec_should_yield() - Check if hif napi context should yield
924  * @hif_ctx - HIF opaque context
925  * @grp_id - grp_id of the napi for which check needs to be done
926  *
927  * The function uses grp_id to look for NAPI and checks if NAPI needs to
928  * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
929  * yield decision.
930  *
931  * Return: true if NAPI needs to yield, else false
932  */
933 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
934 #else
935 static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
936 					 uint grp_id)
937 {
938 	return false;
939 }
940 #endif
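/*
 * Example (illustrative only): an external-group poll routine can consult
 * hif_exec_should_yield() to stop early once the time threshold is hit.
 *
 *	while (work_done < budget) {
 *		... process one ring entry, increment work_done ...
 *		if (hif_exec_should_yield(hif_ctx, grp_id))
 *			break;
 *	}
 */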
941 
942 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
943 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
944 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
945 				      int htc_htt_tx_endpoint);
946 
947 /**
948  * hif_open() - Create hif handle
949  * @qdf_ctx: qdf context
950  * @mode: Driver Mode
951  * @bus_type: Bus Type
952  * @cbk: CDS Callbacks
953  * @psoc: psoc object manager
954  *
955  * API to open HIF Context
956  *
957  * Return: HIF Opaque Pointer
958  */
959 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
960 				  uint32_t mode,
961 				  enum qdf_bus_type bus_type,
962 				  struct hif_driver_state_callbacks *cbk,
963 				  struct wlan_objmgr_psoc *psoc);
964 
965 /**
966  * hif_init_dma_mask() - Set dma mask for the dev
967  * @dev: dev for which DMA mask is to be set
968  * @bus_type: bus type for the target
969  *
970  * This API sets the DMA mask for the device before the datapath
971  * memory pre-allocation is done. If the DMA mask is not set before
972  * requesting the DMA memory, the kernel defaults to a 32-bit DMA mask,
973  * and the full device capability is not utilized.
974  *
975  * Return: 0 - success, non-zero on failure.
976  */
977 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type);
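/*
 * Example (illustrative only): the bus layer is expected to call this from
 * probe, before any datapath memory is pre-allocated; 'dev', 'bus_type' and
 * the 'probe_fail' label are placeholders.
 *
 *	if (hif_init_dma_mask(dev, bus_type))
 *		goto probe_fail;
 */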
978 void hif_close(struct hif_opaque_softc *hif_ctx);
979 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
980 		      void *bdev, const struct hif_bus_id *bid,
981 		      enum qdf_bus_type bus_type,
982 		      enum hif_enable_type type);
983 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
984 #ifdef CE_TASKLET_DEBUG_ENABLE
985 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
986 				 uint8_t value);
987 #endif
988 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
989 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
990 
991 /**
992  * enum hif_pm_wake_irq_type - Wake interrupt type for Power Management
993  * HIF_PM_INVALID_WAKE: Wake irq is invalid or not configured
994  * HIF_PM_MSI_WAKE: Wake irq is MSI interrupt
995  * HIF_PM_CE_WAKE: Wake irq is CE interrupt
996  */
997 typedef enum {
998 	HIF_PM_INVALID_WAKE,
999 	HIF_PM_MSI_WAKE,
1000 	HIF_PM_CE_WAKE,
1001 } hif_pm_wake_irq_type;
1002 
1003 /**
1004  * hif_pm_get_wake_irq_type - Get wake irq type for Power Management
1005  * @hif_ctx: HIF context
1006  *
1007  * Return: enum hif_pm_wake_irq_type
1008  */
1009 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx);
1010 
1011 /**
1012  * enum wlan_rtpm_dbgid - runtime pm put/get debug id
1013  * @RTPM_ID_RESVERD:       Reserved
1014  * @RTPM_ID_WMI:           WMI sending msg, expect put happen at
1015  *                         tx completion from CE level directly.
1016  * @RTPM_ID_HTC:           pkt sending by HTT_DATA_MSG_SVC, expect
1017  *                         put from fw response or just in
1018  *                         htc_issue_packets
1019  * @RTPM_ID_QOS_NOTIFY:    pm qos notifier
1020  * @RTPM_ID_DP_TX_DESC_ALLOC_FREE:      tx desc alloc/free
1021  * @RTPM_ID_CE_SEND_FAST:  operation in ce_send_fast, not include
1022  *                         the pkt put happens outside this function
1023  * @RTPM_ID_SUSPEND_RESUME:     suspend/resume in hdd
1024  * @RTPM_ID_DW_TX_HW_ENQUEUE:   operation in function dp_tx_hw_enqueue
1025  * @RTPM_ID_HAL_REO_CMD:        HAL_REO_CMD operation
1026  * @RTPM_ID_DP_PRINT_RING_STATS:  operation in dp_print_ring_stats
1027  */
1028 /* New value added to the enum must also be reflected in function
1029  *  rtpm_string_from_dbgid()
1030  */
1031 typedef enum {
1032 	RTPM_ID_RESVERD   = 0,
1033 	RTPM_ID_WMI       = 1,
1034 	RTPM_ID_HTC       = 2,
1035 	RTPM_ID_QOS_NOTIFY  = 3,
1036 	RTPM_ID_DP_TX_DESC_ALLOC_FREE  = 4,
1037 	RTPM_ID_CE_SEND_FAST       = 5,
1038 	RTPM_ID_SUSPEND_RESUME     = 6,
1039 	RTPM_ID_DW_TX_HW_ENQUEUE   = 7,
1040 	RTPM_ID_HAL_REO_CMD        = 8,
1041 	RTPM_ID_DP_PRINT_RING_STATS  = 9,
1042 
1043 	RTPM_ID_MAX,
1044 } wlan_rtpm_dbgid;
1045 
1046 /**
1047  * rtpm_string_from_dbgid() - Convert dbgid to respective string
1048  * @id: debug id
1049  *
1050  * Debug support function to convert dbgid to string.
1051  * Please note to add new string in the array at index equal to
1052  * its enum value in wlan_rtpm_dbgid.
1053  */
1054 static inline char *rtpm_string_from_dbgid(wlan_rtpm_dbgid id)
1055 {
1056 	static const char *strings[] = { "RTPM_ID_RESVERD",
1057 					"RTPM_ID_WMI",
1058 					"RTPM_ID_HTC",
1059 					"RTPM_ID_QOS_NOTIFY",
1060 					"RTPM_ID_DP_TX_DESC_ALLOC_FREE",
1061 					"RTPM_ID_CE_SEND_FAST",
1062 					"RTPM_ID_SUSPEND_RESUME",
1063 					"RTPM_ID_DW_TX_HW_ENQUEUE",
1064 					"RTPM_ID_HAL_REO_CMD",
1065 					"RTPM_ID_DP_PRINT_RING_STATS",
1066 					"RTPM_ID_MAX"};
1067 
1068 	return (char *)strings[id];
1069 }
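/*
 * Example (illustrative only): every hif_pm_runtime_get() is expected to be
 * balanced by a hif_pm_runtime_put() using the same debug id, so a leaked
 * reference can be attributed to its module; for some ids (e.g. RTPM_ID_WMI)
 * the put is issued later, from the tx-completion path.
 *
 *	hif_pm_runtime_get(hif_ctx, RTPM_ID_HAL_REO_CMD, false);
 *	... perform the REO command ...
 *	hif_pm_runtime_put(hif_ctx, RTPM_ID_HAL_REO_CMD);
 */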
1070 
1071 /**
1072  * enum hif_ep_vote_type - hif ep vote type
1073  * HIF_EP_VOTE_DP_ACCESS: vote type is specific DP
1074  * HIF_EP_VOTE_NONDP_ACCESS: ep vote for over all access
1075  */
1076 enum hif_ep_vote_type {
1077 	HIF_EP_VOTE_DP_ACCESS,
1078 	HIF_EP_VOTE_NONDP_ACCESS
1079 };
1080 
1081 /**
1082  * enum hif_ep_vote_access - hif ep vote access
1083  * HIF_EP_VOTE_ACCESS_ENABLE: Enable ep voting
1084  * HIF_EP_VOTE_ACCESS_DISABLE: disable ep voting
1085  */
1086 enum hif_ep_vote_access {
1087 	HIF_EP_VOTE_ACCESS_ENABLE,
1088 	HIF_EP_VOTE_ACCESS_DISABLE
1089 };
1090 
1091 /**
1092  * enum hif_pm_link_state - hif link state
1093  * HIF_PM_LINK_STATE_DOWN: hif link state is down
1094  * HIF_PM_LINK_STATE_UP: hif link state is up
1095  */
1096 enum hif_pm_link_state {
1097 	HIF_PM_LINK_STATE_DOWN,
1098 	HIF_PM_LINK_STATE_UP
1099 };
1100 
1101 /**
1102  * enum hif_pm_htc_stats - hif runtime PM stats for HTC layer
1103  * HIF_PM_HTC_STATS_GET_HTT_RESPONSE: PM stats for RTPM GET for HTT packets
1104  *				      with response
1105  * HIF_PM_HTC_STATS_GET_HTT_NO_RESPONSE: PM stats for RTPM GET for HTT packets
1106  *					 with no response
1107  * HIF_PM_HTC_STATS_PUT_HTT_RESPONSE: PM stats for RTPM PUT for HTT packets
1108  *				      with response
1109  * HIF_PM_HTC_STATS_PUT_HTT_NO_RESPONSE: PM stats for RTPM PUT for HTT packets
1110  *					 with no response
1111  * HIF_PM_HTC_STATS_PUT_HTT_ERROR: PM stats for RTPM PUT for failed HTT packets
1112  * HIF_PM_HTC_STATS_PUT_HTC_CLEANUP: PM stats for RTPM PUT during HTC cleanup
1113  */
1114 enum hif_pm_htc_stats {
1115 	HIF_PM_HTC_STATS_GET_HTT_RESPONSE,
1116 	HIF_PM_HTC_STATS_GET_HTT_NO_RESPONSE,
1117 	HIF_PM_HTC_STATS_PUT_HTT_RESPONSE,
1118 	HIF_PM_HTC_STATS_PUT_HTT_NO_RESPONSE,
1119 	HIF_PM_HTC_STATS_PUT_HTT_ERROR,
1120 	HIF_PM_HTC_STATS_PUT_HTC_CLEANUP,
1121 };
1122 
1123 #ifdef FEATURE_RUNTIME_PM
1124 struct hif_pm_runtime_lock;
1125 
1126 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
1127 int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
1128 			    wlan_rtpm_dbgid rtpm_dbgid);
1129 int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
1130 				    wlan_rtpm_dbgid rtpm_dbgid);
1131 int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx);
1132 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx,
1133 		       wlan_rtpm_dbgid rtpm_dbgid,
1134 		       bool is_critical_ctx);
1135 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
1136 				 wlan_rtpm_dbgid rtpm_dbgid);
1137 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx,
1138 		       wlan_rtpm_dbgid rtpm_dbgid);
1139 int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
1140 			      wlan_rtpm_dbgid rtpm_dbgid);
1141 void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx);
1142 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
1143 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
1144 			struct hif_pm_runtime_lock *lock);
1145 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
1146 		struct hif_pm_runtime_lock *lock);
1147 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
1148 		struct hif_pm_runtime_lock *lock);
1149 bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx);
1150 void hif_pm_runtime_suspend_lock(struct hif_opaque_softc *hif_ctx);
1151 void hif_pm_runtime_suspend_unlock(struct hif_opaque_softc *hif_ctx);
1152 int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx);
1153 void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
1154 					  int val);
1155 void hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx);
1156 void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
1157 int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
1158 qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx);
1159 int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx);
1160 void hif_pm_runtime_update_stats(struct hif_opaque_softc *hif_ctx,
1161 				 wlan_rtpm_dbgid rtpm_dbgid,
1162 				 enum hif_pm_htc_stats stats);
1163 
1164 /**
1165  * hif_pm_set_link_state() - set link state during RTPM
1166  * @hif_handle: HIF Context
1167  * @val: link state to set
1167  *
1168  * Return: None
1169  */
1170 void hif_pm_set_link_state(struct hif_opaque_softc *hif_handle, uint8_t val);
1171 
1172 /**
1173  * hif_pm_get_link_state() - Is the link state up
1174  * @hif_handle: HIF Context
1175  *
1176  * Return: 1 link is up, 0 link is down
1177  */
1178 uint8_t hif_pm_get_link_state(struct hif_opaque_softc *hif_handle);
1179 #else
1180 struct hif_pm_runtime_lock {
1181 	const char *name;
1182 };
1183 static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
1184 static inline int
1185 hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
1186 			wlan_rtpm_dbgid rtpm_dbgid)
1187 { return 0; }
1188 static inline int
1189 hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
1190 				wlan_rtpm_dbgid rtpm_dbgid)
1191 { return 0; }
1192 static inline int
1193 hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
1194 { return 0; }
1195 static inline void
1196 hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
1197 			    wlan_rtpm_dbgid rtpm_dbgid)
1198 {}
1199 
1200 static inline int
1201 hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx, wlan_rtpm_dbgid rtpm_dbgid,
1202 		   bool is_critical_ctx)
1203 { return 0; }
1204 static inline int
1205 hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx, wlan_rtpm_dbgid rtpm_dbgid)
1206 { return 0; }
1207 static inline int
1208 hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
1209 			  wlan_rtpm_dbgid rtpm_dbgid)
1210 { return 0; }
1211 static inline void
1212 hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx) {};
1213 static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
1214 					const char *name)
1215 { return 0; }
1216 static inline void
1217 hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
1218 			struct hif_pm_runtime_lock *lock) {}
1219 
1220 static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
1221 		struct hif_pm_runtime_lock *lock)
1222 { return 0; }
1223 static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
1224 		struct hif_pm_runtime_lock *lock)
1225 { return 0; }
1226 static inline bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
1227 { return false; }
1228 static inline void
1229 hif_pm_runtime_suspend_lock(struct hif_opaque_softc *hif_ctx)
1230 { return; }
1231 static inline void
1232 hif_pm_runtime_suspend_unlock(struct hif_opaque_softc *hif_ctx)
1233 { return; }
1234 static inline int
1235 hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
1236 { return 0; }
1237 static inline void
1238 hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx, int val)
1239 { return; }
1240 static inline void
1241 hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx)
1242 { return; }
1243 static inline void
1244 hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx) {};
1245 static inline int
1246 hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
1247 { return 0; }
1248 static inline qdf_time_t
1249 hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
1250 { return 0; }
1251 static inline int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx)
1252 { return 0; }
1253 static inline
1254 void hif_pm_set_link_state(struct hif_opaque_softc *hif_handle, uint8_t val)
1255 {}
1256 
1257 static inline
1258 void hif_pm_runtime_update_stats(struct hif_opaque_softc *hif_ctx,
1259 				 wlan_rtpm_dbgid rtpm_dbgid,
1260 				 enum hif_pm_htc_stats stats)
1261 {}
1262 #endif
1263 
1264 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
1265 				 bool is_packet_log_enabled);
1266 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
1267 
1268 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
1269 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
1270 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
1271 
1272 #ifdef IPA_OFFLOAD
1273 /**
1274  * hif_get_ipa_hw_type() - get IPA hw type
1275  *
1276  * This API returns the IPA hw type.
1277  *
1278  * Return: IPA hw type
1279  */
1280 static inline
1281 enum ipa_hw_type hif_get_ipa_hw_type(void)
1282 {
1283 	return ipa_get_hw_type();
1284 }
1285 
1286 /**
1287  * hif_get_ipa_present() - get IPA hw status
1288  *
1289  * This API returns the IPA hw status.
1290  *
1291  * Return: true if IPA is present or false otherwise
1292  */
1293 static inline
1294 bool hif_get_ipa_present(void)
1295 {
1296 	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
1297 		return true;
1298 	else
1299 		return false;
1300 }
1301 #endif
1302 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
1303 /**
1304  * hif_bus_early_suspend() - stop non wmi tx traffic
1305  * @hif_ctx: hif context
1306  */
1307 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
1308 
1309 /**
1310  * hif_bus_late_resume() - resume non wmi traffic
1311  * @hif_ctx: hif context
1312  */
1313 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
1314 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
1315 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
1316 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
1317 
1318 /**
1319  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
1320  * @hif_ctx: an opaque HIF handle to use
1321  *
1322  * As opposed to the standard hif_irq_enable, this function always applies to
1323  * the APPS side kernel interrupt handling.
1324  *
1325  * Return: errno
1326  */
1327 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
1328 
1329 /**
1330  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
1331  * @hif_ctx: an opaque HIF handle to use
1332  *
1333  * As opposed to the standard hif_irq_disable, this function always applies to
1334  * the APPS side kernel interrupt handling.
1335  *
1336  * Return: errno
1337  */
1338 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
1339 
1340 /**
1341  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
1342  * @hif_ctx: an opaque HIF handle to use
1343  *
1344  * As opposed to the standard hif_irq_enable, this function always applies to
1345  * the APPS side kernel interrupt handling.
1346  *
1347  * Return: errno
1348  */
1349 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
1350 
1351 /**
1352  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
1353  * @hif_ctx: an opaque HIF handle to use
1354  *
1355  * As opposed to the standard hif_irq_disable, this function always applies to
1356  * the APPS side kernel interrupt handling.
1357  *
1358  * Return: errno
1359  */
1360 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
1361 
1362 /**
1363  * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side
1364  * @hif_ctx: an opaque HIF handle to use
1365  *
1366  * This function always applies to the APPS side kernel interrupt handling
1367  * to wake the system from suspend.
1368  *
1369  * Return: errno
1370  */
1371 int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx);
1372 
1373 /**
1374  * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side
1375  * @hif_ctx: an opaque HIF handle to use
1376  *
1377  * This function always applies to the APPS side kernel interrupt handling
1378  * to disable the wake irq.
1379  *
1380  * Return: errno
1381  */
1382 int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx);
1383 
1384 /**
1385  * hif_apps_enable_irqs_except_wake_irq() - Enables all irqs except wake_irq
1386  * @hif_ctx: an opaque HIF handle to use
1387  *
1388  * As opposed to the standard hif_irq_enable, this function always applies to
1389  * the APPS side kernel interrupt handling.
1390  *
1391  * Return: errno
1392  */
1393 int hif_apps_enable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1394 
1395 /**
1396  * hif_apps_disable_irqs_except_wake_irq() - Disables all irqs except wake_irq
1397  * @hif_ctx: an opaque HIF handle to use
1398  *
1399  * As opposed to the standard hif_irq_disable, this function always applies to
1400  * the APPS side kernel interrupt handling.
1401  *
1402  * Return: errno
1403  */
1404 int hif_apps_disable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1405 
1406 #ifdef FEATURE_RUNTIME_PM
1407 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1408 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
1409 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1410 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
1411 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
1412 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
1413 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
1414 #endif
1415 
1416 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
1417 int hif_dump_registers(struct hif_opaque_softc *scn);
1418 int ol_copy_ramdump(struct hif_opaque_softc *scn);
1419 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
1420 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1421 		     u32 *revision, const char **target_name);
1422 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
1423 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
1424 						   scn);
1425 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
1426 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
1427 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
1428 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1429 			   hif_target_status);
1430 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1431 			 struct hif_config_info *cfg);
1432 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
1433 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1434 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
1435 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1436 			   uint32_t transfer_id, u_int32_t len);
1437 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
1438 	uint32_t transfer_id, uint32_t download_len);
1439 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
1440 void hif_ce_war_disable(void);
1441 void hif_ce_war_enable(void);
1442 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
1443 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
1444 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
1445 		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
1446 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
1447 		uint32_t pipe_num);
1448 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
1449 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
1450 
1451 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
1452 				int rx_bundle_cnt);
1453 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
1454 
1455 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
1456 
1457 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
1458 
1459 enum hif_exec_type {
1460 	HIF_EXEC_NAPI_TYPE,
1461 	HIF_EXEC_TASKLET_TYPE,
1462 };
1463 
1464 typedef uint32_t (*ext_intr_handler)(void *, uint32_t);
1465 
1466 /**
1467  * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
1468  * @softc: hif opaque context owning the exec context
1469  * @id: the id of the interrupt context
1470  *
1471  * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
1472  *         'id' registered with the OS
1473  */
1474 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
1475 				uint8_t id);
1476 
1477 /**
1478  * hif_configure_ext_group_interrupts() - Configure ext group interrupts
1479  * @hif_ctx: hif opaque context
1480  *
1481  * Return: QDF_STATUS
1482  */
1483 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1484 
1485 /**
1486  * hif_register_ext_group() - API to register external group
1487  * interrupt handler.
1488  * @hif_ctx : HIF Context
1489  * @numirq: number of irq's in the group
1490  * @irq: array of irq values
1491  * @handler: callback interrupt handler function
1492  * @cb_ctx: context to be passed to the callback
1493  * @context_name: string name of the interrupt context
1494  * @type: napi vs tasklet
1495  * @scale: scale factor
1494  *
1495  * Return: QDF_STATUS
1496  */
1497 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
1498 				  uint32_t numirq, uint32_t irq[],
1499 				  ext_intr_handler handler,
1500 				  void *cb_ctx, const char *context_name,
1501 				  enum hif_exec_type type, uint32_t scale);
1502 
1503 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
1504 				const char *context_name);
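/*
 * Example (illustrative only): registering a NAPI-style external group with
 * two IRQ lines. The handler must match the ext_intr_handler signature and
 * is assumed here to return the amount of work done; my_grp_handler, my_ctx,
 * irq0 and irq1 are placeholders.
 *
 *	static uint32_t my_grp_handler(void *cb_ctx, uint32_t budget)
 *	{
 *		uint32_t work_done = 0;
 *		... poll the rings owned by this group ...
 *		return work_done;
 *	}
 *
 *	uint32_t irqs[] = { irq0, irq1 };
 *
 *	hif_register_ext_group(hif_ctx, 2, irqs, my_grp_handler, my_ctx,
 *			       "my_grp", HIF_EXEC_NAPI_TYPE, 1);
 */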
1505 
1506 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1507 				u_int8_t pipeid,
1508 				struct hif_msg_callbacks *callbacks);
1509 
1510 /**
1511  * hif_print_napi_stats() - Display HIF NAPI stats
1512  * @hif_ctx - HIF opaque context
1513  *
1514  * Return: None
1515  */
1516 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
1517 
1518 /**
1519  * hif_clear_napi_stats() - clear the NAPI latency stats
1520  * @hif_ctx: the HIF context
1521  *
1522  * Return: None
1523  */
1524 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
1525 
1526 #ifdef __cplusplus
1527 }
1528 #endif
1529 
1530 #ifdef FORCE_WAKE
1531 /**
1532  * hif_force_wake_request() - Function to wake from power collapse
1533  * @handle: HIF opaque handle
1534  *
1535  * Description: API to check whether the device is awake before
1536  * reading/writing registers beyond BAR + 4K. If the device is awake,
1537  * return success; otherwise write '1' to
1538  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG, which interrupts
1539  * the device and wakes up the PCI and MHI within 50ms.
1540  * The device then writes a value to
1541  * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
1542  * handshake and let the host know that it is awake.
1543  *
1544  * Return: zero - success/non-zero - failure
1545  */
1546 int hif_force_wake_request(struct hif_opaque_softc *handle);
1547 
1548 /**
 * hif_force_wake_release() - API to release/reset the SOC wake register
 * so that it stops interrupting the device.
1551  * @handle: HIF opaque handle
1552  *
1553  * Description: API to set the
1554  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
1555  * to release the interrupt line.
1556  *
1557  * Return: zero - success/non-zero - failure
1558  */
1559 int hif_force_wake_release(struct hif_opaque_softc *handle);
1560 #else
1561 static inline
1562 int hif_force_wake_request(struct hif_opaque_softc *handle)
1563 {
1564 	return 0;
1565 }
1566 
1567 static inline
1568 int hif_force_wake_release(struct hif_opaque_softc *handle)
1569 {
1570 	return 0;
1571 }
1572 #endif /* FORCE_WAKE */
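/*
 * Example (illustrative sketch only): callers that access registers beyond
 * BAR + 4K typically bracket the access with the force-wake handshake.
 * The accessor read_reg() below is a hypothetical placeholder for whatever
 * register read routine the caller uses.
 *
 *	if (hif_force_wake_request(hif_hdl))
 *		return -EAGAIN;	// device did not complete the wake handshake
 *
 *	val = read_reg(hif_hdl, offset);
 *
 *	(void)hif_force_wake_release(hif_hdl);	// pair every request with a release
 */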
1573 
1574 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
1575 /**
1576  * hif_prevent_link_low_power_states() - Prevent from going to low power states
 * @hif: HIF opaque context
1578  *
1579  * Return: 0 on success. Error code on failure.
1580  */
1581 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);
1582 
1583 /**
1584  * hif_allow_link_low_power_states() - Allow link to go to low power states
 * @hif: HIF opaque context
1586  *
1587  * Return: None
1588  */
1589 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);
1590 
1591 #else
1592 
1593 static inline
1594 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
1595 {
1596 	return 0;
1597 }
1598 
1599 static inline
1600 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
1601 {
1602 }
1603 #endif
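/*
 * Example (illustrative sketch only): the delayed register write flush
 * path might hold the link out of low power states for the duration of
 * the flush:
 *
 *	if (hif_prevent_link_low_power_states(hif))
 *		return;	// link could not be kept awake, retry later
 *
 *	// ... flush the pending register writes ...
 *
 *	hif_allow_link_low_power_states(hif);
 */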
1604 
1605 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
1606 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle);
1607 
1608 /**
1609  * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
 * @hif_ctx: the HIF context to assign the callback to
 * @callback: the callback to assign
 * @priv: the private data to pass to the callback when invoked
1613  *
1614  * Return: None
1615  */
1616 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
1617 			       void (*callback)(void *),
1618 			       void *priv);
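/*
 * Example (illustrative sketch only): the handler, the my_ctx type and
 * its field below are hypothetical.
 *
 *	static void my_initial_wakeup_handler(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		ctx->initial_wakeup_seen = true;	// hypothetical bookkeeping
 *	}
 *
 *	hif_set_initial_wakeup_cb(hif_ctx, my_initial_wakeup_handler, ctx);
 */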
/*
 * Note: For MCL builds, only #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs
 * to be checked here
 */
1623 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1624 ssize_t hif_dump_desc_trace_buf(struct device *dev,
1625 				struct device_attribute *attr, char *buf);
1626 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1627 					const char *buf, size_t size);
1628 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
1629 				const char *buf, size_t size);
1630 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
1631 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
1632 #endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
1633 
1634 /**
1635  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
1636  * @hif: hif context
1637  * @ce_service_max_yield_time: CE service max yield time to set
1638  *
 * This API stores CE service max yield time in hif context based
1640  * on ini value.
1641  *
1642  * Return: void
1643  */
1644 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
1645 				       uint32_t ce_service_max_yield_time);
1646 
1647 /**
1648  * hif_get_ce_service_max_yield_time() - get CE service max yield time
1649  * @hif: hif context
1650  *
1651  * This API returns CE service max yield time.
1652  *
1653  * Return: CE service max yield time
1654  */
1655 unsigned long long
1656 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
1657 
1658 /**
1659  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
1660  * @hif: hif context
1661  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
1662  *
1663  * This API stores CE service max rx ind flush in hif context based
1664  * on ini value.
1665  *
1666  * Return: void
1667  */
1668 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
1669 					 uint8_t ce_service_max_rx_ind_flush);
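/*
 * Example (illustrative sketch only): both setters are typically driven by
 * ini configuration at attach time; the psoc handle and the CFG_* item
 * names below are hypothetical.
 *
 *	hif_set_ce_service_max_yield_time(hif,
 *			cfg_get(psoc, CFG_CE_SERVICE_MAX_YIELD_TIME));
 *	hif_set_ce_service_max_rx_ind_flush(hif,
 *			cfg_get(psoc, CFG_CE_SERVICE_MAX_RX_IND_FLUSH));
 */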
1670 
1671 #ifdef OL_ATH_SMART_LOGGING
/**
 * hif_log_dump_ce() - Copy the CE ring descriptors and buffers to buf
 * @scn: HIF handle
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy Engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and the buffers they point to into the given buf
1683  *
1684  * Return: Current pointer in ring buffer
1685  */
1686 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
1687 			 uint8_t *buf_init, uint32_t buf_sz,
1688 			 uint32_t ce, uint32_t skb_sz);
1689 #endif /* OL_ATH_SMART_LOGGING */
1690 
/**
 * hif_softc_to_hif_opaque_softc() - API to convert hif_softc handle
 * to hif_opaque_softc handle
 * @hif_handle: hif_softc type
1695  *
1696  * Return: hif_opaque_softc type
1697  */
1698 static inline struct hif_opaque_softc *
1699 hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
1700 {
1701 	return (struct hif_opaque_softc *)hif_handle;
1702 }
1703 
1704 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
1705 QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx);
1706 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx);
1707 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1708 			    uint8_t type, uint8_t access);
1709 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1710 			       uint8_t type);
1711 #else
1712 static inline QDF_STATUS
1713 hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1714 {
1715 	return QDF_STATUS_SUCCESS;
1716 }
1717 
1718 static inline void
1719 hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1720 {
1721 }
1722 
1723 static inline void
1724 hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1725 		       uint8_t type, uint8_t access)
1726 {
1727 }
1728 
1729 static inline uint8_t
1730 hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1731 		       uint8_t type)
1732 {
1733 	return HIF_EP_VOTE_ACCESS_ENABLE;
1734 }
1735 #endif
1736 
1737 #ifdef FORCE_WAKE
1738 /**
 * hif_srng_init_phase() - Indicate SRNG initialization phase
 * to avoid force wake, as UMAC power collapse is not yet
 * enabled
1742  * @hif_ctx: hif opaque handle
1743  * @init_phase: initialization phase
1744  *
1745  * Return:  None
1746  */
1747 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
1748 			 bool init_phase);
1749 #else
1750 static inline
1751 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
1752 			 bool init_phase)
1753 {
1754 }
1755 #endif /* FORCE_WAKE */
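/*
 * Example (illustrative sketch only): bus layers typically bracket SRNG
 * setup so that register writes during initialization skip the force-wake
 * path:
 *
 *	hif_srng_init_phase(hif_ctx, true);
 *	// ... configure the SRNG rings ...
 *	hif_srng_init_phase(hif_ctx, false);
 */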
1756 
1757 #ifdef HIF_IPCI
1758 /**
 * hif_shutdown_notifier_cb() - Callback for shutdown notifier
1760  * @ctx: hif handle
1761  *
1762  * Return:  None
1763  */
1764 void hif_shutdown_notifier_cb(void *ctx);
1765 #else
1766 static inline
1767 void hif_shutdown_notifier_cb(void *ctx)
1768 {
1769 }
1770 #endif /* HIF_IPCI */
1771 
1772 #ifdef HIF_CE_LOG_INFO
1773 /**
1774  * hif_log_ce_info() - API to log ce info
1775  * @scn: hif handle
1776  * @data: hang event data buffer
1777  * @offset: offset at which data needs to be written
1778  *
1779  * Return:  None
1780  */
1781 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
1782 		     unsigned int *offset);
1783 #else
1784 static inline
1785 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
1786 		     unsigned int *offset)
1787 {
1788 }
1789 #endif
1790 
1791 #ifdef HIF_CPU_PERF_AFFINE_MASK
1792 /**
1793  * hif_config_irq_set_perf_affinity_hint() - API to set affinity
1794  * @hif_ctx: hif opaque handle
1795  *
1796  * This function is used to move the WLAN IRQs to perf cores in
1797  * case of defconfig builds.
1798  *
1799  * Return:  None
1800  */
1801 void hif_config_irq_set_perf_affinity_hint(
1802 	struct hif_opaque_softc *hif_ctx);
1803 
1804 #else
1805 static inline void hif_config_irq_set_perf_affinity_hint(
1806 	struct hif_opaque_softc *hif_ctx)
1807 {
1808 }
1809 #endif
1810 
1811 /**
1812  * hif_apps_grp_irqs_enable() - enable ext grp irqs
 * @hif_ctx: HIF opaque context
1814  *
1815  * Return: 0 on success. Error code on failure.
1816  */
1817 int hif_apps_grp_irqs_enable(struct hif_opaque_softc *hif_ctx);
1818 
1819 /**
1820  * hif_apps_grp_irqs_disable() - disable ext grp irqs
 * @hif_ctx: HIF opaque context
1822  *
1823  * Return: 0 on success. Error code on failure.
1824  */
1825 int hif_apps_grp_irqs_disable(struct hif_opaque_softc *hif_ctx);
1826 
1827 /**
1828  * hif_disable_grp_irqs() - disable ext grp irqs
 * @scn: HIF opaque context
1830  *
1831  * Return: 0 on success. Error code on failure.
1832  */
1833 int hif_disable_grp_irqs(struct hif_opaque_softc *scn);
1834 
1835 /**
1836  * hif_enable_grp_irqs() - enable ext grp irqs
 * @scn: HIF opaque context
1838  *
1839  * Return: 0 on success. Error code on failure.
1840  */
1841 int hif_enable_grp_irqs(struct hif_opaque_softc *scn);
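/*
 * Example (illustrative sketch only): a suspend path might quiesce the
 * ext group interrupts and restore them on resume:
 *
 *	if (hif_disable_grp_irqs(scn))
 *		return -EBUSY;	// could not quiesce the group irqs
 *
 *	// ... carry out the suspend work ...
 *
 *	if (hif_enable_grp_irqs(scn))
 *		return -EINVAL;	// re-enable failed; caller decides how to recover
 */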
1842 
1843 enum hif_credit_exchange_type {
1844 	HIF_REQUEST_CREDIT,
1845 	HIF_PROCESS_CREDIT_REPORT,
1846 };
1847 
1848 enum hif_detect_latency_type {
1849 	HIF_DETECT_TASKLET,
1850 	HIF_DETECT_CREDIT,
1851 	HIF_DETECT_UNKNOWN
1852 };
1853 
1854 #ifdef HIF_DETECTION_LATENCY_ENABLE
1855 void hif_latency_detect_credit_record_time(
1856 	enum hif_credit_exchange_type type,
1857 	struct hif_opaque_softc *hif_ctx);
1858 
1859 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx);
1860 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx);
1861 void hif_check_detection_latency(struct hif_softc *scn,
1862 				 bool from_timer,
1863 				 uint32_t bitmap_type);
1864 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value);
1865 #else
1866 static inline
1867 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
1868 {}
1869 
1870 static inline
1871 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
1872 {}
1873 
1874 static inline
1875 void hif_latency_detect_credit_record_time(
1876 	enum hif_credit_exchange_type type,
1877 	struct hif_opaque_softc *hif_ctx)
1878 {}
1879 static inline
1880 void hif_check_detection_latency(struct hif_softc *scn,
1881 				 bool from_timer,
1882 				 uint32_t bitmap_type)
1883 {}
1884 
1885 static inline
1886 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
1887 {}
1888 #endif
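/*
 * Example (illustrative sketch only): a credit-based TX path might feed
 * the latency detector around a credit request/response exchange:
 *
 *	hif_latency_detect_credit_record_time(HIF_REQUEST_CREDIT, hif_ctx);
 *	// ... send the credit request to the target ...
 *
 *	// later, when the credit report arrives:
 *	hif_latency_detect_credit_record_time(HIF_PROCESS_CREDIT_REPORT,
 *					      hif_ctx);
 */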
1889 
1890 #ifdef SYSTEM_PM_CHECK
1891 /**
1892  * __hif_system_pm_set_state() - Set system pm state
1893  * @hif: hif opaque handle
1894  * @state: system state
1895  *
1896  * Return:  None
1897  */
1898 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
1899 			       enum hif_system_pm_state state);
1900 
1901 /**
1902  * hif_system_pm_set_state_on() - Set system pm state to ON
1903  * @hif: hif opaque handle
1904  *
1905  * Return:  None
1906  */
1907 static inline
1908 void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
1909 {
1910 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_ON);
1911 }
1912 
1913 /**
1914  * hif_system_pm_set_state_resuming() - Set system pm state to resuming
1915  * @hif: hif opaque handle
1916  *
1917  * Return:  None
1918  */
1919 static inline
1920 void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
1921 {
1922 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_RESUMING);
1923 }
1924 
1925 /**
1926  * hif_system_pm_set_state_suspending() - Set system pm state to suspending
1927  * @hif: hif opaque handle
1928  *
1929  * Return:  None
1930  */
1931 static inline
1932 void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
1933 {
1934 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDING);
1935 }
1936 
1937 /**
1938  * hif_system_pm_set_state_suspended() - Set system pm state to suspended
1939  * @hif: hif opaque handle
1940  *
1941  * Return:  None
1942  */
1943 static inline
1944 void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
1945 {
1946 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDED);
1947 }
1948 
1949 /**
1950  * hif_system_pm_get_state() - Get system pm state
1951  * @hif: hif opaque handle
1952  *
1953  * Return:  system state
1954  */
1955 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif);
1956 
1957 /**
1958  * hif_system_pm_state_check() - Check system state and trigger resume
1959  *  if required
1960  * @hif: hif opaque handle
1961  *
1962  * Return: 0 if system is in on state else error code
1963  */
1964 int hif_system_pm_state_check(struct hif_opaque_softc *hif);
1965 #else
1966 static inline
1967 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
1968 			       enum hif_system_pm_state state)
1969 {
1970 }
1971 
1972 static inline
1973 void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
1974 {
1975 }
1976 
1977 static inline
1978 void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
1979 {
1980 }
1981 
1982 static inline
1983 void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
1984 {
1985 }
1986 
1987 static inline
1988 void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
1989 {
1990 }
1991 
1992 static inline
1993 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
1994 {
1995 	return 0;
1996 }
1997 
1998 static inline int hif_system_pm_state_check(struct hif_opaque_softc *hif)
1999 {
2000 	return 0;
2001 }
2002 #endif
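/*
 * Example (illustrative sketch only): bus-facing transmit or register
 * access paths may gate device access on the system PM state; a non-zero
 * return means the bus is not in the ON state and a resume has been
 * requested, so the caller should defer the access:
 *
 *	if (hif_system_pm_state_check(hif))
 *		return -EAGAIN;	// bus suspended/resuming, retry later
 *
 *	// safe to access the device
 */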
2003 #endif /* _HIF_H_ */
2004