xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision dcd269c55a5ec9cd1c96e0c8ebb0250f38f4a946)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _HIF_H_
20 #define _HIF_H_
21 
22 #ifdef __cplusplus
23 extern "C" {
24 #endif /* __cplusplus */
25 
26 /* Header files */
27 #include <qdf_status.h>
28 #include "qdf_nbuf.h"
29 #include "qdf_lro.h"
30 #include "ol_if_athvar.h"
31 #include <linux/platform_device.h>
32 #ifdef HIF_PCI
33 #include <linux/pci.h>
34 #endif /* HIF_PCI */
35 #ifdef HIF_USB
36 #include <linux/usb.h>
37 #endif /* HIF_USB */
38 #ifdef IPA_OFFLOAD
39 #include <linux/ipa.h>
40 #endif
41 #include "cfg_ucfg_api.h"
42 #include "qdf_dev.h"
43 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
44 
45 typedef void __iomem *A_target_id_t;
46 typedef void *hif_handle_t;
47 
48 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
49 #define HIF_WORK_DRAIN_WAIT_CNT 10
50 #endif
51 
52 #define HIF_TYPE_AR6002   2
53 #define HIF_TYPE_AR6003   3
54 #define HIF_TYPE_AR6004   5
55 #define HIF_TYPE_AR9888   6
56 #define HIF_TYPE_AR6320   7
57 #define HIF_TYPE_AR6320V2 8
58 /* For attaching Peregrine 2.0 board host_reg_tbl only */
59 #define HIF_TYPE_AR9888V2 9
60 #define HIF_TYPE_ADRASTEA 10
61 #define HIF_TYPE_AR900B 11
62 #define HIF_TYPE_QCA9984 12
63 #define HIF_TYPE_IPQ4019 13
64 #define HIF_TYPE_QCA9888 14
65 #define HIF_TYPE_QCA8074 15
66 #define HIF_TYPE_QCA6290 16
67 #define HIF_TYPE_QCN7605 17
68 #define HIF_TYPE_QCA6390 18
69 #define HIF_TYPE_QCA8074V2 19
70 #define HIF_TYPE_QCA6018  20
71 #define HIF_TYPE_QCN9000 21
72 #define HIF_TYPE_QCA6490 22
73 #define HIF_TYPE_QCA6750 23
74 #define HIF_TYPE_QCA5018 24
75 #define HIF_TYPE_QCN6122 25
76 #define HIF_TYPE_WCN7850 26
77 #define HIF_TYPE_QCN9224 27
78 #define HIF_TYPE_QCA9574 28
79 
80 #define DMA_COHERENT_MASK_DEFAULT   37
81 
82 #ifdef IPA_OFFLOAD
83 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
84 #endif
85 
86 /* enum hif_ic_irq - integrated chip irq numbers
87  * defining irq numbers that can be used by external modules like datapath
88  */
89 enum hif_ic_irq {
90 	host2wbm_desc_feed = 16,
91 	host2reo_re_injection,
92 	host2reo_command,
93 	host2rxdma_monitor_ring3,
94 	host2rxdma_monitor_ring2,
95 	host2rxdma_monitor_ring1,
96 	reo2host_exception,
97 	wbm2host_rx_release,
98 	reo2host_status,
99 	reo2host_destination_ring4,
100 	reo2host_destination_ring3,
101 	reo2host_destination_ring2,
102 	reo2host_destination_ring1,
103 	rxdma2host_monitor_destination_mac3,
104 	rxdma2host_monitor_destination_mac2,
105 	rxdma2host_monitor_destination_mac1,
106 	ppdu_end_interrupts_mac3,
107 	ppdu_end_interrupts_mac2,
108 	ppdu_end_interrupts_mac1,
109 	rxdma2host_monitor_status_ring_mac3,
110 	rxdma2host_monitor_status_ring_mac2,
111 	rxdma2host_monitor_status_ring_mac1,
112 	host2rxdma_host_buf_ring_mac3,
113 	host2rxdma_host_buf_ring_mac2,
114 	host2rxdma_host_buf_ring_mac1,
115 	rxdma2host_destination_ring_mac3,
116 	rxdma2host_destination_ring_mac2,
117 	rxdma2host_destination_ring_mac1,
118 	host2tcl_input_ring4,
119 	host2tcl_input_ring3,
120 	host2tcl_input_ring2,
121 	host2tcl_input_ring1,
122 	wbm2host_tx_completions_ring3,
123 	wbm2host_tx_completions_ring2,
124 	wbm2host_tx_completions_ring1,
125 	tcl2host_status_ring,
126 };
127 
128 struct CE_state;
129 #ifdef QCA_WIFI_QCN9224
130 #define CE_COUNT_MAX 16
131 #else
132 #define CE_COUNT_MAX 12
133 #endif
134 #define HIF_MAX_GRP_IRQ 16
135 
136 #ifndef HIF_MAX_GROUP
137 #define HIF_MAX_GROUP 7
138 #endif
139 
140 #ifndef NAPI_YIELD_BUDGET_BASED
141 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
142 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
143 #endif
144 #else  /* NAPI_YIELD_BUDGET_BASED */
145 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
146 #endif /* NAPI_YIELD_BUDGET_BASED */
147 
148 #define QCA_NAPI_BUDGET    64
149 #define QCA_NAPI_DEF_SCALE  \
150 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
151 
152 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
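
/*
 * Worked example (values follow from the macros above): with the default
 * QCA_NAPI_DEF_SCALE_BIN_SHIFT of 4, QCA_NAPI_DEF_SCALE is 1 << 4 = 16 and
 * HIF_NAPI_MAX_RECEIVES is 64 * 16 = 1024 packets per invocation. When
 * NAPI_YIELD_BUDGET_BASED is defined the shift is 2, giving a scale of 4 and
 * a cap of 64 * 4 = 256.
 */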
153 /* NOTE: "napi->scale" can be changed,
154  * but this does not change the number of buckets
155  */
156 #define QCA_NAPI_NUM_BUCKETS 4
157 
158 /**
159  * struct qca_napi_stat - stats structure for execution contexts
160  * @napi_schedules: number of times the schedule function is called
161  * @napi_polls: number of times the execution context runs
162  * @napi_completes: number of times that the generating interrupt is re-enabled
163  * @napi_workdone: cumulative work done reported by the handler
164  * @cpu_corrected: incremented when the execution context runs on a different
165  *			core than the one its irq is affined to
166  * @napi_budget_uses: histogram of work done per execution run
167  * @time_limit_reached: count of yields due to time limit thresholds
168  * @rxpkt_thresh_reached: count of yields due to a work limit
169  * @napi_max_poll_time: maximum single poll time recorded
170  * @poll_time_buckets: histogram of poll times for the napi
171  */
172 struct qca_napi_stat {
173 	uint32_t napi_schedules;
174 	uint32_t napi_polls;
175 	uint32_t napi_completes;
176 	uint32_t napi_workdone;
177 	uint32_t cpu_corrected;
178 	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
179 	uint32_t time_limit_reached;
180 	uint32_t rxpkt_thresh_reached;
181 	unsigned long long napi_max_poll_time;
182 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
183 	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
184 #endif
185 };
186 
187 
188 /**
189  * struct qca_napi_info - per NAPI instance data structure
190  * This data structure holds the state of one NAPI instance.
191  * Note that, in the current implementation, though scale is
192  * an instance variable, it is set to the same value for all
193  * instances.
194  */
195 struct qca_napi_info {
196 	struct net_device    netdev; /* dummy net_dev */
197 	void                 *hif_ctx;
198 	struct napi_struct   napi;
199 	uint8_t              scale;   /* currently same on all instances */
200 	uint8_t              id;
201 	uint8_t              cpu;
202 	int                  irq;
203 	cpumask_t            cpumask;
204 	struct qca_napi_stat stats[NR_CPUS];
205 #ifdef RECEIVE_OFFLOAD
206 	/* will only be present for data rx CE's */
207 	void (*offld_flush_cb)(void *);
208 	struct napi_struct   rx_thread_napi;
209 	struct net_device    rx_thread_netdev;
210 #endif /* RECEIVE_OFFLOAD */
211 	qdf_lro_ctx_t        lro_ctx;
212 };
213 
214 enum qca_napi_tput_state {
215 	QCA_NAPI_TPUT_UNINITIALIZED,
216 	QCA_NAPI_TPUT_LO,
217 	QCA_NAPI_TPUT_HI
218 };
219 enum qca_napi_cpu_state {
220 	QCA_NAPI_CPU_UNINITIALIZED,
221 	QCA_NAPI_CPU_DOWN,
222 	QCA_NAPI_CPU_UP };
223 
224 /**
225  * struct qca_napi_cpu - an entry of the napi cpu table
226  * @core_id:     physical core id of the core
227  * @cluster_id:  cluster this core belongs to
228  * @core_mask:   mask to match all cores of this cluster
229  * @thread_mask: mask for this core within the cluster
230  * @max_freq:    maximum clock this core can be clocked at;
231  *               same for all cpus of the same core
232  * @napis:       bitmap of napi instances on this core
233  * @execs:       bitmap of execution contexts on this core
234  * @cluster_nxt: chain to link cores within the same cluster
235  *
236  * This structure represents a single entry in the napi cpu
237  * table. The table is part of struct qca_napi_data.
238  * This table is initialized by the init function, called while
239  * the first napi instance is being created, updated by hotplug
240  * notifier and when cpu affinity decisions are made (by throughput
241  * detection), and deleted when the last napi instance is removed.
242  */
243 struct qca_napi_cpu {
244 	enum qca_napi_cpu_state state;
245 	int			core_id;
246 	int			cluster_id;
247 	cpumask_t		core_mask;
248 	cpumask_t		thread_mask;
249 	unsigned int		max_freq;
250 	uint32_t		napis;
251 	uint32_t		execs;
252 	int			cluster_nxt;  /* index, not pointer */
253 };
254 
255 /**
256  * struct qca_napi_data - collection of napi data for a single hif context
257  * @hif_softc: pointer to the hif context
258  * @lock: spinlock used in the event state machine
259  * @state: state variable used in the napi state machine
260  * @ce_map: bit map indicating which ce's have napis running
261  * @exec_map: bit map of instantiated exec contexts
262  * @user_cpu_affin_mask: CPU affinity mask from INI config
263  * @napi_cpu: cpu info for irq affinity
264  * @lilcl_head: head of the little-cluster core list
265  * @bigcl_head: head of the big-cluster core list
266  * @napi_mode: irq affinity & clock voting mode
267  * @cpuhp_handler: CPU hotplug event registration handle
268  */
269 struct qca_napi_data {
270 	struct               hif_softc *hif_softc;
271 	qdf_spinlock_t       lock;
272 	uint32_t             state;
273 
274 	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
275 	 * not used by clients (clients use an id returned by create)
276 	 */
277 	uint32_t             ce_map;
278 	uint32_t             exec_map;
279 	uint32_t             user_cpu_affin_mask;
280 	struct qca_napi_info *napis[CE_COUNT_MAX];
281 	struct qca_napi_cpu  napi_cpu[NR_CPUS];
282 	int                  lilcl_head, bigcl_head;
283 	enum qca_napi_tput_state napi_mode;
284 	struct qdf_cpuhp_handler *cpuhp_handler;
285 	uint8_t              flags;
286 };
287 
288 /**
289  * struct hif_config_info - Placeholder for HIF configuration
290  * @enable_self_recovery: Self Recovery
291  * @enable_runtime_pm: Enable Runtime PM
292  * @runtime_pm_delay: Runtime PM Delay
293  * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
294  *
295  * Structure for holding HIF ini parameters.
296  */
297 struct hif_config_info {
298 	bool enable_self_recovery;
299 #ifdef FEATURE_RUNTIME_PM
300 	uint8_t enable_runtime_pm;
301 	u_int32_t runtime_pm_delay;
302 #endif
303 	uint64_t rx_softirq_max_yield_duration_ns;
304 };
305 
306 /**
307  * struct hif_target_info - Target Information
308  * @target_version: Target Version
309  * @target_type: Target Type
310  * @target_revision: Target Revision
311  * @soc_version: SOC Version
312  * @hw_name: pointer to hardware name
313  *
314  * Structure to hold target information.
315  */
316 struct hif_target_info {
317 	uint32_t target_version;
318 	uint32_t target_type;
319 	uint32_t target_revision;
320 	uint32_t soc_version;
321 	char *hw_name;
322 };
323 
324 struct hif_opaque_softc {
325 };
326 
327 /**
328  * enum hif_event_type - Type of DP events to be recorded
329  * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
330  * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event
331  * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event
332  * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
333  * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
334  * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
335  */
336 enum hif_event_type {
337 	HIF_EVENT_IRQ_TRIGGER,
338 	HIF_EVENT_TIMER_ENTRY,
339 	HIF_EVENT_TIMER_EXIT,
340 	HIF_EVENT_BH_SCHED,
341 	HIF_EVENT_SRNG_ACCESS_START,
342 	HIF_EVENT_SRNG_ACCESS_END,
343 	/* Do check hif_hist_skip_event_record when adding new events */
344 };
345 
346 /**
347  * enum hif_system_pm_state - System PM state
348  * @HIF_SYSTEM_PM_STATE_ON: System in active state
349  * @HIF_SYSTEM_PM_STATE_BUS_RESUMING: bus resume in progress as part of
350  *  system resume
351  * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDING: bus suspend in progress as part of
352  *  system suspend
353  * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDED: bus suspended as part of system suspend
354  */
355 enum hif_system_pm_state {
356 	HIF_SYSTEM_PM_STATE_ON,
357 	HIF_SYSTEM_PM_STATE_BUS_RESUMING,
358 	HIF_SYSTEM_PM_STATE_BUS_SUSPENDING,
359 	HIF_SYSTEM_PM_STATE_BUS_SUSPENDED,
360 };
361 
362 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
363 
364 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
365 /* HIF_EVENT_HIST_MAX should always be power of 2 */
366 #define HIF_EVENT_HIST_MAX		512
367 #define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP
368 #define HIF_EVENT_HIST_ENABLE_MASK	0x3F
369 
370 static inline uint64_t hif_get_log_timestamp(void)
371 {
372 	return qdf_get_log_timestamp();
373 }
374 
375 #else
376 
377 #define HIF_EVENT_HIST_MAX		32
378 #define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP
379 /* Enable IRQ TRIGGER, NAPI SCHEDULE, SRNG ACCESS START */
380 #define HIF_EVENT_HIST_ENABLE_MASK	0x19
381 
382 static inline uint64_t hif_get_log_timestamp(void)
383 {
384 	return qdf_sched_clock();
385 }
386 
387 #endif
388 
389 /**
390  * struct hif_event_record - an entry of the DP event history
391  * @hal_ring_id: ring id for which event is recorded
392  * @hp: head pointer of the ring (may not be applicable for all events)
393  * @tp: tail pointer of the ring (may not be applicable for all events)
394  * @cpu_id: cpu id on which the event occurred
395  * @timestamp: timestamp when event occurred
396  * @type: type of the event
397  *
398  * This structure represents the information stored for every datapath
399  * event which is logged in the history.
400  */
401 struct hif_event_record {
402 	uint8_t hal_ring_id;
403 	uint32_t hp;
404 	uint32_t tp;
405 	int cpu_id;
406 	uint64_t timestamp;
407 	enum hif_event_type type;
408 };
409 
410 /**
411  * struct hif_event_misc - history related misc info
412  * @last_irq_index: last irq event index in history
413  * @last_irq_ts: last irq timestamp
414  */
415 struct hif_event_misc {
416 	int32_t last_irq_index;
417 	uint64_t last_irq_ts;
418 };
419 
420 /**
421  * struct hif_event_history - history for one interrupt group
422  * @index: index to store new event
423  * @event: event entry
424  *
425  * This structure represents the datapath history for one
426  * interrupt group.
427  */
428 struct hif_event_history {
429 	qdf_atomic_t index;
430 	struct hif_event_misc misc;
431 	struct hif_event_record event[HIF_EVENT_HIST_MAX];
432 };
433 
434 /**
435  * hif_hist_record_event() - Record one datapath event in history
436  * @hif_ctx: HIF opaque context
437  * @event: DP event entry
438  * @intr_grp_id: interrupt group ID registered with hif
439  *
440  * Return: None
441  */
442 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
443 			   struct hif_event_record *event,
444 			   uint8_t intr_grp_id);
445 
446 /**
447  * hif_event_history_init() - Initialize SRNG event history buffers
448  * @hif_ctx: HIF opaque context
449  * @id: context group ID for which history is recorded
450  *
451  * Returns: None
452  */
453 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id);
454 
455 /**
456  * hif_event_history_deinit() - De-initialize SRNG event history buffers
457  * @hif_ctx: HIF opaque context
458  * @id: context group ID for which history is recorded
459  *
460  * Returns: None
461  */
462 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id);
463 
464 /**
465  * hif_record_event() - Wrapper function to form and record DP event
466  * @hif_ctx: HIF opaque context
467  * @intr_grp_id: interrupt group ID registered with hif
468  * @hal_ring_id: ring id for which event is recorded
469  * @hp: head pointer index of the srng
470  * @tp: tail pointer index of the srng
471  * @type: type of the event to be logged in history
472  *
473  * Return: None
474  */
475 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
476 				    uint8_t intr_grp_id,
477 				    uint8_t hal_ring_id,
478 				    uint32_t hp,
479 				    uint32_t tp,
480 				    enum hif_event_type type)
481 {
482 	struct hif_event_record event;
483 
484 	event.hal_ring_id = hal_ring_id;
485 	event.hp = hp;
486 	event.tp = tp;
487 	event.type = type;
488 
489 	hif_hist_record_event(hif_ctx, &event, intr_grp_id);
492 }
493 
494 #else
495 
496 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
497 				    uint8_t intr_grp_id,
498 				    uint8_t hal_ring_id,
499 				    uint32_t hp,
500 				    uint32_t tp,
501 				    enum hif_event_type type)
502 {
503 }
504 
505 static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx,
506 					  uint8_t id)
507 {
508 }
509 
510 static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx,
511 					    uint8_t id)
512 {
513 }
514 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
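
/*
 * Illustrative sketch (not part of the API contract): a datapath handler
 * could bracket its ring processing with hif_record_event() so that head and
 * tail pointers appear in the DP event history. grp_id, ring_id, hp and tp
 * below are placeholders supplied by the caller.
 *
 *	hif_record_event(hif_ctx, grp_id, ring_id, hp, tp,
 *			 HIF_EVENT_SRNG_ACCESS_START);
 *	... process ring entries ...
 *	hif_record_event(hif_ctx, grp_id, ring_id, hp, tp,
 *			 HIF_EVENT_SRNG_ACCESS_END);
 */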
515 
516 /**
517  * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
518  *
519  * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
520  * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
521  *                         minimize power
522  * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
523  *                         platform-specific measures to completely power-off
524  *                         the module and associated hardware (i.e. cut power
525  *                         supplies)
526  */
527 enum HIF_DEVICE_POWER_CHANGE_TYPE {
528 	HIF_DEVICE_POWER_UP,
529 	HIF_DEVICE_POWER_DOWN,
530 	HIF_DEVICE_POWER_CUT
531 };
532 
533 /**
534  * enum hif_enable_type: what triggered the enabling of hif
535  *
536  * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
537  * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
538  */
539 enum hif_enable_type {
540 	HIF_ENABLE_TYPE_PROBE,
541 	HIF_ENABLE_TYPE_REINIT,
542 	HIF_ENABLE_TYPE_MAX
543 };
544 
545 /**
546  * enum hif_disable_type: what triggered the disabling of hif
547  *
548  * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
549  * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
550  * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
551  * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
552  */
553 enum hif_disable_type {
554 	HIF_DISABLE_TYPE_PROBE_ERROR,
555 	HIF_DISABLE_TYPE_REINIT_ERROR,
556 	HIF_DISABLE_TYPE_REMOVE,
557 	HIF_DISABLE_TYPE_SHUTDOWN,
558 	HIF_DISABLE_TYPE_MAX
559 };
560 /**
561  * enum hif_device_config_opcode: configure mode
562  *
563  * @HIF_DEVICE_POWER_STATE: device power state
564  * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
565  * @HIF_DEVICE_GET_FIFO_ADDR: get FIFO address
566  * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
567  * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
568  * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
569  * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
570  * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
571  * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
572  * @HIF_DEVICE_GET_OS_DEVICE: get OS device
573  * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
574  * @HIF_BMI_DONE: bmi done
575  * @HIF_DEVICE_SET_TARGET_TYPE: set target type
576  * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
577  * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
578  */
579 enum hif_device_config_opcode {
580 	HIF_DEVICE_POWER_STATE = 0,
581 	HIF_DEVICE_GET_BLOCK_SIZE,
582 	HIF_DEVICE_GET_FIFO_ADDR,
583 	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
584 	HIF_DEVICE_GET_IRQ_PROC_MODE,
585 	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
586 	HIF_DEVICE_POWER_STATE_CHANGE,
587 	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
588 	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
589 	HIF_DEVICE_GET_OS_DEVICE,
590 	HIF_DEVICE_DEBUG_BUS_STATE,
591 	HIF_BMI_DONE,
592 	HIF_DEVICE_SET_TARGET_TYPE,
593 	HIF_DEVICE_SET_HTC_CONTEXT,
594 	HIF_DEVICE_GET_HTC_CONTEXT,
595 };
596 
597 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
598 struct HID_ACCESS_LOG {
599 	uint32_t seqnum;
600 	bool is_write;
601 	void *addr;
602 	uint32_t value;
603 };
604 #endif
605 
606 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
607 		uint32_t value);
608 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
609 
610 #define HIF_MAX_DEVICES                 1
611 /**
612  * struct htc_callbacks - Structure for HTC callback methods
613  * @context:             context to pass to the dsr_handler
614  *                       note: rw_compl_handler is provided the context
615  *                       passed to hif_read_write
616  * @rw_compl_handler:    Read / write completion handler
617  * @dsr_handler:         DSR Handler
618  */
619 struct htc_callbacks {
620 	void *context;
621 	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
622 	QDF_STATUS(*dsr_handler)(void *context);
623 };
624 
625 /**
626  * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
627  * @context: Private data context
628  * @set_recovery_in_progress: To Set Driver state for recovery in progress
629  * @is_recovery_in_progress: Query if driver state is recovery in progress
630  * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
631  * @is_driver_unloading: Query if driver is unloading
632  * @is_target_ready: Query if the target is ready
633  * @get_bandwidth_level: Query current bandwidth level for the driver
634  * @prealloc_get_consistent_mem_unaligned: get prealloc unaligned consistent mem
635  * @prealloc_put_consistent_mem_unaligned: put unaligned consistent mem to pool
636  * This structure provides callback pointers for HIF to query hdd driver states.
637  */
638 struct hif_driver_state_callbacks {
639 	void *context;
640 	void (*set_recovery_in_progress)(void *context, uint8_t val);
641 	bool (*is_recovery_in_progress)(void *context);
642 	bool (*is_load_unload_in_progress)(void *context);
643 	bool (*is_driver_unloading)(void *context);
644 	bool (*is_target_ready)(void *context);
645 	int (*get_bandwidth_level)(void *context);
646 	void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size,
647 						       qdf_dma_addr_t *paddr,
648 						       uint32_t ring_type);
649 	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
650 };
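
/*
 * Illustrative sketch only: the layer owning driver state (e.g. HDD) would
 * typically populate these callbacks before calling hif_open(). The helper
 * names wlan_is_recovering() and wlan_is_unloading() are hypothetical
 * placeholders, as is drv_ctx.
 *
 *	static struct hif_driver_state_callbacks cbk = {
 *		.context = &drv_ctx,
 *		.is_recovery_in_progress = wlan_is_recovering,
 *		.is_driver_unloading = wlan_is_unloading,
 *	};
 *
 *	hif_ctx = hif_open(qdf_ctx, mode, bus_type, &cbk, psoc);
 */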
651 
652 /* This API detaches the HTC layer from the HIF device */
653 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
654 
655 /****************************************************************/
656 /* BMI and Diag window abstraction                              */
657 /****************************************************************/
658 
659 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
660 
661 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
662 				     * handled atomically by
663 				     * DiagRead/DiagWrite
664 				     */
665 
666 #ifdef WLAN_FEATURE_BMI
667 /*
668  * API to handle HIF-specific BMI message exchanges, this API is synchronous
669  * and only allowed to be called from a context that can block (sleep)
670  */
671 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
672 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
673 				uint8_t *pSendMessage, uint32_t Length,
674 				uint8_t *pResponseMessage,
675 				uint32_t *pResponseLength, uint32_t TimeoutMS);
676 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
677 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
678 #else /* WLAN_FEATURE_BMI */
679 static inline void
680 hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
681 {
682 }
683 
684 static inline bool
685 hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
686 {
687 	return false;
688 }
689 #endif /* WLAN_FEATURE_BMI */
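
/*
 * Illustrative BMI exchange sketch (sleeping context only, per the note
 * above). Buffer allocation and DMA mapping are elided; cmd, rsp, req,
 * req_len, resp and resp_len are placeholders.
 *
 *	if (hif_needs_bmi(hif_ctx)) {
 *		QDF_STATUS status;
 *
 *		status = hif_exchange_bmi_msg(hif_ctx, cmd, rsp,
 *					      req, req_len, resp, &resp_len,
 *					      HIF_BMI_EXCHANGE_NO_TIMEOUT);
 *		if (QDF_IS_STATUS_ERROR(status))
 *			return status;
 *	}
 */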
690 
691 /*
692  * APIs to handle HIF specific diagnostic read accesses. These APIs are
693  * synchronous and only allowed to be called from a context that
694  * can block (sleep). They are not high performance APIs.
695  *
696  * hif_diag_read_access reads a 4 Byte aligned/length value from a
697  * Target register or memory word.
698  *
699  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
700  */
701 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
702 				uint32_t address, uint32_t *data);
703 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
704 		      uint8_t *data, int nbytes);
705 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
706 			void *ramdump_base, uint32_t address, uint32_t size);
707 /*
708  * APIs to handle HIF specific diagnostic write accesses. These APIs are
709  * synchronous and only allowed to be called from a context that
710  * can block (sleep).
711  * They are not high performance APIs.
712  *
713  * hif_diag_write_access writes a 4 Byte aligned/length value to a
714  * Target register or memory word.
715  *
716  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
717  */
718 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
719 				 uint32_t address, uint32_t data);
720 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
721 			uint32_t address, uint8_t *data, int nbytes);
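
/*
 * Example sketch: reading one 4-byte aligned target word through the Diag
 * window from a sleepable context. reg_addr is a placeholder address.
 *
 *	uint32_t val;
 *	QDF_STATUS status;
 *
 *	status = hif_diag_read_access(hif_ctx, reg_addr, &val);
 *	if (QDF_IS_STATUS_SUCCESS(status))
 *		... use val ...
 */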
722 
723 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
724 
725 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
726 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
727 
728 /*
729  * Set the FASTPATH_mode_on flag in sc, for use by data path
730  */
731 #ifdef WLAN_FEATURE_FASTPATH
732 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
733 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
734 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
735 
736 /**
737  * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
738  * @handler: Callback function
739  * @context: handle for callback function
740  *
741  * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
742  */
743 QDF_STATUS hif_ce_fastpath_cb_register(
744 		struct hif_opaque_softc *hif_ctx,
745 		fastpath_msg_handler handler, void *context);
746 #else
747 static inline QDF_STATUS hif_ce_fastpath_cb_register(
748 		struct hif_opaque_softc *hif_ctx,
749 		fastpath_msg_handler handler, void *context)
750 {
751 	return QDF_STATUS_E_FAILURE;
752 }
753 
754 static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
755 {
756 	return NULL;
757 }
758 
759 #endif
760 
761 /*
762  * Enable/disable CDC max performance workaround
763  * For max-performance set this to 0
764  * To allow SoC to enter sleep set this to 1
765  */
766 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
767 
768 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
769 			     qdf_shared_mem_t **ce_sr,
770 			     uint32_t *ce_sr_ring_size,
771 			     qdf_dma_addr_t *ce_reg_paddr);
772 
773 /**
774  * struct hif_msg_callbacks - List of callbacks - filled in by HTC.
775  */
776 struct hif_msg_callbacks {
777 	void *Context;
778 	/**< context meaningful to HTC */
779 	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
780 					uint32_t transferID,
781 					uint32_t toeplitz_hash_result);
782 	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
783 					uint8_t pipeID);
784 	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
785 	void (*fwEventHandler)(void *context, QDF_STATUS status);
786 	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
787 };
788 
789 enum hif_target_status {
790 	TARGET_STATUS_CONNECTED = 0,  /* target connected */
791 	TARGET_STATUS_RESET,  /* target got reset */
792 	TARGET_STATUS_EJECT,  /* target got ejected */
793 	TARGET_STATUS_SUSPEND /* target got suspended */
794 };
795 
796 /**
797  * enum hif_attribute_flags: configure hif
798  *
799  * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
800  * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
801  *  and no pktlog CE
802  */
803 enum hif_attribute_flags {
804 	HIF_LOWDESC_CE_CFG = 1,
805 	HIF_LOWDESC_CE_NO_PKTLOG_CFG
806 };
807 
808 #define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
809 	(attr |= (v & 0x01) << 5)
810 #define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
811 	(attr |= (v & 0x03) << 6)
812 #define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
813 	(attr |= (v & 0x01) << 13)
814 #define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
815 	(attr |= (v & 0x01) << 14)
816 #define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
817 	(attr |= (v & 0x01) << 15)
818 #define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
819 	(attr |= (v & 0x0FFF) << 16)
820 #define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
821 	(attr |= (v & 0x01) << 30)
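
/*
 * Illustrative sketch: composing a data_attr word for hif_send_head() with
 * the setter macros above. The field values, pipe_id, transfer_id, nbytes
 * and nbuf are placeholders.
 *
 *	uint32_t data_attr = 0;
 *
 *	HIF_DATA_ATTR_SET_TX_CLASSIFY(data_attr, 1);
 *	HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(data_attr, 0);
 *	HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(data_attr, 1);
 *	hif_send_head(hif_ctx, pipe_id, transfer_id, nbytes, nbuf, data_attr);
 */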
822 
823 struct hif_ul_pipe_info {
824 	unsigned int nentries;
825 	unsigned int nentries_mask;
826 	unsigned int sw_index;
827 	unsigned int write_index; /* cached copy */
828 	unsigned int hw_index;    /* cached copy */
829 	void *base_addr_owner_space; /* Host address space */
830 	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
831 };
832 
833 struct hif_dl_pipe_info {
834 	unsigned int nentries;
835 	unsigned int nentries_mask;
836 	unsigned int sw_index;
837 	unsigned int write_index; /* cached copy */
838 	unsigned int hw_index;    /* cached copy */
839 	void *base_addr_owner_space; /* Host address space */
840 	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
841 };
842 
843 struct hif_pipe_addl_info {
844 	uint32_t pci_mem;
845 	uint32_t ctrl_addr;
846 	struct hif_ul_pipe_info ul_pipe;
847 	struct hif_dl_pipe_info dl_pipe;
848 };
849 
850 #ifdef CONFIG_SLUB_DEBUG_ON
851 #define MSG_FLUSH_NUM 16
852 #else /* PERF build */
853 #define MSG_FLUSH_NUM 32
854 #endif /* CONFIG_SLUB_DEBUG_ON */
855 
856 struct hif_bus_id;
857 
858 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
859 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
860 		     int opcode, void *config, uint32_t config_len);
861 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
862 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
863 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
864 		   struct hif_msg_callbacks *callbacks);
865 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
866 void hif_stop(struct hif_opaque_softc *hif_ctx);
867 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
868 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
869 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
870 		      uint8_t cmd_id, bool start);
871 
872 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
873 				  uint32_t transferID, uint32_t nbytes,
874 				  qdf_nbuf_t wbuf, uint32_t data_attr);
875 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
876 			     int force);
877 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
878 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
879 			  uint8_t *DLPipe);
880 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
881 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
882 			int *dl_is_polled);
883 uint16_t
884 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
885 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
886 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
887 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
888 		     bool wait_for_it);
889 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
890 #ifndef HIF_PCI
891 static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
892 {
893 	return 0;
894 }
895 #else
896 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
897 #endif
898 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
899 			u32 *revision, const char **target_name);
900 
901 #ifdef RECEIVE_OFFLOAD
902 /**
903  * hif_offld_flush_cb_register() - Register the offld flush callback
904  * @scn: HIF opaque context
905  * @offld_flush_handler: Flush callback is either ol_flush, in case of
906  *			 rx_thread, or GRO/LRO flush when RxThread is not
907  *			 enabled. Called with the corresponding context for flush.
908  * Return: None
909  */
910 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
911 				 void (offld_flush_handler)(void *ol_ctx));
912 
913 /**
914  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
915  * @scn: HIF opaque context
916  *
917  * Return: None
918  */
919 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
920 #endif
921 
922 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
923 /**
924  * hif_exec_should_yield() - Check if hif napi context should yield
925  * @hif_ctx: HIF opaque context
926  * @grp_id: grp_id of the napi for which check needs to be done
927  *
928  * The function uses grp_id to look for NAPI and checks if NAPI needs to
929  * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
930  * yield decision.
931  *
932  * Return: true if NAPI needs to yield, else false
933  */
934 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
935 #else
936 static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
937 					 uint grp_id)
938 {
939 	return false;
940 }
941 #endif
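
/*
 * Illustrative sketch: an external-group poll handler can consult
 * hif_exec_should_yield() to honor the RX softirq time limit. The work loop
 * and process_some_entries() are placeholders.
 *
 *	while (work_left) {
 *		work_left = process_some_entries();
 *		if (hif_exec_should_yield(hif_ctx, grp_id))
 *			break;
 *	}
 */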
942 
943 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
944 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
945 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
946 				      int htc_htt_tx_endpoint);
947 
948 /**
949  * hif_open() - Create hif handle
950  * @qdf_ctx: qdf context
951  * @mode: Driver Mode
952  * @bus_type: Bus Type
953  * @cbk: CDS Callbacks
954  * @psoc: psoc object manager
955  *
956  * API to open HIF Context
957  *
958  * Return: HIF Opaque Pointer
959  */
960 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
961 				  uint32_t mode,
962 				  enum qdf_bus_type bus_type,
963 				  struct hif_driver_state_callbacks *cbk,
964 				  struct wlan_objmgr_psoc *psoc);
965 
966 /**
967  * hif_init_dma_mask() - Set dma mask for the dev
968  * @dev: dev for which DMA mask is to be set
969  * @bus_type: bus type for the target
970  *
971  * This API sets the DMA mask for the device before the datapath
972  * memory pre-allocation is done. If the DMA mask is not set before
973  * requesting the DMA memory, kernel defaults to a 32-bit DMA mask,
974  * and does not utilize the full device capability.
975  *
976  * Return: 0 - success, non-zero on failure.
977  */
978 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type);
979 void hif_close(struct hif_opaque_softc *hif_ctx);
980 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
981 		      void *bdev, const struct hif_bus_id *bid,
982 		      enum qdf_bus_type bus_type,
983 		      enum hif_enable_type type);
984 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
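
/*
 * Illustrative bring-up sketch; the exact ordering and error handling are
 * driver specific. QDF_BUS_TYPE_PCI, bdev and bid are shown only as
 * placeholders for the bus-specific arguments.
 *
 *	hif_init_dma_mask(dev, QDF_BUS_TYPE_PCI);
 *	hif_ctx = hif_open(qdf_ctx, mode, QDF_BUS_TYPE_PCI, &cbk, psoc);
 *	if (hif_ctx)
 *		status = hif_enable(hif_ctx, dev, bdev, bid,
 *				    QDF_BUS_TYPE_PCI, HIF_ENABLE_TYPE_PROBE);
 */
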
985 #ifdef CE_TASKLET_DEBUG_ENABLE
986 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
987 				 uint8_t value);
988 #endif
989 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
990 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
991 
992 /**
993  * enum hif_pm_wake_irq_type - Wake interrupt type for Power Management
994  * @HIF_PM_INVALID_WAKE: Wake irq is invalid or not configured
995  * @HIF_PM_MSI_WAKE: Wake irq is MSI interrupt
996  * @HIF_PM_CE_WAKE: Wake irq is CE interrupt
997  */
998 typedef enum {
999 	HIF_PM_INVALID_WAKE,
1000 	HIF_PM_MSI_WAKE,
1001 	HIF_PM_CE_WAKE,
1002 } hif_pm_wake_irq_type;
1003 
1004 /**
1005  * hif_pm_get_wake_irq_type - Get wake irq type for Power Management
1006  * @hif_ctx: HIF context
1007  *
1008  * Return: enum hif_pm_wake_irq_type
1009  */
1010 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx);
1011 
1012 /**
1013  * enum wlan_rtpm_dbgid - runtime pm put/get debug id
1014  * @RTPM_ID_RESVERD:       Reserved
1015  * @RTPM_ID_WMI:           WMI sending msg, expect put happen at
1016  *                         tx completion from CE level directly.
1017  * @RTPM_ID_HTC:           pkt sending by HTT_DATA_MSG_SVC, expect
1018  *                         put from fw response or just in
1019  *                         htc_issue_packets
1020  * @RTPM_ID_QOS_NOTIFY:    pm qos notifier
1021  * @RTPM_ID_DP_TX_DESC_ALLOC_FREE:      tx desc alloc/free
1022  * @RTPM_ID_CE_SEND_FAST:  operation in ce_send_fast, not include
1023  *                         the pkt put happens outside this function
1024  * @RTPM_ID_SUSPEND_RESUME:     suspend/resume in hdd
1025  * @RTPM_ID_DW_TX_HW_ENQUEUE:   operation in function dp_tx_hw_enqueue
1026  * @RTPM_ID_HAL_REO_CMD:        HAL_REO_CMD operation
1027  * @RTPM_ID_DP_PRINT_RING_STATS:  operation in dp_print_ring_stats
1028  */
1029 /* New value added to the enum must also be reflected in function
1030  *  rtpm_string_from_dbgid()
1031  */
1032 typedef enum {
1033 	RTPM_ID_RESVERD   = 0,
1034 	RTPM_ID_WMI       = 1,
1035 	RTPM_ID_HTC       = 2,
1036 	RTPM_ID_QOS_NOTIFY  = 3,
1037 	RTPM_ID_DP_TX_DESC_ALLOC_FREE  = 4,
1038 	RTPM_ID_CE_SEND_FAST       = 5,
1039 	RTPM_ID_SUSPEND_RESUME     = 6,
1040 	RTPM_ID_DW_TX_HW_ENQUEUE   = 7,
1041 	RTPM_ID_HAL_REO_CMD        = 8,
1042 	RTPM_ID_DP_PRINT_RING_STATS  = 9,
1043 
1044 	RTPM_ID_MAX,
1045 } wlan_rtpm_dbgid;
1046 
1047 /**
1048  * rtpm_string_from_dbgid() - Convert dbgid to respective string
1049  * @id: debug id
1050  *
1051  * Debug support function to convert dbgid to string.
1052  * Please note to add new string in the array at index equal to
1053  * its enum value in wlan_rtpm_dbgid.
1054  */
1055 static inline char *rtpm_string_from_dbgid(wlan_rtpm_dbgid id)
1056 {
1057 	static const char *strings[] = { "RTPM_ID_RESVERD",
1058 					"RTPM_ID_WMI",
1059 					"RTPM_ID_HTC",
1060 					"RTPM_ID_QOS_NOTIFY",
1061 					"RTPM_ID_DP_TX_DESC_ALLOC_FREE",
1062 					"RTPM_ID_CE_SEND_FAST",
1063 					"RTPM_ID_SUSPEND_RESUME",
1064 					"RTPM_ID_DW_TX_HW_ENQUEUE",
1065 					"RTPM_ID_HAL_REO_CMD",
1066 					"RTPM_ID_DP_PRINT_RING_STATS",
1067 					"RTPM_ID_MAX"};
1068 
1069 	return (char *)strings[id];
1070 }
1071 
1072 /**
1073  * enum hif_ep_vote_type - hif ep vote type
1074  * @HIF_EP_VOTE_DP_ACCESS: vote type is specific to DP
1075  * @HIF_EP_VOTE_NONDP_ACCESS: ep vote for overall access
1076  */
1077 enum hif_ep_vote_type {
1078 	HIF_EP_VOTE_DP_ACCESS,
1079 	HIF_EP_VOTE_NONDP_ACCESS
1080 };
1081 
1082 /**
1083  * enum hif_ep_vote_access - hif ep vote access
1084  * @HIF_EP_VOTE_ACCESS_ENABLE: Enable ep voting
1085  * @HIF_EP_VOTE_INTERMEDIATE_ACCESS: allow during transition
1086  * @HIF_EP_VOTE_ACCESS_DISABLE: disable ep voting
1087  */
1088 enum hif_ep_vote_access {
1089 	HIF_EP_VOTE_ACCESS_ENABLE,
1090 	HIF_EP_VOTE_INTERMEDIATE_ACCESS,
1091 	HIF_EP_VOTE_ACCESS_DISABLE
1092 };
1093 
1094 /**
1095  * enum hif_pm_link_state - hif link state
1096  * @HIF_PM_LINK_STATE_DOWN: hif link state is down
1097  * @HIF_PM_LINK_STATE_UP: hif link state is up
1098  */
1099 enum hif_pm_link_state {
1100 	HIF_PM_LINK_STATE_DOWN,
1101 	HIF_PM_LINK_STATE_UP
1102 };
1103 
1104 /**
1105  * enum hif_pm_htc_stats - hif runtime PM stats for HTC layer
1106  * @HIF_PM_HTC_STATS_GET_HTT_RESPONSE: PM stats for RTPM GET for HTT packets
1107  *                                     with response
1108  * @HIF_PM_HTC_STATS_GET_HTT_NO_RESPONSE: PM stats for RTPM GET for HTT packets
1109  *                                        with no response
1110  * @HIF_PM_HTC_STATS_PUT_HTT_RESPONSE: PM stats for RTPM PUT for HTT packets
1111  *                                     with response
1112  * @HIF_PM_HTC_STATS_PUT_HTT_NO_RESPONSE: PM stats for RTPM PUT for HTT packets
1113  *                                        with no response
1114  * @HIF_PM_HTC_STATS_PUT_HTT_ERROR: PM stats for RTPM PUT for failed HTT packets
1115  * @HIF_PM_HTC_STATS_PUT_HTC_CLEANUP: PM stats for RTPM PUT during HTC cleanup
1116  * @HIF_PM_HTC_STATS_GET_HTC_KICK_QUEUES: PM stats for RTPM GET done during
1117  *                                        htc_kick_queues()
1118  * @HIF_PM_HTC_STATS_PUT_HTC_KICK_QUEUES: PM stats for RTPM PUT done during
1119  *                                        htc_kick_queues()
1120  * @HIF_PM_HTC_STATS_GET_HTT_FETCH_PKTS: PM stats for RTPM GET while fetching
1121  *                                       HTT packets from endpoint TX queue
1122  * @HIF_PM_HTC_STATS_PUT_HTT_FETCH_PKTS: PM stats for RTPM PUT while fetching
1123  *                                       HTT packets from endpoint TX queue
1124  */
1125 enum hif_pm_htc_stats {
1126 	HIF_PM_HTC_STATS_GET_HTT_RESPONSE,
1127 	HIF_PM_HTC_STATS_GET_HTT_NO_RESPONSE,
1128 	HIF_PM_HTC_STATS_PUT_HTT_RESPONSE,
1129 	HIF_PM_HTC_STATS_PUT_HTT_NO_RESPONSE,
1130 	HIF_PM_HTC_STATS_PUT_HTT_ERROR,
1131 	HIF_PM_HTC_STATS_PUT_HTC_CLEANUP,
1132 	HIF_PM_HTC_STATS_GET_HTC_KICK_QUEUES,
1133 	HIF_PM_HTC_STATS_PUT_HTC_KICK_QUEUES,
1134 	HIF_PM_HTC_STATS_GET_HTT_FETCH_PKTS,
1135 	HIF_PM_HTC_STATS_PUT_HTT_FETCH_PKTS,
1136 };
1137 
1138 #ifdef FEATURE_RUNTIME_PM
1139 struct hif_pm_runtime_lock;
1140 
1141 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
1142 int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
1143 			    wlan_rtpm_dbgid rtpm_dbgid);
1144 int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
1145 				    wlan_rtpm_dbgid rtpm_dbgid);
1146 int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx);
1147 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx,
1148 		       wlan_rtpm_dbgid rtpm_dbgid,
1149 		       bool is_critical_ctx);
1150 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
1151 				 wlan_rtpm_dbgid rtpm_dbgid);
1152 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx,
1153 		       wlan_rtpm_dbgid rtpm_dbgid);
1154 int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
1155 			      wlan_rtpm_dbgid rtpm_dbgid);
1156 void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx);
1157 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
1158 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
1159 			struct hif_pm_runtime_lock *lock);
1160 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
1161 		struct hif_pm_runtime_lock *lock);
1162 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
1163 		struct hif_pm_runtime_lock *lock);
1164 bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx);
1165 void hif_pm_runtime_suspend_lock(struct hif_opaque_softc *hif_ctx);
1166 void hif_pm_runtime_suspend_unlock(struct hif_opaque_softc *hif_ctx);
1167 int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx);
1168 void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
1169 					  int val);
1170 void hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx);
1171 void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
1172 int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
1173 qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx);
1174 int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx);
1175 void hif_pm_runtime_update_stats(struct hif_opaque_softc *hif_ctx,
1176 				 wlan_rtpm_dbgid rtpm_dbgid,
1177 				 enum hif_pm_htc_stats stats);
1178 
1179 /**
1180  * hif_pm_set_link_state() - set link state during RTPM
1181  * @hif_handle: HIF Context
1182  * @val: link state
1183  * Return: None
1184  */
1185 void hif_pm_set_link_state(struct hif_opaque_softc *hif_handle, uint8_t val);
1186 
1187 /**
1188  * hif_pm_get_link_state() - get hif link state
1189  * @hif_handle: HIF Context
1190  *
1191  * Return: 1 link is up, 0 link is down
1192  */
1193 uint8_t hif_pm_get_link_state(struct hif_opaque_softc *hif_handle);
1194 #else
1195 struct hif_pm_runtime_lock {
1196 	const char *name;
1197 };
1198 static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
1199 static inline int
1200 hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
1201 			wlan_rtpm_dbgid rtpm_dbgid)
1202 { return 0; }
1203 static inline int
1204 hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
1205 				wlan_rtpm_dbgid rtpm_dbgid)
1206 { return 0; }
1207 static inline int
1208 hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
1209 { return 0; }
1210 static inline void
1211 hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
1212 			    wlan_rtpm_dbgid rtpm_dbgid)
1213 {}
1214 
1215 static inline int
1216 hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx, wlan_rtpm_dbgid rtpm_dbgid,
1217 		   bool is_critical_ctx)
1218 { return 0; }
1219 static inline int
1220 hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx, wlan_rtpm_dbgid rtpm_dbgid)
1221 { return 0; }
1222 static inline int
1223 hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
1224 			  wlan_rtpm_dbgid rtpm_dbgid)
1225 { return 0; }
1226 static inline void
1227 hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx) {};
1228 static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
1229 					const char *name)
1230 { return 0; }
1231 static inline void
1232 hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
1233 			struct hif_pm_runtime_lock *lock) {}
1234 
1235 static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
1236 		struct hif_pm_runtime_lock *lock)
1237 { return 0; }
1238 static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
1239 		struct hif_pm_runtime_lock *lock)
1240 { return 0; }
1241 static inline bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
1242 { return false; }
1243 static inline void
1244 hif_pm_runtime_suspend_lock(struct hif_opaque_softc *hif_ctx)
1245 { return; }
1246 static inline void
1247 hif_pm_runtime_suspend_unlock(struct hif_opaque_softc *hif_ctx)
1248 { return; }
1249 static inline int
1250 hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
1251 { return 0; }
1252 static inline void
1253 hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx, int val)
1254 { return; }
1255 static inline void
1256 hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx)
1257 { return; }
1258 static inline void
1259 hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx) {};
1260 static inline int
1261 hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
1262 { return 0; }
1263 static inline qdf_time_t
1264 hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
1265 { return 0; }
1266 static inline int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx)
1267 { return 0; }
1268 static inline
1269 void hif_pm_set_link_state(struct hif_opaque_softc *hif_handle, uint8_t val)
1270 {}
1271 
1272 static inline
1273 void hif_pm_runtime_update_stats(struct hif_opaque_softc *hif_ctx,
1274 				 wlan_rtpm_dbgid rtpm_dbgid,
1275 				 enum hif_pm_htc_stats stats)
1276 {}
1277 #endif
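
/*
 * Illustrative runtime-PM usage sketch: take a reference before touching the
 * hardware and drop it afterwards, tagging both calls with the same debug id.
 * RTPM_ID_WMI is used purely as an example, and a zero return is assumed to
 * mean success (matching the stubs above).
 *
 *	if (!hif_pm_runtime_get_sync(hif_ctx, RTPM_ID_WMI)) {
 *		... access the device ...
 *		hif_pm_runtime_put(hif_ctx, RTPM_ID_WMI);
 *	}
 */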
1278 
1279 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
1280 				 bool is_packet_log_enabled);
1281 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
1282 
1283 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
1284 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
1285 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
1286 
1287 #ifdef IPA_OFFLOAD
1288 /**
1289  * hif_get_ipa_hw_type() - get IPA hw type
1290  *
1291  * This API returns the IPA hw type.
1292  *
1293  * Return: IPA hw type
1294  */
1295 static inline
1296 enum ipa_hw_type hif_get_ipa_hw_type(void)
1297 {
1298 	return ipa_get_hw_type();
1299 }
1300 
1301 /**
1302  * hif_get_ipa_present() - get IPA hw status
1303  *
1304  * This API returns the IPA hw status.
1305  *
1306  * Return: true if IPA is present or false otherwise
1307  */
1308 static inline
1309 bool hif_get_ipa_present(void)
1310 {
1311 	return ipa_uc_reg_rdyCB(NULL) != -EPERM;
1315 }
1316 #endif
1317 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
1318 /**
1319  * hif_bus_early_suspend() - stop non wmi tx traffic
1320  * @hif_ctx: hif context
1321  */
1322 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
1323 
1324 /**
1325  * hif_bus_late_resume() - resume non wmi traffic
1326  * @hif_ctx: hif context
1327  */
1328 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
1329 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
1330 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
1331 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
1332 
1333 /**
1334  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
1335  * @hif_ctx: an opaque HIF handle to use
1336  *
1337  * As opposed to the standard hif_irq_enable, this function always applies to
1338  * the APPS side kernel interrupt handling.
1339  *
1340  * Return: errno
1341  */
1342 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
1343 
1344 /**
1345  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
1346  * @hif_ctx: an opaque HIF handle to use
1347  *
1348  * As opposed to the standard hif_irq_disable, this function always applies to
1349  * the APPS side kernel interrupt handling.
1350  *
1351  * Return: errno
1352  */
1353 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
1354 
1355 /**
1356  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
1357  * @hif_ctx: an opaque HIF handle to use
1358  *
1359  * As opposed to the standard hif_irq_enable, this function always applies to
1360  * the APPS side kernel interrupt handling.
1361  *
1362  * Return: errno
1363  */
1364 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
1365 
1366 /**
1367  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
1368  * @hif_ctx: an opaque HIF handle to use
1369  *
1370  * As opposed to the standard hif_irq_disable, this function always applies to
1371  * the APPS side kernel interrupt handling.
1372  *
1373  * Return: errno
1374  */
1375 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
1376 
1377 /**
1378  * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side
1379  * @hif_ctx: an opaque HIF handle to use
1380  *
1381  * This function always applies to the APPS side kernel interrupt handling
1382  * to wake the system from suspend.
1383  *
1384  * Return: errno
1385  */
1386 int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx);
1387 
1388 /**
1389  * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side
1390  * @hif_ctx: an opaque HIF handle to use
1391  *
1392  * This function always applies to the APPS side kernel interrupt handling
1393  * to disable the wake irq.
1394  *
1395  * Return: errno
1396  */
1397 int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx);
1398 
1399 /**
1400  * hif_apps_enable_irqs_except_wake_irq() - Enables all irqs except wake_irq
1401  * @hif_ctx: an opaque HIF handle to use
1402  *
1403  * As opposed to the standard hif_irq_enable, this function always applies to
1404  * the APPS side kernel interrupt handling.
1405  *
1406  * Return: errno
1407  */
1408 int hif_apps_enable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1409 
1410 /**
1411  * hif_apps_disable_irqs_except_wake_irq() - Disables all irqs except wake_irq
1412  * @hif_ctx: an opaque HIF handle to use
1413  *
1414  * As opposed to the standard hif_irq_disable, this function always applies to
1415  * the APPS side kernel interrupt handling.
1416  *
1417  * Return: errno
1418  */
1419 int hif_apps_disable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
1420 
1421 #ifdef FEATURE_RUNTIME_PM
1422 void hif_print_runtime_pm_prevent_list(struct hif_opaque_softc *hif_ctx);
1423 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1424 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
1425 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1426 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
1427 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
1428 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
1429 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
1430 #else
1431 static inline void
1432 hif_print_runtime_pm_prevent_list(struct hif_opaque_softc *hif_ctx)
1433 {}
1434 #endif
1435 
1436 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
1437 int hif_dump_registers(struct hif_opaque_softc *scn);
1438 int ol_copy_ramdump(struct hif_opaque_softc *scn);
1439 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
1440 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1441 		     u32 *revision, const char **target_name);
1442 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
1443 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
1444 						   scn);
1445 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
1446 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
1447 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
1448 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1449 			   hif_target_status);
1450 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1451 			 struct hif_config_info *cfg);
1452 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
1453 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1454 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
1455 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1456 			   uint32_t transfer_id, u_int32_t len);
1457 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
1458 	uint32_t transfer_id, uint32_t download_len);
1459 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
1460 void hif_ce_war_disable(void);
1461 void hif_ce_war_enable(void);
1462 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
1463 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
1464 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
1465 		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
1466 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
1467 		uint32_t pipe_num);
1468 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
1469 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
1470 
1471 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
1472 				int rx_bundle_cnt);
1473 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
1474 
1475 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
1476 
1477 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
1478 
1479 enum hif_exec_type {
1480 	HIF_EXEC_NAPI_TYPE,
1481 	HIF_EXEC_TASKLET_TYPE,
1482 };
1483 
1484 typedef uint32_t (*ext_intr_handler)(void *, uint32_t);
1485 
1486 /**
1487  * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
1488  * @softc: hif opaque context owning the exec context
1489  * @id: the id of the interrupt context
1490  *
1491  * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
1492  *         'id' registered with the OS
1493  */
1494 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
1495 				uint8_t id);
1496 
1497 /**
1498  * hif_configure_ext_group_interrupts() - Configure ext group interrupts
1499  * @hif_ctx: hif opaque context
1500  *
1501  * Return: QDF_STATUS
1502  */
1503 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1504 
1505 /**
1506  * hif_deconfigure_ext_group_interrupts() - Deconfigure ext group interrupts
1507  * @hif_ctx: hif opaque context
1508  *
1509  * Return: None
1510  */
1511 void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1512 
1513 /**
1514  * hif_register_ext_group() - API to register external group
1515  * interrupt handler.
1516  * @hif_ctx: HIF Context
1517  * @numirq: number of irq's in the group
1518  * @irq: array of irq values
1519  * @handler: callback interrupt handler function
1520  * @cb_ctx: context to be passed to the callback
1521  * @type: napi vs tasklet
1522  *
1523  * Return: QDF_STATUS
1524  */
1525 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
1526 				  uint32_t numirq, uint32_t irq[],
1527 				  ext_intr_handler handler,
1528 				  void *cb_ctx, const char *context_name,
1529 				  enum hif_exec_type type, uint32_t scale);
1530 
1531 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
1532 				const char *context_name);
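
/*
 * Illustrative sketch: registering a NAPI-type external interrupt group.
 * dp_ext_handler, irqs, numirq and dp_ctx are placeholders; the handler is
 * assumed to return the amount of work it performed.
 *
 *	static uint32_t dp_ext_handler(void *ctx, uint32_t budget)
 *	{
 *		... do up to 'budget' worth of work, return work done ...
 *	}
 *
 *	status = hif_register_ext_group(hif_ctx, numirq, irqs,
 *					dp_ext_handler, dp_ctx, "dp_ext",
 *					HIF_EXEC_NAPI_TYPE,
 *					QCA_NAPI_DEF_SCALE);
 */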
1533 
1534 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1535 				u_int8_t pipeid,
1536 				struct hif_msg_callbacks *callbacks);
1537 
1538 /**
1539  * hif_print_napi_stats() - Display HIF NAPI stats
1540  * @hif_ctx: HIF opaque context
1541  *
1542  * Return: None
1543  */
1544 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
1545 
1546 /**
1547  * hif_clear_napi_stats() - Clear HIF NAPI latency stats
1548  * @hif_ctx: HIF opaque context
1549  *
1550  * Return: None
1551  */
1552 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
1553 
1554 #ifdef __cplusplus
1555 }
1556 #endif
1557 
1558 #ifdef FORCE_WAKE
1559 /**
1560  * hif_force_wake_request() - Function to wake from power collapse
1561  * @handle: HIF opaque handle
1562  *
1563  * Description: API to check whether the device is awake before
1564  * reading/writing BAR + 4K registers. If the device is awake,
1565  * return success; otherwise write '1' to
1566  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG, which interrupts
1567  * the device and wakes up the PCI and MHI within 50ms. The device then
1568  * writes a value to
1569  * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
1570  * handshake and let the host know it is awake.
1571  *
1572  * Return: zero - success/non-zero - failure
1573  */
1574 int hif_force_wake_request(struct hif_opaque_softc *handle);
1575 
1576 /**
1577  * hif_force_wake_release() - API to release/reset the SOC wake register
1578  * so that it stops interrupting the device.
1579  * @handle: HIF opaque handle
1580  *
1581  * Description: API to set the
1582  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
1583  * to release the interrupt line.
1584  *
1585  * Return: zero - success/non-zero - failure
1586  */
1587 int hif_force_wake_release(struct hif_opaque_softc *handle);
1588 #else
1589 static inline
1590 int hif_force_wake_request(struct hif_opaque_softc *handle)
1591 {
1592 	return 0;
1593 }
1594 
1595 static inline
1596 int hif_force_wake_release(struct hif_opaque_softc *handle)
1597 {
1598 	return 0;
1599 }
1600 #endif /* FORCE_WAKE */
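
/*
 * Example usage (illustrative sketch; my_read_bar_reg() and its arguments are
 * hypothetical, not part of this API): bracket an access to registers above
 * BAR + 4K with hif_force_wake_request()/hif_force_wake_release().
 *
 *	static int my_read_bar_reg(struct hif_opaque_softc *hif_hdl,
 *				   uint32_t offset, uint32_t *val)
 *	{
 *		void __iomem *mem = hif_get_dev_ba(hif_hdl);
 *		int ret;
 *
 *		ret = hif_force_wake_request(hif_hdl);
 *		if (ret)
 *			return ret;
 *
 *		*val = ioread32(mem + offset);	// device is awake here
 *
 *		return hif_force_wake_release(hif_hdl);
 *	}
 */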
1601 
1602 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
1603 /**
1604  * hif_prevent_link_low_power_states() - Prevent from going to low power states
1605  * @hif: HIF opaque context
1606  *
1607  * Return: 0 on success. Error code on failure.
1608  */
1609 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);
1610 
1611 /**
1612  * hif_allow_link_low_power_states() - Allow link to go to low power states
1613  * @hif: HIF opaque context
1614  *
1615  * Return: None
1616  */
1617 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);
1618 
1619 #else
1620 
1621 static inline
1622 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
1623 {
1624 	return 0;
1625 }
1626 
1627 static inline
1628 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
1629 {
1630 }
1631 #endif
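
/*
 * Example usage (illustrative sketch; my_write_reg() and its arguments are
 * hypothetical): keep the link out of low power states for the duration of a
 * register write, then allow low power states again.
 *
 *	static int my_write_reg(struct hif_opaque_softc *hif,
 *				void __iomem *addr, uint32_t val)
 *	{
 *		int ret = hif_prevent_link_low_power_states(hif);
 *
 *		if (ret)
 *			return ret;
 *
 *		iowrite32(val, addr);	// link stays out of low power here
 *
 *		hif_allow_link_low_power_states(hif);
 *		return 0;
 *	}
 */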
1632 
1633 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
1634 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle);
1635 
1636 /**
1637  * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
1638  * @hif_ctx: the HIF context to assign the callback to
1639  * @callback: the callback to assign
1640  * @priv: the private data to pass to the callback when invoked
1641  *
1642  * Return: None
1643  */
1644 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
1645 			       void (*callback)(void *),
1646 			       void *priv);
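
/*
 * Example usage (illustrative sketch; my_initial_wake_handler() and
 * struct my_wake_ctx are hypothetical): install a handler that records the
 * initial wakeup event in caller-owned private data.
 *
 *	struct my_wake_ctx {
 *		bool initial_wake_received;
 *	};
 *
 *	static void my_initial_wake_handler(void *priv)
 *	{
 *		struct my_wake_ctx *ctx = priv;
 *
 *		ctx->initial_wake_received = true;
 *	}
 *
 *	// registration:
 *	// hif_set_initial_wakeup_cb(hif_ctx, my_initial_wake_handler, my_ctx);
 */
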
1647 /*
1648  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
1649  * for the definitions here
1650  */
1651 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1652 ssize_t hif_dump_desc_trace_buf(struct device *dev,
1653 				struct device_attribute *attr, char *buf);
1654 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1655 					const char *buf, size_t size);
1656 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
1657 				const char *buf, size_t size);
1658 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
1659 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
1660 #endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1661 
1662 /**
1663  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
1664  * @hif: hif context
1665  * @ce_service_max_yield_time: CE service max yield time to set
1666  *
1667  * This API stores CE service max yield time in hif context based
1668  * on ini value.
1669  *
1670  * Return: void
1671  */
1672 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
1673 				       uint32_t ce_service_max_yield_time);
1674 
1675 /**
1676  * hif_get_ce_service_max_yield_time() - get CE service max yield time
1677  * @hif: hif context
1678  *
1679  * This API returns CE service max yield time.
1680  *
1681  * Return: CE service max yield time
1682  */
1683 unsigned long long
1684 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
1685 
1686 /**
1687  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
1688  * @hif: hif context
1689  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
1690  *
1691  * This API stores CE service max rx ind flush in hif context based
1692  * on ini value.
1693  *
1694  * Return: void
1695  */
1696 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
1697 					 uint8_t ce_service_max_rx_ind_flush);
1698 
1699 #ifdef OL_ATH_SMART_LOGGING
1700 /**
1701  * hif_log_dump_ce() - Copy all the CE DEST ring to buf
1702  * @scn: HIF handle
1703  * @buf_cur: Current pointer in ring buffer
1704  * @buf_init: Start of the ring buffer
1705  * @buf_sz: Size of the ring buffer
1706  * @ce: Copy Engine id
1707  * @skb_sz: Max size of the SKB buffer to be copied
1708  *
1709  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
1710  * and the buffers pointed to by them into the given buf
1711  *
1712  * Return: Current pointer in ring buffer
1713  */
1714 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
1715 			 uint8_t *buf_init, uint32_t buf_sz,
1716 			 uint32_t ce, uint32_t skb_sz);
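
/*
 * Example usage (illustrative sketch; my_dump_one_ce() and the 128-byte SKB
 * cap are hypothetical, and the byte count assumes the dump did not wrap
 * around buf): dump a single copy engine into a flat buffer and report how
 * many bytes were written based on the returned ring position.
 *
 *	static uint32_t my_dump_one_ce(struct hif_softc *scn, uint8_t *buf,
 *				       uint32_t buf_sz, uint32_t ce_id)
 *	{
 *		uint8_t *cur = hif_log_dump_ce(scn, buf, buf, buf_sz,
 *					       ce_id, 128);
 *
 *		return cur - buf;
 *	}
 */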
1717 #endif /* OL_ATH_SMART_LOGGING */
1718 
1719 /**
1720  * hif_softc_to_hif_opaque_softc() - API to convert hif_softc handle
1721  * to hif_opaque_softc handle
1722  * @hif_handle: hif_softc type
1723  *
1724  * Return: hif_opaque_softc type
1725  */
1726 static inline struct hif_opaque_softc *
1727 hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
1728 {
1729 	return (struct hif_opaque_softc *)hif_handle;
1730 }
1731 
1732 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
1733 QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx);
1734 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx);
1735 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1736 			    uint8_t type, uint8_t access);
1737 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1738 			       uint8_t type);
1739 #else
1740 static inline QDF_STATUS
1741 hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1742 {
1743 	return QDF_STATUS_SUCCESS;
1744 }
1745 
1746 static inline void
1747 hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1748 {
1749 }
1750 
1751 static inline void
1752 hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1753 		       uint8_t type, uint8_t access)
1754 {
1755 }
1756 
1757 static inline uint8_t
1758 hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1759 		       uint8_t type)
1760 {
1761 	return HIF_EP_VOTE_ACCESS_ENABLE;
1762 }
1763 #endif
1764 
1765 #ifdef FORCE_WAKE
1766 /**
1767  * hif_srng_init_phase() - Indicate srng initialization phase
1768  * to avoid force wake as UMAC power collapse is not yet
1769  * enabled
1770  * @hif_ctx: hif opaque handle
1771  * @init_phase: initialization phase
1772  *
1773  * Return:  None
1774  */
1775 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
1776 			 bool init_phase);
1777 #else
1778 static inline
1779 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
1780 			 bool init_phase)
1781 {
1782 }
1783 #endif /* FORCE_WAKE */
1784 
1785 #ifdef HIF_IPCI
1786 /**
1787  * hif_shutdown_notifier_cb() - Callback for shutdown notifier
1788  * @ctx: hif handle
1789  *
1790  * Return:  None
1791  */
1792 void hif_shutdown_notifier_cb(void *ctx);
1793 #else
1794 static inline
1795 void hif_shutdown_notifier_cb(void *ctx)
1796 {
1797 }
1798 #endif /* HIF_IPCI */
1799 
1800 #ifdef HIF_CE_LOG_INFO
1801 /**
1802  * hif_log_ce_info() - API to log ce info
1803  * @scn: hif handle
1804  * @data: hang event data buffer
1805  * @offset: offset at which data needs to be written
1806  *
1807  * Return:  None
1808  */
1809 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
1810 		     unsigned int *offset);
1811 #else
1812 static inline
1813 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
1814 		     unsigned int *offset)
1815 {
1816 }
1817 #endif
1818 
1819 #ifdef HIF_CPU_PERF_AFFINE_MASK
1820 /**
1821  * hif_config_irq_set_perf_affinity_hint() - API to set affinity
1822  * @hif_ctx: hif opaque handle
1823  *
1824  * This function is used to move the WLAN IRQs to perf cores in
1825  * case of defconfig builds.
1826  *
1827  * Return:  None
1828  */
1829 void hif_config_irq_set_perf_affinity_hint(
1830 	struct hif_opaque_softc *hif_ctx);
1831 
1832 #else
1833 static inline void hif_config_irq_set_perf_affinity_hint(
1834 	struct hif_opaque_softc *hif_ctx)
1835 {
1836 }
1837 #endif
1838 
1839 /**
1840  * hif_apps_grp_irqs_enable() - enable ext grp irqs
1841  * @hif_ctx: HIF opaque context
1842  *
1843  * Return: 0 on success. Error code on failure.
1844  */
1845 int hif_apps_grp_irqs_enable(struct hif_opaque_softc *hif_ctx);
1846 
1847 /**
1848  * hif_apps_grp_irqs_disable() - disable ext grp irqs
1849  * @hif_ctx: HIF opaque context
1850  *
1851  * Return: 0 on success. Error code on failure.
1852  */
1853 int hif_apps_grp_irqs_disable(struct hif_opaque_softc *hif_ctx);
1854 
1855 /**
1856  * hif_disable_grp_irqs() - disable ext grp irqs
1857  * @scn: HIF opaque context
1858  *
1859  * Return: 0 on success. Error code on failure.
1860  */
1861 int hif_disable_grp_irqs(struct hif_opaque_softc *scn);
1862 
1863 /**
1864  * hif_enable_grp_irqs() - enable ext grp irqs
1865  * @scn: HIF opaque context
1866  *
1867  * Return: 0 on success. Error code on failure.
1868  */
1869 int hif_enable_grp_irqs(struct hif_opaque_softc *scn);
1870 
1871 enum hif_credit_exchange_type {
1872 	HIF_REQUEST_CREDIT,
1873 	HIF_PROCESS_CREDIT_REPORT,
1874 };
1875 
1876 enum hif_detect_latency_type {
1877 	HIF_DETECT_TASKLET,
1878 	HIF_DETECT_CREDIT,
1879 	HIF_DETECT_UNKNOWN
1880 };
1881 
1882 #ifdef HIF_DETECTION_LATENCY_ENABLE
1883 void hif_latency_detect_credit_record_time(
1884 	enum hif_credit_exchange_type type,
1885 	struct hif_opaque_softc *hif_ctx);
1886 
1887 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx);
1888 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx);
1889 void hif_check_detection_latency(struct hif_softc *scn,
1890 				 bool from_timer,
1891 				 uint32_t bitmap_type);
1892 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value);
1893 #else
1894 static inline
1895 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
1896 {}
1897 
1898 static inline
1899 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
1900 {}
1901 
1902 static inline
1903 void hif_latency_detect_credit_record_time(
1904 	enum hif_credit_exchange_type type,
1905 	struct hif_opaque_softc *hif_ctx)
1906 {}
1907 static inline
1908 void hif_check_detection_latency(struct hif_softc *scn,
1909 				 bool from_timer,
1910 				 uint32_t bitmap_type)
1911 {}
1912 
1913 static inline
1914 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
1915 {}
1916 #endif
1917 
1918 #ifdef SYSTEM_PM_CHECK
1919 /**
1920  * __hif_system_pm_set_state() - Set system pm state
1921  * @hif: hif opaque handle
1922  * @state: system state
1923  *
1924  * Return:  None
1925  */
1926 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
1927 			       enum hif_system_pm_state state);
1928 
1929 /**
1930  * hif_system_pm_set_state_on() - Set system pm state to ON
1931  * @hif: hif opaque handle
1932  *
1933  * Return:  None
1934  */
1935 static inline
1936 void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
1937 {
1938 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_ON);
1939 }
1940 
1941 /**
1942  * hif_system_pm_set_state_resuming() - Set system pm state to resuming
1943  * @hif: hif opaque handle
1944  *
1945  * Return:  None
1946  */
1947 static inline
1948 void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
1949 {
1950 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_RESUMING);
1951 }
1952 
1953 /**
1954  * hif_system_pm_set_state_suspending() - Set system pm state to suspending
1955  * @hif: hif opaque handle
1956  *
1957  * Return:  None
1958  */
1959 static inline
1960 void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
1961 {
1962 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDING);
1963 }
1964 
1965 /**
1966  * hif_system_pm_set_state_suspended() - Set system pm state to suspended
1967  * @hif: hif opaque handle
1968  *
1969  * Return:  None
1970  */
1971 static inline
1972 void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
1973 {
1974 	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDED);
1975 }
1976 
1977 /**
1978  * hif_system_pm_get_state() - Get system pm state
1979  * @hif: hif opaque handle
1980  *
1981  * Return:  system state
1982  */
1983 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif);
1984 
1985 /**
1986  * hif_system_pm_state_check() - Check system state and trigger resume
1987  *  if required
1988  * @hif: hif opaque handle
1989  *
1990  * Return: 0 if system is in on state else error code
1991  */
1992 int hif_system_pm_state_check(struct hif_opaque_softc *hif);
1993 #else
1994 static inline
1995 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
1996 			       enum hif_system_pm_state state)
1997 {
1998 }
1999 
2000 static inline
2001 void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
2002 {
2003 }
2004 
2005 static inline
2006 void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
2007 {
2008 }
2009 
2010 static inline
2011 void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
2012 {
2013 }
2014 
2015 static inline
2016 void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
2017 {
2018 }
2019 
2020 static inline
2021 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
2022 {
2023 	return 0;
2024 }
2025 
2026 static inline int hif_system_pm_state_check(struct hif_opaque_softc *hif)
2027 {
2028 	return 0;
2029 }
2030 #endif
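
/*
 * Example usage (illustrative sketch; my_bus_suspend(), my_bus_resume() and
 * my_tx_allowed() are hypothetical): drive the system PM state from bus
 * suspend/resume callbacks and gate new work on hif_system_pm_state_check().
 *
 *	static int my_bus_suspend(struct hif_opaque_softc *hif)
 *	{
 *		hif_system_pm_set_state_suspending(hif);
 *		// perform the actual bus suspend steps here
 *		hif_system_pm_set_state_suspended(hif);
 *		return 0;
 *	}
 *
 *	static int my_bus_resume(struct hif_opaque_softc *hif)
 *	{
 *		hif_system_pm_set_state_resuming(hif);
 *		// perform the actual bus resume steps here
 *		hif_system_pm_set_state_on(hif);
 *		return 0;
 *	}
 *
 *	static bool my_tx_allowed(struct hif_opaque_softc *hif)
 *	{
 *		// hif_system_pm_state_check() returns 0 only in the ON state;
 *		// otherwise it triggers a resume and reports an error
 *		return hif_system_pm_state_check(hif) == 0;
 *	}
 */
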
2031 #endif /* _HIF_H_ */
2032