xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _HIF_H_
20 #define _HIF_H_
21 
22 #ifdef __cplusplus
23 extern "C" {
24 #endif /* __cplusplus */
25 
26 /* Header files */
27 #include <qdf_status.h>
28 #include "qdf_nbuf.h"
29 #include "qdf_lro.h"
30 #include "ol_if_athvar.h"
31 #include <linux/platform_device.h>
32 #ifdef HIF_PCI
33 #include <linux/pci.h>
34 #endif /* HIF_PCI */
35 #ifdef HIF_USB
36 #include <linux/usb.h>
37 #endif /* HIF_USB */
38 #ifdef IPA_OFFLOAD
39 #include <linux/ipa.h>
40 #endif
41 #include "cfg_ucfg_api.h"
42 #include "qdf_dev.h"
43 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
44 
45 typedef void __iomem *A_target_id_t;
46 typedef void *hif_handle_t;
47 
48 #define HIF_TYPE_AR6002   2
49 #define HIF_TYPE_AR6003   3
50 #define HIF_TYPE_AR6004   5
51 #define HIF_TYPE_AR9888   6
52 #define HIF_TYPE_AR6320   7
53 #define HIF_TYPE_AR6320V2 8
54 /* For attaching Peregrine 2.0 board host_reg_tbl only */
55 #define HIF_TYPE_AR9888V2 9
56 #define HIF_TYPE_ADRASTEA 10
57 #define HIF_TYPE_AR900B 11
58 #define HIF_TYPE_QCA9984 12
59 #define HIF_TYPE_IPQ4019 13
60 #define HIF_TYPE_QCA9888 14
61 #define HIF_TYPE_QCA8074 15
62 #define HIF_TYPE_QCA6290 16
63 #define HIF_TYPE_QCN7605 17
64 #define HIF_TYPE_QCA6390 18
65 #define HIF_TYPE_QCA8074V2 19
66 #define HIF_TYPE_QCA6018  20
67 #define HIF_TYPE_QCN9000 21
68 #define HIF_TYPE_QCA6490 22
69 #define HIF_TYPE_QCA6750 23
70 #define HIF_TYPE_QCA5018 24
71 #define HIF_TYPE_QCN9100 25
72 
73 #define DMA_COHERENT_MASK_DEFAULT   37
74 
75 #ifdef IPA_OFFLOAD
76 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
77 #endif
78 
/* enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
120 
121 struct CE_state;
122 #define CE_COUNT_MAX 12
123 #define HIF_MAX_GRP_IRQ 16
124 
125 #ifndef HIF_MAX_GROUP
126 #define HIF_MAX_GROUP 7
127 #endif
128 
129 #ifndef NAPI_YIELD_BUDGET_BASED
130 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
131 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
132 #endif
133 #else  /* NAPI_YIELD_BUDGET_BASED */
134 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
135 #endif /* NAPI_YIELD_BUDGET_BASED */
136 
137 #define QCA_NAPI_BUDGET    64
138 #define QCA_NAPI_DEF_SCALE  \
139 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
140 
141 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
142 /* NOTE: "napi->scale" can be changed,
143  * but this does not change the number of buckets
144  */
145 #define QCA_NAPI_NUM_BUCKETS 4
146 
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is reenabled
 * @napi_workdone: cumulative of all work done reported by handler
 * @cpu_corrected: incremented when execution context runs on a different core
 *                 than the one that its irq is affined to.
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: maximum poll time recorded for this context
 * @poll_time_buckets: histogram of poll times for the napi
 *                     (WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT builds only)
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};
175 
176 
/**
 * struct qca_napi_info - per NAPI instance data structure
 * @netdev: dummy net_dev backing this napi instance
 * @hif_ctx: hif context this instance belongs to
 * @napi: kernel napi instance
 * @scale: budget scale factor; currently same on all instances
 * @id: instance id
 * @cpu: cpu associated with this instance
 * @irq: irq number serviced by this napi
 * @cpumask: cpu affinity mask for this instance
 * @stats: per-cpu statistics
 * @offld_flush_cb: rx offload flush callback; only present for data rx CE's
 * @rx_thread_napi: napi instance used by the rx thread
 * @rx_thread_netdev: dummy net_dev backing @rx_thread_napi
 * @lro_ctx: LRO context
 *
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device    netdev; /* dummy net_dev */
	void                 *hif_ctx;
	struct napi_struct   napi;
	uint8_t              scale;   /* currently same on all instances */
	uint8_t              id;
	uint8_t              cpu;
	int                  irq;
	cpumask_t            cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct   rx_thread_napi;
	struct net_device    rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t        lro_ctx;
};
202 
/* Throughput state as tracked by the napi subsystem */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};

/* Hotplug state of a cpu as tracked in the napi cpu table */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };
212 
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state:       hotplug state of the core (enum qca_napi_cpu_state)
 * @core_id:     physical core id of the core
 * @cluster_id:  cluster this core belongs to
 * @core_mask:   mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq:    maximum clock this core can be clocked at
 *               same for all cpus of the same core.
 * @napis:       bitmap of napi instances on this core
 * @execs:       bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *               (an index into the table, not a pointer)
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int			core_id;
	int			cluster_id;
	cpumask_t		core_mask;
	cpumask_t		thread_mask;
	unsigned int		max_freq;
	uint32_t		napis;
	uint32_t		execs;
	int			cluster_nxt;  /* index, not pointer */
};
243 
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bitmap of created/registered NAPI instances, indexed by pipe_id;
 *          not used by clients (clients use an id returned by create)
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: napi instances, indexed by CE id
 * @napi_cpu: cpu info for irq affinty
 * @lilcl_head: list head index into @napi_cpu (presumably the little
 *              cluster — TODO confirm)
 * @bigcl_head: list head index into @napi_cpu (presumably the big
 *              cluster — TODO confirm)
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: napi flags
 */
struct qca_napi_data {
	struct               hif_softc *hif_softc;
	qdf_spinlock_t       lock;
	uint32_t             state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t             ce_map;
	uint32_t             exec_map;
	uint32_t             user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu  napi_cpu[NR_CPUS];
	int                  lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t              flags;
};
276 
/**
 * struct hif_config_info - Place Holder for HIF configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable Runtime PM (FEATURE_RUNTIME_PM builds only)
 * @runtime_pm_delay: Runtime PM Delay (FEATURE_RUNTIME_PM builds only)
 * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
 *
 * Structure for holding HIF ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	uint8_t enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
	uint64_t rx_softirq_max_yield_duration_ns;
};
294 
295 /**
296  * struct hif_target_info - Target Information
297  * @target_version: Target Version
298  * @target_type: Target Type
299  * @target_revision: Target Revision
300  * @soc_version: SOC Version
301  * @hw_name: pointer to hardware name
302  *
303  * Structure to hold target information.
304  */
305 struct hif_target_info {
306 	uint32_t target_version;
307 	uint32_t target_type;
308 	uint32_t target_revision;
309 	uint32_t soc_version;
310 	char *hw_name;
311 };
312 
/* Opaque handle type for a HIF instance; internals are hidden from callers */
struct hif_opaque_softc {
};
315 
316 /**
317  * enum hif_event_type - Type of DP events to be recorded
318  * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
319  * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event
320  * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event
321  * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
322  * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
323  * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
324  */
325 enum hif_event_type {
326 	HIF_EVENT_IRQ_TRIGGER,
327 	HIF_EVENT_TIMER_ENTRY,
328 	HIF_EVENT_TIMER_EXIT,
329 	HIF_EVENT_BH_SCHED,
330 	HIF_EVENT_SRNG_ACCESS_START,
331 	HIF_EVENT_SRNG_ACCESS_END,
332 	/* Do check hif_hist_skip_event_record when adding new events */
333 };
334 
335 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
336 
337 /* HIF_EVENT_HIST_MAX should always be power of 2 */
338 #define HIF_EVENT_HIST_MAX		512
339 #define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP
340 #define HIF_EVENT_HIST_DISABLE_MASK	0
341 
342 /**
343  * struct hif_event_record - an entry of the DP event history
344  * @hal_ring_id: ring id for which event is recorded
345  * @hp: head pointer of the ring (may not be applicable for all events)
346  * @tp: tail pointer of the ring (may not be applicable for all events)
347  * @cpu_id: cpu id on which the event occurred
348  * @timestamp: timestamp when event occurred
349  * @type: type of the event
350  *
351  * This structure represents the information stored for every datapath
352  * event which is logged in the history.
353  */
354 struct hif_event_record {
355 	uint8_t hal_ring_id;
356 	uint32_t hp;
357 	uint32_t tp;
358 	int cpu_id;
359 	uint64_t timestamp;
360 	enum hif_event_type type;
361 };
362 
363 /**
364  * struct hif_event_misc - history related misc info
365  * @last_irq_index: last irq event index in history
366  * @last_irq_ts: last irq timestamp
367  */
368 struct hif_event_misc {
369 	int32_t last_irq_index;
370 	uint64_t last_irq_ts;
371 };
372 
373 /**
374  * struct hif_event_history - history for one interrupt group
375  * @index: index to store new event
376  * @event: event entry
377  *
378  * This structure represents the datapath history for one
379  * interrupt group.
380  */
381 struct hif_event_history {
382 	qdf_atomic_t index;
383 	struct hif_event_misc misc;
384 	struct hif_event_record event[HIF_EVENT_HIST_MAX];
385 };
386 
387 /**
388  * hif_hist_record_event() - Record one datapath event in history
389  * @hif_ctx: HIF opaque context
390  * @event: DP event entry
391  * @intr_grp_id: interrupt group ID registered with hif
392  *
393  * Return: None
394  */
395 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
396 			   struct hif_event_record *event,
397 			   uint8_t intr_grp_id);
398 
399 /**
400  * hif_event_history_init() - Initialize SRNG event history buffers
401  * @hif_ctx: HIF opaque context
402  * @id: context group ID for which history is recorded
403  *
404  * Returns: None
405  */
406 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id);
407 
408 /**
409  * hif_event_history_deinit() - De-initialize SRNG event history buffers
410  * @hif_ctx: HIF opaque context
411  * @id: context group ID for which history is recorded
412  *
413  * Returns: None
414  */
415 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id);
416 
417 /**
418  * hif_record_event() - Wrapper function to form and record DP event
419  * @hif_ctx: HIF opaque context
420  * @intr_grp_id: interrupt group ID registered with hif
421  * @hal_ring_id: ring id for which event is recorded
422  * @hp: head pointer index of the srng
423  * @tp: tail pointer index of the srng
424  * @type: type of the event to be logged in history
425  *
426  * Return: None
427  */
428 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
429 				    uint8_t intr_grp_id,
430 				    uint8_t hal_ring_id,
431 				    uint32_t hp,
432 				    uint32_t tp,
433 				    enum hif_event_type type)
434 {
435 	struct hif_event_record event;
436 
437 	event.hal_ring_id = hal_ring_id;
438 	event.hp = hp;
439 	event.tp = tp;
440 	event.type = type;
441 
442 	hif_hist_record_event(hif_ctx, &event, intr_grp_id);
443 
444 	return;
445 }
446 
447 #else
448 
/* Stub: DP event history disabled; recording an event is a no-op */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}

/* Stub: no history buffers to initialize when the feature is disabled */
static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx,
					  uint8_t id)
{
}

/* Stub: no history buffers to release when the feature is disabled */
static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx,
					    uint8_t id)
{
}
467 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
468 
469 /**
470  * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
471  *
472  * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
473  * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
474  *                         minimize power
475  * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
476  *                         platform-specific measures to completely power-off
477  *                         the module and associated hardware (i.e. cut power
478  *                         supplies)
479  */
480 enum HIF_DEVICE_POWER_CHANGE_TYPE {
481 	HIF_DEVICE_POWER_UP,
482 	HIF_DEVICE_POWER_DOWN,
483 	HIF_DEVICE_POWER_CUT
484 };
485 
486 /**
487  * enum hif_enable_type: what triggered the enabling of hif
488  *
489  * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
490  * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
491  */
492 enum hif_enable_type {
493 	HIF_ENABLE_TYPE_PROBE,
494 	HIF_ENABLE_TYPE_REINIT,
495 	HIF_ENABLE_TYPE_MAX
496 };
497 
498 /**
499  * enum hif_disable_type: what triggered the disabling of hif
500  *
501  * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
502  * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
503  * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
504  * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
505  */
506 enum hif_disable_type {
507 	HIF_DISABLE_TYPE_PROBE_ERROR,
508 	HIF_DISABLE_TYPE_REINIT_ERROR,
509 	HIF_DISABLE_TYPE_REMOVE,
510 	HIF_DISABLE_TYPE_SHUTDOWN,
511 	HIF_DISABLE_TYPE_MAX
512 };
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get fifo address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
549 
550 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
551 struct HID_ACCESS_LOG {
552 	uint32_t seqnum;
553 	bool is_write;
554 	void *addr;
555 	uint32_t value;
556 };
557 #endif
558 
559 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
560 		uint32_t value);
561 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
562 
563 #define HIF_MAX_DEVICES                 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context:          context to pass to the @dsr_handler
 *                    note: @rw_compl_handler is provided the context
 *                    passed to hif_read_write
 * @rw_compl_handler: Read / write completion handler
 * @dsr_handler:      DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};
577 
/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 * @get_bandwidth_level: Query current bandwidth level for the driver
 * @prealloc_get_consistent_mem_unaligned: get prealloc unaligned consistent
 *                                         memory
 * @prealloc_put_consistent_mem_unaligned: put unaligned consistent memory
 *                                         back to the pool
 *
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
	int (*get_bandwidth_level)(void *context);
	void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size,
						       qdf_dma_addr_t *paddr,
						       uint32_t ring_type);
	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
};
604 
605 /* This API detaches the HTC layer from the HIF device */
606 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
607 
608 /****************************************************************/
609 /* BMI and Diag window abstraction                              */
610 /****************************************************************/
611 
612 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
613 
614 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
615 				     * handled atomically by
616 				     * DiagRead/DiagWrite
617 				     */
618 
619 #ifdef WLAN_FEATURE_BMI
620 /*
621  * API to handle HIF-specific BMI message exchanges, this API is synchronous
622  * and only allowed to be called from a context that can block (sleep)
623  */
624 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
625 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
626 				uint8_t *pSendMessage, uint32_t Length,
627 				uint8_t *pResponseMessage,
628 				uint32_t *pResponseLength, uint32_t TimeoutMS);
629 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
630 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
631 #else /* WLAN_FEATURE_BMI */
/* Stub: BMI not compiled in; registering BMI callbacks is a no-op */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}

/* Stub: without WLAN_FEATURE_BMI the target never requires BMI */
static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
642 #endif /* WLAN_FEATURE_BMI */
643 
644 /*
645  * APIs to handle HIF specific diagnostic read accesses. These APIs are
646  * synchronous and only allowed to be called from a context that
647  * can block (sleep). They are not high performance APIs.
648  *
649  * hif_diag_read_access reads a 4 Byte aligned/length value from a
650  * Target register or memory word.
651  *
652  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
653  */
654 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
655 				uint32_t address, uint32_t *data);
656 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
657 		      uint8_t *data, int nbytes);
658 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
659 			void *ramdump_base, uint32_t address, uint32_t size);
660 /*
661  * APIs to handle HIF specific diagnostic write accesses. These APIs are
662  * synchronous and only allowed to be called from a context that
663  * can block (sleep).
664  * They are not high performance APIs.
665  *
666  * hif_diag_write_access writes a 4 Byte aligned/length value to a
667  * Target register or memory word.
668  *
669  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
670  */
671 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
672 				 uint32_t address, uint32_t data);
673 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
674 			uint32_t address, uint8_t *data, int nbytes);
675 
676 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
677 
678 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
679 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
680 
681 /*
682  * Set the FASTPATH_mode_on flag in sc, for use by data path
683  */
684 #ifdef WLAN_FEATURE_FASTPATH
685 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
686 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
687 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
688 
689 /**
690  * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
691  * @handler: Callback funtcion
692  * @context: handle for callback function
693  *
694  * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
695  */
696 QDF_STATUS hif_ce_fastpath_cb_register(
697 		struct hif_opaque_softc *hif_ctx,
698 		fastpath_msg_handler handler, void *context);
699 #else
/* Stub: fastpath disabled; report failure so callers fall back */
static inline QDF_STATUS hif_ce_fastpath_cb_register(
		struct hif_opaque_softc *hif_ctx,
		fastpath_msg_handler handler, void *context)
{
	return QDF_STATUS_E_FAILURE;
}

/* Stub: no CE handle is available when fastpath is disabled */
static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
711 
712 #endif
713 
714 /*
715  * Enable/disable CDC max performance workaround
716  * For max-performace set this to 0
717  * To allow SoC to enter sleep set this to 1
718  */
719 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
720 
721 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
722 			     qdf_shared_mem_t **ce_sr,
723 			     uint32_t *ce_sr_ring_size,
724 			     qdf_dma_addr_t *ce_reg_paddr);
725 
/**
 * struct hif_msg_callbacks - List of callbacks - filled in by HTC.
 * @Context: context meaningful to HTC
 * @txCompletionHandler: tx completion callback
 * @rxCompletionHandler: rx completion callback, invoked per pipe
 * @txResourceAvailHandler: tx resource available callback
 * @fwEventHandler: firmware event callback
 * @update_bundle_stats: bundle stats update callback
 */
struct hif_msg_callbacks {
	void *Context;
	/**< context meaningful to HTC */
	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint32_t transferID,
					uint32_t toeplitz_hash_result);
	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint8_t pipeID);
	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
	void (*fwEventHandler)(void *context, QDF_STATUS status);
	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
};
741 
742 enum hif_target_status {
743 	TARGET_STATUS_CONNECTED = 0,  /* target connected */
744 	TARGET_STATUS_RESET,  /* target got reset */
745 	TARGET_STATUS_EJECT,  /* target got ejected */
746 	TARGET_STATUS_SUSPEND /*target got suspend */
747 };
748 
/**
 * enum hif_attribute_flags: configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor CE and
 *                                no pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};
760 
761 #define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
762 	(attr |= (v & 0x01) << 5)
763 #define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
764 	(attr |= (v & 0x03) << 6)
765 #define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
766 	(attr |= (v & 0x01) << 13)
767 #define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
768 	(attr |= (v & 0x01) << 14)
769 #define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
770 	(attr |= (v & 0x01) << 15)
771 #define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
772 	(attr |= (v & 0x0FFF) << 16)
773 #define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
774 	(attr |= (v & 0x01) << 30)
775 
776 struct hif_ul_pipe_info {
777 	unsigned int nentries;
778 	unsigned int nentries_mask;
779 	unsigned int sw_index;
780 	unsigned int write_index; /* cached copy */
781 	unsigned int hw_index;    /* cached copy */
782 	void *base_addr_owner_space; /* Host address space */
783 	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
784 };
785 
786 struct hif_dl_pipe_info {
787 	unsigned int nentries;
788 	unsigned int nentries_mask;
789 	unsigned int sw_index;
790 	unsigned int write_index; /* cached copy */
791 	unsigned int hw_index;    /* cached copy */
792 	void *base_addr_owner_space; /* Host address space */
793 	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
794 };
795 
796 struct hif_pipe_addl_info {
797 	uint32_t pci_mem;
798 	uint32_t ctrl_addr;
799 	struct hif_ul_pipe_info ul_pipe;
800 	struct hif_dl_pipe_info dl_pipe;
801 };
802 
803 #ifdef CONFIG_SLUB_DEBUG_ON
804 #define MSG_FLUSH_NUM 16
805 #else /* PERF build */
806 #define MSG_FLUSH_NUM 32
807 #endif /* SLUB_DEBUG_ON */
808 
809 struct hif_bus_id;
810 
811 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
812 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
813 		     int opcode, void *config, uint32_t config_len);
814 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
815 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
816 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
817 		   struct hif_msg_callbacks *callbacks);
818 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
819 void hif_stop(struct hif_opaque_softc *hif_ctx);
820 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
821 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
822 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
823 		      uint8_t cmd_id, bool start);
824 
825 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
826 				  uint32_t transferID, uint32_t nbytes,
827 				  qdf_nbuf_t wbuf, uint32_t data_attr);
828 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
829 			     int force);
830 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
831 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
832 			  uint8_t *DLPipe);
833 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
834 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
835 			int *dl_is_polled);
836 uint16_t
837 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
838 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
839 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
840 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
841 		     bool wait_for_it);
842 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
843 #ifndef HIF_PCI
/* Stub: SoC status check only applies to PCI builds; report success */
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
848 #else
849 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
850 #endif
851 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
852 			u32 *revision, const char **target_name);
853 
854 #ifdef RECEIVE_OFFLOAD
855 /**
856  * hif_offld_flush_cb_register() - Register the offld flush callback
857  * @scn: HIF opaque context
858  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
859  *			 Or GRO/LRO flush when RxThread is not enabled. Called
860  *			 with corresponding context for flush.
861  * Return: None
862  */
863 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
864 				 void (offld_flush_handler)(void *ol_ctx));
865 
866 /**
867  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
868  * @scn: HIF opaque context
869  *
870  * Return: None
871  */
872 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
873 #endif
874 
875 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
876 /**
877  * hif_exec_should_yield() - Check if hif napi context should yield
878  * @hif_ctx - HIF opaque context
879  * @grp_id - grp_id of the napi for which check needs to be done
880  *
881  * The function uses grp_id to look for NAPI and checks if NAPI needs to
882  * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
883  * yield decision.
884  *
885  * Return: true if NAPI needs to yield, else false
886  */
887 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
888 #else
/* Stub: without RX softirq time-limit support, napi contexts never yield */
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
					 uint grp_id)
{
	return false;
}
894 #endif
895 
896 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
897 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
898 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
899 				      int htc_htt_tx_endpoint);
900 
901 /**
902  * hif_open() - Create hif handle
903  * @qdf_ctx: qdf context
904  * @mode: Driver Mode
905  * @bus_type: Bus Type
906  * @cbk: CDS Callbacks
907  * @psoc: psoc object manager
908  *
909  * API to open HIF Context
910  *
911  * Return: HIF Opaque Pointer
912  */
913 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
914 				  uint32_t mode,
915 				  enum qdf_bus_type bus_type,
916 				  struct hif_driver_state_callbacks *cbk,
917 				  struct wlan_objmgr_psoc *psoc);
918 
919 void hif_close(struct hif_opaque_softc *hif_ctx);
920 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
921 		      void *bdev, const struct hif_bus_id *bid,
922 		      enum qdf_bus_type bus_type,
923 		      enum hif_enable_type type);
924 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
925 #ifdef CE_TASKLET_DEBUG_ENABLE
926 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
927 				 uint8_t value);
928 #endif
929 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
930 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
931 
932 /**
933  * enum wlan_rtpm_dbgid - runtime pm put/get debug id
934  * @RTPM_ID_RESVERD:       Reserved
935  * @RTPM_ID_WMI:           WMI sending msg, expect put happen at
936  *                         tx completion from CE level directly.
937  * @RTPM_ID_HTC:           pkt sending by HTT_DATA_MSG_SVC, expect
938  *                         put from fw response or just in
939  *                         htc_issue_packets
 * @RTPM_ID_QOS_NOTIFY:    pm qos notifier
941  * @RTPM_ID_DP_TX_DESC_ALLOC_FREE:      tx desc alloc/free
942  * @RTPM_ID_CE_SEND_FAST:  operation in ce_send_fast, not include
943  *                         the pkt put happens outside this function
944  * @RTPM_ID_SUSPEND_RESUME:     suspend/resume in hdd
 * @RTPM_ID_DW_TX_HW_ENQUEUE:   operation in function dp_tx_hw_enqueue
946  * @RTPM_ID_HAL_REO_CMD:        HAL_REO_CMD operation
947  * @RTPM_ID_DP_PRINT_RING_STATS:  operation in dp_print_ring_stats
948  */
949 /* New value added to the enum must also be reflected in function
950  *  rtpm_string_from_dbgid()
951  */
typedef enum {
	RTPM_ID_RESVERD   = 0,
	RTPM_ID_WMI       = 1,
	RTPM_ID_HTC       = 2,
	RTPM_ID_QOS_NOTIFY  = 3,
	RTPM_ID_DP_TX_DESC_ALLOC_FREE  = 4,
	RTPM_ID_CE_SEND_FAST       = 5,
	RTPM_ID_SUSPEND_RESUME     = 6,
	RTPM_ID_DW_TX_HW_ENQUEUE   = 7,
	RTPM_ID_HAL_REO_CMD        = 8,
	RTPM_ID_DP_PRINT_RING_STATS  = 9,

	RTPM_ID_MAX,
} wlan_rtpm_dbgid;

/**
 * rtpm_string_from_dbgid() - Convert dbgid to respective string
 * @id: debug id
 *
 * Debug support function to convert dbgid to string.
 * Please note to add new string in the array at index equal to
 * its enum value in wlan_rtpm_dbgid.
 *
 * Return: name of @id; "RTPM_ID_MAX" for any out-of-range id
 */
static inline char *rtpm_string_from_dbgid(wlan_rtpm_dbgid id)
{
	static const char *strings[] = { "RTPM_ID_RESVERD",
					"RTPM_ID_WMI",
					"RTPM_ID_HTC",
					"RTPM_ID_QOS_NOTIFY",
					"RTPM_ID_DP_TX_DESC_ALLOC_FREE",
					"RTPM_ID_CE_SEND_FAST",
					"RTPM_ID_SUSPEND_RESUME",
					"RTPM_ID_DW_TX_HW_ENQUEUE",
					"RTPM_ID_HAL_REO_CMD",
					"RTPM_ID_DP_PRINT_RING_STATS",
					"RTPM_ID_MAX"};

	/* Clamp invalid ids so a bad caller cannot read past the array;
	 * cast to unsigned also catches negative values.
	 */
	if ((unsigned int)id > RTPM_ID_MAX)
		id = RTPM_ID_MAX;

	return (char *)strings[id];
}
991 
992 #ifdef FEATURE_RUNTIME_PM
993 struct hif_pm_runtime_lock;
994 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
995 int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
996 			    wlan_rtpm_dbgid rtpm_dbgid);
997 int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
998 				    wlan_rtpm_dbgid rtpm_dbgid);
999 int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx);
1000 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx,
1001 		       wlan_rtpm_dbgid rtpm_dbgid);
1002 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
1003 				 wlan_rtpm_dbgid rtpm_dbgid);
1004 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx,
1005 		       wlan_rtpm_dbgid rtpm_dbgid);
1006 int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
1007 			      wlan_rtpm_dbgid rtpm_dbgid);
1008 void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx);
1009 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
1010 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
1011 			struct hif_pm_runtime_lock *lock);
1012 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
1013 		struct hif_pm_runtime_lock *lock);
1014 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
1015 		struct hif_pm_runtime_lock *lock);
1016 bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx);
1017 int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx);
1018 void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
1019 					  int val);
1020 void hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx);
1021 void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
1022 int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
1023 qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx);
1024 int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx);
1025 #else
1026 struct hif_pm_runtime_lock {
1027 	const char *name;
1028 };
1029 static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
1030 static inline int
1031 hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
1032 			wlan_rtpm_dbgid rtpm_dbgid)
1033 { return 0; }
1034 static inline int
1035 hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
1036 				wlan_rtpm_dbgid rtpm_dbgid)
1037 { return 0; }
1038 static inline int
1039 hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
1040 { return 0; }
1041 static inline void
1042 hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
1043 			    wlan_rtpm_dbgid rtpm_dbgid)
1044 {}
1045 
1046 static inline int
1047 hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx, wlan_rtpm_dbgid rtpm_dbgid)
1048 { return 0; }
1049 static inline int
1050 hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx, wlan_rtpm_dbgid rtpm_dbgid)
1051 { return 0; }
1052 static inline int
1053 hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
1054 			  wlan_rtpm_dbgid rtpm_dbgid)
1055 { return 0; }
1056 static inline void
1057 hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx) {};
1058 static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
1059 					const char *name)
1060 { return 0; }
1061 static inline void
1062 hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
1063 			struct hif_pm_runtime_lock *lock) {}
1064 
1065 static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
1066 		struct hif_pm_runtime_lock *lock)
1067 { return 0; }
1068 static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
1069 		struct hif_pm_runtime_lock *lock)
1070 { return 0; }
1071 static inline bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
1072 { return false; }
1073 static inline int
1074 hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
1075 { return 0; }
1076 static inline void
1077 hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx, int val)
1078 { return; }
1079 static inline void
1080 hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx)
1081 { return; }
1082 static inline void
1083 hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx) {};
1084 static inline int
1085 hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
1086 { return 0; }
1087 static inline qdf_time_t
1088 hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
1089 { return 0; }
1090 static inline int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx)
1091 { return 0; }
1092 #endif
1093 
1094 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
1095 				 bool is_packet_log_enabled);
1096 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
1097 
1098 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
1099 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
1100 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
1101 
1102 #ifdef IPA_OFFLOAD
/**
 * hif_get_ipa_hw_type() - get IPA hw type
 *
 * This API returns the IPA hw type as reported by the IPA driver's
 * ipa_get_hw_type().
 *
 * Return: IPA hw type
 */
static inline
enum ipa_hw_type hif_get_ipa_hw_type(void)
{
	return ipa_get_hw_type();
}
1115 
1116 /**
1117  * hif_get_ipa_present() - get IPA hw status
1118  *
1119  * This API return the IPA hw status.
1120  *
1121  * Return: true if IPA is present or false otherwise
1122  */
1123 static inline
1124 bool hif_get_ipa_present(void)
1125 {
1126 	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
1127 		return true;
1128 	else
1129 		return false;
1130 }
1131 #endif
1132 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
1133 /**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
1136  */
1137 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
1138 
1139 /**
1140  * hif_bus_late_resume() - resume non wmi traffic
 * @hif_ctx: hif context
1142  */
1143 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
1144 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
1145 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
1146 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
1147 
1148 /**
1149  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
1150  * @hif_ctx: an opaque HIF handle to use
1151  *
1152  * As opposed to the standard hif_irq_enable, this function always applies to
1153  * the APPS side kernel interrupt handling.
1154  *
1155  * Return: errno
1156  */
1157 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
1158 
1159 /**
1160  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
1161  * @hif_ctx: an opaque HIF handle to use
1162  *
1163  * As opposed to the standard hif_irq_disable, this function always applies to
1164  * the APPS side kernel interrupt handling.
1165  *
1166  * Return: errno
1167  */
1168 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
1169 
1170 /**
1171  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
1172  * @hif_ctx: an opaque HIF handle to use
1173  *
1174  * As opposed to the standard hif_irq_enable, this function always applies to
1175  * the APPS side kernel interrupt handling.
1176  *
1177  * Return: errno
1178  */
1179 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
1180 
1181 /**
1182  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
1183  * @hif_ctx: an opaque HIF handle to use
1184  *
1185  * As opposed to the standard hif_irq_disable, this function always applies to
1186  * the APPS side kernel interrupt handling.
1187  *
1188  * Return: errno
1189  */
1190 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
1191 
1192 /**
1193  * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side
1194  * @hif_ctx: an opaque HIF handle to use
1195  *
1196  * This function always applies to the APPS side kernel interrupt handling
1197  * to wake the system from suspend.
1198  *
1199  * Return: errno
1200  */
1201 int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx);
1202 
1203 /**
1204  * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side
1205  * @hif_ctx: an opaque HIF handle to use
1206  *
1207  * This function always applies to the APPS side kernel interrupt handling
1208  * to disable the wake irq.
1209  *
1210  * Return: errno
1211  */
1212 int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx);
1213 
1214 #ifdef FEATURE_RUNTIME_PM
1215 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1216 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
1217 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1218 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
1219 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
1220 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
1221 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
1222 #endif
1223 
1224 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
1225 int hif_dump_registers(struct hif_opaque_softc *scn);
1226 int ol_copy_ramdump(struct hif_opaque_softc *scn);
1227 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
1228 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1229 		     u32 *revision, const char **target_name);
1230 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
1231 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
1232 						   scn);
1233 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
1234 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
1235 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
1236 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1237 			   hif_target_status);
1238 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1239 			 struct hif_config_info *cfg);
1240 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
1241 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1242 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
1243 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1244 			   uint32_t transfer_id, u_int32_t len);
1245 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
1246 	uint32_t transfer_id, uint32_t download_len);
1247 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
1248 void hif_ce_war_disable(void);
1249 void hif_ce_war_enable(void);
1250 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
1251 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
1252 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
1253 		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
1254 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
1255 		uint32_t pipe_num);
1256 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
1257 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
1258 
1259 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
1260 				int rx_bundle_cnt);
1261 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
1262 
1263 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
1264 
1265 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
1266 
1267 enum hif_exec_type {
1268 	HIF_EXEC_NAPI_TYPE,
1269 	HIF_EXEC_TASKLET_TYPE,
1270 };
1271 
1272 typedef uint32_t (*ext_intr_handler)(void *, uint32_t);
1273 
1274 /**
1275  * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
1276  * @softc: hif opaque context owning the exec context
1277  * @id: the id of the interrupt context
1278  *
1279  * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
1280  *         'id' registered with the OS
1281  */
1282 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
1283 				uint8_t id);
1284 
1285 /**
 * hif_configure_ext_group_interrupts() - Configure ext group interrupts
1287  * @hif_ctx: hif opaque context
1288  *
1289  * Return: QDF_STATUS
1290  */
1291 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1292 
1293 /**
1294  * hif_register_ext_group() - API to register external group
1295  * interrupt handler.
1296  * @hif_ctx : HIF Context
1297  * @numirq: number of irq's in the group
1298  * @irq: array of irq values
1299  * @handler: callback interrupt handler function
1300  * @cb_ctx: context to passed in callback
1301  * @type: napi vs tasklet
1302  *
1303  * Return: QDF_STATUS
1304  */
1305 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
1306 				  uint32_t numirq, uint32_t irq[],
1307 				  ext_intr_handler handler,
1308 				  void *cb_ctx, const char *context_name,
1309 				  enum hif_exec_type type, uint32_t scale);
1310 
1311 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
1312 				const char *context_name);
1313 
1314 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1315 				u_int8_t pipeid,
1316 				struct hif_msg_callbacks *callbacks);
1317 
1318 /**
1319  * hif_print_napi_stats() - Display HIF NAPI stats
1320  * @hif_ctx - HIF opaque context
1321  *
1322  * Return: None
1323  */
1324 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
1325 
1326 /* hif_clear_napi_stats() - function clears the stats of the
1327  * latency when called.
1328  * @hif_ctx - the HIF context to assign the callback to
1329  *
1330  * Return: None
1331  */
1332 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
1333 
1334 #ifdef __cplusplus
1335 }
1336 #endif
1337 
1338 #ifdef FORCE_WAKE
1339 /**
1340  * hif_force_wake_request() - Function to wake from power collapse
1341  * @handle: HIF opaque handle
1342  *
1343  * Description: API to check if the device is awake or not before
1344  * read/write to BAR + 4K registers. If device is awake return
1345  * success otherwise write '1' to
1346  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG which will interrupt
1347  * the device and does wakeup the PCI and MHI within 50ms
1348  * and then the device writes a value to
1349  * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
1350  * handshake process to let the host know the device is awake.
1351  *
1352  * Return: zero - success/non-zero - failure
1353  */
1354 int hif_force_wake_request(struct hif_opaque_softc *handle);
1355 
1356 /**
1357  * hif_force_wake_release() - API to release/reset the SOC wake register
1358  * from interrupting the device.
1359  * @handle: HIF opaque handle
1360  *
1361  * Description: API to set the
1362  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
1363  * to release the interrupt line.
1364  *
1365  * Return: zero - success/non-zero - failure
1366  */
1367 int hif_force_wake_release(struct hif_opaque_softc *handle);
1368 #else
static inline
int hif_force_wake_request(struct hif_opaque_softc *handle)
{
	/* FORCE_WAKE disabled: no wake handshake to perform; succeed. */
	return 0;
}

static inline
int hif_force_wake_release(struct hif_opaque_softc *handle)
{
	/* FORCE_WAKE disabled: nothing to release; always succeed. */
	return 0;
}
1380 #endif /* FORCE_WAKE */
1381 
1382 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
1383 /**
1384  * hif_prevent_link_low_power_states() - Prevent from going to low power states
1385  * @hif - HIF opaque context
1386  *
1387  * Return: 0 on success. Error code on failure.
1388  */
1389 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);
1390 
1391 /**
1392  * hif_allow_link_low_power_states() - Allow link to go to low power states
1393  * @hif - HIF opaque context
1394  *
1395  * Return: None
1396  */
1397 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);
1398 
1399 #else
1400 
static inline
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	/* FEATURE_HAL_DELAYED_REG_WRITE disabled: no low-power vote is
	 * tracked; report success.
	 */
	return 0;
}

static inline
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
{
	/* No-op counterpart of hif_prevent_link_low_power_states(). */
}
1411 #endif
1412 
1413 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
1414 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle);
1415 
1416 /**
1417  * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
1418  * @hif_ctx - the HIF context to assign the callback to
1419  * @callback - the callback to assign
1420  * @priv - the private data to pass to the callback when invoked
1421  *
1422  * Return: None
1423  */
1424 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
1425 			       void (*callback)(void *),
1426 			       void *priv);
1427 /*
1428  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
1429  * for defined here
1430  */
1431 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1432 ssize_t hif_dump_desc_trace_buf(struct device *dev,
1433 				struct device_attribute *attr, char *buf);
1434 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1435 					const char *buf, size_t size);
1436 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
1437 				const char *buf, size_t size);
1438 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
1439 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
1440 #endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
1441 
1442 /**
1443  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
1444  * @hif: hif context
1445  * @ce_service_max_yield_time: CE service max yield time to set
1446  *
 * This API stores CE service max yield time in hif context based
1448  * on ini value.
1449  *
1450  * Return: void
1451  */
1452 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
1453 				       uint32_t ce_service_max_yield_time);
1454 
1455 /**
1456  * hif_get_ce_service_max_yield_time() - get CE service max yield time
1457  * @hif: hif context
1458  *
1459  * This API returns CE service max yield time.
1460  *
1461  * Return: CE service max yield time
1462  */
1463 unsigned long long
1464 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
1465 
1466 /**
1467  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
1468  * @hif: hif context
1469  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
1470  *
1471  * This API stores CE service max rx ind flush in hif context based
1472  * on ini value.
1473  *
1474  * Return: void
1475  */
1476 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
1477 					 uint8_t ce_service_max_rx_ind_flush);
1478 
1479 #ifdef OL_ATH_SMART_LOGGING
1480 /*
1481  * hif_log_ce_dump() - Copy all the CE DEST ring to buf
1482  * @scn : HIF handler
1483  * @buf_cur: Current pointer in ring buffer
1484  * @buf_init:Start of the ring buffer
1485  * @buf_sz: Size of the ring buffer
1486  * @ce: Copy Engine id
1487  * @skb_sz: Max size of the SKB buffer to be copied
1488  *
1489  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
1490  * and buffers pointed by them in to the given buf
1491  *
1492  * Return: Current pointer in ring buffer
1493  */
1494 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
1495 			 uint8_t *buf_init, uint32_t buf_sz,
1496 			 uint32_t ce, uint32_t skb_sz);
1497 #endif /* OL_ATH_SMART_LOGGING */
1498 
1499 /*
1500  * hif_softc_to_hif_opaque_softc - API to convert hif_softc handle
1501  * to hif_opaque_softc handle
1502  * @hif_handle - hif_softc type
1503  *
1504  * Return: hif_opaque_softc type
1505  */
static inline struct hif_opaque_softc *
hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
{
	/* The opaque handle is just the concrete hif_softc viewed
	 * through its public type.
	 */
	struct hif_opaque_softc *opaque_ctx;

	opaque_ctx = (struct hif_opaque_softc *)hif_handle;
	return opaque_ctx;
}
1511 
1512 #ifdef FORCE_WAKE
1513 /**
1514  * hif_srng_init_phase(): Indicate srng initialization phase
1515  * to avoid force wake as UMAC power collapse is not yet
1516  * enabled
1517  * @hif_ctx: hif opaque handle
1518  * @init_phase: initialization phase
1519  *
1520  * Return:  None
1521  */
1522 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
1523 			 bool init_phase);
1524 #else
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	/* FORCE_WAKE disabled: srng init-phase tracking not needed. */
}
1530 #endif /* FORCE_WAKE */
1531 
1532 #ifdef HIF_IPCI
1533 /**
1534  * hif_shutdown_notifier_cb - Call back for shutdown notifier
1535  * @ctx: hif handle
1536  *
1537  * Return:  None
1538  */
1539 void hif_shutdown_notifier_cb(void *ctx);
1540 #else
static inline
void hif_shutdown_notifier_cb(void *ctx)
{
	/* HIF_IPCI disabled: shutdown notification is a no-op. */
}
1545 #endif /* HIF_IPCI */
1546 
1547 #ifdef HIF_CE_LOG_INFO
1548 /**
1549  * hif_log_ce_info() - API to log ce info
1550  * @scn: hif handle
1551  * @data: hang event data buffer
1552  * @offset: offset at which data needs to be written
1553  *
1554  * Return:  None
1555  */
1556 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
1557 		     unsigned int *offset);
1558 #else
static inline
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset)
{
	/* HIF_CE_LOG_INFO disabled: no CE info is written to @data. */
}
1564 #endif
1565 
1566 #ifdef HIF_CPU_PERF_AFFINE_MASK
1567 /**
1568  * hif_config_irq_set_perf_affinity_hint() - API to set affinity
1569  * @hif_ctx: hif opaque handle
1570  *
1571  * This function is used to move the WLAN IRQs to perf cores in
1572  * case of defconfig builds.
1573  *
1574  * Return:  None
1575  */
1576 void hif_config_irq_set_perf_affinity_hint(
1577 	struct hif_opaque_softc *hif_ctx);
1578 
1579 #else
/* HIF_CPU_PERF_AFFINE_MASK disabled: IRQs keep their default CPU
 * affinity.
 */
static inline void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx)
{
}
1584 #endif
1585 #endif /* _HIF_H_ */
1586