xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision 45a38684b07295822dc8eba39e293408f203eec8)
1 /*
2  * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _HIF_H_
20 #define _HIF_H_
21 
22 #ifdef __cplusplus
23 extern "C" {
24 #endif /* __cplusplus */
25 
26 /* Header files */
27 #include <qdf_status.h>
28 #include "qdf_nbuf.h"
29 #include "qdf_lro.h"
30 #include "ol_if_athvar.h"
31 #include <linux/platform_device.h>
32 #ifdef HIF_PCI
33 #include <linux/pci.h>
34 #endif /* HIF_PCI */
35 #ifdef HIF_USB
36 #include <linux/usb.h>
37 #endif /* HIF_USB */
38 #ifdef IPA_OFFLOAD
39 #include <linux/ipa.h>
40 #endif
41 #include "cfg_ucfg_api.h"
42 #include "qdf_dev.h"
43 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
44 
45 typedef void __iomem *A_target_id_t;
46 typedef void *hif_handle_t;
47 
48 #define HIF_TYPE_AR6002   2
49 #define HIF_TYPE_AR6003   3
50 #define HIF_TYPE_AR6004   5
51 #define HIF_TYPE_AR9888   6
52 #define HIF_TYPE_AR6320   7
53 #define HIF_TYPE_AR6320V2 8
54 /* For attaching Peregrine 2.0 board host_reg_tbl only */
55 #define HIF_TYPE_AR9888V2 9
56 #define HIF_TYPE_ADRASTEA 10
57 #define HIF_TYPE_AR900B 11
58 #define HIF_TYPE_QCA9984 12
59 #define HIF_TYPE_IPQ4019 13
60 #define HIF_TYPE_QCA9888 14
61 #define HIF_TYPE_QCA8074 15
62 #define HIF_TYPE_QCA6290 16
63 #define HIF_TYPE_QCN7605 17
64 #define HIF_TYPE_QCA6390 18
65 #define HIF_TYPE_QCA8074V2 19
66 #define HIF_TYPE_QCA6018  20
67 #define HIF_TYPE_QCN9000 21
68 #define HIF_TYPE_QCA6490 22
69 #define HIF_TYPE_QCA6750 23
70 #define HIF_TYPE_QCA5018 24
71 
72 #define DMA_COHERENT_MASK_DEFAULT   37
73 
74 #ifdef IPA_OFFLOAD
75 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
76 #endif
77 
/* enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
119 
120 struct CE_state;
121 #define CE_COUNT_MAX 12
122 #define HIF_MAX_GRP_IRQ 16
123 
124 #ifndef HIF_MAX_GROUP
125 #define HIF_MAX_GROUP 7
126 #endif
127 
128 #ifndef NAPI_YIELD_BUDGET_BASED
129 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
130 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
131 #endif
132 #else  /* NAPI_YIELD_BUDGET_BASED */
133 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
134 #endif /* NAPI_YIELD_BUDGET_BASED */
135 
136 #define QCA_NAPI_BUDGET    64
137 #define QCA_NAPI_DEF_SCALE  \
138 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
139 
140 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
141 /* NOTE: "napi->scale" can be changed,
142  * but this does not change the number of buckets
143  */
144 #define QCA_NAPI_NUM_BUCKETS 4
145 
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is reenabled
 * @napi_workdone: cumulative of all work done reported by handler
 * @cpu_corrected: incremented when execution context runs on a different core
 *			than the one that its irq is affined to.
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: longest single poll duration seen so far
 * @poll_time_buckets: histogram of poll times for the napi
 *
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};
174 

/**
 * struct qca_napi_info - per NAPI instance data structure
 * @netdev: dummy net device backing this napi instance
 * @hif_ctx: HIF context this instance belongs to
 * @napi: kernel NAPI context
 * @scale: work scale factor; in the current implementation, though scale is
 *         an instance variable, it is set to the same value for all instances
 * @id: instance id
 * @cpu: cpu this instance is (or was last) affined to
 * @irq: irq line serviced by this instance
 * @cpumask: irq affinity mask for this instance
 * @stats: per-cpu stats, indexed by the core the poll ran on
 * @offld_flush_cb: rx offload (GRO/LRO) flush callback; will only be
 *                  present for data rx CE's
 * @rx_thread_napi: napi context used by the rx thread
 * @rx_thread_netdev: dummy net device for @rx_thread_napi
 * @lro_ctx: LRO context handle
 *
 * This data structure holds stuff per NAPI instance.
 */
struct qca_napi_info {
	struct net_device    netdev; /* dummy net_dev */
	void                 *hif_ctx;
	struct napi_struct   napi;
	uint8_t              scale;   /* currently same on all instances */
	uint8_t              id;
	uint8_t              cpu;
	int                  irq;
	cpumask_t            cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct   rx_thread_napi;
	struct net_device    rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t        lro_ctx;
};
201 
/* enum qca_napi_tput_state - throughput state used for irq affinity and
 * clock voting decisions (see napi_mode in struct qca_napi_data)
 */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};
/* enum qca_napi_cpu_state - hotplug state of a core tracked in the
 * napi cpu table (struct qca_napi_cpu)
 */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };
211 
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state:       hotplug state of this core
 * @core_id:     physical core id of the core
 * @cluster_id:  cluster this core belongs to
 * @core_mask:   mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq:    maximum clock this core can be clocked at
 *               same for all cpus of the same core.
 * @napis:       bitmap of napi instances on this core
 * @execs:       bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int			core_id;
	int			cluster_id;
	cpumask_t		core_mask;
	cpumask_t		thread_mask;
	unsigned int		max_freq;
	uint32_t		napis;
	uint32_t		execs;
	int			cluster_nxt;  /* index, not pointer */
};
242 
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: per-CE napi instances, indexed by pipe_id
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: presumably head index (into @napi_cpu) of the little-cluster
 *              chain linked via cluster_nxt — TODO confirm
 * @bigcl_head: presumably head index of the big-cluster chain — TODO confirm
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: napi feature flags
 */
struct qca_napi_data {
	struct               hif_softc *hif_softc;
	qdf_spinlock_t       lock;
	uint32_t             state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t             ce_map;
	uint32_t             exec_map;
	uint32_t             user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu  napi_cpu[NR_CPUS];
	int                  lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t              flags;
};
275 
/**
 * struct hif_config_info - Place Holder for HIF configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable Runtime PM (only with FEATURE_RUNTIME_PM)
 * @runtime_pm_delay: Runtime PM Delay (only with FEATURE_RUNTIME_PM)
 * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
 *
 * Structure for holding HIF ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	uint8_t enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
	uint64_t rx_softirq_max_yield_duration_ns;
};
293 
/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: pointer to hardware name string (ownership not shown here —
 *           presumably points at a static table entry; verify before freeing)
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};
311 
/* Opaque handle type for a HIF instance; deliberately has no members so
 * clients cannot poke at HIF internals.
 */
struct hif_opaque_softc {
};
314 
/**
 * enum hif_event_type - Type of DP events to be recorded
 * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
 * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event
 * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event
 * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
 * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
 * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
 */
enum hif_event_type {
	HIF_EVENT_IRQ_TRIGGER,
	HIF_EVENT_TIMER_ENTRY,
	HIF_EVENT_TIMER_EXIT,
	HIF_EVENT_BH_SCHED,
	HIF_EVENT_SRNG_ACCESS_START,
	HIF_EVENT_SRNG_ACCESS_END,
};
332 
333 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
334 
335 /* HIF_EVENT_HIST_MAX should always be power of 2 */
336 #define HIF_EVENT_HIST_MAX		512
337 #define HIF_NUM_INT_CONTEXTS		HIF_MAX_GROUP
338 #define HIF_EVENT_HIST_DISABLE_MASK	0
339 
/**
 * struct hif_event_record - an entry of the DP event history
 * @hal_ring_id: ring id for which event is recorded
 * @hp: head pointer of the ring (may not be applicable for all events)
 * @tp: tail pointer of the ring (may not be applicable for all events)
 * @cpu_id: cpu id on which the event occurred
 * @timestamp: timestamp when event occurred
 * @type: type of the event
 *
 * This structure represents the information stored for every datapath
 * event which is logged in the history.
 */
struct hif_event_record {
	uint8_t hal_ring_id;
	uint32_t hp;
	uint32_t tp;
	int cpu_id;
	uint64_t timestamp;
	enum hif_event_type type;
};
360 
/**
 * struct hif_event_history - history for one interrupt group
 * @index: index to store new event (atomic; HIF_EVENT_HIST_MAX is a power
 *         of 2 so the writer can mask rather than mod)
 * @event: circular buffer of event entries
 *
 * This structure represents the datapath history for one
 * interrupt group.
 */
struct hif_event_history {
	qdf_atomic_t index;
	struct hif_event_record event[HIF_EVENT_HIST_MAX];
};
373 
374 /**
375  * hif_hist_record_event() - Record one datapath event in history
376  * @hif_ctx: HIF opaque context
377  * @event: DP event entry
378  * @intr_grp_id: interrupt group ID registered with hif
379  *
380  * Return: None
381  */
382 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
383 			   struct hif_event_record *event,
384 			   uint8_t intr_grp_id);
385 
386 /**
387  * hif_event_history_init() - Initialize SRNG event history buffers
388  * @hif_ctx: HIF opaque context
389  * @id: context group ID for which history is recorded
390  *
391  * Returns: None
392  */
393 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id);
394 
395 /**
396  * hif_event_history_deinit() - De-initialize SRNG event history buffers
397  * @hif_ctx: HIF opaque context
398  * @id: context group ID for which history is recorded
399  *
400  * Returns: None
401  */
402 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id);
403 
404 /**
405  * hif_record_event() - Wrapper function to form and record DP event
406  * @hif_ctx: HIF opaque context
407  * @intr_grp_id: interrupt group ID registered with hif
408  * @hal_ring_id: ring id for which event is recorded
409  * @hp: head pointer index of the srng
410  * @tp: tail pointer index of the srng
411  * @type: type of the event to be logged in history
412  *
413  * Return: None
414  */
415 static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
416 				    uint8_t intr_grp_id,
417 				    uint8_t hal_ring_id,
418 				    uint32_t hp,
419 				    uint32_t tp,
420 				    enum hif_event_type type)
421 {
422 	struct hif_event_record event;
423 
424 	event.hal_ring_id = hal_ring_id;
425 	event.hp = hp;
426 	event.tp = tp;
427 	event.type = type;
428 
429 	hif_hist_record_event(hif_ctx, &event, intr_grp_id);
430 
431 	return;
432 }
433 
434 #else
435 
/* No-op stub: DP event history compiled out (WLAN_FEATURE_DP_EVENT_HISTORY) */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}
444 
/* No-op stub: DP event history compiled out */
static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx,
					  uint8_t id)
{
}
449 
/* No-op stub: DP event history compiled out */
static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx,
					    uint8_t id)
{
}
454 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
455 
/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
 *
 * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
 *                         platform-specific measures to completely power-off
 *                         the module and associated hardware (i.e. cut power
 *                         supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};
472 
/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 * @HIF_ENABLE_TYPE_MAX: count sentinel, not a valid trigger
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};
484 
/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 * @HIF_DISABLE_TYPE_MAX: count sentinel, not a valid trigger
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get fifo address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
536 
537 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/* One entry of the PCIe register access debug log:
 * @seqnum:   sequence number of the access
 * @is_write: true for a write access, false for a read
 * @addr:     register address accessed
 * @value:    value written or read
 */
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
544 #endif
545 
546 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
547 		uint32_t value);
548 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
549 
550 #define HIF_MAX_DEVICES                 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context:          context to pass to the @dsr_handler
 *                    note : @rw_compl_handler is provided the context
 *                    passed to hif_read_write
 * @rw_compl_handler: Read / write completion handler
 * @dsr_handler:      DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};
564 
/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 * @get_bandwidth_level: Query current bandwidth level for the driver
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
	int (*get_bandwidth_level)(void *context);
};
585 
586 /* This API detaches the HTC layer from the HIF device */
587 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
588 
589 /****************************************************************/
590 /* BMI and Diag window abstraction                              */
591 /****************************************************************/
592 
593 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
594 
595 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
596 				     * handled atomically by
597 				     * DiagRead/DiagWrite
598 				     */
599 
600 #ifdef WLAN_FEATURE_BMI
601 /*
602  * API to handle HIF-specific BMI message exchanges, this API is synchronous
603  * and only allowed to be called from a context that can block (sleep)
604  */
605 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
606 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
607 				uint8_t *pSendMessage, uint32_t Length,
608 				uint8_t *pResponseMessage,
609 				uint32_t *pResponseLength, uint32_t TimeoutMS);
610 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
611 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
612 #else /* WLAN_FEATURE_BMI */
/* No-op stub: BMI support compiled out (WLAN_FEATURE_BMI) */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}
617 
/* Stub: BMI is never needed when WLAN_FEATURE_BMI is compiled out */
static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
623 #endif /* WLAN_FEATURE_BMI */
624 
625 /*
626  * APIs to handle HIF specific diagnostic read accesses. These APIs are
627  * synchronous and only allowed to be called from a context that
628  * can block (sleep). They are not high performance APIs.
629  *
630  * hif_diag_read_access reads a 4 Byte aligned/length value from a
631  * Target register or memory word.
632  *
633  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
634  */
635 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
636 				uint32_t address, uint32_t *data);
637 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
638 		      uint8_t *data, int nbytes);
639 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
640 			void *ramdump_base, uint32_t address, uint32_t size);
641 /*
642  * APIs to handle HIF specific diagnostic write accesses. These APIs are
643  * synchronous and only allowed to be called from a context that
644  * can block (sleep).
645  * They are not high performance APIs.
646  *
647  * hif_diag_write_access writes a 4 Byte aligned/length value to a
648  * Target register or memory word.
649  *
650  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
651  */
652 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
653 				 uint32_t address, uint32_t data);
654 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
655 			uint32_t address, uint8_t *data, int nbytes);
656 
657 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
658 
659 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
660 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
661 
662 /*
663  * Set the FASTPATH_mode_on flag in sc, for use by data path
664  */
665 #ifdef WLAN_FEATURE_FASTPATH
666 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
667 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
668 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
669 
670 /**
671  * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @handler: Callback function
673  * @context: handle for callback function
674  *
675  * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
676  */
677 QDF_STATUS hif_ce_fastpath_cb_register(
678 		struct hif_opaque_softc *hif_ctx,
679 		fastpath_msg_handler handler, void *context);
680 #else
/* Stub: fastpath compiled out (WLAN_FEATURE_FASTPATH); registration
 * always fails so callers fall back to the normal rx path.
 */
static inline QDF_STATUS hif_ce_fastpath_cb_register(
		struct hif_opaque_softc *hif_ctx,
		fastpath_msg_handler handler, void *context)
{
	return QDF_STATUS_E_FAILURE;
}
687 
/* Stub: no CE handle available when fastpath is compiled out */
static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
692 
693 #endif
694 
695 /*
696  * Enable/disable CDC max performance workaround
 * For max-performance set this to 0
698  * To allow SoC to enter sleep set this to 1
699  */
700 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
701 
702 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
703 			     qdf_shared_mem_t **ce_sr,
704 			     uint32_t *ce_sr_ring_size,
705 			     qdf_dma_addr_t *ce_reg_paddr);
706 
/**
 * struct hif_msg_callbacks - List of callbacks - filled in by HTC.
 * @Context: context meaningful to HTC
 * @txCompletionHandler: called on tx completion of a buffer
 * @rxCompletionHandler: called on rx completion of a buffer on a pipe
 * @txResourceAvailHandler: called when tx resources become available
 * @fwEventHandler: called to report a firmware event/status
 * @update_bundle_stats: called to account packets per rx bundle
 */
struct hif_msg_callbacks {
	void *Context;
	/**< context meaningful to HTC */
	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint32_t transferID,
					uint32_t toeplitz_hash_result);
	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint8_t pipeID);
	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
	void (*fwEventHandler)(void *context, QDF_STATUS status);
	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
};
722 
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,  /* target got reset */
	TARGET_STATUS_EJECT,  /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
729 
/**
 * enum hif_attribute_flags: configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor CE and
 *                                no pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};
741 
742 #define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
743 	(attr |= (v & 0x01) << 5)
744 #define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
745 	(attr |= (v & 0x03) << 6)
746 #define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
747 	(attr |= (v & 0x01) << 13)
748 #define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
749 	(attr |= (v & 0x01) << 14)
750 #define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
751 	(attr |= (v & 0x01) << 15)
752 #define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
753 	(attr |= (v & 0x0FFF) << 16)
754 #define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
755 	(attr |= (v & 0x01) << 30)
756 
/* Snapshot of an uplink (host->target) copy-engine pipe's ring state */
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
766 
/* Snapshot of a downlink (target->host) copy-engine pipe's ring state */
struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
776 
/* Additional per-pipe info: PCI memory/control address plus the
 * uplink and downlink ring snapshots for the pipe.
 */
struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
783 
784 #ifdef CONFIG_SLUB_DEBUG_ON
785 #define MSG_FLUSH_NUM 16
786 #else /* PERF build */
787 #define MSG_FLUSH_NUM 32
788 #endif /* SLUB_DEBUG_ON */
789 
790 struct hif_bus_id;
791 
792 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
793 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
794 		     int opcode, void *config, uint32_t config_len);
795 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
796 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
797 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
798 		   struct hif_msg_callbacks *callbacks);
799 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
800 void hif_stop(struct hif_opaque_softc *hif_ctx);
801 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
802 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
803 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
804 		      uint8_t cmd_id, bool start);
805 
806 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
807 				  uint32_t transferID, uint32_t nbytes,
808 				  qdf_nbuf_t wbuf, uint32_t data_attr);
809 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
810 			     int force);
811 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
812 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
813 			  uint8_t *DLPipe);
814 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
815 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
816 			int *dl_is_polled);
817 uint16_t
818 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
819 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
820 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
821 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
822 		     bool wait_for_it);
823 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
824 #ifndef HIF_PCI
/* Stub: SoC status check only applies to PCI; report success otherwise */
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
829 #else
830 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
831 #endif
832 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
833 			u32 *revision, const char **target_name);
834 
835 #ifdef RECEIVE_OFFLOAD
836 /**
837  * hif_offld_flush_cb_register() - Register the offld flush callback
838  * @scn: HIF opaque context
 * @offld_flush_handler: Flush callback is either ol_flush, in case of rx_thread
840  *			 Or GRO/LRO flush when RxThread is not enabled. Called
841  *			 with corresponding context for flush.
842  * Return: None
843  */
844 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
845 				 void (offld_flush_handler)(void *ol_ctx));
846 
847 /**
848  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
849  * @scn: HIF opaque context
850  *
851  * Return: None
852  */
853 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
854 #endif
855 
856 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
857 /**
858  * hif_exec_should_yield() - Check if hif napi context should yield
859  * @hif_ctx - HIF opaque context
860  * @grp_id - grp_id of the napi for which check needs to be done
861  *
862  * The function uses grp_id to look for NAPI and checks if NAPI needs to
863  * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
864  * yield decision.
865  *
866  * Return: true if NAPI needs to yield, else false
867  */
868 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
869 #else
/* Stub: without WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT napi never yields early */
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
					 uint grp_id)
{
	return false;
}
875 #endif
876 
877 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
878 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
879 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
880 				      int htc_htt_tx_endpoint);
881 
882 /**
883  * hif_open() - Create hif handle
884  * @qdf_ctx: qdf context
885  * @mode: Driver Mode
886  * @bus_type: Bus Type
887  * @cbk: CDS Callbacks
888  * @psoc: psoc object manager
889  *
890  * API to open HIF Context
891  *
892  * Return: HIF Opaque Pointer
893  */
894 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
895 				  uint32_t mode,
896 				  enum qdf_bus_type bus_type,
897 				  struct hif_driver_state_callbacks *cbk,
898 				  struct wlan_objmgr_psoc *psoc);
899 
900 void hif_close(struct hif_opaque_softc *hif_ctx);
901 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
902 		      void *bdev, const struct hif_bus_id *bid,
903 		      enum qdf_bus_type bus_type,
904 		      enum hif_enable_type type);
905 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
906 #ifdef CE_TASKLET_DEBUG_ENABLE
907 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
908 				 uint8_t value);
909 #endif
910 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
911 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
912 
913 /**
914  * enum wlan_rtpm_dbgid - runtime pm put/get debug id
915  * @RTPM_ID_RESVERD:       Reserved
916  * @RTPM_ID_WMI:           WMI sending msg, expect put happen at
917  *                         tx completion from CE level directly.
918  * @RTPM_ID_HTC:           pkt sending by HTT_DATA_MSG_SVC, expect
919  *                         put from fw response or just in
920  *                         htc_issue_packets
921  * @RTPM_ID_QOS_NOTIFY:    pm qos notifer
922  * @RTPM_ID_DP_TX_DESC_ALLOC_FREE:      tx desc alloc/free
923  * @RTPM_ID_CE_SEND_FAST:  operation in ce_send_fast, not include
924  *                         the pkt put happens outside this function
925  * @RTPM_ID_SUSPEND_RESUME:     suspend/resume in hdd
926  * @RTPM_ID_DW_TX_HW_ENQUEUE:   operation in functin dp_tx_hw_enqueue
927  * @RTPM_ID_HAL_REO_CMD:        HAL_REO_CMD operation
928  * @RTPM_ID_DP_PRINT_RING_STATS:  operation in dp_print_ring_stats
929  */
930 /* New value added to the enum must also be reflected in function
931  *  rtpm_string_from_dbgid()
932  */
933 typedef enum {
934 	RTPM_ID_RESVERD   = 0,
935 	RTPM_ID_WMI       = 1,
936 	RTPM_ID_HTC       = 2,
937 	RTPM_ID_QOS_NOTIFY  = 3,
938 	RTPM_ID_DP_TX_DESC_ALLOC_FREE  = 4,
939 	RTPM_ID_CE_SEND_FAST       = 5,
940 	RTPM_ID_SUSPEND_RESUME     = 6,
941 	RTPM_ID_DW_TX_HW_ENQUEUE   = 7,
942 	RTPM_ID_HAL_REO_CMD        = 8,
943 	RTPM_ID_DP_PRINT_RING_STATS  = 9,
944 
945 	RTPM_ID_MAX,
946 } wlan_rtpm_dbgid;
947 
948 /**
949  * rtpm_string_from_dbgid() - Convert dbgid to respective string
950  * @id -  debug id
951  *
952  * Debug support function to convert  dbgid to string.
953  * Please note to add new string in the array at index equal to
954  * its enum value in wlan_rtpm_dbgid.
955  */
956 static inline char *rtpm_string_from_dbgid(wlan_rtpm_dbgid id)
957 {
958 	static const char *strings[] = { "RTPM_ID_RESVERD",
959 					"RTPM_ID_WMI",
960 					"RTPM_ID_HTC",
961 					"RTPM_ID_QOS_NOTIFY",
962 					"RTPM_ID_DP_TX_DESC_ALLOC_FREE",
963 					"RTPM_ID_CE_SEND_FAST",
964 					"RTPM_ID_SUSPEND_RESUME",
965 					"RTPM_ID_DW_TX_HW_ENQUEUE",
966 					"RTPM_ID_HAL_REO_CMD",
967 					"RTPM_ID_DP_PRINT_RING_STATS",
968 					"RTPM_ID_MAX"};
969 
970 	return (char *)strings[id];
971 }
972 
973 #ifdef FEATURE_RUNTIME_PM
974 struct hif_pm_runtime_lock;
975 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
976 int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
977 			    wlan_rtpm_dbgid rtpm_dbgid);
978 int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
979 				    wlan_rtpm_dbgid rtpm_dbgid);
980 int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx);
981 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx,
982 		       wlan_rtpm_dbgid rtpm_dbgid);
983 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
984 				 wlan_rtpm_dbgid rtpm_dbgid);
985 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx,
986 		       wlan_rtpm_dbgid rtpm_dbgid);
987 int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
988 			      wlan_rtpm_dbgid rtpm_dbgid);
989 void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx);
990 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
991 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
992 			struct hif_pm_runtime_lock *lock);
993 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
994 		struct hif_pm_runtime_lock *lock);
995 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
996 		struct hif_pm_runtime_lock *lock);
997 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
998 		struct hif_pm_runtime_lock *lock, unsigned int delay);
999 bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx);
1000 int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx);
1001 void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
1002 					  int val);
1003 void hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx);
1004 void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
1005 int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
1006 qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx);
1007 int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx);
1008 #else
1009 struct hif_pm_runtime_lock {
1010 	const char *name;
1011 };
1012 static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
1013 static inline int
1014 hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
1015 			wlan_rtpm_dbgid rtpm_dbgid)
1016 { return 0; }
1017 static inline int
1018 hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
1019 				wlan_rtpm_dbgid rtpm_dbgid)
1020 { return 0; }
1021 static inline int
1022 hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
1023 { return 0; }
1024 static inline void
1025 hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
1026 			    wlan_rtpm_dbgid rtpm_dbgid)
1027 {}
1028 
1029 static inline int
1030 hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx, wlan_rtpm_dbgid rtpm_dbgid)
1031 { return 0; }
1032 static inline int
1033 hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx, wlan_rtpm_dbgid rtpm_dbgid)
1034 { return 0; }
1035 static inline int
1036 hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
1037 			  wlan_rtpm_dbgid rtpm_dbgid)
1038 { return 0; }
1039 static inline void
1040 hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx) {};
1041 static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
1042 					const char *name)
1043 { return 0; }
1044 static inline void
1045 hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
1046 			struct hif_pm_runtime_lock *lock) {}
1047 
1048 static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
1049 		struct hif_pm_runtime_lock *lock)
1050 { return 0; }
1051 static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
1052 		struct hif_pm_runtime_lock *lock)
1053 { return 0; }
1054 static inline int
1055 hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
1056 		struct hif_pm_runtime_lock *lock, unsigned int delay)
1057 { return 0; }
1058 static inline bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
1059 { return false; }
1060 static inline int
1061 hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
1062 { return 0; }
1063 static inline void
1064 hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx, int val)
1065 { return; }
1066 static inline void
1067 hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx)
1068 { return; }
1069 static inline void
1070 hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx) {};
1071 static inline int
1072 hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
1073 { return 0; }
1074 static inline qdf_time_t
1075 hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
1076 { return 0; }
1077 static inline int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx)
1078 { return 0; }
1079 #endif
1080 
1081 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
1082 				 bool is_packet_log_enabled);
1083 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
1084 
1085 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
1086 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
1087 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
1088 
1089 #ifdef IPA_OFFLOAD
1090 /**
1091  * hif_get_ipa_hw_type() - get IPA hw type
1092  *
1093  * This API return the IPA hw type.
1094  *
1095  * Return: IPA hw type
1096  */
1097 static inline
1098 enum ipa_hw_type hif_get_ipa_hw_type(void)
1099 {
1100 	return ipa_get_hw_type();
1101 }
1102 
1103 /**
1104  * hif_get_ipa_present() - get IPA hw status
1105  *
1106  * This API return the IPA hw status.
1107  *
1108  * Return: true if IPA is present or false otherwise
1109  */
1110 static inline
1111 bool hif_get_ipa_present(void)
1112 {
1113 	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
1114 		return true;
1115 	else
1116 		return false;
1117 }
1118 #endif
/* hif_bus_resume() - resume the bus after a full suspend */
int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
/**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
 *
 * Return: 0 on success; negative value otherwise — TODO confirm convention
 */
int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_late_resume() - resume non wmi traffic
 * @hif_ctx: hif context
 *
 * Return: 0 on success; negative value otherwise — TODO confirm convention
 */
int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
/* Full bus suspend plus the noirq-phase suspend/resume entry points;
 * presumably same return convention as the early/late variants — verify.
 */
int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
1134 
1135 /**
1136  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
1137  * @hif_ctx: an opaque HIF handle to use
1138  *
1139  * As opposed to the standard hif_irq_enable, this function always applies to
1140  * the APPS side kernel interrupt handling.
1141  *
1142  * Return: errno
1143  */
1144 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
1145 
1146 /**
1147  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
1148  * @hif_ctx: an opaque HIF handle to use
1149  *
1150  * As opposed to the standard hif_irq_disable, this function always applies to
1151  * the APPS side kernel interrupt handling.
1152  *
1153  * Return: errno
1154  */
1155 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
1156 
1157 /**
1158  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
1159  * @hif_ctx: an opaque HIF handle to use
1160  *
1161  * As opposed to the standard hif_irq_enable, this function always applies to
1162  * the APPS side kernel interrupt handling.
1163  *
1164  * Return: errno
1165  */
1166 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
1167 
1168 /**
1169  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
1170  * @hif_ctx: an opaque HIF handle to use
1171  *
1172  * As opposed to the standard hif_irq_disable, this function always applies to
1173  * the APPS side kernel interrupt handling.
1174  *
1175  * Return: errno
1176  */
1177 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
1178 
1179 /**
1180  * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side
1181  * @hif_ctx: an opaque HIF handle to use
1182  *
1183  * This function always applies to the APPS side kernel interrupt handling
1184  * to wake the system from suspend.
1185  *
1186  * Return: errno
1187  */
1188 int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx);
1189 
1190 /**
1191  * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side
1192  * @hif_ctx: an opaque HIF handle to use
1193  *
1194  * This function always applies to the APPS side kernel interrupt handling
1195  * to disable the wake irq.
1196  *
1197  * Return: errno
1198  */
1199 int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx);
1200 
1201 #ifdef FEATURE_RUNTIME_PM
1202 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1203 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
1204 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1205 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
1206 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
1207 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
1208 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
1209 #endif
1210 
1211 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
1212 int hif_dump_registers(struct hif_opaque_softc *scn);
1213 int ol_copy_ramdump(struct hif_opaque_softc *scn);
1214 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
1215 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1216 		     u32 *revision, const char **target_name);
1217 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
1218 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
1219 						   scn);
1220 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
1221 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
1222 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
1223 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1224 			   hif_target_status);
1225 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1226 			 struct hif_config_info *cfg);
1227 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
1228 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1229 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
1230 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1231 			   uint32_t transfer_id, u_int32_t len);
1232 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
1233 	uint32_t transfer_id, uint32_t download_len);
1234 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
1235 void hif_ce_war_disable(void);
1236 void hif_ce_war_enable(void);
1237 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
1238 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
1239 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
1240 		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
1241 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
1242 		uint32_t pipe_num);
1243 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
1244 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
1245 
1246 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
1247 				int rx_bundle_cnt);
1248 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
1249 
1250 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
1251 
1252 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
1253 
1254 enum hif_exec_type {
1255 	HIF_EXEC_NAPI_TYPE,
1256 	HIF_EXEC_TASKLET_TYPE,
1257 };
1258 
1259 typedef uint32_t (*ext_intr_handler)(void *, uint32_t);
1260 
1261 /**
1262  * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
1263  * @softc: hif opaque context owning the exec context
1264  * @id: the id of the interrupt context
1265  *
1266  * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
1267  *         'id' registered with the OS
1268  */
1269 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
1270 				uint8_t id);
1271 
1272 /**
1273  * hif_configure_ext_group_interrupts() - Congigure ext group intrrupts
1274  * @hif_ctx: hif opaque context
1275  *
1276  * Return: QDF_STATUS
1277  */
1278 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
1279 
1280 /**
1281  * hif_register_ext_group() - API to register external group
1282  * interrupt handler.
1283  * @hif_ctx : HIF Context
1284  * @numirq: number of irq's in the group
1285  * @irq: array of irq values
1286  * @handler: callback interrupt handler function
1287  * @cb_ctx: context to passed in callback
1288  * @type: napi vs tasklet
1289  *
1290  * Return: QDF_STATUS
1291  */
1292 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
1293 				  uint32_t numirq, uint32_t irq[],
1294 				  ext_intr_handler handler,
1295 				  void *cb_ctx, const char *context_name,
1296 				  enum hif_exec_type type, uint32_t scale);
1297 
1298 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
1299 				const char *context_name);
1300 
1301 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1302 				u_int8_t pipeid,
1303 				struct hif_msg_callbacks *callbacks);
1304 
1305 /**
1306  * hif_print_napi_stats() - Display HIF NAPI stats
1307  * @hif_ctx - HIF opaque context
1308  *
1309  * Return: None
1310  */
1311 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
1312 
1313 /* hif_clear_napi_stats() - function clears the stats of the
1314  * latency when called.
1315  * @hif_ctx - the HIF context to assign the callback to
1316  *
1317  * Return: None
1318  */
1319 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
1320 
1321 #ifdef __cplusplus
1322 }
1323 #endif
1324 
1325 #ifdef FORCE_WAKE
1326 /**
1327  * hif_force_wake_request() - Function to wake from power collapse
1328  * @handle: HIF opaque handle
1329  *
1330  * Description: API to check if the device is awake or not before
1331  * read/write to BAR + 4K registers. If device is awake return
1332  * success otherwise write '1' to
1333  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG which will interrupt
1334  * the device and does wakeup the PCI and MHI within 50ms
1335  * and then the device writes a value to
1336  * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
1337  * handshake process to let the host know the device is awake.
1338  *
1339  * Return: zero - success/non-zero - failure
1340  */
1341 int hif_force_wake_request(struct hif_opaque_softc *handle);
1342 
1343 /**
1344  * hif_force_wake_release() - API to release/reset the SOC wake register
1345  * from interrupting the device.
1346  * @handle: HIF opaque handle
1347  *
1348  * Description: API to set the
1349  * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
1350  * to release the interrupt line.
1351  *
1352  * Return: zero - success/non-zero - failure
1353  */
1354 int hif_force_wake_release(struct hif_opaque_softc *handle);
1355 #else
1356 static inline
1357 int hif_force_wake_request(struct hif_opaque_softc *handle)
1358 {
1359 	return 0;
1360 }
1361 
1362 static inline
1363 int hif_force_wake_release(struct hif_opaque_softc *handle)
1364 {
1365 	return 0;
1366 }
1367 #endif /* FORCE_WAKE */
1368 
1369 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
1370 /**
1371  * hif_prevent_link_low_power_states() - Prevent from going to low power states
1372  * @hif - HIF opaque context
1373  *
1374  * Return: 0 on success. Error code on failure.
1375  */
1376 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);
1377 
1378 /**
1379  * hif_allow_link_low_power_states() - Allow link to go to low power states
1380  * @hif - HIF opaque context
1381  *
1382  * Return: None
1383  */
1384 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);
1385 
1386 #else
1387 
1388 static inline
1389 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
1390 {
1391 	return 0;
1392 }
1393 
1394 static inline
1395 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
1396 {
1397 }
1398 #endif
1399 
1400 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
1401 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle);
1402 
1403 /**
1404  * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
1405  * @hif_ctx - the HIF context to assign the callback to
1406  * @callback - the callback to assign
1407  * @priv - the private data to pass to the callback when invoked
1408  *
1409  * Return: None
1410  */
1411 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
1412 			       void (*callback)(void *),
1413 			       void *priv);
1414 /*
1415  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
1416  * for defined here
1417  */
1418 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1419 ssize_t hif_dump_desc_trace_buf(struct device *dev,
1420 				struct device_attribute *attr, char *buf);
1421 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1422 					const char *buf, size_t size);
1423 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
1424 				const char *buf, size_t size);
1425 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
1426 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
1427 #endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
1428 
1429 /**
1430  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
1431  * @hif: hif context
1432  * @ce_service_max_yield_time: CE service max yield time to set
1433  *
1434  * This API storess CE service max yield time in hif context based
1435  * on ini value.
1436  *
1437  * Return: void
1438  */
1439 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
1440 				       uint32_t ce_service_max_yield_time);
1441 
1442 /**
1443  * hif_get_ce_service_max_yield_time() - get CE service max yield time
1444  * @hif: hif context
1445  *
1446  * This API returns CE service max yield time.
1447  *
1448  * Return: CE service max yield time
1449  */
1450 unsigned long long
1451 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
1452 
1453 /**
1454  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
1455  * @hif: hif context
1456  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
1457  *
1458  * This API stores CE service max rx ind flush in hif context based
1459  * on ini value.
1460  *
1461  * Return: void
1462  */
1463 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
1464 					 uint8_t ce_service_max_rx_ind_flush);
1465 
#ifdef OL_ATH_SMART_LOGGING
/**
 * hif_log_dump_ce() - Copy all the CE DEST ring to buf
 * @scn: HIF handler
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy Engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and buffers pointed by them in to the given buf
 *
 * Return: Current pointer in ring buffer
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
			 uint8_t *buf_init, uint32_t buf_sz,
			 uint32_t ce, uint32_t skb_sz);
#endif /* OL_ATH_SMART_LOGGING */
1485 
1486 /*
1487  * hif_softc_to_hif_opaque_softc - API to convert hif_softc handle
1488  * to hif_opaque_softc handle
1489  * @hif_handle - hif_softc type
1490  *
1491  * Return: hif_opaque_softc type
1492  */
1493 static inline struct hif_opaque_softc *
1494 hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
1495 {
1496 	return (struct hif_opaque_softc *)hif_handle;
1497 }
1498 
1499 #ifdef FORCE_WAKE
1500 /**
1501  * hif_srng_init_phase(): Indicate srng initialization phase
1502  * to avoid force wake as UMAC power collapse is not yet
1503  * enabled
1504  * @hif_ctx: hif opaque handle
1505  * @init_phase: initialization phase
1506  *
1507  * Return:  None
1508  */
1509 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
1510 			 bool init_phase);
1511 #else
1512 static inline
1513 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
1514 			 bool init_phase)
1515 {
1516 }
1517 #endif /* FORCE_WAKE */
1518 
1519 #ifdef HIF_IPCI
1520 /**
1521  * hif_shutdown_notifier_cb - Call back for shutdown notifier
1522  * @ctx: hif handle
1523  *
1524  * Return:  None
1525  */
1526 void hif_shutdown_notifier_cb(void *ctx);
1527 #else
1528 static inline
1529 void hif_shutdown_notifier_cb(void *ctx)
1530 {
1531 }
1532 #endif /* HIF_IPCI */
1533 
1534 #ifdef HIF_CE_LOG_INFO
1535 /**
1536  * hif_log_ce_info() - API to log ce info
1537  * @scn: hif handle
1538  * @data: hang event data buffer
1539  * @offset: offset at which data needs to be written
1540  *
1541  * Return:  None
1542  */
1543 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
1544 		     unsigned int *offset);
1545 #else
1546 static inline
1547 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
1548 		     unsigned int *offset)
1549 {
1550 }
1551 #endif
1552 
1553 #ifdef HIF_CPU_PERF_AFFINE_MASK
1554 /**
1555  * hif_config_irq_set_perf_affinity_hint() - API to set affinity
1556  * @hif_ctx: hif opaque handle
1557  *
1558  * This function is used to move the WLAN IRQs to perf cores in
1559  * case of defconfig builds.
1560  *
1561  * Return:  None
1562  */
1563 void hif_config_irq_set_perf_affinity_hint(
1564 	struct hif_opaque_softc *hif_ctx);
1565 
1566 #else
1567 static inline void hif_config_irq_set_perf_affinity_hint(
1568 	struct hif_opaque_softc *hif_ctx)
1569 {
1570 }
1571 #endif
1572 #endif /* _HIF_H_ */
1573