xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision 27d564647e9b50e713c60b0d7e5ea2a9b0a3ae74)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _HIF_H_
20 #define _HIF_H_
21 
22 #ifdef __cplusplus
23 extern "C" {
24 #endif /* __cplusplus */
25 
26 /* Header files */
27 #include <qdf_status.h>
28 #include "qdf_nbuf.h"
29 #include "qdf_lro.h"
30 #include "ol_if_athvar.h"
31 #include <linux/platform_device.h>
32 #ifdef HIF_PCI
33 #include <linux/pci.h>
34 #endif /* HIF_PCI */
35 #ifdef HIF_USB
36 #include <linux/usb.h>
37 #endif /* HIF_USB */
38 #ifdef IPA_OFFLOAD
39 #include <linux/ipa.h>
40 #endif
41 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
42 
43 typedef void __iomem *A_target_id_t;
44 typedef void *hif_handle_t;
45 
46 #define HIF_TYPE_AR6002   2
47 #define HIF_TYPE_AR6003   3
48 #define HIF_TYPE_AR6004   5
49 #define HIF_TYPE_AR9888   6
50 #define HIF_TYPE_AR6320   7
51 #define HIF_TYPE_AR6320V2 8
52 /* For attaching Peregrine 2.0 board host_reg_tbl only */
53 #define HIF_TYPE_AR9888V2 9
54 #define HIF_TYPE_ADRASTEA 10
55 #define HIF_TYPE_AR900B 11
56 #define HIF_TYPE_QCA9984 12
57 #define HIF_TYPE_IPQ4019 13
58 #define HIF_TYPE_QCA9888 14
59 #define HIF_TYPE_QCA8074 15
60 #define HIF_TYPE_QCA6290 16
61 #define HIF_TYPE_QCN7605 17
62 #define HIF_TYPE_QCA6390 18
63 #define HIF_TYPE_QCA8074V2 19
64 
65 #ifdef IPA_OFFLOAD
66 #define DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE   37
67 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
68 #endif
69 
/* enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 * (first entry is pinned to 16; the rest follow sequentially)
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
111 
112 struct CE_state;
113 #define CE_COUNT_MAX 12
114 #define HIF_MAX_GRP_IRQ 16
115 #define HIF_MAX_GROUP 8
116 
117 #ifdef CONFIG_SLUB_DEBUG_ON
118 #ifndef CONFIG_WIN
119 #define HIF_CONFIG_SLUB_DEBUG_ON
120 #endif
121 #endif
122 
123 #ifndef NAPI_YIELD_BUDGET_BASED
124 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
125 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
126 #else  /* PERF build */
127 #ifdef CONFIG_WIN
128 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
129 #else
130 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4
131 #endif /* CONFIG_WIN */
132 #endif /* SLUB_DEBUG_ON */
133 #else  /* NAPI_YIELD_BUDGET_BASED */
134 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
135 #endif /* NAPI_YIELD_BUDGET_BASED */
136 #define QCA_NAPI_BUDGET    64
137 #define QCA_NAPI_DEF_SCALE  \
138 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
139 
140 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
141 /* NOTE: "napi->scale" can be changed,
142  * but this does not change the number of buckets
143  */
144 #define QCA_NAPI_NUM_BUCKETS 4
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is reenabled
 * @napi_workdone: cumulative of all work done reported by handler
 * @cpu_corrected: incremented when execution context runs on a different core
 *			than the one that its irq is affined to.
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: longest single poll observed (units not established
 *			here; TODO confirm against the implementation)
 *
 * needs to be renamed
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
};
170 
171 
/**
 * struct qca_napi_info - per NAPI instance data structure
 * @netdev: dummy net_dev the napi instance is attached to
 * @hif_ctx: opaque hif context this instance belongs to
 * @napi: the kernel napi context
 * @scale: poll scaling factor; currently set to the same value on all
 *	instances even though it is per-instance
 * @id: instance identifier (presumably the id returned by create;
 *	TODO confirm)
 * @irq: irq number serviced by this instance
 * @cpu: cpu associated with this instance (see struct qca_napi_cpu table)
 * @stats: per-cpu statistics for this instance
 * @offld_flush_cb: rx offload flush callback; only present for data rx CE's
 * @rx_thread_napi: napi context used by the rx thread (RECEIVE_OFFLOAD only)
 * @rx_thread_netdev: dummy net_dev for @rx_thread_napi
 * @lro_ctx: LRO context handle
 *
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device    netdev; /* dummy net_dev */
	void                 *hif_ctx;
	struct napi_struct   napi;
	uint8_t              scale;   /* currently same on all instances */
	uint8_t              id;
	uint8_t              cpu;
	int                  irq;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct   rx_thread_napi;
	struct net_device    rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t        lro_ctx;
};
196 
/* throughput states used to drive irq affinity & clock voting decisions
 * (see qca_napi_data.napi_mode)
 */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};
/* online/offline state of a cpu as tracked in the napi cpu table */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };
206 
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state:       up/down state of this core (see enum qca_napi_cpu_state)
 * @core_id:     physical core id of the core
 * @cluster_id:  cluster this core belongs to
 * @core_mask:   mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq:    maximum clock this core can be clocked at
 *               same for all cpus of the same core.
 * @napis:       bitmap of napi instances on this core
 * @execs:       bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *               (an index into the table, not a pointer)
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int			core_id;
	int			cluster_id;
	cpumask_t		core_mask;
	cpumask_t		thread_mask;
	unsigned int		max_freq;
	uint32_t		napis;
	uint32_t		execs;
	int			cluster_nxt;  /* index, not pointer */
};
237 
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: per-CE napi instance pointers, indexed by pipe_id
 * @napi_cpu: cpu info for irq affinty
 * @lilcl_head: head index into @napi_cpu for the little cluster chain
 *	(TODO confirm)
 * @bigcl_head: head index into @napi_cpu for the big cluster chain
 *	(TODO confirm)
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: napi flags (semantics not established in this header; see
 *	the napi implementation)
 */
struct qca_napi_data {
	struct               hif_softc *hif_softc;
	qdf_spinlock_t       lock;
	uint32_t             state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t             ce_map;
	uint32_t             exec_map;
	uint32_t             user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu  napi_cpu[NR_CPUS];
	int                  lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t              flags;
};
270 
/**
 * struct hif_config_info - Place Holder for hif configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable Runtime PM (FEATURE_RUNTIME_PM builds only)
 * @runtime_pm_delay: Runtime PM auto-suspend delay (units not shown here;
 *	TODO confirm — presumably milliseconds)
 *
 * Structure for holding hif ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	bool enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
};
284 
/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: hardware name string
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};
301 
/* Opaque handle to a hif instance. The body is intentionally empty in this
 * header; callers only ever hold pointers to it.
 */
struct hif_opaque_softc {
};
304 
305 /**
306  * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
307  *
308  * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
309  * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
310  *                         minimize power
311  * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
312  *                         platform-specific measures to completely power-off
313  *                         the module and associated hardware (i.e. cut power
314  *                         supplies)
315  */
316 enum HIF_DEVICE_POWER_CHANGE_TYPE {
317 	HIF_DEVICE_POWER_UP,
318 	HIF_DEVICE_POWER_DOWN,
319 	HIF_DEVICE_POWER_CUT
320 };
321 
/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 * @HIF_ENABLE_TYPE_MAX: sentinel; number of enable types
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};
333 
/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 * @HIF_DISABLE_TYPE_MAX: sentinel; number of disable types
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get fifo address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
385 
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/* One entry of the PCIe register access debug log.
 * NOTE(review): "HID" looks like a typo for "HIF", but renaming would break
 * existing users of this struct, so it is left as-is.
 */
struct HID_ACCESS_LOG {
	uint32_t seqnum;	/* sequence number of the access */
	bool is_write;		/* true for a write access, false for a read */
	void *addr;		/* register address accessed */
	uint32_t value;		/* value written or read */
};
#endif
394 
395 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
396 		uint32_t value);
397 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
398 
399 #define HIF_MAX_DEVICES                 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context:          context to pass to the @dsr_handler
 *                    note : @rw_compl_handler is provided the context
 *                    passed to hif_read_write
 * @rw_compl_handler: Read / write completion handler
 * @dsr_handler:      DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};
413 
/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 *
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
};
433 
434 /* This API detaches the HTC layer from the HIF device */
435 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
436 
437 /****************************************************************/
438 /* BMI and Diag window abstraction                              */
439 /****************************************************************/
440 
441 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
442 
443 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
444 				     * handled atomically by
445 				     * DiagRead/DiagWrite
446 				     */
447 
#ifdef WLAN_FEATURE_BMI
/*
 * API to handle HIF-specific BMI message exchanges, this API is synchronous
 * and only allowed to be called from a context that can block (sleep)
 */
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
				uint8_t *pSendMessage, uint32_t Length,
				uint8_t *pResponseMessage,
				uint32_t *pResponseLength, uint32_t TimeoutMS);
void hif_register_bmi_callbacks(struct hif_softc *hif_sc);
bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
#else /* WLAN_FEATURE_BMI */
/* BMI compiled out: callback registration is a no-op */
static inline void
hif_register_bmi_callbacks(struct hif_softc *hif_sc)
{
}

/* BMI compiled out: BMI is never needed */
static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
#endif /* WLAN_FEATURE_BMI */
472 
473 /*
474  * APIs to handle HIF specific diagnostic read accesses. These APIs are
475  * synchronous and only allowed to be called from a context that
476  * can block (sleep). They are not high performance APIs.
477  *
478  * hif_diag_read_access reads a 4 Byte aligned/length value from a
479  * Target register or memory word.
480  *
481  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
482  */
483 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
484 				uint32_t address, uint32_t *data);
485 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
486 		      uint8_t *data, int nbytes);
487 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
488 			void *ramdump_base, uint32_t address, uint32_t size);
489 /*
490  * APIs to handle HIF specific diagnostic write accesses. These APIs are
491  * synchronous and only allowed to be called from a context that
492  * can block (sleep).
493  * They are not high performance APIs.
494  *
495  * hif_diag_write_access writes a 4 Byte aligned/length value to a
496  * Target register or memory word.
497  *
498  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
499  */
500 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
501 				 uint32_t address, uint32_t data);
502 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
503 			uint32_t address, uint8_t *data, int nbytes);
504 
505 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
506 
507 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
508 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
509 
510 /*
511  * Set the FASTPATH_mode_on flag in sc, for use by data path
512  */
#ifdef WLAN_FEATURE_FASTPATH
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler, void *context);
#else
/* fastpath compiled out: callback registration always fails */
static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
					      fastpath_msg_handler handler,
					      void *context)
{
	return QDF_STATUS_E_FAILURE;
}
/* fastpath compiled out: no CE handle available */
static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}

#endif
532 
533 /*
534  * Enable/disable CDC max performance workaround
 * For max-performance set this to 0
536  * To allow SoC to enter sleep set this to 1
537  */
538 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
539 
540 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
541 			     qdf_shared_mem_t **ce_sr,
542 			     uint32_t *ce_sr_ring_size,
543 			     qdf_dma_addr_t *ce_reg_paddr);
544 
/**
 * struct hif_msg_callbacks - List of callbacks - filled in by HTC.
 * @Context: context meaningful to HTC
 * @txCompletionHandler: send completion callback; receives the buffer, its
 *	transfer id and the toeplitz hash result
 * @rxCompletionHandler: receive completion callback; receives the buffer and
 *	the pipe id it arrived on
 * @txResourceAvailHandler: called with the pipe on which tx resources are
 *	available
 * @fwEventHandler: called with a firmware event status
 */
struct hif_msg_callbacks {
	void *Context;
	/**< context meaningful to HTC */
	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint32_t transferID,
					uint32_t toeplitz_hash_result);
	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint8_t pipeID);
	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
	void (*fwEventHandler)(void *context, QDF_STATUS status);
};
559 
/* connection/lifecycle status of the target device */
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,  /* target got reset */
	TARGET_STATUS_EJECT,  /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
566 
/**
 * enum hif_attribute_flags: configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor CE and
 *                                no pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};
578 
579 #define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
580 	(attr |= (v & 0x01) << 5)
581 #define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
582 	(attr |= (v & 0x03) << 6)
583 #define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
584 	(attr |= (v & 0x01) << 13)
585 #define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
586 	(attr |= (v & 0x01) << 14)
587 #define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
588 	(attr |= (v & 0x01) << 15)
589 #define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
590 	(attr |= (v & 0x0FFF) << 16)
591 #define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
592 	(attr |= (v & 0x01) << 30)
593 
/* snapshot of an UL (host->target) pipe's copy-engine ring state */
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

/* snapshot of a DL (target->host) pipe's copy-engine ring state;
 * layout mirrors struct hif_ul_pipe_info
 */
struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

/* additional per-pipe info: register context plus UL/DL ring snapshots */
struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
620 
621 #ifdef CONFIG_SLUB_DEBUG_ON
622 #define MSG_FLUSH_NUM 16
623 #else /* PERF build */
624 #define MSG_FLUSH_NUM 32
625 #endif /* SLUB_DEBUG_ON */
626 
627 struct hif_bus_id;
628 
629 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
630 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
631 		     int opcode, void *config, uint32_t config_len);
632 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
633 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
634 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
635 		   struct hif_msg_callbacks *callbacks);
636 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
637 void hif_stop(struct hif_opaque_softc *hif_ctx);
638 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
639 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
640 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
641 		      uint8_t cmd_id, bool start);
642 
643 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
644 				  uint32_t transferID, uint32_t nbytes,
645 				  qdf_nbuf_t wbuf, uint32_t data_attr);
646 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
647 			     int force);
648 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
649 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
650 			  uint8_t *DLPipe);
651 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
652 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
653 			int *dl_is_polled);
654 uint16_t
655 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
656 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
657 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
658 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
659 		     bool wait_for_it);
660 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
#ifndef HIF_PCI
/* soc status check only applies to PCI; no-op success on other buses */
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
#else
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif
669 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
670 			u32 *revision, const char **target_name);
671 
672 #ifdef RECEIVE_OFFLOAD
673 /**
674  * hif_offld_flush_cb_register() - Register the offld flush callback
675  * @scn: HIF opaque context
676  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
677  *			 Or GRO/LRO flush when RxThread is not enabled. Called
678  *			 with corresponding context for flush.
679  * Return: None
680  */
681 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
682 				 void (offld_flush_handler)(void *ol_ctx));
683 
684 /**
685  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
686  * @scn: HIF opaque context
687  *
688  * Return: None
689  */
690 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
691 #endif
692 
693 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
694 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
695 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
696 				      int htc_htt_tx_endpoint);
697 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode,
698 				  enum qdf_bus_type bus_type,
699 				  struct hif_driver_state_callbacks *cbk);
700 void hif_close(struct hif_opaque_softc *hif_ctx);
701 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
702 		      void *bdev, const struct hif_bus_id *bid,
703 		      enum qdf_bus_type bus_type,
704 		      enum hif_enable_type type);
705 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
706 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
707 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
708 #ifdef FEATURE_RUNTIME_PM
709 struct hif_pm_runtime_lock;
710 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
711 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx);
712 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx);
713 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx);
714 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
715 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
716 			struct hif_pm_runtime_lock *lock);
717 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
718 		struct hif_pm_runtime_lock *lock);
719 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
720 		struct hif_pm_runtime_lock *lock);
721 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
722 		struct hif_pm_runtime_lock *lock, unsigned int delay);
723 #else
724 struct hif_pm_runtime_lock {
725 	const char *name;
726 };
727 static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
728 static inline void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
729 {}
730 
731 static inline int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
732 { return 0; }
733 static inline int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
734 { return 0; }
735 static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
736 					const char *name)
737 { return 0; }
738 static inline void
739 hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
740 			struct hif_pm_runtime_lock *lock) {}
741 
742 static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
743 		struct hif_pm_runtime_lock *lock)
744 { return 0; }
745 static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
746 		struct hif_pm_runtime_lock *lock)
747 { return 0; }
748 static inline int
749 hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
750 		struct hif_pm_runtime_lock *lock, unsigned int delay)
751 { return 0; }
752 #endif
753 
754 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
755 				 bool is_packet_log_enabled);
756 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
757 
758 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
759 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
760 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
761 
762 #ifdef IPA_OFFLOAD
763 /**
764  * hif_get_ipa_hw_type() - get IPA hw type
765  *
766  * This API return the IPA hw type.
767  *
768  * Return: IPA hw type
769  */
770 static inline
771 enum ipa_hw_type hif_get_ipa_hw_type(void)
772 {
773 	return ipa_get_hw_type();
774 }
775 
776 /**
777  * hif_get_ipa_present() - get IPA hw status
778  *
779  * This API return the IPA hw status.
780  *
781  * Return: true if IPA is present or false otherwise
782  */
783 static inline
784 bool hif_get_ipa_present(void)
785 {
786 	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
787 		return true;
788 	else
789 		return false;
790 }
791 #endif
792 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
793 /**
 * hif_bus_early_suspend() - stop non wmi tx traffic
795  * @context: hif context
796  */
797 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
798 
799 /**
800  * hif_bus_late_resume() - resume non wmi traffic
801  * @context: hif context
802  */
803 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
804 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
805 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
806 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
807 
808 /**
809  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
810  * @hif_ctx: an opaque HIF handle to use
811  *
812  * As opposed to the standard hif_irq_enable, this function always applies to
813  * the APPS side kernel interrupt handling.
814  *
815  * Return: errno
816  */
817 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
818 
819 /**
820  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
821  * @hif_ctx: an opaque HIF handle to use
822  *
823  * As opposed to the standard hif_irq_disable, this function always applies to
824  * the APPS side kernel interrupt handling.
825  *
826  * Return: errno
827  */
828 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
829 
830 /**
831  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
832  * @hif_ctx: an opaque HIF handle to use
833  *
834  * As opposed to the standard hif_irq_enable, this function always applies to
835  * the APPS side kernel interrupt handling.
836  *
837  * Return: errno
838  */
839 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
840 
841 /**
842  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
843  * @hif_ctx: an opaque HIF handle to use
844  *
845  * As opposed to the standard hif_irq_disable, this function always applies to
846  * the APPS side kernel interrupt handling.
847  *
848  * Return: errno
849  */
850 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
851 
852 #ifdef FEATURE_RUNTIME_PM
853 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
854 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
855 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
856 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
857 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
858 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
859 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
860 #endif
861 
/*
 * Miscellaneous HIF control/query/send entry points implemented by the
 * bus-specific layers.  NOTE(review): comments below are inferred from
 * names and signatures — confirm against the implementations.
 */

/* fill @irq with up to @size kernel irq numbers used by this instance */
int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
/* dump target registers for debugging */
int hif_dump_registers(struct hif_opaque_softc *scn);
/* copy out a target RAM dump (firmware crash data) */
int ol_copy_ramdump(struct hif_opaque_softc *scn);
/* collect target crash state while the host kernel is going down */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
/* report chip @version/@revision and a printable @target_name */
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
/* accessors for sub-structures owned by the HIF context */
struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
						   scn);
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
			   hif_target_status);
/* copy ini-derived configuration into the HIF context */
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg);
/* data-path send helpers */
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
int hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, uint32_t
		transfer_id, u_int32_t len);
int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
	uint32_t transfer_id, uint32_t download_len);
/* set the max number of bytes downloaded to the target per packet */
void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
/* global copy-engine workaround toggles */
void hif_ce_war_disable(void);
void hif_ce_war_enable(void);
/* mask interrupts for a single copy-engine pipe */
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
/*
 * Helpers used when copy-engine pipes are offloaded to the NSS wifi
 * engine.  NOTE(review): semantics inferred from names — confirm against
 * the NSS offload implementation.
 */
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
		uint32_t pipe_num);
int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
896 
/* enable/disable rx bundling with up to @rx_bundle_cnt packets per bundle */
void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
				int rx_bundle_cnt);
/* reset and resume the underlying bus; returns errno */
int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);

/* store bus attribute flags on the HIF context */
void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);

/* return the LRO state for context @ctx_id (opaque to callers) */
void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
904 
/**
 * enum hif_exec_type - bottom-half context used to service an ext
 *	interrupt group
 * @HIF_EXEC_NAPI_TYPE: NAPI poll context
 * @HIF_EXEC_TASKLET_TYPE: tasklet context
 */
enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,
	HIF_EXEC_TASKLET_TYPE,
};

/*
 * Handler invoked when an ext interrupt group fires; receives the cb_ctx
 * supplied at registration.  NOTE(review): the meaning of the uint32_t
 * argument and return value is not visible here — confirm in the hif_exec
 * implementation.
 */
typedef uint32_t (*ext_intr_handler)(void *, uint32_t);
/* hook up all registered ext interrupt groups; returns a status code */
uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
/* register @numirq irqs in @irq[] as one group serviced by @handler */
uint32_t  hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
		void *cb_ctx, const char *context_name,
		enum hif_exec_type type, uint32_t scale);

/* tear down the exec group registered under @context_name */
void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
				const char *context_name);

/* override the message callbacks for a single copy-engine pipe */
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
				u_int8_t pipeid,
				struct hif_msg_callbacks *callbacks);

/* print accumulated NAPI statistics to the log */
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
925 #ifdef __cplusplus
926 }
927 #endif
928 
929 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
930 
931 /**
932  * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
933  * @hif_ctx - the HIF context to assign the callback to
934  * @callback - the callback to assign
935  * @priv - the private data to pass to the callback when invoked
936  *
937  * Return: None
938  */
939 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
940 			       void (*callback)(void *),
941 			       void *priv);
942 #ifndef CONFIG_WIN
943 #ifndef HIF_CE_DEBUG_DATA_BUF
944 #define HIF_CE_DEBUG_DATA_BUF 0
945 #endif
946 #endif
947 /*
948  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
949  * for defined here
950  */
951 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
952 ssize_t hif_dump_desc_trace_buf(struct device *dev,
953 				struct device_attribute *attr, char *buf);
954 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
955 					const char *buf, size_t size);
956 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
957 				const char *buf, size_t size);
958 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
959 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
960 #endif /* Note: for MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
961 
962 /**
963  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
964  * @hif: hif context
965  * @ce_service_max_yield_time: CE service max yield time to set
966  *
967  * This API storess CE service max yield time in hif context based
968  * on ini value.
969  *
970  * Return: void
971  */
972 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
973 				       uint32_t ce_service_max_yield_time);
974 
975 /**
976  * hif_get_ce_service_max_yield_time() - get CE service max yield time
977  * @hif: hif context
978  *
979  * This API returns CE service max yield time.
980  *
981  * Return: CE service max yield time
982  */
983 unsigned long long
984 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
985 
986 /**
987  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
988  * @hif: hif context
989  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
990  *
991  * This API stores CE service max rx ind flush in hif context based
992  * on ini value.
993  *
994  * Return: void
995  */
996 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
997 				       uint8_t ce_service_max_rx_ind_flush);
998 #ifdef OL_ATH_SMART_LOGGING
999 /*
1000  * hif_log_ce_dump() - Copy all the CE DEST ring to buf
1001  * @scn : HIF handler
1002  * @buf_cur: Current pointer in ring buffer
1003  * @buf_init:Start of the ring buffer
1004  * @buf_sz: Size of the ring buffer
1005  * @ce: Copy Engine id
1006  * @skb_sz: Max size of the SKB buffer to be copied
1007  *
1008  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
1009  * and buffers pointed by them in to the given buf
1010  *
1011  * Return: Current pointer in ring buffer
1012  */
1013 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
1014 			 uint8_t *buf_init, uint32_t buf_sz,
1015 			 uint32_t ce, uint32_t skb_sz);
1016 #endif /* OL_ATH_SMART_LOGGING */
1017 #endif /* _HIF_H_ */
1018