xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision a175314c51a4ce5cec2835cc8a8c7dc0c1810915)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _HIF_H_
20 #define _HIF_H_
21 
22 #ifdef __cplusplus
23 extern "C" {
24 #endif /* __cplusplus */
25 
26 /* Header files */
27 #include <qdf_status.h>
28 #include "qdf_nbuf.h"
29 #include "qdf_lro.h"
30 #include "ol_if_athvar.h"
31 #include <linux/platform_device.h>
32 #ifdef HIF_PCI
33 #include <linux/pci.h>
34 #endif /* HIF_PCI */
35 #ifdef HIF_USB
36 #include <linux/usb.h>
37 #endif /* HIF_USB */
38 #ifdef IPA_OFFLOAD
39 #include <linux/ipa.h>
40 #endif
41 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
42 
43 typedef void __iomem *A_target_id_t;
44 typedef void *hif_handle_t;
45 
46 #define HIF_TYPE_AR6002   2
47 #define HIF_TYPE_AR6003   3
48 #define HIF_TYPE_AR6004   5
49 #define HIF_TYPE_AR9888   6
50 #define HIF_TYPE_AR6320   7
51 #define HIF_TYPE_AR6320V2 8
52 /* For attaching Peregrine 2.0 board host_reg_tbl only */
53 #define HIF_TYPE_AR9888V2 9
54 #define HIF_TYPE_ADRASTEA 10
55 #define HIF_TYPE_AR900B 11
56 #define HIF_TYPE_QCA9984 12
57 #define HIF_TYPE_IPQ4019 13
58 #define HIF_TYPE_QCA9888 14
59 #define HIF_TYPE_QCA8074 15
60 #define HIF_TYPE_QCA6290 16
61 #define HIF_TYPE_QCN7605 17
62 
63 #ifdef IPA_OFFLOAD
64 #define DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE   37
65 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
66 #endif
67 
68 /* enum hif_ic_irq - enum defining integrated chip irq numbers
69  * defining irq numbers that can be used by external modules like datapath
70  */
71 enum hif_ic_irq {
72 	host2wbm_desc_feed = 18,
73 	host2reo_re_injection,
74 	host2reo_command,
75 	host2rxdma_monitor_ring3,
76 	host2rxdma_monitor_ring2,
77 	host2rxdma_monitor_ring1,
78 	reo2host_exception,
79 	wbm2host_rx_release,
80 	reo2host_status,
81 	reo2host_destination_ring4,
82 	reo2host_destination_ring3,
83 	reo2host_destination_ring2,
84 	reo2host_destination_ring1,
85 	rxdma2host_monitor_destination_mac3,
86 	rxdma2host_monitor_destination_mac2,
87 	rxdma2host_monitor_destination_mac1,
88 	ppdu_end_interrupts_mac3,
89 	ppdu_end_interrupts_mac2,
90 	ppdu_end_interrupts_mac1,
91 	rxdma2host_monitor_status_ring_mac3,
92 	rxdma2host_monitor_status_ring_mac2,
93 	rxdma2host_monitor_status_ring_mac1,
94 	host2rxdma_host_buf_ring_mac3,
95 	host2rxdma_host_buf_ring_mac2,
96 	host2rxdma_host_buf_ring_mac1,
97 	rxdma2host_destination_ring_mac3,
98 	rxdma2host_destination_ring_mac2,
99 	rxdma2host_destination_ring_mac1,
100 	host2tcl_input_ring4,
101 	host2tcl_input_ring3,
102 	host2tcl_input_ring2,
103 	host2tcl_input_ring1,
104 	wbm2host_tx_completions_ring3,
105 	wbm2host_tx_completions_ring2,
106 	wbm2host_tx_completions_ring1,
107 	tcl2host_status_ring,
108 };
109 
110 struct CE_state;
111 #define CE_COUNT_MAX 12
112 #define HIF_MAX_GRP_IRQ 16
113 #define HIF_MAX_GROUP 8
114 
115 #ifdef CONFIG_SLUB_DEBUG_ON
116 #ifndef CONFIG_WIN
117 #define HIF_CONFIG_SLUB_DEBUG_ON
118 #endif
119 #endif
120 
121 #ifndef NAPI_YIELD_BUDGET_BASED
122 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
123 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
124 #else  /* PERF build */
125 #ifdef CONFIG_WIN
126 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
127 #else
128 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4
129 #endif /* CONFIG_WIN */
130 #endif /* SLUB_DEBUG_ON */
131 #else  /* NAPI_YIELD_BUDGET_BASED */
132 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
133 #endif /* NAPI_YIELD_BUDGET_BASED */
134 #define QCA_NAPI_BUDGET    64
135 #define QCA_NAPI_DEF_SCALE  \
136 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
137 
138 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
139 /* NOTE: "napi->scale" can be changed,
140  * but this does not change the number of buckets
141  */
142 #define QCA_NAPI_NUM_BUCKETS 4
143 /**
144  * qca_napi_stat - stats structure for execution contexts
145  * @napi_schedules - number of times the schedule function is called
146  * @napi_polls - number of times the execution context runs
147  * @napi_completes - number of times that the generating interrupt is reenabled
148  * @napi_workdone - cumulative of all work done reported by handler
149  * @cpu_corrected - incremented when execution context runs on a different core
150  *			than the one that its irq is affined to.
151  * @napi_budget_uses - histogram of work done per execution run
152  * @time_limit_reache - count of yields due to time limit threshholds
153  * @rxpkt_thresh_reached - count of yields due to a work limit
154  *
155  * needs to be renamed
156  */
157 struct qca_napi_stat {
158 	uint32_t napi_schedules;
159 	uint32_t napi_polls;
160 	uint32_t napi_completes;
161 	uint32_t napi_workdone;
162 	uint32_t cpu_corrected;
163 	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
164 	uint32_t time_limit_reached;
165 	uint32_t rxpkt_thresh_reached;
166 	unsigned long long napi_max_poll_time;
167 };
168 
169 
170 /**
171  * per NAPI instance data structure
172  * This data structure holds stuff per NAPI instance.
173  * Note that, in the current implementation, though scale is
174  * an instance variable, it is set to the same value for all
175  * instances.
176  */
177 struct qca_napi_info {
178 	struct net_device    netdev; /* dummy net_dev */
179 	void                 *hif_ctx;
180 	struct napi_struct   napi;
181 	uint8_t              scale;   /* currently same on all instances */
182 	uint8_t              id;
183 	uint8_t              cpu;
184 	int                  irq;
185 	struct qca_napi_stat stats[NR_CPUS];
186 #ifdef RECEIVE_OFFLOAD
187 	/* will only be present for data rx CE's */
188 	void (*offld_flush_cb)(void *);
189 	struct napi_struct   rx_thread_napi;
190 	struct net_device    rx_thread_netdev;
191 #endif /* RECEIVE_OFFLOAD */
192 	qdf_lro_ctx_t        lro_ctx;
193 };
194 
/* throughput state used for irq affinity & clock voting decisions */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};
/* hotplug state of an entry in the napi cpu table */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };
204 
205 /**
206  * struct qca_napi_cpu - an entry of the napi cpu table
207  * @core_id:     physical core id of the core
208  * @cluster_id:  cluster this core belongs to
209  * @core_mask:   mask to match all core of this cluster
210  * @thread_mask: mask for this core within the cluster
211  * @max_freq:    maximum clock this core can be clocked at
212  *               same for all cpus of the same core.
213  * @napis:       bitmap of napi instances on this core
214  * @execs:       bitmap of execution contexts on this core
215  * cluster_nxt:  chain to link cores within the same cluster
216  *
217  * This structure represents a single entry in the napi cpu
218  * table. The table is part of struct qca_napi_data.
219  * This table is initialized by the init function, called while
220  * the first napi instance is being created, updated by hotplug
221  * notifier and when cpu affinity decisions are made (by throughput
222  * detection), and deleted when the last napi instance is removed.
223  */
224 struct qca_napi_cpu {
225 	enum qca_napi_cpu_state state;
226 	int			core_id;
227 	int			cluster_id;
228 	cpumask_t		core_mask;
229 	cpumask_t		thread_mask;
230 	unsigned int		max_freq;
231 	uint32_t		napis;
232 	uint32_t		execs;
233 	int			cluster_nxt;  /* index, not pointer */
234 };
235 
236 /**
237  * struct qca_napi_data - collection of napi data for a single hif context
238  * @hif_softc: pointer to the hif context
239  * @lock: spinlock used in the event state machine
240  * @state: state variable used in the napi stat machine
241  * @ce_map: bit map indicating which ce's have napis running
242  * @exec_map: bit map of instanciated exec contexts
243  * @user_cpu_affin_map: CPU affinity map from INI config.
244  * @napi_cpu: cpu info for irq affinty
245  * @lilcl_head:
246  * @bigcl_head:
247  * @napi_mode: irq affinity & clock voting mode
248  * @cpuhp_handler: CPU hotplug event registration handle
249  */
250 struct qca_napi_data {
251 	struct               hif_softc *hif_softc;
252 	qdf_spinlock_t       lock;
253 	uint32_t             state;
254 
255 	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
256 	 * not used by clients (clients use an id returned by create)
257 	 */
258 	uint32_t             ce_map;
259 	uint32_t             exec_map;
260 	uint32_t             user_cpu_affin_mask;
261 	struct qca_napi_info *napis[CE_COUNT_MAX];
262 	struct qca_napi_cpu  napi_cpu[NR_CPUS];
263 	int                  lilcl_head, bigcl_head;
264 	enum qca_napi_tput_state napi_mode;
265 	struct qdf_cpuhp_handler *cpuhp_handler;
266 	uint8_t              flags;
267 };
268 
269 /**
270  * struct hif_config_info - Place Holder for hif confiruation
271  * @enable_self_recovery: Self Recovery
272  *
273  * Structure for holding hif ini parameters.
274  */
275 struct hif_config_info {
276 	bool enable_self_recovery;
277 #ifdef FEATURE_RUNTIME_PM
278 	bool enable_runtime_pm;
279 	u_int32_t runtime_pm_delay;
280 #endif
281 };
282 
283 /**
284  * struct hif_target_info - Target Information
285  * @target_version: Target Version
286  * @target_type: Target Type
287  * @target_revision: Target Revision
288  * @soc_version: SOC Version
289  *
290  * Structure to hold target information.
291  */
292 struct hif_target_info {
293 	uint32_t target_version;
294 	uint32_t target_type;
295 	uint32_t target_revision;
296 	uint32_t soc_version;
297 	char *hw_name;
298 };
299 
/* Opaque handle type used throughout the public HIF API; the concrete
 * softc layout is private to the HIF implementation.
 */
struct hif_opaque_softc {
};
302 
303 /**
304  * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
305  *
306  * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
307  * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
308  *                         minimize power
309  * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
310  *                         platform-specific measures to completely power-off
311  *                         the module and associated hardware (i.e. cut power
312  *                         supplies)
313  */
314 enum HIF_DEVICE_POWER_CHANGE_TYPE {
315 	HIF_DEVICE_POWER_UP,
316 	HIF_DEVICE_POWER_DOWN,
317 	HIF_DEVICE_POWER_CUT
318 };
319 
320 /**
321  * enum hif_enable_type: what triggered the enabling of hif
322  *
323  * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
324  * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
325  */
326 enum hif_enable_type {
327 	HIF_ENABLE_TYPE_PROBE,
328 	HIF_ENABLE_TYPE_REINIT,
329 	HIF_ENABLE_TYPE_MAX
330 };
331 
332 /**
333  * enum hif_disable_type: what triggered the disabling of hif
334  *
335  * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
336  * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
337  * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
338  * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
339  */
340 enum hif_disable_type {
341 	HIF_DISABLE_TYPE_PROBE_ERROR,
342 	HIF_DISABLE_TYPE_REINIT_ERROR,
343 	HIF_DISABLE_TYPE_REMOVE,
344 	HIF_DISABLE_TYPE_SHUTDOWN,
345 	HIF_DISABLE_TYPE_MAX
346 };
347 /**
348  * enum hif_device_config_opcode: configure mode
349  *
350  * @HIF_DEVICE_POWER_STATE: device power state
351  * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
352  * @HIF_DEVICE_GET_FIFO_ADDR: get fifo address
353  * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
354  * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
355  * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
356  * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
357  * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
358  * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
359  * @HIF_DEVICE_GET_OS_DEVICE: get OS device
360  * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
361  * @HIF_BMI_DONE: bmi done
362  * @HIF_DEVICE_SET_TARGET_TYPE: set target type
363  * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
364  * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
365  */
366 enum hif_device_config_opcode {
367 	HIF_DEVICE_POWER_STATE = 0,
368 	HIF_DEVICE_GET_BLOCK_SIZE,
369 	HIF_DEVICE_GET_FIFO_ADDR,
370 	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
371 	HIF_DEVICE_GET_IRQ_PROC_MODE,
372 	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
373 	HIF_DEVICE_POWER_STATE_CHANGE,
374 	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
375 	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
376 	HIF_DEVICE_GET_OS_DEVICE,
377 	HIF_DEVICE_DEBUG_BUS_STATE,
378 	HIF_BMI_DONE,
379 	HIF_DEVICE_SET_TARGET_TYPE,
380 	HIF_DEVICE_SET_HTC_CONTEXT,
381 	HIF_DEVICE_GET_HTC_CONTEXT,
382 };
383 
384 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/* One record of a PCIe register access, kept for access-debug tracing */
struct HID_ACCESS_LOG {
	uint32_t seqnum;   /* monotonically increasing record number */
	bool is_write;     /* true for a write access, false for a read */
	void *addr;        /* register address accessed */
	uint32_t value;    /* value written or read back */
};
391 #endif
392 
393 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
394 		uint32_t value);
395 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
396 
397 #define HIF_MAX_DEVICES                 1
398 /**
399  * struct htc_callbacks - Structure for HTC Callbacks methods
400  * @context:             context to pass to the dsrhandler
401  *                       note : rwCompletionHandler is provided the context
402  *                       passed to hif_read_write
403  * @rwCompletionHandler: Read / write completion handler
404  * @dsrHandler:          DSR Handler
405  */
406 struct htc_callbacks {
407 	void *context;
408 	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
409 	QDF_STATUS(*dsr_handler)(void *context);
410 };
411 
412 /**
413  * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
414  * @context: Private data context
415  * @set_recovery_in_progress: To Set Driver state for recovery in progress
416  * @is_recovery_in_progress: Query if driver state is recovery in progress
417  * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
418  * @is_driver_unloading: Query if driver is unloading.
419  *
420  * This Structure provides callback pointer for HIF to query hdd for driver
421  * states.
422  */
423 struct hif_driver_state_callbacks {
424 	void *context;
425 	void (*set_recovery_in_progress)(void *context, uint8_t val);
426 	bool (*is_recovery_in_progress)(void *context);
427 	bool (*is_load_unload_in_progress)(void *context);
428 	bool (*is_driver_unloading)(void *context);
429 	bool (*is_target_ready)(void *context);
430 };
431 
432 /* This API detaches the HTC layer from the HIF device */
433 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
434 
435 /****************************************************************/
436 /* BMI and Diag window abstraction                              */
437 /****************************************************************/
438 
439 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
440 
441 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
442 				     * handled atomically by
443 				     * DiagRead/DiagWrite
444 				     */
445 
446 /*
447  * API to handle HIF-specific BMI message exchanges, this API is synchronous
448  * and only allowed to be called from a context that can block (sleep)
449  */
450 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
451 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
452 				uint8_t *pSendMessage, uint32_t Length,
453 				uint8_t *pResponseMessage,
454 				uint32_t *pResponseLength, uint32_t TimeoutMS);
455 void hif_register_bmi_callbacks(struct hif_softc *hif_sc);
456 /*
457  * APIs to handle HIF specific diagnostic read accesses. These APIs are
458  * synchronous and only allowed to be called from a context that
459  * can block (sleep). They are not high performance APIs.
460  *
461  * hif_diag_read_access reads a 4 Byte aligned/length value from a
462  * Target register or memory word.
463  *
464  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
465  */
466 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
467 				uint32_t address, uint32_t *data);
468 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
469 		      uint8_t *data, int nbytes);
470 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
471 			void *ramdump_base, uint32_t address, uint32_t size);
472 /*
473  * APIs to handle HIF specific diagnostic write accesses. These APIs are
474  * synchronous and only allowed to be called from a context that
475  * can block (sleep).
476  * They are not high performance APIs.
477  *
478  * hif_diag_write_access writes a 4 Byte aligned/length value to a
479  * Target register or memory word.
480  *
481  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
482  */
483 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
484 				 uint32_t address, uint32_t data);
485 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
486 			uint32_t address, uint8_t *data, int nbytes);
487 
488 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
489 
490 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
491 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
492 
493 /*
494  * Set the FASTPATH_mode_on flag in sc, for use by data path
495  */
496 #ifdef WLAN_FEATURE_FASTPATH
497 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
498 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
499 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
500 int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
501 				fastpath_msg_handler handler, void *context);
502 #else
/* Stub when WLAN_FEATURE_FASTPATH is disabled: registration always fails */
static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
					      fastpath_msg_handler handler,
					      void *context)
{
	return QDF_STATUS_E_FAILURE;
}

/* Stub when WLAN_FEATURE_FASTPATH is disabled: no CE handle available */
static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
513 
514 #endif
515 
516 /*
517  * Enable/disable CDC max performance workaround
518  * For max-performace set this to 0
519  * To allow SoC to enter sleep set this to 1
520  */
521 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
522 
523 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
524 			     qdf_shared_mem_t **ce_sr,
525 			     uint32_t *ce_sr_ring_size,
526 			     qdf_dma_addr_t *ce_reg_paddr);
527 
528 /**
529  * @brief List of callbacks - filled in by HTC.
530  */
531 struct hif_msg_callbacks {
532 	void *Context;
533 	/**< context meaningful to HTC */
534 	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
535 					uint32_t transferID,
536 					uint32_t toeplitz_hash_result);
537 	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
538 					uint8_t pipeID);
539 	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
540 	void (*fwEventHandler)(void *context, QDF_STATUS status);
541 };
542 
/* lifecycle status of the target device as seen by HIF */
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,  /* target got reset */
	TARGET_STATUS_EJECT,  /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
549 
550 /**
551  * enum hif_attribute_flags: configure hif
552  *
553  * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
554  * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
555  *  							+ No pktlog CE
556  */
557 enum hif_attribute_flags {
558 	HIF_LOWDESC_CE_CFG = 1,
559 	HIF_LOWDESC_CE_NO_PKTLOG_CFG
560 };
561 
562 #define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
563 	(attr |= (v & 0x01) << 5)
564 #define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
565 	(attr |= (v & 0x03) << 6)
566 #define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
567 	(attr |= (v & 0x01) << 13)
568 #define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
569 	(attr |= (v & 0x01) << 14)
570 #define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
571 	(attr |= (v & 0x01) << 15)
572 #define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
573 	(attr |= (v & 0x0FFF) << 16)
574 #define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
575 	(attr |= (v & 0x01) << 30)
576 
/* Snapshot of a UL pipe's CE ring state (cached indices, base addresses) */
struct hif_ul_pipe_info {
	unsigned int nentries;       /* ring size in entries */
	unsigned int nentries_mask;  /* nentries - 1, for index wrapping */
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
586 
/* Snapshot of a DL pipe's CE ring state (cached indices, base addresses) */
struct hif_dl_pipe_info {
	unsigned int nentries;       /* ring size in entries */
	unsigned int nentries_mask;  /* nentries - 1, for index wrapping */
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
596 
/* Additional per-pipe info: PCI mapping, control address and both rings */
struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
603 
604 #ifdef CONFIG_SLUB_DEBUG_ON
605 #define MSG_FLUSH_NUM 16
606 #else /* PERF build */
607 #define MSG_FLUSH_NUM 32
608 #endif /* SLUB_DEBUG_ON */
609 
610 struct hif_bus_id;
611 
612 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
613 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
614 		     int opcode, void *config, uint32_t config_len);
615 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
616 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
617 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
618 		   struct hif_msg_callbacks *callbacks);
619 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
620 void hif_stop(struct hif_opaque_softc *hif_ctx);
621 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
622 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
623 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
624 		      uint8_t cmd_id, bool start);
625 
626 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
627 				  uint32_t transferID, uint32_t nbytes,
628 				  qdf_nbuf_t wbuf, uint32_t data_attr);
629 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
630 			     int force);
631 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
632 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
633 			  uint8_t *DLPipe);
634 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
635 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
636 			int *dl_is_polled);
637 uint16_t
638 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
639 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
640 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
641 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
642 		     bool wait_for_it);
643 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
644 #ifndef HIF_PCI
/* Stub for non-PCI builds: SoC status check is PCI-specific, report OK */
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
649 #else
650 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
651 #endif
652 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
653 			u32 *revision, const char **target_name);
654 
655 #ifdef RECEIVE_OFFLOAD
656 /**
657  * hif_offld_flush_cb_register() - Register the offld flush callback
658  * @scn: HIF opaque context
659  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
660  *			 Or GRO/LRO flush when RxThread is not enabled. Called
661  *			 with corresponding context for flush.
662  * Return: None
663  */
664 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
665 				 void (offld_flush_handler)(void *ol_ctx));
666 
667 /**
668  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
669  * @scn: HIF opaque context
670  *
671  * Return: None
672  */
673 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
674 #endif
675 
676 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
677 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
678 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
679 				      int htc_htt_tx_endpoint);
680 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode,
681 				  enum qdf_bus_type bus_type,
682 				  struct hif_driver_state_callbacks *cbk);
683 void hif_close(struct hif_opaque_softc *hif_ctx);
684 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
685 		      void *bdev, const struct hif_bus_id *bid,
686 		      enum qdf_bus_type bus_type,
687 		      enum hif_enable_type type);
688 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
689 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
690 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
691 #ifdef FEATURE_RUNTIME_PM
692 struct hif_pm_runtime_lock;
693 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
694 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx);
695 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx);
696 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx);
697 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
698 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
699 			struct hif_pm_runtime_lock *lock);
700 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
701 		struct hif_pm_runtime_lock *lock);
702 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
703 		struct hif_pm_runtime_lock *lock);
704 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
705 		struct hif_pm_runtime_lock *lock, unsigned int delay);
706 #else
/* FEATURE_RUNTIME_PM disabled: no-op stubs (all "succeed" with 0) so that
 * callers do not need #ifdefs around runtime-PM calls.
 */
struct hif_pm_runtime_lock {
	const char *name;
};
static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
static inline void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
{}

static inline int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
					const char *name)
{ return 0; }
static inline void
hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			struct hif_pm_runtime_lock *lock) {}

static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int
hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock, unsigned int delay)
{ return 0; }
735 #endif
736 
737 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
738 				 bool is_packet_log_enabled);
739 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
740 
741 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
742 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
743 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
744 
745 #ifdef IPA_OFFLOAD
746 /**
747  * hif_get_ipa_hw_type() - get IPA hw type
748  *
749  * This API return the IPA hw type.
750  *
751  * Return: IPA hw type
752  */
753 static inline
754 enum ipa_hw_type hif_get_ipa_hw_type(void)
755 {
756 	return ipa_get_hw_type();
757 }
758 
759 /**
760  * hif_get_ipa_present() - get IPA hw status
761  *
762  * This API return the IPA hw status.
763  *
764  * Return: true if IPA is present or false otherwise
765  */
766 static inline
767 bool hif_get_ipa_present(void)
768 {
769 	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
770 		return true;
771 	else
772 		return false;
773 }
774 #endif
775 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
776 /**
777  * hif_bus_early_suspend() - stop non wmi tx traffic
778  * @hif_ctx: hif context
779  */
780 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
781 
782 /**
783  * hif_bus_late_resume() - resume non wmi traffic
784  * @hif_ctx: hif context
785  */
786 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
787 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
788 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
789 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
790 
791 /**
792  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
793  * @hif_ctx: an opaque HIF handle to use
794  *
795  * As opposed to the standard hif_irq_enable, this function always applies to
796  * the APPS side kernel interrupt handling.
797  *
798  * Return: errno
799  */
800 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
801 
802 /**
803  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
804  * @hif_ctx: an opaque HIF handle to use
805  *
806  * As opposed to the standard hif_irq_disable, this function always applies to
807  * the APPS side kernel interrupt handling.
808  *
809  * Return: errno
810  */
811 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
812 
813 /**
814  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
815  * @hif_ctx: an opaque HIF handle to use
816  *
817  * As opposed to the standard hif_irq_enable, this function always applies to
818  * the APPS side kernel interrupt handling.
819  *
820  * Return: errno
821  */
822 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
823 
824 /**
825  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
826  * @hif_ctx: an opaque HIF handle to use
827  *
828  * As opposed to the standard hif_irq_disable, this function always applies to
829  * the APPS side kernel interrupt handling.
830  *
831  * Return: errno
832  */
833 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
834 
835 #ifdef FEATURE_RUNTIME_PM
836 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
837 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
838 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
839 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
840 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
841 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
842 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
843 #endif
844 
/*
 * NOTE(review): the one-line summaries below are inferred from the names
 * and signatures of these bus-level entry points; confirm against the
 * per-bus implementations before relying on them.
 */
/* Fill @irq with up to @size interrupt numbers used by @scn. */
int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
/* Dump bus/target registers for debugging. */
int hif_dump_registers(struct hif_opaque_softc *scn);
/* Copy the target RAM dump into the host-side ramdump buffer. */
int ol_copy_ramdump(struct hif_opaque_softc *scn);
/* Quiesce the target and capture state during a host crash. */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
/* Report hardware version/revision and a printable target name. */
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);
/* True when this target boots via the BMI protocol. */
bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
/* Bus type (PCI/USB/...) backing this HIF instance. */
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
/* Accessors for objects embedded in the opaque HIF context. */
struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
						   scn);
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
/* Get/set the cached target status (see enum hif_target_status). */
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
			   hif_target_status);
/* Store ini-derived configuration @cfg in the HIF context. */
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg);
/*
 * Send-path entry points.  Summaries are inferred from names/signatures;
 * NOTE(review): confirm against the CE/HTC implementations.
 */
/* Update tx ring bookkeeping after @num_htt_cmpls HTT completions. */
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
/* Send a chain of msdus; presumably returns any nbufs not queued. */
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
/* Send a single msdu on @transfer_id. */
int hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, uint32_t
		transfer_id, u_int32_t len);
/* Fast-path send; only @download_len bytes are pushed to the target. */
int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
	uint32_t transfer_id, uint32_t download_len);
/* Set the per-packet download length used by the fast send path. */
void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
/* Globally disable/enable the copy engine workaround. */
void hif_ce_war_disable(void);
void hif_ce_war_enable(void);
/* Disable interrupts for copy engine pipe @pipe_num. */
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
/*
 * NSS WiFi offload hooks: expose additional pipe information and switch a
 * pipe into NSS offload mode.  Semantics live in the NSS-specific sources.
 */
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
		uint32_t pipe_num);
int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
880 
/* Enable/disable RX bundling; @rx_bundle_cnt presumably bounds the bundle
 * size — NOTE(review): confirm in the USB/SDIO implementation. */
void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
				int rx_bundle_cnt);
/* Reset the bus and bring it back up; return follows the errno convention
 * used throughout this header. */
int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);

/* Store attribute flags @hif_attrib in the HIF context. */
void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);

/* LRO (large receive offload) state for context @ctx_id; see qdf_lro.h. */
void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
888 
/**
 * enum hif_exec_type - bottom-half context used to service an external
 *                      interrupt group (see hif_register_ext_group())
 * @HIF_EXEC_NAPI_TYPE: group is serviced from a NAPI poll context
 * @HIF_EXEC_TASKLET_TYPE: group is serviced from a tasklet context
 */
enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,
	HIF_EXEC_TASKLET_TYPE,
};
893 
/*
 * Handler for an external interrupt group.  Signature is (context, work);
 * NOTE(review): argument semantics are defined by the hif_exec
 * implementation — confirm there before use.
 */
typedef uint32_t (*ext_intr_handler)(void *, uint32_t);
/* Configure all previously registered external interrupt groups. */
uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
/**
 * hif_register_ext_group() - register a group of IRQs as one execution ctx
 * @hif_ctx: an opaque HIF handle
 * @numirq: number of entries in @irq
 * @irq: array of IRQ numbers making up the group
 * @handler: bottom-half handler invoked for the group
 * @cb_ctx: opaque pointer passed back to @handler
 * @context_name: name identifying the group (used by deregister)
 * @type: execution model for the bottom half (NAPI or tasklet)
 * @scale: scaling factor; semantics defined by the implementation
 *
 * Return: status value; convention defined by the implementation
 */
uint32_t  hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
		void *cb_ctx, const char *context_name,
		enum hif_exec_type type, uint32_t scale);

/* Deregister the execution group registered under @context_name. */
void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
				const char *context_name);

/* Install message callbacks for pipe @pipeid. */
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
				u_int8_t pipeid,
				struct hif_msg_callbacks *callbacks);
907 
908 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
909 #ifdef __cplusplus
910 }
911 #endif
912 
913 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
914 
915 /**
916  * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
917  * @hif_ctx - the HIF context to assign the callback to
918  * @callback - the callback to assign
919  * @priv - the private data to pass to the callback when invoked
920  *
921  * Return: None
922  */
923 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
924 			       void (*callback)(void *),
925 			       void *priv);
926 #ifndef CONFIG_WIN
927 #ifndef HIF_CE_DEBUG_DATA_BUF
928 #define HIF_CE_DEBUG_DATA_BUF 0
929 #endif
930 #endif
931 /*
932  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
933  * for defined here
934  */
935 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
936 ssize_t hif_dump_desc_trace_buf(struct device *dev,
937 				struct device_attribute *attr, char *buf);
938 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
939 					const char *buf, size_t size);
940 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
941 				const char *buf, size_t size);
942 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
943 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
944 #endif /* Note: for MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
945 
946 /**
947  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
948  * @hif: hif context
949  * @ce_service_max_yield_time: CE service max yield time to set
950  *
951  * This API storess CE service max yield time in hif context based
952  * on ini value.
953  *
954  * Return: void
955  */
956 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
957 				       uint32_t ce_service_max_yield_time);
958 
959 /**
960  * hif_get_ce_service_max_yield_time() - get CE service max yield time
961  * @hif: hif context
962  *
963  * This API returns CE service max yield time.
964  *
965  * Return: CE service max yield time
966  */
967 unsigned long long
968 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
969 
970 /**
971  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
972  * @hif: hif context
973  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
974  *
975  * This API stores CE service max rx ind flush in hif context based
976  * on ini value.
977  *
978  * Return: void
979  */
980 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
981 				       uint8_t ce_service_max_rx_ind_flush);
982 #ifdef OL_ATH_SMART_LOGGING
983 /*
984  * hif_log_ce_dump() - Copy all the CE DEST ring to buf
985  * @scn : HIF handler
986  * @buf_cur: Current pointer in ring buffer
987  * @buf_init:Start of the ring buffer
988  * @buf_sz: Size of the ring buffer
989  * @ce: Copy Engine id
990  * @skb_sz: Max size of the SKB buffer to be copied
991  *
992  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
993  * and buffers pointed by them in to the given buf
994  *
995  * Return: Current pointer in ring buffer
996  */
997 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
998 			 uint8_t *buf_init, uint32_t buf_sz,
999 			 uint32_t ce, uint32_t skb_sz);
1000 #endif /* OL_ATH_SMART_LOGGING */
1001 #endif /* _HIF_H_ */
1002