xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision 4865edfd190c086bbe2c69aae12a8226f877b91e)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _HIF_H_
20 #define _HIF_H_
21 
22 #ifdef __cplusplus
23 extern "C" {
24 #endif /* __cplusplus */
25 
26 /* Header files */
27 #include <qdf_status.h>
28 #include "qdf_nbuf.h"
29 #include "qdf_lro.h"
30 #include "ol_if_athvar.h"
31 #include <linux/platform_device.h>
32 #ifdef HIF_PCI
33 #include <linux/pci.h>
34 #endif /* HIF_PCI */
35 #ifdef HIF_USB
36 #include <linux/usb.h>
37 #endif /* HIF_USB */
38 #ifdef IPA_OFFLOAD
39 #include <linux/ipa.h>
40 #endif
41 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
42 
43 typedef void __iomem *A_target_id_t;
44 typedef void *hif_handle_t;
45 
46 #define HIF_TYPE_AR6002   2
47 #define HIF_TYPE_AR6003   3
48 #define HIF_TYPE_AR6004   5
49 #define HIF_TYPE_AR9888   6
50 #define HIF_TYPE_AR6320   7
51 #define HIF_TYPE_AR6320V2 8
52 /* For attaching Peregrine 2.0 board host_reg_tbl only */
53 #define HIF_TYPE_AR9888V2 9
54 #define HIF_TYPE_ADRASTEA 10
55 #define HIF_TYPE_AR900B 11
56 #define HIF_TYPE_QCA9984 12
57 #define HIF_TYPE_IPQ4019 13
58 #define HIF_TYPE_QCA9888 14
59 #define HIF_TYPE_QCA8074 15
60 #define HIF_TYPE_QCA6290 16
61 
62 #ifdef IPA_OFFLOAD
63 #define DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE   37
64 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
65 #endif
66 
/**
 * enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 18,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
108 
109 struct CE_state;
110 #define CE_COUNT_MAX 12
111 #define HIF_MAX_GRP_IRQ 16
112 #define HIF_MAX_GROUP 8
113 
114 #ifdef CONFIG_SLUB_DEBUG_ON
115 #ifndef CONFIG_WIN
116 #define HIF_CONFIG_SLUB_DEBUG_ON
117 #endif
118 #endif
119 
120 #ifndef NAPI_YIELD_BUDGET_BASED
121 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
122 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
123 #else  /* PERF build */
124 #ifdef CONFIG_WIN
125 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
126 #else
127 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4
128 #endif /* CONFIG_WIN */
129 #endif /* SLUB_DEBUG_ON */
130 #else  /* NAPI_YIELD_BUDGET_BASED */
131 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
132 #endif /* NAPI_YIELD_BUDGET_BASED */
133 #define QCA_NAPI_BUDGET    64
134 #define QCA_NAPI_DEF_SCALE  \
135 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
136 
137 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
138 /* NOTE: "napi->scale" can be changed,
139  * but this does not change the number of buckets
140  */
141 #define QCA_NAPI_NUM_BUCKETS 4
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules - number of times the schedule function is called
 * @napi_polls - number of times the execution context runs
 * @napi_completes - number of times that the generating interrupt is reenabled
 * @napi_workdone - cumulative of all work done reported by handler
 * @cpu_corrected - incremented when execution context runs on a different core
 *			than the one that its irq is affined to.
 * @napi_budget_uses - histogram of work done per execution run
 * @time_limit_reached - count of yields due to time limit thresholds
 * @rxpkt_thresh_reached - count of yields due to a work limit
 * @napi_max_poll_time - longest single poll observed (units set by the code
 *			that updates this field — TODO confirm)
 *
 * needs to be renamed
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
};
167 
168 
/**
 * struct qca_napi_info - per NAPI instance data structure
 * @netdev: dummy net_dev to anchor the napi instance
 * @hif_ctx: opaque hif context this instance belongs to
 * @napi: the kernel napi_struct itself
 * @scale: yield scale; currently set to the same value on all instances
 * @id: instance id (presumably the id returned to clients at create time —
 *      see struct qca_napi_data; TODO confirm)
 * @cpu: cpu associated with this instance
 * @irq: irq number associated with this instance
 * @stats: per-cpu statistics for this instance
 * @offld_flush_cb: rx offload flush callback; only present for data rx CE's
 * @rx_thread_napi: napi instance used by the rx thread
 * @rx_thread_netdev: dummy net_dev anchoring @rx_thread_napi
 * @lro_ctx: LRO context handle
 *
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device    netdev; /* dummy net_dev */
	void                 *hif_ctx;
	struct napi_struct   napi;
	uint8_t              scale;   /* currently same on all instances */
	uint8_t              id;
	uint8_t              cpu;
	int                  irq;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct   rx_thread_napi;
	struct net_device    rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t        lro_ctx;
};
193 
/* Detected throughput state, used for napi_mode in struct qca_napi_data */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};

/* Per-cpu state tracked in the napi cpu table (struct qca_napi_cpu) */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };
203 
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state:       up/down state of this cpu (enum qca_napi_cpu_state)
 * @core_id:     physical core id of the core
 * @cluster_id:  cluster this core belongs to
 * @core_mask:   mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq:    maximum clock this core can be clocked at
 *               same for all cpus of the same core.
 * @napis:       bitmap of napi instances on this core
 * @execs:       bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster (an index into
 *               the table, not a pointer)
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int			core_id;
	int			cluster_id;
	cpumask_t		core_mask;
	cpumask_t		thread_mask;
	unsigned int		max_freq;
	uint32_t		napis;
	uint32_t		execs;
	int			cluster_nxt;  /* index, not pointer */
};
234 
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @napis: per-CE pointers to the created napi instances
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: head index of the little-cluster core chain — TODO confirm
 * @bigcl_head: head index of the big-cluster core chain — TODO confirm
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: napi feature flags (semantics set by the napi code — TODO confirm)
 */
struct qca_napi_data {
	struct               hif_softc *hif_softc;
	qdf_spinlock_t       lock;
	uint32_t             state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t             ce_map;
	uint32_t             exec_map;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu  napi_cpu[NR_CPUS];
	int                  lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t              flags;
};
265 
/**
 * struct hif_config_info - Place Holder for hif configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable runtime PM (only present with FEATURE_RUNTIME_PM)
 * @runtime_pm_delay: Runtime PM auto-suspend delay (units decided by the
 *                    consumer — presumably milliseconds; TODO confirm)
 *
 * Structure for holding hif ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	bool enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
};
279 
/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: human-readable hardware name string (filled in by the bus
 *           layer — may be NULL until detection completes; TODO confirm)
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};
296 
297 struct hif_opaque_softc {
298 };
299 
300 /**
301  * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
302  *
303  * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
304  * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
305  *                         minimize power
306  * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
307  *                         platform-specific measures to completely power-off
308  *                         the module and associated hardware (i.e. cut power
309  *                         supplies)
310  */
311 enum HIF_DEVICE_POWER_CHANGE_TYPE {
312 	HIF_DEVICE_POWER_UP,
313 	HIF_DEVICE_POWER_DOWN,
314 	HIF_DEVICE_POWER_CUT
315 };
316 
317 /**
318  * enum hif_enable_type: what triggered the enabling of hif
319  *
320  * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
321  * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
322  */
323 enum hif_enable_type {
324 	HIF_ENABLE_TYPE_PROBE,
325 	HIF_ENABLE_TYPE_REINIT,
326 	HIF_ENABLE_TYPE_MAX
327 };
328 
329 /**
330  * enum hif_disable_type: what triggered the disabling of hif
331  *
332  * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
333  * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
334  * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
335  * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
336  */
337 enum hif_disable_type {
338 	HIF_DISABLE_TYPE_PROBE_ERROR,
339 	HIF_DISABLE_TYPE_REINIT_ERROR,
340 	HIF_DISABLE_TYPE_REMOVE,
341 	HIF_DISABLE_TYPE_SHUTDOWN,
342 	HIF_DISABLE_TYPE_MAX
343 };
344 /**
345  * enum hif_device_config_opcode: configure mode
346  *
347  * @HIF_DEVICE_POWER_STATE: device power state
348  * @HIF_DEVICE_GET_MBOX_BLOCK_SIZE: get mbox block size
349  * @HIF_DEVICE_GET_MBOX_ADDR: get mbox block address
350  * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
351  * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
352  * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
353  * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
354  * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
355  * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
356  * @HIF_DEVICE_GET_OS_DEVICE: get OS device
357  * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
358  * @HIF_BMI_DONE: bmi done
359  * @HIF_DEVICE_SET_TARGET_TYPE: set target type
360  * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
361  * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
362  */
363 enum hif_device_config_opcode {
364 	HIF_DEVICE_POWER_STATE = 0,
365 	HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
366 	HIF_DEVICE_GET_MBOX_ADDR,
367 	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
368 	HIF_DEVICE_GET_IRQ_PROC_MODE,
369 	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
370 	HIF_DEVICE_POWER_STATE_CHANGE,
371 	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
372 	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
373 	HIF_DEVICE_GET_OS_DEVICE,
374 	HIF_DEVICE_DEBUG_BUS_STATE,
375 	HIF_BMI_DONE,
376 	HIF_DEVICE_SET_TARGET_TYPE,
377 	HIF_DEVICE_SET_HTC_CONTEXT,
378 	HIF_DEVICE_GET_HTC_CONTEXT,
379 };
380 
381 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
382 struct HID_ACCESS_LOG {
383 	uint32_t seqnum;
384 	bool is_write;
385 	void *addr;
386 	uint32_t value;
387 };
388 #endif
389 
390 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
391 		uint32_t value);
392 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
393 
394 #define HIF_MAX_DEVICES                 1
395 /**
396  * struct htc_callbacks - Structure for HTC Callbacks methods
397  * @context:             context to pass to the dsrhandler
398  *                       note : rwCompletionHandler is provided the context
399  *                       passed to hif_read_write
400  * @rwCompletionHandler: Read / write completion handler
401  * @dsrHandler:          DSR Handler
402  */
403 struct htc_callbacks {
404 	void *context;
405 	QDF_STATUS(*rwCompletionHandler)(void *rwContext, QDF_STATUS status);
406 	QDF_STATUS(*dsrHandler)(void *context);
407 };
408 
/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 *
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
};
428 
429 /* This API detaches the HTC layer from the HIF device */
430 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
431 
432 /****************************************************************/
433 /* BMI and Diag window abstraction                              */
434 /****************************************************************/
435 
436 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
437 
438 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
439 				     * handled atomically by
440 				     * DiagRead/DiagWrite
441 				     */
442 
443 /*
444  * API to handle HIF-specific BMI message exchanges, this API is synchronous
445  * and only allowed to be called from a context that can block (sleep)
446  */
447 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
448 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
449 				uint8_t *pSendMessage, uint32_t Length,
450 				uint8_t *pResponseMessage,
451 				uint32_t *pResponseLength, uint32_t TimeoutMS);
452 void hif_register_bmi_callbacks(struct hif_softc *hif_sc);
453 /*
454  * APIs to handle HIF specific diagnostic read accesses. These APIs are
455  * synchronous and only allowed to be called from a context that
456  * can block (sleep). They are not high performance APIs.
457  *
458  * hif_diag_read_access reads a 4 Byte aligned/length value from a
459  * Target register or memory word.
460  *
461  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
462  */
463 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
464 				uint32_t address, uint32_t *data);
465 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
466 		      uint8_t *data, int nbytes);
467 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
468 			void *ramdump_base, uint32_t address, uint32_t size);
469 /*
470  * APIs to handle HIF specific diagnostic write accesses. These APIs are
471  * synchronous and only allowed to be called from a context that
472  * can block (sleep).
473  * They are not high performance APIs.
474  *
475  * hif_diag_write_access writes a 4 Byte aligned/length value to a
476  * Target register or memory word.
477  *
478  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
479  */
480 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
481 				 uint32_t address, uint32_t data);
482 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
483 			uint32_t address, uint8_t *data, int nbytes);
484 
485 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
486 
487 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
488 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
489 
/*
 * Set the FASTPATH_mode_on flag in sc, for use by data path
 */
#ifdef WLAN_FEATURE_FASTPATH
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler, void *context);
#else
/* Fastpath disabled: stub always reports failure to register a handler.
 * NOTE(review): returns QDF_STATUS_E_FAILURE through an int return type;
 * callers presumably only test for non-zero — confirm the convention.
 */
static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
					      fastpath_msg_handler handler,
					      void *context)
{
	return QDF_STATUS_E_FAILURE;
}

/* Fastpath disabled: no CE handle is available */
static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}

#endif
512 
513 /*
514  * Enable/disable CDC max performance workaround
515  * For max-performace set this to 0
516  * To allow SoC to enter sleep set this to 1
517  */
518 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
519 
520 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
521 			     qdf_shared_mem_t **ce_sr,
522 			     uint32_t *ce_sr_ring_size,
523 			     qdf_dma_addr_t *ce_reg_paddr);
524 
525 /**
526  * @brief List of callbacks - filled in by HTC.
527  */
528 struct hif_msg_callbacks {
529 	void *Context;
530 	/**< context meaningful to HTC */
531 	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
532 					uint32_t transferID,
533 					uint32_t toeplitz_hash_result);
534 	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
535 					uint8_t pipeID);
536 	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
537 	void (*fwEventHandler)(void *context, QDF_STATUS status);
538 };
539 
540 enum hif_target_status {
541 	TARGET_STATUS_CONNECTED = 0,  /* target connected */
542 	TARGET_STATUS_RESET,  /* target got reset */
543 	TARGET_STATUS_EJECT,  /* target got ejected */
544 	TARGET_STATUS_SUSPEND /*target got suspend */
545 };
546 
/**
 * enum hif_attribute_flags: configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
 *                                + No pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};
558 
559 #define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
560 	(attr |= (v & 0x01) << 5)
561 #define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
562 	(attr |= (v & 0x03) << 6)
563 #define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
564 	(attr |= (v & 0x01) << 13)
565 #define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
566 	(attr |= (v & 0x01) << 14)
567 #define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
568 	(attr |= (v & 0x01) << 15)
569 #define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
570 	(attr |= (v & 0x0FFF) << 16)
571 #define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
572 	(attr |= (v & 0x01) << 30)
573 
574 struct hif_ul_pipe_info {
575 	unsigned int nentries;
576 	unsigned int nentries_mask;
577 	unsigned int sw_index;
578 	unsigned int write_index; /* cached copy */
579 	unsigned int hw_index;    /* cached copy */
580 	void *base_addr_owner_space; /* Host address space */
581 	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
582 };
583 
584 struct hif_dl_pipe_info {
585 	unsigned int nentries;
586 	unsigned int nentries_mask;
587 	unsigned int sw_index;
588 	unsigned int write_index; /* cached copy */
589 	unsigned int hw_index;    /* cached copy */
590 	void *base_addr_owner_space; /* Host address space */
591 	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
592 };
593 
594 struct hif_pipe_addl_info {
595 	uint32_t pci_mem;
596 	uint32_t ctrl_addr;
597 	struct hif_ul_pipe_info ul_pipe;
598 	struct hif_dl_pipe_info dl_pipe;
599 };
600 
601 #ifdef CONFIG_SLUB_DEBUG_ON
602 #define MSG_FLUSH_NUM 16
603 #else /* PERF build */
604 #define MSG_FLUSH_NUM 32
605 #endif /* SLUB_DEBUG_ON */
606 
607 struct hif_bus_id;
608 
609 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
610 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
611 		     int opcode, void *config, uint32_t config_len);
612 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
613 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
614 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
615 		   struct hif_msg_callbacks *callbacks);
616 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
617 void hif_stop(struct hif_opaque_softc *hif_ctx);
618 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
619 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
620 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
621 		      uint8_t cmd_id, bool start);
622 
623 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
624 				  uint32_t transferID, uint32_t nbytes,
625 				  qdf_nbuf_t wbuf, uint32_t data_attr);
626 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
627 			     int force);
628 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
629 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
630 			  uint8_t *DLPipe);
631 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
632 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
633 			int *dl_is_polled);
634 uint16_t
635 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
636 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
637 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
638 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
639 		     bool wait_for_it);
640 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
#ifndef HIF_PCI
/* SoC status checking is implemented only for PCI; report success (0)
 * unconditionally on other buses.
 */
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
#else
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif
649 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
650 			u32 *revision, const char **target_name);
651 
652 #ifdef RECEIVE_OFFLOAD
653 /**
654  * hif_offld_flush_cb_register() - Register the offld flush callback
655  * @scn: HIF opaque context
656  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
657  *			 Or GRO/LRO flush when RxThread is not enabled. Called
658  *			 with corresponding context for flush.
659  * Return: None
660  */
661 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
662 				 void (offld_flush_handler)(void *ol_ctx));
663 
664 /**
665  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
666  * @scn: HIF opaque context
667  *
668  * Return: None
669  */
670 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
671 #endif
672 
673 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
674 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
675 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
676 				      int htc_htt_tx_endpoint);
677 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode,
678 				  enum qdf_bus_type bus_type,
679 				  struct hif_driver_state_callbacks *cbk);
680 void hif_close(struct hif_opaque_softc *hif_ctx);
681 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
682 		      void *bdev, const struct hif_bus_id *bid,
683 		      enum qdf_bus_type bus_type,
684 		      enum hif_enable_type type);
685 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
686 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
687 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
#ifdef FEATURE_RUNTIME_PM
/* Runtime PM enabled: real implementations are provided elsewhere;
 * the lock type stays opaque to callers.
 */
struct hif_pm_runtime_lock;
void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx);
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock, unsigned int delay);
#else
/* Runtime PM disabled: no-op stubs that always report success (0) so
 * callers need no conditional compilation of their own.
 */
struct hif_pm_runtime_lock {
	const char *name;
};
static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
static inline void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
{}

static inline int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
					const char *name)
{ return 0; }
static inline void
hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			struct hif_pm_runtime_lock *lock) {}

static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int
hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock, unsigned int delay)
{ return 0; }
#endif
733 
734 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
735 				 bool is_packet_log_enabled);
736 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
737 
738 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
739 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
740 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
741 
#ifdef IPA_OFFLOAD
/**
 * hif_get_ipa_hw_type() - get IPA hw type
 *
 * This API return the IPA hw type.
 *
 * Return: IPA hw type
 */
static inline
enum ipa_hw_type hif_get_ipa_hw_type(void)
{
	return ipa_get_hw_type();
}

/**
 * hif_get_ipa_present() - get IPA hw status
 *
 * This API return the IPA hw status.
 *
 * Return: true if IPA is present or false otherwise
 */
static inline
bool hif_get_ipa_present(void)
{
	/* -EPERM from the ready-callback probe indicates no IPA uC support;
	 * any other result means the IPA hw is present.
	 */
	return ipa_uc_reg_rdyCB(NULL) != -EPERM;
}
#endif
772 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
/**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
 */
777 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
778 
779 /**
780  * hif_bus_late_resume() - resume non wmi traffic
781  * @context: hif context
782  */
783 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
784 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
785 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
786 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
787 
788 /**
789  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
790  * @hif_ctx: an opaque HIF handle to use
791  *
792  * As opposed to the standard hif_irq_enable, this function always applies to
793  * the APPS side kernel interrupt handling.
794  *
795  * Return: errno
796  */
797 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
798 
799 /**
800  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
801  * @hif_ctx: an opaque HIF handle to use
802  *
803  * As opposed to the standard hif_irq_disable, this function always applies to
804  * the APPS side kernel interrupt handling.
805  *
806  * Return: errno
807  */
808 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
809 
810 /**
811  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
812  * @hif_ctx: an opaque HIF handle to use
813  *
814  * As opposed to the standard hif_irq_enable, this function always applies to
815  * the APPS side kernel interrupt handling.
816  *
817  * Return: errno
818  */
819 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
820 
821 /**
822  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
823  * @hif_ctx: an opaque HIF handle to use
824  *
825  * As opposed to the standard hif_irq_disable, this function always applies to
826  * the APPS side kernel interrupt handling.
827  *
828  * Return: errno
829  */
830 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
831 
832 #ifdef FEATURE_RUNTIME_PM
833 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
834 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
835 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
836 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
837 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
838 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
839 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
840 #endif
841 
/* Diagnostics, crash handling and hardware information accessors. */
int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
int hif_dump_registers(struct hif_opaque_softc *scn);
int ol_copy_ramdump(struct hif_opaque_softc *scn);
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);
bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
/* Accessors for sub-structures owned by the HIF context. */
struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
						   scn);
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
/* NOTE(review): second parameter is intentionally left unnamed here; the
 * enum type (hif_target_status) alone documents it. */
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
			   hif_target_status);
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg);
/* Transmit-path entry points operating on network buffers (qdf_nbuf_t). */
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
int hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, uint32_t
		transfer_id, u_int32_t len);
int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
	uint32_t transfer_id, uint32_t download_len);
void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
/* Globally toggle the copy-engine workaround (no per-instance context). */
void hif_ce_war_disable(void);
void hif_ce_war_enable(void);
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
869 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
870 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
871 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
872 		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
873 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
874 		uint32_t pipe_num);
875 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
876 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
877 
/* Miscellaneous per-instance configuration and accessors. */
void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
				int rx_bundle_cnt);
int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);

void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);

/**
 * enum hif_exec_type - bottom-half context used to service an external
 *			interrupt group (see hif_register_ext_group())
 * @HIF_EXEC_NAPI_TYPE: service via a NAPI poll context
 * @HIF_EXEC_TASKLET_TYPE: service via a tasklet
 */
enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,
	HIF_EXEC_TASKLET_TYPE,
};
890 
/* Handler invoked for an external interrupt group; receives the cb_ctx
 * registered with hif_register_ext_group() and a 32-bit argument.
 * NOTE(review): meaning of the uint32_t parameter and of the return value
 * is not visible here - confirm against the hif_exec implementation. */
typedef uint32_t (*ext_intr_handler)(void *, uint32_t);
uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
/* Register @numirq irqs (listed in @irq) as one named execution group,
 * serviced by @handler in the context selected by @type. */
uint32_t  hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
		void *cb_ctx, const char *context_name,
		enum hif_exec_type type, uint32_t scale);

void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
				const char *context_name);

/* Override the message callbacks for a single copy-engine pipe. */
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
				u_int8_t pipeid,
				struct hif_msg_callbacks *callbacks);
904 
/**
 * hif_print_napi_stats() - print NAPI-related statistics for this instance
 * @hif_ctx: an opaque HIF handle
 *
 * Return: None
 */
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);

/**
 * hif_get_dev_ba() - get the device base address
 * @hif_handle: an opaque HIF handle
 *
 * Return: device base address. NOTE(review): presumably the mapped register
 *	   base; confirm against the bus-specific implementation.
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);

/**
 * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
 * @hif_ctx: the HIF context to assign the callback to
 * @callback: the callback to assign
 * @priv: the private data to pass to the callback when invoked
 *
 * Return: None
 */
void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
			       void (*callback)(void *),
			       void *priv);
#ifndef CONFIG_WIN
#ifndef HIF_CE_DEBUG_DATA_BUF
#define HIF_CE_DEBUG_DATA_BUF 0
#endif
#endif
/*
 * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * for defined here
 */
#if HIF_CE_DEBUG_DATA_BUF
/* sysfs-style dump/control helpers for the copy-engine descriptor history;
 * only available in debug-data-buffer builds. */
ssize_t hif_dump_desc_trace_buf(struct device *dev,
				struct device_attribute *attr, char *buf);
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
					const char *buf, size_t size);
ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
				const char *buf, size_t size);
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
#endif /* Note: for MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */

/**
 * hif_set_ce_service_max_yield_time() - sets CE service max yield time
 * @hif: hif context
 * @ce_service_max_yield_time: CE service max yield time to set
 *
 * This API stores CE service max yield time in hif context based
 * on ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint32_t ce_service_max_yield_time);

/**
 * hif_get_ce_service_max_yield_time() - get CE service max yield time
 * @hif: hif context
 *
 * This API returns CE service max yield time.
 *
 * Return: CE service max yield time
 */
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);

/**
 * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
 * @hif: hif context
 * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
 *
 * This API stores CE service max rx ind flush in hif context based
 * on ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
				       uint8_t ce_service_max_rx_ind_flush);

/*
 * Close the extern "C" block opened at the top of this header.  It was
 * previously closed before hif_get_dev_ba() and the declarations that
 * follow it, which left those symbols C++-mangled when this header is
 * included from C++ code, breaking linkage against the C definitions.
 */
#ifdef __cplusplus
}
#endif
979 #endif /* _HIF_H_ */
980