xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision ad85c389289a03e320cd08dea21861f9857892fc)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _HIF_H_
20 #define _HIF_H_
21 
22 #ifdef __cplusplus
23 extern "C" {
24 #endif /* __cplusplus */
25 
26 /* Header files */
27 #include <qdf_status.h>
28 #include "qdf_nbuf.h"
29 #include "qdf_lro.h"
30 #include "ol_if_athvar.h"
31 #include <linux/platform_device.h>
32 #ifdef HIF_PCI
33 #include <linux/pci.h>
34 #endif /* HIF_PCI */
35 #ifdef HIF_USB
36 #include <linux/usb.h>
37 #endif /* HIF_USB */
38 #ifdef IPA_OFFLOAD
39 #include <linux/ipa.h>
40 #endif
41 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
42 
43 typedef void __iomem *A_target_id_t;
44 typedef void *hif_handle_t;
45 
46 #define HIF_TYPE_AR6002   2
47 #define HIF_TYPE_AR6003   3
48 #define HIF_TYPE_AR6004   5
49 #define HIF_TYPE_AR9888   6
50 #define HIF_TYPE_AR6320   7
51 #define HIF_TYPE_AR6320V2 8
52 /* For attaching Peregrine 2.0 board host_reg_tbl only */
53 #define HIF_TYPE_AR9888V2 9
54 #define HIF_TYPE_ADRASTEA 10
55 #define HIF_TYPE_AR900B 11
56 #define HIF_TYPE_QCA9984 12
57 #define HIF_TYPE_IPQ4019 13
58 #define HIF_TYPE_QCA9888 14
59 #define HIF_TYPE_QCA8074 15
60 #define HIF_TYPE_QCA6290 16
61 #define HIF_TYPE_QCN7605 17
62 #define HIF_TYPE_QCA6390 18
63 #define HIF_TYPE_QCA8074V2 19
64 #define HIF_TYPE_QCA6018  20
65 
66 #ifdef IPA_OFFLOAD
67 #define DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE   37
68 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
69 #endif
70 
/**
 * enum hif_ic_irq - integrated chip irq numbers
 *
 * Defines irq numbers that can be used by external modules like datapath.
 * Only the first enumerator is assigned explicitly (16); the rest follow
 * implicitly in increments of one.
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
112 
113 struct CE_state;
114 #define CE_COUNT_MAX 12
115 #define HIF_MAX_GRP_IRQ 16
116 
117 #ifdef CONFIG_WIN
118 #define HIF_MAX_GROUP 12
119 #else
120 #define HIF_MAX_GROUP 8
121 #endif
122 
123 #ifdef CONFIG_SLUB_DEBUG_ON
124 #ifndef CONFIG_WIN
125 #define HIF_CONFIG_SLUB_DEBUG_ON
126 #endif
127 #endif
128 
129 #ifndef NAPI_YIELD_BUDGET_BASED
130 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
131 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
132 #else  /* PERF build */
133 #ifdef CONFIG_WIN
134 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
135 #else
136 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4
137 #endif /* CONFIG_WIN */
138 #endif /* SLUB_DEBUG_ON */
139 #else  /* NAPI_YIELD_BUDGET_BASED */
140 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
141 #endif /* NAPI_YIELD_BUDGET_BASED */
142 #define QCA_NAPI_BUDGET    64
143 #define QCA_NAPI_DEF_SCALE  \
144 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
145 
146 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
147 /* NOTE: "napi->scale" can be changed,
148  * but this does not change the number of buckets
149  */
150 #define QCA_NAPI_NUM_BUCKETS 4
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is reenabled
 * @napi_workdone: cumulative of all work done reported by handler
 * @cpu_corrected: incremented when execution context runs on a different core
 *			than the one that its irq is affined to.
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: maximum poll time recorded for this context
 *			(units defined by the implementation; not shown here)
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
};
176 
177 
/**
 * struct qca_napi_info - per-NAPI-instance data
 * @netdev: dummy net_device backing the napi_struct
 * @hif_ctx: opaque HIF context this instance belongs to
 * @napi: kernel NAPI instance
 * @scale: budget scale; in the current implementation, though scale is an
 *         instance variable, it is set to the same value for all instances
 * @id: instance id
 * @cpu: cpu associated with this instance
 * @irq: irq number for this instance
 * @cpumask: cpu affinity mask
 * @stats: per-cpu statistics, indexed by cpu number
 * @offld_flush_cb: offload flush callback; only present for data rx CEs
 * @rx_thread_napi: NAPI instance used by the rx thread
 * @rx_thread_netdev: dummy net_device backing @rx_thread_napi
 * @lro_ctx: LRO context
 */
struct qca_napi_info {
	struct net_device    netdev; /* dummy net_dev */
	void                 *hif_ctx;
	struct napi_struct   napi;
	uint8_t              scale;   /* currently same on all instances */
	uint8_t              id;
	uint8_t              cpu;
	int                  irq;
	cpumask_t            cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct   rx_thread_napi;
	struct net_device    rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t        lro_ctx;
};
203 
/* enum qca_napi_tput_state - throughput state, used as the irq affinity
 * and clock voting mode (see qca_napi_data.napi_mode)
 */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};
/* enum qca_napi_cpu_state - online/offline state of an entry in the
 * napi cpu table (see struct qca_napi_cpu)
 */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };
213 
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state:       online/offline state of this core
 * @core_id:     physical core id of the core
 * @cluster_id:  cluster this core belongs to
 * @core_mask:   mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq:    maximum clock this core can be clocked at
 *               same for all cpus of the same core.
 * @napis:       bitmap of napi instances on this core
 * @execs:       bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *               (an index into the table, not a pointer)
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int			core_id;
	int			cluster_id;
	cpumask_t		core_mask;
	cpumask_t		thread_mask;
	unsigned int		max_freq;
	uint32_t		napis;
	uint32_t		execs;
	int			cluster_nxt;  /* index, not pointer */
};
244 
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi stat machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: per-pipe array of napi instance pointers
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: index of the head of the little-cluster core chain
 * @bigcl_head: index of the head of the big-cluster core chain
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: feature flags
 */
struct qca_napi_data {
	struct               hif_softc *hif_softc;
	qdf_spinlock_t       lock;
	uint32_t             state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t             ce_map;
	uint32_t             exec_map;
	uint32_t             user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu  napi_cpu[NR_CPUS];
	int                  lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t              flags;
};
277 
/**
 * struct hif_config_info - place holder for hif configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable Runtime PM (only when FEATURE_RUNTIME_PM is set)
 * @runtime_pm_delay: Runtime PM delay (only when FEATURE_RUNTIME_PM is set)
 *
 * Structure for holding hif ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	bool enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
};
291 
/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: hardware name string
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};
308 
/* Opaque handle to a HIF instance; the real layout is private to HIF */
struct hif_opaque_softc {
};
311 
/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE - Device Power change type
 *
 * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
 *                         platform-specific measures to completely power-off
 *                         the module and associated hardware (i.e. cut power
 *                         supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};
328 
/**
 * enum hif_enable_type - what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 * @HIF_ENABLE_TYPE_MAX: number of enable types (not a valid trigger)
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};
340 
/**
 * enum hif_disable_type - what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 * @HIF_DISABLE_TYPE_MAX: number of disable types (not a valid trigger)
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
/**
 * enum hif_device_config_opcode - configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get fifo address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
392 
393 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/**
 * struct HID_ACCESS_LOG - debug record of a single register access
 * @seqnum: sequence number of the access
 * @is_write: true for a write access, false for a read
 * @addr: address accessed
 * @value: value written or read
 */
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
400 #endif
401 
402 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
403 		uint32_t value);
404 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
405 
406 #define HIF_MAX_DEVICES                 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context:          context to pass to the dsr_handler
 *                    note : rw_compl_handler is provided the context
 *                    passed to hif_read_write
 * @rw_compl_handler: Read / write completion handler
 * @dsr_handler:      DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};
420 
/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 *
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
};
440 
441 /* This API detaches the HTC layer from the HIF device */
442 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
443 
444 /****************************************************************/
445 /* BMI and Diag window abstraction                              */
446 /****************************************************************/
447 
448 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
449 
450 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
451 				     * handled atomically by
452 				     * DiagRead/DiagWrite
453 				     */
454 
455 #ifdef WLAN_FEATURE_BMI
456 /*
457  * API to handle HIF-specific BMI message exchanges, this API is synchronous
458  * and only allowed to be called from a context that can block (sleep)
459  */
460 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
461 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
462 				uint8_t *pSendMessage, uint32_t Length,
463 				uint8_t *pResponseMessage,
464 				uint32_t *pResponseLength, uint32_t TimeoutMS);
465 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
466 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
467 #else /* WLAN_FEATURE_BMI */
/* No-op stub used when WLAN_FEATURE_BMI is compiled out */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}
472 
/* BMI is never needed when WLAN_FEATURE_BMI is compiled out */
static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
478 #endif /* WLAN_FEATURE_BMI */
479 
480 /*
481  * APIs to handle HIF specific diagnostic read accesses. These APIs are
482  * synchronous and only allowed to be called from a context that
483  * can block (sleep). They are not high performance APIs.
484  *
485  * hif_diag_read_access reads a 4 Byte aligned/length value from a
486  * Target register or memory word.
487  *
488  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
489  */
490 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
491 				uint32_t address, uint32_t *data);
492 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
493 		      uint8_t *data, int nbytes);
494 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
495 			void *ramdump_base, uint32_t address, uint32_t size);
496 /*
497  * APIs to handle HIF specific diagnostic write accesses. These APIs are
498  * synchronous and only allowed to be called from a context that
499  * can block (sleep).
500  * They are not high performance APIs.
501  *
502  * hif_diag_write_access writes a 4 Byte aligned/length value to a
503  * Target register or memory word.
504  *
505  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
506  */
507 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
508 				 uint32_t address, uint32_t data);
509 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
510 			uint32_t address, uint8_t *data, int nbytes);
511 
512 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
513 
514 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
515 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
516 
517 /*
518  * Set the FASTPATH_mode_on flag in sc, for use by data path
519  */
520 #ifdef WLAN_FEATURE_FASTPATH
521 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
522 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
523 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
524 int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
525 				fastpath_msg_handler handler, void *context);
526 #else
static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
					      fastpath_msg_handler handler,
					      void *context)
{
	/* fastpath disabled at build time: registration always fails */
	return QDF_STATUS_E_FAILURE;
}
/* fastpath disabled at build time: no CE handle is available */
static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
537 
538 #endif
539 
540 /*
541  * Enable/disable CDC max performance workaround
542  * For max-performace set this to 0
543  * To allow SoC to enter sleep set this to 1
544  */
545 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
546 
547 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
548 			     qdf_shared_mem_t **ce_sr,
549 			     uint32_t *ce_sr_ring_size,
550 			     qdf_dma_addr_t *ce_reg_paddr);
551 
/**
 * struct hif_msg_callbacks - list of callbacks, filled in by HTC
 * @Context:                 context meaningful to HTC
 * @txCompletionHandler:     send completion handler
 * @rxCompletionHandler:     receive completion handler
 * @txResourceAvailHandler:  called when tx resources become available
 * @fwEventHandler:          called on firmware events
 */
struct hif_msg_callbacks {
	void *Context;
	/**< context meaningful to HTC */
	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint32_t transferID,
					uint32_t toeplitz_hash_result);
	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint8_t pipeID);
	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
	void (*fwEventHandler)(void *context, QDF_STATUS status);
};
566 
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,  /* target got reset */
	TARGET_STATUS_EJECT,  /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
573 
/**
 * enum hif_attribute_flags - configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor CE and
 *                                no pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};
585 
586 #define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
587 	(attr |= (v & 0x01) << 5)
588 #define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
589 	(attr |= (v & 0x03) << 6)
590 #define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
591 	(attr |= (v & 0x01) << 13)
592 #define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
593 	(attr |= (v & 0x01) << 14)
594 #define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
595 	(attr |= (v & 0x01) << 15)
596 #define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
597 	(attr |= (v & 0x0FFF) << 16)
598 #define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
599 	(attr |= (v & 0x01) << 30)
600 
/**
 * struct hif_ul_pipe_info - state of an uplink (host->target) pipe ring
 * @nentries: number of ring entries
 * @nentries_mask: ring entry count mask (entries must be a power of two)
 * @sw_index: software ring index
 * @write_index: write index (cached copy)
 * @hw_index: hardware ring index (cached copy)
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE address space
 */
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
610 
/**
 * struct hif_dl_pipe_info - state of a downlink (target->host) pipe ring
 * @nentries: number of ring entries
 * @nentries_mask: ring entry count mask (entries must be a power of two)
 * @sw_index: software ring index
 * @write_index: write index (cached copy)
 * @hw_index: hardware ring index (cached copy)
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE address space
 */
struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
620 
/**
 * struct hif_pipe_addl_info - additional info for a pipe
 * @pci_mem: PCI memory field
 * @ctrl_addr: control address
 * @ul_pipe: uplink pipe ring info
 * @dl_pipe: downlink pipe ring info
 */
struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
627 
628 #ifdef CONFIG_SLUB_DEBUG_ON
629 #define MSG_FLUSH_NUM 16
630 #else /* PERF build */
631 #define MSG_FLUSH_NUM 32
632 #endif /* SLUB_DEBUG_ON */
633 
634 struct hif_bus_id;
635 
636 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
637 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
638 		     int opcode, void *config, uint32_t config_len);
639 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
640 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
641 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
642 		   struct hif_msg_callbacks *callbacks);
643 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
644 void hif_stop(struct hif_opaque_softc *hif_ctx);
645 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
646 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
647 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
648 		      uint8_t cmd_id, bool start);
649 
650 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
651 				  uint32_t transferID, uint32_t nbytes,
652 				  qdf_nbuf_t wbuf, uint32_t data_attr);
653 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
654 			     int force);
655 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
656 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
657 			  uint8_t *DLPipe);
658 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
659 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
660 			int *dl_is_polled);
661 uint16_t
662 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
663 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
664 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
665 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
666 		     bool wait_for_it);
667 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
668 #ifndef HIF_PCI
669 static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
670 {
671 	return 0;
672 }
673 #else
674 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
675 #endif
676 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
677 			u32 *revision, const char **target_name);
678 
679 #ifdef RECEIVE_OFFLOAD
680 /**
681  * hif_offld_flush_cb_register() - Register the offld flush callback
682  * @scn: HIF opaque context
683  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
684  *			 Or GRO/LRO flush when RxThread is not enabled. Called
685  *			 with corresponding context for flush.
686  * Return: None
687  */
688 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
689 				 void (offld_flush_handler)(void *ol_ctx));
690 
691 /**
692  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
693  * @scn: HIF opaque context
694  *
695  * Return: None
696  */
697 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
698 #endif
699 
700 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
701 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
702 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
703 				      int htc_htt_tx_endpoint);
704 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode,
705 				  enum qdf_bus_type bus_type,
706 				  struct hif_driver_state_callbacks *cbk);
707 void hif_close(struct hif_opaque_softc *hif_ctx);
708 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
709 		      void *bdev, const struct hif_bus_id *bid,
710 		      enum qdf_bus_type bus_type,
711 		      enum hif_enable_type type);
712 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
713 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
714 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
715 #ifdef FEATURE_RUNTIME_PM
716 struct hif_pm_runtime_lock;
717 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
718 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx);
719 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx);
720 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx);
721 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
722 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
723 			struct hif_pm_runtime_lock *lock);
724 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
725 		struct hif_pm_runtime_lock *lock);
726 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
727 		struct hif_pm_runtime_lock *lock);
728 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
729 		struct hif_pm_runtime_lock *lock, unsigned int delay);
730 #else
/* stub lock object used when FEATURE_RUNTIME_PM is compiled out */
struct hif_pm_runtime_lock {
	const char *name;
};
/* No-op runtime-PM stubs used when FEATURE_RUNTIME_PM is compiled out;
 * all "get/put/prevent/allow" operations succeed trivially (return 0).
 */
static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
static inline void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
{}

static inline int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
					const char *name)
{ return 0; }
static inline void
hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			struct hif_pm_runtime_lock *lock) {}

static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int
hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock, unsigned int delay)
{ return 0; }
759 #endif
760 
761 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
762 				 bool is_packet_log_enabled);
763 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
764 
765 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
766 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
767 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
768 
769 #ifdef IPA_OFFLOAD
770 /**
771  * hif_get_ipa_hw_type() - get IPA hw type
772  *
773  * This API return the IPA hw type.
774  *
775  * Return: IPA hw type
776  */
static inline
enum ipa_hw_type hif_get_ipa_hw_type(void)
{
	/* delegate directly to the IPA driver */
	return ipa_get_hw_type();
}
782 
783 /**
784  * hif_get_ipa_present() - get IPA hw status
785  *
786  * This API return the IPA hw status.
787  *
788  * Return: true if IPA is present or false otherwise
789  */
790 static inline
791 bool hif_get_ipa_present(void)
792 {
793 	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
794 		return true;
795 	else
796 		return false;
797 }
798 #endif
799 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
800 /**
 * hif_bus_early_suspend() - stop non wmi tx traffic
802  * @context: hif context
803  */
804 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
805 
806 /**
807  * hif_bus_late_resume() - resume non wmi traffic
808  * @context: hif context
809  */
810 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
811 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
812 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
813 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
814 
815 /**
816  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
817  * @hif_ctx: an opaque HIF handle to use
818  *
819  * As opposed to the standard hif_irq_enable, this function always applies to
820  * the APPS side kernel interrupt handling.
821  *
822  * Return: errno
823  */
824 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
825 
826 /**
827  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
828  * @hif_ctx: an opaque HIF handle to use
829  *
830  * As opposed to the standard hif_irq_disable, this function always applies to
831  * the APPS side kernel interrupt handling.
832  *
833  * Return: errno
834  */
835 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
836 
837 /**
838  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
839  * @hif_ctx: an opaque HIF handle to use
840  *
841  * As opposed to the standard hif_irq_enable, this function always applies to
842  * the APPS side kernel interrupt handling.
843  *
844  * Return: errno
845  */
846 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
847 
848 /**
849  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
850  * @hif_ctx: an opaque HIF handle to use
851  *
852  * As opposed to the standard hif_irq_disable, this function always applies to
853  * the APPS side kernel interrupt handling.
854  *
855  * Return: errno
856  */
857 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
858 
859 #ifdef FEATURE_RUNTIME_PM
860 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
861 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
862 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
863 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
864 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
865 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
866 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
867 #endif
868 
/*
 * Fills @irq with up to @size interrupt numbers for @scn.
 * NOTE(review): return-value semantics (count vs errno) not visible here —
 * confirm with the implementation.
 */
int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
/* Debug / crash-diagnostic helpers. */
int hif_dump_registers(struct hif_opaque_softc *scn);
int ol_copy_ramdump(struct hif_opaque_softc *scn);
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
/* Reports target hardware version/revision and a printable target name. */
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
/* Accessors for state embedded in the opaque HIF context. */
struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
						   scn);
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
			   hif_target_status);
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg);
/* Data-path send/update entry points. */
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
int hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, uint32_t
		transfer_id, u_int32_t len);
int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
	uint32_t transfer_id, uint32_t download_len);
void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
/* Global toggles for the CE workaround. */
void hif_ce_war_disable(void);
void hif_ce_war_enable(void);
/* Disables the interrupt associated with copy-engine pipe @pipe_num. */
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
/* NSS wifi offload support: per-pipe additional info and offload mode. */
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
		uint32_t pipe_num);
int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
903 
/* Enables/disables rx bundling with the given bundle count. */
void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
				int rx_bundle_cnt);
int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);

/* Returns the LRO info handle registered for context @ctx_id. */
void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
911 
/**
 * enum hif_exec_type - execution context for an ext interrupt group handler
 * @HIF_EXEC_NAPI_TYPE: serviced in NAPI context
 * @HIF_EXEC_TASKLET_TYPE: serviced in tasklet context
 */
enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,
	HIF_EXEC_TASKLET_TYPE,
};
916 
/*
 * Handler invoked to service an ext interrupt group; receives the cb_ctx
 * registered with hif_register_ext_group().
 * NOTE(review): meaning of the uint32_t argument and return value not
 * visible in this header — confirm against the hif_exec implementation.
 */
typedef uint32_t (*ext_intr_handler)(void *, uint32_t);
uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
/*
 * Registers @numirq irqs as a single execution group identified by
 * @context_name, serviced by @handler in the context given by @type
 * (see enum hif_exec_type).
 */
uint32_t  hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
		void *cb_ctx, const char *context_name,
		enum hif_exec_type type, uint32_t scale);

void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
				const char *context_name);

/* Replaces the message callbacks installed for pipe @pipeid. */
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
				u_int8_t pipeid,
				struct hif_msg_callbacks *callbacks);
931 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
932 #ifdef __cplusplus
933 }
934 #endif
935 
936 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
937 
938 /**
939  * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
940  * @hif_ctx - the HIF context to assign the callback to
941  * @callback - the callback to assign
942  * @priv - the private data to pass to the callback when invoked
943  *
944  * Return: None
945  */
946 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
947 			       void (*callback)(void *),
948 			       void *priv);
949 /*
950  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
951  * for defined here
952  */
953 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
954 ssize_t hif_dump_desc_trace_buf(struct device *dev,
955 				struct device_attribute *attr, char *buf);
956 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
957 					const char *buf, size_t size);
958 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
959 				const char *buf, size_t size);
960 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
961 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
962 #endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
963 
964 /**
965  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
966  * @hif: hif context
967  * @ce_service_max_yield_time: CE service max yield time to set
968  *
969  * This API storess CE service max yield time in hif context based
970  * on ini value.
971  *
972  * Return: void
973  */
974 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
975 				       uint32_t ce_service_max_yield_time);
976 
977 /**
978  * hif_get_ce_service_max_yield_time() - get CE service max yield time
979  * @hif: hif context
980  *
981  * This API returns CE service max yield time.
982  *
983  * Return: CE service max yield time
984  */
985 unsigned long long
986 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
987 
988 /**
989  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
990  * @hif: hif context
991  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
992  *
993  * This API stores CE service max rx ind flush in hif context based
994  * on ini value.
995  *
996  * Return: void
997  */
998 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
999 				       uint8_t ce_service_max_rx_ind_flush);
1000 #ifdef OL_ATH_SMART_LOGGING
1001 /*
1002  * hif_log_ce_dump() - Copy all the CE DEST ring to buf
1003  * @scn : HIF handler
1004  * @buf_cur: Current pointer in ring buffer
1005  * @buf_init:Start of the ring buffer
1006  * @buf_sz: Size of the ring buffer
1007  * @ce: Copy Engine id
1008  * @skb_sz: Max size of the SKB buffer to be copied
1009  *
1010  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
1011  * and buffers pointed by them in to the given buf
1012  *
1013  * Return: Current pointer in ring buffer
1014  */
1015 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
1016 			 uint8_t *buf_init, uint32_t buf_sz,
1017 			 uint32_t ce, uint32_t skb_sz);
1018 #endif /* OL_ATH_SMART_LOGGING */
1019 #endif /* _HIF_H_ */
1020