xref: /wlan-dirver/qca-wifi-host-cmn/hif/inc/hif.h (revision dd4dc88b837a295134aa9869114a2efee0f4894b)
1 /*
2  * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _HIF_H_
20 #define _HIF_H_
21 
22 #ifdef __cplusplus
23 extern "C" {
24 #endif /* __cplusplus */
25 
26 /* Header files */
27 #include <qdf_status.h>
28 #include "qdf_nbuf.h"
29 #include "qdf_lro.h"
30 #include "ol_if_athvar.h"
31 #include <linux/platform_device.h>
32 #ifdef HIF_PCI
33 #include <linux/pci.h>
34 #endif /* HIF_PCI */
35 #ifdef HIF_USB
36 #include <linux/usb.h>
37 #endif /* HIF_USB */
38 #ifdef IPA_OFFLOAD
39 #include <linux/ipa.h>
40 #endif
41 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
42 
43 typedef void __iomem *A_target_id_t;
44 typedef void *hif_handle_t;
45 
46 #define HIF_TYPE_AR6002   2
47 #define HIF_TYPE_AR6003   3
48 #define HIF_TYPE_AR6004   5
49 #define HIF_TYPE_AR9888   6
50 #define HIF_TYPE_AR6320   7
51 #define HIF_TYPE_AR6320V2 8
52 /* For attaching Peregrine 2.0 board host_reg_tbl only */
53 #define HIF_TYPE_AR9888V2 9
54 #define HIF_TYPE_ADRASTEA 10
55 #define HIF_TYPE_AR900B 11
56 #define HIF_TYPE_QCA9984 12
57 #define HIF_TYPE_IPQ4019 13
58 #define HIF_TYPE_QCA9888 14
59 #define HIF_TYPE_QCA8074 15
60 #define HIF_TYPE_QCA6290 16
61 #define HIF_TYPE_QCN7605 17
62 #define HIF_TYPE_QCA6390 18
63 #define HIF_TYPE_QCA8074V2 19
64 #define HIF_TYPE_QCA6018  20
65 
66 #ifdef IPA_OFFLOAD
67 #define DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE   37
68 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3       32
69 #endif
70 
/**
 * enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
112 
113 struct CE_state;
114 #define CE_COUNT_MAX 12
115 #define HIF_MAX_GRP_IRQ 16
116 
117 #ifdef CONFIG_WIN
118 #define HIF_MAX_GROUP 12
119 #else
120 #define HIF_MAX_GROUP 8
121 #endif
122 
123 #ifdef CONFIG_SLUB_DEBUG_ON
124 #ifndef CONFIG_WIN
125 #define HIF_CONFIG_SLUB_DEBUG_ON
126 #endif
127 #endif
128 
129 #ifndef NAPI_YIELD_BUDGET_BASED
130 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
131 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
132 #else
133 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
134 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
135 #endif
136 #endif /* SLUB_DEBUG_ON */
137 #else  /* NAPI_YIELD_BUDGET_BASED */
138 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
139 #endif /* NAPI_YIELD_BUDGET_BASED */
140 
141 #define QCA_NAPI_BUDGET    64
142 #define QCA_NAPI_DEF_SCALE  \
143 	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
144 
145 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
146 /* NOTE: "napi->scale" can be changed,
147  * but this does not change the number of buckets
148  */
149 #define QCA_NAPI_NUM_BUCKETS 4
150 
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is reenabled
 * @napi_workdone: cumulative of all work done reported by handler
 * @cpu_corrected: incremented when execution context runs on a different core
 *			than the one that its irq is affined to.
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: maximum single poll time observed (units not visible
 *			in this header — presumably ns; confirm in hif_napi.c)
 * @poll_time_buckets: histogram of poll times for the napi
 *
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};
179 
180 
/**
 * struct qca_napi_info - per NAPI instance data structure
 * @netdev: dummy net_dev, required by the kernel NAPI interface
 * @hif_ctx: opaque pointer to the owning hif context
 * @napi: kernel napi_struct for this instance
 * @scale: poll scale; currently set to the same value on all instances
 * @id: instance id
 * @cpu: cpu id associated with this instance
 * @irq: irq number associated with this instance
 * @cpumask: cpu mask for this instance
 * @stats: per-cpu statistics array
 * @offld_flush_cb: offload flush callback; only present for data rx CE's
 * @rx_thread_napi: napi_struct used in rx-thread mode
 * @rx_thread_netdev: dummy net_dev used in rx-thread mode
 * @lro_ctx: LRO context handle
 *
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device    netdev; /* dummy net_dev */
	void                 *hif_ctx;
	struct napi_struct   napi;
	uint8_t              scale;   /* currently same on all instances */
	uint8_t              id;
	uint8_t              cpu;
	int                  irq;
	cpumask_t            cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct   rx_thread_napi;
	struct net_device    rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t        lro_ctx;
};
206 
/* Throughput state used for napi irq-affinity / clock-voting decisions */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};

/* Hotplug state of a cpu as tracked in the napi cpu table */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP
};
216 
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state:       up/down state of the core, kept current by the hotplug
 *               notifier
 * @core_id:     physical core id of the core
 * @cluster_id:  cluster this core belongs to
 * @core_mask:   mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq:    maximum clock this core can be clocked at
 *               same for all cpus of the same core.
 * @napis:       bitmap of napi instances on this core
 * @execs:       bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int			core_id;
	int			cluster_id;
	cpumask_t		core_mask;
	cpumask_t		thread_mask;
	unsigned int		max_freq;
	uint32_t		napis;
	uint32_t		execs;
	int			cluster_nxt;  /* index, not pointer */
};
247 
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: registered napi instances, indexed by pipe id
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: presumably the head index of the little-cluster core chain
 *              (see @cluster_nxt in struct qca_napi_cpu) — confirm
 * @bigcl_head: presumably the head index of the big-cluster core chain —
 *              confirm
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: napi flags (semantics not visible in this header)
 */
struct qca_napi_data {
	struct               hif_softc *hif_softc;
	qdf_spinlock_t       lock;
	uint32_t             state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t             ce_map;
	uint32_t             exec_map;
	uint32_t             user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu  napi_cpu[NR_CPUS];
	int                  lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t              flags;
};
280 
281 /**
282  * struct hif_config_info - Place Holder for HIF configuration
283  * @enable_self_recovery: Self Recovery
284  * @enable_runtime_pm: Enable Runtime PM
285  * @runtime_pm_delay: Runtime PM Delay
286  * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
287  *
288  * Structure for holding HIF ini parameters.
289  */
290 struct hif_config_info {
291 	bool enable_self_recovery;
292 #ifdef FEATURE_RUNTIME_PM
293 	bool enable_runtime_pm;
294 	u_int32_t runtime_pm_delay;
295 #endif
296 	uint64_t rx_softirq_max_yield_duration_ns;
297 };
298 
299 /**
300  * struct hif_target_info - Target Information
301  * @target_version: Target Version
302  * @target_type: Target Type
303  * @target_revision: Target Revision
304  * @soc_version: SOC Version
305  * @hw_name: pointer to hardware name
306  *
307  * Structure to hold target information.
308  */
309 struct hif_target_info {
310 	uint32_t target_version;
311 	uint32_t target_type;
312 	uint32_t target_revision;
313 	uint32_t soc_version;
314 	char *hw_name;
315 };
316 
/* Opaque handle type passed to all HIF APIs; the real hif state is
 * defined internally by the HIF implementation.
 */
struct hif_opaque_softc {
};
319 
320 /**
321  * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
322  *
323  * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
324  * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
325  *                         minimize power
326  * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
327  *                         platform-specific measures to completely power-off
328  *                         the module and associated hardware (i.e. cut power
329  *                         supplies)
330  */
331 enum HIF_DEVICE_POWER_CHANGE_TYPE {
332 	HIF_DEVICE_POWER_UP,
333 	HIF_DEVICE_POWER_DOWN,
334 	HIF_DEVICE_POWER_CUT
335 };
336 
337 /**
338  * enum hif_enable_type: what triggered the enabling of hif
339  *
340  * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
341  * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
342  */
343 enum hif_enable_type {
344 	HIF_ENABLE_TYPE_PROBE,
345 	HIF_ENABLE_TYPE_REINIT,
346 	HIF_ENABLE_TYPE_MAX
347 };
348 
349 /**
350  * enum hif_disable_type: what triggered the disabling of hif
351  *
352  * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
353  * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
354  * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
355  * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
356  */
357 enum hif_disable_type {
358 	HIF_DISABLE_TYPE_PROBE_ERROR,
359 	HIF_DISABLE_TYPE_REINIT_ERROR,
360 	HIF_DISABLE_TYPE_REMOVE,
361 	HIF_DISABLE_TYPE_SHUTDOWN,
362 	HIF_DISABLE_TYPE_MAX
363 };
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get fifo address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
400 
401 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
402 struct HID_ACCESS_LOG {
403 	uint32_t seqnum;
404 	bool is_write;
405 	void *addr;
406 	uint32_t value;
407 };
408 #endif
409 
410 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
411 		uint32_t value);
412 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
413 
414 #define HIF_MAX_DEVICES                 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context:          context to pass to the @dsr_handler
 *                    note : @rw_compl_handler is provided the context
 *                    passed to hif_read_write
 * @rw_compl_handler: Read / write completion handler
 * @dsr_handler:      DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};
428 
/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 *
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
};
448 
449 /* This API detaches the HTC layer from the HIF device */
450 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
451 
452 /****************************************************************/
453 /* BMI and Diag window abstraction                              */
454 /****************************************************************/
455 
456 #define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))
457 
458 #define DIAG_TRANSFER_LIMIT 2048U   /* maximum number of bytes that can be
459 				     * handled atomically by
460 				     * DiagRead/DiagWrite
461 				     */
462 
463 #ifdef WLAN_FEATURE_BMI
464 /*
465  * API to handle HIF-specific BMI message exchanges, this API is synchronous
466  * and only allowed to be called from a context that can block (sleep)
467  */
468 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
469 				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
470 				uint8_t *pSendMessage, uint32_t Length,
471 				uint8_t *pResponseMessage,
472 				uint32_t *pResponseLength, uint32_t TimeoutMS);
473 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
474 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
475 #else /* WLAN_FEATURE_BMI */
476 static inline void
477 hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
478 {
479 }
480 
481 static inline bool
482 hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
483 {
484 	return false;
485 }
486 #endif /* WLAN_FEATURE_BMI */
487 
488 /*
489  * APIs to handle HIF specific diagnostic read accesses. These APIs are
490  * synchronous and only allowed to be called from a context that
491  * can block (sleep). They are not high performance APIs.
492  *
493  * hif_diag_read_access reads a 4 Byte aligned/length value from a
494  * Target register or memory word.
495  *
496  * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
497  */
498 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
499 				uint32_t address, uint32_t *data);
500 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
501 		      uint8_t *data, int nbytes);
502 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
503 			void *ramdump_base, uint32_t address, uint32_t size);
504 /*
505  * APIs to handle HIF specific diagnostic write accesses. These APIs are
506  * synchronous and only allowed to be called from a context that
507  * can block (sleep).
508  * They are not high performance APIs.
509  *
510  * hif_diag_write_access writes a 4 Byte aligned/length value to a
511  * Target register or memory word.
512  *
513  * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
514  */
515 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
516 				 uint32_t address, uint32_t data);
517 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
518 			uint32_t address, uint8_t *data, int nbytes);
519 
520 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
521 
522 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
523 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
524 
525 /*
526  * Set the FASTPATH_mode_on flag in sc, for use by data path
527  */
528 #ifdef WLAN_FEATURE_FASTPATH
529 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
530 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
531 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
532 int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
533 				fastpath_msg_handler handler, void *context);
534 #else
535 static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
536 					      fastpath_msg_handler handler,
537 					      void *context)
538 {
539 	return QDF_STATUS_E_FAILURE;
540 }
541 static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
542 {
543 	return NULL;
544 }
545 
546 #endif
547 
548 /*
549  * Enable/disable CDC max performance workaround
550  * For max-performace set this to 0
551  * To allow SoC to enter sleep set this to 1
552  */
553 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
554 
555 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
556 			     qdf_shared_mem_t **ce_sr,
557 			     uint32_t *ce_sr_ring_size,
558 			     qdf_dma_addr_t *ce_reg_paddr);
559 
560 /**
561  * @brief List of callbacks - filled in by HTC.
562  */
563 struct hif_msg_callbacks {
564 	void *Context;
565 	/**< context meaningful to HTC */
566 	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
567 					uint32_t transferID,
568 					uint32_t toeplitz_hash_result);
569 	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
570 					uint8_t pipeID);
571 	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
572 	void (*fwEventHandler)(void *context, QDF_STATUS status);
573 };
574 
/* Target lifecycle status; see hif_get_target_status()/hif_set_target_status() */
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,  /* target got reset */
	TARGET_STATUS_EJECT,  /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
581 
/**
 * enum hif_attribute_flags: configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
 *                                + No pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};
593 
/*
 * Helpers to OR CE data-attribute fields into a caller-provided 32-bit
 * attr word at fixed bit positions; previously-set bits are preserved.
 * All macro parameters are fully parenthesized so that any expression
 * (e.g. a ternary) may be passed safely (CERT PRE01-C).
 */
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	((attr) |= ((v) & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	((attr) |= ((v) & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	((attr) |= ((v) & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	((attr) |= ((v) & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	((attr) |= ((v) & 0x01) << 30)
608 
609 struct hif_ul_pipe_info {
610 	unsigned int nentries;
611 	unsigned int nentries_mask;
612 	unsigned int sw_index;
613 	unsigned int write_index; /* cached copy */
614 	unsigned int hw_index;    /* cached copy */
615 	void *base_addr_owner_space; /* Host address space */
616 	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
617 };
618 
619 struct hif_dl_pipe_info {
620 	unsigned int nentries;
621 	unsigned int nentries_mask;
622 	unsigned int sw_index;
623 	unsigned int write_index; /* cached copy */
624 	unsigned int hw_index;    /* cached copy */
625 	void *base_addr_owner_space; /* Host address space */
626 	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
627 };
628 
629 struct hif_pipe_addl_info {
630 	uint32_t pci_mem;
631 	uint32_t ctrl_addr;
632 	struct hif_ul_pipe_info ul_pipe;
633 	struct hif_dl_pipe_info dl_pipe;
634 };
635 
636 #ifdef CONFIG_SLUB_DEBUG_ON
637 #define MSG_FLUSH_NUM 16
638 #else /* PERF build */
639 #define MSG_FLUSH_NUM 32
640 #endif /* SLUB_DEBUG_ON */
641 
642 struct hif_bus_id;
643 
644 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
645 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
646 		     int opcode, void *config, uint32_t config_len);
647 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
648 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
649 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
650 		   struct hif_msg_callbacks *callbacks);
651 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
652 void hif_stop(struct hif_opaque_softc *hif_ctx);
653 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
654 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
655 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
656 		      uint8_t cmd_id, bool start);
657 
658 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
659 				  uint32_t transferID, uint32_t nbytes,
660 				  qdf_nbuf_t wbuf, uint32_t data_attr);
661 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
662 			     int force);
663 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
664 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
665 			  uint8_t *DLPipe);
666 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
667 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
668 			int *dl_is_polled);
669 uint16_t
670 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
671 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
672 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
673 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
674 		     bool wait_for_it);
675 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
676 #ifndef HIF_PCI
677 static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
678 {
679 	return 0;
680 }
681 #else
682 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
683 #endif
684 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
685 			u32 *revision, const char **target_name);
686 
687 #ifdef RECEIVE_OFFLOAD
688 /**
689  * hif_offld_flush_cb_register() - Register the offld flush callback
690  * @scn: HIF opaque context
691  * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
692  *			 Or GRO/LRO flush when RxThread is not enabled. Called
693  *			 with corresponding context for flush.
694  * Return: None
695  */
696 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
697 				 void (offld_flush_handler)(void *ol_ctx));
698 
699 /**
700  * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
701  * @scn: HIF opaque context
702  *
703  * Return: None
704  */
705 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
706 #endif
707 
708 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
709 /**
710  * hif_exec_should_yield() - Check if hif napi context should yield
711  * @hif_ctx - HIF opaque context
712  * @grp_id - grp_id of the napi for which check needs to be done
713  *
714  * The function uses grp_id to look for NAPI and checks if NAPI needs to
715  * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
716  * yield decision.
717  *
718  * Return: true if NAPI needs to yield, else false
719  */
720 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
721 #else
722 static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
723 					 uint grp_id)
724 {
725 	return false;
726 }
727 #endif
728 
729 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
730 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
731 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
732 				      int htc_htt_tx_endpoint);
733 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode,
734 				  enum qdf_bus_type bus_type,
735 				  struct hif_driver_state_callbacks *cbk);
736 void hif_close(struct hif_opaque_softc *hif_ctx);
737 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
738 		      void *bdev, const struct hif_bus_id *bid,
739 		      enum qdf_bus_type bus_type,
740 		      enum hif_enable_type type);
741 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
742 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
743 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
744 #ifdef FEATURE_RUNTIME_PM
745 struct hif_pm_runtime_lock;
746 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
747 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx);
748 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx);
749 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx);
750 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
751 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
752 			struct hif_pm_runtime_lock *lock);
753 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
754 		struct hif_pm_runtime_lock *lock);
755 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
756 		struct hif_pm_runtime_lock *lock);
757 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
758 		struct hif_pm_runtime_lock *lock, unsigned int delay);
759 #else
760 struct hif_pm_runtime_lock {
761 	const char *name;
762 };
763 static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
764 static inline void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
765 {}
766 
767 static inline int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
768 { return 0; }
769 static inline int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
770 { return 0; }
771 static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
772 					const char *name)
773 { return 0; }
774 static inline void
775 hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
776 			struct hif_pm_runtime_lock *lock) {}
777 
778 static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
779 		struct hif_pm_runtime_lock *lock)
780 { return 0; }
781 static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
782 		struct hif_pm_runtime_lock *lock)
783 { return 0; }
784 static inline int
785 hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
786 		struct hif_pm_runtime_lock *lock, unsigned int delay)
787 { return 0; }
788 #endif
789 
790 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
791 				 bool is_packet_log_enabled);
792 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
793 
794 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
795 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
796 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
797 
798 #ifdef IPA_OFFLOAD
799 /**
800  * hif_get_ipa_hw_type() - get IPA hw type
801  *
802  * This API return the IPA hw type.
803  *
804  * Return: IPA hw type
805  */
806 static inline
807 enum ipa_hw_type hif_get_ipa_hw_type(void)
808 {
809 	return ipa_get_hw_type();
810 }
811 
812 /**
813  * hif_get_ipa_present() - get IPA hw status
814  *
815  * This API return the IPA hw status.
816  *
817  * Return: true if IPA is present or false otherwise
818  */
819 static inline
820 bool hif_get_ipa_present(void)
821 {
822 	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
823 		return true;
824 	else
825 		return false;
826 }
827 #endif
828 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
/**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
 */
833 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
834 
/**
 * hif_bus_late_resume() - resume non wmi traffic
 * @hif_ctx: hif context
 */
839 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
840 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
841 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
842 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
843 
844 /**
845  * hif_apps_irqs_enable() - Enables all irqs from the APPS side
846  * @hif_ctx: an opaque HIF handle to use
847  *
848  * As opposed to the standard hif_irq_enable, this function always applies to
849  * the APPS side kernel interrupt handling.
850  *
851  * Return: errno
852  */
853 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
854 
855 /**
856  * hif_apps_irqs_disable() - Disables all irqs from the APPS side
857  * @hif_ctx: an opaque HIF handle to use
858  *
859  * As opposed to the standard hif_irq_disable, this function always applies to
860  * the APPS side kernel interrupt handling.
861  *
862  * Return: errno
863  */
864 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
865 
866 /**
867  * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
868  * @hif_ctx: an opaque HIF handle to use
869  *
870  * As opposed to the standard hif_irq_enable, this function always applies to
871  * the APPS side kernel interrupt handling.
872  *
873  * Return: errno
874  */
875 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
876 
877 /**
878  * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
879  * @hif_ctx: an opaque HIF handle to use
880  *
881  * As opposed to the standard hif_irq_disable, this function always applies to
882  * the APPS side kernel interrupt handling.
883  *
884  * Return: errno
885  */
886 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
887 
#ifdef FEATURE_RUNTIME_PM
/*
 * Runtime PM entry points, available only when FEATURE_RUNTIME_PM is
 * enabled. The pre_* hooks and *_success/_failure notifications appear
 * to bracket the actual suspend/resume operations — confirm exact call
 * ordering against the bus-layer implementation.
 */
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
#endif
897 
/* Diagnostic / introspection helpers. */
int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
int hif_dump_registers(struct hif_opaque_softc *scn);
int ol_copy_ramdump(struct hif_opaque_softc *scn);
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
						   scn);
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
			   hif_target_status);
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg);
/* Data path send/update APIs. */
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
int hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, uint32_t
		transfer_id, u_int32_t len);
int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
	uint32_t transfer_id, uint32_t download_len);
void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
void hif_ce_war_disable(void);
void hif_ce_war_enable(void);
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
/* NSS wifi offload support, only built when the feature is enabled. */
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
		uint32_t pipe_num);
int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */

void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
				int rx_bundle_cnt);
int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);

void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);

/* Execution context type for an external interrupt group. */
enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,	/* serviced via NAPI polling */
	HIF_EXEC_TASKLET_TYPE,	/* serviced via tasklet */
};
945 
/* Handler invoked for an external interrupt group; see
 * hif_register_ext_group() for registration.
 */
typedef uint32_t (*ext_intr_handler)(void *, uint32_t);

/**
 * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
 * @softc: hif opaque context owning the exec context
 * @id: the id of the interrupt context
 *
 * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
 *         'id' registered with the OS
 */
int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
				uint8_t id);

uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
uint32_t  hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
		void *cb_ctx, const char *context_name,
		enum hif_exec_type type, uint32_t scale);

void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
				const char *context_name);

void hif_update_pipe_callback(struct hif_opaque_softc *osc,
				u_int8_t pipeid,
				struct hif_msg_callbacks *callbacks);

/**
 * hif_print_napi_stats() - Display HIF NAPI stats
 * @hif_ctx: HIF opaque context
 *
 * Return: None
 */
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);

/**
 * hif_clear_napi_stats() - Clear the accumulated NAPI latency stats
 * @hif_ctx: HIF opaque context
 *
 * Return: None
 */
void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
987 
988 #ifdef __cplusplus
989 }
990 #endif
991 
992 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
993 
994 /**
995  * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
996  * @hif_ctx - the HIF context to assign the callback to
997  * @callback - the callback to assign
998  * @priv - the private data to pass to the callback when invoked
999  *
1000  * Return: None
1001  */
1002 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
1003 			       void (*callback)(void *),
1004 			       void *priv);
1005 /*
1006  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
1007  * for defined here
1008  */
1009 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1010 ssize_t hif_dump_desc_trace_buf(struct device *dev,
1011 				struct device_attribute *attr, char *buf);
1012 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1013 					const char *buf, size_t size);
1014 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
1015 				const char *buf, size_t size);
1016 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
1017 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
1018 #endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
1019 
1020 /**
1021  * hif_set_ce_service_max_yield_time() - sets CE service max yield time
1022  * @hif: hif context
1023  * @ce_service_max_yield_time: CE service max yield time to set
1024  *
1025  * This API storess CE service max yield time in hif context based
1026  * on ini value.
1027  *
1028  * Return: void
1029  */
1030 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
1031 				       uint32_t ce_service_max_yield_time);
1032 
1033 /**
1034  * hif_get_ce_service_max_yield_time() - get CE service max yield time
1035  * @hif: hif context
1036  *
1037  * This API returns CE service max yield time.
1038  *
1039  * Return: CE service max yield time
1040  */
1041 unsigned long long
1042 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
1043 
1044 /**
1045  * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
1046  * @hif: hif context
1047  * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
1048  *
1049  * This API stores CE service max rx ind flush in hif context based
1050  * on ini value.
1051  *
1052  * Return: void
1053  */
1054 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
1055 				       uint8_t ce_service_max_rx_ind_flush);
1056 #ifdef OL_ATH_SMART_LOGGING
1057 /*
1058  * hif_log_ce_dump() - Copy all the CE DEST ring to buf
1059  * @scn : HIF handler
1060  * @buf_cur: Current pointer in ring buffer
1061  * @buf_init:Start of the ring buffer
1062  * @buf_sz: Size of the ring buffer
1063  * @ce: Copy Engine id
1064  * @skb_sz: Max size of the SKB buffer to be copied
1065  *
1066  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
1067  * and buffers pointed by them in to the given buf
1068  *
1069  * Return: Current pointer in ring buffer
1070  */
1071 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
1072 			 uint8_t *buf_init, uint32_t buf_sz,
1073 			 uint32_t ce, uint32_t skb_sz);
1074 #endif /* OL_ATH_SMART_LOGGING */
1075 #endif /* _HIF_H_ */
1076