xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_main.c (revision f28396d060cff5c6519f883cb28ae0116ce479f1)
1 /*
2  * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
22 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "hif_hw_version.h"
33 #if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
34      defined(HIF_IPCI))
35 #include "ce_tasklet.h"
36 #include "ce_api.h"
37 #endif
38 #include "qdf_trace.h"
39 #include "qdf_status.h"
40 #include "hif_debug.h"
41 #include "mp_dev.h"
42 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018)
43 #include "hal_api.h"
44 #endif
45 #include "hif_napi.h"
46 #include "hif_unit_test_suspend_i.h"
47 #include "qdf_module.h"
48 
/**
 * hif_dump() - trigger a bus-specific state dump
 * @hif_ctx: hif context
 * @cmd_id: dump command id, forwarded unchanged to the bus layer
 * @start: true to start the dump, false to stop it
 *
 * Thin wrapper around hif_trigger_dump().
 *
 * Return: n/a
 */
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
{
	hif_trigger_dump(hif_ctx, cmd_id, start);
}
53 
/**
 * hif_get_target_id() - get the target identifier/base
 * @scn: hif_softc
 *
 * Returns scn->mem (the mapped device memory) cast to A_target_id_t,
 * i.e. the virtual memory base address of the target.
 *
 * Return: A_target_id_t
 */
A_target_id_t hif_get_target_id(struct hif_softc *scn)
{
	return scn->mem;
}
67 
/**
 * hif_get_targetdef() - get the target register-definition table
 * @hif_ctx: hif context
 *
 * Return: pointer to the context's targetdef (opaque void *)
 */
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->targetdef;
}
80 
81 #ifdef FORCE_WAKE
/**
 * hif_srng_init_phase() - mark/unmark SRNG init phase in HAL
 * @hif_ctx: hif context
 * @init_phase: true while SRNG initialization is in progress
 *
 * Only meaningful for SRNG-based (copy-engine v2) targets; a no-op
 * otherwise.
 *
 * Return: n/a
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	if (ce_srng_based(HIF_GET_SOFTC(hif_ctx)))
		hal_set_init_phase(HIF_GET_SOFTC(hif_ctx)->hal_soc, init_phase);
}
90 #endif /* FORCE_WAKE */
91 
92 /**
93  * hif_vote_link_down(): unvote for link up
94  *
95  * Call hif_vote_link_down to release a previous request made using
96  * hif_vote_link_up. A hif_vote_link_down call should only be made
97  * after a corresponding hif_vote_link_up, otherwise you could be
98  * negating a vote from another source. When no votes are present
99  * hif will not guarantee the linkstate after hif_bus_suspend.
100  *
101  * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
102  * and initialization deinitialization sequencences.
103  *
104  * Return: n/a
105  */
106 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
107 {
108 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
109 
110 	QDF_BUG(scn);
111 	scn->linkstate_vote--;
112 	if (scn->linkstate_vote == 0)
113 		hif_bus_prevent_linkdown(scn, false);
114 }
115 
116 /**
117  * hif_vote_link_up(): vote to prevent bus from suspending
118  *
119  * Makes hif guarantee that fw can message the host normally
120  * durring suspend.
121  *
122  * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
123  * and initialization deinitialization sequencences.
124  *
125  * Return: n/a
126  */
127 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
128 {
129 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
130 
131 	QDF_BUG(scn);
132 	scn->linkstate_vote++;
133 	if (scn->linkstate_vote == 1)
134 		hif_bus_prevent_linkdown(scn, true);
135 }
136 
137 /**
138  * hif_can_suspend_link(): query if hif is permitted to suspend the link
139  *
140  * Hif will ensure that the link won't be suspended if the upperlayers
141  * don't want it to.
142  *
143  * SYNCHRONIZATION: MC thread is stopped before bus suspend thus
144  * we don't need extra locking to ensure votes dont change while
145  * we are in the process of suspending or resuming.
146  *
147  * Return: false if hif will guarantee link up durring suspend.
148  */
149 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
150 {
151 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
152 
153 	QDF_BUG(scn);
154 	return scn->linkstate_vote == 0;
155 }
156 
/**
 * hif_hia_item_address() - compute host-interest-area item address
 * @target_type: target type identifier (TARGET_TYPE_*)
 * @item_offset: byte offset of the item within the host-interest area
 *
 * Return: absolute host-interest address for @item_offset, or 0 (after
 * asserting) for targets that have no host-interest area
 */
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
{
	switch (target_type) {
	case TARGET_TYPE_AR6002:
		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6003:
		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6004:
		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6006:
		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR9888:
		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6320:
	case TARGET_TYPE_AR6320V2:
		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_ADRASTEA:
		/* ADRASTEA doesn't have a host interest address */
		ASSERT(0);
		return 0;
	case TARGET_TYPE_AR900B:
		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9984:
		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9888:
		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_IPQ4019:
		return IPQ4019_HOST_INTEREST_ADDRESS + item_offset;

	default:
		/* unknown target type: no host-interest area to address */
		ASSERT(0);
		return 0;
	}
}
198 
199 /**
200  * hif_max_num_receives_reached() - check max receive is reached
201  * @scn: HIF Context
202  * @count: unsigned int.
203  *
204  * Output check status as bool
205  *
206  * Return: bool
207  */
208 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
209 {
210 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
211 		return count > 120;
212 	else
213 		return count > MAX_NUM_OF_RECEIVES;
214 }
215 
/**
 * init_buffer_count() - choose the initial buffer count to allocate
 * @maxSize: requested maximum number of buffers
 *
 * Hook for per-OS-platform tuning of the initial buffer count;
 * platform owners may modify this as needed. The default simply
 * returns the requested maximum unchanged.
 *
 * Return: number of buffers to allocate initially
 */
qdf_size_t init_buffer_count(qdf_size_t maxSize)
{
	return maxSize;
}
229 
230 /**
231  * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
232  * @hif_ctx: hif context
233  * @htc_htt_tx_endpoint: htt_tx_endpoint
234  *
235  * Return: void
236  */
237 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
238 							int htc_htt_tx_endpoint)
239 {
240 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
241 
242 	if (!scn) {
243 		HIF_ERROR("%s: error: scn or scn->hif_sc is NULL!",
244 		       __func__);
245 		return;
246 	}
247 
248 	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
249 }
250 qdf_export_symbol(hif_save_htc_htt_config_endpoint);
251 
/*
 * Table mapping a (target version id, revision sub-id) pair to a human
 * readable hardware name; consulted by hif_get_hw_name(). The .name
 * strings are reported to upper layers/userspace and must not change.
 */
static const struct qwlan_hw qwlan_hw_list[] = {
	{
		.id = AR6320_REV1_VERSION,
		.subid = 0,
		.name = "QCA6174_REV1",
	},
	{
		.id = AR6320_REV1_1_VERSION,
		.subid = 0x1,
		.name = "QCA6174_REV1_1",
	},
	{
		.id = AR6320_REV1_3_VERSION,
		.subid = 0x2,
		.name = "QCA6174_REV1_3",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x4,
		.name = "QCA6174_REV2_1",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x5,
		.name = "QCA6174_REV2_2",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x6,
		.name = "QCA6174_REV2.3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x8,
		.name = "QCA6174_REV3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x9,
		.name = "QCA6174_REV3_1",
	},
	{
		.id = AR6320_REV3_2_VERSION,
		.subid = 0xA,
		.name = "AR6320_REV3_2_VERSION",
	},
	{
		.id = WCN3990_v1,
		.subid = 0x0,
		.name = "WCN3990_V1",
	},
	{
		.id = WCN3990_v2,
		.subid = 0x0,
		.name = "WCN3990_V2",
	},
	{
		.id = WCN3990_v2_1,
		.subid = 0x0,
		.name = "WCN3990_V2.1",
	},
	{
		.id = WCN3998,
		.subid = 0x0,
		.name = "WCN3998",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xC,
		.name = "QCA9379_REV1",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xD,
		.name = "QCA9379_REV1_1",
	}
};
329 
330 /**
331  * hif_get_hw_name(): get a human readable name for the hardware
332  * @info: Target Info
333  *
334  * Return: human readable name for the underlying wifi hardware.
335  */
336 static const char *hif_get_hw_name(struct hif_target_info *info)
337 {
338 	int i;
339 
340 	if (info->hw_name)
341 		return info->hw_name;
342 
343 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
344 		if (info->target_version == qwlan_hw_list[i].id &&
345 		    info->target_revision == qwlan_hw_list[i].subid) {
346 			return qwlan_hw_list[i].name;
347 		}
348 	}
349 
350 	info->hw_name = qdf_mem_malloc(64);
351 	if (!info->hw_name)
352 		return "Unknown Device (nomem)";
353 
354 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
355 			info->target_version);
356 	if (i < 0)
357 		return "Unknown Device (snprintf failure)";
358 	else
359 		return info->hw_name;
360 }
361 
/**
 * hif_get_hw_info() - query hardware version/revision/name
 * @scn: hif context
 * @version: out: target version
 * @revision: out: target revision
 * @target_name: out: human readable hardware name (static or cached
 *               storage owned by hif; caller must not free)
 *
 * Return: n/a
 */
void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
			const char **target_name)
{
	struct hif_target_info *info = hif_get_target_info_handle(scn);
	struct hif_softc *sc = HIF_GET_SOFTC(scn);

	/* USB targets populate target_info lazily from the device here */
	if (sc->bus_type == QDF_BUS_TYPE_USB)
		hif_usb_get_hw_info(sc);

	*version = info->target_version;
	*revision = info->target_revision;
	*target_name = hif_get_hw_name(info);
}
383 
/**
 * hif_get_dev_ba() - get the device base address
 * @hif_handle: hif context
 *
 * Return: mapped device memory base (scn->mem)
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem;
}
398 qdf_export_symbol(hif_get_dev_ba);
399 
#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
/**
 * hif_get_cfg_from_psoc() - Retrieve CE interrupt-threshold ini cfg
 * @scn: hif context
 * @psoc: psoc objmgr handle; may be NULL, in which case nothing is read
 *
 * Copies the CE status-ring timer/batch-count thresholds from the psoc
 * cfg store into scn->ini_cfg.
 *
 * Return: None
 */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
	if (psoc) {
		scn->ini_cfg.ce_status_ring_timer_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
		scn->ini_cfg.ce_status_ring_batch_count_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
	}
}
#else
/* stub: CE interrupt-threshold config compiled out */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
}
#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
428 
429 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
430 				  uint32_t mode,
431 				  enum qdf_bus_type bus_type,
432 				  struct hif_driver_state_callbacks *cbk,
433 				  struct wlan_objmgr_psoc *psoc)
434 {
435 	struct hif_softc *scn;
436 	QDF_STATUS status = QDF_STATUS_SUCCESS;
437 	int bus_context_size = hif_bus_get_context_size(bus_type);
438 
439 	if (bus_context_size == 0) {
440 		HIF_ERROR("%s: context size 0 not allowed", __func__);
441 		return NULL;
442 	}
443 
444 	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
445 	if (!scn) {
446 		HIF_ERROR("%s: cannot alloc memory for HIF context of size:%d",
447 						__func__, bus_context_size);
448 		return GET_HIF_OPAQUE_HDL(scn);
449 	}
450 
451 	scn->qdf_dev = qdf_ctx;
452 	scn->hif_con_param = mode;
453 	qdf_atomic_init(&scn->active_tasklet_cnt);
454 	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
455 	qdf_atomic_init(&scn->link_suspended);
456 	qdf_atomic_init(&scn->tasklet_from_intr);
457 	qdf_mem_copy(&scn->callbacks, cbk,
458 		     sizeof(struct hif_driver_state_callbacks));
459 	scn->bus_type  = bus_type;
460 
461 	hif_get_cfg_from_psoc(scn, psoc);
462 
463 	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
464 	status = hif_bus_open(scn, bus_type);
465 	if (status != QDF_STATUS_SUCCESS) {
466 		HIF_ERROR("%s: hif_bus_open error = %d, bus_type = %d",
467 				  __func__, status, bus_type);
468 		qdf_mem_free(scn);
469 		scn = NULL;
470 	}
471 
472 	return GET_HIF_OPAQUE_HDL(scn);
473 }
474 
475 #ifdef ADRASTEA_RRI_ON_DDR
476 /**
477  * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
478  * @scn: hif context
479  *
480  * Return: none
481  */
482 void hif_uninit_rri_on_ddr(struct hif_softc *scn)
483 {
484 	if (scn->vaddr_rri_on_ddr)
485 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
486 					(CE_COUNT * sizeof(uint32_t)),
487 					scn->vaddr_rri_on_ddr,
488 					scn->paddr_rri_on_ddr, 0);
489 	scn->vaddr_rri_on_ddr = NULL;
490 }
491 #endif
492 
/**
 * hif_close() - tear down and free a hif context created by hif_open()
 * @hif_ctx: hif context
 *
 * Return: n/a
 */
void hif_close(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		HIF_ERROR("%s: hif_opaque_softc is NULL", __func__);
		return;
	}

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	/* free the fallback hw name cached by hif_get_hw_name();
	 * park the pointer on a static string first so late readers
	 * never see freed memory
	 */
	if (scn->target_info.hw_name) {
		char *hw_name = scn->target_info.hw_name;

		scn->target_info.hw_name = "ErrUnloading";
		qdf_mem_free(hw_name);
	}

	hif_uninit_rri_on_ddr(scn);

	hif_bus_close(scn);

	qdf_mem_free(scn);
}
526 
527 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
528      defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
529      defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
530      defined(QCA_WIFI_QCA6750))
/* attach the HAL layer for SRNG-based targets; no-op for legacy CE */
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		scn->hal_soc = hal_attach(
					hif_softc_to_hif_opaque_softc(scn),
					scn->qdf_dev);
		if (!scn->hal_soc)
			return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
543 
/* detach the HAL layer attached by hif_hal_attach(); no-op for legacy CE */
static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		hal_detach(scn->hal_soc);
		scn->hal_soc = NULL;
	}

	return QDF_STATUS_SUCCESS;
}
553 #else
/* stub: no SRNG-capable target compiled in, nothing to attach */
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
558 
/* stub: no SRNG-capable target compiled in, nothing to detach */
static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
563 #endif
564 
/**
 * hif_enable() - bring up the bus, HAL and target
 * @hif_ctx: hif context from hif_open()
 * @dev: device
 * @bdev: bus device
 * @bid: bus ID
 * @bus_type: bus type
 * @type: enable type
 *
 * On failure each already-completed step is unwound via the goto
 * cleanup chain below.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
					  void *bdev,
					  const struct hif_bus_id *bid,
					  enum qdf_bus_type bus_type,
					  enum hif_enable_type type)
{
	QDF_STATUS status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		HIF_ERROR("%s: hif_ctx = NULL", __func__);
		return QDF_STATUS_E_NULL_VALUE;
	}

	status = hif_enable_bus(scn, dev, bdev, bid, type);
	if (status != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: hif_enable_bus error = %d",
				  __func__, status);
		return status;
	}

	status = hif_hal_attach(scn);
	if (status != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: hal attach failed", __func__);
		goto disable_bus;
	}

	if (hif_bus_configure(scn)) {
		HIF_ERROR("%s: Target probe failed.", __func__);
		status = QDF_STATUS_E_FAILURE;
		goto hal_detach;
	}

	hif_ut_suspend_init(scn);

	/*
	 * Flag to avoid potential unallocated memory access from MSI
	 * interrupt handler which could get scheduled as soon as MSI
	 * is enabled, i.e to take care of the race due to the order
	 * in where MSI is enabled before the memory, that will be
	 * in interrupt handlers, is allocated.
	 */

	scn->hif_init_done = true;

	HIF_DBG("%s: OK", __func__);

	return QDF_STATUS_SUCCESS;

hal_detach:
	hif_hal_detach(scn);
disable_bus:
	hif_disable_bus(scn);
	return status;
}
631 
632 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
633 {
634 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
635 
636 	if (!scn)
637 		return;
638 
639 	hif_nointrs(scn);
640 	if (scn->hif_init_done == false)
641 		hif_shutdown_device(hif_ctx);
642 	else
643 		hif_stop(hif_ctx);
644 
645 	hif_hal_detach(scn);
646 
647 	hif_disable_bus(scn);
648 
649 	hif_wlan_disable(scn);
650 
651 	scn->notice_send = false;
652 
653 	HIF_DBG("%s: X", __func__);
654 }
655 
656 #ifdef CE_TASKLET_DEBUG_ENABLE
/**
 * hif_enable_ce_latency_stats() - enable/disable CE latency statistics
 * @hif_ctx: hif context
 * @val: nonzero to enable collection, 0 to disable
 *
 * Return: n/a
 */
void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	scn->ce_latency_stats = val;
}
666 #endif
667 
/* print bus-level statistics for this hif context */
void hif_display_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_display_bus_stats(hif_ctx);
}
672 
673 qdf_export_symbol(hif_display_stats);
674 
/* reset bus-level statistics for this hif context */
void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_clear_bus_stats(hif_ctx);
}
679 
680 /**
681  * hif_crash_shutdown_dump_bus_register() - dump bus registers
682  * @hif_ctx: hif_ctx
683  *
684  * Return: n/a
685  */
686 #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) \
687 && defined(DEBUG)
688 
/* dump bus registers if the SoC is accessible; log on dump failure */
static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
{
	struct hif_opaque_softc *scn = hif_ctx;

	/* skip the dump entirely when the SoC is not in a readable state */
	if (hif_check_soc_status(scn))
		return;

	if (hif_dump_registers(scn))
		HIF_ERROR("Failed to dump bus registers!");
}
699 
700 /**
701  * hif_crash_shutdown(): hif_crash_shutdown
702  *
703  * This function is called by the platform driver to dump CE registers
704  *
705  * @hif_ctx: hif_ctx
706  *
707  * Return: n/a
708  */
709 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
710 {
711 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
712 
713 	if (!hif_ctx)
714 		return;
715 
716 	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
717 		HIF_INFO_MED("%s: RAM dump disabled for bustype %d",
718 				__func__, scn->bus_type);
719 		return;
720 	}
721 
722 	if (TARGET_STATUS_RESET == scn->target_status) {
723 		HIF_INFO_MED("%s: Target is already asserted, ignore!",
724 			    __func__);
725 		return;
726 	}
727 
728 	if (hif_is_load_or_unload_in_progress(scn)) {
729 		HIF_ERROR("%s: Load/unload is in progress, ignore!", __func__);
730 		return;
731 	}
732 
733 	hif_crash_shutdown_dump_bus_register(hif_ctx);
734 
735 	if (ol_copy_ramdump(hif_ctx))
736 		goto out;
737 
738 	HIF_INFO_MED("%s: RAM dump collecting completed!", __func__);
739 
740 out:
741 	return;
742 }
743 #else
/* stub: RAM-dump collection compiled out */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	HIF_INFO_MED("%s: Collecting target RAM dump disabled",
		__func__);
}
749 #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
750 
751 #ifdef QCA_WIFI_3_0
/**
 * hif_check_fw_reg() - check firmware register state
 * @scn: hif context
 *
 * No-op on QCA_WIFI_3_0 targets; always reports success.
 *
 * Return: 0 (success)
 */
int hif_check_fw_reg(struct hif_opaque_softc *scn)
{
	return 0;
}
763 #endif
764 
/**
 * hif_read_phy_mem_base() - read the physical memory base address
 * @scn: hif context
 * @phy_mem_base: out: physical memory base (scn->mem_pa)
 *
 * Return: n/a
 */
void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
{
	*phy_mem_base = scn->mem_pa;
}
776 qdf_export_symbol(hif_read_phy_mem_base);
777 
/**
 * hif_get_device_type() - map PCI/bus device and revision ids to hif
 *                         and target types
 * @device_id: device_id
 * @revision_id: revision_id (only consulted for AR6320)
 * @hif_type: out: HIF_TYPE_* for the device
 * @target_type: out: TARGET_TYPE_* for the device
 *
 * NOTE(review): the final TARGET_TYPE_UNKNOWN check reads *target_type
 * even on the default (unsupported-id) path, so callers appear expected
 * to pre-initialize *target_type before calling — confirm at call sites.
 *
 * Return: 0 on success, -ENODEV for unsupported ids
 */
int hif_get_device_type(uint32_t device_id,
			uint32_t revision_id,
			uint32_t *hif_type, uint32_t *target_type)
{
	int ret = 0;

	switch (device_id) {
	case ADRASTEA_DEVICE_ID_P2_E12:

		*hif_type = HIF_TYPE_ADRASTEA;
		*target_type = TARGET_TYPE_ADRASTEA;
		break;

	case AR9888_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		break;

	case AR6320_DEVICE_ID:
		/* AR6320 variants are distinguished by firmware revision */
		switch (revision_id) {
		case AR6320_FW_1_1:
		case AR6320_FW_1_3:
			*hif_type = HIF_TYPE_AR6320;
			*target_type = TARGET_TYPE_AR6320;
			break;

		case AR6320_FW_2_0:
		case AR6320_FW_3_0:
		case AR6320_FW_3_2:
			*hif_type = HIF_TYPE_AR6320V2;
			*target_type = TARGET_TYPE_AR6320V2;
			break;

		default:
			HIF_ERROR("%s: error - dev_id = 0x%x, rev_id = 0x%x",
				   __func__, device_id, revision_id);
			ret = -ENODEV;
			goto end;
		}
		break;

	case AR9887_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		HIF_INFO(" *********** AR9887 **************");
		break;

	case QCA9984_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9984;
		*target_type = TARGET_TYPE_QCA9984;
		HIF_INFO(" *********** QCA9984 *************");
		break;

	case QCA9888_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9888;
		*target_type = TARGET_TYPE_QCA9888;
		HIF_INFO(" *********** QCA9888 *************");
		break;

	case AR900B_DEVICE_ID:
		*hif_type = HIF_TYPE_AR900B;
		*target_type = TARGET_TYPE_AR900B;
		HIF_INFO(" *********** AR900B *************");
		break;

	case IPQ4019_DEVICE_ID:
		*hif_type = HIF_TYPE_IPQ4019;
		*target_type = TARGET_TYPE_IPQ4019;
		HIF_INFO(" *********** IPQ4019  *************");
		break;

	case QCA8074_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074;
		*target_type = TARGET_TYPE_QCA8074;
		HIF_INFO(" *********** QCA8074  *************\n");
		break;

	case QCA6290_EMULATION_DEVICE_ID:
	case QCA6290_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6290;
		*target_type = TARGET_TYPE_QCA6290;
		HIF_INFO(" *********** QCA6290EMU *************\n");
		break;

	case QCN9000_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9000;
		*target_type = TARGET_TYPE_QCN9000;
		HIF_INFO(" *********** QCN9000 *************\n");
		break;

	case QCN7605_DEVICE_ID:
	case QCN7605_COMPOSITE:
	case QCN7605_STANDALONE:
	case QCN7605_STANDALONE_V2:
	case QCN7605_COMPOSITE_V2:
		*hif_type = HIF_TYPE_QCN7605;
		*target_type = TARGET_TYPE_QCN7605;
		HIF_INFO(" *********** QCN7605 *************\n");
		break;

	case QCA6390_DEVICE_ID:
	case QCA6390_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6390;
		*target_type = TARGET_TYPE_QCA6390;
		HIF_INFO(" *********** QCA6390 *************\n");
		break;

	case QCA6490_DEVICE_ID:
	case QCA6490_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6490;
		*target_type = TARGET_TYPE_QCA6490;
		HIF_INFO(" *********** QCA6490 *************\n");
		break;

	case QCA6750_DEVICE_ID:
	case QCA6750_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6750;
		*target_type = TARGET_TYPE_QCA6750;
		HIF_INFO(" *********** QCA6750 *************\n");
		break;

	case QCA8074V2_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074V2;
		*target_type = TARGET_TYPE_QCA8074V2;
		HIF_INFO(" *********** QCA8074V2 *************\n");
		break;

	case QCA6018_DEVICE_ID:
	case RUMIM2M_DEVICE_ID_NODE0:
	case RUMIM2M_DEVICE_ID_NODE1:
	case RUMIM2M_DEVICE_ID_NODE2:
	case RUMIM2M_DEVICE_ID_NODE3:
	case RUMIM2M_DEVICE_ID_NODE4:
	case RUMIM2M_DEVICE_ID_NODE5:
		*hif_type = HIF_TYPE_QCA6018;
		*target_type = TARGET_TYPE_QCA6018;
		HIF_INFO(" *********** QCA6018 *************\n");
		break;

	default:
		HIF_ERROR("%s: Unsupported device ID = 0x%x!",
			  __func__, device_id);
		ret = -ENODEV;
		break;
	}

	if (*target_type == TARGET_TYPE_UNKNOWN) {
		HIF_ERROR("%s: Unsupported target_type!", __func__);
		ret = -ENODEV;
	}
end:
	return ret;
}
940 
/**
 * hif_get_bus_type() - return the bus type of this hif context
 * @hif_hdl: hif context
 *
 * Return: enum qdf_bus_type
 */
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	return scn->bus_type;
}
952 
/**
 * Target info and ini parameters are global to the driver.
 * These structures are therefore exposed to all modules in the driver;
 * rather than maintaining multiple copies of the same info, modules
 * obtain the handle from hif and modify the data through hif.
 */
960 
/**
 * hif_get_ini_handle() - API to get hif_config_param handle
 * @hif_ctx: HIF Context
 *
 * Return: pointer to hif_config_info
 */
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);

	return &sc->hif_config;
}
973 
974 /**
975  * hif_get_target_info_handle() - API to get hif_target_info handle
976  * @hif_ctx: HIF context
977  *
978  * Return: Pointer to hif_target_info
979  */
980 struct hif_target_info *hif_get_target_info_handle(
981 					struct hif_opaque_softc *hif_ctx)
982 {
983 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
984 
985 	return &sc->target_info;
986 
987 }
988 qdf_export_symbol(hif_get_target_info_handle);
989 
990 #ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - register a receive-offload flush callback
 * @scn: hif context
 * @offld_flush_handler: flush callback to install
 *
 * Only effective when NAPI is enabled; otherwise logs an error.
 *
 * Return: n/a
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *))
{
	if (hif_napi_enabled(scn, -1))
		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
	else
		HIF_ERROR("NAPI not enabled\n");
}
999 qdf_export_symbol(hif_offld_flush_cb_register);
1000 
/**
 * hif_offld_flush_cb_deregister() - remove the receive-offload flush callback
 * @scn: hif context
 *
 * Only effective when NAPI is enabled; otherwise logs an error.
 *
 * Return: n/a
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
{
	if (hif_napi_enabled(scn, -1))
		hif_napi_rx_offld_flush_cb_deregister(scn);
	else
		HIF_ERROR("NAPI not enabled\n");
}
1008 qdf_export_symbol(hif_offld_flush_cb_deregister);
1009 
/**
 * hif_get_rx_ctx_id() - translate a CE pipe id to a rx context id
 * @ctx_id: CE pipe id
 * @hif_hdl: hif context
 *
 * With NAPI enabled the pipe id is mapped via NAPI_PIPE2ID; otherwise
 * the pipe id is used directly.
 *
 * Return: rx context id
 */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	if (hif_napi_enabled(hif_hdl, -1))
		return NAPI_PIPE2ID(ctx_id);
	else
		return ctx_id;
}
1017 #else /* RECEIVE_OFFLOAD */
/* stub: receive offload compiled out; NOTE(review): ctx_id is ignored
 * and 0 is always returned — confirm callers expect this
 */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return 0;
}
1022 qdf_export_symbol(hif_get_rx_ctx_id);
1023 #endif /* RECEIVE_OFFLOAD */
1024 
1025 #if defined(FEATURE_LRO)
1026 
/**
 * hif_get_lro_info() - return the LRO instance for an instance ID
 * @ctx_id: LRO instance ID
 * @hif_hdl: HIF Context
 *
 * Resolves through NAPI when it is enabled, otherwise through the
 * copy-engine context.
 *
 * Return: Pointer to LRO instance.
 */
void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	if (hif_napi_enabled(hif_hdl, -1))
		return hif_napi_get_lro_info(hif_hdl, ctx_id);

	return hif_ce_get_lro_ctx(hif_hdl, ctx_id);
}
1045 #endif
1046 
/**
 * hif_get_target_status() - API to get target status
 * @hif_ctx: HIF Context
 *
 * Return: enum hif_target_status
 */
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->target_status;
}
1059 qdf_export_symbol(hif_get_target_status);
1060 
/**
 * hif_set_target_status() - API to set target status
 * @hif_ctx: HIF Context
 * @status: Target Status
 *
 * Return: void
 */
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
			   hif_target_status status)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->target_status = status;
}
1075 
/**
 * hif_init_ini_config() - initialize HIF configuration parameters
 * @hif_ctx: HIF Context
 * @cfg: HIF Configuration, copied by value into the context
 *
 * Return: void
 */
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
}
1090 
1091 /**
1092  * hif_get_conparam() - API to get driver mode in HIF
1093  * @scn: HIF Context
1094  *
1095  * Return: driver mode of operation
1096  */
1097 uint32_t hif_get_conparam(struct hif_softc *scn)
1098 {
1099 	if (!scn)
1100 		return 0;
1101 
1102 	return scn->hif_con_param;
1103 }
1104 
/**
 * hif_get_callbacks_handle() - API to get the callbacks table
 * @scn: HIF Context
 *
 * Return: pointer to the HIF driver-state callbacks stored in @scn
 */
struct hif_driver_state_callbacks *hif_get_callbacks_handle(
							struct hif_softc *scn)
{
	return &scn->callbacks;
}
1116 
1117 /**
1118  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
1119  * @scn: HIF Context
1120  *
1121  * Return: True/False
1122  */
1123 bool hif_is_driver_unloading(struct hif_softc *scn)
1124 {
1125 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1126 
1127 	if (cbk && cbk->is_driver_unloading)
1128 		return cbk->is_driver_unloading(cbk->context);
1129 
1130 	return false;
1131 }
1132 
/**
 * hif_is_load_or_unload_in_progress() - ask upper layers if a driver
 * load/unload is in progress
 * @scn: HIF Context
 *
 * Returns false when no callback is registered.
 *
 * Return: True/False
 */
bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_load_unload_in_progress)
		return cbk->is_load_unload_in_progress(cbk->context);

	return false;
}
1149 
/**
 * hif_is_recovery_in_progress() - ask upper layers if recovery is in
 * progress
 * @scn: HIF Context
 *
 * Returns false when no callback is registered.
 *
 * Return: True/False
 */
bool hif_is_recovery_in_progress(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_recovery_in_progress)
		return cbk->is_recovery_in_progress(cbk->context);

	return false;
}
1166 
1167 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
1168     defined(HIF_IPCI)
1169 
/**
 * hif_update_pipe_callback() - register pipe-specific callbacks
 * @osc: Opaque softc
 * @pipeid: pipe id; must be < CE_COUNT_MAX
 * @callbacks: callbacks to register, copied by value into the pipe info
 *
 * Return: void
 */

void hif_update_pipe_callback(struct hif_opaque_softc *osc,
					u_int8_t pipeid,
					struct hif_msg_callbacks *callbacks)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info;

	QDF_BUG(pipeid < CE_COUNT_MAX);

	HIF_INFO_LO("+%s pipeid %d\n", __func__, pipeid);

	pipe_info = &hif_state->pipe_info[pipeid];

	qdf_mem_copy(&pipe_info->pipe_callbacks,
			callbacks, sizeof(pipe_info->pipe_callbacks));

	HIF_INFO_LO("-%s\n", __func__);
}
1198 qdf_export_symbol(hif_update_pipe_callback);
1199 
1200 /**
1201  * hif_is_target_ready() - API to query if target is in ready state
1202  * progress
1203  * @scn: HIF Context
1204  *
1205  * Return: True/False
1206  */
1207 bool hif_is_target_ready(struct hif_softc *scn)
1208 {
1209 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1210 
1211 	if (cbk && cbk->is_target_ready)
1212 		return cbk->is_target_ready(cbk->context);
1213 	/*
1214 	 * if callback is not registered then there is no way to determine
1215 	 * if target is ready. In-such case return true to indicate that
1216 	 * target is ready.
1217 	 */
1218 	return true;
1219 }
1220 qdf_export_symbol(hif_is_target_ready);
1221 
1222 /**
1223  * hif_batch_send() - API to access hif specific function
1224  * ce_batch_send.
1225  * @osc: HIF Context
1226  * @msdu : list of msdus to be sent
1227  * @transfer_id : transfer id
1228  * @len : donwloaded length
1229  *
1230  * Return: list of msds not sent
1231  */
1232 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1233 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
1234 {
1235 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1236 
1237 	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
1238 			len, sendhead);
1239 }
1240 qdf_export_symbol(hif_batch_send);
1241 
1242 /**
1243  * hif_update_tx_ring() - API to access hif specific function
1244  * ce_update_tx_ring.
1245  * @osc: HIF Context
1246  * @num_htt_cmpls : number of htt compl received.
1247  *
1248  * Return: void
1249  */
1250 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
1251 {
1252 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1253 
1254 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
1255 }
1256 qdf_export_symbol(hif_update_tx_ring);
1257 
1258 
1259 /**
1260  * hif_send_single() - API to access hif specific function
1261  * ce_send_single.
1262  * @osc: HIF Context
1263  * @msdu : msdu to be sent
1264  * @transfer_id: transfer id
1265  * @len : downloaded length
1266  *
1267  * Return: msdu sent status
1268  */
1269 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1270 			   uint32_t transfer_id, u_int32_t len)
1271 {
1272 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1273 
1274 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
1275 			len);
1276 }
1277 qdf_export_symbol(hif_send_single);
1278 #endif
1279 
1280 /**
1281  * hif_reg_write() - API to access hif specific function
1282  * hif_write32_mb.
1283  * @hif_ctx : HIF Context
1284  * @offset : offset on which value has to be written
1285  * @value : value to be written
1286  *
1287  * Return: None
1288  */
1289 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
1290 		uint32_t value)
1291 {
1292 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1293 
1294 	hif_write32_mb(scn, scn->mem + offset, value);
1295 
1296 }
1297 qdf_export_symbol(hif_reg_write);
1298 
1299 /**
1300  * hif_reg_read() - API to access hif specific function
1301  * hif_read32_mb.
1302  * @hif_ctx : HIF Context
1303  * @offset : offset from which value has to be read
1304  *
1305  * Return: Read value
1306  */
1307 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
1308 {
1309 
1310 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1311 
1312 	return hif_read32_mb(scn, scn->mem + offset);
1313 }
1314 qdf_export_symbol(hif_reg_read);
1315 
1316 /**
1317  * hif_ramdump_handler(): generic ramdump handler
1318  * @scn: struct hif_opaque_softc
1319  *
1320  * Return: None
1321  */
1322 void hif_ramdump_handler(struct hif_opaque_softc *scn)
1323 {
1324 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
1325 		hif_usb_ramdump_handler(scn);
1326 }
1327 
1328 irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
1329 {
1330 	struct hif_softc *scn = context;
1331 	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
1332 
1333 	HIF_INFO("wake interrupt received on irq %d", irq);
1334 
1335 	if (hif_pm_runtime_get_monitor_wake_intr(hif_ctx)) {
1336 		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
1337 		hif_pm_runtime_request_resume(hif_ctx);
1338 	}
1339 
1340 	if (scn->initial_wakeup_cb)
1341 		scn->initial_wakeup_cb(scn->initial_wakeup_priv);
1342 
1343 	if (hif_is_ut_suspended(scn))
1344 		hif_ut_fw_resume(scn);
1345 
1346 	qdf_pm_system_wakeup();
1347 
1348 	return IRQ_HANDLED;
1349 }
1350 
1351 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
1352 			       void (*callback)(void *),
1353 			       void *priv)
1354 {
1355 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1356 
1357 	scn->initial_wakeup_cb = callback;
1358 	scn->initial_wakeup_priv = priv;
1359 }
1360 
1361 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
1362 				       uint32_t ce_service_max_yield_time)
1363 {
1364 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1365 
1366 	hif_ctx->ce_service_max_yield_time =
1367 		ce_service_max_yield_time * 1000;
1368 }
1369 
1370 unsigned long long
1371 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
1372 {
1373 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1374 
1375 	return hif_ctx->ce_service_max_yield_time;
1376 }
1377 
1378 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
1379 				       uint8_t ce_service_max_rx_ind_flush)
1380 {
1381 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1382 
1383 	if (ce_service_max_rx_ind_flush == 0 ||
1384 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
1385 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
1386 	else
1387 		hif_ctx->ce_service_max_rx_ind_flush =
1388 						ce_service_max_rx_ind_flush;
1389 }
1390