xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_main.c (revision 11f5a63a6cbdda84849a730de22f0a71e635d58c)
1 /*
2  * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
22 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "hif_hw_version.h"
33 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
34 #include "ce_tasklet.h"
35 #include "ce_api.h"
36 #endif
37 #include "qdf_trace.h"
38 #include "qdf_status.h"
39 #include "hif_debug.h"
40 #include "mp_dev.h"
41 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018)
42 #include "hal_api.h"
43 #endif
44 #include "hif_napi.h"
45 #include "hif_unit_test_suspend_i.h"
46 #include "qdf_module.h"
47 
/**
 * hif_dump() - trigger a hif state dump
 * @hif_ctx: opaque hif context
 * @cmd_id: dump command id, forwarded unchanged to the dump handler
 * @start: true to start the dump, false to stop it
 *
 * Thin wrapper around hif_trigger_dump().
 *
 * Return: n/a
 */
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
{
	hif_trigger_dump(hif_ctx, cmd_id, start);
}
52 
/**
 * hif_get_target_id(): hif_get_target_id
 * @scn: hif_softc
 *
 * Return the virtual memory base address (scn->mem) to the caller.
 *
 * Return: A_target_id_t
 */
A_target_id_t hif_get_target_id(struct hif_softc *scn)
{
	return scn->mem;
}
66 
/**
 * hif_get_targetdef(): get the target register definition table
 * @hif_ctx: opaque hif context
 *
 * Return: void * - scn->targetdef, the bus/target specific register
 * definition table stored in the hif context
 */
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->targetdef;
}
79 
/**
 * hif_vote_link_down(): unvote for link up
 * @hif_ctx: opaque hif context
 *
 * Call hif_vote_link_down to release a previous request made using
 * hif_vote_link_up. A hif_vote_link_down call should only be made
 * after a corresponding hif_vote_link_up, otherwise you could be
 * negating a vote from another source. When no votes are present
 * hif will not guarantee the linkstate after hif_bus_suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
 * and initialization deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	/* drop one vote; when the count hits zero the bus may suspend */
	scn->linkstate_vote--;
	if (scn->linkstate_vote == 0)
		hif_bus_prevent_linkdown(scn, false);
}
103 
/**
 * hif_vote_link_up(): vote to prevent bus from suspending
 * @hif_ctx: opaque hif context
 *
 * Makes hif guarantee that fw can message the host normally
 * during suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_down by only calling in MC thread
 * and initialization deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	/* first vote arms link-down prevention on the bus */
	scn->linkstate_vote++;
	if (scn->linkstate_vote == 1)
		hif_bus_prevent_linkdown(scn, true);
}
124 
/**
 * hif_can_suspend_link(): query if hif is permitted to suspend the link
 * @hif_ctx: opaque hif context
 *
 * Hif will ensure that the link won't be suspended if the upperlayers
 * don't want it to.
 *
 * SYNCHRONIZATION: MC thread is stopped before bus suspend thus
 * we don't need extra locking to ensure votes don't change while
 * we are in the process of suspending or resuming.
 *
 * Return: false if hif will guarantee link up during suspend.
 */
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	return scn->linkstate_vote == 0;
}
144 
/**
 * hif_hia_item_address(): get the target address of a host interest item
 * @target_type: TARGET_TYPE_* value identifying the chip family
 * @item_offset: byte offset of the item within the host interest area
 *
 * Maps the target type to its HOST_INTEREST base address and adds the
 * item offset.
 *
 * Return: target address of the item; 0 (after asserting) for targets
 * with no host interest area (ADRASTEA) or for unknown target types
 */
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
{
	switch (target_type) {
	case TARGET_TYPE_AR6002:
		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6003:
		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6004:
		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6006:
		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR9888:
		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6320:
	case TARGET_TYPE_AR6320V2:
		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_ADRASTEA:
		/* ADRASTEA doesn't have a host interest address */
		ASSERT(0);
		return 0;
	case TARGET_TYPE_AR900B:
		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9984:
		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9888:
		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_IPQ4019:
		return IPQ4019_HOST_INTEREST_ADDRESS + item_offset;

	default:
		ASSERT(0);
		return 0;
	}
}
186 
187 /**
188  * hif_max_num_receives_reached() - check max receive is reached
189  * @scn: HIF Context
190  * @count: unsigned int.
191  *
192  * Output check status as bool
193  *
194  * Return: bool
195  */
196 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
197 {
198 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
199 		return count > 120;
200 	else
201 		return count > MAX_NUM_OF_RECEIVES;
202 }
203 
/**
 * init_buffer_count() - initial buffer count
 * @maxSize: maximum buffer count supported by the caller
 *
 * routine to modify the initial buffer count to be allocated on an os
 * platform basis. Platform owner will need to modify this as needed;
 * currently it is an identity mapping (the full maxSize is used).
 *
 * Return: qdf_size_t
 */
qdf_size_t init_buffer_count(qdf_size_t maxSize)
{
	return maxSize;
}
217 
/**
 * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
 * @hif_ctx: hif context
 * @htc_htt_tx_endpoint: HTC endpoint id used for HTT tx
 *
 * Caches the HTT tx endpoint id in the hif context for later use.
 *
 * Return: void
 */
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
							int htc_htt_tx_endpoint)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		HIF_ERROR("%s: error: scn or scn->hif_sc is NULL!",
		       __func__);
		return;
	}

	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
}
qdf_export_symbol(hif_save_htc_htt_config_endpoint);
239 
/*
 * Table mapping (hardware version id, sub id) pairs to human readable
 * hardware names; consumed by hif_get_hw_name(). Entries with the same
 * .id are distinguished by .subid.
 */
static const struct qwlan_hw qwlan_hw_list[] = {
	{
		.id = AR6320_REV1_VERSION,
		.subid = 0,
		.name = "QCA6174_REV1",
	},
	{
		.id = AR6320_REV1_1_VERSION,
		.subid = 0x1,
		.name = "QCA6174_REV1_1",
	},
	{
		.id = AR6320_REV1_3_VERSION,
		.subid = 0x2,
		.name = "QCA6174_REV1_3",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x4,
		.name = "QCA6174_REV2_1",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x5,
		.name = "QCA6174_REV2_2",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x6,
		.name = "QCA6174_REV2.3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x8,
		.name = "QCA6174_REV3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x9,
		.name = "QCA6174_REV3_1",
	},
	{
		.id = AR6320_REV3_2_VERSION,
		.subid = 0xA,
		.name = "AR6320_REV3_2_VERSION",
	},
	{
		.id = WCN3990_v1,
		.subid = 0x0,
		.name = "WCN3990_V1",
	},
	{
		.id = WCN3990_v2,
		.subid = 0x0,
		.name = "WCN3990_V2",
	},
	{
		.id = WCN3990_v2_1,
		.subid = 0x0,
		.name = "WCN3990_V2.1",
	},
	{
		.id = WCN3998,
		.subid = 0x0,
		.name = "WCN3998",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xC,
		.name = "QCA9379_REV1",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xD,
		.name = "QCA9379_REV1_1",
	}
};
317 
318 /**
319  * hif_get_hw_name(): get a human readable name for the hardware
320  * @info: Target Info
321  *
322  * Return: human readable name for the underlying wifi hardware.
323  */
324 static const char *hif_get_hw_name(struct hif_target_info *info)
325 {
326 	int i;
327 
328 	if (info->hw_name)
329 		return info->hw_name;
330 
331 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
332 		if (info->target_version == qwlan_hw_list[i].id &&
333 		    info->target_revision == qwlan_hw_list[i].subid) {
334 			return qwlan_hw_list[i].name;
335 		}
336 	}
337 
338 	info->hw_name = qdf_mem_malloc(64);
339 	if (!info->hw_name)
340 		return "Unknown Device (nomem)";
341 
342 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
343 			info->target_version);
344 	if (i < 0)
345 		return "Unknown Device (snprintf failure)";
346 	else
347 		return info->hw_name;
348 }
349 
/**
 * hif_get_hw_info(): return target version, revision and name
 * @scn: opaque hif context
 * @version: output - filled with target_version
 * @revision: output - filled with target_revision
 * @target_name: output - human readable hw name (see hif_get_hw_name())
 *
 * For USB targets the hardware info is (re)fetched from the device
 * before reporting.
 *
 * Return: n/a
 */
void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
			const char **target_name)
{
	struct hif_target_info *info = hif_get_target_info_handle(scn);
	struct hif_softc *sc = HIF_GET_SOFTC(scn);

	if (sc->bus_type == QDF_BUS_TYPE_USB)
		hif_usb_get_hw_info(sc);

	*version = info->target_version;
	*revision = info->target_revision;
	*target_name = hif_get_hw_name(info);
}
371 
/**
 * hif_get_dev_ba(): API to get device base address.
 * @hif_handle: opaque hif handle
 *
 * Return: virtual device base address (scn->mem)
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem;
}
qdf_export_symbol(hif_get_dev_ba);
/**
 * hif_open(): hif_open
 * @qdf_ctx: QDF Context
 * @mode: Driver Mode
 * @bus_type: Bus Type
 * @cbk: CDS Callbacks
 *
 * API to open HIF Context. Allocates a bus-specific softc (sized by
 * hif_bus_get_context_size()), initializes the generic hif fields and
 * then opens the bus layer. The callbacks are copied by value.
 *
 * Return: HIF Opaque Pointer, NULL on any failure
 */
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk)
{
	struct hif_softc *scn;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int bus_context_size = hif_bus_get_context_size(bus_type);

	if (bus_context_size == 0) {
		HIF_ERROR("%s: context size 0 not allowed", __func__);
		return NULL;
	}

	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
	if (!scn) {
		HIF_ERROR("%s: cannot alloc memory for HIF context of size:%d",
						__func__, bus_context_size);
		/* scn is NULL here; this returns a NULL opaque handle */
		return GET_HIF_OPAQUE_HDL(scn);
	}

	scn->qdf_dev = qdf_ctx;
	scn->hif_con_param = mode;
	qdf_atomic_init(&scn->active_tasklet_cnt);
	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
	qdf_atomic_init(&scn->link_suspended);
	qdf_atomic_init(&scn->tasklet_from_intr);
	qdf_mem_copy(&scn->callbacks, cbk,
		     sizeof(struct hif_driver_state_callbacks));
	scn->bus_type  = bus_type;
	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
	status = hif_bus_open(scn, bus_type);
	if (status != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: hif_bus_open error = %d, bus_type = %d",
				  __func__, status, bus_type);
		qdf_mem_free(scn);
		scn = NULL;
	}

	return GET_HIF_OPAQUE_HDL(scn);
}
438 
#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
 * @scn: hif context
 *
 * Frees the CE_COUNT 32-bit read-index words that were DMA-mapped for
 * the target to mirror ring read indices into DDR, then clears the
 * cached virtual address so a double free is harmless.
 *
 * Return: none
 */
void hif_uninit_rri_on_ddr(struct hif_softc *scn)
{
	if (scn->vaddr_rri_on_ddr)
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					(CE_COUNT * sizeof(uint32_t)),
					scn->vaddr_rri_on_ddr,
					scn->paddr_rri_on_ddr, 0);
	scn->vaddr_rri_on_ddr = NULL;
}
#endif
456 
/**
 * hif_close(): free the hif context allocated by hif_open()
 * @hif_ctx: hif_ctx
 *
 * Removes the diag procfs entry, frees a dynamically allocated hw_name
 * (replacing it with a static sentinel first so stale readers don't see
 * a dangling pointer), releases the RRI DDR memory and closes the bus
 * before freeing the context itself.
 *
 * Return: n/a
 */
void hif_close(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		HIF_ERROR("%s: hif_opaque_softc is NULL", __func__);
		return;
	}

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	if (scn->target_info.hw_name) {
		char *hw_name = scn->target_info.hw_name;

		/* park a static string before freeing the heap buffer */
		scn->target_info.hw_name = "ErrUnloading";
		qdf_mem_free(hw_name);
	}

	hif_uninit_rri_on_ddr(scn);

	hif_bus_close(scn);
	qdf_mem_free(scn);
}
489 
#if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490)
/*
 * hif_hal_attach() - attach the HAL layer for SRNG-based targets.
 * Targets still using legacy copy engines skip the attach and succeed.
 */
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		scn->hal_soc = hal_attach(
					hif_softc_to_hif_opaque_softc(scn),
					scn->qdf_dev);
		if (!scn->hal_soc)
			return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/* hif_hal_detach() - undo hif_hal_attach() for SRNG-based targets */
static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		hal_detach(scn->hal_soc);
		scn->hal_soc = NULL;
	}

	return QDF_STATUS_SUCCESS;
}
#else
/* Non-SRNG builds: HAL attach/detach are no-ops that always succeed. */
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
#endif
526 
/**
 * hif_enable(): enable the bus and bring the target up
 * @hif_ctx: hif_ctx
 * @dev: dev
 * @bdev: bus dev
 * @bid: bus ID
 * @bus_type: bus type
 * @type: enable type
 *
 * Bring-up sequence: enable bus -> attach HAL -> configure the target
 * (copy engines/interrupts) -> init unit-test suspend support. On
 * failure the completed steps are unwound through the goto labels.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
					  void *bdev,
					  const struct hif_bus_id *bid,
					  enum qdf_bus_type bus_type,
					  enum hif_enable_type type)
{
	QDF_STATUS status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		HIF_ERROR("%s: hif_ctx = NULL", __func__);
		return QDF_STATUS_E_NULL_VALUE;
	}

	status = hif_enable_bus(scn, dev, bdev, bid, type);
	if (status != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: hif_enable_bus error = %d",
				  __func__, status);
		return status;
	}

	status = hif_hal_attach(scn);
	if (status != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: hal attach failed", __func__);
		goto disable_bus;
	}

	if (hif_bus_configure(scn)) {
		HIF_ERROR("%s: Target probe failed.", __func__);
		status = QDF_STATUS_E_FAILURE;
		goto hal_detach;
	}

	hif_ut_suspend_init(scn);

	/*
	 * Flag to avoid potential unallocated memory access from MSI
	 * interrupt handler which could get scheduled as soon as MSI
	 * is enabled, i.e to take care of the race due to the order
	 * in where MSI is enabled before the memory, that will be
	 * in interrupt handlers, is allocated.
	 */

	scn->hif_init_done = true;

	HIF_DBG("%s: OK", __func__);

	return QDF_STATUS_SUCCESS;

hal_detach:
	hif_hal_detach(scn);
disable_bus:
	hif_disable_bus(scn);
	return status;
}
593 
/**
 * hif_disable(): undo hif_enable()
 * @hif_ctx: opaque hif context
 * @type: disable type (currently unused here; kept for API symmetry)
 *
 * Tears down in reverse order of bring-up: disable interrupts, stop or
 * shut down the device (shutdown when init never completed), detach
 * HAL, disable the bus, then disable wlan.
 *
 * Return: n/a
 */
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	hif_nointrs(scn);
	/* if init never completed, a full shutdown is required instead */
	if (scn->hif_init_done == false)
		hif_shutdown_device(hif_ctx);
	else
		hif_stop(hif_ctx);

	hif_hal_detach(scn);

	hif_disable_bus(scn);

	hif_wlan_disable(scn);

	scn->notice_send = false;

	HIF_DBG("%s: X", __func__);
}
617 
/**
 * hif_display_stats() - print bus-level statistics
 * @hif_ctx: opaque hif context
 *
 * Return: n/a
 */
void hif_display_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_display_bus_stats(hif_ctx);
}
622 
/**
 * hif_clear_stats() - reset bus-level statistics
 * @hif_ctx: opaque hif context
 *
 * Return: n/a
 */
void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_clear_bus_stats(hif_ctx);
}
627 
/**
 * hif_crash_shutdown_dump_bus_register() - dump bus registers
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
#if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) \
&& defined(DEBUG)

static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
{
	struct hif_opaque_softc *scn = hif_ctx;

	/* skip the dump if the SoC is not accessible */
	if (hif_check_soc_status(scn))
		return;

	if (hif_dump_registers(scn))
		HIF_ERROR("Failed to dump bus registers!");
}

/**
 * hif_crash_shutdown(): hif_crash_shutdown
 *
 * This function is called by the platform driver to dump CE registers
 * and collect a target RAM dump. It bails out when the bus is SNOC,
 * when the target is already asserted, or while a load/unload is in
 * progress.
 *
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_ctx)
		return;

	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
		HIF_INFO_MED("%s: RAM dump disabled for bustype %d",
				__func__, scn->bus_type);
		return;
	}

	if (TARGET_STATUS_RESET == scn->target_status) {
		HIF_INFO_MED("%s: Target is already asserted, ignore!",
			    __func__);
		return;
	}

	if (hif_is_load_or_unload_in_progress(scn)) {
		HIF_ERROR("%s: Load/unload is in progress, ignore!", __func__);
		return;
	}

	hif_crash_shutdown_dump_bus_register(hif_ctx);

	if (ol_copy_ramdump(hif_ctx))
		goto out;

	HIF_INFO_MED("%s: RAM dump collecting completed!", __func__);

out:
	return;
}
#else
/* RAM-dump collection compiled out: log and do nothing. */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	HIF_INFO_MED("%s: Collecting target RAM dump disabled",
		__func__);
}
#endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
698 
#ifdef QCA_WIFI_3_0
/**
 * hif_check_fw_reg(): hif_check_fw_reg
 * @scn: scn
 *
 * Stub for QCA_WIFI_3_0 targets: no firmware register check is needed.
 *
 * Return: int - always 0 (success)
 */
int hif_check_fw_reg(struct hif_opaque_softc *scn)
{
	return 0;
}
#endif
712 
/**
 * hif_read_phy_mem_base(): hif_read_phy_mem_base
 * @scn: scn
 * @phy_mem_base: output - filled with the physical memory base (mem_pa)
 *
 * Return: n/a
 */
void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
{
	*phy_mem_base = scn->mem_pa;
}
qdf_export_symbol(hif_read_phy_mem_base);
725 
/**
 * hif_get_device_type(): map PCI device/revision ids to hif/target type
 * @device_id: device_id
 * @revision_id: revision_id (only consulted for AR6320)
 * @hif_type: returned hif_type
 * @target_type: returned target_type
 *
 * Return: int - 0 on success, -ENODEV for unknown device or target
 */
int hif_get_device_type(uint32_t device_id,
			uint32_t revision_id,
			uint32_t *hif_type, uint32_t *target_type)
{
	int ret = 0;

	switch (device_id) {
	case ADRASTEA_DEVICE_ID_P2_E12:

		*hif_type = HIF_TYPE_ADRASTEA;
		*target_type = TARGET_TYPE_ADRASTEA;
		break;

	case AR9888_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		break;

	case AR6320_DEVICE_ID:
		/* AR6320 needs the revision to pick v1 vs v2 */
		switch (revision_id) {
		case AR6320_FW_1_1:
		case AR6320_FW_1_3:
			*hif_type = HIF_TYPE_AR6320;
			*target_type = TARGET_TYPE_AR6320;
			break;

		case AR6320_FW_2_0:
		case AR6320_FW_3_0:
		case AR6320_FW_3_2:
			*hif_type = HIF_TYPE_AR6320V2;
			*target_type = TARGET_TYPE_AR6320V2;
			break;

		default:
			HIF_ERROR("%s: error - dev_id = 0x%x, rev_id = 0x%x",
				   __func__, device_id, revision_id);
			ret = -ENODEV;
			goto end;
		}
		break;

	case AR9887_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		HIF_INFO(" *********** AR9887 **************");
		break;

	case QCA9984_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9984;
		*target_type = TARGET_TYPE_QCA9984;
		HIF_INFO(" *********** QCA9984 *************");
		break;

	case QCA9888_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9888;
		*target_type = TARGET_TYPE_QCA9888;
		HIF_INFO(" *********** QCA9888 *************");
		break;

	case AR900B_DEVICE_ID:
		*hif_type = HIF_TYPE_AR900B;
		*target_type = TARGET_TYPE_AR900B;
		HIF_INFO(" *********** AR900B *************");
		break;

	case IPQ4019_DEVICE_ID:
		*hif_type = HIF_TYPE_IPQ4019;
		*target_type = TARGET_TYPE_IPQ4019;
		HIF_INFO(" *********** IPQ4019  *************");
		break;

	case QCA8074_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074;
		*target_type = TARGET_TYPE_QCA8074;
		HIF_INFO(" *********** QCA8074  *************\n");
		break;

	case QCA6290_EMULATION_DEVICE_ID:
	case QCA6290_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6290;
		*target_type = TARGET_TYPE_QCA6290;
		HIF_INFO(" *********** QCA6290EMU *************\n");
		break;

	case QCN9000_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9000;
		*target_type = TARGET_TYPE_QCN9000;
		HIF_INFO(" *********** QCN9000 *************\n");
		break;

	case QCN7605_DEVICE_ID:
	case QCN7605_COMPOSITE:
	case QCN7605_STANDALONE:
	case QCN7605_STANDALONE_V2:
	case QCN7605_COMPOSITE_V2:
		*hif_type = HIF_TYPE_QCN7605;
		*target_type = TARGET_TYPE_QCN7605;
		HIF_INFO(" *********** QCN7605 *************\n");
		break;

	case QCA6390_DEVICE_ID:
	case QCA6390_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6390;
		*target_type = TARGET_TYPE_QCA6390;
		HIF_INFO(" *********** QCA6390 *************\n");
		break;

	case QCA6490_DEVICE_ID:
	case QCA6490_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6490;
		*target_type = TARGET_TYPE_QCA6490;
		HIF_INFO(" *********** QCA6490 *************\n");
		break;

	case QCA8074V2_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074V2;
		*target_type = TARGET_TYPE_QCA8074V2;
		HIF_INFO(" *********** QCA8074V2 *************\n");
		break;

	case QCA6018_DEVICE_ID:
	case RUMIM2M_DEVICE_ID_NODE0:
	case RUMIM2M_DEVICE_ID_NODE1:
	case RUMIM2M_DEVICE_ID_NODE2:
	case RUMIM2M_DEVICE_ID_NODE3:
	case RUMIM2M_DEVICE_ID_NODE4:
	case RUMIM2M_DEVICE_ID_NODE5:
		*hif_type = HIF_TYPE_QCA6018;
		*target_type = TARGET_TYPE_QCA6018;
		HIF_INFO(" *********** QCA6018 *************\n");
		break;

	default:
		HIF_ERROR("%s: Unsupported device ID = 0x%x!",
			  __func__, device_id);
		ret = -ENODEV;
		break;
	}

	/*
	 * NOTE(review): this check relies on the caller pre-initializing
	 * *target_type to TARGET_TYPE_UNKNOWN; the default case above does
	 * not write *target_type - confirm all callers do so.
	 */
	if (*target_type == TARGET_TYPE_UNKNOWN) {
		HIF_ERROR("%s: Unsupported target_type!", __func__);
		ret = -ENODEV;
	}
end:
	return ret;
}
881 
882 /**
883  * hif_get_bus_type() - return the bus type
884  *
885  * Return: enum qdf_bus_type
886  */
887 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
888 {
889 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
890 
891 	return scn->bus_type;
892 }
893 
/**
 * Target info and ini parameters are global to the driver.
 * Hence these structures are exposed to all the modules in
 * the driver and they don't need to maintain multiple copies
 * of the same info; instead, get the handle from hif and
 * modify them in hif.
 */
901 
902 /**
903  * hif_get_ini_handle() - API to get hif_config_param handle
904  * @hif_ctx: HIF Context
905  *
906  * Return: pointer to hif_config_info
907  */
908 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
909 {
910 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
911 
912 	return &sc->hif_config;
913 }
914 
915 /**
916  * hif_get_target_info_handle() - API to get hif_target_info handle
917  * @hif_ctx: HIF context
918  *
919  * Return: Pointer to hif_target_info
920  */
921 struct hif_target_info *hif_get_target_info_handle(
922 					struct hif_opaque_softc *hif_ctx)
923 {
924 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
925 
926 	return &sc->target_info;
927 
928 }
929 qdf_export_symbol(hif_get_target_info_handle);
930 
#ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - register an rx-offload flush callback
 * @scn: opaque hif context
 * @offld_flush_handler: flush callback invoked with the LRO/offload ctx
 *
 * Only supported when NAPI is enabled; otherwise logs an error.
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *))
{
	if (hif_napi_enabled(scn, -1))
		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
	else
		HIF_ERROR("NAPI not enabled\n");
}
qdf_export_symbol(hif_offld_flush_cb_register);

/**
 * hif_offld_flush_cb_deregister() - remove the rx-offload flush callback
 * @scn: opaque hif context
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
{
	if (hif_napi_enabled(scn, -1))
		hif_napi_rx_offld_flush_cb_deregister(scn);
	else
		HIF_ERROR("NAPI not enabled\n");
}
qdf_export_symbol(hif_offld_flush_cb_deregister);

/**
 * hif_get_rx_ctx_id() - map a CE pipe id to an rx context id
 * @ctx_id: copy engine pipe id
 * @hif_hdl: opaque hif context
 *
 * Return: NAPI instance id when NAPI is enabled, else the pipe id as-is
 */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	if (hif_napi_enabled(hif_hdl, -1))
		return NAPI_PIPE2ID(ctx_id);
	else
		return ctx_id;
}
#else /* RECEIVE_OFFLOAD */
/* Receive offload compiled out: there is only context 0. */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return 0;
}
qdf_export_symbol(hif_get_rx_ctx_id);
#endif /* RECEIVE_OFFLOAD */
965 
#if defined(FEATURE_LRO)

/**
 * hif_get_lro_info - Returns LRO instance for instance ID
 * @ctx_id: LRO instance ID
 * @hif_hdl: HIF Context
 *
 * Looks the instance up through NAPI when NAPI is enabled, otherwise
 * through the copy-engine context.
 *
 * Return: Pointer to LRO instance.
 */
void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	if (hif_napi_enabled(hif_hdl, -1))
		return hif_napi_get_lro_info(hif_hdl, ctx_id);

	return hif_ce_get_lro_ctx(hif_hdl, ctx_id);
}
#endif
987 
988 /**
989  * hif_get_target_status - API to get target status
990  * @hif_ctx: HIF Context
991  *
992  * Return: enum hif_target_status
993  */
994 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
995 {
996 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
997 
998 	return scn->target_status;
999 }
1000 qdf_export_symbol(hif_get_target_status);
1001 
1002 /**
1003  * hif_set_target_status() - API to set target status
1004  * @hif_ctx: HIF Context
1005  * @status: Target Status
1006  *
1007  * Return: void
1008  */
1009 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1010 			   hif_target_status status)
1011 {
1012 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1013 
1014 	scn->target_status = status;
1015 }
1016 
1017 /**
1018  * hif_init_ini_config() - API to initialize HIF configuration parameters
1019  * @hif_ctx: HIF Context
1020  * @cfg: HIF Configuration
1021  *
1022  * Return: void
1023  */
1024 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1025 			 struct hif_config_info *cfg)
1026 {
1027 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1028 
1029 	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
1030 }
1031 
1032 /**
1033  * hif_get_conparam() - API to get driver mode in HIF
1034  * @scn: HIF Context
1035  *
1036  * Return: driver mode of operation
1037  */
1038 uint32_t hif_get_conparam(struct hif_softc *scn)
1039 {
1040 	if (!scn)
1041 		return 0;
1042 
1043 	return scn->hif_con_param;
1044 }
1045 
/**
 * hif_get_callbacks_handle() - API to get callbacks Handle
 * @scn: HIF Context
 *
 * Return: pointer to the HIF callbacks stored in the hif context
 */
struct hif_driver_state_callbacks *hif_get_callbacks_handle(
							struct hif_softc *scn)
{
	return &scn->callbacks;
}
1057 
1058 /**
1059  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
1060  * @scn: HIF Context
1061  *
1062  * Return: True/False
1063  */
1064 bool hif_is_driver_unloading(struct hif_softc *scn)
1065 {
1066 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1067 
1068 	if (cbk && cbk->is_driver_unloading)
1069 		return cbk->is_driver_unloading(cbk->context);
1070 
1071 	return false;
1072 }
1073 
1074 /**
1075  * hif_is_load_or_unload_in_progress() - API to query upper layers if
1076  * load/unload in progress
1077  * @scn: HIF Context
1078  *
1079  * Return: True/False
1080  */
1081 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
1082 {
1083 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1084 
1085 	if (cbk && cbk->is_load_unload_in_progress)
1086 		return cbk->is_load_unload_in_progress(cbk->context);
1087 
1088 	return false;
1089 }
1090 
1091 /**
1092  * hif_is_recovery_in_progress() - API to query upper layers if recovery in
1093  * progress
1094  * @scn: HIF Context
1095  *
1096  * Return: True/False
1097  */
1098 bool hif_is_recovery_in_progress(struct hif_softc *scn)
1099 {
1100 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1101 
1102 	if (cbk && cbk->is_recovery_in_progress)
1103 		return cbk->is_recovery_in_progress(cbk->context);
1104 
1105 	return false;
1106 }
1107 
1108 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
1109 
/**
 * hif_update_pipe_callback() - API to register pipe specific callbacks
 * @osc: Opaque softc
 * @pipeid: pipe id
 * @callbacks: callbacks to register (copied by value into the pipe info)
 *
 * Return: void
 */

void hif_update_pipe_callback(struct hif_opaque_softc *osc,
					u_int8_t pipeid,
					struct hif_msg_callbacks *callbacks)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info;

	/* guard the pipe_info array access below */
	QDF_BUG(pipeid < CE_COUNT_MAX);

	HIF_INFO_LO("+%s pipeid %d\n", __func__, pipeid);

	pipe_info = &hif_state->pipe_info[pipeid];

	qdf_mem_copy(&pipe_info->pipe_callbacks,
			callbacks, sizeof(pipe_info->pipe_callbacks));

	HIF_INFO_LO("-%s\n", __func__);
}
qdf_export_symbol(hif_update_pipe_callback);
1139 
1140 /**
1141  * hif_is_target_ready() - API to query if target is in ready state
1142  * progress
1143  * @scn: HIF Context
1144  *
1145  * Return: True/False
1146  */
1147 bool hif_is_target_ready(struct hif_softc *scn)
1148 {
1149 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1150 
1151 	if (cbk && cbk->is_target_ready)
1152 		return cbk->is_target_ready(cbk->context);
1153 	/*
1154 	 * if callback is not registered then there is no way to determine
1155 	 * if target is ready. In-such case return true to indicate that
1156 	 * target is ready.
1157 	 */
1158 	return true;
1159 }
1160 qdf_export_symbol(hif_is_target_ready);
1161 
/**
 * hif_batch_send() - API to access hif specific function
 * ce_batch_send.
 * @osc: HIF Context
 * @msdu : list of msdus to be sent
 * @transfer_id : transfer id
 * @len : downloaded length
 * @sendhead : send-head flag forwarded to ce_batch_send
 *
 * Return: list of msdus not sent
 */
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
{
	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);

	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
			len, sendhead);
}
qdf_export_symbol(hif_batch_send);
1181 
1182 /**
1183  * hif_update_tx_ring() - API to access hif specific function
1184  * ce_update_tx_ring.
1185  * @osc: HIF Context
1186  * @num_htt_cmpls : number of htt compl received.
1187  *
1188  * Return: void
1189  */
1190 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
1191 {
1192 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1193 
1194 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
1195 }
1196 qdf_export_symbol(hif_update_tx_ring);
1197 
1198 
1199 /**
1200  * hif_send_single() - API to access hif specific function
1201  * ce_send_single.
1202  * @osc: HIF Context
1203  * @msdu : msdu to be sent
1204  * @transfer_id: transfer id
1205  * @len : downloaded length
1206  *
1207  * Return: msdu sent status
1208  */
1209 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1210 			   uint32_t transfer_id, u_int32_t len)
1211 {
1212 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1213 
1214 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
1215 			len);
1216 }
1217 qdf_export_symbol(hif_send_single);
1218 #endif
1219 
1220 /**
1221  * hif_reg_write() - API to access hif specific function
1222  * hif_write32_mb.
1223  * @hif_ctx : HIF Context
1224  * @offset : offset on which value has to be written
1225  * @value : value to be written
1226  *
1227  * Return: None
1228  */
1229 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
1230 		uint32_t value)
1231 {
1232 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1233 
1234 	hif_write32_mb(scn, scn->mem + offset, value);
1235 
1236 }
1237 qdf_export_symbol(hif_reg_write);
1238 
1239 /**
1240  * hif_reg_read() - API to access hif specific function
1241  * hif_read32_mb.
1242  * @hif_ctx : HIF Context
1243  * @offset : offset from which value has to be read
1244  *
1245  * Return: Read value
1246  */
1247 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
1248 {
1249 
1250 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1251 
1252 	return hif_read32_mb(scn, scn->mem + offset);
1253 }
1254 qdf_export_symbol(hif_reg_read);
1255 
1256 /**
1257  * hif_ramdump_handler(): generic ramdump handler
1258  * @scn: struct hif_opaque_softc
1259  *
1260  * Return: None
1261  */
1262 void hif_ramdump_handler(struct hif_opaque_softc *scn)
1263 {
1264 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
1265 		hif_usb_ramdump_handler(scn);
1266 }
1267 
1268 irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
1269 {
1270 	struct hif_softc *scn = context;
1271 	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
1272 
1273 	HIF_INFO("wake interrupt received on irq %d", irq);
1274 
1275 	if (hif_pm_runtime_get_monitor_wake_intr(hif_ctx)) {
1276 		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
1277 		hif_pm_runtime_request_resume(hif_ctx);
1278 	}
1279 
1280 	if (scn->initial_wakeup_cb)
1281 		scn->initial_wakeup_cb(scn->initial_wakeup_priv);
1282 
1283 	if (hif_is_ut_suspended(scn))
1284 		hif_ut_fw_resume(scn);
1285 
1286 	return IRQ_HANDLED;
1287 }
1288 
1289 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
1290 			       void (*callback)(void *),
1291 			       void *priv)
1292 {
1293 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1294 
1295 	scn->initial_wakeup_cb = callback;
1296 	scn->initial_wakeup_priv = priv;
1297 }
1298 
1299 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
1300 				       uint32_t ce_service_max_yield_time)
1301 {
1302 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1303 
1304 	hif_ctx->ce_service_max_yield_time =
1305 		ce_service_max_yield_time * 1000;
1306 }
1307 
1308 unsigned long long
1309 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
1310 {
1311 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1312 
1313 	return hif_ctx->ce_service_max_yield_time;
1314 }
1315 
1316 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
1317 				       uint8_t ce_service_max_rx_ind_flush)
1318 {
1319 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1320 
1321 	if (ce_service_max_rx_ind_flush == 0 ||
1322 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
1323 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
1324 	else
1325 		hif_ctx->ce_service_max_rx_ind_flush =
1326 						ce_service_max_rx_ind_flush;
1327 }
1328