xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_main.c (revision 503663c6daafffe652fa360bde17243568cd6d2a)
1 /*
2  * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
22 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "hif_hw_version.h"
33 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
34 #include "ce_tasklet.h"
35 #include "ce_api.h"
36 #endif
37 #include "qdf_trace.h"
38 #include "qdf_status.h"
39 #include "hif_debug.h"
40 #include "mp_dev.h"
41 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018)
42 #include "hal_api.h"
43 #endif
44 #include "hif_napi.h"
45 #include "hif_unit_test_suspend_i.h"
46 #include "qdf_module.h"
47 
48 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
49 {
50 	hif_trigger_dump(hif_ctx, cmd_id, start);
51 }
52 
53 /**
54  * hif_get_target_id(): get the target's virtual memory base address
55  * @scn: hif context
56  *
57  * Return the virtual memory base address of the target register
58  * space to the caller.
59  *
60  * Return: A_target_id_t
61  */
62 A_target_id_t hif_get_target_id(struct hif_softc *scn)
63 {
64 	return scn->mem;
65 }
66 
67 /**
68  * hif_get_targetdef(): get the target register definition table
69  * @hif_ctx: hif context
70  *
71  * Return: opaque pointer to the target register definitions
72  */
73 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
74 {
75 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
76 
77 	return scn->targetdef;
78 }
79 
80 #ifdef FORCE_WAKE
81 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
82 			 bool init_phase)
83 {
84 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
85 
86 	if (ce_srng_based(scn))
87 		hal_set_init_phase(scn->hal_soc, init_phase);
88 }
89 #endif /* FORCE_WAKE */
90 
91 /**
92  * hif_vote_link_down(): unvote for link up
93  *
94  * Call hif_vote_link_down to release a previous request made using
95  * hif_vote_link_up. A hif_vote_link_down call should only be made
96  * after a corresponding hif_vote_link_up, otherwise you could be
97  * negating a vote from another source. When no votes are present
98  * hif will not guarantee the linkstate after hif_bus_suspend.
99  *
100  * SYNCHRONIZE WITH hif_vote_link_up by only calling from the MC thread
101  * and the initialization/deinitialization sequences.
102  *
103  * Return: n/a
104  */
105 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
106 {
107 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
108 
109 	QDF_BUG(scn);
110 	scn->linkstate_vote--;
111 	if (scn->linkstate_vote == 0)
112 		hif_bus_prevent_linkdown(scn, false);
113 }
114 
115 /**
116  * hif_vote_link_up(): vote to prevent bus from suspending
117  *
118  * Makes hif guarantee that fw can message the host normally
119  * during suspend.
120  *
121  * SYNCHRONIZE WITH hif_vote_link_down by only calling from the MC thread
122  * and the initialization/deinitialization sequences.
123  *
124  * Return: n/a
125  */
126 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
127 {
128 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
129 
130 	QDF_BUG(scn);
131 	scn->linkstate_vote++;
132 	if (scn->linkstate_vote == 1)
133 		hif_bus_prevent_linkdown(scn, true);
134 }
135 
136 /**
137  * hif_can_suspend_link(): query if hif is permitted to suspend the link
138  *
139  * Hif will ensure that the link won't be suspended if the upper
140  * layers don't want it to.
141  *
142  * SYNCHRONIZATION: the MC thread is stopped before bus suspend, thus
143  * we don't need extra locking to ensure the votes don't change while
144  * we are in the process of suspending or resuming.
145  *
146  * Return: false if hif will guarantee link up during suspend.
147  */
148 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
149 {
150 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
151 
152 	QDF_BUG(scn);
153 	return scn->linkstate_vote == 0;
154 }
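/*
 * Usage sketch: a caller that must exchange messages with firmware
 * across a possible bus suspend pairs the votes around the
 * transaction, typically from the MC thread. send_fw_request() below
 * is a hypothetical helper.
 *
 *	hif_vote_link_up(hif_ctx);		// hold the link up
 *	status = send_fw_request(hif_ctx);	// fw can still message us
 *	hif_vote_link_down(hif_ctx);		// release the vote
 *
 * While at least one vote is held, hif_can_suspend_link() returns
 * false and the bus layer keeps the link from going down.
 */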
155 
156 /**
157  * hif_hia_item_address(): get the address of a host interest item
158  * @target_type: target type
159  * @item_offset: offset of the item within the host interest area
160  *
161  * Return: the target address of the requested host interest item
162  */
163 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
164 {
165 	switch (target_type) {
166 	case TARGET_TYPE_AR6002:
167 		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
168 	case TARGET_TYPE_AR6003:
169 		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
170 	case TARGET_TYPE_AR6004:
171 		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
172 	case TARGET_TYPE_AR6006:
173 		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
174 	case TARGET_TYPE_AR9888:
175 		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
176 	case TARGET_TYPE_AR6320:
177 	case TARGET_TYPE_AR6320V2:
178 		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
179 	case TARGET_TYPE_ADRASTEA:
180 		/* ADRASTEA doesn't have a host interest address */
181 		ASSERT(0);
182 		return 0;
183 	case TARGET_TYPE_AR900B:
184 		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
185 	case TARGET_TYPE_QCA9984:
186 		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
187 	case TARGET_TYPE_QCA9888:
188 		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
189 	case TARGET_TYPE_IPQ4019:
190 		return IPQ4019_HOST_INTEREST_ADDRESS + item_offset;
191 
192 	default:
193 		ASSERT(0);
194 		return 0;
195 	}
196 }
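/*
 * Example: callers usually pass the offset of a field in the host
 * interest structure (struct host_interest_s and hi_board_data are
 * assumed to come from targaddrs.h), e.g. on an AR9888 target:
 *
 *	uint32_t addr;
 *
 *	addr = hif_hia_item_address(TARGET_TYPE_AR9888,
 *			offsetof(struct host_interest_s, hi_board_data));
 *
 * The returned target address can then be read over the diag access
 * path.
 */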
197 
198 /**
199  * hif_max_num_receives_reached() - check whether the receive limit is reached
200  * @scn: HIF Context
201  * @count: number of messages processed so far
202  *
203  * The limit is lower (120) when endpoint ping (EPPING) mode is enabled.
204  *
205  * Return: true if the maximum number of receives has been reached
206  */
207 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
208 {
209 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
210 		return count > 120;
211 	else
212 		return count > MAX_NUM_OF_RECEIVES;
213 }
214 
215 /**
216  * init_buffer_count() - initial buffer count
217  * @maxSize: maximum number of buffers requested
218  *
219  * Routine to modify the initial buffer count to be allocated on a
220  * per-platform basis. The platform owner will need to modify this as needed.
221  *
222  * Return: qdf_size_t
223  */
224 qdf_size_t init_buffer_count(qdf_size_t maxSize)
225 {
226 	return maxSize;
227 }
228 
229 /**
230  * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
231  * @hif_ctx: hif context
232  * @htc_htt_tx_endpoint: htt_tx_endpoint
233  *
234  * Return: void
235  */
236 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
237 							int htc_htt_tx_endpoint)
238 {
239 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
240 
241 	if (!scn) {
242 		HIF_ERROR("%s: error: scn is NULL!",
243 		       __func__);
244 		return;
245 	}
246 
247 	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
248 }
249 qdf_export_symbol(hif_save_htc_htt_config_endpoint);
250 
251 static const struct qwlan_hw qwlan_hw_list[] = {
252 	{
253 		.id = AR6320_REV1_VERSION,
254 		.subid = 0,
255 		.name = "QCA6174_REV1",
256 	},
257 	{
258 		.id = AR6320_REV1_1_VERSION,
259 		.subid = 0x1,
260 		.name = "QCA6174_REV1_1",
261 	},
262 	{
263 		.id = AR6320_REV1_3_VERSION,
264 		.subid = 0x2,
265 		.name = "QCA6174_REV1_3",
266 	},
267 	{
268 		.id = AR6320_REV2_1_VERSION,
269 		.subid = 0x4,
270 		.name = "QCA6174_REV2_1",
271 	},
272 	{
273 		.id = AR6320_REV2_1_VERSION,
274 		.subid = 0x5,
275 		.name = "QCA6174_REV2_2",
276 	},
277 	{
278 		.id = AR6320_REV3_VERSION,
279 		.subid = 0x6,
280 		.name = "QCA6174_REV2.3",
281 	},
282 	{
283 		.id = AR6320_REV3_VERSION,
284 		.subid = 0x8,
285 		.name = "QCA6174_REV3",
286 	},
287 	{
288 		.id = AR6320_REV3_VERSION,
289 		.subid = 0x9,
290 		.name = "QCA6174_REV3_1",
291 	},
292 	{
293 		.id = AR6320_REV3_2_VERSION,
294 		.subid = 0xA,
295 		.name = "AR6320_REV3_2_VERSION",
296 	},
297 	{
298 		.id = WCN3990_v1,
299 		.subid = 0x0,
300 		.name = "WCN3990_V1",
301 	},
302 	{
303 		.id = WCN3990_v2,
304 		.subid = 0x0,
305 		.name = "WCN3990_V2",
306 	},
307 	{
308 		.id = WCN3990_v2_1,
309 		.subid = 0x0,
310 		.name = "WCN3990_V2.1",
311 	},
312 	{
313 		.id = WCN3998,
314 		.subid = 0x0,
315 		.name = "WCN3998",
316 	},
317 	{
318 		.id = QCA9379_REV1_VERSION,
319 		.subid = 0xC,
320 		.name = "QCA9379_REV1",
321 	},
322 	{
323 		.id = QCA9379_REV1_VERSION,
324 		.subid = 0xD,
325 		.name = "QCA9379_REV1_1",
326 	}
327 };
328 
329 /**
330  * hif_get_hw_name(): get a human readable name for the hardware
331  * @info: Target Info
332  *
333  * Return: human readable name for the underlying wifi hardware.
334  */
335 static const char *hif_get_hw_name(struct hif_target_info *info)
336 {
337 	int i;
338 
339 	if (info->hw_name)
340 		return info->hw_name;
341 
342 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
343 		if (info->target_version == qwlan_hw_list[i].id &&
344 		    info->target_revision == qwlan_hw_list[i].subid) {
345 			return qwlan_hw_list[i].name;
346 		}
347 	}
348 
349 	info->hw_name = qdf_mem_malloc(64);
350 	if (!info->hw_name)
351 		return "Unknown Device (nomem)";
352 
353 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
354 			info->target_version);
355 	if (i < 0)
356 		return "Unknown Device (snprintf failure)";
357 	else
358 		return info->hw_name;
359 }
360 
361 /**
362  * hif_get_hw_info(): get the version, revision and name of the wifi hardware
363  * @scn: hif context
364  * @version: buffer for the target version
365  * @revision: buffer for the target revision
366  * @target_name: buffer for the human readable hardware name
367  * Return: n/a
368  */
369 void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
370 			const char **target_name)
371 {
372 	struct hif_target_info *info = hif_get_target_info_handle(scn);
373 	struct hif_softc *sc = HIF_GET_SOFTC(scn);
374 
375 	if (sc->bus_type == QDF_BUS_TYPE_USB)
376 		hif_usb_get_hw_info(sc);
377 
378 	*version = info->target_version;
379 	*revision = info->target_revision;
380 	*target_name = hif_get_hw_name(info);
381 }
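/*
 * Usage sketch: log the detected hardware once the target info is
 * available (the log format shown is illustrative):
 *
 *	u32 version, revision;
 *	const char *name;
 *
 *	hif_get_hw_info(hif_ctx, &version, &revision, &name);
 *	HIF_INFO("wifi hw %s, version 0x%x, revision 0x%x",
 *		 name, version, revision);
 */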
382 
383 /**
384  * hif_get_dev_ba(): API to get device base address.
385  * @hif_handle: hif handle
386  *
387  * Return the mapped device base address to the caller.
388  *
389  * Return: device base address
390  */
391 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
392 {
393 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
394 
395 	return scn->mem;
396 }
397 qdf_export_symbol(hif_get_dev_ba);
398 /**
399  * hif_open(): allocate and initialize a hif context
400  * @qdf_ctx: QDF Context
401  * @mode: Driver Mode
402  * @bus_type: Bus Type
403  * @cbk: CDS Callbacks
404  *
405  * API to open HIF Context
406  *
407  * Return: HIF Opaque Pointer
408  */
409 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode,
410 				  enum qdf_bus_type bus_type,
411 				  struct hif_driver_state_callbacks *cbk)
412 {
413 	struct hif_softc *scn;
414 	QDF_STATUS status = QDF_STATUS_SUCCESS;
415 	int bus_context_size = hif_bus_get_context_size(bus_type);
416 
417 	if (bus_context_size == 0) {
418 		HIF_ERROR("%s: context size 0 not allowed", __func__);
419 		return NULL;
420 	}
421 
422 	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
423 	if (!scn) {
424 		HIF_ERROR("%s: cannot alloc memory for HIF context of size:%d",
425 						__func__, bus_context_size);
426 		return GET_HIF_OPAQUE_HDL(scn);
427 	}
428 
429 	scn->qdf_dev = qdf_ctx;
430 	scn->hif_con_param = mode;
431 	qdf_atomic_init(&scn->active_tasklet_cnt);
432 	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
433 	qdf_atomic_init(&scn->link_suspended);
434 	qdf_atomic_init(&scn->tasklet_from_intr);
435 	qdf_mem_copy(&scn->callbacks, cbk,
436 		     sizeof(struct hif_driver_state_callbacks));
437 	scn->bus_type  = bus_type;
438 	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
439 	status = hif_bus_open(scn, bus_type);
440 	if (status != QDF_STATUS_SUCCESS) {
441 		HIF_ERROR("%s: hif_bus_open error = %d, bus_type = %d",
442 				  __func__, status, bus_type);
443 		qdf_mem_free(scn);
444 		scn = NULL;
445 	}
446 
447 	return GET_HIF_OPAQUE_HDL(scn);
448 }
449 
450 #ifdef ADRASTEA_RRI_ON_DDR
451 /**
452  * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
453  * @scn: hif context
454  *
455  * Return: none
456  */
457 void hif_uninit_rri_on_ddr(struct hif_softc *scn)
458 {
459 	if (scn->vaddr_rri_on_ddr)
460 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
461 					(CE_COUNT * sizeof(uint32_t)),
462 					scn->vaddr_rri_on_ddr,
463 					scn->paddr_rri_on_ddr, 0);
464 	scn->vaddr_rri_on_ddr = NULL;
465 }
466 #endif
467 
468 /**
469  * hif_close(): close and free a hif context opened with hif_open()
470  * @hif_ctx: hif_ctx
471  *
472  * Return: n/a
473  */
474 void hif_close(struct hif_opaque_softc *hif_ctx)
475 {
476 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
477 
478 	if (!scn) {
479 		HIF_ERROR("%s: hif_opaque_softc is NULL", __func__);
480 		return;
481 	}
482 
483 	if (scn->athdiag_procfs_inited) {
484 		athdiag_procfs_remove();
485 		scn->athdiag_procfs_inited = false;
486 	}
487 
488 	if (scn->target_info.hw_name) {
489 		char *hw_name = scn->target_info.hw_name;
490 
491 		scn->target_info.hw_name = "ErrUnloading";
492 		qdf_mem_free(hw_name);
493 	}
494 
495 	hif_uninit_rri_on_ddr(scn);
496 
497 	hif_bus_close(scn);
498 	qdf_mem_free(scn);
499 }
500 
501 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
502 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
503 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490)
504 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
505 {
506 	if (ce_srng_based(scn)) {
507 		scn->hal_soc = hal_attach(
508 					hif_softc_to_hif_opaque_softc(scn),
509 					scn->qdf_dev);
510 		if (!scn->hal_soc)
511 			return QDF_STATUS_E_FAILURE;
512 	}
513 
514 	return QDF_STATUS_SUCCESS;
515 }
516 
517 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
518 {
519 	if (ce_srng_based(scn)) {
520 		hal_detach(scn->hal_soc);
521 		scn->hal_soc = NULL;
522 	}
523 
524 	return QDF_STATUS_SUCCESS;
525 }
526 #else
527 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
528 {
529 	return QDF_STATUS_SUCCESS;
530 }
531 
532 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
533 {
534 	return QDF_STATUS_SUCCESS;
535 }
536 #endif
537 
538 /**
539  * hif_enable(): enable the bus and bring the target up
540  * @hif_ctx: hif_ctx
541  * @dev: dev
542  * @bdev: bus dev
543  * @bid: bus ID
544  * @bus_type: bus type
545  * @type: enable type
546  *
547  * Return: QDF_STATUS
548  */
549 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
550 					  void *bdev,
551 					  const struct hif_bus_id *bid,
552 					  enum qdf_bus_type bus_type,
553 					  enum hif_enable_type type)
554 {
555 	QDF_STATUS status;
556 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
557 
558 	if (!scn) {
559 		HIF_ERROR("%s: hif_ctx = NULL", __func__);
560 		return QDF_STATUS_E_NULL_VALUE;
561 	}
562 
563 	status = hif_enable_bus(scn, dev, bdev, bid, type);
564 	if (status != QDF_STATUS_SUCCESS) {
565 		HIF_ERROR("%s: hif_enable_bus error = %d",
566 				  __func__, status);
567 		return status;
568 	}
569 
570 	status = hif_hal_attach(scn);
571 	if (status != QDF_STATUS_SUCCESS) {
572 		HIF_ERROR("%s: hal attach failed", __func__);
573 		goto disable_bus;
574 	}
575 
576 	if (hif_bus_configure(scn)) {
577 		HIF_ERROR("%s: Target probe failed.", __func__);
578 		status = QDF_STATUS_E_FAILURE;
579 		goto hal_detach;
580 	}
581 
582 	hif_ut_suspend_init(scn);
583 
584 	/*
585 	 * Flag to avoid potential unallocated memory access from MSI
586 	 * interrupt handler which could get scheduled as soon as MSI
587 	 * is enabled, i.e. to handle the race that arises because MSI
588 	 * is enabled before the memory used by the interrupt handlers
589 	 * is allocated.
590 	 */
591 
592 	scn->hif_init_done = true;
593 
594 	HIF_DBG("%s: OK", __func__);
595 
596 	return QDF_STATUS_SUCCESS;
597 
598 hal_detach:
599 	hif_hal_detach(scn);
600 disable_bus:
601 	hif_disable_bus(scn);
602 	return status;
603 }
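/*
 * Bring-up sketch: a bus probe path typically opens the HIF context
 * and then enables it, tearing down in reverse order on failure or on
 * driver removal. QDF_GLOBAL_MISSION_MODE, HIF_ENABLE_TYPE_PROBE and
 * HIF_DISABLE_TYPE_REMOVE are assumed enum values here; dev, bdev,
 * bid and cbks come from the bus layer.
 *
 *	struct hif_opaque_softc *hif_ctx;
 *	QDF_STATUS status;
 *
 *	hif_ctx = hif_open(qdf_ctx, QDF_GLOBAL_MISSION_MODE,
 *			   QDF_BUS_TYPE_PCI, &cbks);
 *	if (!hif_ctx)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	status = hif_enable(hif_ctx, dev, bdev, bid,
 *			    QDF_BUS_TYPE_PCI, HIF_ENABLE_TYPE_PROBE);
 *	if (QDF_IS_STATUS_ERROR(status)) {
 *		hif_close(hif_ctx);
 *		return status;
 *	}
 *	...
 *	hif_disable(hif_ctx, HIF_DISABLE_TYPE_REMOVE);
 *	hif_close(hif_ctx);
 */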
604 
605 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
606 {
607 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
608 
609 	if (!scn)
610 		return;
611 
612 	hif_nointrs(scn);
613 	if (scn->hif_init_done == false)
614 		hif_shutdown_device(hif_ctx);
615 	else
616 		hif_stop(hif_ctx);
617 
618 	hif_hal_detach(scn);
619 
620 	hif_disable_bus(scn);
621 
622 	hif_wlan_disable(scn);
623 
624 	scn->notice_send = false;
625 
626 	HIF_DBG("%s: X", __func__);
627 }
628 
629 void hif_display_stats(struct hif_opaque_softc *hif_ctx)
630 {
631 	hif_display_bus_stats(hif_ctx);
632 }
633 
634 void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
635 {
636 	hif_clear_bus_stats(hif_ctx);
637 }
638 
639 /**
640  * hif_crash_shutdown_dump_bus_register() - dump bus registers
641  * @hif_ctx: hif_ctx
642  *
643  * Return: n/a
644  */
645 #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) \
646 && defined(DEBUG)
647 
648 static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
649 {
650 	struct hif_opaque_softc *scn = hif_ctx;
651 
652 	if (hif_check_soc_status(scn))
653 		return;
654 
655 	if (hif_dump_registers(scn))
656 		HIF_ERROR("Failed to dump bus registers!");
657 }
658 
659 /**
660  * hif_crash_shutdown(): dump CE registers and collect a target RAM dump
661  * @hif_ctx: hif context
662  *
663  * This function is called by the platform driver to dump CE registers
664  * and to collect a target RAM dump when the target has crashed.
665  *
666  * Return: n/a
667  */
668 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
669 {
670 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
671 
672 	if (!hif_ctx)
673 		return;
674 
675 	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
676 		HIF_INFO_MED("%s: RAM dump disabled for bustype %d",
677 				__func__, scn->bus_type);
678 		return;
679 	}
680 
681 	if (TARGET_STATUS_RESET == scn->target_status) {
682 		HIF_INFO_MED("%s: Target is already asserted, ignore!",
683 			    __func__);
684 		return;
685 	}
686 
687 	if (hif_is_load_or_unload_in_progress(scn)) {
688 		HIF_ERROR("%s: Load/unload is in progress, ignore!", __func__);
689 		return;
690 	}
691 
692 	hif_crash_shutdown_dump_bus_register(hif_ctx);
693 
694 	if (ol_copy_ramdump(hif_ctx))
695 		goto out;
696 
697 	HIF_INFO_MED("%s: RAM dump collection completed!", __func__);
698 
699 out:
700 	return;
701 }
702 #else
703 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
704 {
705 	HIF_INFO_MED("%s: Collecting target RAM dump disabled",
706 		__func__);
707 }
708 #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
709 
710 #ifdef QCA_WIFI_3_0
711 /**
712  * hif_check_fw_reg(): check the firmware register state
713  * @scn: hif context
714  *
715  * Stubbed out for QCA_WIFI_3_0 targets.
716  * Return: 0
717  */
718 int hif_check_fw_reg(struct hif_opaque_softc *scn)
719 {
720 	return 0;
721 }
722 #endif
723 
724 /**
725  * hif_read_phy_mem_base(): hif_read_phy_mem_base
726  * @scn: scn
727  * @phy_mem_base: physical mem base
728  *
729  * Return: n/a
730  */
731 void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
732 {
733 	*phy_mem_base = scn->mem_pa;
734 }
735 qdf_export_symbol(hif_read_phy_mem_base);
736 
737 /**
738  * hif_get_device_type(): hif_get_device_type
739  * @device_id: device_id
740  * @revision_id: revision_id
741  * @hif_type: returned hif_type
742  * @target_type: returned target_type
743  *
744  * Return: int
745  */
746 int hif_get_device_type(uint32_t device_id,
747 			uint32_t revision_id,
748 			uint32_t *hif_type, uint32_t *target_type)
749 {
750 	int ret = 0;
751 
752 	switch (device_id) {
753 	case ADRASTEA_DEVICE_ID_P2_E12:
754 
755 		*hif_type = HIF_TYPE_ADRASTEA;
756 		*target_type = TARGET_TYPE_ADRASTEA;
757 		break;
758 
759 	case AR9888_DEVICE_ID:
760 		*hif_type = HIF_TYPE_AR9888;
761 		*target_type = TARGET_TYPE_AR9888;
762 		break;
763 
764 	case AR6320_DEVICE_ID:
765 		switch (revision_id) {
766 		case AR6320_FW_1_1:
767 		case AR6320_FW_1_3:
768 			*hif_type = HIF_TYPE_AR6320;
769 			*target_type = TARGET_TYPE_AR6320;
770 			break;
771 
772 		case AR6320_FW_2_0:
773 		case AR6320_FW_3_0:
774 		case AR6320_FW_3_2:
775 			*hif_type = HIF_TYPE_AR6320V2;
776 			*target_type = TARGET_TYPE_AR6320V2;
777 			break;
778 
779 		default:
780 			HIF_ERROR("%s: error - dev_id = 0x%x, rev_id = 0x%x",
781 				   __func__, device_id, revision_id);
782 			ret = -ENODEV;
783 			goto end;
784 		}
785 		break;
786 
787 	case AR9887_DEVICE_ID:
788 		*hif_type = HIF_TYPE_AR9888;
789 		*target_type = TARGET_TYPE_AR9888;
790 		HIF_INFO(" *********** AR9887 **************");
791 		break;
792 
793 	case QCA9984_DEVICE_ID:
794 		*hif_type = HIF_TYPE_QCA9984;
795 		*target_type = TARGET_TYPE_QCA9984;
796 		HIF_INFO(" *********** QCA9984 *************");
797 		break;
798 
799 	case QCA9888_DEVICE_ID:
800 		*hif_type = HIF_TYPE_QCA9888;
801 		*target_type = TARGET_TYPE_QCA9888;
802 		HIF_INFO(" *********** QCA9888 *************");
803 		break;
804 
805 	case AR900B_DEVICE_ID:
806 		*hif_type = HIF_TYPE_AR900B;
807 		*target_type = TARGET_TYPE_AR900B;
808 		HIF_INFO(" *********** AR900B *************");
809 		break;
810 
811 	case IPQ4019_DEVICE_ID:
812 		*hif_type = HIF_TYPE_IPQ4019;
813 		*target_type = TARGET_TYPE_IPQ4019;
814 		HIF_INFO(" *********** IPQ4019  *************");
815 		break;
816 
817 	case QCA8074_DEVICE_ID:
818 		*hif_type = HIF_TYPE_QCA8074;
819 		*target_type = TARGET_TYPE_QCA8074;
820 		HIF_INFO(" *********** QCA8074  *************\n");
821 		break;
822 
823 	case QCA6290_EMULATION_DEVICE_ID:
824 	case QCA6290_DEVICE_ID:
825 		*hif_type = HIF_TYPE_QCA6290;
826 		*target_type = TARGET_TYPE_QCA6290;
827 		HIF_INFO(" *********** QCA6290EMU *************\n");
828 		break;
829 
830 	case QCN9000_DEVICE_ID:
831 		*hif_type = HIF_TYPE_QCN9000;
832 		*target_type = TARGET_TYPE_QCN9000;
833 		HIF_INFO(" *********** QCN9000 *************\n");
834 		break;
835 
836 	case QCN7605_DEVICE_ID:
837 	case QCN7605_COMPOSITE:
838 	case QCN7605_STANDALONE:
839 	case QCN7605_STANDALONE_V2:
840 	case QCN7605_COMPOSITE_V2:
841 		*hif_type = HIF_TYPE_QCN7605;
842 		*target_type = TARGET_TYPE_QCN7605;
843 		HIF_INFO(" *********** QCN7605 *************\n");
844 		break;
845 
846 	case QCA6390_DEVICE_ID:
847 	case QCA6390_EMULATION_DEVICE_ID:
848 		*hif_type = HIF_TYPE_QCA6390;
849 		*target_type = TARGET_TYPE_QCA6390;
850 		HIF_INFO(" *********** QCA6390 *************\n");
851 		break;
852 
853 	case QCA6490_DEVICE_ID:
854 	case QCA6490_EMULATION_DEVICE_ID:
855 		*hif_type = HIF_TYPE_QCA6490;
856 		*target_type = TARGET_TYPE_QCA6490;
857 		HIF_INFO(" *********** QCA6490 *************\n");
858 		break;
859 
860 	case QCA8074V2_DEVICE_ID:
861 		*hif_type = HIF_TYPE_QCA8074V2;
862 		*target_type = TARGET_TYPE_QCA8074V2;
863 		HIF_INFO(" *********** QCA8074V2 *************\n");
864 		break;
865 
866 	case QCA6018_DEVICE_ID:
867 	case RUMIM2M_DEVICE_ID_NODE0:
868 	case RUMIM2M_DEVICE_ID_NODE1:
869 	case RUMIM2M_DEVICE_ID_NODE2:
870 	case RUMIM2M_DEVICE_ID_NODE3:
871 	case RUMIM2M_DEVICE_ID_NODE4:
872 	case RUMIM2M_DEVICE_ID_NODE5:
873 		*hif_type = HIF_TYPE_QCA6018;
874 		*target_type = TARGET_TYPE_QCA6018;
875 		HIF_INFO(" *********** QCA6018 *************\n");
876 		break;
877 
878 	default:
879 		HIF_ERROR("%s: Unsupported device ID = 0x%x!",
880 			  __func__, device_id);
881 		ret = -ENODEV;
882 		break;
883 	}
884 
885 	if (*target_type == TARGET_TYPE_UNKNOWN) {
886 		HIF_ERROR("%s: Unsupported target_type!", __func__);
887 		ret = -ENODEV;
888 	}
889 end:
890 	return ret;
891 }
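/*
 * Usage sketch: a PCI probe path maps the PCI IDs to HIF/target types
 * before attaching the register tables. hif_register_tbl_attach() and
 * target_register_tbl_attach() are the regtable hooks assumed to be
 * available from regtable.h; pdev is the standard struct pci_dev.
 *
 *	uint32_t hif_type, target_type;
 *	int ret;
 *
 *	ret = hif_get_device_type(pdev->device, pdev->revision,
 *				  &hif_type, &target_type);
 *	if (ret < 0)
 *		return ret;	// unsupported device
 *	hif_register_tbl_attach(scn, hif_type);
 *	target_register_tbl_attach(scn, target_type);
 */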
892 
893 /**
894  * hif_get_bus_type() - return the bus type
895  * @hif_hdl: hif handle
896  * Return: enum qdf_bus_type
897  */
898 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
899 {
900 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
901 
902 	return scn->bus_type;
903 }
904 
905 /**
906  * Target info and ini parameters are global to the driver.
907  * Hence these structures are exposed to all the modules in
908  * the driver; they don't need to maintain multiple copies
909  * of the same info. Instead, they get the handle from hif and
910  * modify it through hif.
911  */
912 
913 /**
914  * hif_get_ini_handle() - API to get hif_config_param handle
915  * @hif_ctx: HIF Context
916  *
917  * Return: pointer to hif_config_info
918  */
919 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
920 {
921 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
922 
923 	return &sc->hif_config;
924 }
925 
926 /**
927  * hif_get_target_info_handle() - API to get hif_target_info handle
928  * @hif_ctx: HIF context
929  *
930  * Return: Pointer to hif_target_info
931  */
932 struct hif_target_info *hif_get_target_info_handle(
933 					struct hif_opaque_softc *hif_ctx)
934 {
935 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
936 
937 	return &sc->target_info;
938 
939 }
940 qdf_export_symbol(hif_get_target_info_handle);
941 
942 #ifdef RECEIVE_OFFLOAD
943 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
944 				 void (offld_flush_handler)(void *))
945 {
946 	if (hif_napi_enabled(scn, -1))
947 		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
948 	else
949 		HIF_ERROR("NAPI not enabled\n");
950 }
951 qdf_export_symbol(hif_offld_flush_cb_register);
952 
953 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
954 {
955 	if (hif_napi_enabled(scn, -1))
956 		hif_napi_rx_offld_flush_cb_deregister(scn);
957 	else
958 		HIF_ERROR("NAPI not enabled\n");
959 }
960 qdf_export_symbol(hif_offld_flush_cb_deregister);
961 
962 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
963 {
964 	if (hif_napi_enabled(hif_hdl, -1))
965 		return NAPI_PIPE2ID(ctx_id);
966 	else
967 		return ctx_id;
968 }
969 #else /* RECEIVE_OFFLOAD */
970 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
971 {
972 	return 0;
973 }
974 qdf_export_symbol(hif_get_rx_ctx_id);
975 #endif /* RECEIVE_OFFLOAD */
976 
977 #if defined(FEATURE_LRO)
978 
979 /**
980  * hif_get_lro_info - Returns LRO instance for instance ID
981  * @ctx_id: LRO instance ID
982  * @hif_hdl: HIF Context
983  *
984  * Return: Pointer to LRO instance.
985  */
986 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
987 {
988 	void *data;
989 
990 	if (hif_napi_enabled(hif_hdl, -1))
991 		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
992 	else
993 		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
994 
995 	return data;
996 }
997 #endif
998 
999 /**
1000  * hif_get_target_status - API to get target status
1001  * @hif_ctx: HIF Context
1002  *
1003  * Return: enum hif_target_status
1004  */
1005 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
1006 {
1007 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1008 
1009 	return scn->target_status;
1010 }
1011 qdf_export_symbol(hif_get_target_status);
1012 
1013 /**
1014  * hif_set_target_status() - API to set target status
1015  * @hif_ctx: HIF Context
1016  * @status: Target Status
1017  *
1018  * Return: void
1019  */
1020 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
1021 			   hif_target_status status)
1022 {
1023 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1024 
1025 	scn->target_status = status;
1026 }
1027 
1028 /**
1029  * hif_init_ini_config() - API to initialize HIF configuration parameters
1030  * @hif_ctx: HIF Context
1031  * @cfg: HIF Configuration
1032  *
1033  * Return: void
1034  */
1035 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
1036 			 struct hif_config_info *cfg)
1037 {
1038 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1039 
1040 	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
1041 }
1042 
1043 /**
1044  * hif_get_conparam() - API to get driver mode in HIF
1045  * @scn: HIF Context
1046  *
1047  * Return: driver mode of operation
1048  */
1049 uint32_t hif_get_conparam(struct hif_softc *scn)
1050 {
1051 	if (!scn)
1052 		return 0;
1053 
1054 	return scn->hif_con_param;
1055 }
1056 
1057 /**
1058  * hif_get_callbacks_handle() - API to get callbacks Handle
1059  * @scn: HIF Context
1060  *
1061  * Return: pointer to HIF Callbacks
1062  */
1063 struct hif_driver_state_callbacks *hif_get_callbacks_handle(
1064 							struct hif_softc *scn)
1065 {
1066 	return &scn->callbacks;
1067 }
1068 
1069 /**
1070  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
1071  * @scn: HIF Context
1072  *
1073  * Return: True/False
1074  */
1075 bool hif_is_driver_unloading(struct hif_softc *scn)
1076 {
1077 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1078 
1079 	if (cbk && cbk->is_driver_unloading)
1080 		return cbk->is_driver_unloading(cbk->context);
1081 
1082 	return false;
1083 }
1084 
1085 /**
1086  * hif_is_load_or_unload_in_progress() - API to query upper layers if
1087  * load/unload in progress
1088  * @scn: HIF Context
1089  *
1090  * Return: True/False
1091  */
1092 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
1093 {
1094 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1095 
1096 	if (cbk && cbk->is_load_unload_in_progress)
1097 		return cbk->is_load_unload_in_progress(cbk->context);
1098 
1099 	return false;
1100 }
1101 
1102 /**
1103  * hif_is_recovery_in_progress() - API to query upper layers if recovery in
1104  * progress
1105  * @scn: HIF Context
1106  *
1107  * Return: True/False
1108  */
1109 bool hif_is_recovery_in_progress(struct hif_softc *scn)
1110 {
1111 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1112 
1113 	if (cbk && cbk->is_recovery_in_progress)
1114 		return cbk->is_recovery_in_progress(cbk->context);
1115 
1116 	return false;
1117 }
1118 
1119 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
1120 
1121 /**
1122  * hif_update_pipe_callback() - API to register pipe specific callbacks
1123  * @osc: Opaque softc
1124  * @pipeid: pipe id
1125  * @callbacks: callbacks to register
1126  *
1127  * Return: void
1128  */
1129 
1130 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1131 					u_int8_t pipeid,
1132 					struct hif_msg_callbacks *callbacks)
1133 {
1134 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
1135 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1136 	struct HIF_CE_pipe_info *pipe_info;
1137 
1138 	QDF_BUG(pipeid < CE_COUNT_MAX);
1139 
1140 	HIF_INFO_LO("+%s pipeid %d\n", __func__, pipeid);
1141 
1142 	pipe_info = &hif_state->pipe_info[pipeid];
1143 
1144 	qdf_mem_copy(&pipe_info->pipe_callbacks,
1145 			callbacks, sizeof(pipe_info->pipe_callbacks));
1146 
1147 	HIF_INFO_LO("-%s\n", __func__);
1148 }
1149 qdf_export_symbol(hif_update_pipe_callback);
1150 
1151 /**
1152  * hif_is_target_ready() - API to query if the target is in the
1153  * ready state
1154  * @scn: HIF Context
1155  *
1156  * Return: True/False
1157  */
1158 bool hif_is_target_ready(struct hif_softc *scn)
1159 {
1160 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1161 
1162 	if (cbk && cbk->is_target_ready)
1163 		return cbk->is_target_ready(cbk->context);
1164 	/*
1165 	 * if callback is not registered then there is no way to determine
1166 	 * if target is ready. In-such case return true to indicate that
1167 	 * target is ready.
1168 	 */
1169 	return true;
1170 }
1171 qdf_export_symbol(hif_is_target_ready);
1172 
1173 /**
1174  * hif_batch_send() - API to access hif specific function
1175  * ce_batch_send.
1176  * @osc: HIF Context
1177  * @msdu : list of msdus to be sent
1178  * @transfer_id : transfer id
1179  * @len : downloaded length
1180  * @sendhead : sendhead value passed through to ce_batch_send
1181  * Return: list of msdus not sent
1182  */
1183 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1184 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
1185 {
1186 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1187 
1188 	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
1189 			len, sendhead);
1190 }
1191 qdf_export_symbol(hif_batch_send);
1192 
1193 /**
1194  * hif_update_tx_ring() - API to access hif specific function
1195  * ce_update_tx_ring.
1196  * @osc: HIF Context
1197  * @num_htt_cmpls : number of HTT completions received.
1198  *
1199  * Return: void
1200  */
1201 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
1202 {
1203 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1204 
1205 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
1206 }
1207 qdf_export_symbol(hif_update_tx_ring);
1208 
1209 
1210 /**
1211  * hif_send_single() - API to access hif specific function
1212  * ce_send_single.
1213  * @osc: HIF Context
1214  * @msdu : msdu to be sent
1215  * @transfer_id: transfer id
1216  * @len : downloaded length
1217  *
1218  * Return: msdu sent status
1219  */
1220 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1221 			   uint32_t transfer_id, u_int32_t len)
1222 {
1223 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1224 
1225 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
1226 			len);
1227 }
1228 qdf_export_symbol(hif_send_single);
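/*
 * Usage sketch: an HTT download path sending one frame over the HTT
 * TX copy engine. build_htt_msg() and handle_send_failure() are
 * hypothetical helpers; buffer ownership on failure follows the CE
 * layer's rules.
 *
 *	qdf_nbuf_t msdu = build_htt_msg();
 *	QDF_STATUS status;
 *
 *	status = hif_send_single(hif_ctx, msdu, transfer_id,
 *				 qdf_nbuf_len(msdu));
 *	if (QDF_IS_STATUS_ERROR(status))
 *		handle_send_failure(msdu);
 */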
1229 #endif
1230 
1231 /**
1232  * hif_reg_write() - API to access hif specific function
1233  * hif_write32_mb.
1234  * @hif_ctx : HIF Context
1235  * @offset : offset on which value has to be written
1236  * @value : value to be written
1237  *
1238  * Return: None
1239  */
1240 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
1241 		uint32_t value)
1242 {
1243 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1244 
1245 	hif_write32_mb(scn, scn->mem + offset, value);
1246 
1247 }
1248 qdf_export_symbol(hif_reg_write);
1249 
1250 /**
1251  * hif_reg_read() - API to access hif specific function
1252  * hif_read32_mb.
1253  * @hif_ctx : HIF Context
1254  * @offset : offset from which value has to be read
1255  *
1256  * Return: Read value
1257  */
1258 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
1259 {
1260 
1261 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1262 
1263 	return hif_read32_mb(scn, scn->mem + offset);
1264 }
1265 qdf_export_symbol(hif_reg_read);
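/*
 * Usage sketch: a read-modify-write of a target register relative to
 * the mapped BAR. REG_OFFSET and REG_BIT are placeholder names.
 *
 *	uint32_t val;
 *
 *	val = hif_reg_read(hif_ctx, REG_OFFSET);
 *	val |= REG_BIT;
 *	hif_reg_write(hif_ctx, REG_OFFSET, val);
 */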
1266 
1267 /**
1268  * hif_ramdump_handler(): generic ramdump handler
1269  * @scn: struct hif_opaque_softc
1270  *
1271  * Return: None
1272  */
1273 void hif_ramdump_handler(struct hif_opaque_softc *scn)
1274 {
1275 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
1276 		hif_usb_ramdump_handler(scn);
1277 }
1278 
1279 irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
1280 {
1281 	struct hif_softc *scn = context;
1282 	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
1283 
1284 	HIF_INFO("wake interrupt received on irq %d", irq);
1285 
1286 	if (hif_pm_runtime_get_monitor_wake_intr(hif_ctx)) {
1287 		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
1288 		hif_pm_runtime_request_resume(hif_ctx);
1289 	}
1290 
1291 	if (scn->initial_wakeup_cb)
1292 		scn->initial_wakeup_cb(scn->initial_wakeup_priv);
1293 
1294 	if (hif_is_ut_suspended(scn))
1295 		hif_ut_fw_resume(scn);
1296 
1297 	qdf_pm_system_wakeup();
1298 
1299 	return IRQ_HANDLED;
1300 }
1301 
1302 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
1303 			       void (*callback)(void *),
1304 			       void *priv)
1305 {
1306 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1307 
1308 	scn->initial_wakeup_cb = callback;
1309 	scn->initial_wakeup_priv = priv;
1310 }
1311 
1312 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
1313 				       uint32_t ce_service_max_yield_time)
1314 {
1315 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1316 
1317 	hif_ctx->ce_service_max_yield_time =
1318 		ce_service_max_yield_time * 1000;
1319 }
1320 
1321 unsigned long long
1322 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
1323 {
1324 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1325 
1326 	return hif_ctx->ce_service_max_yield_time;
1327 }
1328 
1329 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
1330 				       uint8_t ce_service_max_rx_ind_flush)
1331 {
1332 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1333 
1334 	if (ce_service_max_rx_ind_flush == 0 ||
1335 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
1336 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
1337 	else
1338 		hif_ctx->ce_service_max_rx_ind_flush =
1339 						ce_service_max_rx_ind_flush;
1340 }
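/*
 * Tuning sketch: both knobs are typically driven from ini/config
 * values at attach time (the cfg field names below are placeholders):
 *
 *	hif_set_ce_service_max_yield_time(hif_ctx,
 *					  cfg->ce_service_max_yield_time);
 *	hif_set_ce_service_max_rx_ind_flush(hif_ctx,
 *					    cfg->ce_service_max_rx_ind_flush);
 *
 * A flush count of 0 or anything above MSG_FLUSH_NUM falls back to
 * MSG_FLUSH_NUM, as implemented above.
 */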
1341