1 /*
2  * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "hif_hw_version.h"
33 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
34 #include "ce_tasklet.h"
35 #include "ce_api.h"
36 #endif
37 #include "qdf_trace.h"
39 #include "hif_debug.h"
40 #include "mp_dev.h"
41 #ifdef QCA_WIFI_QCA8074
42 #include "hal_api.h"
43 #endif
44 #include "hif_napi.h"
45 #include "hif_unit_test_suspend_i.h"
46 #include "qdf_module.h"
47 
48 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
49 {
50 	hif_trigger_dump(hif_ctx, cmd_id, start);
51 }
52 
53 /**
54  * hif_get_target_id(): get the target id for a hif context
55  *
56  * Return the virtual memory base address to the caller.
57  *
58  * @scn: hif_softc context
59  *
60  * Return: A_target_id_t
61  */
62 A_target_id_t hif_get_target_id(struct hif_softc *scn)
63 {
64 	return scn->mem;
65 }
66 
67 /**
68  * hif_get_targetdef(): get the target definitions for this hif context
69  * @hif_ctx: hif context
70  *
71  * Return: pointer to the target definitions
72  */
73 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
74 {
75 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
76 
77 	return scn->targetdef;
78 }
79 
80 /**
81  * hif_vote_link_down(): unvote for link up
82  *
83  * Call hif_vote_link_down to release a previous request made using
84  * hif_vote_link_up. A hif_vote_link_down call should only be made
85  * after a corresponding hif_vote_link_up, otherwise you could be
86  * negating a vote from another source. When no votes are present
87  * hif will not guarantee the linkstate after hif_bus_suspend.
88  *
89  * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
90  * and during initialization and deinitialization sequences.
91  *
92  * Return: n/a
93  */
94 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
95 {
96 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
97 
98 	QDF_BUG(scn);
99 	scn->linkstate_vote--;
100 	if (scn->linkstate_vote == 0)
101 		hif_bus_prevent_linkdown(scn, false);
102 }
103 
104 /**
105  * hif_vote_link_up(): vote to prevent bus from suspending
106  *
107  * Makes hif guarantee that fw can message the host normally
108  * during suspend.
109  *
110  * SYNCHRONIZE WITH hif_vote_link_down by only calling in MC thread
111  * and during initialization and deinitialization sequences.
112  *
113  * Return: n/a
114  */
115 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
116 {
117 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
118 
119 	QDF_BUG(scn);
120 	scn->linkstate_vote++;
121 	if (scn->linkstate_vote == 1)
122 		hif_bus_prevent_linkdown(scn, true);
123 }
124 
125 /**
126  * hif_can_suspend_link(): query if hif is permitted to suspend the link
127  *
128  * Hif will ensure that the link won't be suspended if the upper layers
129  * don't want it to.
130  *
131  * SYNCHRONIZATION: MC thread is stopped before bus suspend thus
132  * we don't need extra locking to ensure votes don't change while
133  * we are in the process of suspending or resuming.
134  *
135  * Return: false if hif will guarantee link up during suspend.
136  */
137 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
138 {
139 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
140 
141 	QDF_BUG(scn);
142 	return scn->linkstate_vote == 0;
143 }
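
/*
 * Usage sketch (illustrative, not driver code): link votes are reference
 * counted, so every hif_vote_link_up() must be paired with a later
 * hif_vote_link_down() from the same caller, e.g.
 *
 *	hif_vote_link_up(hif_ctx);
 *	... exchange messages that must not race with a bus suspend ...
 *	hif_vote_link_down(hif_ctx);
 *
 * While at least one vote is held, hif_can_suspend_link() returns false.
 */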
144 
145 /**
146  * hif_hia_item_address(): get the address of a host interest item
147  * @target_type: target type
148  * @item_offset: offset of the item within the host interest area
149  *
150  * Return: the item address, or 0 if the target type is not supported
151  */
152 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
153 {
154 	switch (target_type) {
155 	case TARGET_TYPE_AR6002:
156 		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
157 	case TARGET_TYPE_AR6003:
158 		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
159 	case TARGET_TYPE_AR6004:
160 		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
161 	case TARGET_TYPE_AR6006:
162 		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
163 	case TARGET_TYPE_AR9888:
164 		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
165 	case TARGET_TYPE_AR6320:
166 	case TARGET_TYPE_AR6320V2:
167 		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
168 	case TARGET_TYPE_ADRASTEA:
169 		/* ADRASTEA doesn't have a host interest address */
170 		ASSERT(0);
171 		return 0;
172 	case TARGET_TYPE_AR900B:
173 		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
174 	case TARGET_TYPE_QCA9984:
175 		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
176 	case TARGET_TYPE_QCA9888:
177 		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
178 	case TARGET_TYPE_IPQ4019:
179 		return IPQ4019_HOST_INTEREST_ADDRESS + item_offset;
180 
181 	default:
182 		ASSERT(0);
183 		return 0;
184 	}
185 }
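
/*
 * Example (illustrative): callers typically pass an offset into the host
 * interest area, e.g. something along the lines of
 *
 *	addr = hif_hia_item_address(target_type,
 *				    offsetof(struct host_interest_s,
 *					     hi_board_data));
 *
 * assuming the host_interest_s layout from targaddrs.h; the actual item
 * read depends on the caller.
 */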
186 
187 /**
188  * hif_max_num_receives_reached() - check max receive is reached
189  * @scn: HIF Context
190  * @count: current receive count
191  *
192  * Check whether the receive count has crossed the allowed maximum.
193  *
194  * Return: bool
195  */
196 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
197 {
198 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
199 		return count > 120;
200 	else
201 		return count > MAX_NUM_OF_RECEIVES;
202 }
203 
204 /**
205  * init_buffer_count() - initial buffer count
206  * @maxSize: maximum number of buffers requested
207  *
208  * Routine to adjust the initial buffer count to be allocated on a per
209  * OS-platform basis. Platform owners should modify this as needed.
210  *
211  * Return: qdf_size_t
212  */
213 qdf_size_t init_buffer_count(qdf_size_t maxSize)
214 {
215 	return maxSize;
216 }
217 
218 /**
219  * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
220  * @hif_ctx: hif context
221  * @htc_htt_tx_endpoint: htt_tx_endpoint
222  *
223  * Return: void
224  */
225 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
226 							int htc_htt_tx_endpoint)
227 {
228 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
229 
230 	if (!scn) {
231 		HIF_ERROR("%s: error: scn is NULL!",
232 		       __func__);
233 		return;
234 	}
235 
236 	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
237 }
238 qdf_export_symbol(hif_save_htc_htt_config_endpoint);
239 
240 static const struct qwlan_hw qwlan_hw_list[] = {
241 	{
242 		.id = AR6320_REV1_VERSION,
243 		.subid = 0,
244 		.name = "QCA6174_REV1",
245 	},
246 	{
247 		.id = AR6320_REV1_1_VERSION,
248 		.subid = 0x1,
249 		.name = "QCA6174_REV1_1",
250 	},
251 	{
252 		.id = AR6320_REV1_3_VERSION,
253 		.subid = 0x2,
254 		.name = "QCA6174_REV1_3",
255 	},
256 	{
257 		.id = AR6320_REV2_1_VERSION,
258 		.subid = 0x4,
259 		.name = "QCA6174_REV2_1",
260 	},
261 	{
262 		.id = AR6320_REV2_1_VERSION,
263 		.subid = 0x5,
264 		.name = "QCA6174_REV2_2",
265 	},
266 	{
267 		.id = AR6320_REV3_VERSION,
268 		.subid = 0x6,
269 		.name = "QCA6174_REV2.3",
270 	},
271 	{
272 		.id = AR6320_REV3_VERSION,
273 		.subid = 0x8,
274 		.name = "QCA6174_REV3",
275 	},
276 	{
277 		.id = AR6320_REV3_VERSION,
278 		.subid = 0x9,
279 		.name = "QCA6174_REV3_1",
280 	},
281 	{
282 		.id = AR6320_REV3_2_VERSION,
283 		.subid = 0xA,
284 		.name = "AR6320_REV3_2_VERSION",
285 	},
286 	{
287 		.id = WCN3990_v1,
288 		.subid = 0x0,
289 		.name = "WCN3990_V1",
290 	},
291 	{
292 		.id = WCN3990_v2,
293 		.subid = 0x0,
294 		.name = "WCN3990_V2",
295 	},
296 	{
297 		.id = WCN3990_v2_1,
298 		.subid = 0x0,
299 		.name = "WCN3990_V2.1",
300 	},
301 	{
302 		.id = QCA9379_REV1_VERSION,
303 		.subid = 0xC,
304 		.name = "QCA9379_REV1",
305 	},
306 	{
307 		.id = QCA9379_REV1_VERSION,
308 		.subid = 0xD,
309 		.name = "QCA9379_REV1_1",
310 	}
311 };
312 
313 /**
314  * hif_get_hw_name(): get a human readable name for the hardware
315  * @info: Target Info
316  *
317  * Return: human readable name for the underlying wifi hardware.
318  */
319 static const char *hif_get_hw_name(struct hif_target_info *info)
320 {
321 	int i;
322 
323 	if (info->hw_name)
324 		return info->hw_name;
325 
326 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
327 		if (info->target_version == qwlan_hw_list[i].id &&
328 		    info->target_revision == qwlan_hw_list[i].subid) {
329 			return qwlan_hw_list[i].name;
330 		}
331 	}
332 
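	/*
	 * No table entry matched: fall back to a generated name. The
	 * buffer allocated here becomes target_info.hw_name and is
	 * released in hif_close().
	 */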
333 	info->hw_name = qdf_mem_malloc(64);
334 	if (!info->hw_name)
335 		return "Unknown Device (nomem)";
336 
337 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
338 			info->target_version);
339 	if (i < 0)
340 		return "Unknown Device (snprintf failure)";
341 	else
342 		return info->hw_name;
343 }
344 
345 /**
346  * hif_get_hw_info(): fetch the hardware version, revision and name
347  * @scn: hif opaque context
348  * @version: pointer filled with the target version
349  * @revision: pointer filled with the target revision
 * @target_name: pointer filled with a human readable hardware name
350  *
351  * Return: n/a
352  */
353 void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
354 			const char **target_name)
355 {
356 	struct hif_target_info *info = hif_get_target_info_handle(scn);
357 	struct hif_softc *sc = HIF_GET_SOFTC(scn);
358 
359 	if (sc->bus_type == QDF_BUS_TYPE_USB)
360 		hif_usb_get_hw_info(sc);
361 
362 	*version = info->target_version;
363 	*revision = info->target_revision;
364 	*target_name = hif_get_hw_name(info);
365 }
366 
367 /**
368  * hif_get_dev_ba(): API to get device base address.
369  * @hif_handle: hif handle
370  *
371  * Return: device base address
374  */
375 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
376 {
377 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
378 
379 	return scn->mem;
380 }
381 qdf_export_symbol(hif_get_dev_ba);
382 /**
383  * hif_open(): hif_open
384  * @qdf_ctx: QDF Context
385  * @mode: Driver Mode
386  * @bus_type: Bus Type
387  * @cbk: CDS Callbacks
388  *
389  * API to open HIF Context
390  *
391  * Return: HIF Opaque Pointer
392  */
393 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode,
394 				  enum qdf_bus_type bus_type,
395 				  struct hif_driver_state_callbacks *cbk)
396 {
397 	struct hif_softc *scn;
398 	QDF_STATUS status = QDF_STATUS_SUCCESS;
399 	int bus_context_size = hif_bus_get_context_size(bus_type);
400 
401 	if (bus_context_size == 0) {
402 		HIF_ERROR("%s: context size 0 not allowed", __func__);
403 		return NULL;
404 	}
405 
406 	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
407 	if (!scn) {
408 		HIF_ERROR("%s: cannot alloc memory for HIF context of size:%d",
409 						__func__, bus_context_size);
410 		return GET_HIF_OPAQUE_HDL(scn);
411 	}
412 
413 	scn->qdf_dev = qdf_ctx;
414 	scn->hif_con_param = mode;
415 	qdf_atomic_init(&scn->active_tasklet_cnt);
416 	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
417 	qdf_atomic_init(&scn->link_suspended);
418 	qdf_atomic_init(&scn->tasklet_from_intr);
419 	qdf_mem_copy(&scn->callbacks, cbk,
420 		     sizeof(struct hif_driver_state_callbacks));
421 	scn->bus_type  = bus_type;
422 	status = hif_bus_open(scn, bus_type);
423 	if (status != QDF_STATUS_SUCCESS) {
424 		HIF_ERROR("%s: hif_bus_open error = %d, bus_type = %d",
425 				  __func__, status, bus_type);
426 		qdf_mem_free(scn);
427 		scn = NULL;
428 	}
429 
430 	return GET_HIF_OPAQUE_HDL(scn);
431 }
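
/*
 * Typical lifecycle (illustrative sketch, error handling omitted): a bus
 * glue layer is expected to pair these calls roughly as
 *
 *	hif_ctx = hif_open(qdf_ctx, mode, bus_type, cbk);
 *	hif_enable(hif_ctx, dev, bdev, bid, bus_type, type);
 *	...
 *	hif_disable(hif_ctx, type);
 *	hif_close(hif_ctx);
 *
 * hif_open() only allocates the context and opens the bus layer; the
 * hardware itself is brought up in hif_enable().
 */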
432 
433 #ifdef ADRASTEA_RRI_ON_DDR
434 /**
435  * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
436  * @scn: hif context
437  *
438  * Return: none
439  */
440 void hif_uninit_rri_on_ddr(struct hif_softc *scn)
441 {
442 	if (scn->vaddr_rri_on_ddr)
443 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
444 					(CE_COUNT * sizeof(uint32_t)),
445 					scn->vaddr_rri_on_ddr,
446 					scn->paddr_rri_on_ddr, 0);
447 	scn->vaddr_rri_on_ddr = NULL;
448 }
449 #endif
450 
451 /**
452  * hif_close(): release the hif context allocated by hif_open
453  * @hif_ctx: hif context
454  *
455  * Return: n/a
456  */
457 void hif_close(struct hif_opaque_softc *hif_ctx)
458 {
459 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
460 
461 	if (scn == NULL) {
462 		HIF_ERROR("%s: hif_opaque_softc is NULL", __func__);
463 		return;
464 	}
465 
466 	if (scn->athdiag_procfs_inited) {
467 		athdiag_procfs_remove();
468 		scn->athdiag_procfs_inited = false;
469 	}
470 
471 	if (scn->target_info.hw_name) {
472 		char *hw_name = scn->target_info.hw_name;
473 
474 		scn->target_info.hw_name = "ErrUnloading";
475 		qdf_mem_free(hw_name);
476 	}
477 
478 	hif_uninit_rri_on_ddr(scn);
479 
480 	hif_bus_close(scn);
481 	qdf_mem_free(scn);
482 }
483 
484 #ifdef QCA_WIFI_QCA8074
485 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
486 {
487 	if (ce_srng_based(scn)) {
488 		scn->hal_soc = hal_attach(scn, scn->qdf_dev);
489 		if (scn->hal_soc == NULL)
490 			return QDF_STATUS_E_FAILURE;
491 	}
492 
493 	return QDF_STATUS_SUCCESS;
494 }
495 
496 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
497 {
498 	if (ce_srng_based(scn)) {
499 		hal_detach(scn->hal_soc);
500 		scn->hal_soc = NULL;
501 	}
502 
503 	return QDF_STATUS_SUCCESS;
504 }
505 #else
506 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
507 {
508 	return QDF_STATUS_SUCCESS;
509 }
510 
511 static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
512 {
513 	return QDF_STATUS_SUCCESS;
514 }
515 #endif
516 
517 /**
518  * hif_enable(): enable the bus and configure the target
519  * @hif_ctx: hif context
520  * @dev: device
521  * @bdev: bus dev
522  * @bid: bus ID
523  * @bus_type: bus type
524  * @type: enable type
525  *
526  * Return: QDF_STATUS
527  */
528 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
529 					  void *bdev,
530 					  const struct hif_bus_id *bid,
531 					  enum qdf_bus_type bus_type,
532 					  enum hif_enable_type type)
533 {
534 	QDF_STATUS status;
535 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
536 
537 	if (scn == NULL) {
538 		HIF_ERROR("%s: hif_ctx = NULL", __func__);
539 		return QDF_STATUS_E_NULL_VALUE;
540 	}
541 
542 	status = hif_enable_bus(scn, dev, bdev, bid, type);
543 	if (status != QDF_STATUS_SUCCESS) {
544 		HIF_ERROR("%s: hif_enable_bus error = %d",
545 				  __func__, status);
546 		return status;
547 	}
548 
549 	status = hif_hal_attach(scn);
550 	if (status != QDF_STATUS_SUCCESS) {
551 		HIF_ERROR("%s: hal attach failed", __func__);
552 		goto disable_bus;
553 	}
554 
555 	if (hif_bus_configure(scn)) {
556 		HIF_ERROR("%s: Target probe failed.", __func__);
557 		status = QDF_STATUS_E_FAILURE;
558 		goto hal_detach;
559 	}
560 
561 	hif_ut_suspend_init(scn);
562 
563 	/*
564 	 * Set this flag to avoid a potential access to unallocated memory
565 	 * from the MSI interrupt handler, which could get scheduled as soon
566 	 * as MSI is enabled, i.e. to handle the race caused by MSI being
567 	 * enabled before the memory used by the interrupt handlers is
568 	 * allocated.
569 	 */
570 
571 	scn->hif_init_done = true;
572 
573 	HIF_DBG("%s: OK", __func__);
574 
575 	return QDF_STATUS_SUCCESS;
576 
577 hal_detach:
578 	hif_hal_detach(scn);
579 disable_bus:
580 	hif_disable_bus(scn);
581 	return status;
582 }
583 
584 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
585 {
586 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
587 
588 	if (!scn)
589 		return;
590 
591 	hif_nointrs(scn);
592 	if (scn->hif_init_done == false)
593 		hif_shutdown_device(hif_ctx);
594 	else
595 		hif_stop(hif_ctx);
596 
597 	hif_hal_detach(scn);
598 
599 	hif_disable_bus(scn);
600 
601 	hif_wlan_disable(scn);
602 
603 	scn->notice_send = false;
604 
605 	HIF_DBG("%s: X", __func__);
606 }
607 
608 void hif_display_stats(struct hif_opaque_softc *hif_ctx)
609 {
610 	hif_display_bus_stats(hif_ctx);
611 }
612 
613 void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
614 {
615 	hif_clear_bus_stats(hif_ctx);
616 }
617 
618 /**
619  * hif_crash_shutdown_dump_bus_register() - dump bus registers
620  * @hif_ctx: hif_ctx
621  *
622  * Return: n/a
623  */
624 #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) \
625 && defined(DEBUG)
626 
627 static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
628 {
629 	struct hif_opaque_softc *scn = hif_ctx;
630 
631 	if (hif_check_soc_status(scn))
632 		return;
633 
634 	if (hif_dump_registers(scn))
635 		HIF_ERROR("Failed to dump bus registers!");
636 }
637 
638 /**
639  * hif_crash_shutdown(): hif_crash_shutdown
640  *
641  * This function is called by the platform driver to dump CE registers
642  *
643  * @hif_ctx: hif_ctx
644  *
645  * Return: n/a
646  */
647 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
648 {
649 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
650 
651 	if (!hif_ctx)
652 		return;
653 
654 	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
655 		HIF_INFO_MED("%s: RAM dump disabled for bustype %d",
656 				__func__, scn->bus_type);
657 		return;
658 	}
659 
660 	if (TARGET_STATUS_RESET == scn->target_status) {
661 		HIF_INFO_MED("%s: Target is already asserted, ignore!",
662 			    __func__);
663 		return;
664 	}
665 
666 	if (hif_is_load_or_unload_in_progress(scn)) {
667 		HIF_ERROR("%s: Load/unload is in progress, ignore!", __func__);
668 		return;
669 	}
670 
671 	hif_crash_shutdown_dump_bus_register(hif_ctx);
672 
673 	if (ol_copy_ramdump(hif_ctx))
674 		goto out;
675 
676 	HIF_INFO_MED("%s: RAM dump collection completed!", __func__);
677 
678 out:
679 	return;
680 }
681 #else
682 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
683 {
684 	HIF_INFO_MED("%s: Collecting target RAM dump disabled",
685 		__func__);
686 }
687 #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
688 
689 #ifdef QCA_WIFI_3_0
690 /**
691  * hif_check_fw_reg(): hif_check_fw_reg
692  * @scn: scn
694  *
695  * Return: int
696  */
697 int hif_check_fw_reg(struct hif_opaque_softc *scn)
698 {
699 	return 0;
700 }
701 #endif
702 
703 /**
704  * hif_read_phy_mem_base(): hif_read_phy_mem_base
705  * @scn: scn
706  * @phy_mem_base: physical mem base
707  *
708  * Return: n/a
709  */
710 void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
711 {
712 	*phy_mem_base = scn->mem_pa;
713 }
714 qdf_export_symbol(hif_read_phy_mem_base);
715 
716 /**
717  * hif_get_device_type(): hif_get_device_type
718  * @device_id: device_id
719  * @revision_id: revision_id
720  * @hif_type: returned hif_type
721  * @target_type: returned target_type
722  *
723  * Return: int
724  */
725 int hif_get_device_type(uint32_t device_id,
726 			uint32_t revision_id,
727 			uint32_t *hif_type, uint32_t *target_type)
728 {
729 	int ret = 0;
730 
731 	switch (device_id) {
732 	case ADRASTEA_DEVICE_ID_P2_E12:
733 
734 		*hif_type = HIF_TYPE_ADRASTEA;
735 		*target_type = TARGET_TYPE_ADRASTEA;
736 		break;
737 
738 	case AR9888_DEVICE_ID:
739 		*hif_type = HIF_TYPE_AR9888;
740 		*target_type = TARGET_TYPE_AR9888;
741 		break;
742 
743 	case AR6320_DEVICE_ID:
744 		switch (revision_id) {
745 		case AR6320_FW_1_1:
746 		case AR6320_FW_1_3:
747 			*hif_type = HIF_TYPE_AR6320;
748 			*target_type = TARGET_TYPE_AR6320;
749 			break;
750 
751 		case AR6320_FW_2_0:
752 		case AR6320_FW_3_0:
753 		case AR6320_FW_3_2:
754 			*hif_type = HIF_TYPE_AR6320V2;
755 			*target_type = TARGET_TYPE_AR6320V2;
756 			break;
757 
758 		default:
759 			HIF_ERROR("%s: error - dev_id = 0x%x, rev_id = 0x%x",
760 				   __func__, device_id, revision_id);
761 			ret = -ENODEV;
762 			goto end;
763 		}
764 		break;
765 
766 	case AR9887_DEVICE_ID:
767 		*hif_type = HIF_TYPE_AR9888;
768 		*target_type = TARGET_TYPE_AR9888;
769 		HIF_INFO(" *********** AR9887 **************");
770 		break;
771 
772 	case QCA9984_DEVICE_ID:
773 		*hif_type = HIF_TYPE_QCA9984;
774 		*target_type = TARGET_TYPE_QCA9984;
775 		HIF_INFO(" *********** QCA9984 *************");
776 		break;
777 
778 	case QCA9888_DEVICE_ID:
779 		*hif_type = HIF_TYPE_QCA9888;
780 		*target_type = TARGET_TYPE_QCA9888;
781 		HIF_INFO(" *********** QCA9888 *************");
782 		break;
783 
784 	case AR900B_DEVICE_ID:
785 		*hif_type = HIF_TYPE_AR900B;
786 		*target_type = TARGET_TYPE_AR900B;
787 		HIF_INFO(" *********** AR900B *************");
788 		break;
789 
790 	case IPQ4019_DEVICE_ID:
791 		*hif_type = HIF_TYPE_IPQ4019;
792 		*target_type = TARGET_TYPE_IPQ4019;
793 		HIF_INFO(" *********** IPQ4019  *************");
794 		break;
795 
796 	case QCA8074_DEVICE_ID:
797 	case RUMIM2M_DEVICE_ID_NODE0:
798 	case RUMIM2M_DEVICE_ID_NODE1:
799 	case RUMIM2M_DEVICE_ID_NODE2:
800 	case RUMIM2M_DEVICE_ID_NODE3:
801 		*hif_type = HIF_TYPE_QCA8074;
802 		*target_type = TARGET_TYPE_QCA8074;
803 		HIF_INFO(" *********** QCA8074  *************\n");
804 		break;
805 
806 	case QCA6290_EMULATION_DEVICE_ID:
807 	case QCA6290_DEVICE_ID:
808 		*hif_type = HIF_TYPE_QCA6290;
809 		*target_type = TARGET_TYPE_QCA6290;
810 		HIF_INFO(" *********** QCA6290EMU *************\n");
811 		break;
812 
813 	default:
814 		HIF_ERROR("%s: Unsupported device ID!", __func__);
815 		ret = -ENODEV;
816 		break;
817 	}
818 
819 	if (*target_type == TARGET_TYPE_UNKNOWN) {
820 		HIF_ERROR("%s: Unsupported target_type!", __func__);
821 		ret = -ENODEV;
822 	}
823 end:
824 	return ret;
825 }
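
/*
 * Example (illustrative): a probe path might translate its device IDs as
 *
 *	uint32_t hif_type, target_type;
 *	int rc = hif_get_device_type(AR6320_DEVICE_ID, AR6320_FW_3_0,
 *				     &hif_type, &target_type);
 *
 * A non-zero rc (-ENODEV) means the device/revision pair is unsupported;
 * otherwise hif_type/target_type select the attach path to use.
 */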
826 
827 /**
828  * hif_get_bus_type() - return the bus type
829  *
830  * Return: enum qdf_bus_type
831  */
832 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
833 {
834 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
835 
836 	return scn->bus_type;
837 }
838 
839 /**
840  * Target info and ini parameters are global to the driver.
841  * Hence these structures are exposed to all the modules in
842  * the driver and they don't need to maintain multiple copies
843  * of the same info; instead, get the handle from hif and
844  * modify them through hif.
845  */
846 
847 /**
848  * hif_get_ini_handle() - API to get hif_config_param handle
849  * @hif_ctx: HIF Context
850  *
851  * Return: pointer to hif_config_info
852  */
853 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
854 {
855 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
856 
857 	return &sc->hif_config;
858 }
859 
860 /**
861  * hif_get_target_info_handle() - API to get hif_target_info handle
862  * @hif_ctx: HIF context
863  *
864  * Return: Pointer to hif_target_info
865  */
866 struct hif_target_info *hif_get_target_info_handle(
867 					struct hif_opaque_softc *hif_ctx)
868 {
869 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
870 
871 	return &sc->target_info;
872 
873 }
874 qdf_export_symbol(hif_get_target_info_handle);
875 
876 #ifdef RECEIVE_OFFLOAD
877 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
878 				 void (offld_flush_handler)(void *))
879 {
880 	if (hif_napi_enabled(scn, -1))
881 		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
882 	else
883 		HIF_ERROR("NAPI not enabled\n");
884 }
885 qdf_export_symbol(hif_offld_flush_cb_register);
886 
887 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
888 {
889 	if (hif_napi_enabled(scn, -1))
890 		hif_napi_rx_offld_flush_cb_deregister(scn);
891 	else
892 		HIF_ERROR("NAPI not enabled\n");
893 }
894 qdf_export_symbol(hif_offld_flush_cb_deregister);
895 
896 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
897 {
898 	if (hif_napi_enabled(hif_hdl, -1))
899 		return NAPI_PIPE2ID(ctx_id);
900 	else
901 		return ctx_id;
902 }
903 #else /* RECEIVE_OFFLOAD */
904 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
905 {
906 	return 0;
907 }
908 #endif /* RECEIVE_OFFLOAD */
909 
910 #if defined(FEATURE_LRO)
911 
912 /**
913  * hif_get_lro_info - Returns LRO instance for instance ID
914  * @ctx_id: LRO instance ID
915  * @hif_hdl: HIF Context
916  *
917  * Return: Pointer to LRO instance.
918  */
919 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
920 {
921 	void *data;
922 
923 	if (hif_napi_enabled(hif_hdl, -1))
924 		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
925 	else
926 		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
927 
928 	return data;
929 }
930 #endif
931 
932 /**
933  * hif_get_target_status - API to get target status
934  * @hif_ctx: HIF Context
935  *
936  * Return: enum hif_target_status
937  */
938 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
939 {
940 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
941 
942 	return scn->target_status;
943 }
944 qdf_export_symbol(hif_get_target_status);
945 
946 /**
947  * hif_set_target_status() - API to set target status
948  * @hif_ctx: HIF Context
949  * @status: Target Status
950  *
951  * Return: void
952  */
953 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
954 			   hif_target_status status)
955 {
956 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
957 
958 	scn->target_status = status;
959 }
960 
961 /**
962  * hif_init_ini_config() - API to initialize HIF configuration parameters
963  * @hif_ctx: HIF Context
964  * @cfg: HIF Configuration
965  *
966  * Return: void
967  */
968 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
969 			 struct hif_config_info *cfg)
970 {
971 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
972 
973 	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
974 }
975 
976 /**
977  * hif_get_conparam() - API to get driver mode in HIF
978  * @scn: HIF Context
979  *
980  * Return: driver mode of operation
981  */
982 uint32_t hif_get_conparam(struct hif_softc *scn)
983 {
984 	if (!scn)
985 		return 0;
986 
987 	return scn->hif_con_param;
988 }
989 
990 /**
991  * hif_get_callbacks_handle() - API to get callbacks Handle
992  * @scn: HIF Context
993  *
994  * Return: pointer to HIF Callbacks
995  */
996 struct hif_driver_state_callbacks *hif_get_callbacks_handle(
997 							struct hif_softc *scn)
998 {
999 	return &scn->callbacks;
1000 }
1001 
1002 /**
1003  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
1004  * @scn: HIF Context
1005  *
1006  * Return: True/False
1007  */
1008 bool hif_is_driver_unloading(struct hif_softc *scn)
1009 {
1010 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1011 
1012 	if (cbk && cbk->is_driver_unloading)
1013 		return cbk->is_driver_unloading(cbk->context);
1014 
1015 	return false;
1016 }
1017 
1018 /**
1019  * hif_is_load_or_unload_in_progress() - API to query upper layers if
1020  * load/unload in progress
1021  * @scn: HIF Context
1022  *
1023  * Return: True/False
1024  */
1025 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
1026 {
1027 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1028 
1029 	if (cbk && cbk->is_load_unload_in_progress)
1030 		return cbk->is_load_unload_in_progress(cbk->context);
1031 
1032 	return false;
1033 }
1034 
1035 /**
1036  * hif_is_recovery_in_progress() - API to query upper layers if recovery in
1037  * progress
1038  * @scn: HIF Context
1039  *
1040  * Return: True/False
1041  */
1042 bool hif_is_recovery_in_progress(struct hif_softc *scn)
1043 {
1044 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1045 
1046 	if (cbk && cbk->is_recovery_in_progress)
1047 		return cbk->is_recovery_in_progress(cbk->context);
1048 
1049 	return false;
1050 }
1051 
1052 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
1053 
1054 /**
1055  * hif_update_pipe_callback() - API to register pipe specific callbacks
1056  * @osc: Opaque softc
1057  * @pipeid: pipe id
1058  * @callbacks: callbacks to register
1059  *
1060  * Return: void
1061  */
1062 
1063 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1064 					u_int8_t pipeid,
1065 					struct hif_msg_callbacks *callbacks)
1066 {
1067 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
1068 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1069 	struct HIF_CE_pipe_info *pipe_info;
1070 
1071 	QDF_BUG(pipeid < CE_COUNT_MAX);
1072 
1073 	HIF_INFO_LO("+%s pipeid %d\n", __func__, pipeid);
1074 
1075 	pipe_info = &hif_state->pipe_info[pipeid];
1076 
1077 	qdf_mem_copy(&pipe_info->pipe_callbacks,
1078 			callbacks, sizeof(pipe_info->pipe_callbacks));
1079 
1080 	HIF_INFO_LO("-%s\n", __func__);
1081 }
1082 qdf_export_symbol(hif_update_pipe_callback);
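
/*
 * Illustrative registration sketch: an upper layer (e.g. HTC) fills a
 * struct hif_msg_callbacks with its completion handlers and installs it
 * for a copy engine pipe:
 *
 *	struct hif_msg_callbacks cbs = { ... };
 *
 *	hif_update_pipe_callback(osc, pipe_id, &cbs);
 *
 * The callbacks are copied into the pipe info, so the caller's structure
 * need not remain valid afterwards.
 */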
1083 
1084 /**
1085  * hif_is_target_ready() - API to query if the target is in ready state
1087  * @scn: HIF Context
1088  *
1089  * Return: True/False
1090  */
1091 bool hif_is_target_ready(struct hif_softc *scn)
1092 {
1093 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1094 
1095 	if (cbk && cbk->is_target_ready)
1096 		return cbk->is_target_ready(cbk->context);
1097 
1098 	return false;
1099 }
1100 
1101 /**
1102  * hif_batch_send() - API to access hif specific function
1103  * ce_batch_send.
1104  * @osc: HIF Context
1105  * @msdu : list of msdus to be sent
1106  * @transfer_id : transfer id
1107  * @len : downloaded length
1108  *
1109  * Return: list of msdus not sent
1110  */
1111 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1112 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
1113 {
1114 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1115 
1116 	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
1117 			len, sendhead);
1118 }
1119 qdf_export_symbol(hif_batch_send);
1120 
1121 /**
1122  * hif_update_tx_ring() - API to access hif specific function
1123  * ce_update_tx_ring.
1124  * @osc: HIF Context
1125  * @num_htt_cmpls : number of htt compl received.
1126  *
1127  * Return: void
1128  */
1129 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
1130 {
1131 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1132 
1133 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
1134 }
1135 qdf_export_symbol(hif_update_tx_ring);
1136 
1137 
1138 /**
1139  * hif_send_single() - API to access hif specific function
1140  * ce_send_single.
1141  * @osc: HIF Context
1142  * @msdu : msdu to be sent
1143  * @transfer_id: transfer id
1144  * @len : downloaded length
1145  *
1146  * Return: msdu sent status
1147  */
1148 int hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, uint32_t
1149 		transfer_id, u_int32_t len)
1150 {
1151 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1152 
1153 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
1154 			len);
1155 }
1156 qdf_export_symbol(hif_send_single);
1157 
1158 #ifdef WLAN_FEATURE_FASTPATH
1159 /**
1160  * hif_send_fast() - API to access hif specific function
1161  * ce_send_fast.
1162  * @osc: HIF Context
1163  * @nbuf : buffer to be sent
1165  * @transfer_id: transfer id
1166  * @download_len: download length
1167  *
1168  * Return: No. of packets that could be sent
1169  */
1170 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
1171 		uint32_t transfer_id, uint32_t download_len)
1172 {
1173 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1174 
1175 	return ce_send_fast((struct CE_handle *)ce_tx_hdl, nbuf,
1176 			transfer_id, download_len);
1177 }
1178 qdf_export_symbol(hif_send_fast);
1179 #endif
1180 #endif
1181 
1182 /**
1183  * hif_reg_write() - API to access hif specific function
1184  * hif_write32_mb.
1185  * @hif_ctx : HIF Context
1186  * @offset : offset on which value has to be written
1187  * @value : value to be written
1188  *
1189  * Return: None
1190  */
1191 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
1192 		uint32_t value)
1193 {
1194 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1195 
1196 	hif_write32_mb(scn->mem + offset, value);
1197 
1198 }
1199 qdf_export_symbol(hif_reg_write);
1200 
1201 /**
1202  * hif_reg_read() - API to access hif specific function
1203  * hif_read32_mb.
1204  * @hif_ctx : HIF Context
1205  * @offset : offset from which value has to be read
1206  *
1207  * Return: Read value
1208  */
1209 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
1210 {
1211 
1212 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1213 
1214 	return hif_read32_mb(scn->mem + offset);
1215 }
1216 qdf_export_symbol(hif_reg_read);
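
/*
 * Example (illustrative): the two accessors above can be combined for a
 * simple read-modify-write of a target register, e.g.
 *
 *	val = hif_reg_read(hif_ctx, offset);
 *	hif_reg_write(hif_ctx, offset, val | mask);
 *
 * where offset and mask are caller supplied; no locking is done here, so
 * callers must serialize concurrent access themselves if required.
 */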
1217 
1218 /**
1219  * hif_ramdump_handler(): generic ramdump handler
1220  * @scn: struct hif_opaque_softc
1221  *
1222  * Return: None
1223  */
1224 void hif_ramdump_handler(struct hif_opaque_softc *scn)
1225 {
1226 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
1227 		hif_usb_ramdump_handler(scn);
1228 }
1229 
1230 #ifdef WLAN_SUSPEND_RESUME_TEST
1231 irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
1232 {
1233 	struct hif_softc *scn = context;
1234 
1235 	HIF_INFO("wake interrupt received on irq %d", irq);
1236 
1237 	if (scn->initial_wakeup_cb)
1238 		scn->initial_wakeup_cb(scn->initial_wakeup_priv);
1239 
1240 	if (hif_is_ut_suspended(scn))
1241 		hif_ut_fw_resume(scn);
1242 
1243 	return IRQ_HANDLED;
1244 }
1245 #else /* WLAN_SUSPEND_RESUME_TEST */
1246 irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
1247 {
1248 	struct hif_softc *scn = context;
1249 
1250 	HIF_INFO("wake interrupt received on irq %d", irq);
1251 
1252 	if (scn->initial_wakeup_cb)
1253 		scn->initial_wakeup_cb(scn->initial_wakeup_priv);
1254 
1255 	return IRQ_HANDLED;
1256 }
1257 #endif /* WLAN_SUSPEND_RESUME_TEST */
1258 
1259 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
1260 			       void (*callback)(void *),
1261 			       void *priv)
1262 {
1263 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1264 
1265 	scn->initial_wakeup_cb = callback;
1266 	scn->initial_wakeup_priv = priv;
1267 }
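
/*
 * Illustrative usage: a bus layer that requests a wake IRQ can register a
 * callback before enabling the interrupt, e.g.
 *
 *	hif_set_initial_wakeup_cb(hif_ctx, my_wake_notify, my_priv);
 *
 * (my_wake_notify/my_priv are hypothetical); the callback is then invoked
 * from hif_wake_interrupt_handler() above when the wake interrupt fires.
 */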
1268 
1269 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
1270 				       uint32_t ce_service_max_yield_time)
1271 {
1272 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1273 
1274 	hif_ctx->ce_service_max_yield_time =
1275 		ce_service_max_yield_time * 1000;
1276 }
1277 
1278 unsigned long long
1279 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
1280 {
1281 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1282 
1283 	return hif_ctx->ce_service_max_yield_time;
1284 }
1285 
1286 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
1287 				       uint8_t ce_service_max_rx_ind_flush)
1288 {
1289 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
1290 
1291 	if (ce_service_max_rx_ind_flush == 0 ||
1292 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
1293 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
1294 	else
1295 		hif_ctx->ce_service_max_rx_ind_flush =
1296 						ce_service_max_rx_ind_flush;
1297 }
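
/*
 * Illustrative tuning sketch (values are hypothetical): the CE service
 * parameters above can be adjusted at init time, e.g.
 *
 *	hif_set_ce_service_max_yield_time(hif_ctx, yield_time);
 *	hif_set_ce_service_max_rx_ind_flush(hif_ctx, max_rx_ind_flush);
 *
 * A flush value of 0, or one larger than MSG_FLUSH_NUM, is clamped to
 * MSG_FLUSH_NUM by the setter.
 */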
1298