xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_main.c (revision c8039e3fa439b838b525783fb76d6bdc0259257c)
1 /*
2  * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 #include "targcfg.h"
29 #include "qdf_lock.h"
30 #include "qdf_status.h"
31 #include "qdf_status.h"
32 #include <qdf_atomic.h>         /* qdf_atomic_read */
33 #include <targaddrs.h>
34 #include "hif_io32.h"
35 #include <hif.h>
36 #include "regtable.h"
37 #define ATH_MODULE_NAME hif
38 #include <a_debug.h>
39 #include "hif_main.h"
40 #include "hif_hw_version.h"
41 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
42 #include "ce_tasklet.h"
43 #endif
44 #include "qdf_trace.h"
45 #include "qdf_status.h"
46 #include "hif_debug.h"
47 #include "mp_dev.h"
48 #include "ce_api.h"
49 #ifdef QCA_WIFI_QCA8074
50 #include "hal_api.h"
51 #endif
52 #include "hif_napi.h"
53 
54 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
55 {
56 	hif_trigger_dump(hif_ctx, cmd_id, start);
57 }
58 
59 /**
60  * hif_get_target_id(): get the target's virtual memory base address
61  * @scn: hif_softc context
62  *
63  * Return the virtual memory base address to the caller.
64  *
65  *
66  * Return: A_target_id_t
67  */
68 A_target_id_t hif_get_target_id(struct hif_softc *scn)
69 {
70 	return scn->mem;
71 }
72 
73 /**
74  * hif_get_targetdef(): get the target register definition table
75  * @hif_ctx: HIF opaque context
76  *
77  * Return: pointer to the target definitions (targetdef)
78  */
79 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
80 {
81 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
82 
83 	return scn->targetdef;
84 }
85 
86 /**
87  * hif_vote_link_down(): unvote for link up
88  *
89  * Call hif_vote_link_down to release a previous request made using
90  * hif_vote_link_up. A hif_vote_link_down call should only be made
91  * after a corresponding hif_vote_link_up, otherwise you could be
92  * negating a vote from another source. When no votes are present
93  * hif will not guarantee the linkstate after hif_bus_suspend.
94  *
95  * SYNCHRONIZE WITH hif_vote_link_up by only calling in the MC thread
96  * and in initialization/deinitialization sequences.
97  *
98  * Return: n/a
99  */
100 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
101 {
102 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
103 	QDF_BUG(scn);
104 
105 	scn->linkstate_vote--;
106 	if (scn->linkstate_vote == 0)
107 		hif_bus_prevent_linkdown(scn, false);
108 }
109 
110 /**
111  * hif_vote_link_up(): vote to prevent bus from suspending
112  *
113  * Makes hif guarantee that fw can message the host normally
114  * during suspend.
115  *
116  * SYNCHRONIZE WITH hif_vote_link_down by only calling in the MC thread
117  * and in initialization/deinitialization sequences.
118  *
119  * Return: n/a
120  */
121 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
122 {
123 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
124 	QDF_BUG(scn);
125 
126 	scn->linkstate_vote++;
127 	if (scn->linkstate_vote == 1)
128 		hif_bus_prevent_linkdown(scn, true);
129 }
130 
131 /**
132  * hif_can_suspend_link(): query if hif is permitted to suspend the link
133  *
134  * Hif will ensure that the link won't be suspended if the upper layers
135  * don't want it to.
136  *
137  * SYNCHRONIZATION: MC thread is stopped before bus suspend thus
138  * we don't need extra locking to ensure votes don't change while
139  * we are in the process of suspending or resuming.
140  *
141  * Return: false if hif will guarantee link up during suspend.
142  */
143 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
144 {
145 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
146 	QDF_BUG(scn);
147 
148 	return scn->linkstate_vote == 0;
149 }
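
/*
 * Usage sketch (illustrative, not part of this driver): pairing
 * hif_vote_link_up()/hif_vote_link_down() around work that needs the
 * firmware to reach the host while the bus might otherwise suspend.
 * The caller name and the work done are hypothetical; per the comments
 * above, calls must be made from the MC thread or init/deinit paths.
 */
#if 0
static void example_fw_exchange(struct hif_opaque_softc *hif_ctx)
{
	hif_vote_link_up(hif_ctx);	/* first vote keeps the link up */

	/* ... exchange messages that require a live link ... */

	hif_vote_link_down(hif_ctx);	/* release the matching vote */

	/* with no votes left, hif_can_suspend_link() returns true again */
}
#endif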
150 
151 #ifndef CONFIG_WIN
152 #define QCA9984_HOST_INTEREST_ADDRESS -1
153 #define QCA9888_HOST_INTEREST_ADDRESS -1
154 #define IPQ4019_HOST_INTEREST_ADDRESS -1
155 #endif
156 
157 /**
158  * hif_hia_item_address(): get the address of a host interest item
159  * @target_type: target type
160  * @item_offset: offset of the item within the host interest area
161  *
162  * Return: host interest address of the item, or 0 for unsupported targets
163  */
164 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
165 {
166 	switch (target_type) {
167 	case TARGET_TYPE_AR6002:
168 		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
169 	case TARGET_TYPE_AR6003:
170 		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
171 	case TARGET_TYPE_AR6004:
172 		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
173 	case TARGET_TYPE_AR6006:
174 		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
175 	case TARGET_TYPE_AR9888:
176 		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
177 	case TARGET_TYPE_AR6320:
178 	case TARGET_TYPE_AR6320V2:
179 		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
180 	case TARGET_TYPE_ADRASTEA:
181 		/* ADRASTEA doesn't have a host interest address */
182 		ASSERT(0);
183 		return 0;
184 	case TARGET_TYPE_AR900B:
185 		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
186 	case TARGET_TYPE_QCA9984:
187 		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
188 	case TARGET_TYPE_QCA9888:
189 		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
190 	case TARGET_TYPE_IPQ4019:
191 		return IPQ4019_HOST_INTEREST_ADDRESS + item_offset;
192 
193 	default:
194 		ASSERT(0);
195 		return 0;
196 	}
197 }
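
/*
 * Usage sketch (illustrative, not part of this driver): computing the
 * target-side address of one host interest item. struct host_interest_s
 * and its hi_board_data field are assumed to come from targaddrs.h, and
 * hif_target_info is assumed to carry a TARGET_TYPE_* value in
 * target_type. Callers would typically follow this with a diag read.
 */
#if 0
static uint32_t example_board_data_address(struct hif_softc *scn)
{
	uint32_t target_type = scn->target_info.target_type;

	return hif_hia_item_address(target_type,
			offsetof(struct host_interest_s, hi_board_data));
}
#endif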
198 
199 /**
200  * hif_max_num_receives_reached() - check whether the receive limit is reached
201  * @scn: HIF Context
202  * @count: number of packets processed so far
203  *
204  * Compares @count against the receive budget for the current mode.
205  *
206  * Return: true if the maximum number of receives has been reached
207  */
208 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
209 {
210 	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
211 		return count > 120;
212 	else
213 		return count > MAX_NUM_OF_RECEIVES;
214 }
215 
216 /**
217  * init_buffer_count() - initial buffer count
218  * @maxSize: maximum buffer count requested
219  *
220  * Routine to adjust the initial buffer count to be allocated on a
221  * per-OS-platform basis. Platform owners may modify this as needed.
222  *
223  * Return: number of buffers to allocate initially
224  */
225 qdf_size_t init_buffer_count(qdf_size_t maxSize)
226 {
227 	return maxSize;
228 }
229 
230 /**
231  * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
232  * @hif_ctx: hif context
233  * @htc_htt_tx_endpoint: htt_tx_endpoint
234  *
235  * Return: void
236  */
237 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
238 							int htc_htt_tx_endpoint)
239 {
240 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
241 
242 	if (!scn) {
243 		HIF_ERROR("%s: error: scn is NULL!", __func__);
245 		return;
246 	}
247 
248 	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
249 }
250 
251 static const struct qwlan_hw qwlan_hw_list[] = {
252 	{
253 		.id = AR6320_REV1_VERSION,
254 		.subid = 0,
255 		.name = "QCA6174_REV1",
256 	},
257 	{
258 		.id = AR6320_REV1_1_VERSION,
259 		.subid = 0x1,
260 		.name = "QCA6174_REV1_1",
261 	},
262 	{
263 		.id = AR6320_REV1_3_VERSION,
264 		.subid = 0x2,
265 		.name = "QCA6174_REV1_3",
266 	},
267 	{
268 		.id = AR6320_REV2_1_VERSION,
269 		.subid = 0x4,
270 		.name = "QCA6174_REV2_1",
271 	},
272 	{
273 		.id = AR6320_REV2_1_VERSION,
274 		.subid = 0x5,
275 		.name = "QCA6174_REV2_2",
276 	},
277 	{
278 		.id = AR6320_REV3_VERSION,
279 		.subid = 0x6,
280 		.name = "QCA6174_REV2.3",
281 	},
282 	{
283 		.id = AR6320_REV3_VERSION,
284 		.subid = 0x8,
285 		.name = "QCA6174_REV3",
286 	},
287 	{
288 		.id = AR6320_REV3_VERSION,
289 		.subid = 0x9,
290 		.name = "QCA6174_REV3_1",
291 	},
292 	{
293 		.id = AR6320_REV3_2_VERSION,
294 		.subid = 0xA,
295 		.name = "AR6320_REV3_2_VERSION",
296 	},
297 	{
298 		.id = WCN3990_v1,
299 		.subid = 0x0,
300 		.name = "WCN3990_V1",
301 	},
302 	{
303 		.id = WCN3990_v2,
304 		.subid = 0x0,
305 		.name = "WCN3990_V2",
306 	},
307 	{
308 		.id = WCN3990_v2_1,
309 		.subid = 0x0,
310 		.name = "WCN3990_V2.1",
311 	},
312 	{
313 		.id = QCA9379_REV1_VERSION,
314 		.subid = 0xC,
315 		.name = "QCA9379_REV1",
316 	},
317 	{
318 		.id = QCA9379_REV1_VERSION,
319 		.subid = 0xD,
320 		.name = "QCA9379_REV1_1",
321 	}
322 };
323 
324 /**
325  * hif_get_hw_name(): get a human readable name for the hardware
326  * @info: Target Info
327  *
328  * Return: human readable name for the underlying wifi hardware.
329  */
330 static const char *hif_get_hw_name(struct hif_target_info *info)
331 {
332 	int i;
333 
334 	if (info->hw_name)
335 		return info->hw_name;
336 
337 	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
338 		if (info->target_version == qwlan_hw_list[i].id &&
339 		    info->target_revision == qwlan_hw_list[i].subid) {
340 			return qwlan_hw_list[i].name;
341 		}
342 	}
343 
344 	info->hw_name = qdf_mem_malloc(64);
345 	if (!info->hw_name)
346 		return "Unknown Device (nomem)";
347 
348 	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
349 			info->target_version);
350 	if (i < 0)
351 		return "Unknown Device (snprintf failure)";
352 	else
353 		return info->hw_name;
354 }
355 
356 /**
357  * hif_get_hw_info(): fetch hardware version, revision and name
358  * @scn: HIF opaque context
359  * @version: filled with the target version
360  * @revision: filled with the target revision
361  * @target_name: filled with a human readable hardware name
362  * Return: n/a
363  */
364 void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
365 			const char **target_name)
366 {
367 	struct hif_target_info *info = hif_get_target_info_handle(scn);
368 	struct hif_softc *sc = HIF_GET_SOFTC(scn);
369 
370 	if (sc->bus_type == QDF_BUS_TYPE_USB)
371 		hif_usb_get_hw_info(sc);
372 
373 	*version = info->target_version;
374 	*revision = info->target_revision;
375 	*target_name = hif_get_hw_name(info);
376 }
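
/*
 * Usage sketch (illustrative, not part of this driver): retrieving and
 * logging the hardware identification. The caller name is hypothetical;
 * the out-parameters are filled by hif_get_hw_info() above and the name
 * string is owned by HIF (do not free it).
 */
#if 0
static void example_log_hw_info(struct hif_opaque_softc *hif_ctx)
{
	uint32_t version = 0, revision = 0;
	const char *name = NULL;

	hif_get_hw_info(hif_ctx, &version, &revision, &name);
	HIF_INFO("wifi hw: %s (version 0x%x, revision 0x%x)",
		 name, version, revision);
}
#endif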
377 
378 /**
379  * hif_get_dev_ba(): API to get device base address.
380  * @hif_handle: HIF opaque context
381  *
382  * Returns the virtual base address of the device registers.
383  *
384  * Return: device base address
385  */
386 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
387 {
388 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
389 
390 	return scn->mem;
391 }
392 /**
393  * hif_open(): allocate and initialize a HIF context
394  * @qdf_ctx: QDF Context
395  * @mode: Driver Mode
396  * @bus_type: Bus Type
397  * @cbk: CDS Callbacks
398  *
399  * API to open HIF Context
400  *
401  * Return: HIF Opaque Pointer
402  */
403 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode,
404 				  enum qdf_bus_type bus_type,
405 				  struct hif_driver_state_callbacks *cbk)
406 {
407 	struct hif_softc *scn;
408 	QDF_STATUS status = QDF_STATUS_SUCCESS;
409 	int bus_context_size = hif_bus_get_context_size(bus_type);
410 
411 	if (bus_context_size == 0) {
412 		HIF_ERROR("%s: context size 0 not allowed", __func__);
413 		return NULL;
414 	}
415 
416 	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
417 	if (!scn) {
418 		HIF_ERROR("%s: cannot alloc memory for HIF context of size:%d",
419 						__func__, bus_context_size);
420 		return GET_HIF_OPAQUE_HDL(scn);
421 	}
422 
423 	scn->qdf_dev = qdf_ctx;
424 	scn->hif_con_param = mode;
425 	qdf_atomic_init(&scn->active_tasklet_cnt);
426 	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
427 	qdf_atomic_init(&scn->link_suspended);
428 	qdf_atomic_init(&scn->tasklet_from_intr);
429 	qdf_mem_copy(&scn->callbacks, cbk, sizeof(struct hif_driver_state_callbacks));
430 	scn->bus_type  = bus_type;
431 	status = hif_bus_open(scn, bus_type);
432 	if (status != QDF_STATUS_SUCCESS) {
433 		HIF_ERROR("%s: hif_bus_open error = %d, bus_type = %d",
434 				  __func__, status, bus_type);
435 		qdf_mem_free(scn);
436 		scn = NULL;
437 	}
438 
439 	return GET_HIF_OPAQUE_HDL(scn);
440 }
441 
442 /**
443  * hif_close(): release the HIF context allocated by hif_open
444  * @hif_ctx: hif_ctx
445  *
446  * Return: n/a
447  */
448 void hif_close(struct hif_opaque_softc *hif_ctx)
449 {
450 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
451 
452 	if (scn == NULL) {
453 		HIF_ERROR("%s: hif_opaque_softc is NULL", __func__);
454 		return;
455 	}
456 
457 	if (scn->athdiag_procfs_inited) {
458 		athdiag_procfs_remove();
459 		scn->athdiag_procfs_inited = false;
460 	}
461 
462 	if (scn->target_info.hw_name) {
463 		char *hw_name = scn->target_info.hw_name;
464 		scn->target_info.hw_name = "ErrUnloading";
465 		qdf_mem_free(hw_name);
466 	}
467 
468 	hif_bus_close(scn);
469 	qdf_mem_free(scn);
470 }
471 
472 #ifdef QCA_WIFI_QCA8074
473 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
474 {
475 	if (ce_srng_based(scn)) {
476 		scn->hal_soc = hal_attach(scn, scn->qdf_dev);
477 		if (scn->hal_soc == NULL)
478 			return QDF_STATUS_E_FAILURE;
479 	}
480 
481 	return QDF_STATUS_SUCCESS;
482 }
483 #else
484 static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
485 {
486 	return QDF_STATUS_SUCCESS;
487 }
488 #endif
489 
490 /**
491  * hif_enable(): enable the bus and bring up the target
492  * @hif_ctx: hif_ctx
493  * @dev: dev
494  * @bdev: bus dev
495  * @bid: bus ID
496  * @bus_type: bus type
497  * @type: enable type
498  *
499  * Return: QDF_STATUS
500  */
501 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
502 					  void *bdev, const hif_bus_id *bid,
503 					  enum qdf_bus_type bus_type,
504 					  enum hif_enable_type type)
505 {
506 	QDF_STATUS status;
507 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
508 
509 	if (scn == NULL) {
510 		HIF_ERROR("%s: hif_ctx = NULL", __func__);
511 		return QDF_STATUS_E_NULL_VALUE;
512 	}
513 
514 	status = hif_enable_bus(scn, dev, bdev, bid, type);
515 	if (status != QDF_STATUS_SUCCESS) {
516 		HIF_ERROR("%s: hif_enable_bus error = %d",
517 				  __func__, status);
518 		return status;
519 	}
520 
521 	status = hif_hal_attach(scn);
522 	if (status != QDF_STATUS_SUCCESS) {
523 		HIF_ERROR("%s: hal attach failed", __func__);
524 		return status;
525 	}
526 
527 	if (hif_bus_configure(scn)) {
528 		HIF_ERROR("%s: Target probe failed.", __func__);
529 		hif_disable_bus(scn);
530 		status = QDF_STATUS_E_FAILURE;
531 		return status;
532 	}
533 
534 	/*
535 	 * Flag to avoid potential unallocated memory access from MSI
536 	 * interrupt handler which could get scheduled as soon as MSI
537 	 * is enabled, i.e. to take care of the race where MSI is
538 	 * enabled before the memory that the interrupt handlers
539 	 * use has been allocated.
540 	 */
541 
542 	scn->hif_init_done = true;
543 
544 	HIF_TRACE("%s: OK", __func__);
545 
546 	return QDF_STATUS_SUCCESS;
547 }
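
/*
 * Bring-up sketch (illustrative, not part of this driver): the expected
 * pairing of hif_open()/hif_enable() on the way up and hif_disable()/
 * hif_close() on the way down. The bus type, mode value and
 * HIF_ENABLE_TYPE_PROBE are assumptions based on hif.h; real callers pass
 * the values handed to them by the platform/bus probe.
 */
#if 0
static struct hif_opaque_softc *example_hif_bring_up(qdf_device_t qdf_dev,
				struct device *dev, void *bdev,
				const hif_bus_id *bid, uint32_t mode,
				struct hif_driver_state_callbacks *cbk)
{
	struct hif_opaque_softc *hif_ctx;

	hif_ctx = hif_open(qdf_dev, mode, QDF_BUS_TYPE_PCI, cbk);
	if (!hif_ctx)
		return NULL;

	if (hif_enable(hif_ctx, dev, bdev, bid, QDF_BUS_TYPE_PCI,
		       HIF_ENABLE_TYPE_PROBE) != QDF_STATUS_SUCCESS) {
		hif_close(hif_ctx);
		return NULL;
	}

	/* teardown is the mirror image: hif_disable() then hif_close() */
	return hif_ctx;
}
#endif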
548 
549 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
550 {
551 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
552 
553 	if (!scn)
554 		return;
555 
556 	hif_nointrs(scn);
557 	if (scn->hif_init_done == false)
558 		hif_shutdown_device(hif_ctx);
559 	else
560 		hif_stop(hif_ctx);
561 
562 	hif_disable_bus(scn);
563 
564 	hif_wlan_disable(scn);
565 
566 	scn->notice_send = false;
567 
568 	HIF_INFO("%s: X", __func__);
569 }
570 
571 void hif_display_stats(struct hif_opaque_softc *hif_ctx)
572 {
573 	hif_display_bus_stats(hif_ctx);
574 }
575 
576 void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
577 {
578 	hif_clear_bus_stats(hif_ctx);
579 }
580 
581 /**
582  * hif_crash_shutdown_dump_bus_register() - dump bus registers
583  * @hif_ctx: hif_ctx
584  *
585  * Return: n/a
586  */
587 #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) \
588 && defined(DEBUG)
589 
590 static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
591 {
592 	struct hif_opaque_softc *scn = hif_ctx;
593 
594 	if (hif_check_soc_status(scn))
595 		return;
596 
597 	if (hif_dump_registers(scn))
598 		HIF_ERROR("Failed to dump bus registers!");
599 }
600 
601 /**
602  * hif_crash_shutdown(): dump target state for a crash/ramdump
603  * @hif_ctx: HIF opaque context
604  *
605  * This function is called by the platform driver to dump CE registers
606  *
607  *
608  * Return: n/a
609  */
610 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
611 {
612 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
613 
614 	if (!hif_ctx)
615 		return;
616 
617 	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
618 		HIF_INFO_MED("%s: RAM dump disabled for bustype %d",
619 				__func__, scn->bus_type);
620 		return;
621 	}
622 
623 	if (TARGET_STATUS_RESET == scn->target_status) {
624 		HIF_INFO_MED("%s: Target is already asserted, ignore!",
625 			    __func__);
626 		return;
627 	}
628 
629 	if (hif_is_load_or_unload_in_progress(scn)) {
630 		HIF_ERROR("%s: Load/unload is in progress, ignore!", __func__);
631 		return;
632 	}
633 
634 	hif_crash_shutdown_dump_bus_register(hif_ctx);
635 
636 	if (ol_copy_ramdump(hif_ctx))
637 		goto out;
638 
639 	HIF_INFO_MED("%s: RAM dump collecting completed!", __func__);
640 
641 out:
642 	return;
643 }
644 #else
645 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
646 {
647 	HIF_INFO_MED("%s: Collecting target RAM dump disabled",
648 		__func__);
649 	return;
650 }
651 #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
652 
653 #ifdef QCA_WIFI_3_0
654 /**
655  * hif_check_fw_reg(): check the firmware register state
656  * @scn: HIF opaque context
657  *
658  * Return: 0 (no firmware register check is needed on QCA_WIFI_3_0
659  * targets)
660  */
661 int hif_check_fw_reg(struct hif_opaque_softc *scn)
662 {
663 	return 0;
664 }
665 #endif
666 
667 #ifdef IPA_OFFLOAD
668 /**
669  * hif_read_phy_mem_base(): read the target's physical memory base address
670  * @scn: scn
671  * @phy_mem_base: physical mem base
672  *
673  * Return: n/a
674  */
675 void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
676 {
677 	*phy_mem_base = scn->mem_pa;
678 }
679 #endif /* IPA_OFFLOAD */
680 
681 /**
682  * hif_get_device_type(): map device and revision IDs to hif/target types
683  * @device_id: device_id
684  * @revision_id: revision_id
685  * @hif_type: returned hif_type
686  * @target_type: returned target_type
687  *
688  * Return: 0 on success, -ENODEV if the device ID is not supported
689  */
690 int hif_get_device_type(uint32_t device_id,
691 			uint32_t revision_id,
692 			uint32_t *hif_type, uint32_t *target_type)
693 {
694 	int ret = 0;
695 
696 	switch (device_id) {
697 	case ADRASTEA_DEVICE_ID_P2_E12:
698 
699 		*hif_type = HIF_TYPE_ADRASTEA;
700 		*target_type = TARGET_TYPE_ADRASTEA;
701 		break;
702 
703 	case AR9888_DEVICE_ID:
704 		*hif_type = HIF_TYPE_AR9888;
705 		*target_type = TARGET_TYPE_AR9888;
706 		break;
707 
708 	case AR6320_DEVICE_ID:
709 		switch (revision_id) {
710 		case AR6320_FW_1_1:
711 		case AR6320_FW_1_3:
712 			*hif_type = HIF_TYPE_AR6320;
713 			*target_type = TARGET_TYPE_AR6320;
714 			break;
715 
716 		case AR6320_FW_2_0:
717 		case AR6320_FW_3_0:
718 		case AR6320_FW_3_2:
719 			*hif_type = HIF_TYPE_AR6320V2;
720 			*target_type = TARGET_TYPE_AR6320V2;
721 			break;
722 
723 		default:
724 			HIF_ERROR("%s: error - dev_id = 0x%x, rev_id = 0x%x",
725 				   __func__, device_id, revision_id);
726 			ret = -ENODEV;
727 			goto end;
728 		}
729 		break;
730 
731 	case AR9887_DEVICE_ID:
732 		*hif_type = HIF_TYPE_AR9888;
733 		*target_type = TARGET_TYPE_AR9888;
734 		HIF_INFO(" *********** AR9887 **************");
735 		break;
736 
737 	case QCA9984_DEVICE_ID:
738 		*hif_type = HIF_TYPE_QCA9984;
739 		*target_type = TARGET_TYPE_QCA9984;
740 		HIF_INFO(" *********** QCA9984 *************");
741 		break;
742 
743 	case QCA9888_DEVICE_ID:
744 		*hif_type = HIF_TYPE_QCA9888;
745 		*target_type = TARGET_TYPE_QCA9888;
746 		HIF_INFO(" *********** QCA9888 *************");
747 		break;
748 
749 	case AR900B_DEVICE_ID:
750 		*hif_type = HIF_TYPE_AR900B;
751 		*target_type = TARGET_TYPE_AR900B;
752 		HIF_INFO(" *********** AR900B *************");
753 		break;
754 
755 	case IPQ4019_DEVICE_ID:
756 		*hif_type = HIF_TYPE_IPQ4019;
757 		*target_type = TARGET_TYPE_IPQ4019;
758 		HIF_INFO(" *********** IPQ4019  *************");
759 		break;
760 
761 	case QCA8074_DEVICE_ID:
762 	case RUMIM2M_DEVICE_ID_NODE0:
763 	case RUMIM2M_DEVICE_ID_NODE1:
764 	case RUMIM2M_DEVICE_ID_NODE2:
765 	case RUMIM2M_DEVICE_ID_NODE3:
766 		*hif_type = HIF_TYPE_QCA8074;
767 		*target_type = TARGET_TYPE_QCA8074;
768 		HIF_INFO(" *********** QCA8074  *************\n");
769 		break;
770 
771 	case QCA6290_EMULATION_DEVICE_ID:
772 		*hif_type = HIF_TYPE_QCA6290;
773 		*target_type = TARGET_TYPE_QCA6290;
774 		HIF_INFO(" *********** QCA6290EMU *************\n");
775 		break;
776 
777 	default:
778 		HIF_ERROR("%s: Unsupported device ID!", __func__);
779 		ret = -ENODEV;
780 		break;
781 	}
782 end:
783 	return ret;
784 }
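
/*
 * Usage sketch (illustrative, not part of this driver): classifying a
 * probed device. The device/revision IDs normally come from the PCI (or
 * platform) probe; the caller name is hypothetical.
 */
#if 0
static int example_classify_device(uint32_t device_id, uint32_t revision_id)
{
	uint32_t hif_type = 0, target_type = 0;
	int ret;

	ret = hif_get_device_type(device_id, revision_id,
				  &hif_type, &target_type);
	if (ret) {
		HIF_ERROR("unsupported device 0x%x rev 0x%x",
			  device_id, revision_id);
		return ret;
	}

	HIF_INFO("hif_type %u target_type %u", hif_type, target_type);
	return 0;
}
#endif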
785 
786 /**
787  * hif_needs_bmi() - return true if the soc needs bmi through the driver
788  * @hif_ctx: hif context
789  *
790  * Return: true if the soc needs driver bmi otherwise false
791  */
792 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
793 {
794 	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx);
795 
796 	return (hif_sc->bus_type != QDF_BUS_TYPE_SNOC) &&
797 		!ce_srng_based(hif_sc);
798 }
799 
800 /**
801  * hif_get_bus_type() - return the bus type
802  * @hif_hdl: HIF opaque handle
803  * Return: enum qdf_bus_type
804  */
805 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
806 {
807 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
808 	return scn->bus_type;
809 }
810 
811 /*
812  * Target info and ini parameters are global to the driver.
813  * Hence these structures are exposed to all the modules in
814  * the driver and they don't need to maintain multiple copies
815  * of the same info; instead, get the handle from hif and
816  * modify them in hif.
817  */
818 
819 /**
820  * hif_get_ini_handle() - API to get hif_config_param handle
821  * @hif_ctx: HIF Context
822  *
823  * Return: pointer to hif_config_info
824  */
825 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
826 {
827 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
828 
829 	return &sc->hif_config;
830 }
831 
832 /**
833  * hif_get_target_info_handle() - API to get hif_target_info handle
834  * @hif_ctx: HIF context
835  *
836  * Return: Pointer to hif_target_info
837  */
838 struct hif_target_info *hif_get_target_info_handle(
839 					struct hif_opaque_softc *hif_ctx)
840 {
841 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
842 
843 	return &sc->target_info;
844 
845 }
846 
847 #if defined(FEATURE_LRO)
848 /**
849  * hif_lro_flush_cb_register - API to register for LRO Flush Callback
850  * @scn: HIF Context
851  * @lro_flush_handler: flush callback to be invoked by HIF
852  * @lro_init_handler: callback to initialize per-instance LRO state
853  *
854  * Return: void
855  */
856 void hif_lro_flush_cb_register(struct hif_opaque_softc *scn,
857 			       void (lro_flush_handler)(void *),
858 			       void *(lro_init_handler)(void))
859 {
860 	if (hif_napi_enabled(scn, -1))
861 		hif_napi_lro_flush_cb_register(scn, lro_flush_handler,
862 					       lro_init_handler);
863 	else
864 		ce_lro_flush_cb_register(scn, lro_flush_handler,
865 					lro_init_handler);
866 }
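
/*
 * Usage sketch (illustrative, not part of this driver): registering LRO
 * callbacks. The two handlers are hypothetical; their prototypes follow
 * the parameters of hif_lro_flush_cb_register() above. HIF decides
 * internally whether NAPI or the copy engine layer drives them.
 */
#if 0
static void example_lro_flush(void *lro_ctx)
{
	/* flush any packets aggregated in @lro_ctx */
}

static void *example_lro_init(void)
{
	/* allocate and return per-instance LRO state */
	return NULL;
}

static void example_register_lro(struct hif_opaque_softc *hif_ctx)
{
	hif_lro_flush_cb_register(hif_ctx, example_lro_flush,
				  example_lro_init);
}
#endif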
867 
868 /**
869  * hif_get_lro_info - Returns LRO instance for instance ID
870  * @ctx_id: LRO instance ID
871  * @hif_hdl: HIF Context
872  *
873  * Return: Pointer to LRO instance.
874  */
875 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
876 {
877 	void *data;
878 
879 	if (hif_napi_enabled(hif_hdl, -1))
880 		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
881 	else
882 		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
883 
884 	return data;
885 }
886 
887 /**
888  * hif_get_rx_ctx_id - Returns LRO instance ID based on underlying LRO instance
889  * @ctx_id: LRO context ID
890  * @hif_hdl: HIF Context
891  *
892  * Return: LRO instance ID
893  */
894 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
895 {
896 	if (hif_napi_enabled(hif_hdl, -1))
897 		return NAPI_PIPE2ID(ctx_id);
898 	else
899 		return ctx_id;
900 }
901 
902 /**
903  * hif_lro_flush_cb_deregister - API to deregister for LRO Flush Callbacks
904  * @hif_hdl: HIF Context
905  * @lro_deinit_cb: LRO deinit callback
906  *
907  * Return: void
908  */
909 void hif_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl,
910 				 void (lro_deinit_cb)(void *))
911 {
912 	if (hif_napi_enabled(hif_hdl, -1))
913 		hif_napi_lro_flush_cb_deregister(hif_hdl, lro_deinit_cb);
914 	else
915 		ce_lro_flush_cb_deregister(hif_hdl, lro_deinit_cb);
916 }
917 #else /* !defined(FEATURE_LRO) */
918 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
919 {
920 	return 0;
921 }
922 #endif
923 
924 /**
925  * hif_get_target_status - API to get target status
926  * @hif_ctx: HIF Context
927  *
928  * Return: enum hif_target_status
929  */
930 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
931 {
932 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
933 
934 	return scn->target_status;
935 }
936 
937 /**
938  * hif_set_target_status() - API to set target status
939  * @hif_ctx: HIF Context
940  * @status: Target Status
941  *
942  * Return: void
943  */
944 void hif_set_target_status(struct hif_opaque_softc *hif_ctx,
945 			   enum hif_target_status status)
946 {
947 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
948 
949 	scn->target_status = status;
950 }
951 
952 /**
953  * hif_init_ini_config() - API to initialize HIF configuration parameters
954  * @hif_ctx: HIF Context
955  * @cfg: HIF Configuration
956  *
957  * Return: void
958  */
959 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
960 			 struct hif_config_info *cfg)
961 {
962 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
963 
964 	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
965 }
966 
967 /**
968  * hif_get_conparam() - API to get driver mode in HIF
969  * @scn: HIF Context
970  *
971  * Return: driver mode of operation
972  */
973 uint32_t hif_get_conparam(struct hif_softc *scn)
974 {
975 	if (!scn)
976 		return 0;
977 
978 	return scn->hif_con_param;
979 }
980 
981 /**
982  * hif_get_callbacks_handle() - API to get callbacks Handle
983  * @scn: HIF Context
984  *
985  * Return: pointer to HIF Callbacks
986  */
987 struct hif_driver_state_callbacks *hif_get_callbacks_handle(struct hif_softc *scn)
988 {
989 	return &scn->callbacks;
990 }
991 
992 /**
993  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
994  * @scn: HIF Context
995  *
996  * Return: True/False
997  */
998 bool hif_is_driver_unloading(struct hif_softc *scn)
999 {
1000 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1001 
1002 	if (cbk && cbk->is_driver_unloading)
1003 		return cbk->is_driver_unloading(cbk->context);
1004 
1005 	return false;
1006 }
1007 
1008 /**
1009  * hif_is_load_or_unload_in_progress() - API to query upper layers if
1010  * load/unload in progress
1011  * @scn: HIF Context
1012  *
1013  * Return: True/False
1014  */
1015 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
1016 {
1017 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1018 
1019 	if (cbk && cbk->is_load_unload_in_progress)
1020 		return cbk->is_load_unload_in_progress(cbk->context);
1021 
1022 	return false;
1023 }
1024 
1025 /**
1026  * hif_update_pipe_callback() - API to register pipe specific callbacks
1027  * @osc: Opaque softc
1028  * @pipeid: pipe id
1029  * @callbacks: callbacks to register
1030  *
1031  * Return: void
1032  */
1033 
1034 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
1035 					u_int8_t pipeid,
1036 					struct hif_msg_callbacks *callbacks)
1037 {
1038 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
1039 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1040 	struct HIF_CE_pipe_info *pipe_info;
1041 
1042 	QDF_BUG(pipeid < CE_COUNT_MAX);
1043 
1044 	HIF_INFO_LO("+%s pipeid %d\n", __func__, pipeid);
1045 
1046 	pipe_info = &hif_state->pipe_info[pipeid];
1047 
1048 	qdf_mem_copy(&pipe_info->pipe_callbacks,
1049 			callbacks, sizeof(pipe_info->pipe_callbacks));
1050 
1051 	HIF_INFO_LO("-%s\n", __func__);
1052 }
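
/*
 * Usage sketch (illustrative, not part of this driver): registering
 * per-pipe message callbacks. The pipe id and the handler wiring are
 * hypothetical; the members of struct hif_msg_callbacks that need to be
 * filled in are declared in hif.h.
 */
#if 0
static void example_register_pipe_callbacks(struct hif_opaque_softc *hif_ctx,
					    uint8_t pipe_id)
{
	struct hif_msg_callbacks cbs;

	qdf_mem_zero(&cbs, sizeof(cbs));
	/* fill in the send/receive completion members from hif.h here */
	hif_update_pipe_callback(hif_ctx, pipe_id, &cbs);
}
#endif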
1053 
1054 /**
1055  * hif_is_recovery_in_progress() - API to query upper layers if recovery in
1056  * progress
1057  * @scn: HIF Context
1058  *
1059  * Return: True/False
1060  */
1061 bool hif_is_recovery_in_progress(struct hif_softc *scn)
1062 {
1063 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
1064 
1065 	if (cbk && cbk->is_recovery_in_progress)
1066 		return cbk->is_recovery_in_progress(cbk->context);
1067 
1068 	return false;
1069 }
1070 #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
1071 /**
1072  * hif_batch_send() - API to access hif specific function
1073  * ce_batch_send.
1074  * @osc: HIF Context
1075  * @msdu : list of msdus to be sent
1076  * @transfer_id : transfer id
1077  * @len : downloaded length
1078  *
1079  * Return: list of msdus that could not be sent
1080  */
1081 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
1082 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
1083 {
1084 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1085 
1086 	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
1087 			len, sendhead);
1088 }
1089 
1090 /**
1091  * hif_update_tx_ring() - API to access hif specific function
1092  * ce_update_tx_ring.
1093  * @osc: HIF Context
1094  * @num_htt_cmpls : number of htt compl received.
1095  *
1096  * Return: void
1097  */
1098 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
1099 {
1100 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1101 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
1102 }
1103 
1104 
1105 /**
1106  * hif_send_single() - API to access hif specific function
1107  * ce_send_single.
1108  * @osc: HIF Context
1109  * @msdu : msdu to be sent
1110  * @transfer_id: transfer id
1111  * @len : downloaded length
1112  *
1113  * Return: msdu sent status
1114  */
1115 int hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, uint32_t
1116 		transfer_id, u_int32_t len)
1117 {
1118 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1119 
1120 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
1121 			len);
1122 }
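
/*
 * Usage sketch (illustrative, not part of this driver): downloading a
 * single frame through the HTT tx copy engine. The caller name and the
 * use of the full nbuf length as the download length are assumptions
 * made for the example.
 */
#if 0
static int example_send_one(struct hif_opaque_softc *hif_ctx,
			    qdf_nbuf_t nbuf, uint32_t transfer_id)
{
	uint32_t download_len = qdf_nbuf_len(nbuf);

	return hif_send_single(hif_ctx, nbuf, transfer_id, download_len);
}
#endif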
1123 
1124 /**
1125  * hif_send_fast() - API to access hif specific function
1126  * ce_send_fast.
1127  * @osc: HIF Context
1128  * @nbuf : network buffer to be sent
1129  * @transfer_id: transfer id
1130  * @download_len: download length
1132  *
1133  * Return: No. of packets that could be sent
1134  */
1135 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
1136 		uint32_t transfer_id, uint32_t download_len)
1137 {
1138 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
1139 
1140 	return ce_send_fast((struct CE_handle *)ce_tx_hdl, nbuf,
1141 			transfer_id, download_len);
1142 }
1143 #endif
1144 
1145 /**
1146  * hif_reg_write() - API to access hif specific function
1147  * hif_write32_mb.
1148  * @hif_ctx : HIF Context
1149  * @offset : offset on which value has to be written
1150  * @value : value to be written
1151  *
1152  * Return: None
1153  */
1154 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
1155 		uint32_t value)
1156 {
1157 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1158 
1159 	hif_write32_mb(scn->mem + offset, value);
1160 }
1161 
1162 /**
1163  * hif_reg_read() - API to access hif specific function
1164  * hif_read32_mb.
1165  * @hif_ctx : HIF Context
1166  * @offset : offset from which value has to be read
1167  *
1168  * Return: Read value
1169  */
1170 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
1171 {
1172 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1173 
1174 	return hif_read32_mb(scn->mem + offset);
1175 }
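
/*
 * Usage sketch (illustrative, not part of this driver): a read-modify-write
 * of a target register relative to scn->mem. The offset and mask are
 * hypothetical; real offsets come from the target register definitions.
 */
#if 0
static void example_set_reg_bits(struct hif_opaque_softc *hif_ctx,
				 uint32_t reg_offset, uint32_t mask)
{
	uint32_t val;

	val = hif_reg_read(hif_ctx, reg_offset);
	hif_reg_write(hif_ctx, reg_offset, val | mask);
}
#endif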
1176 
1177 #if defined(HIF_USB)
1178 /**
1179  * hif_ramdump_handler(): generic ramdump handler
1180  * @scn: struct hif_opaque_softc
1181  *
1182  * Return: None
1183  */
1184 
1185 void hif_ramdump_handler(struct hif_opaque_softc *scn)
1186 
1187 {
1188 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
1189 		hif_usb_ramdump_handler();
1190 }
1191 #endif
1192 
1193 /**
1194  * hif_register_ext_group_int_handler() - API to register external group
1195  * interrupt handler.
1196  * @hif_ctx : HIF Context
1197  * @numirq: number of irq's in the group
1198  * @irq: array of irq values
1199  * @handler: callback interrupt handler function
1200  * @context: context to be passed to the callback
1201  *
1202  * Return: status
1203  */
1204 uint32_t hif_register_ext_group_int_handler(struct hif_opaque_softc *hif_ctx,
1205 		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
1206 		void *context)
1207 {
1208 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1209 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1210 	struct hif_ext_group_entry *hif_ext_group;
1211 
1212 	if (scn->ext_grp_irq_configured) {
1213 		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
1214 		return QDF_STATUS_E_FAILURE;
1215 	}
1216 
1217 	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
1218 		HIF_ERROR("%s Max groups reached\n", __func__);
1219 		return QDF_STATUS_E_FAILURE;
1220 	}
1221 
1222 	if (numirq >= HIF_MAX_GRP_IRQ) {
1223 		HIF_ERROR("%s invalid numirq\n", __func__);
1224 		return QDF_STATUS_E_FAILURE;
1225 	}
1226 
1227 	hif_ext_group = &hif_state->hif_ext_group[hif_state->hif_num_extgroup];
1228 
1229 	hif_ext_group->numirq = numirq;
1230 	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
1231 	hif_ext_group->context = context;
1232 	hif_ext_group->handler = handler;
1233 	hif_ext_group->configured = true;
1234 	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
1235 	hif_ext_group->hif_state = hif_state;
1236 
1237 	hif_state->hif_num_extgroup++;
1238 	return QDF_STATUS_SUCCESS;
1239 }
1240 
1241 /**
1242  * hif_configure_ext_group_interrupts() - API to configure external group
1243  * interrupts
1244  * @hif_ctx : HIF Context
1245  *
1246  * Return: status
1247  */
1248 uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
1249 {
1250 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1251 
1252 	if (scn->ext_grp_irq_configured) {
1253 		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
1254 		return QDF_STATUS_E_FAILURE;
1255 	}
1256 
1257 	hif_grp_irq_configure(scn);
1258 	scn->ext_grp_irq_configured = true;
1259 
1260 	return QDF_STATUS_SUCCESS;
1261 }
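
/*
 * Usage sketch (illustrative, not part of this driver): registering one
 * external interrupt group and then configuring all registered groups.
 * The irq numbers are hypothetical and platform specific; the handler is
 * supplied by the caller with the ext_intr_handler prototype from hif.h.
 */
#if 0
static void example_setup_ext_interrupts(struct hif_opaque_softc *hif_ctx,
					 ext_intr_handler handler, void *ctx)
{
	uint32_t irqs[2] = { 100, 101 };

	if (hif_register_ext_group_int_handler(hif_ctx, 2, irqs,
					       handler, ctx) !=
	    QDF_STATUS_SUCCESS)
		return;

	hif_configure_ext_group_interrupts(hif_ctx);
}
#endif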
1262 
1263 /**
1264  * hif_ext_grp_tasklet() - grp tasklet
1265  * @data: context
1266  *
1267  * Return: void
1268  */
1269 void hif_ext_grp_tasklet(unsigned long data)
1270 {
1271 	struct hif_ext_group_entry *hif_ext_group =
1272 			(struct hif_ext_group_entry *)data;
1273 	struct HIF_CE_state *hif_state = hif_ext_group->hif_state;
1274 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1275 
1276 	if (hif_ext_group->grp_id < HIF_MAX_GROUP) {
1277 		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET);
1278 		hif_grp_irq_enable(scn, hif_ext_group->grp_id);
1279 	} else {
1280 		HIF_ERROR("%s: ERROR - invalid grp_id = %d",
1281 		       __func__, hif_ext_group->grp_id);
1282 	}
1283 
1284 	qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
1285 }
1286 
1287 /**
1288  * hif_grp_tasklet_kill() - grp tasklet kill
1289  * @scn: hif_softc
1290  *
1291  * Return: void
1292  */
1293 void hif_grp_tasklet_kill(struct hif_softc *scn)
1294 {
1295 	int i;
1296 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1297 
1298 	for (i = 0; i < HIF_MAX_GROUP; i++)
1299 		if (hif_state->hif_ext_group[i].inited) {
1300 			tasklet_kill(&hif_state->hif_ext_group[i].intr_tq);
1301 			hif_state->hif_ext_group[i].inited = false;
1302 		}
1303 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
1304 }
1305