1  /*
2   * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3   * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4   *
5   * Permission to use, copy, modify, and/or distribute this software for
6   * any purpose with or without fee is hereby granted, provided that the
7   * above copyright notice and this permission notice appear in all
8   * copies.
9   *
10   * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11   * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12   * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13   * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14   * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15   * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16   * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17   * PERFORMANCE OF THIS SOFTWARE.
18   */
19  
20  #include "targcfg.h"
21  #include "qdf_lock.h"
22  #include "qdf_status.h"
24  #include <qdf_atomic.h>         /* qdf_atomic_read */
25  #include <targaddrs.h>
26  #include "hif_io32.h"
27  #include <hif.h>
28  #include <target_type.h>
29  #include "regtable.h"
30  #define ATH_MODULE_NAME hif
31  #include <a_debug.h>
32  #include "hif_main.h"
33  #include "hif_hw_version.h"
34  #if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
35       defined(HIF_IPCI))
36  #include "ce_tasklet.h"
37  #include "ce_api.h"
38  #endif
39  #include "qdf_trace.h"
41  #include "hif_debug.h"
42  #include "mp_dev.h"
43  #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
44  	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
45  	defined(QCA_WIFI_QCA5332)
46  #include "hal_api.h"
47  #endif
48  #include "hif_napi.h"
49  #include "hif_unit_test_suspend_i.h"
50  #include "qdf_module.h"
51  #ifdef HIF_CE_LOG_INFO
52  #include <qdf_notifier.h>
53  #include <qdf_hang_event_notifier.h>
54  #endif
55  #include <linux/cpumask.h>
56  
57  #include <pld_common.h>
58  #include "ce_internal.h"
59  #include <qdf_tracepoint.h>
60  #include "qdf_ssr_driver_dump.h"
61  
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
63  {
64  	hif_trigger_dump(hif_ctx, cmd_id, start);
65  }
66  
/**
 * hif_get_target_id(): get the target's virtual memory base address
 * @scn: hif_softc context
 *
 * Return the virtual memory base address to the caller.
 *
 * Return: A_target_id_t
 */
A_target_id_t hif_get_target_id(struct hif_softc *scn)
78  {
79  	return scn->mem;
80  }
81  
82  /**
 * hif_get_targetdef(): get the target definitions
84   * @hif_ctx: hif context
85   *
86   * Return: void *
87   */
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
89  {
90  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
91  
92  	return scn->targetdef;
93  }
94  
95  #ifdef FORCE_WAKE
96  #ifndef QCA_WIFI_WCN6450
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
98  			 bool init_phase)
99  {
100  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
101  
102  	if (ce_srng_based(scn))
103  		hal_set_init_phase(scn->hal_soc, init_phase);
104  }
105  #else
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
107  			 bool init_phase)
108  {
109  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
110  
111  	hal_set_init_phase(scn->hal_soc, init_phase);
112  }
113  #endif
114  #endif /* FORCE_WAKE */
115  
116  #ifdef HIF_IPCI
void hif_shutdown_notifier_cb(void *hif_ctx)
118  {
119  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
120  
121  	scn->recovery = true;
122  }
123  #endif
124  
125  /**
126   * hif_vote_link_down(): unvote for link up
127   * @hif_ctx: hif context
128   *
129   * Call hif_vote_link_down to release a previous request made using
130   * hif_vote_link_up. A hif_vote_link_down call should only be made
131   * after a corresponding hif_vote_link_up, otherwise you could be
132   * negating a vote from another source. When no votes are present
133   * hif will not guarantee the linkstate after hif_bus_suspend.
134   *
135   * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
 * and initialization/deinitialization sequences.
137   *
138   * Return: n/a
139   */
void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
141  {
142  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
143  
144  	QDF_BUG(scn);
145  	if (scn->linkstate_vote == 0)
146  		QDF_DEBUG_PANIC("linkstate_vote(%d) has already been 0",
147  				scn->linkstate_vote);
148  
149  	scn->linkstate_vote--;
150  	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
151  	if (scn->linkstate_vote == 0)
152  		hif_bus_prevent_linkdown(scn, false);
153  }
154  
155  /**
156   * hif_vote_link_up(): vote to prevent bus from suspending
157   * @hif_ctx: hif context
158   *
159   * Makes hif guarantee that fw can message the host normally
160   * during suspend.
161   *
 * SYNCHRONIZE WITH hif_vote_link_down by only calling in MC thread
 * and initialization/deinitialization sequences.
164   *
165   * Return: n/a
166   */
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
168  {
169  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
170  
171  	QDF_BUG(scn);
172  	scn->linkstate_vote++;
173  	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
174  	if (scn->linkstate_vote == 1)
175  		hif_bus_prevent_linkdown(scn, true);
176  }
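
/*
 * Illustrative pairing (sketch only, not part of the original file): a
 * caller that must keep the link up across a firmware exchange votes up,
 * does its work, then releases the vote:
 *
 *	hif_vote_link_up(hif_ctx);
 *	... exchange messages with firmware ...
 *	hif_vote_link_down(hif_ctx);
 */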
177  
178  /**
179   * hif_can_suspend_link(): query if hif is permitted to suspend the link
180   * @hif_ctx: hif context
181   *
 * Hif will ensure that the link won't be suspended if the upper layers
 * don't want it to.
 *
 * SYNCHRONIZATION: MC thread is stopped before bus suspend, thus
 * we don't need extra locking to ensure votes don't change while
 * we are in the process of suspending or resuming.
188   *
189   * Return: false if hif will guarantee link up during suspend.
190   */
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
192  {
193  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
194  
195  	QDF_BUG(scn);
196  	return scn->linkstate_vote == 0;
197  }
198  
199  /**
 * hif_hia_item_address(): get the address of a host interest area item
 * @target_type: target type
 * @item_offset: offset of the item within the host interest area
 *
 * Return: address of the host interest item
205   */
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
207  {
208  	switch (target_type) {
209  	case TARGET_TYPE_AR6002:
210  		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
211  	case TARGET_TYPE_AR6003:
212  		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
213  	case TARGET_TYPE_AR6004:
214  		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
215  	case TARGET_TYPE_AR6006:
216  		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
217  	case TARGET_TYPE_AR9888:
218  		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
219  	case TARGET_TYPE_AR6320:
220  	case TARGET_TYPE_AR6320V2:
221  		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
222  	case TARGET_TYPE_ADRASTEA:
223  		/* ADRASTEA doesn't have a host interest address */
224  		ASSERT(0);
225  		return 0;
226  	case TARGET_TYPE_AR900B:
227  		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
228  	case TARGET_TYPE_QCA9984:
229  		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
230  	case TARGET_TYPE_QCA9888:
231  		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
232  
233  	default:
234  		ASSERT(0);
235  		return 0;
236  	}
237  }
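
/*
 * Illustrative use (sketch only; names such as tgt_type, hif_hdl and val are
 * assumed): callers typically combine the returned address with a host
 * interest item offset and read it through the diag interface, e.g.
 *
 *	addr = hif_hia_item_address(tgt_type,
 *				    offsetof(struct host_interest_s,
 *					     hi_board_data));
 *	hif_diag_read_access(hif_hdl, addr, &val);
 */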
238  
239  /**
 * hif_max_num_receives_reached() - check whether the max receive count is reached
 * @scn: HIF Context
 * @count: current receive count
 *
 * Return: true if the receive limit has been reached, false otherwise
247   */
bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
249  {
250  	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
251  		return count > 120;
252  	else
253  		return count > MAX_NUM_OF_RECEIVES;
254  }
255  
256  /**
 * init_buffer_count() - initial buffer count
 * @maxSize: maximum buffer count
 *
 * Routine to modify the initial buffer count to be allocated on an OS
 * platform basis. Platform owners will need to modify this as needed.
 *
 * Return: initial buffer count
264   */
qdf_size_t init_buffer_count(qdf_size_t maxSize)
266  {
267  	return maxSize;
268  }
269  
270  /**
271   * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
272   * @hif_ctx: hif context
273   * @htc_htt_tx_endpoint: htt_tx_endpoint
274   *
275   * Return: void
276   */
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
278  							int htc_htt_tx_endpoint)
279  {
280  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
281  
282  	if (!scn) {
283  		hif_err("scn or scn->hif_sc is NULL!");
284  		return;
285  	}
286  
287  	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
288  }
289  qdf_export_symbol(hif_save_htc_htt_config_endpoint);
290  
291  static const struct qwlan_hw qwlan_hw_list[] = {
292  	{
293  		.id = AR6320_REV1_VERSION,
294  		.subid = 0,
295  		.name = "QCA6174_REV1",
296  	},
297  	{
298  		.id = AR6320_REV1_1_VERSION,
299  		.subid = 0x1,
300  		.name = "QCA6174_REV1_1",
301  	},
302  	{
303  		.id = AR6320_REV1_3_VERSION,
304  		.subid = 0x2,
305  		.name = "QCA6174_REV1_3",
306  	},
307  	{
308  		.id = AR6320_REV2_1_VERSION,
309  		.subid = 0x4,
310  		.name = "QCA6174_REV2_1",
311  	},
312  	{
313  		.id = AR6320_REV2_1_VERSION,
314  		.subid = 0x5,
315  		.name = "QCA6174_REV2_2",
316  	},
317  	{
318  		.id = AR6320_REV3_VERSION,
319  		.subid = 0x6,
320  		.name = "QCA6174_REV2.3",
321  	},
322  	{
323  		.id = AR6320_REV3_VERSION,
324  		.subid = 0x8,
325  		.name = "QCA6174_REV3",
326  	},
327  	{
328  		.id = AR6320_REV3_VERSION,
329  		.subid = 0x9,
330  		.name = "QCA6174_REV3_1",
331  	},
332  	{
333  		.id = AR6320_REV3_2_VERSION,
334  		.subid = 0xA,
335  		.name = "AR6320_REV3_2_VERSION",
336  	},
337  	{
338  		.id = QCA6390_V1,
339  		.subid = 0x0,
340  		.name = "QCA6390_V1",
341  	},
342  	{
343  		.id = QCA6490_V1,
344  		.subid = 0x0,
345  		.name = "QCA6490_V1",
346  	},
347  	{
348  		.id = WCN3990_v1,
349  		.subid = 0x0,
350  		.name = "WCN3990_V1",
351  	},
352  	{
353  		.id = WCN3990_v2,
354  		.subid = 0x0,
355  		.name = "WCN3990_V2",
356  	},
357  	{
358  		.id = WCN3990_v2_1,
359  		.subid = 0x0,
360  		.name = "WCN3990_V2.1",
361  	},
362  	{
363  		.id = WCN3998,
364  		.subid = 0x0,
365  		.name = "WCN3998",
366  	},
367  	{
368  		.id = QCA9379_REV1_VERSION,
369  		.subid = 0xC,
370  		.name = "QCA9379_REV1",
371  	},
372  	{
373  		.id = QCA9379_REV1_VERSION,
374  		.subid = 0xD,
375  		.name = "QCA9379_REV1_1",
376  	},
377  	{
378  		.id = MANGO_V1,
379  		.subid = 0xF,
380  		.name = "MANGO_V1",
381  	},
382  	{
383  		.id = PEACH_V1,
384  		.subid = 0,
385  		.name = "PEACH_V1",
386  	},
387  
388  	{
389  		.id = KIWI_V1,
390  		.subid = 0,
391  		.name = "KIWI_V1",
392  	},
393  	{
394  		.id = KIWI_V2,
395  		.subid = 0,
396  		.name = "KIWI_V2",
397  	},
398  	{
399  		.id = WCN6750_V1,
400  		.subid = 0,
401  		.name = "WCN6750_V1",
402  	},
403  	{
404  		.id = WCN6750_V2,
405  		.subid = 0,
406  		.name = "WCN6750_V2",
407  	},
408  	{
409  		.id = WCN6450_V1,
410  		.subid = 0,
411  		.name = "WCN6450_V1",
412  	},
413  	{
414  		.id = QCA6490_v2_1,
415  		.subid = 0,
416  		.name = "QCA6490",
417  	},
418  	{
419  		.id = QCA6490_v2,
420  		.subid = 0,
421  		.name = "QCA6490",
422  	},
423  	{
424  		.id = WCN3990_TALOS,
425  		.subid = 0,
426  		.name = "WCN3990",
427  	},
428  	{
429  		.id = WCN3990_MOOREA,
430  		.subid = 0,
431  		.name = "WCN3990",
432  	},
433  	{
434  		.id = WCN3990_SAIPAN,
435  		.subid = 0,
436  		.name = "WCN3990",
437  	},
438  	{
439  		.id = WCN3990_RENNELL,
440  		.subid = 0,
441  		.name = "WCN3990",
442  	},
443  	{
444  		.id = WCN3990_BITRA,
445  		.subid = 0,
446  		.name = "WCN3990",
447  	},
448  	{
449  		.id = WCN3990_DIVAR,
450  		.subid = 0,
451  		.name = "WCN3990",
452  	},
453  	{
454  		.id = WCN3990_ATHERTON,
455  		.subid = 0,
456  		.name = "WCN3990",
457  	},
458  	{
459  		.id = WCN3990_STRAIT,
460  		.subid = 0,
461  		.name = "WCN3990",
462  	},
463  	{
464  		.id = WCN3990_NETRANI,
465  		.subid = 0,
466  		.name = "WCN3990",
467  	},
468  	{
469  		.id = WCN3990_CLARENCE,
470  		.subid = 0,
471  		.name = "WCN3990",
472  	}
473  };
474  
475  /**
476   * hif_get_hw_name(): get a human readable name for the hardware
477   * @info: Target Info
478   *
479   * Return: human readable name for the underlying wifi hardware.
480   */
static const char *hif_get_hw_name(struct hif_target_info *info)
482  {
483  	int i;
484  
485  	hif_debug("target version = %d, target revision = %d",
486  		  info->target_version,
487  		  info->target_revision);
488  
489  	if (info->hw_name)
490  		return info->hw_name;
491  
492  	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
493  		if (info->target_version == qwlan_hw_list[i].id &&
494  		    info->target_revision == qwlan_hw_list[i].subid) {
495  			return qwlan_hw_list[i].name;
496  		}
497  	}
498  
499  	info->hw_name = qdf_mem_malloc(64);
500  	if (!info->hw_name)
501  		return "Unknown Device (nomem)";
502  
503  	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
504  			info->target_version);
505  	if (i < 0)
506  		return "Unknown Device (snprintf failure)";
507  	else
508  		return info->hw_name;
509  }
510  
511  /**
 * hif_get_hw_info(): retrieve the hardware version, revision and name
 * @scn: hif context
 * @version: output buffer for the target version
 * @revision: output buffer for the target revision
 * @target_name: output buffer for the human readable target name
517   *
518   * Return: n/a
519   */
void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
521  			const char **target_name)
522  {
523  	struct hif_target_info *info = hif_get_target_info_handle(scn);
524  	struct hif_softc *sc = HIF_GET_SOFTC(scn);
525  
526  	if (sc->bus_type == QDF_BUS_TYPE_USB)
527  		hif_usb_get_hw_info(sc);
528  
529  	*version = info->target_version;
530  	*revision = info->target_revision;
531  	*target_name = hif_get_hw_name(info);
532  }
533  
534  /**
535   * hif_get_dev_ba(): API to get device base address.
536   * @hif_handle: hif handle
537   *
538   * Return: device base address
539   */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
541  {
542  	struct hif_softc *scn = (struct hif_softc *)hif_handle;
543  
544  	return scn->mem;
545  }
546  qdf_export_symbol(hif_get_dev_ba);
547  
548  /**
549   * hif_get_dev_ba_ce(): API to get device ce base address.
550   * @hif_handle: hif handle
551   *
552   * Return: dev mem base address for CE
553   */
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
555  {
556  	struct hif_softc *scn = (struct hif_softc *)hif_handle;
557  
558  	return scn->mem_ce;
559  }
560  
561  qdf_export_symbol(hif_get_dev_ba_ce);
562  
563  /**
564   * hif_get_dev_ba_pmm(): API to get device pmm base address.
 * @hif_handle: hif handle
566   *
567   * Return: dev mem base address for PMM
568   */
569  
void *hif_get_dev_ba_pmm(struct hif_opaque_softc *hif_handle)
571  {
572  	struct hif_softc *scn = (struct hif_softc *)hif_handle;
573  
574  	return scn->mem_pmm_base;
575  }
576  
577  qdf_export_symbol(hif_get_dev_ba_pmm);
578  
uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle)
580  {
581  	struct hif_softc *scn = (struct hif_softc *)hif_handle;
582  
583  	return scn->target_info.soc_version;
584  }
585  
586  qdf_export_symbol(hif_get_soc_version);
587  
588  /**
 * hif_get_dev_ba_cmem(): API to get device CMEM base address.
590   * @hif_handle: hif handle
591   *
592   * Return: dev mem base address for CMEM
593   */
void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle)
595  {
596  	struct hif_softc *scn = (struct hif_softc *)hif_handle;
597  
598  	return scn->mem_cmem;
599  }
600  
601  qdf_export_symbol(hif_get_dev_ba_cmem);
602  
603  #ifdef FEATURE_RUNTIME_PM
void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
605  {
606  	if (is_get)
607  		qdf_runtime_pm_prevent_suspend(&scn->prevent_linkdown_lock);
608  	else
609  		qdf_runtime_pm_allow_suspend(&scn->prevent_linkdown_lock);
610  }
611  
612  static inline
void hif_rtpm_lock_init(struct hif_softc *scn)
614  {
615  	qdf_runtime_lock_init(&scn->prevent_linkdown_lock);
616  }
617  
618  static inline
void hif_rtpm_lock_deinit(struct hif_softc *scn)
620  {
621  	qdf_runtime_lock_deinit(&scn->prevent_linkdown_lock);
622  }
623  #else
624  static inline
void hif_rtpm_lock_init(struct hif_softc *scn)
626  {
627  }
628  
629  static inline
void hif_rtpm_lock_deinit(struct hif_softc *scn)
631  {
632  }
633  #endif
634  
635  #ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
636  /**
637   * hif_get_interrupt_threshold_cfg_from_psoc() - Retrieve ini cfg from psoc
638   * @scn: hif context
639   * @psoc: psoc objmgr handle
640   *
641   * Return: None
642   */
643  static inline
void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
645  					       struct wlan_objmgr_psoc *psoc)
646  {
647  	if (psoc) {
648  		scn->ini_cfg.ce_status_ring_timer_threshold =
649  			cfg_get(psoc,
650  				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
651  		scn->ini_cfg.ce_status_ring_batch_count_threshold =
652  			cfg_get(psoc,
653  				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
654  	}
655  }
656  #else
657  static inline
void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
659  					       struct wlan_objmgr_psoc *psoc)
660  {
661  }
662  #endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
663  
664  /**
665   * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
666   * @scn: hif context
667   * @psoc: psoc objmgr handle
668   *
669   * Return: None
670   */
671  static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
673  			   struct wlan_objmgr_psoc *psoc)
674  {
675  	if (psoc) {
676  		scn->ini_cfg.disable_wake_irq =
677  			cfg_get(psoc, CFG_DISABLE_WAKE_IRQ);
		/*
		 * The wake IRQ can't share the same IRQ with the copy engines.
		 * In one-MSI mode, we don't know whether the wake IRQ was
		 * triggered or not in the wake IRQ handler (known issue
		 * CR 2055359). To support the wake IRQ, allocate at least
		 * 2 MSI vectors: the first is for the wake IRQ, while the
		 * others share the second vector.
		 */
686  		if (pld_is_one_msi(scn->qdf_dev->dev)) {
687  			hif_debug("Disable wake IRQ once it is one MSI mode");
688  			scn->ini_cfg.disable_wake_irq = true;
689  		}
690  		hif_get_interrupt_threshold_cfg_from_psoc(scn, psoc);
691  	}
692  }
693  
694  #if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
695  /**
696   * hif_recovery_notifier_cb - Recovery notifier callback to log
697   *  hang event data
698   * @block: notifier block
699   * @state: state
700   * @data: notifier data
701   *
702   * Return: status
703   */
704  static
int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
706  			     void *data)
707  {
708  	struct qdf_notifer_data *notif_data = data;
709  	qdf_notif_block *notif_block;
710  	struct hif_softc *hif_handle;
711  	bool bus_id_invalid;
712  
713  	if (!data || !block)
714  		return -EINVAL;
715  
716  	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);
717  
718  	hif_handle = notif_block->priv_data;
719  	if (!hif_handle)
720  		return -EINVAL;
721  
722  	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
723  					  &notif_data->offset);
724  	if (bus_id_invalid)
725  		return NOTIFY_STOP_MASK;
726  
727  	hif_log_ce_info(hif_handle, notif_data->hang_data,
728  			&notif_data->offset);
729  
730  	return 0;
731  }
732  
733  /**
734   * hif_register_recovery_notifier - Register hif recovery notifier
735   * @hif_handle: hif handle
736   *
737   * Return: status
738   */
739  static
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
741  {
742  	qdf_notif_block *hif_notifier;
743  
744  	if (!hif_handle)
745  		return QDF_STATUS_E_FAILURE;
746  
747  	hif_notifier = &hif_handle->hif_recovery_notifier;
748  
749  	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
750  	hif_notifier->priv_data = hif_handle;
751  	return qdf_hang_event_register_notifier(hif_notifier);
752  }
753  
754  /**
755   * hif_unregister_recovery_notifier - Un-register hif recovery notifier
756   * @hif_handle: hif handle
757   *
758   * Return: status
759   */
760  static
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
762  {
763  	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;
764  
765  	return qdf_hang_event_unregister_notifier(hif_notifier);
766  }
767  #else
768  static inline
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
770  {
771  	return QDF_STATUS_SUCCESS;
772  }
773  
774  static inline
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
776  {
777  	return QDF_STATUS_SUCCESS;
778  }
779  #endif
780  
781  #if defined(HIF_CPU_PERF_AFFINE_MASK) || \
782  	defined(FEATURE_ENABLE_CE_DP_IRQ_AFFINE)
783  /**
784   * __hif_cpu_hotplug_notify() - CPU hotplug event handler
785   * @context: HIF context
786   * @cpu: CPU Id of the CPU generating the event
787   * @cpu_up: true if the CPU is online
788   *
789   * Return: None
790   */
static void __hif_cpu_hotplug_notify(void *context,
792  				     uint32_t cpu, bool cpu_up)
793  {
794  	struct hif_softc *scn = context;
795  
796  	if (!scn)
797  		return;
798  	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
799  		return;
800  
801  	if (cpu_up) {
802  		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
803  		hif_debug("Setting affinity for online CPU: %d", cpu);
804  	} else {
805  		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
806  	}
807  }
808  
809  /**
810   * hif_cpu_hotplug_notify - cpu core up/down notification
811   * handler
812   * @context: HIF context
813   * @cpu: CPU generating the event
814   * @cpu_up: true if the CPU is online
815   *
816   * Return: None
817   */
static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
819  {
820  	struct qdf_op_sync *op_sync;
821  
822  	if (qdf_op_protect(&op_sync))
823  		return;
824  
825  	__hif_cpu_hotplug_notify(context, cpu, cpu_up);
826  
827  	qdf_op_unprotect(op_sync);
828  }
829  
static void hif_cpu_online_cb(void *context, uint32_t cpu)
831  {
832  	hif_cpu_hotplug_notify(context, cpu, true);
833  }
834  
static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
836  {
837  	hif_cpu_hotplug_notify(context, cpu, false);
838  }
839  
static void hif_cpuhp_register(struct hif_softc *scn)
841  {
842  	if (!scn) {
843  		hif_info_high("cannot register hotplug notifiers");
844  		return;
845  	}
846  	qdf_cpuhp_register(&scn->cpuhp_event_handle,
847  			   scn,
848  			   hif_cpu_online_cb,
849  			   hif_cpu_before_offline_cb);
850  }
851  
static void hif_cpuhp_unregister(struct hif_softc *scn)
853  {
854  	if (!scn) {
855  		hif_info_high("cannot unregister hotplug notifiers");
856  		return;
857  	}
858  	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
859  }
860  
861  #else
static void hif_cpuhp_register(struct hif_softc *scn)
863  {
864  }
865  
static void hif_cpuhp_unregister(struct hif_softc *scn)
867  {
868  }
#endif /* HIF_CPU_PERF_AFFINE_MASK || FEATURE_ENABLE_CE_DP_IRQ_AFFINE */
870  
871  #ifdef HIF_DETECTION_LATENCY_ENABLE
872  /*
873   * Bitmask to control enablement of latency detection for the tasklets,
874   * bit-X represents for tasklet of WLAN_CE_X.
875   */
876  #ifndef DETECTION_LATENCY_TASKLET_MASK
877  #define DETECTION_LATENCY_TASKLET_MASK (BIT(2) | BIT(7))
878  #endif
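
/*
 * With the default mask above (BIT(2) | BIT(7)), only the WLAN_CE_2 and
 * WLAN_CE_7 tasklets are monitored for latency.
 */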
879  
880  static inline int
__hif_tasklet_latency(struct hif_softc *scn, bool from_timer, int idx)
882  {
883  	qdf_time_t sched_time =
884  		scn->latency_detect.tasklet_info[idx].sched_time;
885  	qdf_time_t exec_time =
886  		scn->latency_detect.tasklet_info[idx].exec_time;
887  	qdf_time_t curr_time = qdf_system_ticks();
888  	uint32_t threshold = scn->latency_detect.threshold;
889  	qdf_time_t expect_exec_time =
890  		sched_time + qdf_system_msecs_to_ticks(threshold);
891  
	/* Two kinds of check here.
	 * from_timer==true:  check whether the tasklet has stalled
	 * from_timer==false: check whether the tasklet execution came late
	 */
896  	if (from_timer ?
897  	    (qdf_system_time_after(sched_time, exec_time) &&
898  	     qdf_system_time_after(curr_time, expect_exec_time)) :
899  	    qdf_system_time_after(exec_time, expect_exec_time)) {
900  		hif_err("tasklet[%d] latency detected: from_timer %d, curr_time %lu, sched_time %lu, exec_time %lu, threshold %ums, timeout %ums, cpu_id %d, called: %ps",
901  			idx, from_timer, curr_time, sched_time,
902  			exec_time, threshold,
903  			scn->latency_detect.timeout,
904  			qdf_get_cpu(), (void *)_RET_IP_);
905  		qdf_trigger_self_recovery(NULL,
906  					  QDF_TASKLET_CREDIT_LATENCY_DETECT);
907  		return -ETIMEDOUT;
908  	}
909  
910  	return 0;
911  }
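
/*
 * Worked example (illustrative numbers): with threshold = 40 ms, a tasklet
 * scheduled at tick T must execute before T + qdf_system_msecs_to_ticks(40).
 * The timer path (from_timer == true) flags it if it still has not run by
 * then, while the execution path (from_timer == false) flags it if it
 * finally runs after that deadline.
 */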
912  
913  /**
914   * hif_tasklet_latency_detect_enabled() - check whether latency detect
915   * is enabled for the tasklet which is specified by idx
916   * @scn: HIF opaque context
917   * @idx: CE id
918   *
919   * Return: true if latency detect is enabled for the specified tasklet,
920   * false otherwise.
921   */
922  static inline bool
hif_tasklet_latency_detect_enabled(struct hif_softc *scn, int idx)
924  {
925  	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
926  		return false;
927  
928  	if (!scn->latency_detect.enable_detection)
929  		return false;
930  
931  	if (idx < 0 || idx >= HIF_TASKLET_IN_MONITOR ||
932  	    !qdf_test_bit(idx, scn->latency_detect.tasklet_bmap))
933  		return false;
934  
935  	return true;
936  }
937  
void hif_tasklet_latency_record_exec(struct hif_softc *scn, int idx)
939  {
940  	if (!hif_tasklet_latency_detect_enabled(scn, idx))
941  		return;
942  
	/*
	 * hif_set_enable_detection(true) might come between
	 * hif_tasklet_latency_record_sched() and
	 * hif_tasklet_latency_record_exec() during wlan startup; then
	 * sched_time is 0 but exec_time is not, which hits the timeout case
	 * in __hif_tasklet_latency().
	 * To avoid this, skip recording exec_time if sched_time has not
	 * been recorded.
	 */
952  	if (!scn->latency_detect.tasklet_info[idx].sched_time)
953  		return;
954  
955  	scn->latency_detect.tasklet_info[idx].exec_time = qdf_system_ticks();
956  	__hif_tasklet_latency(scn, false, idx);
957  }
958  
void hif_tasklet_latency_record_sched(struct hif_softc *scn, int idx)
960  {
961  	if (!hif_tasklet_latency_detect_enabled(scn, idx))
962  		return;
963  
964  	scn->latency_detect.tasklet_info[idx].sched_cpuid = qdf_get_cpu();
965  	scn->latency_detect.tasklet_info[idx].sched_time = qdf_system_ticks();
966  }
967  
static inline void hif_credit_latency(struct hif_softc *scn, bool from_timer)
969  {
970  	qdf_time_t credit_request_time =
971  		scn->latency_detect.credit_request_time;
972  	qdf_time_t credit_report_time = scn->latency_detect.credit_report_time;
973  	qdf_time_t curr_jiffies = qdf_system_ticks();
974  	uint32_t threshold = scn->latency_detect.threshold;
975  	int cpu_id = qdf_get_cpu();
976  
	/* Two kinds of check here.
	 * from_timer==true:  check whether the credit report has stalled
	 * from_timer==false: check whether the credit report came late
	 */
981  
982  	if ((from_timer ?
983  	     qdf_system_time_after(credit_request_time, credit_report_time) :
984  	     qdf_system_time_after(credit_report_time, credit_request_time)) &&
985  	    qdf_system_time_after(curr_jiffies,
986  				  credit_request_time +
987  				  qdf_system_msecs_to_ticks(threshold))) {
988  		hif_err("credit report latency: from timer %d, curr_jiffies %lu, credit_request_time %lu, credit_report_time %lu, threshold %ums, timeout %ums, cpu_id %d, called: %ps",
989  			from_timer, curr_jiffies, credit_request_time,
990  			credit_report_time, threshold,
991  			scn->latency_detect.timeout,
992  			cpu_id, (void *)_RET_IP_);
993  		goto latency;
994  	}
995  	return;
996  
997  latency:
998  	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
999  }
1000  
static inline void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
1002  {
1003  	int i, ret;
1004  
1005  	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
1006  		if (!qdf_test_bit(i, scn->latency_detect.tasklet_bmap))
1007  			continue;
1008  
1009  		ret = __hif_tasklet_latency(scn, from_timer, i);
1010  		if (ret)
1011  			return;
1012  	}
1013  }
1014  
1015  /**
 * hif_check_detection_latency(): check for tasklet/credit latency
 *
 * @scn: hif context
 * @from_timer: true if called from the timer handler
 * @bitmap_type: indicates whether to check tasklet, credit, or both
1021   *
1022   * Return: none
1023   */
void hif_check_detection_latency(struct hif_softc *scn,
1025  				 bool from_timer,
1026  				 uint32_t bitmap_type)
1027  {
1028  	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1029  		return;
1030  
1031  	if (!scn->latency_detect.enable_detection)
1032  		return;
1033  
1034  	if (bitmap_type & BIT(HIF_DETECT_TASKLET))
1035  		hif_tasklet_latency(scn, from_timer);
1036  
1037  	if (bitmap_type & BIT(HIF_DETECT_CREDIT))
1038  		hif_credit_latency(scn, from_timer);
1039  }
1040  
static void hif_latency_detect_timeout_handler(void *arg)
1042  {
1043  	struct hif_softc *scn = (struct hif_softc *)arg;
1044  	int next_cpu, i;
1045  	qdf_cpu_mask cpu_mask = {0};
1046  	struct hif_latency_detect *detect = &scn->latency_detect;
1047  
1048  	hif_check_detection_latency(scn, true,
1049  				    BIT(HIF_DETECT_TASKLET) |
1050  				    BIT(HIF_DETECT_CREDIT));
1051  
	/* The timer needs to start on a different CPU so that it can
	 * detect a stalled tasklet schedule. There is still a chance
	 * that, after the timer has been started, the irq/tasklet lands
	 * on the same CPU and the tasklet executes before the softirq
	 * timer; if that tasklet stalls, this timer can't detect it.
	 * We accept this as a limitation: if the tasklet stalls, some
	 * other path will still detect it, just a little later.
	 */
1061  	qdf_cpumask_copy(&cpu_mask, (const qdf_cpu_mask *)cpu_active_mask);
1062  	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
1063  		if (!qdf_test_bit(i, detect->tasklet_bmap))
1064  			continue;
1065  
1066  		qdf_cpumask_clear_cpu(detect->tasklet_info[i].sched_cpuid,
1067  				      &cpu_mask);
1068  	}
1069  
1070  	next_cpu = cpumask_first(&cpu_mask);
1071  	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
1072  		hif_debug("start timer on local");
		/* no available CPU found, start on the local CPU */
1074  		qdf_timer_mod(&detect->timer, detect->timeout);
1075  	} else {
1076  		qdf_timer_start_on(&detect->timer, detect->timeout, next_cpu);
1077  	}
1078  }
1079  
static void hif_latency_detect_timer_init(struct hif_softc *scn)
1081  {
1082  	scn->latency_detect.timeout =
1083  		DETECTION_TIMER_TIMEOUT;
1084  	scn->latency_detect.threshold =
1085  		DETECTION_LATENCY_THRESHOLD;
1086  
1087  	hif_info("timer timeout %u, latency threshold %u",
1088  		 scn->latency_detect.timeout,
1089  		 scn->latency_detect.threshold);
1090  
1091  	scn->latency_detect.is_timer_started = false;
1092  
1093  	qdf_timer_init(NULL,
1094  		       &scn->latency_detect.timer,
1095  		       &hif_latency_detect_timeout_handler,
1096  		       scn,
1097  		       QDF_TIMER_TYPE_SW_SPIN);
1098  }
1099  
static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
1101  {
1102  	hif_info("deinit timer");
1103  	qdf_timer_free(&scn->latency_detect.timer);
1104  }
1105  
static void hif_latency_detect_init(struct hif_softc *scn)
1107  {
1108  	uint32_t tasklet_mask;
1109  	int i;
1110  
1111  	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1112  		return;
1113  
1114  	tasklet_mask = DETECTION_LATENCY_TASKLET_MASK;
1115  	hif_info("tasklet mask is 0x%x", tasklet_mask);
1116  	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
1117  		if (BIT(i) & tasklet_mask)
1118  			qdf_set_bit(i, scn->latency_detect.tasklet_bmap);
1119  	}
1120  
1121  	hif_latency_detect_timer_init(scn);
1122  }
1123  
static void hif_latency_detect_deinit(struct hif_softc *scn)
1125  {
1126  	int i;
1127  
1128  	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1129  		return;
1130  
1131  	hif_latency_detect_timer_deinit(scn);
1132  	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++)
1133  		qdf_clear_bit(i, scn->latency_detect.tasklet_bmap);
1134  }
1135  
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
1137  {
1138  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1139  
1140  	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1141  		return;
1142  
1143  	hif_debug_rl("start timer");
1144  	if (scn->latency_detect.is_timer_started) {
1145  		hif_info("timer has been started");
1146  		return;
1147  	}
1148  
1149  	qdf_timer_start(&scn->latency_detect.timer,
1150  			scn->latency_detect.timeout);
1151  	scn->latency_detect.is_timer_started = true;
1152  }
1153  
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
1155  {
1156  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1157  
1158  	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1159  		return;
1160  
1161  	hif_debug_rl("stop timer");
1162  
1163  	qdf_timer_sync_cancel(&scn->latency_detect.timer);
1164  	scn->latency_detect.is_timer_started = false;
1165  }
1166  
void hif_latency_detect_credit_record_time(
1168  	enum hif_credit_exchange_type type,
1169  	struct hif_opaque_softc *hif_ctx)
1170  {
1171  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1172  
1173  	if (!scn) {
		hif_err("scn is NULL");
1175  		return;
1176  	}
1177  
1178  	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1179  		return;
1180  
1181  	if (HIF_REQUEST_CREDIT == type)
1182  		scn->latency_detect.credit_request_time = qdf_system_ticks();
1183  	else if (HIF_PROCESS_CREDIT_REPORT == type)
1184  		scn->latency_detect.credit_report_time = qdf_system_ticks();
1185  
1186  	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
1187  }
1188  
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
1190  {
1191  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1192  
1193  	if (!scn) {
		hif_err("scn is NULL");
1195  		return;
1196  	}
1197  
1198  	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1199  		return;
1200  
1201  	scn->latency_detect.enable_detection = value;
1202  }
1203  #else
static inline void hif_latency_detect_init(struct hif_softc *scn)
1205  {}
1206  
static inline void hif_latency_detect_deinit(struct hif_softc *scn)
1208  {}
1209  #endif
1210  
1211  #ifdef WLAN_FEATURE_AFFINITY_MGR
1212  #define AFFINITY_THRESHOLD 5000000
1213  static inline void
hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
1215  {
1216  	unsigned int cpus;
1217  	qdf_cpu_mask allowed_mask = {0};
1218  
1219  	scn->affinity_mgr_supported =
1220  		(cfg_get(psoc, CFG_IRQ_AFFINE_AUDIO_USE_CASE) &&
1221  		qdf_walt_get_cpus_taken_supported());
1222  
1223  	hif_info("Affinity Manager supported: %d", scn->affinity_mgr_supported);
1224  
1225  	if (!scn->affinity_mgr_supported)
1226  		return;
1227  
1228  	scn->time_threshold = AFFINITY_THRESHOLD;
1229  	qdf_for_each_possible_cpu(cpus)
1230  		if (qdf_topology_physical_package_id(cpus) ==
1231  			CPU_CLUSTER_TYPE_LITTLE)
1232  			qdf_cpumask_set_cpu(cpus, &allowed_mask);
1233  	qdf_cpumask_copy(&scn->allowed_mask, &allowed_mask);
1234  }
1235  #else
1236  static inline void
hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
1238  {
1239  }
1240  #endif
1241  
1242  #ifdef FEATURE_DIRECT_LINK
1243  /**
1244   * hif_init_direct_link_rcv_pipe_num(): Initialize the direct link receive
1245   *  pipe number
1246   * @scn: hif context
1247   *
1248   * Return: None
1249   */
1250  static inline
void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
1252  {
1253  	scn->dl_recv_pipe_num = INVALID_PIPE_NO;
1254  }
1255  #else
1256  static inline
void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
1258  {
1259  }
1260  #endif
1261  
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1263  				  uint32_t mode,
1264  				  enum qdf_bus_type bus_type,
1265  				  struct hif_driver_state_callbacks *cbk,
1266  				  struct wlan_objmgr_psoc *psoc)
1267  {
1268  	struct hif_softc *scn;
1269  	QDF_STATUS status = QDF_STATUS_SUCCESS;
1270  	int bus_context_size = hif_bus_get_context_size(bus_type);
1271  
1272  	if (bus_context_size == 0) {
1273  		hif_err("context size 0 not allowed");
1274  		return NULL;
1275  	}
1276  
1277  	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
1278  	if (!scn)
1279  		return GET_HIF_OPAQUE_HDL(scn);
1280  
1281  	scn->qdf_dev = qdf_ctx;
1282  	scn->hif_con_param = mode;
1283  	qdf_atomic_init(&scn->active_tasklet_cnt);
1284  	qdf_atomic_init(&scn->active_oom_work_cnt);
1285  
1286  	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
1287  	qdf_atomic_init(&scn->link_suspended);
1288  	qdf_atomic_init(&scn->tasklet_from_intr);
1289  	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
1290  	qdf_mem_copy(&scn->callbacks, cbk,
1291  		     sizeof(struct hif_driver_state_callbacks));
1292  	scn->bus_type  = bus_type;
1293  
1294  	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
1295  	hif_get_cfg_from_psoc(scn, psoc);
1296  
1297  	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
1298  	status = hif_bus_open(scn, bus_type);
1299  	if (status != QDF_STATUS_SUCCESS) {
1300  		hif_err("hif_bus_open error = %d, bus_type = %d",
1301  			status, bus_type);
1302  		qdf_mem_free(scn);
1303  		scn = NULL;
1304  		goto out;
1305  	}
1306  
1307  	hif_rtpm_lock_init(scn);
1308  
1309  	hif_cpuhp_register(scn);
1310  	hif_latency_detect_init(scn);
1311  	hif_affinity_mgr_init(scn, psoc);
1312  	hif_init_direct_link_rcv_pipe_num(scn);
1313  	hif_ce_desc_history_log_register(scn);
1314  	hif_desc_history_log_register();
1315  	qdf_ssr_driver_dump_register_region("hif", scn, sizeof(*scn));
1316  
1317  out:
1318  	return GET_HIF_OPAQUE_HDL(scn);
1319  }
1320  
1321  #ifdef ADRASTEA_RRI_ON_DDR
1322  /**
1323   * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
1324   * @scn: hif context
1325   *
1326   * Return: none
1327   */
void hif_uninit_rri_on_ddr(struct hif_softc *scn)
1329  {
1330  	if (scn->vaddr_rri_on_ddr)
1331  		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1332  					RRI_ON_DDR_MEM_SIZE,
1333  					scn->vaddr_rri_on_ddr,
1334  					scn->paddr_rri_on_ddr, 0);
1335  	scn->vaddr_rri_on_ddr = NULL;
1336  }
1337  #endif
1338  
1339  /**
 * hif_close(): close the hif layer and free the context
1341   * @hif_ctx: hif_ctx
1342   *
1343   * Return: n/a
1344   */
void hif_close(struct hif_opaque_softc *hif_ctx)
1346  {
1347  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1348  
1349  	if (!scn) {
1350  		hif_err("hif_opaque_softc is NULL");
1351  		return;
1352  	}
1353  
1354  	qdf_ssr_driver_dump_unregister_region("hif");
1355  	hif_desc_history_log_unregister();
1356  	hif_ce_desc_history_log_unregister();
1357  	hif_latency_detect_deinit(scn);
1358  
1359  	if (scn->athdiag_procfs_inited) {
1360  		athdiag_procfs_remove();
1361  		scn->athdiag_procfs_inited = false;
1362  	}
1363  
1364  	if (scn->target_info.hw_name) {
1365  		char *hw_name = scn->target_info.hw_name;
1366  
1367  		scn->target_info.hw_name = "ErrUnloading";
1368  		qdf_mem_free(hw_name);
1369  	}
1370  
1371  	hif_uninit_rri_on_ddr(scn);
1372  	hif_cleanup_static_buf_to_target(scn);
1373  	hif_cpuhp_unregister(scn);
1374  	hif_rtpm_lock_deinit(scn);
1375  
1376  	hif_bus_close(scn);
1377  
1378  	qdf_mem_free(scn);
1379  }
1380  
1381  /**
1382   * hif_get_num_active_grp_tasklets() - get the number of active
1383   *		datapath group tasklets pending to be completed.
1384   * @scn: HIF context
1385   *
1386   * Returns: the number of datapath group tasklets which are active
1387   */
static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
1389  {
1390  	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
1391  }
1392  
1393  #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1394  	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1395  	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1396  	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1397  	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1398  	defined(QCA_WIFI_QCN6432) || \
	defined(QCA_WIFI_QCA9574) || defined(QCA_WIFI_QCA5332))
1400  /**
1401   * hif_get_num_pending_work() - get the number of entries in
1402   *		the workqueue pending to be completed.
1403   * @scn: HIF context
1404   *
 * Returns: the number of pending register write entries
1406   */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
1408  {
1409  	return hal_get_reg_write_pending_work(scn->hal_soc);
1410  }
1411  #elif defined(FEATURE_HIF_DELAYED_REG_WRITE)
static inline int hif_get_num_pending_work(struct hif_softc *scn)
1413  {
1414  	return qdf_atomic_read(&scn->active_work_cnt);
1415  }
1416  #else
1417  
static inline int hif_get_num_pending_work(struct hif_softc *scn)
1419  {
1420  	return 0;
1421  }
1422  #endif
1423  
QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
1425  {
1426  	uint32_t task_drain_wait_cnt = 0;
1427  	int tasklet = 0, grp_tasklet = 0, work = 0, oom_work = 0;
1428  
1429  	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
1430  	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
1431  	       (work = hif_get_num_pending_work(scn)) ||
1432  		(oom_work = hif_get_num_active_oom_work(scn))) {
1433  		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
1434  			hif_err("pending tasklets %d grp tasklets %d work %d oom work %d",
1435  				tasklet, grp_tasklet, work, oom_work);
1436  			/*
1437  			 * There is chance of OOM thread getting scheduled
1438  			 * continuously or execution get delayed during low
1439  			 * memory state. So avoid panic and prevent suspend
1440  			 * if OOM thread is unable to complete pending
1441  			 * work.
1442  			 */
1443  			if (oom_work)
1444  				hif_err("OOM thread is still pending %d tasklets %d grp tasklets %d work %d",
1445  					oom_work, tasklet, grp_tasklet, work);
1446  			else
1447  				QDF_DEBUG_PANIC("Complete tasks takes more than %u ms: tasklets %d grp tasklets %d work %d oom_work %d",
1448  						HIF_TASK_DRAIN_WAIT_CNT * 10,
1449  						tasklet, grp_tasklet, work,
1450  						oom_work);
1451  			return QDF_STATUS_E_FAULT;
1452  		}
1453  		hif_info("waiting for tasklets %d grp tasklets %d work %d oom_work %d",
1454  			 tasklet, grp_tasklet, work, oom_work);
1455  		msleep(10);
1456  	}
1457  
1458  	return QDF_STATUS_SUCCESS;
1459  }
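
/*
 * Note: each iteration of the loop above sleeps 10 ms, so the total drain
 * wait is bounded by roughly HIF_TASK_DRAIN_WAIT_CNT * 10 ms before the
 * error/panic path is taken.
 */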
1460  
QDF_STATUS hif_try_complete_dp_tasks(struct hif_opaque_softc *hif_ctx)
1462  {
1463  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1464  	uint32_t task_drain_wait_cnt = 0;
1465  	int grp_tasklet = 0, work = 0;
1466  
1467  	while ((grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
1468  	       (work = hif_get_num_pending_work(scn))) {
1469  		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
1470  			hif_err("pending grp tasklets %d work %d",
1471  				grp_tasklet, work);
1472  			QDF_DEBUG_PANIC("Complete tasks takes more than %u ms: grp tasklets %d work %d",
1473  					HIF_TASK_DRAIN_WAIT_CNT * 10,
1474  					grp_tasklet, work);
1475  			return QDF_STATUS_E_FAULT;
1476  		}
1477  		hif_info("waiting for grp tasklets %d work %d",
1478  			 grp_tasklet, work);
1479  		msleep(10);
1480  	}
1481  
1482  	return QDF_STATUS_SUCCESS;
1483  }
1484  
1485  #ifdef HIF_HAL_REG_ACCESS_SUPPORT
void hif_reg_window_write(struct hif_softc *scn, uint32_t offset,
1487  			  uint32_t value)
1488  {
1489  	hal_write32_mb(scn->hal_soc, offset, value);
1490  }
1491  
uint32_t hif_reg_window_read(struct hif_softc *scn, uint32_t offset)
1493  {
1494  	return hal_read32_mb(scn->hal_soc, offset);
1495  }
1496  #endif
1497  
1498  #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1500  {
1501  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1502  	uint32_t work_drain_wait_cnt = 0;
1503  	uint32_t wait_cnt = 0;
1504  	int work = 0;
1505  
1506  	qdf_atomic_set(&scn->dp_ep_vote_access,
1507  		       HIF_EP_VOTE_ACCESS_DISABLE);
1508  	qdf_atomic_set(&scn->ep_vote_access,
1509  		       HIF_EP_VOTE_ACCESS_DISABLE);
1510  
1511  	while ((work = hif_get_num_pending_work(scn))) {
1512  		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
1513  			qdf_atomic_set(&scn->dp_ep_vote_access,
1514  				       HIF_EP_VOTE_ACCESS_ENABLE);
1515  			qdf_atomic_set(&scn->ep_vote_access,
1516  				       HIF_EP_VOTE_ACCESS_ENABLE);
			hif_err("timeout waiting for pending work %d", work);
1518  			return QDF_STATUS_E_FAULT;
1519  		}
1520  		qdf_sleep(10);
1521  	}
1522  
1523  	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
		return QDF_STATUS_SUCCESS;
1525  
1526  	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
1527  		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
			hif_err("Release EP vote not processed by FW");
1529  			return QDF_STATUS_E_FAULT;
1530  		}
1531  		qdf_sleep(5);
1532  	}
1533  
1534  	return QDF_STATUS_SUCCESS;
1535  }
1536  
void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
1538  {
1539  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1540  	uint8_t vote_access;
1541  
1542  	vote_access = qdf_atomic_read(&scn->ep_vote_access);
1543  
1544  	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
1545  		hif_info("EP vote changed from:%u to intermediate state",
1546  			 vote_access);
1547  
1548  	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
1549  		QDF_BUG(0);
1550  
1551  	qdf_atomic_set(&scn->ep_vote_access,
1552  		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
1553  }
1554  
void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1556  {
1557  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1558  
1559  	qdf_atomic_set(&scn->dp_ep_vote_access,
1560  		       HIF_EP_VOTE_ACCESS_ENABLE);
1561  	qdf_atomic_set(&scn->ep_vote_access,
1562  		       HIF_EP_VOTE_ACCESS_ENABLE);
1563  }
1564  
void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1566  			    uint8_t type, uint8_t access)
1567  {
1568  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1569  
1570  	if (type == HIF_EP_VOTE_DP_ACCESS)
1571  		qdf_atomic_set(&scn->dp_ep_vote_access, access);
1572  	else
1573  		qdf_atomic_set(&scn->ep_vote_access, access);
1574  }
1575  
uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1577  			       uint8_t type)
1578  {
1579  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1580  
1581  	if (type == HIF_EP_VOTE_DP_ACCESS)
1582  		return qdf_atomic_read(&scn->dp_ep_vote_access);
1583  	else
1584  		return qdf_atomic_read(&scn->ep_vote_access);
1585  }
1586  #endif
1587  
1588  #ifdef FEATURE_HIF_DELAYED_REG_WRITE
1589  #ifdef MEMORY_DEBUG
1590  #define HIF_REG_WRITE_QUEUE_LEN 128
1591  #else
1592  #define HIF_REG_WRITE_QUEUE_LEN 32
1593  #endif
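
/*
 * Note: HIF_REG_WRITE_QUEUE_LEN is assumed to remain a power of two; the
 * read index in hif_reg_write_work() is advanced with
 * "& (HIF_REG_WRITE_QUEUE_LEN - 1)".
 */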
1594  
1595  /**
1596   * hif_print_reg_write_stats() - Print hif delayed reg write stats
1597   * @hif_ctx: hif opaque handle
1598   *
1599   * Return: None
1600   */
void hif_print_reg_write_stats(struct hif_opaque_softc *hif_ctx)
1602  {
1603  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1604  	struct CE_state *ce_state;
1605  	uint32_t *hist;
1606  	int i;
1607  
1608  	hist = scn->wstats.sched_delay;
1609  	hif_debug("wstats: enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u",
1610  		  qdf_atomic_read(&scn->wstats.enqueues),
1611  		  scn->wstats.dequeues,
1612  		  qdf_atomic_read(&scn->wstats.coalesces),
1613  		  qdf_atomic_read(&scn->wstats.direct),
1614  		  qdf_atomic_read(&scn->wstats.q_depth),
1615  		  scn->wstats.max_q_depth,
1616  		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_100us],
1617  		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_1000us],
1618  		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_5000us],
1619  		  hist[HIF_REG_WRITE_SCHED_DELAY_GT_5000us]);
1620  
1621  	for (i = 0; i < scn->ce_count; i++) {
1622  		ce_state = scn->ce_id_to_state[i];
1623  		if (!ce_state)
1624  			continue;
1625  
1626  		hif_debug("ce%d: enq %u deq %u coal %u direct %u",
1627  			  i, ce_state->wstats.enqueues,
1628  			  ce_state->wstats.dequeues,
1629  			  ce_state->wstats.coalesces,
1630  			  ce_state->wstats.direct);
1631  	}
1632  }
1633  
1634  /**
1635   * hif_is_reg_write_tput_level_high() - throughput level for delayed reg writes
1636   * @scn: hif_softc pointer
1637   *
1638   * Return: true if throughput is high, else false.
1639   */
static inline bool hif_is_reg_write_tput_level_high(struct hif_softc *scn)
1641  {
1642  	int bw_level = hif_get_bandwidth_level(GET_HIF_OPAQUE_HDL(scn));
1643  
	return bw_level >= PLD_BUS_WIDTH_MEDIUM;
1645  }
1646  
1647  /**
1648   * hif_reg_write_fill_sched_delay_hist() - fill reg write delay histogram
1649   * @scn: hif_softc pointer
1650   * @delay_us: delay in us
1651   *
1652   * Return: None
1653   */
static inline void hif_reg_write_fill_sched_delay_hist(struct hif_softc *scn,
1655  						       uint64_t delay_us)
1656  {
1657  	uint32_t *hist;
1658  
1659  	hist = scn->wstats.sched_delay;
1660  
1661  	if (delay_us < 100)
1662  		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_100us]++;
1663  	else if (delay_us < 1000)
1664  		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_1000us]++;
1665  	else if (delay_us < 5000)
1666  		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_5000us]++;
1667  	else
1668  		hist[HIF_REG_WRITE_SCHED_DELAY_GT_5000us]++;
1669  }
1670  
1671  /**
1672   * hif_process_reg_write_q_elem() - process a register write queue element
1673   * @scn: hif_softc pointer
1674   * @q_elem: pointer to hal register write queue element
1675   *
1676   * Return: The value which was written to the address
1677   */
1678  static int32_t
hif_process_reg_write_q_elem(struct hif_softc *scn,
1680  			     struct hif_reg_write_q_elem *q_elem)
1681  {
1682  	struct CE_state *ce_state = q_elem->ce_state;
1683  	uint32_t write_val = -1;
1684  
1685  	qdf_spin_lock_bh(&ce_state->ce_index_lock);
1686  
1687  	ce_state->reg_write_in_progress = false;
1688  	ce_state->wstats.dequeues++;
1689  
1690  	if (ce_state->src_ring) {
1691  		q_elem->dequeue_val = ce_state->src_ring->write_index;
1692  		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset,
1693  			       ce_state->src_ring->write_index);
1694  		write_val = ce_state->src_ring->write_index;
1695  	} else if (ce_state->dest_ring) {
1696  		q_elem->dequeue_val = ce_state->dest_ring->write_index;
1697  		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset,
1698  			       ce_state->dest_ring->write_index);
1699  		write_val = ce_state->dest_ring->write_index;
1700  	} else {
1701  		hif_debug("invalid reg write received");
1702  		qdf_assert(0);
1703  	}
1704  
1705  	q_elem->valid = 0;
1706  	ce_state->last_dequeue_time = q_elem->dequeue_time;
1707  
1708  	qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1709  
1710  	return write_val;
1711  }
1712  
1713  /**
1714   * hif_reg_write_work() - Worker to process delayed writes
1715   * @arg: hif_softc pointer
1716   *
1717   * Return: None
1718   */
hif_reg_write_work(void * arg)1719  static void hif_reg_write_work(void *arg)
1720  {
1721  	struct hif_softc *scn = arg;
1722  	struct hif_reg_write_q_elem *q_elem;
1723  	uint32_t offset;
1724  	uint64_t delta_us;
1725  	int32_t q_depth, write_val;
1726  	uint32_t num_processed = 0;
1727  	int32_t ring_id;
1728  
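	/* Pick up the queue element at the current read index */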
1729  	q_elem = &scn->reg_write_queue[scn->read_idx];
1730  	q_elem->work_scheduled_time = qdf_get_log_timestamp();
1731  	q_elem->cpu_id = qdf_get_cpu();
1732  
1733  	/* Make sure q_elem is consistent in memory across cores */
1734  	qdf_rmb();
1735  	if (!q_elem->valid)
1736  		return;
1737  
1738  	q_depth = qdf_atomic_read(&scn->wstats.q_depth);
1739  	if (q_depth > scn->wstats.max_q_depth)
1740  		scn->wstats.max_q_depth =  q_depth;
1741  
1742  	if (hif_prevent_link_low_power_states(GET_HIF_OPAQUE_HDL(scn))) {
1743  		scn->wstats.prevent_l1_fails++;
1744  		return;
1745  	}
1746  
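	/*
	 * Drain every valid entry; read_idx wraps via the mask below,
	 * which assumes HIF_REG_WRITE_QUEUE_LEN is a power of two.
	 */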
1747  	while (true) {
1748  		qdf_rmb();
1749  		if (!q_elem->valid)
1750  			break;
1751  
1752  		qdf_rmb();
1753  		q_elem->dequeue_time = qdf_get_log_timestamp();
1754  		ring_id = q_elem->ce_state->id;
1755  		offset = q_elem->offset;
1756  		delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time -
1757  						      q_elem->enqueue_time);
1758  		hif_reg_write_fill_sched_delay_hist(scn, delta_us);
1759  
1760  		scn->wstats.dequeues++;
1761  		qdf_atomic_dec(&scn->wstats.q_depth);
1762  
1763  		write_val = hif_process_reg_write_q_elem(scn, q_elem);
1764  		hif_debug("read_idx %u ce_id %d offset 0x%x dequeue_val %d",
1765  			  scn->read_idx, ring_id, offset, write_val);
1766  
1767  		qdf_trace_dp_del_reg_write(ring_id, q_elem->enqueue_val,
1768  					   q_elem->dequeue_val,
1769  					   q_elem->enqueue_time,
1770  					   q_elem->dequeue_time);
1771  		num_processed++;
1772  		scn->read_idx = (scn->read_idx + 1) &
1773  					(HIF_REG_WRITE_QUEUE_LEN - 1);
1774  		q_elem = &scn->reg_write_queue[scn->read_idx];
1775  	}
1776  
1777  	hif_allow_link_low_power_states(GET_HIF_OPAQUE_HDL(scn));
1778  
1779  	/*
1780  	 * Decrement active_work_cnt by the number of elements dequeued after
1781  	 * hif_allow_link_low_power_states.
1782  	 * This makes sure that hif_try_complete_tasks() waits until we have
1783  	 * made the bus access in hif_allow_link_low_power_states(), avoiding
1784  	 * a race between the delayed register write worker and bus suspend
1785  	 * (system suspend or runtime suspend).
1786  	 *
1787  	 * The following decrement should be done at the end!
1788  	 */
1789  	qdf_atomic_sub(num_processed, &scn->active_work_cnt);
1790  }
1791  
1792  /**
1793   * hif_delayed_reg_write_deinit() - De-Initialize delayed reg write processing
1794   * @scn: hif_softc pointer
1795   *
1796   * De-initialize main data structures to process register writes in a delayed
1797   * workqueue.
1798   *
1799   * Return: None
1800   */
hif_delayed_reg_write_deinit(struct hif_softc * scn)1801  static void hif_delayed_reg_write_deinit(struct hif_softc *scn)
1802  {
1803  	qdf_flush_work(&scn->reg_write_work);
1804  	qdf_disable_work(&scn->reg_write_work);
1805  	qdf_flush_workqueue(0, scn->reg_write_wq);
1806  	qdf_destroy_workqueue(0, scn->reg_write_wq);
1807  	qdf_mem_free(scn->reg_write_queue);
1808  }
1809  
1810  /**
1811   * hif_delayed_reg_write_init() - Initialization function for delayed reg writes
1812   * @scn: hif_softc pointer
1813   *
1814   * Initialize main data structures to process register writes in a delayed
1815   * workqueue.
1816   *
1817   * Return: QDF_STATUS
 */
hif_delayed_reg_write_init(struct hif_softc * scn)1818  static QDF_STATUS hif_delayed_reg_write_init(struct hif_softc *scn)
1819  {
1820  	qdf_atomic_init(&scn->active_work_cnt);
1821  	scn->reg_write_wq =
1822  		qdf_alloc_high_prior_ordered_workqueue("hif_register_write_wq");
1823  	qdf_create_work(0, &scn->reg_write_work, hif_reg_write_work, scn);
1824  	scn->reg_write_queue = qdf_mem_malloc(HIF_REG_WRITE_QUEUE_LEN *
1825  					      sizeof(*scn->reg_write_queue));
1826  	if (!scn->reg_write_queue) {
1827  		hif_err("unable to allocate memory for delayed reg write");
1828  		QDF_BUG(0);
1829  		return QDF_STATUS_E_NOMEM;
1830  	}
1831  
1832  	/* Initial value of indices */
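	/* write_idx starts at -1 so the first qdf_atomic_inc_return() yields slot 0 */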
1833  	scn->read_idx = 0;
1834  	qdf_atomic_set(&scn->write_idx, -1);
1835  
1836  	return QDF_STATUS_SUCCESS;
1837  }
1838  
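/**
 * hif_reg_write_enqueue() - Enqueue a CE write-index update for the worker
 * @scn: hif_softc pointer
 * @ce_state: copy engine state whose write index is to be updated
 * @value: write index value to be written
 *
 * Return: None
 */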
hif_reg_write_enqueue(struct hif_softc * scn,struct CE_state * ce_state,uint32_t value)1839  static void hif_reg_write_enqueue(struct hif_softc *scn,
1840  				  struct CE_state *ce_state,
1841  				  uint32_t value)
1842  {
1843  	struct hif_reg_write_q_elem *q_elem;
1844  	uint32_t write_idx;
1845  
1846  	if (ce_state->reg_write_in_progress) {
1847  		hif_debug("Already in progress ce_id %d offset 0x%x value %u",
1848  			  ce_state->id, ce_state->ce_wrt_idx_offset, value);
1849  		qdf_atomic_inc(&scn->wstats.coalesces);
1850  		ce_state->wstats.coalesces++;
1851  		return;
1852  	}
1853  
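	/* Claim the next queue slot; the mask assumes a power-of-two queue length */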
1854  	write_idx = qdf_atomic_inc_return(&scn->write_idx);
1855  	write_idx = write_idx & (HIF_REG_WRITE_QUEUE_LEN - 1);
1856  
1857  	q_elem = &scn->reg_write_queue[write_idx];
1858  	if (q_elem->valid) {
1859  		hif_err("queue full");
1860  		QDF_BUG(0);
1861  		return;
1862  	}
1863  
1864  	qdf_atomic_inc(&scn->wstats.enqueues);
1865  	ce_state->wstats.enqueues++;
1866  
1867  	qdf_atomic_inc(&scn->wstats.q_depth);
1868  
1869  	q_elem->ce_state = ce_state;
1870  	q_elem->offset = ce_state->ce_wrt_idx_offset;
1871  	q_elem->enqueue_val = value;
1872  	q_elem->enqueue_time = qdf_get_log_timestamp();
1873  
1874  	/*
1875  	 * Before the valid flag is set to true, all the other
1876  	 * fields in the q_elem need to be updated in memory.
1877  	 * Otherwise the dequeuing worker thread might read stale
1878  	 * entries and process the wrong SRNG.
1879  	 */
1880  	qdf_wmb();
1881  	q_elem->valid = true;
1882  
1883  	/*
1884  	 * After all other fields in the q_elem have been updated
1885  	 * in memory, the valid flag also needs to reach memory
1886  	 * promptly. Otherwise the dequeuing worker thread might read
1887  	 * a stale valid flag and skip this entry for the current
1888  	 * round; if no other work is scheduled later, this HAL
1889  	 * register write would never be issued.
1891  	 */
1892  	qdf_wmb();
1893  
1894  	ce_state->reg_write_in_progress  = true;
1895  	qdf_atomic_inc(&scn->active_work_cnt);
1896  
1897  	hif_debug("write_idx %u ce_id %d offset 0x%x value %u",
1898  		  write_idx, ce_state->id, ce_state->ce_wrt_idx_offset, value);
1899  
1900  	qdf_queue_work(scn->qdf_dev, scn->reg_write_wq,
1901  		       &scn->reg_write_work);
1902  }
1903  
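/**
 * hif_delayed_reg_write() - Issue or queue a CE write-index register update
 * @scn: hif_softc pointer
 * @ctrl_addr: CE control address, used to derive the CE id
 * @val: value to be written
 *
 * Return: None
 */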
hif_delayed_reg_write(struct hif_softc * scn,uint32_t ctrl_addr,uint32_t val)1904  void hif_delayed_reg_write(struct hif_softc *scn, uint32_t ctrl_addr,
1905  			   uint32_t val)
1906  {
1907  	struct CE_state *ce_state;
1908  	int ce_id = COPY_ENGINE_ID(ctrl_addr);
1909  
1910  	ce_state = scn->ce_id_to_state[ce_id];
1911  
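	/* CEs carrying neither HTT tx nor HTT rx data always take the delayed path */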
1912  	if (!ce_state->htt_tx_data && !ce_state->htt_rx_data) {
1913  		hif_reg_write_enqueue(scn, ce_state, val);
1914  		return;
1915  	}
1916  
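	/*
	 * Write directly when throughput is high or the MHI link is already
	 * in L0; otherwise defer the write to the worker.
	 */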
1917  	if (hif_is_reg_write_tput_level_high(scn) ||
1918  	    (PLD_MHI_STATE_L0 == pld_get_mhi_state(scn->qdf_dev->dev))) {
1919  		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset, val);
1920  		qdf_atomic_inc(&scn->wstats.direct);
1921  		ce_state->wstats.direct++;
1922  	} else {
1923  		hif_reg_write_enqueue(scn, ce_state, val);
1924  	}
1925  }
1926  #else
hif_delayed_reg_write_init(struct hif_softc * scn)1927  static inline QDF_STATUS hif_delayed_reg_write_init(struct hif_softc *scn)
1928  {
1929  	return QDF_STATUS_SUCCESS;
1930  }
1931  
hif_delayed_reg_write_deinit(struct hif_softc * scn)1932  static inline void  hif_delayed_reg_write_deinit(struct hif_softc *scn)
1933  {
1934  }
1935  #endif
1936  
1937  #if defined(QCA_WIFI_WCN6450)
hif_hal_attach(struct hif_softc * scn)1938  static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1939  {
1940  	scn->hal_soc = hal_attach(hif_softc_to_hif_opaque_softc(scn),
1941  				  scn->qdf_dev);
1942  	if (!scn->hal_soc)
1943  		return QDF_STATUS_E_FAILURE;
1944  
1945  	return QDF_STATUS_SUCCESS;
1946  }
1947  
hif_hal_detach(struct hif_softc * scn)1948  static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1949  {
1950  	hal_detach(scn->hal_soc);
1951  	scn->hal_soc = NULL;
1952  
1953  	return QDF_STATUS_SUCCESS;
1954  }
1955  #elif (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1956  	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1957  	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1958  	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1959  	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1960  	defined(QCA_WIFI_QCA9574) || defined(QCA_WIFI_QCA5332))
hif_hal_attach(struct hif_softc * scn)1961  static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1962  {
1963  	if (ce_srng_based(scn)) {
1964  		scn->hal_soc = hal_attach(
1965  					hif_softc_to_hif_opaque_softc(scn),
1966  					scn->qdf_dev);
1967  		if (!scn->hal_soc)
1968  			return QDF_STATUS_E_FAILURE;
1969  	}
1970  
1971  	return QDF_STATUS_SUCCESS;
1972  }
1973  
hif_hal_detach(struct hif_softc * scn)1974  static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1975  {
1976  	if (ce_srng_based(scn)) {
1977  		hal_detach(scn->hal_soc);
1978  		scn->hal_soc = NULL;
1979  	}
1980  
1981  	return QDF_STATUS_SUCCESS;
1982  }
1983  #else
hif_hal_attach(struct hif_softc * scn)1984  static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1985  {
1986  	return QDF_STATUS_SUCCESS;
1987  }
1988  
hif_hal_detach(struct hif_softc * scn)1989  static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1990  {
1991  	return QDF_STATUS_SUCCESS;
1992  }
1993  #endif
1994  
hif_init_dma_mask(struct device * dev,enum qdf_bus_type bus_type)1995  int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
1996  {
1997  	int ret;
1998  
1999  	switch (bus_type) {
2000  	case QDF_BUS_TYPE_IPCI:
2001  		ret = qdf_set_dma_coherent_mask(dev,
2002  						DMA_COHERENT_MASK_DEFAULT);
2003  		if (ret) {
2004  			hif_err("Failed to set dma mask error = %d", ret);
2005  			return ret;
2006  		}
2007  
2008  		break;
2009  	default:
2010  		/* Follow the existing sequence for other targets */
2011  		break;
2012  	}
2013  
2014  	return 0;
2015  }
2016  
2017  /**
2018   * hif_enable(): hif_enable
2019   * @hif_ctx: hif_ctx
2020   * @dev: dev
2021   * @bdev: bus dev
2022   * @bid: bus ID
2023   * @bus_type: bus type
2024   * @type: enable type
2025   *
2026   * Return: QDF_STATUS
2027   */
hif_enable(struct hif_opaque_softc * hif_ctx,struct device * dev,void * bdev,const struct hif_bus_id * bid,enum qdf_bus_type bus_type,enum hif_enable_type type)2028  QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
2029  					  void *bdev,
2030  					  const struct hif_bus_id *bid,
2031  					  enum qdf_bus_type bus_type,
2032  					  enum hif_enable_type type)
2033  {
2034  	QDF_STATUS status;
2035  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2036  
2037  	if (!scn) {
2038  		hif_err("hif_ctx = NULL");
2039  		return QDF_STATUS_E_NULL_VALUE;
2040  	}
2041  
2042  	status = hif_enable_bus(scn, dev, bdev, bid, type);
2043  	if (status != QDF_STATUS_SUCCESS) {
2044  		hif_err("hif_enable_bus error = %d", status);
2045  		return status;
2046  	}
2047  
2048  	status = hif_hal_attach(scn);
2049  	if (status != QDF_STATUS_SUCCESS) {
2050  		hif_err("hal attach failed");
2051  		goto disable_bus;
2052  	}
2053  
2054  	if (hif_delayed_reg_write_init(scn) != QDF_STATUS_SUCCESS) {
2055  		hif_err("unable to initialize delayed reg write");
2056  		goto hal_detach;
2057  	}
2058  
2059  	if (hif_bus_configure(scn)) {
2060  		hif_err("Target probe failed");
2061  		status = QDF_STATUS_E_FAILURE;
2062  		goto hal_detach;
2063  	}
2064  
2065  	hif_ut_suspend_init(scn);
2066  	hif_register_recovery_notifier(scn);
2067  	hif_latency_detect_timer_start(hif_ctx);
2068  
2069  	/*
2070  	 * This flag guards against a potential access to unallocated
2071  	 * memory from the MSI interrupt handler, which can get scheduled
2072  	 * as soon as MSI is enabled, i.e. it takes care of the race
2073  	 * caused by MSI being enabled before the memory that the
2074  	 * interrupt handlers use is allocated.
2075  	 */
2076  
2077  	scn->hif_init_done = true;
2078  
2079  	hif_debug("OK");
2080  
2081  	return QDF_STATUS_SUCCESS;
2082  
2083  hal_detach:
2084  	hif_hal_detach(scn);
2085  disable_bus:
2086  	hif_disable_bus(scn);
2087  	return status;
2088  }
2089  
hif_disable(struct hif_opaque_softc * hif_ctx,enum hif_disable_type type)2090  void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
2091  {
2092  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2093  
2094  	if (!scn)
2095  		return;
2096  
2097  	hif_delayed_reg_write_deinit(scn);
2098  	hif_set_enable_detection(hif_ctx, false);
2099  	hif_latency_detect_timer_stop(hif_ctx);
2100  
2101  	hif_unregister_recovery_notifier(scn);
2102  
2103  	hif_nointrs(scn);
2104  	if (scn->hif_init_done == false)
2105  		hif_shutdown_device(hif_ctx);
2106  	else
2107  		hif_stop(hif_ctx);
2108  
2109  	hif_hal_detach(scn);
2110  
2111  	hif_disable_bus(scn);
2112  
2113  	hif_wlan_disable(scn);
2114  
2115  	scn->notice_send = false;
2116  
2117  	hif_debug("X");
2118  }
2119  
2120  #ifdef CE_TASKLET_DEBUG_ENABLE
hif_enable_ce_latency_stats(struct hif_opaque_softc * hif_ctx,uint8_t val)2121  void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
2122  {
2123  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2124  
2125  	if (!scn)
2126  		return;
2127  
2128  	scn->ce_latency_stats = val;
2129  }
2130  #endif
2131  
hif_display_stats(struct hif_opaque_softc * hif_ctx)2132  void hif_display_stats(struct hif_opaque_softc *hif_ctx)
2133  {
2134  	hif_display_bus_stats(hif_ctx);
2135  }
2136  
2137  qdf_export_symbol(hif_display_stats);
2138  
hif_clear_stats(struct hif_opaque_softc * hif_ctx)2139  void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
2140  {
2141  	hif_clear_bus_stats(hif_ctx);
2142  }
2143  
2144  /**
2145   * hif_crash_shutdown_dump_bus_register() - dump bus registers
2146   * @hif_ctx: hif_ctx
2147   *
2148   * Return: n/a
2149   */
2150  #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)
2151  
hif_crash_shutdown_dump_bus_register(void * hif_ctx)2152  static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
2153  {
2154  	struct hif_opaque_softc *scn = hif_ctx;
2155  
2156  	if (hif_check_soc_status(scn))
2157  		return;
2158  
2159  	if (hif_dump_registers(scn))
2160  		hif_err("Failed to dump bus registers!");
2161  }
2162  
2163  /**
2164   * hif_crash_shutdown(): hif_crash_shutdown
2165   *
2166   * This function is called by the platform driver to dump CE registers
2167   *
2168   * @hif_ctx: hif_ctx
2169   *
2170   * Return: n/a
2171   */
hif_crash_shutdown(struct hif_opaque_softc * hif_ctx)2172  void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
2173  {
2174  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2175  
2176  	if (!hif_ctx)
2177  		return;
2178  
2179  	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
2180  		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
2181  		return;
2182  	}
2183  
2184  	if (TARGET_STATUS_RESET == scn->target_status) {
2185  		hif_warn("Target is already asserted, ignore!");
2186  		return;
2187  	}
2188  
2189  	if (hif_is_load_or_unload_in_progress(scn)) {
2190  		hif_err("Load/unload is in progress, ignore!");
2191  		return;
2192  	}
2193  
2194  	hif_crash_shutdown_dump_bus_register(hif_ctx);
2195  	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);
2196  
2197  	if (ol_copy_ramdump(hif_ctx))
2198  		goto out;
2199  
2200  	hif_info("RAM dump collection completed!");
2201  
2202  out:
2203  	return;
2204  }
2205  #else
hif_crash_shutdown(struct hif_opaque_softc * hif_ctx)2206  void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
2207  {
2208  	hif_debug("Collecting target RAM dump disabled");
2209  }
2210  #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
2211  
2212  #ifdef QCA_WIFI_3_0
2213  /**
2214   * hif_check_fw_reg(): hif_check_fw_reg
2215   * @scn: scn
2216   *
2217   * Return: int
2218   */
hif_check_fw_reg(struct hif_opaque_softc * scn)2219  int hif_check_fw_reg(struct hif_opaque_softc *scn)
2220  {
2221  	return 0;
2222  }
2223  #endif
2224  
2225  /**
2226   * hif_read_phy_mem_base(): hif_read_phy_mem_base
2227   * @scn: scn
2228   * @phy_mem_base: physical mem base
2229   *
2230   * Return: n/a
2231   */
hif_read_phy_mem_base(struct hif_softc * scn,qdf_dma_addr_t * phy_mem_base)2232  void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
2233  {
2234  	*phy_mem_base = scn->mem_pa;
2235  }
2236  qdf_export_symbol(hif_read_phy_mem_base);
2237  
2238  /**
2239   * hif_get_device_type(): hif_get_device_type
2240   * @device_id: device_id
2241   * @revision_id: revision_id
2242   * @hif_type: returned hif_type
2243   * @target_type: returned target_type
2244   *
2245   * Return: int
2246   */
hif_get_device_type(uint32_t device_id,uint32_t revision_id,uint32_t * hif_type,uint32_t * target_type)2247  int hif_get_device_type(uint32_t device_id,
2248  			uint32_t revision_id,
2249  			uint32_t *hif_type, uint32_t *target_type)
2250  {
2251  	int ret = 0;
2252  
2253  	switch (device_id) {
2254  	case ADRASTEA_DEVICE_ID_P2_E12:
2255  
2256  		*hif_type = HIF_TYPE_ADRASTEA;
2257  		*target_type = TARGET_TYPE_ADRASTEA;
2258  		break;
2259  
2260  	case AR9888_DEVICE_ID:
2261  		*hif_type = HIF_TYPE_AR9888;
2262  		*target_type = TARGET_TYPE_AR9888;
2263  		break;
2264  
2265  	case AR6320_DEVICE_ID:
2266  		switch (revision_id) {
2267  		case AR6320_FW_1_1:
2268  		case AR6320_FW_1_3:
2269  			*hif_type = HIF_TYPE_AR6320;
2270  			*target_type = TARGET_TYPE_AR6320;
2271  			break;
2272  
2273  		case AR6320_FW_2_0:
2274  		case AR6320_FW_3_0:
2275  		case AR6320_FW_3_2:
2276  			*hif_type = HIF_TYPE_AR6320V2;
2277  			*target_type = TARGET_TYPE_AR6320V2;
2278  			break;
2279  
2280  		default:
2281  			hif_err("dev_id = 0x%x, rev_id = 0x%x",
2282  				device_id, revision_id);
2283  			ret = -ENODEV;
2284  			goto end;
2285  		}
2286  		break;
2287  
2288  	case AR9887_DEVICE_ID:
2289  		*hif_type = HIF_TYPE_AR9888;
2290  		*target_type = TARGET_TYPE_AR9888;
2291  		hif_info(" *********** AR9887 **************");
2292  		break;
2293  
2294  	case QCA9984_DEVICE_ID:
2295  		*hif_type = HIF_TYPE_QCA9984;
2296  		*target_type = TARGET_TYPE_QCA9984;
2297  		hif_info(" *********** QCA9984 *************");
2298  		break;
2299  
2300  	case QCA9888_DEVICE_ID:
2301  		*hif_type = HIF_TYPE_QCA9888;
2302  		*target_type = TARGET_TYPE_QCA9888;
2303  		hif_info(" *********** QCA9888 *************");
2304  		break;
2305  
2306  	case AR900B_DEVICE_ID:
2307  		*hif_type = HIF_TYPE_AR900B;
2308  		*target_type = TARGET_TYPE_AR900B;
2309  		hif_info(" *********** AR900B *************");
2310  		break;
2311  
2312  	case QCA8074_DEVICE_ID:
2313  		*hif_type = HIF_TYPE_QCA8074;
2314  		*target_type = TARGET_TYPE_QCA8074;
2315  		hif_info(" *********** QCA8074  *************");
2316  		break;
2317  
2318  	case QCA6290_EMULATION_DEVICE_ID:
2319  	case QCA6290_DEVICE_ID:
2320  		*hif_type = HIF_TYPE_QCA6290;
2321  		*target_type = TARGET_TYPE_QCA6290;
2322  		hif_info(" *********** QCA6290EMU *************");
2323  		break;
2324  
2325  	case QCN9000_DEVICE_ID:
2326  		*hif_type = HIF_TYPE_QCN9000;
2327  		*target_type = TARGET_TYPE_QCN9000;
2328  		hif_info(" *********** QCN9000 *************");
2329  		break;
2330  
2331  	case QCN9224_DEVICE_ID:
2332  		*hif_type = HIF_TYPE_QCN9224;
2333  		*target_type = TARGET_TYPE_QCN9224;
2334  		hif_info(" *********** QCN9224 *************");
2335  		break;
2336  
2337  	case QCN6122_DEVICE_ID:
2338  		*hif_type = HIF_TYPE_QCN6122;
2339  		*target_type = TARGET_TYPE_QCN6122;
2340  		hif_info(" *********** QCN6122 *************");
2341  		break;
2342  
2343  	case QCN9160_DEVICE_ID:
2344  		*hif_type = HIF_TYPE_QCN9160;
2345  		*target_type = TARGET_TYPE_QCN9160;
2346  		hif_info(" *********** QCN9160 *************");
2347  		break;
2348  
2349  	case QCN6432_DEVICE_ID:
2350  		*hif_type = HIF_TYPE_QCN6432;
2351  		*target_type = TARGET_TYPE_QCN6432;
2352  		hif_info(" *********** QCN6432 *************");
2353  		break;
2354  
2355  	case QCN7605_DEVICE_ID:
2356  	case QCN7605_COMPOSITE:
2357  	case QCN7605_STANDALONE:
2358  	case QCN7605_STANDALONE_V2:
2359  	case QCN7605_COMPOSITE_V2:
2360  		*hif_type = HIF_TYPE_QCN7605;
2361  		*target_type = TARGET_TYPE_QCN7605;
2362  		hif_info(" *********** QCN7605 *************");
2363  		break;
2364  
2365  	case QCA6390_DEVICE_ID:
2366  	case QCA6390_EMULATION_DEVICE_ID:
2367  		*hif_type = HIF_TYPE_QCA6390;
2368  		*target_type = TARGET_TYPE_QCA6390;
2369  		hif_info(" *********** QCA6390 *************");
2370  		break;
2371  
2372  	case QCA6490_DEVICE_ID:
2373  	case QCA6490_EMULATION_DEVICE_ID:
2374  		*hif_type = HIF_TYPE_QCA6490;
2375  		*target_type = TARGET_TYPE_QCA6490;
2376  		hif_info(" *********** QCA6490 *************");
2377  		break;
2378  
2379  	case QCA6750_DEVICE_ID:
2380  	case QCA6750_EMULATION_DEVICE_ID:
2381  		*hif_type = HIF_TYPE_QCA6750;
2382  		*target_type = TARGET_TYPE_QCA6750;
2383  		hif_info(" *********** QCA6750 *************");
2384  		break;
2385  
2386  	case KIWI_DEVICE_ID:
2387  		*hif_type = HIF_TYPE_KIWI;
2388  		*target_type = TARGET_TYPE_KIWI;
2389  		hif_info(" *********** KIWI *************");
2390  		break;
2391  
2392  	case MANGO_DEVICE_ID:
2393  		*hif_type = HIF_TYPE_MANGO;
2394  		*target_type = TARGET_TYPE_MANGO;
2395  		hif_info(" *********** MANGO *************");
2396  		break;
2397  
2398  	case PEACH_DEVICE_ID:
2399  		*hif_type = HIF_TYPE_PEACH;
2400  		*target_type = TARGET_TYPE_PEACH;
2401  		hif_info(" *********** PEACH *************");
2402  		break;
2403  
2404  	case QCA8074V2_DEVICE_ID:
2405  		*hif_type = HIF_TYPE_QCA8074V2;
2406  		*target_type = TARGET_TYPE_QCA8074V2;
2407  		hif_info(" *********** QCA8074V2 *************");
2408  		break;
2409  
2410  	case QCA6018_DEVICE_ID:
2411  	case RUMIM2M_DEVICE_ID_NODE0:
2412  	case RUMIM2M_DEVICE_ID_NODE1:
2413  	case RUMIM2M_DEVICE_ID_NODE2:
2414  	case RUMIM2M_DEVICE_ID_NODE3:
2415  	case RUMIM2M_DEVICE_ID_NODE4:
2416  	case RUMIM2M_DEVICE_ID_NODE5:
2417  		*hif_type = HIF_TYPE_QCA6018;
2418  		*target_type = TARGET_TYPE_QCA6018;
2419  		hif_info(" *********** QCA6018 *************");
2420  		break;
2421  
2422  	case QCA5018_DEVICE_ID:
2423  		*hif_type = HIF_TYPE_QCA5018;
2424  		*target_type = TARGET_TYPE_QCA5018;
2425  		hif_info(" *********** QCA5018 *************");
2426  		break;
2427  
2428  	case QCA5332_DEVICE_ID:
2429  		*hif_type = HIF_TYPE_QCA5332;
2430  		*target_type = TARGET_TYPE_QCA5332;
2431  		hif_info(" *********** QCA5332 *************");
2432  		break;
2433  
2434  	case QCA9574_DEVICE_ID:
2435  		*hif_type = HIF_TYPE_QCA9574;
2436  		*target_type = TARGET_TYPE_QCA9574;
2437  		hif_info(" *********** QCA9574 *************");
2438  		break;
2439  
2440  	case WCN6450_DEVICE_ID:
2441  		*hif_type = HIF_TYPE_WCN6450;
2442  		*target_type = TARGET_TYPE_WCN6450;
2443  		hif_info(" *********** WCN6450 *************");
2444  		break;
2445  
2446  	default:
2447  		hif_err("Unsupported device ID = 0x%x!", device_id);
2448  		ret = -ENODEV;
2449  		break;
2450  	}
2451  
2452  	if (*target_type == TARGET_TYPE_UNKNOWN) {
2453  		hif_err("Unsupported target_type!");
2454  		ret = -ENODEV;
2455  	}
2456  end:
2457  	return ret;
2458  }
2459  
2460  /**
2461   * hif_get_bus_type() - return the bus type
2462   * @hif_hdl: HIF Context
2463   *
2464   * Return: enum qdf_bus_type
2465   */
hif_get_bus_type(struct hif_opaque_softc * hif_hdl)2466  enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
2467  {
2468  	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
2469  
2470  	return scn->bus_type;
2471  }
2472  
2473  /*
2474   * Target info and ini parameters are global to the driver.
2475   * Hence these structures are exposed to all the modules in
2476   * the driver, and they don't need to maintain multiple copies
2477   * of the same info; instead, they get the handle from hif and
2478   * modify it through hif.
2479   */
2480  
2481  /**
2482   * hif_get_ini_handle() - API to get hif_config_param handle
2483   * @hif_ctx: HIF Context
2484   *
2485   * Return: pointer to hif_config_info
2486   */
hif_get_ini_handle(struct hif_opaque_softc * hif_ctx)2487  struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
2488  {
2489  	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
2490  
2491  	return &sc->hif_config;
2492  }
2493  
2494  /**
2495   * hif_get_target_info_handle() - API to get hif_target_info handle
2496   * @hif_ctx: HIF context
2497   *
2498   * Return: Pointer to hif_target_info
2499   */
hif_get_target_info_handle(struct hif_opaque_softc * hif_ctx)2500  struct hif_target_info *hif_get_target_info_handle(
2501  					struct hif_opaque_softc *hif_ctx)
2502  {
2503  	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
2504  
2505  	return &sc->target_info;
2506  
2507  }
2508  qdf_export_symbol(hif_get_target_info_handle);
2509  
2510  #ifdef RECEIVE_OFFLOAD
hif_offld_flush_cb_register(struct hif_opaque_softc * scn,void (offld_flush_handler)(void *))2511  void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
2512  				 void (offld_flush_handler)(void *))
2513  {
2514  	if (hif_napi_enabled(scn, -1))
2515  		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
2516  	else
2517  		hif_err("NAPI not enabled");
2518  }
2519  qdf_export_symbol(hif_offld_flush_cb_register);
2520  
hif_offld_flush_cb_deregister(struct hif_opaque_softc * scn)2521  void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
2522  {
2523  	if (hif_napi_enabled(scn, -1))
2524  		hif_napi_rx_offld_flush_cb_deregister(scn);
2525  	else
2526  		hif_err("NAPI not enabled");
2527  }
2528  qdf_export_symbol(hif_offld_flush_cb_deregister);
2529  
hif_get_rx_ctx_id(int ctx_id,struct hif_opaque_softc * hif_hdl)2530  int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
2531  {
2532  	if (hif_napi_enabled(hif_hdl, -1))
2533  		return NAPI_PIPE2ID(ctx_id);
2534  	else
2535  		return ctx_id;
2536  }
2537  #else /* RECEIVE_OFFLOAD */
hif_get_rx_ctx_id(int ctx_id,struct hif_opaque_softc * hif_hdl)2538  int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
2539  {
2540  	return 0;
2541  }
2542  qdf_export_symbol(hif_get_rx_ctx_id);
2543  #endif /* RECEIVE_OFFLOAD */
2544  
2545  #if defined(FEATURE_LRO)
2546  
2547  /**
2548   * hif_get_lro_info - Returns LRO instance for instance ID
2549   * @ctx_id: LRO instance ID
2550   * @hif_hdl: HIF Context
2551   *
2552   * Return: Pointer to LRO instance.
2553   */
hif_get_lro_info(int ctx_id,struct hif_opaque_softc * hif_hdl)2554  void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
2555  {
2556  	void *data;
2557  
2558  	if (hif_napi_enabled(hif_hdl, -1))
2559  		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
2560  	else
2561  		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
2562  
2563  	return data;
2564  }
2565  #endif
2566  
2567  /**
2568   * hif_get_target_status - API to get target status
2569   * @hif_ctx: HIF Context
2570   *
2571   * Return: enum hif_target_status
2572   */
hif_get_target_status(struct hif_opaque_softc * hif_ctx)2573  enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
2574  {
2575  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2576  
2577  	return scn->target_status;
2578  }
2579  qdf_export_symbol(hif_get_target_status);
2580  
2581  /**
2582   * hif_set_target_status() - API to set target status
2583   * @hif_ctx: HIF Context
2584   * @status: Target Status
2585   *
2586   * Return: void
2587   */
hif_set_target_status(struct hif_opaque_softc * hif_ctx,enum hif_target_status status)2588  void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
2589  			   hif_target_status status)
2590  {
2591  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2592  
2593  	scn->target_status = status;
2594  }
2595  
2596  /**
2597   * hif_init_ini_config() - API to initialize HIF configuration parameters
2598   * @hif_ctx: HIF Context
2599   * @cfg: HIF Configuration
2600   *
2601   * Return: void
2602   */
hif_init_ini_config(struct hif_opaque_softc * hif_ctx,struct hif_config_info * cfg)2603  void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
2604  			 struct hif_config_info *cfg)
2605  {
2606  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2607  
2608  	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
2609  }
2610  
2611  /**
2612   * hif_get_conparam() - API to get driver mode in HIF
2613   * @scn: HIF Context
2614   *
2615   * Return: driver mode of operation
2616   */
hif_get_conparam(struct hif_softc * scn)2617  uint32_t hif_get_conparam(struct hif_softc *scn)
2618  {
2619  	if (!scn)
2620  		return 0;
2621  
2622  	return scn->hif_con_param;
2623  }
2624  
2625  /**
2626   * hif_get_callbacks_handle() - API to get callbacks Handle
2627   * @scn: HIF Context
2628   *
2629   * Return: pointer to HIF Callbacks
2630   */
hif_get_callbacks_handle(struct hif_softc * scn)2631  struct hif_driver_state_callbacks *hif_get_callbacks_handle(
2632  							struct hif_softc *scn)
2633  {
2634  	return &scn->callbacks;
2635  }
2636  
2637  /**
2638   * hif_is_driver_unloading() - API to query upper layers if driver is unloading
2639   * @scn: HIF Context
2640   *
2641   * Return: True/False
2642   */
hif_is_driver_unloading(struct hif_softc * scn)2643  bool hif_is_driver_unloading(struct hif_softc *scn)
2644  {
2645  	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2646  
2647  	if (cbk && cbk->is_driver_unloading)
2648  		return cbk->is_driver_unloading(cbk->context);
2649  
2650  	return false;
2651  }
2652  
2653  /**
2654   * hif_is_load_or_unload_in_progress() - API to query upper layers if
2655   * load/unload in progress
2656   * @scn: HIF Context
2657   *
2658   * Return: True/False
2659   */
hif_is_load_or_unload_in_progress(struct hif_softc * scn)2660  bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
2661  {
2662  	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2663  
2664  	if (cbk && cbk->is_load_unload_in_progress)
2665  		return cbk->is_load_unload_in_progress(cbk->context);
2666  
2667  	return false;
2668  }
2669  
2670  /**
2671   * hif_is_recovery_in_progress() - API to query upper layers if recovery in
2672   * progress
2673   * @scn: HIF Context
2674   *
2675   * Return: True/False
2676   */
hif_is_recovery_in_progress(struct hif_softc * scn)2677  bool hif_is_recovery_in_progress(struct hif_softc *scn)
2678  {
2679  	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2680  
2681  	if (cbk && cbk->is_recovery_in_progress)
2682  		return cbk->is_recovery_in_progress(cbk->context);
2683  
2684  	return false;
2685  }
2686  
2687  #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
2688      defined(HIF_IPCI)
2689  
2690  /**
2691   * hif_update_pipe_callback() - API to register pipe specific callbacks
2692   * @osc: Opaque softc
2693   * @pipeid: pipe id
2694   * @callbacks: callbacks to register
2695   *
2696   * Return: void
2697   */
2698  
hif_update_pipe_callback(struct hif_opaque_softc * osc,u_int8_t pipeid,struct hif_msg_callbacks * callbacks)2699  void hif_update_pipe_callback(struct hif_opaque_softc *osc,
2700  					u_int8_t pipeid,
2701  					struct hif_msg_callbacks *callbacks)
2702  {
2703  	struct hif_softc *scn = HIF_GET_SOFTC(osc);
2704  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2705  	struct HIF_CE_pipe_info *pipe_info;
2706  
2707  	QDF_BUG(pipeid < CE_COUNT_MAX);
2708  
2709  	hif_debug("pipeid: %d", pipeid);
2710  
2711  	pipe_info = &hif_state->pipe_info[pipeid];
2712  
2713  	qdf_mem_copy(&pipe_info->pipe_callbacks,
2714  			callbacks, sizeof(pipe_info->pipe_callbacks));
2715  }
2716  qdf_export_symbol(hif_update_pipe_callback);
2717  
2718  /**
2719   * hif_is_target_ready() - API to query if target is in ready state
2721   * @scn: HIF Context
2722   *
2723   * Return: True/False
2724   */
hif_is_target_ready(struct hif_softc * scn)2725  bool hif_is_target_ready(struct hif_softc *scn)
2726  {
2727  	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2728  
2729  	if (cbk && cbk->is_target_ready)
2730  		return cbk->is_target_ready(cbk->context);
2731  	/*
2732  	 * If the callback is not registered then there is no way to
2733  	 * determine whether the target is ready. In such a case, return
2734  	 * true to indicate that the target is ready.
2735  	 */
2736  	return true;
2737  }
2738  qdf_export_symbol(hif_is_target_ready);
2739  
hif_get_bandwidth_level(struct hif_opaque_softc * hif_handle)2740  int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
2741  {
2742  	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
2743  	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2744  
2745  	if (cbk && cbk->get_bandwidth_level)
2746  		return cbk->get_bandwidth_level(cbk->context);
2747  
2748  	return 0;
2749  }
2750  
2751  qdf_export_symbol(hif_get_bandwidth_level);
2752  
2753  #ifdef DP_MEM_PRE_ALLOC
hif_mem_alloc_consistent_unaligned(struct hif_softc * scn,qdf_size_t size,qdf_dma_addr_t * paddr,uint32_t ring_type,uint8_t * is_mem_prealloc)2754  void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
2755  					 qdf_size_t size,
2756  					 qdf_dma_addr_t *paddr,
2757  					 uint32_t ring_type,
2758  					 uint8_t *is_mem_prealloc)
2759  {
2760  	void *vaddr = NULL;
2761  	struct hif_driver_state_callbacks *cbk =
2762  				hif_get_callbacks_handle(scn);
2763  
2764  	*is_mem_prealloc = false;
2765  	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
2766  		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
2767  								   paddr,
2768  								   ring_type);
2769  		if (vaddr) {
2770  			*is_mem_prealloc = true;
2771  			goto end;
2772  		}
2773  	}
2774  
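	/* No pre-allocated buffer available; fall back to dynamic allocation */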
2775  	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
2776  					 scn->qdf_dev->dev,
2777  					 size,
2778  					 paddr);
2779  end:
2780  	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
2781  		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
2782  		(void *)*paddr, (int)size, ring_type);
2783  
2784  	return vaddr;
2785  }
2786  
hif_mem_free_consistent_unaligned(struct hif_softc * scn,qdf_size_t size,void * vaddr,qdf_dma_addr_t paddr,qdf_dma_context_t memctx,uint8_t is_mem_prealloc)2787  void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
2788  				       qdf_size_t size,
2789  				       void *vaddr,
2790  				       qdf_dma_addr_t paddr,
2791  				       qdf_dma_context_t memctx,
2792  				       uint8_t is_mem_prealloc)
2793  {
2794  	struct hif_driver_state_callbacks *cbk =
2795  				hif_get_callbacks_handle(scn);
2796  
2797  	if (is_mem_prealloc) {
2798  		if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
2799  			cbk->prealloc_put_consistent_mem_unaligned(vaddr);
2800  		} else {
2801  			dp_warn("dp_prealloc_put_consistent_unaligned NULL");
2802  			QDF_BUG(0);
2803  		}
2804  	} else {
2805  		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
2806  					size, vaddr, paddr, memctx);
2807  	}
2808  }
2809  
hif_prealloc_get_multi_pages(struct hif_softc * scn,uint32_t desc_type,qdf_size_t elem_size,uint16_t elem_num,struct qdf_mem_multi_page_t * pages,bool cacheable)2810  void hif_prealloc_get_multi_pages(struct hif_softc *scn, uint32_t desc_type,
2811  				  qdf_size_t elem_size, uint16_t elem_num,
2812  				  struct qdf_mem_multi_page_t *pages,
2813  				  bool cacheable)
2814  {
2815  	struct hif_driver_state_callbacks *cbk =
2816  			hif_get_callbacks_handle(scn);
2817  
2818  	if (cbk && cbk->prealloc_get_multi_pages)
2819  		cbk->prealloc_get_multi_pages(desc_type, elem_size, elem_num,
2820  					      pages, cacheable);
2821  
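	/* Fall back to dynamic multi-page allocation if prealloc provided nothing */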
2822  	if (!pages->num_pages)
2823  		qdf_mem_multi_pages_alloc(scn->qdf_dev, pages,
2824  					  elem_size, elem_num, 0, cacheable);
2825  }
2826  
hif_prealloc_put_multi_pages(struct hif_softc * scn,uint32_t desc_type,struct qdf_mem_multi_page_t * pages,bool cacheable)2827  void hif_prealloc_put_multi_pages(struct hif_softc *scn, uint32_t desc_type,
2828  				  struct qdf_mem_multi_page_t *pages,
2829  				  bool cacheable)
2830  {
2831  	struct hif_driver_state_callbacks *cbk =
2832  			hif_get_callbacks_handle(scn);
2833  
2834  	if (cbk && cbk->prealloc_put_multi_pages &&
2835  	    pages->is_mem_prealloc)
2836  		cbk->prealloc_put_multi_pages(desc_type, pages);
2837  
2838  	if (!pages->is_mem_prealloc)
2839  		qdf_mem_multi_pages_free(scn->qdf_dev, pages, 0,
2840  					 cacheable);
2841  }
2842  #endif
2843  
2844  /**
2845   * hif_batch_send() - API to access hif specific function
2846   * ce_batch_send.
2847   * @osc: HIF Context
2848   * @msdu: list of msdus to be sent
2849   * @transfer_id: transfer id
2850   * @len: downloaded length
2851   * @sendhead:
2852   *
2853   * Return: list of msdus not sent
2854   */
hif_batch_send(struct hif_opaque_softc * osc,qdf_nbuf_t msdu,uint32_t transfer_id,u_int32_t len,uint32_t sendhead)2855  qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2856  		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
2857  {
2858  	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2859  
2860  	if (!ce_tx_hdl)
2861  		return NULL;
2862  
2863  	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2864  			len, sendhead);
2865  }
2866  qdf_export_symbol(hif_batch_send);
2867  
2868  /**
2869   * hif_update_tx_ring() - API to access hif specific function
2870   * ce_update_tx_ring.
2871   * @osc: HIF Context
2872   * @num_htt_cmpls: number of htt compl received.
2873   *
2874   * Return: void
2875   */
hif_update_tx_ring(struct hif_opaque_softc * osc,u_int32_t num_htt_cmpls)2876  void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
2877  {
2878  	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2879  
2880  	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
2881  }
2882  qdf_export_symbol(hif_update_tx_ring);
2883  
2884  
2885  /**
2886   * hif_send_single() - API to access hif specific function
2887   * ce_send_single.
2888   * @osc: HIF Context
2889   * @msdu : msdu to be sent
2890   * @transfer_id: transfer id
2891   * @len : downloaded length
2892   *
2893   * Return: msdu sent status
2894   */
hif_send_single(struct hif_opaque_softc * osc,qdf_nbuf_t msdu,uint32_t transfer_id,u_int32_t len)2895  QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2896  			   uint32_t transfer_id, u_int32_t len)
2897  {
2898  	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2899  
2900  	if (!ce_tx_hdl)
2901  		return QDF_STATUS_E_NULL_VALUE;
2902  
2903  	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2904  			len);
2905  }
2906  qdf_export_symbol(hif_send_single);
2907  #endif
2908  
2909  /**
2910   * hif_reg_write() - API to access hif specific function
2911   * hif_write32_mb.
2912   * @hif_ctx : HIF Context
2913   * @offset : offset on which value has to be written
2914   * @value : value to be written
2915   *
2916   * Return: None
2917   */
hif_reg_write(struct hif_opaque_softc * hif_ctx,uint32_t offset,uint32_t value)2918  void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
2919  		uint32_t value)
2920  {
2921  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2922  
2923  	hif_write32_mb(scn, scn->mem + offset, value);
2924  
2925  }
2926  qdf_export_symbol(hif_reg_write);
2927  
2928  /**
2929   * hif_reg_read() - API to access hif specific function
2930   * hif_read32_mb.
2931   * @hif_ctx : HIF Context
2932   * @offset : offset from which value has to be read
2933   *
2934   * Return: Read value
2935   */
hif_reg_read(struct hif_opaque_softc * hif_ctx,uint32_t offset)2936  uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
2937  {
2938  
2939  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2940  
2941  	return hif_read32_mb(scn, scn->mem + offset);
2942  }
2943  qdf_export_symbol(hif_reg_read);
2944  
2945  /**
2946   * hif_ramdump_handler(): generic ramdump handler
2947   * @scn: struct hif_opaque_softc
2948   *
2949   * Return: None
2950   */
hif_ramdump_handler(struct hif_opaque_softc * scn)2951  void hif_ramdump_handler(struct hif_opaque_softc *scn)
2952  {
2953  	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
2954  		hif_usb_ramdump_handler(scn);
2955  }
2956  
hif_pm_get_wake_irq_type(struct hif_opaque_softc * hif_ctx)2957  hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
2958  {
2959  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2960  
2961  	return scn->wake_irq_type;
2962  }
2963  
hif_wake_interrupt_handler(int irq,void * context)2964  irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
2965  {
2966  	struct hif_softc *scn = context;
2967  
2968  	hif_info("wake interrupt received on irq %d", irq);
2969  
2970  	hif_rtpm_set_monitor_wake_intr(0);
2971  	hif_rtpm_request_resume();
2972  
2973  	if (scn->initial_wakeup_cb)
2974  		scn->initial_wakeup_cb(scn->initial_wakeup_priv);
2975  
2976  	if (hif_is_ut_suspended(scn))
2977  		hif_ut_fw_resume(scn);
2978  
2979  	qdf_pm_system_wakeup();
2980  
2981  	return IRQ_HANDLED;
2982  }
2983  
hif_set_initial_wakeup_cb(struct hif_opaque_softc * hif_ctx,void (* callback)(void *),void * priv)2984  void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
2985  			       void (*callback)(void *),
2986  			       void *priv)
2987  {
2988  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2989  
2990  	scn->initial_wakeup_cb = callback;
2991  	scn->initial_wakeup_priv = priv;
2992  }
2993  
hif_set_ce_service_max_yield_time(struct hif_opaque_softc * hif,uint32_t ce_service_max_yield_time)2994  void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
2995  				       uint32_t ce_service_max_yield_time)
2996  {
2997  	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2998  
2999  	hif_ctx->ce_service_max_yield_time =
3000  		ce_service_max_yield_time * 1000;
3001  }
3002  
3003  unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc * hif)3004  hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
3005  {
3006  	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
3007  
3008  	return hif_ctx->ce_service_max_yield_time;
3009  }
3010  
hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc * hif,uint8_t ce_service_max_rx_ind_flush)3011  void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
3012  				       uint8_t ce_service_max_rx_ind_flush)
3013  {
3014  	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
3015  
3016  	if (ce_service_max_rx_ind_flush == 0 ||
3017  	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
3018  		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
3019  	else
3020  		hif_ctx->ce_service_max_rx_ind_flush =
3021  						ce_service_max_rx_ind_flush;
3022  }
3023  
3024  #ifdef SYSTEM_PM_CHECK
__hif_system_pm_set_state(struct hif_opaque_softc * hif,enum hif_system_pm_state state)3025  void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
3026  			       enum hif_system_pm_state state)
3027  {
3028  	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
3029  
3030  	qdf_atomic_set(&hif_ctx->sys_pm_state, state);
3031  }
3032  
hif_system_pm_get_state(struct hif_opaque_softc * hif)3033  int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
3034  {
3035  	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
3036  
3037  	return qdf_atomic_read(&hif_ctx->sys_pm_state);
3038  }
3039  
hif_system_pm_state_check(struct hif_opaque_softc * hif)3040  int hif_system_pm_state_check(struct hif_opaque_softc *hif)
3041  {
3042  	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
3043  	int32_t sys_pm_state;
3044  
3045  	if (!hif_ctx) {
3046  		hif_err("hif context is null");
3047  		return -EFAULT;
3048  	}
3049  
3050  	sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
3051  	if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
3052  	    sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
3053  		hif_info("Triggering system wakeup");
3054  		qdf_pm_system_wakeup();
3055  		return -EAGAIN;
3056  	}
3057  
3058  	return 0;
3059  }
3060  #endif
3061  #ifdef WLAN_FEATURE_AFFINITY_MGR
3062  /*
3063   * hif_audio_cpu_affinity_allowed() - Check if audio cpu affinity allowed
3064   *
3065   * @scn: hif handle
3066   * @cfg: hif affinity manager configuration for IRQ
3067   * @audio_taken_cpu: Current CPUs which are taken by audio.
3068   * @current_time: Current system time.
3069   *
3070   * This API checks for 2 conditions
3071   *  1) The last audio taken mask and the current taken mask differ.
3072   *  2) The last time the IRQ was affined away due to audio taken CPUs
3073   *     is more than the time threshold ago (currently 5 seconds).
3074   * Only if both conditions are satisfied does this return true.
3075   *
3076   * Return: bool: true if it is allowed to affine away audio taken cpus.
3077   */
3078  static inline bool
hif_audio_cpu_affinity_allowed(struct hif_softc * scn,struct hif_cpu_affinity * cfg,qdf_cpu_mask audio_taken_cpu,uint64_t current_time)3079  hif_audio_cpu_affinity_allowed(struct hif_softc *scn,
3080  			       struct hif_cpu_affinity *cfg,
3081  			       qdf_cpu_mask audio_taken_cpu,
3082  			       uint64_t current_time)
3083  {
3084  	if (!qdf_cpumask_equal(&audio_taken_cpu, &cfg->walt_taken_mask) &&
3085  	    (qdf_log_timestamp_to_usecs(current_time -
3086  			 cfg->last_affined_away)
3087  		< scn->time_threshold))
3088  		return false;
3089  	return true;
3090  }
3091  
3092  /*
3093   * hif_affinity_mgr_check_update_mask() - Check if the cpu mask needs to be updated
3094   *
3095   * @scn: hif handle
3096   * @cfg: hif affinity manager configuration for IRQ
3097   * @audio_taken_cpu: Current CPUs which are taken by audio.
3098   * @cpu_mask: CPU mask which need to be updated.
3099   * @current_time: Current system time.
3100   *
3101   * This API checks if the Pro audio use case is running and if cpu_mask
3102   * needs to be updated.
3103   *
3104   * Return: QDF_STATUS
3105   */
3106  static inline QDF_STATUS
hif_affinity_mgr_check_update_mask(struct hif_softc * scn,struct hif_cpu_affinity * cfg,qdf_cpu_mask audio_taken_cpu,qdf_cpu_mask * cpu_mask,uint64_t current_time)3107  hif_affinity_mgr_check_update_mask(struct hif_softc *scn,
3108  				   struct hif_cpu_affinity *cfg,
3109  				   qdf_cpu_mask audio_taken_cpu,
3110  				   qdf_cpu_mask *cpu_mask,
3111  				   uint64_t current_time)
3112  {
3113  	qdf_cpu_mask allowed_mask;
3114  
3115  	/*
3116  	 * Case 1: audio_taken_mask is empty
3117  	 *   Check if passed cpu_mask and wlan_requested_mask is same or not.
3118  	 *      If both mask are different copy wlan_requested_mask(IRQ affinity
3119  	 *      mask requested by WLAN) to cpu_mask.
3120  	 *
3121  	 * Case 2: audio_taken_mask is not empty
3122  	 *   1. Only allow the update if the IRQ was last affined away due to
3123  	 *      audio taken CPUs more than 5 seconds ago, or if the update is
3124  	 *      requested by WLAN.
3125  	 *   2. Only allow silver cores to be affined away.
3126  	 *   3. Check if any CPU allowed for the audio use case is set in cpu_mask.
3127  	 *       i. If any CPU mask is set, mask out that CPU from the cpu_mask
3128  	 *       ii. If after masking out audio taken cpu(Silver cores) cpu_mask
3129  	 *           is empty, set mask to all cpu except cpus taken by audio.
3130  	 * Example:
3131  	 *| Audio mask | mask allowed | cpu_mask | WLAN req mask | new cpu_mask|
3132  	 *|  0x00      |       0x00   |   0x0C   |       0x0C    |      0x0C   |
3133  	 *|  0x00      |       0x00   |   0x03   |       0x03    |      0x03   |
3134  	 *|  0x00      |       0x00   |   0xFC   |       0x03    |      0x03   |
3135  	 *|  0x00      |       0x00   |   0x03   |       0x0C    |      0x0C   |
3136  	 *|  0x0F      |       0x03   |   0x0C   |       0x0C    |      0x0C   |
3137  	 *|  0x0F      |       0x03   |   0x03   |       0x03    |      0xFC   |
3138  	 *|  0x03      |       0x03   |   0x0C   |       0x0C    |      0x0C   |
3139  	 *|  0x03      |       0x03   |   0x03   |       0x03    |      0xFC   |
3140  	 *|  0x03      |       0x03   |   0xFC   |       0x03    |      0xFC   |
3141  	 *|  0xF0      |       0x00   |   0x0C   |       0x0C    |      0x0C   |
3142  	 *|  0xF0      |       0x00   |   0x03   |       0x03    |      0x03   |
3143  	 */
3144  
3145  	/* Check if audio taken mask is empty*/
3146  	if (qdf_likely(qdf_cpumask_empty(&audio_taken_cpu))) {
3147  		/* If CPU mask requested by WLAN for the IRQ and
3148  		 * cpu_mask passed CPU mask set for IRQ is different
3149  		 * Copy requested mask into cpu_mask and return
3150  		 */
3151  		if (qdf_unlikely(!qdf_cpumask_equal(cpu_mask,
3152  						    &cfg->wlan_requested_mask))) {
3153  			qdf_cpumask_copy(cpu_mask, &cfg->wlan_requested_mask);
3154  			return QDF_STATUS_SUCCESS;
3155  		}
3156  		return QDF_STATUS_E_ALREADY;
3157  	}
3158  
3159  	if (!(hif_audio_cpu_affinity_allowed(scn, cfg, audio_taken_cpu,
3160  					     current_time) ||
3161  	      cfg->update_requested))
3162  		return QDF_STATUS_E_AGAIN;
3163  
3164  	/* Only allow Silver cores to be affined away */
3165  	qdf_cpumask_and(&allowed_mask, &scn->allowed_mask, &audio_taken_cpu);
3166  	if (qdf_cpumask_intersects(cpu_mask, &allowed_mask)) {
3167  		/* If any of taken CPU(Silver cores) mask is set in cpu_mask,
3168  		 *  mask out the audio taken CPUs from the cpu_mask.
3169  		 */
3170  		qdf_cpumask_andnot(cpu_mask, &cfg->wlan_requested_mask,
3171  				   &allowed_mask);
3172  		/* If cpu_mask is empty set it to all CPUs
3173  		 * except taken by audio(Silver cores)
3174  		 */
3175  		if (qdf_unlikely(qdf_cpumask_empty(cpu_mask)))
3176  			qdf_cpumask_complement(cpu_mask, &allowed_mask);
3177  		return QDF_STATUS_SUCCESS;
3178  	}
3179  
3180  	return QDF_STATUS_E_ALREADY;
3181  }
3182  
3183  static inline QDF_STATUS
hif_check_and_affine_irq(struct hif_softc * scn,struct hif_cpu_affinity * cfg,qdf_cpu_mask audio_taken_cpu,qdf_cpu_mask cpu_mask,uint64_t current_time)3184  hif_check_and_affine_irq(struct hif_softc *scn, struct hif_cpu_affinity *cfg,
3185  			 qdf_cpu_mask audio_taken_cpu, qdf_cpu_mask cpu_mask,
3186  			 uint64_t current_time)
3187  {
3188  	QDF_STATUS status;
3189  
3190  	status = hif_affinity_mgr_check_update_mask(scn, cfg,
3191  						    audio_taken_cpu,
3192  						    &cpu_mask,
3193  						    current_time);
3194  	/* Set IRQ affinity if CPU mask was updated */
3195  	if (QDF_IS_STATUS_SUCCESS(status)) {
3196  		status = hif_irq_set_affinity_hint(cfg->irq,
3197  						   &cpu_mask);
3198  		if (QDF_IS_STATUS_SUCCESS(status)) {
3199  			/* Store audio taken CPU mask */
3200  			qdf_cpumask_copy(&cfg->walt_taken_mask,
3201  					 &audio_taken_cpu);
3202  			/* Store CPU mask which was set for IRQ*/
3203  			qdf_cpumask_copy(&cfg->current_irq_mask,
3204  					 &cpu_mask);
3205  			/* Set time when IRQ affinity was updated */
3206  			cfg->last_updated = current_time;
3207  			if (hif_audio_cpu_affinity_allowed(scn, cfg,
3208  							   audio_taken_cpu,
3209  							   current_time))
3210  				/* If the CPU mask was updated due to CPUs
3211  				 * taken by audio, update the
3212  				 * last_affined_away time
3213  				 */
3214  				cfg->last_affined_away = current_time;
3215  		}
3216  	}
3217  
3218  	return status;
3219  }
3220  
hif_affinity_mgr_affine_irq(struct hif_softc * scn)3221  void hif_affinity_mgr_affine_irq(struct hif_softc *scn)
3222  {
3223  	bool audio_affinity_allowed = false;
3224  	int i, j, ce_id;
3225  	uint64_t current_time;
3226  	char cpu_str[10];
3227  	QDF_STATUS status;
3228  	qdf_cpu_mask cpu_mask, audio_taken_cpu;
3229  	struct HIF_CE_state *hif_state;
3230  	struct hif_exec_context *hif_ext_group;
3231  	struct CE_attr *host_ce_conf;
3232  	struct HIF_CE_state *ce_sc;
3233  	struct hif_cpu_affinity *cfg;
3234  
3235  	if (!scn->affinity_mgr_supported)
3236  		return;
3237  
3238  	current_time = hif_get_log_timestamp();
3239  	/* Get CPU mask for audio taken CPUs */
3240  	audio_taken_cpu = qdf_walt_get_cpus_taken();
3241  
3242  	ce_sc = HIF_GET_CE_STATE(scn);
3243  	host_ce_conf = ce_sc->host_ce_config;
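	/* Re-evaluate affinity for each CE interrupt (skip CEs with interrupts disabled) */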
3244  	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3245  		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
3246  			continue;
3247  		cfg = &scn->ce_irq_cpu_mask[ce_id];
3248  		qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
3249  		status =
3250  			hif_check_and_affine_irq(scn, cfg, audio_taken_cpu,
3251  						 cpu_mask, current_time);
3252  		if (QDF_IS_STATUS_SUCCESS(status))
3253  			audio_affinity_allowed = true;
3254  	}
3255  
3256  	hif_state = HIF_GET_CE_STATE(scn);
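	/* Re-evaluate affinity for every IRQ in each ext interrupt group */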
3257  	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
3258  		hif_ext_group = hif_state->hif_ext_group[i];
3259  		for (j = 0; j < hif_ext_group->numirq; j++) {
3260  			cfg = &scn->irq_cpu_mask[hif_ext_group->grp_id][j];
3261  			qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
3262  			status =
3263  				hif_check_and_affine_irq(scn, cfg, audio_taken_cpu,
3264  							 cpu_mask, current_time);
3265  			if (QDF_IS_STATUS_SUCCESS(status)) {
3266  				qdf_atomic_set(&hif_ext_group->force_napi_complete, -1);
3267  				audio_affinity_allowed = true;
3268  			}
3269  		}
3270  	}
3271  	if (audio_affinity_allowed) {
3272  		qdf_thread_cpumap_print_to_pagebuf(false, cpu_str,
3273  						   &audio_taken_cpu);
3274  		hif_info("Audio taken CPU mask: %s", cpu_str);
3275  	}
3276  }
3277  
3278  static inline QDF_STATUS
hif_affinity_mgr_set_irq_affinity(struct hif_softc * scn,uint32_t irq,struct hif_cpu_affinity * cfg,qdf_cpu_mask * cpu_mask)3279  hif_affinity_mgr_set_irq_affinity(struct hif_softc *scn, uint32_t irq,
3280  				  struct hif_cpu_affinity *cfg,
3281  				  qdf_cpu_mask *cpu_mask)
3282  {
3283  	uint64_t current_time;
3284  	char cpu_str[10];
3285  	QDF_STATUS status, mask_updated;
3286  	qdf_cpu_mask audio_taken_cpu = qdf_walt_get_cpus_taken();
3287  
3288  	current_time = hif_get_log_timestamp();
3289  	qdf_cpumask_copy(&cfg->wlan_requested_mask, cpu_mask);
3290  	cfg->update_requested = true;
3291  	mask_updated = hif_affinity_mgr_check_update_mask(scn, cfg,
3292  							  audio_taken_cpu,
3293  							  cpu_mask,
3294  							  current_time);
3295  	status = hif_irq_set_affinity_hint(irq, cpu_mask);
3296  	if (QDF_IS_STATUS_SUCCESS(status)) {
3297  		qdf_cpumask_copy(&cfg->walt_taken_mask, &audio_taken_cpu);
3298  		qdf_cpumask_copy(&cfg->current_irq_mask, cpu_mask);
3299  		if (QDF_IS_STATUS_SUCCESS(mask_updated)) {
3300  			cfg->last_updated = current_time;
3301  			if (hif_audio_cpu_affinity_allowed(scn, cfg,
3302  							   audio_taken_cpu,
3303  							   current_time)) {
3304  				cfg->last_affined_away = current_time;
3305  				qdf_thread_cpumap_print_to_pagebuf(false,
3306  								   cpu_str,
3307  								   &audio_taken_cpu);
3308  				hif_info_rl("Audio taken CPU mask: %s",
3309  					    cpu_str);
3310  			}
3311  		}
3312  	}
3313  	cfg->update_requested = false;
3314  	return status;
3315  }
3316  
3317  QDF_STATUS
hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc * scn,uint32_t irq,uint32_t grp_id,uint32_t irq_index,qdf_cpu_mask * cpu_mask)3318  hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc *scn, uint32_t irq,
3319  				      uint32_t grp_id, uint32_t irq_index,
3320  				      qdf_cpu_mask *cpu_mask)
3321  {
3322  	struct hif_cpu_affinity *cfg;
3323  
3324  	if (!scn->affinity_mgr_supported)
3325  		return hif_irq_set_affinity_hint(irq, cpu_mask);
3326  
3327  	cfg = &scn->irq_cpu_mask[grp_id][irq_index];
3328  	return hif_affinity_mgr_set_irq_affinity(scn, irq, cfg, cpu_mask);
3329  }
3330  
3331  QDF_STATUS
hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc * scn,uint32_t irq,uint32_t ce_id,qdf_cpu_mask * cpu_mask)3332  hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc *scn, uint32_t irq,
3333  				     uint32_t ce_id, qdf_cpu_mask *cpu_mask)
3334  {
3335  	struct hif_cpu_affinity *cfg;
3336  
3337  	if (!scn->affinity_mgr_supported)
3338  		return hif_irq_set_affinity_hint(irq, cpu_mask);
3339  
3340  	cfg = &scn->ce_irq_cpu_mask[ce_id];
3341  	return hif_affinity_mgr_set_irq_affinity(scn, irq, cfg, cpu_mask);
3342  }
3343  
3344  void
hif_affinity_mgr_init_ce_irq(struct hif_softc * scn,int id,int irq)3345  hif_affinity_mgr_init_ce_irq(struct hif_softc *scn, int id, int irq)
3346  {
3347  	unsigned int cpus;
3348  	qdf_cpu_mask cpu_mask = {0};
3349  	struct hif_cpu_affinity *cfg = NULL;
3350  
3351  	if (!scn->affinity_mgr_supported)
3352  		return;
3353  
3354  	/* Set CPU Mask to Silver core */
3355  	qdf_for_each_possible_cpu(cpus)
3356  		if (qdf_topology_physical_package_id(cpus) ==
3357  		    CPU_CLUSTER_TYPE_LITTLE)
3358  			qdf_cpumask_set_cpu(cpus, &cpu_mask);
3359  
3360  	cfg = &scn->ce_irq_cpu_mask[id];
3361  	qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
3362  	qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
3363  	cfg->irq = irq;
3364  	cfg->last_updated = 0;
3365  	cfg->last_affined_away = 0;
3366  	cfg->update_requested = false;
3367  }
3368  
3369  void
hif_affinity_mgr_init_grp_irq(struct hif_softc * scn,int grp_id,int irq_num,int irq)3370  hif_affinity_mgr_init_grp_irq(struct hif_softc *scn, int grp_id,
3371  			      int irq_num, int irq)
3372  {
3373  	unsigned int cpus;
3374  	qdf_cpu_mask cpu_mask = {0};
3375  	struct hif_cpu_affinity *cfg = NULL;
3376  
3377  	if (!scn->affinity_mgr_supported)
3378  		return;
3379  
3380  	/* Set CPU Mask to Silver core */
3381  	qdf_for_each_possible_cpu(cpus)
3382  		if (qdf_topology_physical_package_id(cpus) ==
3383  		    CPU_CLUSTER_TYPE_LITTLE)
3384  			qdf_cpumask_set_cpu(cpus, &cpu_mask);
3385  
3386  	cfg = &scn->irq_cpu_mask[grp_id][irq_num];
3387  	qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
3388  	qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
3389  	cfg->irq = irq;
3390  	cfg->last_updated = 0;
3391  	cfg->last_affined_away = 0;
3392  	cfg->update_requested = false;
3393  }
3394  #endif
3395  
3396  #if defined(HIF_CPU_PERF_AFFINE_MASK) || \
3397  	defined(FEATURE_ENABLE_CE_DP_IRQ_AFFINE)
hif_config_irq_set_perf_affinity_hint(struct hif_opaque_softc * hif_ctx)3398  void hif_config_irq_set_perf_affinity_hint(
3399  	struct hif_opaque_softc *hif_ctx)
3400  {
3401  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3402  
3403  	hif_config_irq_affinity(scn);
3404  }
3405  
3406  qdf_export_symbol(hif_config_irq_set_perf_affinity_hint);
3407  #endif
3408