/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#include "qdf_module.h"
#include "qdf_ssr_driver_dump.h"
#include <wbuff.h>

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"
#ifdef HIF_CE_LOG_INFO
#include "qdf_hang_event_notifier.h"
#endif

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
	defined(QCA_WIFI_QCA6018) || defined(QCA_WIFI_QCA5018) || \
	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCA5332) || \
	defined(QCA_WIFI_QCA9574)) && !defined(QCA_WIFI_SUPPORT_SRNG) && \
	!defined(QCA_WIFI_WCN6450)
#define QCA_WIFI_SUPPORT_SRNG
#endif

#ifdef QCA_WIFI_SUPPORT_SRNG
#include <hal_api.h>
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix for EV118783: poll to check whether a BMI response has arrived,
 * rather than waiting only for the completion interrupt, which may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump access log
 *
 * dump access log
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif

/*
 * This structure contains the interrupt index for each Copy engine
 * for various number of MSIs available in the system.
 */
static struct ce_int_assignment ce_int_context[NUM_CE_CONTEXT] = {
	/* Default configuration */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(12),
	  CE_INTERRUPT_IDX(13),
	  CE_INTERRUPT_IDX(14),
	  CE_INTERRUPT_IDX(15),
#endif
	} },
	/* Interrupt assignment for 1 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 2 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 3 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 4 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 5 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 6 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 7 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 8 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 9 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 10 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 11 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 12 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
#ifdef QCA_WIFI_QCN9224
	/* Interrupt assignment for 13 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
	  CE_INTERRUPT_IDX(12),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	} },
	/* Interrupt assignment for 14 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
	  CE_INTERRUPT_IDX(12),
	  CE_INTERRUPT_IDX(13),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	} },
	/* Interrupt assignment for 15 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
	  CE_INTERRUPT_IDX(12),
	  CE_INTERRUPT_IDX(13),
	  CE_INTERRUPT_IDX(14),
	  CE_INTERRUPT_IDX(0),
	} },
	/* Interrupt assignment for 16 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
	  CE_INTERRUPT_IDX(12),
	  CE_INTERRUPT_IDX(13),
	  CE_INTERRUPT_IDX(14),
	  CE_INTERRUPT_IDX(15),
	} },
#endif
};
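
/*
 * Illustrative sketch (not part of the driver): row 0 of the table above
 * is the default assignment and row N covers the N-MSI grant, so resolving
 * the interrupt index for a CE is a plain two-dimensional lookup.  The
 * member name "msi_idx" is an assumption for illustration; see
 * struct ce_int_assignment for the actual layout.
 */
#if 0
static inline uint8_t ce_interrupt_idx_for(unsigned int num_msi, int ce_id)
{
	/* Out-of-range MSI counts fall back to the default row */
	if (num_msi >= NUM_CE_CONTEXT)
		num_msi = 0;

	return ce_int_context[num_msi].msi_idx[ce_id];
}
#endif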


void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		hif_err("Invalid htc dump command: %d", cmd_id);
		break;
	}
}

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}
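
/*
 * Illustrative sketch (not part of the driver): a copy engine that cannot
 * rely on its interrupt can fall back to the timer above.  Assuming the
 * standard QDF timer API, arming the poll timer would look roughly like
 * this; ce_poll_timeout() then re-arms itself every CE_POLL_TIMEOUT ms.
 */
#if 0
static void ce_poll_timer_arm(struct CE_state *CE_state)
{
	CE_state->timer_inited = true;
	qdf_timer_start(&CE_state->poll_timer, CE_POLL_TIMEOUT);
}
#endif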

static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}
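
/*
 * Worked examples for roundup_pwr2(): roundup_pwr2(4) == 4 (already a
 * power of 2), roundup_pwr2(5) == 8, roundup_pwr2(1000) == 1024.  Zero
 * takes the early-return path unchanged, and a non-power-of-2 above 2^30
 * exhausts the search and trips the assert.
 */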

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};
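
/*
 * Illustrative sketch (not part of the driver): each entry above pairs a
 * CE id with the offset of that CE's write-index shadow register, one
 * entry per shadowed ring.  Assuming the field names ce_id and reg_offset
 * for struct shadow_reg_cfg (an assumption for illustration), checking
 * whether a CE/direction has a shadow register would read as follows.
 */
#if 0
static bool ce_has_shadow_wr_index(uint8_t ce_id, bool src_ring)
{
	uint32_t want = src_ring ? ADRASTEA_SRC_WR_INDEX_OFFSET :
				   ADRASTEA_DST_WR_INDEX_OFFSET;
	int i;

	for (i = 0; i < QDF_ARRAY_SIZE(target_shadow_reg_cfg_map); i++)
		if (target_shadow_reg_cfg_map[i].ce_id == ce_id &&
		    target_shadow_reg_cfg_map[i].reg_offset == want)
			return true;

	return false;
}
#endif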

#ifdef QCN7605_SUPPORT
static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

#ifdef WLAN_FEATURE_EPPING
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs.  A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map  - Target-side mapping
 *    hif_map_service_to_pipe   - Host-side mapping
 *    target_ce_config          - Target-side configuration
 *    host_ce_config            - Host-side configuration
   ============================================================================
   Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
              |                      |      | ctio | Size     | Frequency
              |                      |      | n    |          |
   ============================================================================
   tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
   descriptor |                      |      |      | O(100B)  | and regular
   download   |                      |      |      |          |
   ----------------------------------------------------------------------------
   rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
   indication |                      |      |      | O(10B)   | regular
   upload     |                      |      |      |          |
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
   upload     |                      |      |      | O(1000B) | (frequent
   e.g. noise |                      |      |      |          | during IP1.0
   packets    |                      |      |      |          | testing)
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
   download   |                      |      |      | O(1000B) | (frequent
   e.g.       |                      |      |      |          | during IP1.0
   misdirecte |                      |      |      |          | testing)
   d EAPOL    |                      |      |      |          |
   packets    |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
              | DATA_VO (uplink)     |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
              | DATA_VO (downlink)   |      |      |          |
   ----------------------------------------------------------------------------
   WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
              |                      |      |      | O(100B)  |
   ----------------------------------------------------------------------------
   WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
   messages   | (downlink)           |      |      | O(100B)  |
              |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (uplink)             |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (downlink)           |      |      |          |
   ----------------------------------------------------------------------------
   diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
              |                      |      |      |          | infrequent
   ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,    /* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,    /* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,    /* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{                       /* Must be last */
		0,
		0,
		0,
	},
};
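
/*
 * Illustrative sketch (not part of the driver): hif_map_service_to_pipe()
 * resolves a service to its UL/DL pipe numbers by scanning a map like the
 * one above.  A minimal version of that scan, using the same byte-size
 * convention the real code uses, would look like this.
 */
#if 0
static void svc_map_to_pipes(struct service_to_pipe *map, uint32_t map_sz,
			     uint16_t svc_id, uint8_t *ul_pipe,
			     uint8_t *dl_pipe)
{
	uint32_t i, n = map_sz / sizeof(struct service_to_pipe);

	for (i = 0; i < n; i++) {
		if (map[i].service_id != svc_id)
			continue;
		if (map[i].pipedir == PIPEDIR_OUT)	/* host -> target */
			*ul_pipe = map[i].pipenum;
		else if (map[i].pipedir == PIPEDIR_IN)	/* target -> host */
			*dl_pipe = map[i].pipenum;
	}
}
#endif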

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif

#if (defined(QCA_WIFI_QCA9574))
static struct service_to_pipe target_service_to_ce_map_qca9574[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca9574[] = {
};
#endif

#if (defined(QCA_WIFI_QCA8074V2))
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9},
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6018))
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
};
#endif

#if (defined(QCA_WIFI_QCN9000))
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
};
#endif

#if (defined(QCA_WIFI_QCA5332) || defined(QCA_WIFI_QCN6432))
static struct service_to_pipe target_service_to_ce_map_qca5332[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
#ifdef WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 9, },
	{ WMI_CONTROL_DBR_SVC, PIPEDIR_IN, 9, },
#else
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_DBR_SVC, PIPEDIR_IN, 2, },
#endif
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca5332[] = {
};
#endif

#if (defined(QCA_WIFI_QCN9224))
static struct service_to_pipe target_service_to_ce_map_qcn9224[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
#ifdef WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 14, },
	{ WMI_CONTROL_DBR_SVC, PIPEDIR_IN, 14, },
#else
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_DBR_SVC, PIPEDIR_IN, 2, },
#endif
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif

#if defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCN9160)
static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
};
#endif

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#ifdef QCN7605_SUPPORT
static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
#ifdef IPA_OFFLOAD
	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
#else
	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
#endif
	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif

#if (defined(QCA_WIFI_QCA6290))
#ifdef QCA_6290_AP_MODE
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6390))
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_qca6490[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};

#if (defined(QCA_WIFI_QCA6750))
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
#endif
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
};
#endif

#if (defined(QCA_WIFI_KIWI))
#ifdef FEATURE_DIRECT_LINK
static struct service_to_pipe target_service_to_ce_map_kiwi_direct_link[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 4, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
#endif
	{ LPASS_DATA_MSG_SVC, PIPEDIR_OUT, 0, },
	{ LPASS_DATA_MSG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif

static struct service_to_pipe target_service_to_ce_map_kiwi[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
#endif
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_kiwi[] = {
};
#endif

#ifdef QCA_WIFI_WCN6450
static struct service_to_pipe target_service_to_ce_map_wcn6450[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA2_MSG_SVC, PIPEDIR_OUT, 5, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 10, },
	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 11, },
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
#endif
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_wcn6450[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
#ifdef WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,  /* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{                       /* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},             /* Must be last */
};

void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
	*sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
}
#endif

#ifdef QCN7605_SUPPORT
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
}
#else
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	hif_err("QCN7605 not supported");
}
#endif

#ifdef QCA_WIFI_QCN9224
static
void hif_set_ce_config_qcn9224(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	hif_state->host_ce_config = host_ce_config_wlan_qcn9224;
	hif_state->target_ce_config = target_ce_config_wlan_qcn9224;
	hif_state->target_ce_config_sz =
				 sizeof(target_ce_config_wlan_qcn9224);
	scn->ce_count = QCN_9224_CE_COUNT;
	scn->ini_cfg.disable_wake_irq = 1;
}

static
void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_qcn9224;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn9224);
}
#else
static inline
void hif_set_ce_config_qcn9224(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	hif_err("QCN9224 not supported");
}

static inline
void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	hif_err("QCN9224 not supported");
}
#endif

#ifdef FEATURE_DIRECT_LINK
/**
 * hif_select_service_to_pipe_map_kiwi() - Select service to CE map
 *  configuration for Kiwi
 * @scn: HIF context
 * @tgt_svc_map_to_use: returned service map
 * @sz_tgt_svc_map_to_use: returned length of the service map
 *
 * Return: None
 */
static inline void
hif_select_service_to_pipe_map_kiwi(struct hif_softc *scn,
				    struct service_to_pipe **tgt_svc_map_to_use,
				    uint32_t *sz_tgt_svc_map_to_use)
{
	if (pld_is_direct_link_supported(scn->qdf_dev->dev)) {
		*tgt_svc_map_to_use = target_service_to_ce_map_kiwi_direct_link;
		*sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_kiwi_direct_link);
	} else {
		*tgt_svc_map_to_use = target_service_to_ce_map_kiwi;
		*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_kiwi);
	}
}
#else
static inline void
hif_select_service_to_pipe_map_kiwi(struct hif_softc *scn,
				    struct service_to_pipe **tgt_svc_map_to_use,
				    uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_kiwi;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_kiwi);
}
#endif

static void hif_select_service_to_pipe_map(struct hif_softc *scn,
				    struct service_to_pipe **tgt_svc_map_to_use,
				    uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
						      sz_tgt_svc_map_to_use);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_QCN7605:
			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
						  sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA6390:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6390);
			break;
		case TARGET_TYPE_QCA6490:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6490;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6490);
			break;
		case TARGET_TYPE_QCA6750:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6750;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6750);
			break;
		case TARGET_TYPE_KIWI:
		case TARGET_TYPE_MANGO:
		case TARGET_TYPE_PEACH:
			hif_select_service_to_pipe_map_kiwi(scn,
							 tgt_svc_map_to_use,
							 sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_WCN6450:
			*tgt_svc_map_to_use = target_service_to_ce_map_wcn6450;
			*sz_tgt_svc_map_to_use =
				 sizeof(target_service_to_ce_map_wcn6450);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		case TARGET_TYPE_QCA8074V2:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca8074_v2;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074_v2);
			break;
		case TARGET_TYPE_QCA9574:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca9574;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca9574);
			break;
		case TARGET_TYPE_QCA6018:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca6018;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6018);
			break;
		case TARGET_TYPE_QCN9000:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qcn9000;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qcn9000);
			break;
		case TARGET_TYPE_QCN9224:
			hif_select_ce_map_qcn9224(tgt_svc_map_to_use,
						  sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_QCA5332:
		case TARGET_TYPE_QCN6432:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca5332;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca5332);
			break;
		case TARGET_TYPE_QCA5018:
		case TARGET_TYPE_QCN6122:
		case TARGET_TYPE_QCN9160:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca5018;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca5018);
			break;
		}
	}
	hif_state->tgt_svc_map = *tgt_svc_map_to_use;
	hif_state->sz_tgt_svc_map = *sz_tgt_svc_map_to_use /
					sizeof(struct service_to_pipe);
}
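
/*
 * Usage note (illustrative, not part of the driver): callers get the map
 * size back in bytes and convert it to an entry count, exactly as
 * ce_mark_datapath() does below.
 */
#if 0
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;

	hif_select_service_to_pipe_map(scn, &svc_map, &map_sz);
	map_len = map_sz / sizeof(struct service_to_pipe);
#endif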

#ifndef QCA_WIFI_WCN6450
/**
 * ce_mark_datapath() - mark ce_state->htt_rx_data/htt_tx_data accordingly
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 *   Sets the htt_rx_data (or htt_tx_data) attribute of the state
 *   structure if the CE serves one of the HTT DATA services.
 *
 * Return: true if the CE serves an HTT DATA service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int    i;
	bool   rc = false;

	if (ce_state) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

static void ce_update_msi_batch_intr_flags(struct CE_state *ce_state)
{
}

static inline void ce_update_wrt_idx_offset(struct hif_softc *scn,
					    struct CE_state *ce_state,
					    struct CE_attr *attr)
{
}
1602  #else
ce_mark_datapath(struct CE_state * ce_state)1603  static bool ce_mark_datapath(struct CE_state *ce_state)
1604  {
1605  	struct service_to_pipe *svc_map;
1606  	uint32_t map_sz, map_len;
1607  	int i;
1608  
1609  	if (ce_state) {
1610  		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
1611  					       &map_sz);
1612  
1613  		map_len = map_sz / sizeof(struct service_to_pipe);
1614  		for (i = 0; i < map_len; i++) {
1615  			if ((svc_map[i].pipenum == ce_state->id) &&
1616  			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
1617  			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
1618  			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC)) &&
1619  			    (svc_map[i].pipedir == PIPEDIR_IN))
1620  				ce_state->htt_rx_data = true;
1621  			else if ((svc_map[i].pipenum == ce_state->id) &&
1622  				 (svc_map[i].service_id == HTT_DATA2_MSG_SVC) &&
1623  				 (svc_map[i].pipedir == PIPEDIR_OUT))
1624  				ce_state->htt_tx_data = true;
1625  		}
1626  	}
1627  
1628  	return (ce_state->htt_rx_data || ce_state->htt_tx_data);
1629  }
1630  
1631  static void ce_update_msi_batch_intr_flags(struct CE_state *ce_state)
1632  {
1633  	ce_state->msi_supported = true;
1634  	ce_state->batch_intr_supported = true;
1635  }
1636  
1637  static inline void ce_update_wrt_idx_offset(struct hif_softc *scn,
1638  					    struct CE_state *ce_state,
1639  					    struct CE_attr *attr)
1640  {
1641  	/* Do not setup CE write index offset for FW only CE rings */
1642  	if (!attr->src_nentries && !attr->dest_nentries)
1643  		return;
1644  
1645  	if (attr->src_nentries)
1646  		ce_state->ce_wrt_idx_offset =
1647  			CE_SRC_WR_IDX_OFFSET_GET(scn, ce_state->ctrl_addr);
1648  	else if (attr->dest_nentries)
1649  		ce_state->ce_wrt_idx_offset =
1650  			CE_DST_WR_IDX_OFFSET_GET(scn, ce_state->ctrl_addr);
1651  	else
1652  		QDF_BUG(0);
1653  }
1654  
1655  /**
1656   * hif_ce_print_ring_stats() - Print CE ring statistics
1657   *
1658   * @hif_ctx: hif context
1659   *
1660   * Return: None
1661   */
1662  void hif_ce_print_ring_stats(struct hif_opaque_softc *hif_ctx)
1663  {
1664  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1665  	struct CE_state *ce_state;
1666  	int i;
1667  
1668  	for (i = 0; i < scn->ce_count; i++) {
1669  		ce_state = scn->ce_id_to_state[i];
1670  		if (!ce_state)
1671  			continue;
1672  
1673  		if (ce_state->src_ring) {
1674  			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
1675  				  "ce%d:SW: sw_index %u write_index %u", i,
1676  				  ce_state->src_ring->sw_index,
1677  				  ce_state->src_ring->write_index);
1678  
1679  			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
1680  				  "ce%d:HW: read_index %u write_index %u", i,
1681  				  CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ce_state->ctrl_addr),
1682  				  CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ce_state->ctrl_addr));
1683  		}
1684  
1685  		if (ce_state->dest_ring) {
1686  			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
1687  				  "ce%d:SW: sw_index %u write_index %u", i,
1688  				  ce_state->dest_ring->sw_index,
1689  				  ce_state->dest_ring->write_index);
1690  
1691  			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
1692  				  "ce%d:HW: read_index %u write_index %u", i,
1693  				  CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ce_state->ctrl_addr),
1694  				  CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ce_state->ctrl_addr));
1695  		}
1696  	}
1697  }
1698  #endif
1699  
1700  /**
1701   * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
1702   * @hif_ctx: hif opaque handle
1703   *
1704   * Description:
1705   *   Gets the number of WMI EPs configured in the target svc map. Since
1706   *   the EP map includes both IN and OUT direction pipes, count only OUT
1707   *   pipes to get the EPs configured for the WMI service.
1708   *
1709   * Return:
1710   *  uint8_t: number of WMI EPs in the target svc map
1711   */
1712  uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *hif_ctx)
1713  {
1714  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1715  	struct service_to_pipe *svc_map;
1716  	uint32_t map_sz, map_len;
1717  	int    i;
1718  	uint8_t   wmi_ep_count = 0;
1719  
1720  	hif_select_service_to_pipe_map(scn, &svc_map,
1721  				       &map_sz);
1722  	map_len = map_sz / sizeof(struct service_to_pipe);
1723  
1724  	for (i = 0; i < map_len; i++) {
1725  		/* Count number of WMI EPs based on out direction */
1726  		if ((svc_map[i].pipedir == PIPEDIR_OUT) &&
1727  		    ((svc_map[i].service_id == WMI_CONTROL_SVC)  ||
1728  		    (svc_map[i].service_id == WMI_CONTROL_SVC_WMAC1) ||
1729  		    (svc_map[i].service_id == WMI_CONTROL_SVC_WMAC2))) {
1730  			wmi_ep_count++;
1731  		}
1732  	}
1733  
1734  	return wmi_ep_count;
1735  }
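/*
 * Worked example: for a svc map carrying WMI_CONTROL_SVC in both
 * directions plus WMI_CONTROL_SVC_WMAC1 in both directions, only the two
 * PIPEDIR_OUT entries are counted, so hif_get_max_wmi_ep() returns 2.
 */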
1736  
1737  /**
1738   * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
1739   * @ce_id: ce in question
1740   * @ring: ring state being examined
1741   * @type: "src_ring" or "dest_ring" string for identifying the ring
1742   *
1743   * Warns on non-zero index values.
1744   * Causes a kernel panic if the ring is not empty during initialization.
1745   */
1746  static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
1747  					 char *type)
1748  {
1749  	if (ring->write_index != 0 || ring->sw_index != 0)
1750  		hif_err("ce %d, %s, initial sw_index = %d, initial write_index = %d",
1751  			ce_id, type, ring->sw_index, ring->write_index);
1752  	if (ring->write_index != ring->sw_index)
1753  		QDF_BUG(0);
1754  }
1755  
1756  #ifdef IPA_OFFLOAD
1757  /**
1758   * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
1759   * @scn: softc instance
1760   * @CE_id: ce in question
1761   * @base_addr: pointer to copyengine ring base address
1762   * @ce_ring: copyengine instance
1763   * @nentries: number of entries should be allocated
1764   * @desc_size: ce desc size
1765   *
1766   * Return: QDF_STATUS_SUCCESS - for success
1767   */
1768  static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1769  				     qdf_dma_addr_t *base_addr,
1770  				     struct CE_ring_state *ce_ring,
1771  				     unsigned int nentries, uint32_t desc_size)
1772  {
1773  	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
1774  	    !ce_srng_based(scn)) {
1775  		if (!scn->ipa_ce_ring) {
1776  			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
1777  				scn->qdf_dev,
1778  				nentries * desc_size + CE_DESC_RING_ALIGN);
1779  			if (!scn->ipa_ce_ring) {
1780  				hif_err(
1781  				"Failed to allocate memory for IPA ce ring");
1782  				return QDF_STATUS_E_NOMEM;
1783  			}
1784  		}
1785  		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
1786  						&scn->ipa_ce_ring->mem_info);
1787  		ce_ring->base_addr_owner_space_unaligned =
1788  						scn->ipa_ce_ring->vaddr;
1789  	} else {
1790  		ce_ring->base_addr_owner_space_unaligned =
1791  			hif_mem_alloc_consistent_unaligned
1792  					(scn,
1793  					 (nentries * desc_size +
1794  					  CE_DESC_RING_ALIGN),
1795  					 base_addr,
1796  					 ce_ring->hal_ring_type,
1797  					 &ce_ring->is_ring_prealloc);
1798  
1799  		if (!ce_ring->base_addr_owner_space_unaligned) {
1800  			hif_err("Failed to allocate DMA memory for ce ring id: %u",
1801  			       CE_id);
1802  			return QDF_STATUS_E_NOMEM;
1803  		}
1804  	}
1805  	return QDF_STATUS_SUCCESS;
1806  }
1807  
1808  /**
1809   * ce_free_desc_ring() - Frees copyengine descriptor ring
1810   * @scn: softc instance
1811   * @CE_id: ce in question
1812   * @ce_ring: copyengine instance
1813   * @desc_size: ce desc size
1814   *
1815   * Return: None
1816   */
1817  static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1818  			      struct CE_ring_state *ce_ring, uint32_t desc_size)
1819  {
1820  	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
1821  	    !ce_srng_based(scn)) {
1822  		if (scn->ipa_ce_ring) {
1823  			qdf_mem_shared_mem_free(scn->qdf_dev,
1824  						scn->ipa_ce_ring);
1825  			scn->ipa_ce_ring = NULL;
1826  		}
1827  		ce_ring->base_addr_owner_space_unaligned = NULL;
1828  	} else {
1829  		hif_mem_free_consistent_unaligned
1830  			(scn,
1831  			 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
1832  			 ce_ring->base_addr_owner_space_unaligned,
1833  			 ce_ring->base_addr_CE_space, 0,
1834  			 ce_ring->is_ring_prealloc);
1835  		ce_ring->base_addr_owner_space_unaligned = NULL;
1836  	}
1837  }
1838  #else
1839  static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1840  				     qdf_dma_addr_t *base_addr,
1841  				     struct CE_ring_state *ce_ring,
1842  				     unsigned int nentries, uint32_t desc_size)
1843  {
1844  	ce_ring->base_addr_owner_space_unaligned =
1845  			hif_mem_alloc_consistent_unaligned
1846  					(scn,
1847  					 (nentries * desc_size +
1848  					  CE_DESC_RING_ALIGN),
1849  					 base_addr,
1850  					 ce_ring->hal_ring_type,
1851  					 &ce_ring->is_ring_prealloc);
1852  
1853  	if (!ce_ring->base_addr_owner_space_unaligned) {
1854  		hif_err("Failed to allocate DMA memory for ce ring id: %u",
1855  		       CE_id);
1856  		return QDF_STATUS_E_NOMEM;
1857  	}
1858  	return QDF_STATUS_SUCCESS;
1859  }
1860  
1861  static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1862  			      struct CE_ring_state *ce_ring, uint32_t desc_size)
1863  {
1864  	hif_mem_free_consistent_unaligned
1865  		(scn,
1866  		 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
1867  		 ce_ring->base_addr_owner_space_unaligned,
1868  		 ce_ring->base_addr_CE_space, 0,
1869  		 ce_ring->is_ring_prealloc);
1870  	ce_ring->base_addr_owner_space_unaligned = NULL;
1871  }
1872  #endif /* IPA_OFFLOAD */
1873  
1874  /*
1875   * TODO: Need to explore the possibility of having this as part of a
1876   * target context instead of a global array.
1877   */
1878  static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);
1879  
1880  void ce_service_register_module(enum ce_target_type target_type,
1881  				struct ce_ops* (*ce_attach)(void))
1882  {
1883  	if (target_type < CE_MAX_TARGET_TYPE)
1884  		ce_attach_register[target_type] = ce_attach;
1885  }
1886  
1887  qdf_export_symbol(ce_service_register_module);
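/*
 * A minimal registration sketch, assuming a hypothetical service module
 * with a constructor my_ce_services() that returns its ops table; the
 * real legacy/SRNG service implementations hook themselves in the same
 * way from their init paths:
 *
 *	static struct ce_ops my_ce_ops = { ... };
 *
 *	static struct ce_ops *my_ce_services(void)
 *	{
 *		return &my_ce_ops;
 *	}
 *
 *	ce_service_register_module(CE_SVC_LEGACY, my_ce_services);
 */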
1888  
1889  /**
1890   * ce_srng_based() - Does this target use srng
1891   * @scn: pointer to the state context of the CE
1892   *
1893   * Description:
1894   *   returns true if the target is SRNG based
1895   *
1896   * Return:
1897   *  true if the target is SRNG based
1898   *  false otherwise
1899   */
1900  bool ce_srng_based(struct hif_softc *scn)
1901  {
1902  	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1903  	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1904  
1905  	switch (tgt_info->target_type) {
1906  	case TARGET_TYPE_QCA8074:
1907  	case TARGET_TYPE_QCA8074V2:
1908  	case TARGET_TYPE_QCA6290:
1909  	case TARGET_TYPE_QCA6390:
1910  	case TARGET_TYPE_QCA6490:
1911  	case TARGET_TYPE_QCA6750:
1912  	case TARGET_TYPE_QCA6018:
1913  	case TARGET_TYPE_QCN9000:
1914  	case TARGET_TYPE_QCN6122:
1915  	case TARGET_TYPE_QCN9160:
1916  	case TARGET_TYPE_QCA5018:
1917  	case TARGET_TYPE_KIWI:
1918  	case TARGET_TYPE_MANGO:
1919  	case TARGET_TYPE_PEACH:
1920  	case TARGET_TYPE_QCN9224:
1921  	case TARGET_TYPE_QCA9574:
1922  	case TARGET_TYPE_QCA5332:
1923  	case TARGET_TYPE_QCN6432:
1924  		return true;
1925  	default:
1926  		return false;
1927  	}
1928  	return false;
1929  }
1930  qdf_export_symbol(ce_srng_based);
1931  
1932  #ifdef QCA_WIFI_SUPPORT_SRNG
1933  static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1934  {
1935  	struct ce_ops *ops = NULL;
1936  
1937  	if (ce_srng_based(scn)) {
1938  		if (ce_attach_register[CE_SVC_SRNG])
1939  			ops = ce_attach_register[CE_SVC_SRNG]();
1940  	} else if (ce_attach_register[CE_SVC_LEGACY]) {
1941  		ops = ce_attach_register[CE_SVC_LEGACY]();
1942  	}
1943  
1944  	return ops;
1945  }
1946  
1947  
1948  #else	/* QCA_WIFI_SUPPORT_SRNG */
1949  static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1950  {
1951  	if (ce_attach_register[CE_SVC_LEGACY])
1952  		return ce_attach_register[CE_SVC_LEGACY]();
1953  
1954  	return NULL;
1955  }
1956  #endif /* QCA_WIFI_SUPPORT_SRNG */
1957  
1958  static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
1959  		struct pld_shadow_reg_v2_cfg **shadow_config,
1960  		int *num_shadow_registers_configured)
1961  {
1962  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1963  
1964  	hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
1965  			scn, shadow_config, num_shadow_registers_configured);
1966  }
1968  
1969  #ifdef CONFIG_SHADOW_V3
1970  static inline void
1971  hif_prepare_hal_shadow_reg_cfg_v3(struct hif_softc *scn,
1972  				  struct pld_wlan_enable_cfg *cfg)
1973  {
1974  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1975  
1976  	if (!hif_state->ce_services->ce_prepare_shadow_register_v3_cfg)
1977  		return;
1978  
1979  	hif_state->ce_services->ce_prepare_shadow_register_v3_cfg(
1980  			scn, &cfg->shadow_reg_v3_cfg,
1981  			&cfg->num_shadow_reg_v3_cfg);
1982  }
1983  #else
1984  static inline void
1985  hif_prepare_hal_shadow_reg_cfg_v3(struct hif_softc *scn,
1986  				  struct pld_wlan_enable_cfg *cfg)
1987  {
1988  }
1989  #endif
1990  
1991  static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
1992  						uint8_t ring_type)
1993  {
1994  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1995  
1996  	return hif_state->ce_services->ce_get_desc_size(ring_type);
1997  }
1998  
1999  #ifdef QCA_WIFI_SUPPORT_SRNG
2000  static inline int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
2001  {
2002  	switch (ce_ring_type) {
2003  	case CE_RING_SRC:
2004  		return CE_SRC;
2005  	case CE_RING_DEST:
2006  		return CE_DST;
2007  	case CE_RING_STATUS:
2008  		return CE_DST_STATUS;
2009  	default:
2010  		return -EINVAL;
2011  	}
2012  }
2013  #else
2014  static int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
2015  {
2016  	return 0;
2017  }
2018  #endif
2019  static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
2020  		uint8_t ring_type, uint32_t nentries)
2021  {
2022  	uint32_t ce_nbytes;
2023  	char *ptr;
2024  	qdf_dma_addr_t base_addr;
2025  	struct CE_ring_state *ce_ring;
2026  	uint32_t desc_size;
2027  	struct hif_softc *scn = CE_state->scn;
2028  
2029  	ce_nbytes = sizeof(struct CE_ring_state)
2030  		+ (nentries * sizeof(void *));
2031  	ptr = qdf_mem_malloc(ce_nbytes);
2032  	if (!ptr)
2033  		return NULL;
2034  
2035  	ce_ring = (struct CE_ring_state *)ptr;
2036  	ptr += sizeof(struct CE_ring_state);
2037  	ce_ring->nentries = nentries;
2038  	ce_ring->nentries_mask = nentries - 1;
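	/* nentries is expected to be a power of 2 (ce_init() rounds up via
	 * roundup_pwr2()), so nentries_mask can replace a modulo when
	 * wrapping ring indexes: next = (idx + 1) & nentries_mask.
	 */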
2039  
2040  	ce_ring->low_water_mark_nentries = 0;
2041  	ce_ring->high_water_mark_nentries = nentries;
2042  	ce_ring->per_transfer_context = (void **)ptr;
2043  	ce_ring->hal_ring_type = ce_ring_type_to_hal_ring_type(ring_type);
2044  
2045  	desc_size = ce_get_desc_size(scn, ring_type);
2046  
2047  	/* Legacy platforms that do not support cache
2048  	 * coherent DMA are unsupported
2049  	 */
2050  	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
2051  			       ce_ring, nentries,
2052  			       desc_size) !=
2053  	    QDF_STATUS_SUCCESS) {
2054  		hif_err("ring has no DMA mem");
2055  		qdf_mem_free(ce_ring);
2056  		return NULL;
2057  	}
2058  	ce_ring->base_addr_CE_space_unaligned = base_addr;
2059  
2060  	/* Correctly initialize memory to 0 to
2061  	 * prevent garbage data from crashing the
2062  	 * system when downloading firmware
2063  	 */
2064  	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
2065  			nentries * desc_size +
2066  			CE_DESC_RING_ALIGN);
2067  
2068  	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
2069  
2070  		ce_ring->base_addr_CE_space =
2071  			(ce_ring->base_addr_CE_space_unaligned +
2072  			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
2073  
2074  		ce_ring->base_addr_owner_space = (void *)
2075  			(((size_t) ce_ring->base_addr_owner_space_unaligned +
2076  			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
2077  	} else {
2078  		ce_ring->base_addr_CE_space =
2079  				ce_ring->base_addr_CE_space_unaligned;
2080  		ce_ring->base_addr_owner_space =
2081  				ce_ring->base_addr_owner_space_unaligned;
2082  	}
2083  
2084  	return ce_ring;
2085  }
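/*
 * The alignment fix-up above uses the standard align-up idiom. A worked
 * example, assuming CE_DESC_RING_ALIGN is 8: an unaligned DMA address of
 * 0x1003 becomes (0x1003 + 7) & ~7 = 0x1008, and the owner-space virtual
 * address is advanced by the same amount so both views stay in step.
 */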
2086  
2087  static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
2088  			uint32_t ce_id, struct CE_ring_state *ring,
2089  			struct CE_attr *attr)
2090  {
2091  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2092  
2093  	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
2094  					      ring, attr);
2095  }
2096  
2097  static void ce_srng_cleanup(struct hif_softc *scn, struct CE_state *CE_state,
2098  			    uint8_t ring_type)
2099  {
2100  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2101  
2102  	if (hif_state->ce_services->ce_srng_cleanup)
2103  		hif_state->ce_services->ce_srng_cleanup(scn,
2104  					CE_state, ring_type);
2105  }
2106  
2107  int hif_ce_bus_early_suspend(struct hif_softc *scn)
2108  {
2109  	uint8_t ul_pipe, dl_pipe;
2110  	int ce_id, status, ul_is_polled, dl_is_polled;
2111  	struct CE_state *ce_state;
2112  
2113  	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
2114  					 &ul_pipe, &dl_pipe,
2115  					 &ul_is_polled, &dl_is_polled);
2116  	if (status) {
2117  		hif_err("pipe_mapping failure");
2118  		return status;
2119  	}
2120  
2121  	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2122  		if (ce_id == ul_pipe)
2123  			continue;
2124  		if (ce_id == dl_pipe)
2125  			continue;
2126  
2127  		ce_state = scn->ce_id_to_state[ce_id];
2128  		qdf_spin_lock_bh(&ce_state->ce_index_lock);
2129  		if (ce_state->state == CE_RUNNING)
2130  			ce_state->state = CE_PAUSED;
2131  		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
2132  	}
2133  
2134  	return status;
2135  }
2136  
2137  int hif_ce_bus_late_resume(struct hif_softc *scn)
2138  {
2139  	int ce_id;
2140  	struct CE_state *ce_state;
2141  	int write_index = 0;
2142  	bool index_updated;
2143  
2144  	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2145  		ce_state = scn->ce_id_to_state[ce_id];
2146  		qdf_spin_lock_bh(&ce_state->ce_index_lock);
2147  		if (ce_state->state == CE_PENDING) {
2148  			write_index = ce_state->src_ring->write_index;
2149  			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
2150  					write_index);
2151  			ce_state->state = CE_RUNNING;
2152  			index_updated = true;
2153  		} else {
2154  			index_updated = false;
2155  		}
2156  
2157  		if (ce_state->state == CE_PAUSED)
2158  			ce_state->state = CE_RUNNING;
2159  		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
2160  
2161  		if (index_updated)
2162  			hif_record_ce_desc_event(scn, ce_id,
2163  				RESUME_WRITE_INDEX_UPDATE,
2164  				NULL, NULL, write_index, 0);
2165  	}
2166  
2167  	return 0;
2168  }
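/*
 * Suspend/resume state transitions handled above, in brief: early suspend
 * parks every CE other than the WMI pipes as CE_RUNNING -> CE_PAUSED; a CE
 * found in CE_PENDING here (presumably marked by a send attempted while
 * paused) has its cached write index flushed to hardware before being
 * returned to CE_RUNNING.
 */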
2169  
2170  /**
2171   * ce_oom_recovery() - try to recover rx ce from oom condition
2172   * @context: CE_state of the CE with oom rx ring
2173   *
2174   * The executing work will continue to be rescheduled until
2175   * at least 1 descriptor is successfully posted to the rx ring.
2176   *
2177   * Return: none
2178   */
2179  static void ce_oom_recovery(void *context)
2180  {
2181  	struct CE_state *ce_state = context;
2182  	struct hif_softc *scn = ce_state->scn;
2183  	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
2184  	struct HIF_CE_pipe_info *pipe_info =
2185  		&ce_softc->pipe_info[ce_state->id];
2186  
2187  	hif_post_recv_buffers_for_pipe(pipe_info);
2188  
2189  	qdf_atomic_dec(&scn->active_oom_work_cnt);
2190  }
2191  
2192  #ifdef HIF_CE_DEBUG_DATA_BUF
2193  /**
2194   * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed to by
2195   * the CE descriptors.
2196   * Allocate HIF_CE_HISTORY_MAX records by CE_DEBUG_MAX_DATA_BUF_SIZE
2197   * @scn: hif scn handle
2198   * @ce_id: Copy Engine Id
2199   *
2200   * Return: QDF_STATUS
2201   */
2202  QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
2203  {
2204  	struct hif_ce_desc_event *event = NULL;
2205  	struct hif_ce_desc_event *hist_ev = NULL;
2206  	uint32_t index = 0;
2207  
2208  	hist_ev =
2209  	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
2210  
2211  	if (!hist_ev)
2212  		return QDF_STATUS_E_NOMEM;
2213  
2214  	scn->hif_ce_desc_hist.data_enable[ce_id] = true;
2215  	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
2216  		event = &hist_ev[index];
2217  		event->data =
2218  			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
2219  		if (!event->data) {
2220  			hif_err_rl("ce debug data alloc failed");
2221  			scn->hif_ce_desc_hist.data_enable[ce_id] = false;
2222  			return QDF_STATUS_E_NOMEM;
2223  		}
2224  	}
2225  	return QDF_STATUS_SUCCESS;
2226  }
2227  
2228  /**
2229   * free_mem_ce_debug_hist_data() - Free mem of the data pointed to by
2230   * the CE descriptors.
2231   * @scn: hif scn handle
2232   * @ce_id: Copy Engine Id
2233   *
2234   * Return: None
2235   */
2236  void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
2237  {
2238  	struct hif_ce_desc_event *event = NULL;
2239  	struct hif_ce_desc_event *hist_ev = NULL;
2240  	uint32_t index = 0;
2241  
2242  	hist_ev =
2243  	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
2244  
2245  	if (!hist_ev)
2246  		return;
2247  
2248  	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
2249  		event = &hist_ev[index];
2250  		if (event->data)
2251  			qdf_mem_free(event->data);
2252  		event->data = NULL;
2253  		event = NULL;
2254  	}
2255  
2256  }
2257  #endif /* HIF_CE_DEBUG_DATA_BUF */
2258  
2259  #ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF
2260  #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2261  
2262  /* define below variables for crashscope parse */
2263  struct hif_ce_desc_event *hif_ce_desc_history[CE_COUNT_MAX];
2264  uint32_t hif_ce_history_max = HIF_CE_HISTORY_MAX;
2265  uint32_t hif_ce_count_max = CE_COUNT_MAX;
2266  
2267  /*
2268   * For a debug build, CE history is enabled for all CEs, but for a
2269   * perf build (if CONFIG_SLUB_DEBUG_ON is N), it is enabled only for
2270   * ce2 (wmi event) & ce3 (wmi cmd) history.
2271   */
2272  #if defined(CONFIG_SLUB_DEBUG_ON)
2273  #define CE_DESC_HISTORY_BUFF_CNT  CE_COUNT_MAX
2274  #define IS_CE_DEBUG_ONLY_FOR_CRIT_CE  0
2275  #else
2276  
2277  #ifdef QCA_WIFI_SUPPORT_SRNG
2278  /* Enable CE-1 history only on targets not using CE-1 for datapath */
2279  #define CE_DESC_HISTORY_BUFF_CNT  4
2280  #define IS_CE_DEBUG_ONLY_FOR_CRIT_CE (BIT(1) | BIT(2) | BIT(3) | BIT(7))
2281  #else
2282  /* CE2, CE3, CE7 */
2283  #define CE_DESC_HISTORY_BUFF_CNT  3
2284  #define IS_CE_DEBUG_ONLY_FOR_CRIT_CE (BIT(2) | BIT(3) | BIT(7))
2285  #endif /* QCA_WIFI_SUPPORT_SRNG */
2286  #endif
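/*
 * Worked example: with IS_CE_DEBUG_ONLY_FOR_CRIT_CE set to
 * (BIT(1) | BIT(2) | BIT(3) | BIT(7)) the mask is 0x8E, i.e. history is
 * kept only for CE1, CE2, CE3 and CE7, matching the four buffers in
 * CE_DESC_HISTORY_BUFF_CNT above.
 */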
2287  bool hif_ce_only_for_crit = IS_CE_DEBUG_ONLY_FOR_CRIT_CE;
2288  struct hif_ce_desc_event
2289  	hif_ce_desc_history_buff[CE_DESC_HISTORY_BUFF_CNT][HIF_CE_HISTORY_MAX];
2290  
2291  static void
2292  __hif_ce_desc_history_log_register(struct hif_softc *scn)
2293  {
2294  	qdf_ssr_driver_dump_register_region("hif_ce_desc_history_buff",
2295  					    hif_ce_desc_history_buff,
2296  					    sizeof(hif_ce_desc_history_buff));
2297  	qdf_ssr_driver_dump_register_region("hif_ce_desc_hist",
2298  					    &scn->hif_ce_desc_hist,
2299  					    sizeof(scn->hif_ce_desc_hist));
2300  	qdf_ssr_driver_dump_register_region("hif_ce_count_max",
2301  					    &hif_ce_count_max,
2302  					    sizeof(hif_ce_count_max));
2303  	qdf_ssr_driver_dump_register_region("hif_ce_history_max",
2304  					    &hif_ce_history_max,
2305  					    sizeof(hif_ce_history_max));
2306  	qdf_ssr_driver_dump_register_region("hif_ce_only_for_crit",
2307  					    &hif_ce_only_for_crit,
2308  					    sizeof(hif_ce_only_for_crit));
2309  }
2310  
2311  static void __hif_ce_desc_history_log_unregister(void)
2312  {
2313  	qdf_ssr_driver_dump_unregister_region("hif_ce_only_for_crit");
2314  	qdf_ssr_driver_dump_unregister_region("hif_ce_history_max");
2315  	qdf_ssr_driver_dump_unregister_region("hif_ce_count_max");
2316  	qdf_ssr_driver_dump_unregister_region("hif_ce_desc_hist");
2317  	qdf_ssr_driver_dump_unregister_region("hif_ce_desc_history_buff");
2318  }
2319  
2320  static struct hif_ce_desc_event *
2321  	hif_ce_debug_history_buf_get(struct hif_softc *scn, unsigned int ce_id)
2322  {
2323  	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2324  
2325  	hif_debug("get ce debug buffer ce_id %u, only_ce2/ce3=0x%lx, idx=%u",
2326  		  ce_id, IS_CE_DEBUG_ONLY_FOR_CRIT_CE,
2327  		  ce_hist->ce_id_hist_map[ce_id]);
2328  	if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE &&
2329  	    (IS_CE_DEBUG_ONLY_FOR_CRIT_CE & BIT(ce_id))) {
2330  		uint8_t idx = ce_hist->ce_id_hist_map[ce_id];
2331  
2332  		hif_ce_desc_history[ce_id] = hif_ce_desc_history_buff[idx];
2333  	} else {
2334  		hif_ce_desc_history[ce_id] =
2335  			hif_ce_desc_history_buff[ce_id];
2336  	}
2337  
2338  	return hif_ce_desc_history[ce_id];
2339  }
2340  
2341  /**
2342   * alloc_mem_ce_debug_history() - Allocate CE descriptor history
2343   * @scn: hif scn handle
2344   * @ce_id: Copy Engine Id
2345   * @src_nentries: source ce ring entries
2346   * Return: QDF_STATUS
2347   */
2348  static QDF_STATUS
2349  alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id,
2350  			   uint32_t src_nentries)
2351  {
2352  	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2353  	QDF_STATUS status = QDF_STATUS_SUCCESS;
2354  
2355  	/* For perf build, return directly for non ce2/ce3 */
2356  	if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE &&
2357  	    !(IS_CE_DEBUG_ONLY_FOR_CRIT_CE & BIT(ce_id))) {
2358  		ce_hist->enable[ce_id] = false;
2359  		ce_hist->data_enable[ce_id] = false;
2360  		return QDF_STATUS_SUCCESS;
2361  	}
2362  
2363  	ce_hist->hist_ev[ce_id] = hif_ce_debug_history_buf_get(scn, ce_id);
2364  	ce_hist->enable[ce_id] = true;
2365  
2366  	if (src_nentries) {
2367  		status = alloc_mem_ce_debug_hist_data(scn, ce_id);
2368  		if (status != QDF_STATUS_SUCCESS) {
2369  			ce_hist->enable[ce_id] = false;
2370  			ce_hist->hist_ev[ce_id] = NULL;
2371  			return status;
2372  		}
2373  	} else {
2374  		ce_hist->data_enable[ce_id] = false;
2375  	}
2376  
2377  	return QDF_STATUS_SUCCESS;
2378  }
2379  
2380  /**
2381   * free_mem_ce_debug_history() - Free CE descriptor history
2382   * @scn: hif scn handle
2383   * @ce_id: Copy Engine Id
2384   *
2385   * Return: None
2386   */
2387  static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
2388  {
2389  	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2390  
2391  	if (!ce_hist->enable[ce_id])
2392  		return;
2393  
2394  	ce_hist->enable[ce_id] = false;
2395  	if (ce_hist->data_enable[ce_id]) {
2396  		ce_hist->data_enable[ce_id] = false;
2397  		free_mem_ce_debug_hist_data(scn, ce_id);
2398  	}
2399  	ce_hist->hist_ev[ce_id] = NULL;
2400  }
2401  #else
2402  
2403  static void
2404  __hif_ce_desc_history_log_register(struct hif_softc *scn)
2405  {
2406  }
2407  
2408  static void __hif_ce_desc_history_log_unregister(void) { }
2409  
2410  static inline QDF_STATUS
2411  alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
2412  			   uint32_t src_nentries)
2413  {
2414  	return QDF_STATUS_SUCCESS;
2415  }
2416  
2417  static inline void
2418  free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
2419  #endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */
2420  #else
2421  #if defined(HIF_CE_DEBUG_DATA_BUF)
2422  
2423  static void
2424  __hif_ce_desc_history_log_register(struct hif_softc *scn)
2425  {
2426  }
2427  
2428  static void __hif_ce_desc_history_log_unregister(void) { }
2429  
2430  static QDF_STATUS
2431  alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
2432  			   uint32_t src_nentries)
2433  {
2434  	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
2435  	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
2436  
2437  	if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
2438  		scn->hif_ce_desc_hist.enable[CE_id] = 0;
2439  		return QDF_STATUS_E_NOMEM;
2440  	} else {
2441  		scn->hif_ce_desc_hist.enable[CE_id] = 1;
2442  		return QDF_STATUS_SUCCESS;
2443  	}
2444  }
2445  
2446  static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
2447  {
2448  	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2449  	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];
2450  
2451  	if (!hist_ev)
2452  		return;
2453  
2454  	if (ce_hist->data_enable[CE_id]) {
2455  		ce_hist->data_enable[CE_id] = false;
2456  		free_mem_ce_debug_hist_data(scn, CE_id);
2457  	}
2458  
2459  	ce_hist->enable[CE_id] = false;
2460  	qdf_mem_free(ce_hist->hist_ev[CE_id]);
2461  	ce_hist->hist_ev[CE_id] = NULL;
2462  }
2463  
2464  #else
2465  
2466  static void
2467  __hif_ce_desc_history_log_register(struct hif_softc *scn)
2468  {
2469  }
2470  
2471  static void __hif_ce_desc_history_log_unregister(void) { }
2472  
2473  static inline QDF_STATUS
2474  alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
2475  			   uint32_t src_nentries)
2476  {
2477  	return QDF_STATUS_SUCCESS;
2478  }
2479  
2480  static inline void
2481  free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
2482  #endif /* HIF_CE_DEBUG_DATA_BUF */
2483  #endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */
2484  
2485  #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2486  /**
2487   * reset_ce_debug_history() - reset the index and ce id used for dumping the
2488   * CE records on the console using sysfs.
2489   * @scn: hif scn handle
2490   *
2491   * Return: None
2492   */
2493  static inline void reset_ce_debug_history(struct hif_softc *scn)
2494  {
2495  	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2496  	/* Initialise the CE debug history sysfs interface inputs (ce_id and
2497  	 * index). Disable data storing.
2498  	 */
2499  	ce_hist->hist_index = 0;
2500  	ce_hist->hist_id = 0;
2501  }
2502  #else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
2503  static inline void reset_ce_debug_history(struct hif_softc *scn) { }
2504  #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
2505  
2506  void ce_enable_polling(void *cestate)
2507  {
2508  	struct CE_state *CE_state = (struct CE_state *)cestate;
2509  
2510  	if (CE_state && (CE_state->attr_flags & CE_ATTR_ENABLE_POLL))
2511  		CE_state->timer_inited = true;
2512  }
2513  
2514  void ce_disable_polling(void *cestate)
2515  {
2516  	struct CE_state *CE_state = (struct CE_state *)cestate;
2517  
2518  	if (CE_state && (CE_state->attr_flags & CE_ATTR_ENABLE_POLL))
2519  		CE_state->timer_inited = false;
2520  }
2521  
2522  #ifdef WLAN_FEATURE_SSR_DRIVER_DUMP
2523  #define MAX_CE_STR_LEN 50
2524  /**
2525   * ce_ring_dump_register_region() - Register CE ring with SSR dump
2526   * @CE_state: CE_state pointer
2527   * @CE_id: CE id
2528   *
2529   * Return: None
2530   */
2531  static inline
2532  void ce_ring_dump_register_region(struct CE_state *CE_state, unsigned int CE_id)
2533  {
2534  	struct CE_ring_state *ce_ring;
2535  	char ce[MAX_CE_STR_LEN];
2536  	char CE_ring_state[MAX_CE_STR_LEN];
2537  	char srng[MAX_CE_STR_LEN];
2538  
2539  	qdf_snprint(ce, MAX_CE_STR_LEN, "%s%d", "ce_", CE_id);
2540  	qdf_ssr_driver_dump_register_region(ce, CE_state, sizeof(*CE_state));
2541  
2542  	if (CE_state->status_ring) {
2543  		ce_ring = CE_state->status_ring;
2544  		qdf_snprint(CE_ring_state, MAX_CE_STR_LEN,
2545  			    "%s%s", ce, "_status_ring");
2546  		qdf_ssr_driver_dump_register_region(CE_ring_state, ce_ring,
2547  						    sizeof(struct CE_ring_state)
2548  						   );
2549  		qdf_snprint(srng, MAX_CE_STR_LEN,
2550  			    "%s%s", CE_ring_state, "_ctx");
2551  		qdf_ssr_driver_dump_register_region(srng, ce_ring->srng_ctx,
2552  						    sizeof(struct hal_srng));
2553  	}
2554  	if (CE_state->dest_ring) {
2555  		ce_ring = CE_state->dest_ring;
2556  		qdf_snprint(CE_ring_state, MAX_CE_STR_LEN,
2557  			    "%s%s", ce, "_dest_ring");
2558  		qdf_ssr_driver_dump_register_region(CE_ring_state, ce_ring,
2559  						    sizeof(struct CE_ring_state)
2560  						   );
2561  		qdf_snprint(srng, MAX_CE_STR_LEN,
2562  			    "%s%s", CE_ring_state, "_ctx");
2563  		qdf_ssr_driver_dump_register_region(srng, ce_ring->srng_ctx,
2564  						    sizeof(struct hal_srng));
2565  	}
2566  	if (CE_state->src_ring) {
2567  		ce_ring = CE_state->src_ring;
2568  		qdf_snprint(CE_ring_state, MAX_CE_STR_LEN,
2569  			    "%s%s", ce, "_src_ring");
2570  		qdf_ssr_driver_dump_register_region(CE_ring_state, ce_ring,
2571  						    sizeof(struct CE_ring_state)
2572  						   );
2573  		qdf_snprint(srng, MAX_CE_STR_LEN,
2574  			    "%s%s", CE_ring_state, "_ctx");
2575  		qdf_ssr_driver_dump_register_region(srng, ce_ring->srng_ctx,
2576  						    sizeof(struct hal_srng));
2577  	}
2578  }
2579  
2580  /**
2581   * ce_ring_dump_unregister_region() - Unregister CE ring with SSR dump
2582   * @CE_state: CE_state pointer
2583   * @CE_id: CE id
2584   *
2585   * Return: None
2586   */
2587  static inline void
2588  ce_ring_dump_unregister_region(struct CE_state *CE_state, unsigned int CE_id)
2589  {
2590  	char ce[MAX_CE_STR_LEN];
2591  	char CE_ring_state[MAX_CE_STR_LEN];
2592  	char srng[MAX_CE_STR_LEN];
2593  
2594  	qdf_snprint(ce, MAX_CE_STR_LEN, "%s%d", "ce_", CE_id);
2595  	qdf_ssr_driver_dump_unregister_region(ce);
2596  	if (CE_state->status_ring) {
2597  		qdf_snprint(CE_ring_state, MAX_CE_STR_LEN,
2598  			    "%s%s", ce, "_status_ring");
2599  		qdf_snprint(srng, MAX_CE_STR_LEN,
2600  			    "%s%s", CE_ring_state, "_ctx");
2601  		qdf_ssr_driver_dump_unregister_region(CE_ring_state);
2602  		qdf_ssr_driver_dump_unregister_region(srng);
2603  	}
2604  	if (CE_state->dest_ring) {
2605  		qdf_snprint(CE_ring_state, MAX_CE_STR_LEN,
2606  			    "%s%s", ce, "_dest_ring");
2607  		qdf_snprint(srng, MAX_CE_STR_LEN,
2608  			    "%s%s", CE_ring_state, "_ctx");
2609  		qdf_ssr_driver_dump_unregister_region(CE_ring_state);
2610  		qdf_ssr_driver_dump_unregister_region(srng);
2611  	}
2612  	if (CE_state->src_ring) {
2613  		qdf_snprint(CE_ring_state, MAX_CE_STR_LEN,
2614  			    "%s%s", ce, "_src_ring");
2615  		qdf_snprint(srng, MAX_CE_STR_LEN,
2616  			    "%s%s", CE_ring_state, "_ctx");
2617  		qdf_ssr_driver_dump_unregister_region(CE_ring_state);
2618  		qdf_ssr_driver_dump_unregister_region(srng);
2619  	}
2620  }
2621  #else
2622  static inline
2623  void ce_ring_dump_register_region(struct CE_state *CE_state, unsigned int CE_id)
2624  {
2625  }
2626  
2627  static inline void
2628  ce_ring_dump_unregister_region(struct CE_state *CE_state, unsigned int CE_id)
2629  {
2630  }
2631  #endif
2632  /*
2633   * Initialize a Copy Engine based on caller-supplied attributes.
2634   * This may be called once to initialize both source and destination
2635   * rings or it may be called twice for separate source and destination
2636   * initialization. It may be that only one side or the other is
2637   * initialized by software/firmware.
2638   *
2639   * This should be called during the initialization sequence before
2640   * interrupts are enabled, so we don't have to worry about thread safety.
2641   */
2642  struct CE_handle *ce_init(struct hif_softc *scn,
2643  			  unsigned int CE_id, struct CE_attr *attr)
2644  {
2645  	struct CE_state *CE_state;
2646  	uint32_t ctrl_addr;
2647  	unsigned int nentries;
2648  	bool malloc_CE_state = false;
2649  	bool malloc_src_ring = false;
2650  	int status;
2651  	QDF_STATUS mem_status = QDF_STATUS_SUCCESS;
2652  
2653  	QDF_ASSERT(CE_id < scn->ce_count);
2654  	ctrl_addr = CE_BASE_ADDRESS(CE_id);
2655  	CE_state = scn->ce_id_to_state[CE_id];
2656  
2657  	if (!CE_state) {
2658  		CE_state =
2659  		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
2660  		if (!CE_state)
2661  			return NULL;
2662  
2663  		malloc_CE_state = true;
2664  		qdf_spinlock_create(&CE_state->ce_index_lock);
2665  #ifdef CE_TASKLET_SCHEDULE_ON_FULL
2666  		qdf_spinlock_create(&CE_state->ce_interrupt_lock);
2667  #endif
2668  
2669  		CE_state->id = CE_id;
2670  		CE_state->ctrl_addr = ctrl_addr;
2671  		CE_state->state = CE_RUNNING;
2672  		CE_state->attr_flags = attr->flags;
2673  	}
2674  	CE_state->scn = scn;
2675  	CE_state->service = ce_engine_service_reg;
2676  
2677  	qdf_atomic_init(&CE_state->rx_pending);
2678  	if (!attr) {
2679  		/* Already initialized; caller wants the handle */
2680  		return (struct CE_handle *)CE_state;
2681  	}
2682  
2683  	if (CE_state->src_sz_max)
2684  		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
2685  	else
2686  		CE_state->src_sz_max = attr->src_sz_max;
2687  
2688  	ce_init_ce_desc_event_log(scn, CE_id,
2689  				  attr->src_nentries + attr->dest_nentries);
2690  
2691  	/* source ring setup */
2692  	nentries = attr->src_nentries;
2693  	if (nentries) {
2694  		struct CE_ring_state *src_ring;
2695  
2696  		nentries = roundup_pwr2(nentries);
2697  		if (CE_state->src_ring) {
2698  			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
2699  		} else {
2700  			src_ring = CE_state->src_ring =
2701  				ce_alloc_ring_state(CE_state,
2702  						CE_RING_SRC,
2703  						nentries);
2704  			if (!src_ring) {
2705  				/* Cannot allocate src ring. If the
2706  				 * CE_state was allocated locally, free
2707  				 * CE_state and return an error.
2708  				 */
2709  				hif_err("src ring has no mem");
2710  				if (malloc_CE_state) {
2711  					/* allocated CE_state locally */
2712  					qdf_mem_free(CE_state);
2713  					malloc_CE_state = false;
2714  				}
2715  				return NULL;
2716  			}
2717  			/* we can allocate src ring. Mark that the src ring is
2718  			 * allocated locally
2719  			 */
2720  			malloc_src_ring = true;
2721  
2722  			/*
2723  			 * Also allocate a shadow src ring in
2724  			 * regular mem to use for faster access.
2725  			 */
2726  			src_ring->shadow_base_unaligned =
2727  				qdf_mem_malloc(nentries *
2728  					       sizeof(struct CE_src_desc) +
2729  					       CE_DESC_RING_ALIGN);
2730  			if (!src_ring->shadow_base_unaligned)
2731  				goto error_no_dma_mem;
2732  
2733  			src_ring->shadow_base = (struct CE_src_desc *)
2734  				(((size_t) src_ring->shadow_base_unaligned +
2735  				CE_DESC_RING_ALIGN - 1) &
2736  				 ~(CE_DESC_RING_ALIGN - 1));
2737  
2738  			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
2739  					       src_ring, attr);
2740  			if (status < 0)
2741  				goto error_target_access;
2742  			ce_ring_test_initial_indexes(CE_id, src_ring,
2743  						     "src_ring");
2744  			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
2745  				qdf_timer_init(scn->qdf_dev,
2746  					       &CE_state->poll_timer,
2747  					       ce_poll_timeout,
2748  					       CE_state,
2749  					       QDF_TIMER_TYPE_WAKE_APPS);
2750  				ce_enable_polling(CE_state);
2751  				qdf_timer_mod(&CE_state->poll_timer,
2752  					      CE_POLL_TIMEOUT);
2753  			}
2754  		}
2755  	}
2756  
2757  	/* destination ring setup */
2758  	nentries = attr->dest_nentries;
2759  	if (nentries) {
2760  		struct CE_ring_state *dest_ring;
2761  
2762  		nentries = roundup_pwr2(nentries);
2763  		if (CE_state->dest_ring) {
2764  			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
2765  		} else {
2766  			dest_ring = CE_state->dest_ring =
2767  				ce_alloc_ring_state(CE_state,
2768  						CE_RING_DEST,
2769  						nentries);
2770  			if (!dest_ring) {
2771  				/* Cannot allocate dest ring. If the CE_state
2772  				 * or src ring was allocated locally, free
2773  				 * them and return an error.
2774  				 */
2775  				hif_err("dest ring has no mem");
2776  				goto error_no_dma_mem;
2777  			}
2778  
2779  			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
2780  				      dest_ring, attr);
2781  			if (status < 0)
2782  				goto error_target_access;
2783  
2784  			ce_ring_test_initial_indexes(CE_id, dest_ring,
2785  						     "dest_ring");
2786  
2787  			/* For srng based target, init status ring here */
2788  			if (ce_srng_based(CE_state->scn)) {
2789  				CE_state->status_ring =
2790  					ce_alloc_ring_state(CE_state,
2791  							CE_RING_STATUS,
2792  							nentries);
2793  				if (!CE_state->status_ring) {
2794  					/*Allocation failed. Cleanup*/
2795  					qdf_mem_free(CE_state->dest_ring);
2796  					if (malloc_src_ring) {
2797  						qdf_mem_free
2798  							(CE_state->src_ring);
2799  						CE_state->src_ring = NULL;
2800  						malloc_src_ring = false;
2801  					}
2802  					if (malloc_CE_state) {
2803  						/* allocated CE_state locally */
2804  						scn->ce_id_to_state[CE_id] =
2805  							NULL;
2806  						qdf_mem_free(CE_state);
2807  						malloc_CE_state = false;
2808  					}
2809  
2810  					return NULL;
2811  				}
2812  
2813  				status = ce_ring_setup(scn, CE_RING_STATUS,
2814  					       CE_id, CE_state->status_ring,
2815  					       attr);
2816  				if (status < 0)
2817  					goto error_target_access;
2818  
2819  			}
2820  
2821  			/* epping */
2822  			/* poll timer */
2823  			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
2824  				qdf_timer_init(scn->qdf_dev,
2825  						&CE_state->poll_timer,
2826  						ce_poll_timeout,
2827  						CE_state,
2828  						QDF_TIMER_TYPE_WAKE_APPS);
2829  				ce_enable_polling(CE_state);
2830  				qdf_timer_mod(&CE_state->poll_timer,
2831  						      CE_POLL_TIMEOUT);
2832  			}
2833  		}
2834  	}
2835  
2836  	if (!ce_srng_based(scn)) {
2837  		/* Enable CE error interrupts */
2838  		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2839  			goto error_target_access;
2840  		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
2841  		if (Q_TARGET_ACCESS_END(scn) < 0)
2842  			goto error_target_access;
2843  	}
2844  
2845  	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
2846  			ce_oom_recovery, CE_state);
2847  
2848  	/* update the htt_data attribute */
2849  	ce_mark_datapath(CE_state);
2850  	scn->ce_id_to_state[CE_id] = CE_state;
2851  
2852  	ce_ring_dump_register_region(CE_state, CE_id);
2853  
2854  	mem_status = alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries);
2855  	if (mem_status != QDF_STATUS_SUCCESS)
2856  		goto error_target_access;
2857  
2858  	ce_update_msi_batch_intr_flags(CE_state);
2859  	ce_update_wrt_idx_offset(scn, CE_state, attr);
2860  
2861  	return (struct CE_handle *)CE_state;
2862  
2863  error_target_access:
2864  error_no_dma_mem:
2865  	ce_fini((struct CE_handle *)CE_state);
2866  	return NULL;
2867  }
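/*
 * A minimal usage sketch for ce_init(), assuming the caller walks the
 * host CE configuration the way the HIF pipe-config path does (names
 * here are illustrative):
 *
 *	struct CE_attr *attr = &hif_state->host_ce_config[CE_id];
 *	struct CE_handle *ce_hdl = ce_init(scn, CE_id, attr);
 *
 *	if (!ce_hdl)
 *		goto err;	(ring or state allocation failed)
 *
 * Passing attr == NULL for an already-initialized CE just returns the
 * existing handle without touching the rings.
 */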
2868  
2869  void hif_ce_desc_history_log_register(struct hif_softc *scn)
2870  {
2871  	__hif_ce_desc_history_log_register(scn);
2872  }
2873  
2874  /**
2875   * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
2876   * @hif_ctx: HIF Context
2877   *
2878   * API to check if polling is enabled on all CEs. Returns true when polling
2879   * is enabled on all CEs.
2880   *
2881   * Return: bool
2882   */
2883  bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
2884  {
2885  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2886  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2887  	struct CE_attr *attr;
2888  	int id;
2889  
2890  	for (id = 0; id < scn->ce_count; id++) {
2891  		attr = &hif_state->host_ce_config[id];
2892  		if (attr && (attr->dest_nentries) &&
2893  		    !(attr->flags & CE_ATTR_ENABLE_POLL))
2894  			return false;
2895  	}
2896  	return true;
2897  }
2898  qdf_export_symbol(hif_is_polled_mode_enabled);
2899  
2900  static int hif_get_pktlog_ce_num(struct hif_softc *scn)
2901  {
2902  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2903  	int id;
2904  
2905  	for (id = 0; id < hif_state->sz_tgt_svc_map; id++) {
2906  		if (hif_state->tgt_svc_map[id].service_id ==  PACKET_LOG_SVC)
2907  			return hif_state->tgt_svc_map[id].pipenum;
2908  	}
2909  	return -EINVAL;
2910  }
2911  
2912  #ifdef WLAN_FEATURE_CE_RX_BUFFER_REUSE
2913  
2914  #define HIF_CE_RX_NBUF_WMI_POOL_SIZE 32
2915  
2916  static qdf_nbuf_t hif_ce_rx_nbuf_alloc(struct hif_softc *scn, uint8_t ce_id)
2917  {
2918  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2919  	struct HIF_CE_pipe_info *pipe_info = &hif_state->pipe_info[ce_id];
2920  	qdf_nbuf_t nbuf;
2921  
2922  	nbuf = wbuff_buff_get(scn->wbuff_handle, ce_id, 0, __func__,
2923  			      __LINE__);
2924  	if (!nbuf)
2925  		nbuf = qdf_nbuf_alloc(scn->qdf_dev, pipe_info->buf_sz,
2926  				      0, 4, false);
2927  
2928  	if (!nbuf)
2929  		return NULL;
2930  
2931  	return nbuf;
2932  }
2933  
2934  static void hif_ce_rx_nbuf_free(qdf_nbuf_t nbuf)
2935  {
2936  	nbuf = wbuff_buff_put(nbuf);
2937  	if (nbuf)
2938  		qdf_nbuf_free(nbuf);
2939  }
2940  
2941  static int
2942  hif_calc_wbuff_pool_size(struct hif_softc *scn, struct CE_state *ce_state)
2943  {
2944  	int ul_is_polled, dl_is_polled;
2945  	bool is_wmi_svc, wmi_diag_svc;
2946  	uint8_t ul_pipe, dl_pipe;
2947  	int pool_size;
2948  	int status;
2949  	int ce_id;
2950  
2951  	if (!ce_state)
2952  		return 0;
2953  
2954  	ce_id = ce_state->id;
2955  
2956  	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
2957  					 &ul_pipe, &dl_pipe,
2958  					 &ul_is_polled, &dl_is_polled);
2959  	is_wmi_svc = !status && (dl_pipe == ce_id);
2960  
2961  	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
2962  					 WMI_CONTROL_DIAG_SVC,
2963  					 &ul_pipe, &dl_pipe,
2964  					 &ul_is_polled, &dl_is_polled);
2965  	wmi_diag_svc = !status;
2966  
2967  	if (is_wmi_svc && !wmi_diag_svc)
2968  		pool_size = ce_state->dest_ring->nentries +
2969  			HIF_CE_RX_NBUF_WMI_POOL_SIZE;
2970  	else if (is_wmi_svc && wmi_diag_svc)
2971  		pool_size = ce_state->dest_ring->nentries +
2972  			HIF_CE_RX_NBUF_WMI_POOL_SIZE / 2;
2973  	else if (!is_wmi_svc && wmi_diag_svc && ce_id == dl_pipe)
2974  		pool_size = ce_state->dest_ring->nentries +
2975  			HIF_CE_RX_NBUF_WMI_POOL_SIZE / 2;
2976  	else
2977  		pool_size = ce_state->dest_ring->nentries;
2978  
2979  	return pool_size;
2980  }
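/*
 * Worked example, assuming HIF_CE_RX_NBUF_WMI_POOL_SIZE of 32 and a WMI
 * dest ring of 512 entries: a WMI CE with no separate DIAG service gets
 * 512 + 32 = 544 buffers; with a DIAG service present, the extra
 * headroom is split, giving 512 + 16 = 528 for each of the two pipes;
 * any other CE gets exactly its dest_ring->nentries.
 */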
2981  
2982  static void hif_ce_rx_wbuff_register(struct hif_softc *scn)
2983  {
2984  	struct wbuff_alloc_request wbuff_alloc[CE_COUNT_MAX] = {0};
2985  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2986  	struct HIF_CE_pipe_info *pipe_info;
2987  	struct CE_state *ce_state;
2988  	int ce_id;
2989  
2990  	for (ce_id = 0; ce_id <  scn->ce_count; ce_id++) {
2991  		pipe_info = &hif_state->pipe_info[ce_id];
2992  		ce_state = scn->ce_id_to_state[ce_id];
2993  
2994  		if (!pipe_info->buf_sz)
2995  			continue;
2996  
2997  		/* Only RX CEs need WBUFF registration. recv_bufs_needed
2998  		 * contains valid count for RX CEs during init time.
2999  		 */
3000  		if (!atomic_read(&pipe_info->recv_bufs_needed))
3001  			continue;
3002  
3003  		wbuff_alloc[ce_id].pool_id = ce_id;
3004  		wbuff_alloc[ce_id].buffer_size = pipe_info->buf_sz;
3005  		wbuff_alloc[ce_id].pool_size =
3006  				hif_calc_wbuff_pool_size(scn, ce_state);
3007  	}
3008  
3009  	scn->wbuff_handle =
3010  		wbuff_module_register(wbuff_alloc, CE_COUNT_MAX, 0, 4,
3011  				      WBUFF_MODULE_CE_RX);
3012  }
3013  
3014  static void hif_ce_rx_wbuff_deregister(struct hif_softc *scn)
3015  {
3016  	wbuff_module_deregister(scn->wbuff_handle);
3017  	scn->wbuff_handle = NULL;
3018  }
3019  #else
3020  static inline qdf_nbuf_t
3021  hif_ce_rx_nbuf_alloc(struct hif_softc *scn, uint8_t ce_id)
3022  {
3023  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3024  	struct HIF_CE_pipe_info *pipe_info = &hif_state->pipe_info[ce_id];
3025  
3026  	return qdf_nbuf_alloc(scn->qdf_dev, pipe_info->buf_sz, 0, 4, false);
3027  }
3028  
3029  static inline void hif_ce_rx_nbuf_free(qdf_nbuf_t nbuf)
3030  {
3031  	return qdf_nbuf_free(nbuf);
3032  }
3033  
3034  static inline void hif_ce_rx_wbuff_register(struct hif_softc *scn)
3035  {
3036  }
3037  
3038  static inline void hif_ce_rx_wbuff_deregister(struct hif_softc *scn)
3039  {
3040  }
3041  #endif /* WLAN_FEATURE_CE_RX_BUFFER_REUSE */
3042  
3043  #ifdef WLAN_FEATURE_FASTPATH
3044  /**
3045   * hif_enable_fastpath() - Update that we have enabled fastpath mode
3046   * @hif_ctx: HIF context
3047   *
3048   * For use in data path
3049   *
3050   * Return: void
3051   */
3052  void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
3053  {
3054  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3055  
3056  	if (ce_srng_based(scn)) {
3057  		hif_warn("srng rings do not support fastpath");
3058  		return;
3059  	}
3060  	hif_debug("Enabling fastpath mode");
3061  	scn->fastpath_mode_on = true;
3062  }
3063  
3064  /**
3065   * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
3066   * @hif_ctx: HIF Context
3067   *
3068   * For use in data path to skip HTC
3069   *
3070   * Return: bool
3071   */
3072  bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
3073  {
3074  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3075  
3076  	return scn->fastpath_mode_on;
3077  }
3078  
3079  /**
3080   * hif_get_ce_handle - API to get CE handle for FastPath mode
3081   * @hif_ctx: HIF Context
3082   * @id: CopyEngine Id
3083   *
3084   * API to return CE handle for fastpath mode
3085   *
3086   * Return: CE handle corresponding to the CopyEngine Id
3087   */
3088  void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
3089  {
3090  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3091  
3092  	return scn->ce_id_to_state[id];
3093  }
3094  qdf_export_symbol(hif_get_ce_handle);
3095  
3096  /**
3097   * ce_h2t_tx_ce_cleanup() - Placeholder function for H2T CE cleanup.
3098   * No processing is required inside this function.
3099   * @ce_hdl: Copy engine handle
3100   * Using an assert, this function makes sure that
3101   * the TX CE has been processed completely.
3102   *
3103   * This is called while dismantling CE structures. No other thread
3104   * should be using these structures while dismantling is occurring,
3105   * so no locking is needed.
3106   *
3107   * Return: none
3108   */
3109  void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
3110  {
3111  	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
3112  	struct CE_ring_state *src_ring = ce_state->src_ring;
3113  	struct hif_softc *sc = ce_state->scn;
3114  	uint32_t sw_index, write_index;
3115  
3116  	if (hif_is_nss_wifi_enabled(sc))
3117  		return;
3118  
3119  	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
3120  		hif_debug("Fastpath mode ON, Cleaning up HTT Tx CE");
3121  		sw_index = src_ring->sw_index;
3122  		write_index = src_ring->write_index;
3123  
3124  		/* At this point Tx CE should be clean */
3125  		qdf_assert_always(sw_index == write_index);
3126  	}
3127  }
3128  
3129  /**
3130   * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
3131   * @ce_hdl: Handle to CE
3132   *
3133   * These buffers are never allocated on the fly, but
3134   * are allocated only once during HIF start and freed
3135   * only once during HIF stop.
3136   * NOTE:
3137   * The assumption here is there is no in-flight DMA in progress
3138   * currently, so that buffers can be freed up safely.
3139   *
3140   * Return: NONE
3141   */
3142  void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
3143  {
3144  	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
3145  	struct CE_ring_state *dst_ring = ce_state->dest_ring;
3146  	qdf_nbuf_t nbuf;
3147  	int i;
3148  
3149  	if (ce_state->scn->fastpath_mode_on == false)
3150  		return;
3151  
3152  	if (!ce_state->htt_rx_data)
3153  		return;
3154  
3155  	/*
3156  	 * When fastpath_mode is on, for datapath CEs, unlike other CEs,
3157  	 * this CE is kept completely full: it does not leave one blank
3158  	 * space to distinguish between an empty queue and a full queue.
3159  	 * So free all the entries.
3160  	 */
3161  	for (i = 0; i < dst_ring->nentries; i++) {
3162  		nbuf = dst_ring->per_transfer_context[i];
3163  
3164  		/*
3165  		 * The reasons for doing this check are:
3166  		 * 1) Protect against calling cleanup before allocating buffers
3167  		 * 2) In a corner case, fastpath_mode_on may be set, but we
3168  		 *    could have a partially filled ring because of a memory
3169  		 *    allocation failure in the middle of allocating the ring.
3170  		 *    This check accounts for that case; checking the
3171  		 *    fastpath_mode_on flag or the started flag would not
3172  		 *    have covered it. This is not in the performance path,
3173  		 *    so OK to do this.
3174  		 */
3175  		if (nbuf) {
3176  			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
3177  					      QDF_DMA_FROM_DEVICE);
3178  			hif_ce_rx_nbuf_free(nbuf);
3179  		}
3180  	}
3181  }
3182  
3183  /**
3184   * hif_update_fastpath_recv_bufs_cnt() - Increment the Rx buf count by 1
3185   * @scn: HIF handle
3186   *
3187   * Datapath Rx CEs are a special case in which all the message buffers are
3188   * reused. Hence all the entries in the pipe have to be posted, even at the
3189   * beginning, unlike other CE pipes where one less than dest_nentries is
3190   * filled at the beginning.
3191   *
3192   * Return: None
3193   */
3194  static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
3195  {
3196  	int pipe_num;
3197  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3198  
3199  	if (scn->fastpath_mode_on == false)
3200  		return;
3201  
3202  	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3203  		struct HIF_CE_pipe_info *pipe_info =
3204  			&hif_state->pipe_info[pipe_num];
3205  		struct CE_state *ce_state =
3206  			scn->ce_id_to_state[pipe_info->pipe_num];
3207  
3208  		if (ce_state->htt_rx_data)
3209  			atomic_inc(&pipe_info->recv_bufs_needed);
3210  	}
3211  }
3212  #else
3213  static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
3214  {
3215  }
3216  
3217  static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
3218  {
3219  	return false;
3220  }
3221  #endif /* WLAN_FEATURE_FASTPATH */
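
/*
 * Sketch of the fastpath Rx buffer accounting above (ring size assumed
 * for illustration): a normal pipe with dest_nentries == 512 starts out
 * asking for 511 buffers; for an HTT Rx pipe the helper above bumps the
 * count so that every entry gets posted:
 *
 *	if (ce_state->htt_rx_data)
 *		atomic_inc(&pipe_info->recv_bufs_needed);	// 511 -> 512
 */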
3222  
3223  void ce_fini(struct CE_handle *copyeng)
3224  {
3225  	struct CE_state *CE_state = (struct CE_state *)copyeng;
3226  	unsigned int CE_id = CE_state->id;
3227  	struct hif_softc *scn = CE_state->scn;
3228  	uint32_t desc_size;
3229  
3230  	bool inited = CE_state->timer_inited;
3231  	CE_state->state = CE_UNUSED;
3232  	scn->ce_id_to_state[CE_id] = NULL;
3233  	/* Set the flag to false first to stop processing in ce_poll_timeout */
3234  	ce_disable_polling(CE_state);
3235  
3236  	qdf_lro_deinit(CE_state->lro_data);
3237  
3238  	ce_ring_dump_unregister_region(CE_state, CE_id);
3239  
3240  	if (CE_state->src_ring) {
3241  		/* Cleanup the datapath Tx ring */
3242  		ce_h2t_tx_ce_cleanup(copyeng);
3243  
3244  		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
3245  		if (CE_state->src_ring->shadow_base_unaligned)
3246  			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
3247  		if (CE_state->src_ring->base_addr_owner_space_unaligned)
3248  			ce_free_desc_ring(scn, CE_state->id,
3249  					  CE_state->src_ring,
3250  					  desc_size);
3251  		ce_srng_cleanup(scn, CE_state, CE_RING_SRC);
3252  		qdf_mem_free(CE_state->src_ring);
3253  	}
3254  	if (CE_state->dest_ring) {
3255  		/* Cleanup the datapath Rx ring */
3256  		ce_t2h_msg_ce_cleanup(copyeng);
3257  
3258  		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
3259  		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
3260  			ce_free_desc_ring(scn, CE_state->id,
3261  					  CE_state->dest_ring,
3262  					  desc_size);
3263  		ce_srng_cleanup(scn, CE_state, CE_RING_DEST);
3264  		qdf_mem_free(CE_state->dest_ring);
3265  
3266  		/* epping: free the poll timer if it was initialized */
3267  		if (inited) {
3268  			qdf_timer_free(&CE_state->poll_timer);
3269  		}
3270  	}
3271  	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
3272  		/* Cleanup the datapath Tx ring */
3273  		ce_h2t_tx_ce_cleanup(copyeng);
3274  
3275  		if (CE_state->status_ring->shadow_base_unaligned)
3276  			qdf_mem_free(
3277  				CE_state->status_ring->shadow_base_unaligned);
3278  
3279  		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
3280  		if (CE_state->status_ring->base_addr_owner_space_unaligned)
3281  			ce_free_desc_ring(scn, CE_state->id,
3282  					  CE_state->status_ring,
3283  					  desc_size);
3284  		ce_srng_cleanup(scn, CE_state, CE_RING_STATUS);
3285  		qdf_mem_free(CE_state->status_ring);
3286  	}
3287  
3288  	free_mem_ce_debug_history(scn, CE_id);
3289  	reset_ce_debug_history(scn);
3290  	ce_deinit_ce_desc_event_log(scn, CE_id);
3291  
3292  	qdf_spinlock_destroy(&CE_state->ce_index_lock);
3293  #ifdef CE_TASKLET_SCHEDULE_ON_FULL
3294  	qdf_spinlock_destroy(&CE_state->ce_interrupt_lock);
3295  #endif
3296  	qdf_mem_free(CE_state);
3297  }
3298  
3299  void hif_ce_desc_history_log_unregister(void)
3300  {
3301  	__hif_ce_desc_history_log_unregister();
3302  }
3303  
3304  void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
3305  {
3306  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
3307  
3308  	qdf_mem_zero(&hif_state->msg_callbacks_pending,
3309  		  sizeof(hif_state->msg_callbacks_pending));
3310  	qdf_mem_zero(&hif_state->msg_callbacks_current,
3311  		  sizeof(hif_state->msg_callbacks_current));
3312  }
3313  
3314  /* Send the first nbytes bytes of the buffer */
3315  QDF_STATUS
3316  hif_send_head(struct hif_opaque_softc *hif_ctx,
3317  	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
3318  	      qdf_nbuf_t nbuf, unsigned int data_attr)
3319  {
3320  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3321  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
3322  	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
3323  	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3324  	int bytes = nbytes, nfrags = 0;
3325  	struct ce_sendlist sendlist;
3326  	int i = 0;
3327  	QDF_STATUS status;
3328  	unsigned int mux_id = 0;
3329  
3330  	if (nbytes > qdf_nbuf_len(nbuf)) {
3331  		hif_err("nbytes: %d nbuf_len: %d", nbytes,
3332  		       (uint32_t)qdf_nbuf_len(nbuf));
3333  		QDF_ASSERT(0);
3334  	}
3335  
3336  	transfer_id =
3337  		(mux_id & MUX_ID_MASK) |
3338  		(transfer_id & TRANSACTION_ID_MASK);
3339  	data_attr &= DESC_DATA_FLAG_MASK;
3340  	/*
3341  	 * The common case involves sending multiple fragments within a
3342  	 * single download (the tx descriptor and the tx frame header).
3343  	 * So, optimize for the case of multiple fragments by not even
3344  	 * checking whether it's necessary to use a sendlist.
3345  	 * The overhead of using a sendlist for a single buffer download
3346  	 * is not a big deal, since it happens rarely (for WMI messages).
3347  	 */
3348  	ce_sendlist_init(&sendlist);
3349  	do {
3350  		qdf_dma_addr_t frag_paddr;
3351  		int frag_bytes;
3352  
3353  		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
3354  		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
3355  		/*
3356  		 * Clear the packet offset for all but the first CE desc.
3357  		 */
3358  		if (i++ > 0)
3359  			data_attr &= ~CE_DESC_PKT_OFFSET_BIT_M;
3360  
3361  		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
3362  				    frag_bytes >
3363  				    bytes ? bytes : frag_bytes,
3364  				    qdf_nbuf_get_frag_is_wordstream
3365  				    (nbuf,
3366  				    nfrags) ? 0 :
3367  				    CE_SEND_FLAG_SWAP_DISABLE,
3368  				    data_attr);
3369  		if (status != QDF_STATUS_SUCCESS) {
3370  			hif_err("frag_num: %d larger than limit (status=%d)",
3371  			       nfrags, status);
3372  			return status;
3373  		}
3374  		bytes -= frag_bytes;
3375  		nfrags++;
3376  	} while (bytes > 0);
3377  
3378  	/* Make sure we have resources to handle this request */
3379  	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
3380  	if (pipe_info->num_sends_allowed < nfrags) {
3381  		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
3382  		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
3383  		return QDF_STATUS_E_RESOURCES;
3384  	}
3385  	pipe_info->num_sends_allowed -= nfrags;
3386  	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
3387  
3388  	if (qdf_unlikely(!ce_hdl)) {
3389  		hif_err("CE handle is null");
3390  		return QDF_STATUS_E_INVAL;
3391  	}
3392  
3393  	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
3394  	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
3395  		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
3396  		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
3397  	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
3398  	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
3399  
3400  	return status;
3401  }
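
/*
 * Example (illustration only, hypothetical caller): a typical HTT data
 * download places two fragments in one nbuf -- the HTT tx descriptor and
 * the frame itself -- and hands the whole buffer to hif_send_head(),
 * which walks the fragments into a sendlist. "desc_len", the vaddr/paddr
 * pair and the pipe/transfer ids below are assumptions.
 *
 *	qdf_nbuf_frag_push_head(msdu, desc_len, desc_vaddr, desc_paddr);
 *	status = hif_send_head(hif_ctx, htt_tx_pipe, transfer_id,
 *			       qdf_nbuf_len(msdu), msdu, data_attr);
 */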
3402  
3403  void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
3404  								int force)
3405  {
3406  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3407  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
3408  
3409  	if (!force) {
3410  		int resources;
3411  		/*
3412  		 * Decide whether to actually poll for completions, or just
3413  		 * wait for a later chance. If there seem to be plenty of
3414  		 * resources left, then just wait, since checking involves
3415  		 * reading a CE register, which is a relatively expensive
3416  		 * operation.
3417  		 */
3418  		resources = hif_get_free_queue_number(hif_ctx, pipe);
3419  		/*
3420  		 * If at least 50% of the total resources are still available,
3421  		 * don't bother checking again yet.
3422  		 */
3423  		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
3424  									 1))
3425  			return;
3426  	}
3427  #ifdef ATH_11AC_TXCOMPACT
3428  	ce_per_engine_servicereap(scn, pipe);
3429  #else
3430  	ce_per_engine_service(scn, pipe);
3431  #endif
3432  }
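
/*
 * Sketch of the 50% heuristic above (assumed numbers): with
 * src_nentries == 32 for the pipe,
 *
 *	resources == 20  ->  20 > (32 >> 1), return without polling
 *	resources == 10  ->  10 <= 16, reap/service completions
 */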
3433  
3434  #ifdef CUSTOM_CB_SCHEDULER_SUPPORT
3435  QDF_STATUS
3436  hif_register_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
3437  			  void (*custom_cb)(void *), void *custom_cb_context)
3438  {
3439  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3440  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3441  	struct HIF_CE_pipe_info *pipe_info;
3442  
3443  	if (pipe >= CE_COUNT_MAX)
3444  		return QDF_STATUS_E_INVAL;
3445  
3446  	pipe_info = &hif_state->pipe_info[pipe];
3447  	ce_register_custom_cb(pipe_info->ce_hdl, custom_cb, custom_cb_context);
3448  
3449  	return QDF_STATUS_SUCCESS;
3450  }
3451  
3452  QDF_STATUS
3453  hif_unregister_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
3454  {
3455  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3456  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3457  	struct HIF_CE_pipe_info *pipe_info;
3458  
3459  	if (pipe >= CE_COUNT_MAX)
3460  		return QDF_STATUS_E_INVAL;
3461  
3462  	pipe_info = &hif_state->pipe_info[pipe];
3463  	ce_unregister_custom_cb(pipe_info->ce_hdl);
3464  
3465  	return QDF_STATUS_SUCCESS;
3466  }
3467  
3468  QDF_STATUS
3469  hif_enable_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
3470  {
3471  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3472  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3473  	struct HIF_CE_pipe_info *pipe_info;
3474  
3475  	if (pipe >= CE_COUNT_MAX)
3476  		return QDF_STATUS_E_INVAL;
3477  
3478  	pipe_info = &hif_state->pipe_info[pipe];
3479  	ce_enable_custom_cb(pipe_info->ce_hdl);
3480  	ce_dispatch_interrupt(pipe, &hif_state->tasklets[pipe]);
3481  
3482  	return QDF_STATUS_SUCCESS;
3483  }
3484  
3485  QDF_STATUS
3486  hif_disable_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
3487  {
3488  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3489  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3490  	struct HIF_CE_pipe_info *pipe_info;
3491  
3492  	if (pipe >= CE_COUNT_MAX)
3493  		return QDF_STATUS_E_INVAL;
3494  
3495  	pipe_info = &hif_state->pipe_info[pipe];
3496  	ce_disable_custom_cb(pipe_info->ce_hdl);
3497  
3498  	return QDF_STATUS_SUCCESS;
3499  }
3500  #endif /* CUSTOM_CB_SCHEDULER_SUPPORT */
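
/*
 * Example (illustration only, hypothetical caller): expected usage order
 * for the custom callback API above -- register, enable once the consumer
 * is ready, then tear down in reverse. "my_cb" and "my_ctx" are
 * assumptions.
 *
 *	hif_register_ce_custom_cb(hif_ctx, pipe, my_cb, my_ctx);
 *	hif_enable_ce_custom_cb(hif_ctx, pipe);
 *	...
 *	hif_disable_ce_custom_cb(hif_ctx, pipe);
 *	hif_unregister_ce_custom_cb(hif_ctx, pipe);
 */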
3501  
3502  #if defined(CE_TASKLET_SCHEDULE_ON_FULL) && defined(CE_TASKLET_DEBUG_ENABLE)
3503  #define CE_RING_FULL_THRESHOLD_TIME 3000000
3504  #define CE_RING_FULL_THRESHOLD 1024
3505  /* This function is called from the htc_send path. If there is no resource
3506   * to send a packet via HTC, check whether interrupts have gone unprocessed
3507   * on that CE for the last 3 seconds. If so, schedule a tasklet to reap the
3508   * available entries. Also schedule the tasklet if the queue has reached
3509   * 1024 entries within 3 seconds.
3510   */
3511  void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
3512  {
3513  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
3514  	int64_t diff_time = qdf_get_log_timestamp_usecs() -
3515  			hif_state->stats.tasklet_sched_entry_ts[pipe];
3516  
3517  	hif_state->stats.ce_ring_full_count[pipe]++;
3518  
3519  	if (diff_time >= CE_RING_FULL_THRESHOLD_TIME ||
3520  	    hif_state->stats.ce_ring_full_count[pipe] >=
3521  	    CE_RING_FULL_THRESHOLD) {
3522  		hif_state->stats.ce_ring_full_count[pipe] = 0;
3523  		hif_state->stats.ce_manual_tasklet_schedule_count[pipe]++;
3524  		hif_state->stats.ce_last_manual_tasklet_schedule_ts[pipe] =
3525  			qdf_get_log_timestamp_usecs();
3526  		ce_dispatch_interrupt(pipe, &hif_state->tasklets[pipe]);
3527  	}
3528  }
3529  #else
3530  void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
3531  {
3532  }
3533  #endif
3534  
3535  uint16_t
3536  hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
3537  {
3538  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
3539  	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
3540  	uint16_t rv;
3541  
3542  	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
3543  	rv = pipe_info->num_sends_allowed;
3544  	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
3545  	return rv;
3546  }
3547  
3548  /* Called by lower (CE) layer when a send to Target completes. */
3549  static void
3550  hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
3551  		     void *transfer_context, qdf_dma_addr_t CE_data,
3552  		     unsigned int nbytes, unsigned int transfer_id,
3553  		     unsigned int sw_index, unsigned int hw_index,
3554  		     unsigned int toeplitz_hash_result)
3555  {
3556  	struct HIF_CE_pipe_info *pipe_info =
3557  		(struct HIF_CE_pipe_info *)ce_context;
3558  	unsigned int sw_idx = sw_index, hw_idx = hw_index;
3559  	struct hif_msg_callbacks *msg_callbacks =
3560  		&pipe_info->pipe_callbacks;
3561  
3562  	do {
3563  		/*
3564  		 * The upper layer callback will be triggered
3565  		 * when the last fragment is completed.
3566  		 */
3567  		if (transfer_context != CE_SENDLIST_ITEM_CTXT)
3568  			msg_callbacks->txCompletionHandler(
3569  				msg_callbacks->Context,
3570  				transfer_context, transfer_id,
3571  				toeplitz_hash_result);
3572  
3573  		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
3574  		pipe_info->num_sends_allowed++;
3575  		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
3576  	} while (ce_completed_send_next(copyeng,
3577  			&ce_context, &transfer_context,
3578  			&CE_data, &nbytes, &transfer_id,
3579  			&sw_idx, &hw_idx,
3580  			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
3581  }
3582  
3583  /**
3584   * hif_ce_do_recv(): send message from copy engine to upper layers
3585   * @msg_callbacks: structure containing callback and callback context
3586   * @netbuf: skb containing message
3587   * @nbytes: number of bytes in the message
3588   * @pipe_info: used for the pipe_number info
3589   *
3590   * Checks the packet length, configures the length in the netbuff,
3591   * and calls the upper layer callback.
3592   *
3593   * return: None
3594   */
3595  static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
3596  		qdf_nbuf_t netbuf, int nbytes,
3597  		struct HIF_CE_pipe_info *pipe_info) {
3598  	if (nbytes <= pipe_info->buf_sz) {
3599  		qdf_nbuf_set_pktlen(netbuf, nbytes);
3600  		msg_callbacks->
3601  			rxCompletionHandler(msg_callbacks->Context,
3602  					netbuf, pipe_info->pipe_num);
3603  	} else {
3604  		hif_err("Invalid Rx msg buf: %pK nbytes: %d", netbuf, nbytes);
3605  		hif_ce_rx_nbuf_free(netbuf);
3606  	}
3607  }
3608  
3609  #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
3610  /**
3611   * hif_ce_rtpm_mark_last_busy() - record and mark last busy for RTPM
3612   * @scn: hif_softc pointer.
3613   * @ce_id: ce ID
3614   *
3615   * Return: None
3616   */
3617  static inline void
3618  hif_ce_rtpm_mark_last_busy(struct hif_softc *scn, uint32_t ce_id)
3619  {
3620  	/* do NOT mark last busy for diag event, to avoid impacting RTPM */
3621  	if (ce_id == CE_ID_7)
3622  		return;
3623  
3624  	hif_rtpm_record_ce_last_busy_evt(scn, ce_id);
3625  	hif_rtpm_mark_last_busy(HIF_RTPM_ID_CE);
3626  }
3627  #else
3628  static inline void
3629  hif_ce_rtpm_mark_last_busy(struct hif_softc *scn, uint32_t ce_id)
3630  {
3631  	hif_rtpm_record_ce_last_busy_evt(scn, ce_id);
3632  	hif_rtpm_mark_last_busy(HIF_RTPM_ID_CE);
3633  }
3634  #endif
3635  
3636  /* Called by lower (CE) layer when data is received from the Target. */
3637  static void
3638  hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
3639  		     void *transfer_context, qdf_dma_addr_t CE_data,
3640  		     unsigned int nbytes, unsigned int transfer_id,
3641  		     unsigned int flags)
3642  {
3643  	struct HIF_CE_pipe_info *pipe_info =
3644  		(struct HIF_CE_pipe_info *)ce_context;
3645  	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
3646  	struct CE_state *ce_state = (struct CE_state *) copyeng;
3647  	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3648  	struct hif_msg_callbacks *msg_callbacks = &pipe_info->pipe_callbacks;
3649  
3650  	do {
3651  		hif_ce_rtpm_mark_last_busy(scn, ce_state->id);
3652  		qdf_nbuf_unmap_single(scn->qdf_dev,
3653  				      (qdf_nbuf_t) transfer_context,
3654  				      QDF_DMA_FROM_DEVICE);
3655  
3656  		atomic_inc(&pipe_info->recv_bufs_needed);
3657  		hif_post_recv_buffers_for_pipe(pipe_info);
3658  		if (scn->target_status == TARGET_STATUS_RESET)
3659  			hif_ce_rx_nbuf_free(transfer_context);
3660  		else
3661  			hif_ce_do_recv(msg_callbacks, transfer_context,
3662  				nbytes, pipe_info);
3663  
3664  		/* Set the force_break flag if the number of receives reaches
3665  		 * MAX_NUM_OF_RECEIVES
3666  		 */
3667  		ce_state->receive_count++;
3668  		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
3669  			ce_state->force_break = 1;
3670  			break;
3671  		}
3672  	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
3673  					&CE_data, &nbytes, &transfer_id,
3674  					&flags) == QDF_STATUS_SUCCESS);
3675  
3676  }
3677  
3678  /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
3679  
3680  void
3681  hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
3682  	      struct hif_msg_callbacks *callbacks)
3683  {
3684  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
3685  
3686  #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
3687  	spin_lock_init(&pcie_access_log_lock);
3688  #endif
3689  	/* Save callbacks for later installation */
3690  	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
3691  		 sizeof(hif_state->msg_callbacks_pending));
3692  
3693  }
3694  
3695  static int hif_completion_thread_startup_by_ceid(struct HIF_CE_state *hif_state,
3696  						 int pipe_num)
3697  {
3698  	struct CE_attr attr;
3699  	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3700  	struct hif_msg_callbacks *hif_msg_callbacks =
3701  		&hif_state->msg_callbacks_current;
3702  	struct HIF_CE_pipe_info *pipe_info;
3703  	struct CE_state *ce_state;
3704  
3705  	if (pipe_num >= CE_COUNT_MAX)
3706  		return -EINVAL;
3707  
3708  	pipe_info = &hif_state->pipe_info[pipe_num];
3709  	ce_state = scn->ce_id_to_state[pipe_num];
3710  
3711  	if (!hif_msg_callbacks ||
3712  	    !hif_msg_callbacks->rxCompletionHandler ||
3713  	    !hif_msg_callbacks->txCompletionHandler) {
3714  		hif_err("no completion handler registered");
3715  		return -EFAULT;
3716  	}
3717  
3718  	attr = hif_state->host_ce_config[pipe_num];
3719  	if (attr.src_nentries) {
3720  		/* pipe used to send to target */
3721  		hif_debug("pipe_num:%d pipe_info:0x%pK\n",
3722  			  pipe_num, pipe_info);
3723  		ce_send_cb_register(pipe_info->ce_hdl,
3724  				    hif_pci_ce_send_done, pipe_info,
3725  				    attr.flags & CE_ATTR_DISABLE_INTR);
3726  		pipe_info->num_sends_allowed = attr.src_nentries - 1;
3727  	}
3728  	if (attr.dest_nentries) {
3729  		hif_debug("pipe_num:%d pipe_info:0x%pK\n",
3730  			  pipe_num, pipe_info);
3731  		/* pipe used to receive from target */
3732  		ce_recv_cb_register(pipe_info->ce_hdl,
3733  				    hif_pci_ce_recv_data, pipe_info,
3734  				    attr.flags & CE_ATTR_DISABLE_INTR);
3735  	}
3736  
3737  	if (attr.src_nentries)
3738  		qdf_spinlock_create(&pipe_info->completion_freeq_lock);
3739  
3740  	if (!(ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND))
3741  		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
3742  			     sizeof(pipe_info->pipe_callbacks));
3743  
3744  	return 0;
3745  }
3746  
3747  static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
3748  {
3749  	struct CE_handle *ce_diag = hif_state->ce_diag;
3750  	int pipe_num, ret;
3751  	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3752  
3753  	/* daemonize("hif_compl_thread"); */
3754  
3755  	if (scn->ce_count == 0) {
3756  		hif_err("ce_count is 0");
3757  		return -EINVAL;
3758  	}
3759  
3760  
3761  	A_TARGET_ACCESS_LIKELY(scn);
3762  	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3763  		struct HIF_CE_pipe_info *pipe_info;
3764  
3765  		pipe_info = &hif_state->pipe_info[pipe_num];
3766  		if (pipe_info->ce_hdl == ce_diag)
3767  			continue;       /* Handle Diagnostic CE specially */
3768  
3769  		ret = hif_completion_thread_startup_by_ceid(hif_state,
3770  							    pipe_num);
3771  		if (ret < 0)
3772  			return ret;
3773  
3774  	}
3775  
3776  	A_TARGET_ACCESS_UNLIKELY(scn);
3777  	return 0;
3778  }
3779  
3780  /*
3781   * Install pending msg callbacks.
3782   *
3783   * TBDXXX: This hack is needed because upper layers install msg callbacks
3784   * for use with HTC before BMI is done; yet this HIF implementation
3785   * needs to continue to use BMI msg callbacks. Really, upper layers
3786   * should not register HTC callbacks until AFTER BMI phase.
3787   */
3788  static void hif_msg_callbacks_install(struct hif_softc *scn)
3789  {
3790  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3791  
3792  	qdf_mem_copy(&hif_state->msg_callbacks_current,
3793  		 &hif_state->msg_callbacks_pending,
3794  		 sizeof(hif_state->msg_callbacks_pending));
3795  }
3796  
3797  void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
3798  							uint8_t *DLPipe)
3799  {
3800  	int ul_is_polled, dl_is_polled;
3801  
3802  	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
3803  		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
3804  }
3805  
3806  /**
3807   * hif_dump_pipe_debug_count() - Log error count
3808   * @scn: hif_softc pointer.
3809   *
3810   * Output the pipe error counts of each pipe to log file
3811   *
3812   * Return: N/A
3813   */
3814  void hif_dump_pipe_debug_count(struct hif_softc *scn)
3815  {
3816  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3817  	int pipe_num;
3818  
3819  	if (!hif_state) {
3820  		hif_err("hif_state is NULL");
3821  		return;
3822  	}
3823  	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3824  		struct HIF_CE_pipe_info *pipe_info;
3825  
3826  		pipe_info = &hif_state->pipe_info[pipe_num];
3827  
3828  		if (pipe_info->nbuf_alloc_err_count > 0 ||
3829  		    pipe_info->nbuf_dma_err_count > 0 ||
3830  		    pipe_info->nbuf_ce_enqueue_err_count)
3831  			hif_err(
3832  				"pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
3833  				pipe_info->pipe_num,
3834  				atomic_read(&pipe_info->recv_bufs_needed),
3835  				pipe_info->nbuf_alloc_err_count,
3836  				pipe_info->nbuf_dma_err_count,
3837  				pipe_info->nbuf_ce_enqueue_err_count);
3838  	}
3839  }
3840  
3841  static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
3842  					  void *nbuf, uint32_t *error_cnt,
3843  					  enum hif_ce_event_type failure_type,
3844  					  const char *failure_type_string)
3845  {
3846  	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
3847  	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
3848  	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
3849  	int ce_id = CE_state->id;
3850  	uint32_t error_cnt_tmp;
3851  
3852  	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
3853  	error_cnt_tmp = ++(*error_cnt);
3854  	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
3855  	hif_debug("pipe_num: %d, needed: %d, err_cnt: %u, fail_type: %s",
3856  		  pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
3857  		  failure_type_string);
3858  	hif_record_ce_desc_event(scn, ce_id, failure_type,
3859  				 NULL, nbuf, bufs_needed_tmp, 0);
3860  	/* If we fail to allocate the last buffer for an Rx pipe,
3861  	 * there is no trigger to refill the CE and we will
3862  	 * eventually crash.
3863  	 */
3864  	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1 ||
3865  	    (ce_srng_based(scn) &&
3866  	     bufs_needed_tmp == CE_state->dest_ring->nentries - 2)) {
3867  		qdf_atomic_inc(&scn->active_oom_work_cnt);
3868  		if (!qdf_sched_work(scn->qdf_dev,
3869  				    &CE_state->oom_allocation_work))
3870  			qdf_atomic_dec(&scn->active_oom_work_cnt);
3871  	}
3872  
3873  }
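
/*
 * Sketch of the refill trigger above (assumed ring size): with
 * nentries == 512, the OOM work is only scheduled once every usable
 * buffer is outstanding -- bufs_needed_tmp == 511 on a legacy CE, or
 * 510 on an SRNG-based CE, which keeps one extra entry unused.
 */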
3874  
3875  QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
3876  {
3877  	struct CE_handle *ce_hdl;
3878  	qdf_size_t buf_sz;
3879  	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
3880  	QDF_STATUS status;
3881  	uint32_t bufs_posted = 0;
3882  	unsigned int ce_id;
3883  
3884  	buf_sz = pipe_info->buf_sz;
3885  	if (buf_sz == 0) {
3886  		/* Unused Copy Engine */
3887  		return QDF_STATUS_SUCCESS;
3888  	}
3889  
3890  	ce_hdl = pipe_info->ce_hdl;
3891  	if (!ce_hdl) {
3892  		hif_err("ce_hdl is NULL");
3893  		return QDF_STATUS_E_INVAL;
3894  	}
3895  
3896  	ce_id = ((struct CE_state *)ce_hdl)->id;
3897  
3898  	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
3899  	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
3900  		qdf_dma_addr_t CE_data;      /* CE space buffer address */
3901  		qdf_nbuf_t nbuf;
3902  
3903  		atomic_dec(&pipe_info->recv_bufs_needed);
3904  		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
3905  
3906  		hif_record_ce_desc_event(scn, ce_id,
3907  					 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL,
3908  					 0, 0);
3909  		nbuf = hif_ce_rx_nbuf_alloc(scn, ce_id);
3910  		if (!nbuf) {
3911  			hif_post_recv_buffers_failure(pipe_info, nbuf,
3912  					&pipe_info->nbuf_alloc_err_count,
3913  					 HIF_RX_NBUF_ALLOC_FAILURE,
3914  					"HIF_RX_NBUF_ALLOC_FAILURE");
3915  			return QDF_STATUS_E_NOMEM;
3916  		}
3917  
3918  		hif_record_ce_desc_event(scn, ce_id,
3919  					 HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf,
3920  					 0, 0);
3921  		/*
3922  		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
3923  		 * CE_data = dma_map_single(dev, data, buf_sz,
3924  		 *			    DMA_FROM_DEVICE);
3925  		 */
3926  		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
3927  					    QDF_DMA_FROM_DEVICE);
3928  
3929  		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
3930  			hif_post_recv_buffers_failure(pipe_info, nbuf,
3931  					&pipe_info->nbuf_dma_err_count,
3932  					 HIF_RX_NBUF_MAP_FAILURE,
3933  					"HIF_RX_NBUF_MAP_FAILURE");
3934  			hif_ce_rx_nbuf_free(nbuf);
3935  			return status;
3936  		}
3937  
3938  		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
3939  		hif_record_ce_desc_event(scn, ce_id,
3940  					 HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf,
3941  					 0, 0);
3942  		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
3943  					       buf_sz, DMA_FROM_DEVICE);
3944  		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
3945  		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
3946  			hif_post_recv_buffers_failure(pipe_info, nbuf,
3947  					&pipe_info->nbuf_ce_enqueue_err_count,
3948  					 HIF_RX_NBUF_ENQUEUE_FAILURE,
3949  					"HIF_RX_NBUF_ENQUEUE_FAILURE");
3950  
3951  			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
3952  						QDF_DMA_FROM_DEVICE);
3953  			hif_ce_rx_nbuf_free(nbuf);
3954  			return status;
3955  		}
3956  
3957  		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
3958  		bufs_posted++;
3959  	}
3960  	pipe_info->nbuf_alloc_err_count =
3961  		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
3962  		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
3963  	pipe_info->nbuf_dma_err_count =
3964  		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
3965  		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
3966  	pipe_info->nbuf_ce_enqueue_err_count =
3967  		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
3968  		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
3969  
3970  	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
3971  
3972  	return QDF_STATUS_SUCCESS;
3973  }
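
/*
 * Sketch of the error-count decay above (assumed numbers): each buffer
 * successfully posted forgives one earlier failure, so with
 * nbuf_alloc_err_count == 5 and bufs_posted == 3 the count drops to 2,
 * while bufs_posted == 8 clamps it to 0 instead of underflowing.
 */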
3974  
3975  #ifdef FEATURE_DIRECT_LINK
3976  static QDF_STATUS
3977  hif_alloc_pages_for_direct_link_recv_pipe(struct HIF_CE_state *hif_ce_state,
3978  					  int pipe_num)
3979  {
3980  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
3981  	struct service_to_pipe *tgt_svc_cfg;
3982  	struct HIF_CE_pipe_info *pipe_info;
3983  	int32_t recv_bufs_needed;
3984  	qdf_dma_addr_t dma_addr;
3985  	uint16_t num_elem_per_page;
3986  	uint16_t i;
3987  	bool is_found = false;
3988  
3989  	tgt_svc_cfg = hif_ce_state->tgt_svc_map;
3990  
3991  	for (i = 0; i < hif_ce_state->sz_tgt_svc_map; i++) {
3992  		if (tgt_svc_cfg[i].service_id != LPASS_DATA_MSG_SVC ||
3993  		    tgt_svc_cfg[i].pipedir != PIPEDIR_IN ||
3994  		    tgt_svc_cfg[i].pipenum != pipe_num)
3995  			continue;
3996  
3997  		pipe_info = &hif_ce_state->pipe_info[pipe_num];
3998  		recv_bufs_needed = atomic_read(&pipe_info->recv_bufs_needed);
3999  
4000  		if (!pipe_info->buf_sz || !recv_bufs_needed)
4001  			continue;
4002  
4003  		is_found = true;
4004  		break;
4005  	}
4006  
4007  	if (!is_found)
4008  		return QDF_STATUS_E_NOSUPPORT;
4009  
4010  	scn->dl_recv_pipe_num = pipe_num;
4011  
4012  	hif_prealloc_get_multi_pages(scn, QDF_DP_RX_DIRECT_LINK_CE_BUF_TYPE,
4013  				     pipe_info->buf_sz, recv_bufs_needed,
4014  				     &scn->dl_recv_pages, false);
4015  	if (!scn->dl_recv_pages.num_pages)
4016  		return QDF_STATUS_E_NOMEM;
4017  
4018  	num_elem_per_page = scn->dl_recv_pages.num_element_per_page;
4019  	for (i = 0; i < recv_bufs_needed; i++) {
4020  		dma_addr = scn->dl_recv_pages.dma_pages[i / num_elem_per_page].page_p_addr;
4021  		dma_addr += (i % num_elem_per_page) * pipe_info->buf_sz;
4022  		ce_recv_buf_enqueue(pipe_info->ce_hdl, NULL, dma_addr);
4023  	}
4024  
4025  	return QDF_STATUS_SUCCESS;
4026  }
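
/*
 * Sketch of the page carving above (sizes assumed for illustration):
 * with buf_sz == 2048 and 4 KB dma pages, num_elem_per_page == 2, so
 * buffer i lands at:
 *
 *	dma_addr = dma_pages[i / 2].page_p_addr + (i % 2) * 2048;
 *
 * e.g. i == 5 resolves to page 2 at offset 2048.
 */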
4027  
4028  static QDF_STATUS
4029  hif_free_pages_for_direct_link_recv_pipe(struct HIF_CE_state *hif_ce_state,
4030  					 int pipe_num)
4031  {
4032  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
4033  
4034  	if (pipe_num != scn->dl_recv_pipe_num)
4035  		return QDF_STATUS_E_NOSUPPORT;
4036  
4037  	hif_prealloc_put_multi_pages(scn, QDF_DP_RX_DIRECT_LINK_CE_BUF_TYPE,
4038  				     &scn->dl_recv_pages, false);
4039  
4040  	return QDF_STATUS_SUCCESS;
4041  }
4042  #else
4043  static inline QDF_STATUS
4044  hif_alloc_pages_for_direct_link_recv_pipe(struct HIF_CE_state *hif_ce_state,
4045  					  int pipe_num)
4046  {
4047  	return QDF_STATUS_E_NOSUPPORT;
4048  }
4049  
4050  static inline QDF_STATUS
4051  hif_free_pages_for_direct_link_recv_pipe(struct HIF_CE_state *hif_ce_state,
4052  					 int pipe_num)
4053  {
4054  	return QDF_STATUS_E_NOSUPPORT;
4055  }
4056  #endif
4057  
4058  /*
4059   * Try to post all desired receive buffers for all pipes.
4060   * Returns success for a non-fastpath Rx copy engine, since
4061   * oom_allocation_work will be scheduled to recover any
4062   * failures; returns an error if unable to completely replenish
4063   * receive buffers for a fastpath Rx copy engine.
4064   */
4065  static QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
4066  {
4067  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4068  	int pipe_num;
4069  	struct CE_state *ce_state = NULL;
4070  	QDF_STATUS qdf_status;
4071  
4072  	A_TARGET_ACCESS_LIKELY(scn);
4073  	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
4074  		struct HIF_CE_pipe_info *pipe_info;
4075  
4076  		if (pipe_num >= CE_COUNT_MAX) {
4077  			A_TARGET_ACCESS_UNLIKELY(scn);
4078  			return QDF_STATUS_E_INVAL;
4079  		}
4080  
4081  		ce_state = scn->ce_id_to_state[pipe_num];
4082  		pipe_info = &hif_state->pipe_info[pipe_num];
4083  
4084  		if (!ce_state)
4085  			continue;
4086  
4087  		/* Do not init dynamic CEs during initial load */
4088  		if (ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND)
4089  			continue;
4090  
4091  		if (hif_is_nss_wifi_enabled(scn) &&
4092  		    ce_state && (ce_state->htt_rx_data))
4093  			continue;
4094  
4095  		qdf_status =
4096  			hif_alloc_pages_for_direct_link_recv_pipe(hif_state,
4097  								  pipe_num);
4098  		if (QDF_IS_STATUS_SUCCESS(qdf_status))
4099  			continue;
4100  
4101  		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
4102  		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
4103  			ce_state->htt_rx_data &&
4104  			scn->fastpath_mode_on) {
4105  			A_TARGET_ACCESS_UNLIKELY(scn);
4106  			return qdf_status;
4107  		}
4108  	}
4109  
4110  	A_TARGET_ACCESS_UNLIKELY(scn);
4111  
4112  	return QDF_STATUS_SUCCESS;
4113  }
4114  
4115  QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
4116  {
4117  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
4118  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4119  	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
4120  
4121  	hif_update_fastpath_recv_bufs_cnt(scn);
4122  
4123  	hif_msg_callbacks_install(scn);
4124  
4125  	if (hif_completion_thread_startup(hif_state))
4126  		return QDF_STATUS_E_FAILURE;
4127  
4128  	hif_ce_rx_wbuff_register(scn);
4129  
4130  	/* enable buffer cleanup */
4131  	hif_state->started = true;
4132  
4133  	/* Post buffers once to start things off. */
4134  	qdf_status = hif_post_recv_buffers(scn);
4135  	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
4136  		/* cleanup is done in hif_ce_disable */
4137  		hif_err("Failed to post buffers");
4138  		return qdf_status;
4139  	}
4140  
4141  	return qdf_status;
4142  }
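
/*
 * Sketch of the expected bring-up order (illustration only): callbacks
 * are parked by hif_post_init() and only installed and armed here:
 *
 *	hif_post_init(hif_ctx, NULL, &htc_callbacks);	// save callbacks
 *	...BMI phase...
 *	hif_start(hif_ctx);	// install callbacks, post Rx buffers
 */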
4143  
4144  static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
4145  {
4146  	struct hif_softc *scn;
4147  	struct CE_handle *ce_hdl;
4148  	uint32_t buf_sz;
4149  	struct HIF_CE_state *hif_state;
4150  	qdf_nbuf_t netbuf;
4151  	qdf_dma_addr_t CE_data;
4152  	void *per_CE_context;
4153  	QDF_STATUS status;
4154  
4155  	buf_sz = pipe_info->buf_sz;
4156  	/* Unused Copy Engine */
4157  	if (buf_sz == 0)
4158  		return;
4159  
4160  
4161  	hif_state = pipe_info->HIF_CE_state;
4162  	if (!hif_state->started)
4163  		return;
4164  
4165  	scn = HIF_GET_SOFTC(hif_state);
4166  	ce_hdl = pipe_info->ce_hdl;
4167  
4168  	if (!scn->qdf_dev)
4169  		return;
4170  
4171  	status = hif_free_pages_for_direct_link_recv_pipe(hif_state,
4172  							  pipe_info->pipe_num);
4173  	if (QDF_IS_STATUS_SUCCESS(status))
4174  		return;
4175  
4176  	while (ce_revoke_recv_next
4177  		       (ce_hdl, &per_CE_context, (void **)&netbuf,
4178  			&CE_data) == QDF_STATUS_SUCCESS) {
4179  		if (netbuf) {
4180  			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
4181  					      QDF_DMA_FROM_DEVICE);
4182  			hif_ce_rx_nbuf_free(netbuf);
4183  		}
4184  	}
4185  }
4186  
4187  static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
4188  {
4189  	struct CE_handle *ce_hdl;
4190  	struct HIF_CE_state *hif_state;
4191  	struct hif_softc *scn;
4192  	qdf_nbuf_t netbuf;
4193  	void *per_CE_context;
4194  	qdf_dma_addr_t CE_data;
4195  	unsigned int nbytes;
4196  	unsigned int id;
4197  	uint32_t buf_sz;
4198  	uint32_t toeplitz_hash_result;
4199  
4200  	buf_sz = pipe_info->buf_sz;
4201  	if (buf_sz == 0) {
4202  		/* Unused Copy Engine */
4203  		return;
4204  	}
4205  
4206  	hif_state = pipe_info->HIF_CE_state;
4207  	if (!hif_state->started) {
4208  		return;
4209  	}
4210  
4211  	scn = HIF_GET_SOFTC(hif_state);
4212  
4213  	ce_hdl = pipe_info->ce_hdl;
4214  
4215  	while (ce_cancel_send_next
4216  		       (ce_hdl, &per_CE_context,
4217  		       (void **)&netbuf, &CE_data, &nbytes,
4218  		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
4219  		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
4220  			/*
4221  			 * Packets enqueued by htt_h2t_ver_req_msg() and
4222  			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
4223  			 * freed in htt_htc_misc_pkt_pool_free() in
4224  			 * wlantl_close(). Check which endpoint they were
4225  			 * queued on, and do not free them here a second
4226  			 * time.
4227  			 */
4228  			if (id == scn->htc_htt_tx_endpoint)
4229  				return;
4230  			/* Indicate the completion to higher
4231  			 * layer to free the buffer
4232  			 */
4233  			if (pipe_info->pipe_callbacks.txCompletionHandler)
4234  				pipe_info->pipe_callbacks.
4235  				    txCompletionHandler(pipe_info->
4236  					    pipe_callbacks.Context,
4237  					    netbuf, id, toeplitz_hash_result);
4238  		}
4239  	}
4240  }
4241  
4242  /*
4243   * Cleanup residual buffers for device shutdown:
4244   *    buffers that were enqueued for receive
4245   *    buffers that were to be sent
4246   * Note: Buffers that had completed but which were
4247   * not yet processed are on a completion queue. They
4248   * are handled when the completion thread shuts down.
4249   */
4250  static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
4251  {
4252  	int pipe_num;
4253  	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
4254  	struct CE_state *ce_state;
4255  
4256  	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
4257  		struct HIF_CE_pipe_info *pipe_info;
4258  
4259  		ce_state = scn->ce_id_to_state[pipe_num];
4260  		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
4261  				((ce_state->htt_tx_data) ||
4262  				 (ce_state->htt_rx_data))) {
4263  			continue;
4264  		}
4265  
4266  		pipe_info = &hif_state->pipe_info[pipe_num];
4267  		hif_recv_buffer_cleanup_on_pipe(pipe_info);
4268  		hif_send_buffer_cleanup_on_pipe(pipe_info);
4269  	}
4270  }
4271  
4272  void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
4273  {
4274  	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
4275  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4276  
4277  	hif_buffer_cleanup(hif_state);
4278  }
4279  
4280  static void hif_destroy_oom_work(struct hif_softc *scn)
4281  {
4282  	struct CE_state *ce_state;
4283  	int ce_id;
4284  
4285  	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
4286  		ce_state = scn->ce_id_to_state[ce_id];
4287  		if (ce_state)
4288  			qdf_destroy_work(scn->qdf_dev,
4289  					 &ce_state->oom_allocation_work);
4290  	}
4291  	qdf_atomic_set(&scn->active_oom_work_cnt, 0);
4292  }
4293  
4294  void hif_ce_stop(struct hif_softc *scn)
4295  {
4296  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4297  	int pipe_num;
4298  
4299  	/*
4300  	 * before cleaning up any memory, ensure irq &
4301  	 * bottom half contexts will not be re-entered
4302  	 */
4303  	hif_disable_isr(&scn->osc);
4304  	hif_destroy_oom_work(scn);
4305  	scn->hif_init_done = false;
4306  
4307  	/*
4308  	 * At this point, asynchronous threads are stopped. The
4309  	 * Target should neither DMA nor interrupt, and Host code
4310  	 * may not initiate anything more. So we just need to clean
4311  	 * up Host-side state.
4312  	 */
4313  
4314  	if (scn->athdiag_procfs_inited) {
4315  		athdiag_procfs_remove();
4316  		scn->athdiag_procfs_inited = false;
4317  	}
4318  
4319  	hif_buffer_cleanup(hif_state);
4320  
4321  	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
4322  		struct HIF_CE_pipe_info *pipe_info;
4323  		struct CE_attr attr;
4324  		struct CE_handle *ce_diag = hif_state->ce_diag;
4325  
4326  		pipe_info = &hif_state->pipe_info[pipe_num];
4327  		if (pipe_info->ce_hdl) {
4328  			if (pipe_info->ce_hdl != ce_diag &&
4329  			    hif_state->started) {
4330  				attr = hif_state->host_ce_config[pipe_num];
4331  				if (attr.src_nentries)
4332  					qdf_spinlock_destroy(&pipe_info->
4333  							completion_freeq_lock);
4334  			}
4335  			ce_fini(pipe_info->ce_hdl);
4336  			pipe_info->ce_hdl = NULL;
4337  			pipe_info->buf_sz = 0;
4338  			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
4339  		}
4340  	}
4341  
4342  	hif_ce_rx_wbuff_deregister(scn);
4343  
4344  	if (hif_state->sleep_timer_init) {
4345  		qdf_timer_stop(&hif_state->sleep_timer);
4346  		qdf_timer_free(&hif_state->sleep_timer);
4347  		hif_state->sleep_timer_init = false;
4348  	}
4349  
4350  	hif_state->started = false;
4351  }
4352  
4353  #ifdef CONFIG_SHADOW_V3
4354  void hif_preare_shadow_register_cfg_v3(struct hif_softc *scn)
4355  {
4356  	int shadow_cfg_idx = scn->num_shadow_registers_configured;
4357  	int i;
4358  
4359  	/* shadow reg config for CE SRC registers */
4360  	for (i = 0; i < scn->ce_count; i++) {
4361  		scn->shadow_regs[shadow_cfg_idx].addr =
4362  				CE_BASE_ADDRESS(i) + SR_WR_INDEX_ADDRESS;
4363  		shadow_cfg_idx++;
4364  	}
4365  
4366  	/* shadow reg config for CE DST registers */
4367  	for (i = 0; i < scn->ce_count; i++) {
4368  		scn->shadow_regs[shadow_cfg_idx].addr =
4369  				CE_BASE_ADDRESS(i) + DST_WR_INDEX_ADDRESS;
4370  		shadow_cfg_idx++;
4371  	}
4372  
4373  	scn->num_shadow_registers_configured = shadow_cfg_idx;
4374  }
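
/*
 * Sketch of the resulting layout (assuming the index starts at 0 and
 * ce_count == 12): entries 0..11 carry the CE source write-index
 * addresses and entries 12..23 the destination write-index addresses:
 *
 *	shadow_regs[0].addr  = CE_BASE_ADDRESS(0) + SR_WR_INDEX_ADDRESS;
 *	shadow_regs[12].addr = CE_BASE_ADDRESS(0) + DST_WR_INDEX_ADDRESS;
 */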
4375  
4376  void hif_get_shadow_reg_config_v3(struct hif_softc *scn,
4377  				  struct pld_shadow_reg_v3_cfg **shadow_config,
4378  				  int *num_shadow_registers_configured)
4379  {
4380  	*shadow_config = scn->shadow_regs;
4381  	*num_shadow_registers_configured =
4382  				scn->num_shadow_registers_configured;
4383  }
4384  #endif
4385  
4386  static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
4387  				   struct shadow_reg_cfg
4388  				   **target_shadow_reg_cfg_ret,
4389  				   uint32_t *shadow_cfg_sz_ret)
4390  {
4391  	if (target_shadow_reg_cfg_ret)
4392  		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
4393  	if (shadow_cfg_sz_ret)
4394  		*shadow_cfg_sz_ret = shadow_cfg_sz;
4395  }
4396  
4397  /**
4398   * hif_get_target_ce_config() - get copy engine configuration
4399   * @scn: HIF context
4400   * @target_ce_config_ret: basic copy engine configuration
4401   * @target_ce_config_sz_ret: size of the basic configuration in bytes
4402   * @target_service_to_ce_map_ret: service mapping for the copy engines
4403   * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
4404   * @target_shadow_reg_cfg_ret: shadow register configuration
4405   * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
4406   *
4407   * Provides an accessor to these values outside of this file.
4408   * Currently these are stored in static pointers to const sections.
4409   * There are multiple configurations that are selected from at compile time.
4410   * Runtime selection would need to consider mode, target type and bus type.
4411   *
4412   * Return: return by parameter.
4413   */
4414  void hif_get_target_ce_config(struct hif_softc *scn,
4415  		struct CE_pipe_config **target_ce_config_ret,
4416  		uint32_t *target_ce_config_sz_ret,
4417  		struct service_to_pipe **target_service_to_ce_map_ret,
4418  		uint32_t *target_service_to_ce_map_sz_ret,
4419  		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
4420  		uint32_t *shadow_cfg_sz_ret)
4421  {
4422  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4423  
4424  	*target_ce_config_ret = hif_state->target_ce_config;
4425  	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
4426  
4427  	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
4428  				       target_service_to_ce_map_sz_ret);
4429  	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
4430  			       shadow_cfg_sz_ret);
4431  }
4432  
4433  #ifdef CONFIG_SHADOW_V3
4434  static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
4435  {
4436  	int i;
4437  
4438  	hif_info("v3: num_config %d", cfg->num_shadow_reg_v3_cfg);
4439  	for (i = 0; i < cfg->num_shadow_reg_v3_cfg; i++)
4440  		hif_info("i %d, val %x", i, cfg->shadow_reg_v3_cfg[i].addr);
4441  }
4442  
4443  #elif defined(CONFIG_SHADOW_V2)
4444  static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
4445  {
4446  	int i;
4447  
4448  	hif_info("v2: num_config %d", cfg->num_shadow_reg_v2_cfg);
4449  	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++)
4450  		hif_info("i %d, val %x", i, cfg->shadow_reg_v2_cfg[i].addr);
4451  }
4452  
4453  #else
4454  static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
4455  {
4456  	hif_info("CONFIG_SHADOW V2/V3 not defined");
4457  }
4458  #endif
4459  
4460  #ifdef ADRASTEA_RRI_ON_DDR
4461  /**
4462   * hif_get_src_ring_read_index(): Called to get the SRRI
4463   *
4464   * @scn: hif_softc pointer
4465   * @CE_ctrl_addr: base address of the CE whose RRI is to be read
4466   *
4467   * This function returns the SRRI to the caller. For CEs that
4468   * don't have interrupts enabled, we look at the DDR-based SRRI
4469   *
4470   * Return: SRRI
4471   */
4472  inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
4473  		uint32_t CE_ctrl_addr)
4474  {
4475  	struct CE_attr attr;
4476  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4477  
4478  	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
4479  	if (attr.flags & CE_ATTR_DISABLE_INTR) {
4480  		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
4481  	} else {
4482  		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
4483  			return A_TARGET_READ(scn,
4484  					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
4485  		else
4486  			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
4487  					CE_ctrl_addr);
4488  	}
4489  }
4490  
4491  /**
4492   * hif_get_dst_ring_read_index(): Called to get the DRRI
4493   *
4494   * @scn: hif_softc pointer
4495   * @CE_ctrl_addr: base address of the CE whose RRI is to be read
4496   *
4497   * This function returns the DRRI to the caller. For CEs that
4498   * don't have interrupts enabled, we look at the DDR-based DRRI
4499   *
4500   * Return: DRRI
4501   */
4502  inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
4503  		uint32_t CE_ctrl_addr)
4504  {
4505  	struct CE_attr attr;
4506  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4507  
4508  	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
4509  
4510  	if (attr.flags & CE_ATTR_DISABLE_INTR) {
4511  		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
4512  	} else {
4513  		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
4514  			return A_TARGET_READ(scn,
4515  					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
4516  		else
4517  			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
4518  					CE_ctrl_addr);
4519  	}
4520  }
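
/*
 * Sketch of the fallback order implemented above (illustration only):
 *
 *	CE_ATTR_DISABLE_INTR set	-> read the DDR copy
 *	register access allowed		-> A_TARGET_READ(CURRENT_xRRI_ADDRESS)
 *	register access not allowed	-> read the DDR copy
 */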
4521  
4522  /**
4523   * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
4524   * @scn: hif_softc pointer
4525   *
4526   * Return: qdf status
4527   */
4528  static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
4529  {
4530  	qdf_dma_addr_t paddr_rri_on_ddr = 0;
4531  
4532  	scn->vaddr_rri_on_ddr =
4533  		(void *)qdf_mem_alloc_consistent(scn->qdf_dev,
4534  		scn->qdf_dev->dev, RRI_ON_DDR_MEM_SIZE,
4535  		&paddr_rri_on_ddr);
4536  
4537  	if (!scn->vaddr_rri_on_ddr) {
4538  		hif_err("dmaable page alloc fail");
4539  		return QDF_STATUS_E_NOMEM;
4540  	}
4541  
4542  	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
4543  
4544  	qdf_mem_zero(scn->vaddr_rri_on_ddr, RRI_ON_DDR_MEM_SIZE);
4545  
4546  	return QDF_STATUS_SUCCESS;
4547  }
4548  #endif
4549  
4550  #if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
4551  /**
4552   * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
4553   *
4554   * @scn: hif_softc pointer
4555   *
4556   * This function allocates non-cached memory on DDR and sends
4557   * the physical address of this memory to the CE hardware. The
4558   * hardware updates the RRI on this particular location.
4559   *
4560   * Return: None
4561   */
4562  #ifdef QCA_WIFI_WCN6450
4563  static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
4564  {
4565  	unsigned int i;
4566  	uint32_t high_paddr, low_paddr;
4567  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4568  	struct CE_attr *attr;
4569  
4570  	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
4571  		return;
4572  
4573  	low_paddr  = RRI_ON_DDR_PADDR_LOW(scn->paddr_rri_on_ddr);
4574  	high_paddr = RRI_ON_DDR_PADDR_HIGH(scn->paddr_rri_on_ddr);
4575  
4576  	hif_debug("using srri and drri from DDR");
4577  
4578  	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
4579  	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
4580  
4581  	for (i = 0; i < CE_COUNT; i++) {
4582  		attr = &hif_state->host_ce_config[i];
4583  		if (attr->src_nentries || attr->dest_nentries)
4584  			CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
4585  	}
4586  }
4587  #else
4588  static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
4589  {
4590  	unsigned int i;
4591  	uint32_t high_paddr, low_paddr;
4592  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4593  	struct CE_pipe_config *ce_config;
4594  
4595  	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
4596  		return;
4597  
4598  	low_paddr  = RRI_ON_DDR_PADDR_LOW(scn->paddr_rri_on_ddr);
4599  	high_paddr = RRI_ON_DDR_PADDR_HIGH(scn->paddr_rri_on_ddr);
4600  
4601  	hif_debug("using srri and drri from DDR");
4602  
4603  	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
4604  	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
4605  
4606  	for (i = 0; i < CE_COUNT; i++) {
4607  		ce_config = &hif_state->target_ce_config[i];
4608  		/*
4609  		 * For the DST channel, program both IDX_UPD_EN and the
4610  		 * DMAX length (on behalf of FW) at once to avoid racing
4611  		 * with the FW register update.
4612  		 */
4613  		if (ce_config->pipedir == PIPEDIR_IN && ce_config->nbytes_max)
4614  			CE_IDX_UPD_EN_DMAX_LEN_SET(scn, CE_BASE_ADDRESS(i),
4615  						   ce_config->nbytes_max);
4616  		else
4617  			CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
4618  	}
4619  }
4620  #endif
4621  
4622  #else
4623  /**
4624   * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
4625   *
4626   * @scn: hif_softc pointer
4627   *
4628   * This is a dummy implementation for platforms that don't
4629   * support this functionality.
4630   *
4631   * Return: None
4632   */
4633  static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
4634  {
4635  }
4636  #endif
4637  
4638  /**
4639   * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
4640   *                                    QMI command
4641   * @scn: hif context
4642   * @cfg: wlan enable config
4643   *
4644   * In the case of Genoa, the rri_over_ddr memory configuration is passed
4645   * to the firmware through the QMI configure command.
4646   */
4647  #if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
4648  static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
4649  					   struct pld_wlan_enable_cfg *cfg)
4650  {
4651  	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
4652  		return;
4653  
4654  	cfg->rri_over_ddr_cfg_valid = true;
4655  	cfg->rri_over_ddr_cfg.base_addr_low =
4656  		 BITS0_TO_31(scn->paddr_rri_on_ddr);
4657  	cfg->rri_over_ddr_cfg.base_addr_high =
4658  		 BITS32_TO_35(scn->paddr_rri_on_ddr);
4659  }
4660  #else
4661  static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
4662  					   struct pld_wlan_enable_cfg *cfg)
4663  {
4664  }
4665  #endif
4666  
4667  /**
4668   * hif_wlan_enable(): call the platform driver to enable wlan
4669   * @scn: HIF Context
4670   *
4671   * This function passes the con_mode and CE configuration to
4672   * platform driver to enable wlan.
4673   *
4674   * Return: linux error code
4675   */
4676  int hif_wlan_enable(struct hif_softc *scn)
4677  {
4678  	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
4679  	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
4680  	struct pld_wlan_enable_cfg cfg = { 0 };
4681  	enum pld_driver_mode mode;
4682  	uint32_t con_mode = hif_get_conparam(scn);
4683  
4684  	hif_get_target_ce_config(scn,
4685  			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
4686  			&cfg.num_ce_tgt_cfg,
4687  			(struct service_to_pipe **)&cfg.ce_svc_cfg,
4688  			&cfg.num_ce_svc_pipe_cfg,
4689  			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
4690  			&cfg.num_shadow_reg_cfg);
4691  
4692  	/* translate from structure size to array size */
4693  	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
4694  	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
4695  	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
4696  
4697  	switch (tgt_info->target_type) {
4698  	case TARGET_TYPE_KIWI:
4699  	case TARGET_TYPE_MANGO:
4700  	case TARGET_TYPE_PEACH:
4701  	case TARGET_TYPE_WCN6450:
4702  		hif_prepare_hal_shadow_reg_cfg_v3(scn, &cfg);
4703  		break;
4704  	default:
4705  		hif_prepare_hal_shadow_register_cfg(scn,
4706  						    &cfg.shadow_reg_v2_cfg,
4707  						    &cfg.num_shadow_reg_v2_cfg);
4708  		break;
4709  	}
4710  
4711  	hif_print_hal_shadow_register_cfg(&cfg);
4712  
4713  	hif_update_rri_over_ddr_config(scn, &cfg);
4714  
4715  	if (QDF_GLOBAL_FTM_MODE == con_mode)
4716  		mode = PLD_FTM;
4717  	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
4718  		mode = PLD_COLDBOOT_CALIBRATION;
4719  	else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode)
4720  		mode = PLD_FTM_COLDBOOT_CALIBRATION;
4721  	else if (QDF_IS_EPPING_ENABLED(con_mode))
4722  		mode = PLD_EPPING;
4723  	else
4724  		mode = PLD_MISSION;
4725  
4726  	if (BYPASS_QMI)
4727  		return 0;
4728  	else
4729  		return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode);
4730  }
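
/*
 * Worked example (illustrative only): hif_get_target_ce_config()
 * reports the table sizes in bytes, so the divisions in
 * hif_wlan_enable() above turn them into element counts. Assuming,
 * hypothetically, sizeof(struct CE_pipe_config) == 20 and a 12-entry
 * target table:
 *
 *	cfg.num_ce_tgt_cfg = 240;	// bytes, as reported
 *	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
 *	// -> 12 array elements handed to pld_wlan_enable()
 */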

#ifdef WLAN_FEATURE_EPPING

#define CE_EPPING_USES_IRQ true

void hif_ce_prepare_epping_config(struct hif_softc *scn,
				  struct HIF_CE_state *hif_state)
{
	if (CE_EPPING_USES_IRQ)
		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
	else
		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
	hif_state->target_ce_config = target_ce_config_wlan_epping;
	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
	scn->ce_count = EPPING_HOST_CE_COUNT;
}
#endif

#ifdef QCN7605_SUPPORT
static inline
void hif_set_ce_config_qcn7605(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
	hif_state->target_ce_config_sz =
				 sizeof(target_ce_config_wlan_qcn7605);
	target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605;
	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605);
	scn->ce_count = QCN7605_CE_COUNT;
}
#else
static inline
void hif_set_ce_config_qcn7605(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	hif_err("QCN7605 not supported");
}
#endif

#ifdef CE_SVC_CMN_INIT
#ifdef QCA_WIFI_SUPPORT_SRNG
static inline void hif_ce_service_init(void)
{
	ce_service_srng_init();
}
#else
static inline void hif_ce_service_init(void)
{
	ce_service_legacy_init();
}
#endif
#else
static inline void hif_ce_service_init(void)
{
}
#endif

#ifdef FEATURE_DIRECT_LINK
/**
 * hif_ce_select_config_kiwi() - Select the host and target CE
 *  configuration for Kiwi
 * @hif_state: HIF CE context
 *
 * Return: None
 */
static inline
void hif_ce_select_config_kiwi(struct HIF_CE_state *hif_state)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif_state);

	if (pld_is_direct_link_supported(hif_ctx->qdf_dev->dev)) {
		hif_state->host_ce_config =
				host_ce_config_wlan_kiwi_direct_link;
		hif_state->target_ce_config =
				target_ce_config_wlan_kiwi_direct_link;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_kiwi_direct_link);
	} else {
		hif_state->host_ce_config = host_ce_config_wlan_kiwi;
		hif_state->target_ce_config = target_ce_config_wlan_kiwi;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_kiwi);
	}
}
#else
static inline
void hif_ce_select_config_kiwi(struct HIF_CE_state *hif_state)
{
	hif_state->host_ce_config = host_ce_config_wlan_kiwi;
	hif_state->target_ce_config = target_ce_config_wlan_kiwi;
	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_kiwi);
}
#endif

/**
 * hif_ce_prepare_config() - load the correct static tables.
 * @scn: hif context
 *
 * Epping uses different static attribute tables than mission mode.
 */
void hif_ce_prepare_config(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int ret;
	int msi_data_count = 0;
	int msi_data_start = 0;
	int msi_irq_start = 0;

	hif_ce_service_init();
	hif_state->ce_services = ce_services_attach(scn);

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	scn->ce_count = HOST_CE_COUNT;
	scn->int_assignment = &ce_int_context[msi_data_count];
	scn->free_irq_done = false;
	/* if epping is enabled we need to use the epping configuration. */
	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_ce_prepare_epping_config(scn, hif_state);
		return;
	}

	switch (tgt_info->target_type) {
	default:
		hif_state->host_ce_config = host_ce_config_wlan;
		hif_state->target_ce_config = target_ce_config_wlan;
		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
		break;
	case TARGET_TYPE_QCN7605:
		hif_set_ce_config_qcn7605(scn, hif_state);
		break;
	case TARGET_TYPE_AR900B:
	case TARGET_TYPE_QCA9984:
	case TARGET_TYPE_QCA9888:
		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar900b;
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
		}

		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_ar900b);

		break;

	case TARGET_TYPE_AR9888:
	case TARGET_TYPE_AR9888V2:
		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
		}

		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_ar9888);

		break;

	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
			hif_state->host_ce_config =
					host_ce_config_wlan_qca8074_pci;
			hif_state->target_ce_config =
				target_ce_config_wlan_qca8074_pci;
			hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qca8074_pci);
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
			hif_state->target_ce_config =
					target_ce_config_wlan_qca8074;
			hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qca8074);
		}
		break;
	case TARGET_TYPE_QCA6290:
		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qca6290);

		scn->ce_count = QCA_6290_CE_COUNT;
		break;
	case TARGET_TYPE_QCN9000:
		hif_state->host_ce_config = host_ce_config_wlan_qcn9000;
		hif_state->target_ce_config = target_ce_config_wlan_qcn9000;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qcn9000);
		scn->ce_count = QCN_9000_CE_COUNT;
		scn->ini_cfg.disable_wake_irq = 1;
		break;
	case TARGET_TYPE_QCN9224:
		hif_set_ce_config_qcn9224(scn, hif_state);
		break;
	case TARGET_TYPE_QCA5332:
		hif_state->host_ce_config = host_ce_config_wlan_qca5332;
		hif_state->target_ce_config = target_ce_config_wlan_qca5332;
		hif_state->target_ce_config_sz =
					 sizeof(target_ce_config_wlan_qca5332);
		scn->ce_count = QCA_5332_CE_COUNT;
		break;
	case TARGET_TYPE_QCN6122:
		hif_state->host_ce_config = host_ce_config_wlan_qcn6122;
		hif_state->target_ce_config = target_ce_config_wlan_qcn6122;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qcn6122);
		scn->ce_count = QCN_6122_CE_COUNT;
		scn->ini_cfg.disable_wake_irq = 1;
		break;
	case TARGET_TYPE_QCN9160:
		hif_state->host_ce_config = host_ce_config_wlan_qcn9160;
		hif_state->target_ce_config = target_ce_config_wlan_qcn9160;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qcn9160);
		scn->ce_count = QCN_9160_CE_COUNT;
		scn->ini_cfg.disable_wake_irq = 1;
		break;
	case TARGET_TYPE_QCN6432:
		hif_state->host_ce_config = host_ce_config_wlan_qcn6432;
		hif_state->target_ce_config = target_ce_config_wlan_qcn6432;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qcn6432);
		scn->ce_count = QCN_6432_CE_COUNT;
		scn->ini_cfg.disable_wake_irq = 1;
		break;
	case TARGET_TYPE_QCA5018:
		hif_state->host_ce_config = host_ce_config_wlan_qca5018;
		hif_state->target_ce_config = target_ce_config_wlan_qca5018;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qca5018);
		scn->ce_count = QCA_5018_CE_COUNT;
		break;
	case TARGET_TYPE_QCA9574:
		hif_state->host_ce_config = host_ce_config_wlan_qca9574;
		hif_state->target_ce_config = target_ce_config_wlan_qca9574;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qca9574);
		break;
	case TARGET_TYPE_QCA6390:
		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qca6390);

		scn->ce_count = QCA_6390_CE_COUNT;
		break;
	case TARGET_TYPE_QCA6490:
		hif_state->host_ce_config = host_ce_config_wlan_qca6490;
		hif_state->target_ce_config = target_ce_config_wlan_qca6490;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qca6490);

		scn->ce_count = QCA_6490_CE_COUNT;
		break;
	case TARGET_TYPE_QCA6750:
		hif_state->host_ce_config = host_ce_config_wlan_qca6750;
		hif_state->target_ce_config = target_ce_config_wlan_qca6750;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qca6750);

		scn->ce_count = QCA_6750_CE_COUNT;
		break;
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
	case TARGET_TYPE_PEACH:
		hif_ce_select_config_kiwi(hif_state);
		scn->ce_count = KIWI_CE_COUNT;
		break;
	case TARGET_TYPE_ADRASTEA:
		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_config_wlan_adrastea_nopktlog;
			hif_state->target_ce_config =
			       target_lowdesc_ce_config_wlan_adrastea_nopktlog;
			hif_state->target_ce_config_sz =
			sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog);
		} else {
			hif_state->host_ce_config =
				host_ce_config_wlan_adrastea;
			hif_state->target_ce_config =
					target_ce_config_wlan_adrastea;
			hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_adrastea);
		}
		break;
	case TARGET_TYPE_WCN6450:
		hif_state->host_ce_config = host_ce_config_wlan_wcn6450;
		hif_state->target_ce_config = target_ce_config_wlan_wcn6450;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_wcn6450);
		break;
	}
	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
}
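
/*
 * Sketch (hypothetical, for illustration only): wiring up a new target
 * type follows the same pattern as the cases above -- point the
 * host/target CE tables at per-chip static configs and set the CE
 * count. TARGET_TYPE_NEWCHIP and the *_newchip symbols below are made
 * up for the example:
 *
 *	case TARGET_TYPE_NEWCHIP:
 *		hif_state->host_ce_config = host_ce_config_wlan_newchip;
 *		hif_state->target_ce_config = target_ce_config_wlan_newchip;
 *		hif_state->target_ce_config_sz =
 *				sizeof(target_ce_config_wlan_newchip);
 *		scn->ce_count = NEWCHIP_CE_COUNT; // must be <= CE_COUNT_MAX
 *		break;
 */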

/**
 * hif_ce_open() - do ce specific allocations
 * @hif_sc: pointer to hif context
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_create(&hif_state->irq_reg_lock);
	qdf_spinlock_create(&hif_state->keep_awake_lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * hif_ce_close() - do ce specific free
 * @hif_sc: pointer to hif context
 */
void hif_ce_close(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
}

/**
 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
 * @hif_sc: hif context
 *
 * uses state variables to support cleaning up when hif_config_ce fails.
 */
void hif_unconfig_ce(struct hif_softc *hif_sc)
{
	int pipe_num;
	struct HIF_CE_pipe_info *pipe_info;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);

	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl)
			ce_unregister_irq(hif_state, (1 << pipe_num));
	}
	deinit_tasklet_workers(hif_hdl);
	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
		}
	}
	if (hif_sc->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		hif_sc->athdiag_procfs_inited = false;
	}
}

#ifdef CONFIG_BYPASS_QMI
#ifdef QCN7605_SUPPORT
/**
 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
 *
 * Return: void
 */
static void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	phys_addr_t target_pa;
	struct ce_info *ce_info_ptr;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	uint32_t i = 0;
	int ret;

	scn->vaddr_qmi_bypass =
			(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
							     scn->qdf_dev->dev,
							     FW_SHARED_MEM,
							     &target_pa);
	if (!scn->vaddr_qmi_bypass) {
		hif_err("Memory allocation failed, could not post target buf");
		return;
	}

	scn->paddr_qmi_bypass = target_pa;

	ce_info_ptr = (struct ce_info *)scn->vaddr_qmi_bypass;

	if (scn->vaddr_rri_on_ddr) {
		ce_info_ptr->rri_over_ddr_low_paddr  =
			 BITS0_TO_31(scn->paddr_rri_on_ddr);
		ce_info_ptr->rri_over_ddr_high_paddr =
			 BITS32_TO_35(scn->paddr_rri_on_ddr);
	}

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret) {
		hif_err("Failed to get CE msi config");
		return;
	}

	for (i = 0; i < CE_COUNT_MAX; i++) {
		ce_info_ptr->cfg[i].ce_id = i;
		ce_info_ptr->cfg[i].msi_vector =
			 (i % msi_data_count) + msi_irq_start;
	}

	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
	hif_info("target va %pK target pa %pa", scn->vaddr_qmi_bypass,
		 &target_pa);
}
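
/*
 * Worked example (illustrative only): the loop in
 * hif_post_static_buf_to_target() above distributes CEs over the
 * available MSI vectors round-robin. Assuming, hypothetically,
 * msi_data_count == 3 and msi_irq_start == 40:
 *
 *	CE0 -> (0 % 3) + 40 = 40
 *	CE1 -> (1 % 3) + 40 = 41
 *	CE2 -> (2 % 3) + 40 = 42
 *	CE3 -> (3 % 3) + 40 = 40	(wraps back to the first vector)
 */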

/**
 * hif_cleanup_static_buf_to_target() - clean up static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * Return: void
 */
void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
{
	void *target_va = scn->vaddr_qmi_bypass;
	phys_addr_t target_pa = scn->paddr_qmi_bypass;

	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
				FW_SHARED_MEM, target_va,
				target_pa, 0);
	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0);
}
#else
/**
 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
 *
 * Return: void
 */
static void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	qdf_dma_addr_t target_pa;

	scn->vaddr_qmi_bypass =
			(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
							     scn->qdf_dev->dev,
							     FW_SHARED_MEM,
							     &target_pa);
	if (!scn->vaddr_qmi_bypass) {
		hif_err("Memory allocation failed, could not post target buf");
		return;
	}

	scn->paddr_qmi_bypass = target_pa;
	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
}

/**
 * hif_cleanup_static_buf_to_target() - clean up static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * Return: void
 */
void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
{
	void *target_va = scn->vaddr_qmi_bypass;
	phys_addr_t target_pa = scn->paddr_qmi_bypass;

	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
				FW_SHARED_MEM, target_va,
				target_pa, 0);
	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0);
}
#endif

#else
static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
{
}

void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
{
}
#endif

static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
				bool wait_for_it)
{
	/* todo */
	return 0;
}

int hif_config_ce_by_id(struct hif_softc *scn, int pipe_num)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct HIF_CE_pipe_info *pipe_info;
	struct CE_state *ce_state = NULL;
	struct CE_attr *attr;
	int rv = 0;

	if (pipe_num >= CE_COUNT_MAX)
		return -EINVAL;

	pipe_info = &hif_state->pipe_info[pipe_num];
	pipe_info->pipe_num = pipe_num;
	pipe_info->HIF_CE_state = hif_state;
	attr = &hif_state->host_ce_config[pipe_num];
	ce_state = scn->ce_id_to_state[pipe_num];

	if (ce_state) {
		/* Do not reinitialize the CE if it's done already */
		rv = QDF_STATUS_E_BUSY;
		goto err;
	}

	pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
	ce_state = scn->ce_id_to_state[pipe_num];
	if (!ce_state) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		rv = QDF_STATUS_E_FAILURE;
		goto err;
	}
	qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
	QDF_ASSERT(pipe_info->ce_hdl);
	if (!pipe_info->ce_hdl) {
		rv = QDF_STATUS_E_FAILURE;
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}

	ce_state->lro_data = qdf_lro_init();

	if (attr->flags & CE_ATTR_DIAG) {
		/* Reserve the ultimate CE for
		 * Diagnostic Window support
		 */
		hif_state->ce_diag = pipe_info->ce_hdl;
		goto skip;
	}

	if (hif_is_nss_wifi_enabled(scn) && ce_state &&
	    (ce_state->htt_rx_data)) {
		goto skip;
	}

	pipe_info->buf_sz = (qdf_size_t)(attr->src_sz_max);
	if (attr->dest_nentries > 0) {
		atomic_set(&pipe_info->recv_bufs_needed,
			   init_buffer_count(attr->dest_nentries - 1));
		/* SRNG based CE has one entry less */
		if (ce_srng_based(scn))
			atomic_dec(&pipe_info->recv_bufs_needed);
	} else {
		atomic_set(&pipe_info->recv_bufs_needed, 0);
	}
	ce_tasklet_init(hif_state, (1 << pipe_num));
	ce_register_irq(hif_state, (1 << pipe_num));

	init_tasklet_worker_by_ceid(hif_hdl, pipe_num);
skip:
	return 0;
err:
	return rv;
}
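
/*
 * Worked example (illustrative only): for a receive pipe configured by
 * hif_config_ce_by_id() above with, hypothetically,
 * attr->dest_nentries == 512, the initial buffer demand starts from
 * 511 (one ring entry is conventionally kept unused so a full ring can
 * be told apart from an empty one), and an SRNG-based CE drops one
 * more:
 *
 *	recv_bufs_needed = init_buffer_count(512 - 1);	// e.g. 511
 *	if (ce_srng_based(scn))
 *		recv_bufs_needed--;			// -> 510
 */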

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
static inline void hif_gen_ce_id_history_idx_mapping(struct hif_softc *scn)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	uint8_t ce_id, hist_idx = 0;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE & (1 << ce_id))
			ce_hist->ce_id_hist_map[ce_id] = hist_idx++;
		else
			ce_hist->ce_id_hist_map[ce_id] = -1;
	}
}
#else
static inline void hif_gen_ce_id_history_idx_mapping(struct hif_softc *scn)
{
}
#endif

/**
 * hif_config_ce() - configure copy engines
 * @scn: hif context
 *
 * Prepares fw, copy engine hardware and host sw according
 * to the attributes selected by hif_ce_prepare_config.
 *
 * also calls athdiag_procfs_init
 *
 * return: 0 for success nonzero for failure.
 */
int hif_config_ce(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info;
	int pipe_num;

#ifdef ADRASTEA_SHADOW_REGISTERS
	int i;
#endif
	QDF_STATUS rv = QDF_STATUS_SUCCESS;

	scn->notice_send = true;
	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;

	hif_post_static_buf_to_target(scn);

	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;

	hif_config_rri_on_ddr(scn);

	if (ce_srng_based(scn))
		scn->bus_ops.hif_target_sleep_state_adjust =
			&hif_srng_sleep_state_adjust;

	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index; data storing is disabled.
	 */
	reset_ce_debug_history(scn);
	hif_gen_ce_id_history_idx_mapping(scn);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr *attr;

		pipe_info = &hif_state->pipe_info[pipe_num];
		attr = &hif_state->host_ce_config[pipe_num];

		if (attr->flags & CE_ATTR_INIT_ON_DEMAND)
			continue;

		if (hif_config_ce_by_id(scn, pipe_num))
			goto err;
	}

	if (athdiag_procfs_init(scn) != 0) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}
	scn->athdiag_procfs_inited = true;

	hif_debug("ce_init done");
	hif_debug("X, ret = %d", rv);

#ifdef ADRASTEA_SHADOW_REGISTERS
	hif_debug("Using Shadow Registers instead of CE Registers");
	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
		hif_debug("Shadow Register%d is mapped to address %x",
			  i,
			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
	}
#endif

	return rv != QDF_STATUS_SUCCESS;
err:
	/* Failure, so clean up */
	hif_unconfig_ce(scn);
	hif_info("X, ret = %d", rv);
	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
}
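
/*
 * Call-order sketch (illustrative, not a literal caller in this file):
 * the bus layer is expected to pair these roughly as follows, with
 * hif_unconfig_ce() doubling as the error-unwind path of a failed
 * hif_config_ce():
 *
 *	hif_ce_open(scn);		// locks only
 *	hif_ce_prepare_config(scn);	// pick the static CE tables
 *	if (hif_config_ce(scn))		// init CEs, irqs, athdiag
 *		goto fail;		// hif_config_ce already unwound
 *	...
 *	hif_unconfig_ce(scn);
 *	hif_ce_close(scn);
 */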

/**
 * hif_config_ce_pktlog() - configure copy engines
 * @hif_hdl: hif context
 *
 * Prepares fw, copy engine hardware and host sw according
 * to the attributes selected by hif_ce_prepare_config.
 *
 * also calls athdiag_procfs_init
 *
 * return: 0 for success nonzero for failure.
 */
int hif_config_ce_pktlog(struct hif_opaque_softc *hif_hdl)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;
	QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE;
	struct HIF_CE_pipe_info *pipe_info;

	if (!scn)
		goto err;

	if (scn->pktlog_init)
		return QDF_STATUS_SUCCESS;

	pipe_num = hif_get_pktlog_ce_num(scn);
	if (pipe_num < 0) {
		qdf_status = QDF_STATUS_E_FAILURE;
		goto err;
	}

	pipe_info = &hif_state->pipe_info[pipe_num];

	qdf_status = hif_config_ce_by_id(scn, pipe_num);
	/* CE already initialized; do not try to reinitialize it again */
	if (qdf_status == QDF_STATUS_E_BUSY)
		return QDF_STATUS_SUCCESS;

	qdf_status = hif_config_irq_by_ceid(scn, pipe_num);
	if (qdf_status < 0)
		goto err;

	qdf_status = hif_completion_thread_startup_by_ceid(hif_state, pipe_num);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		hif_err("Failed to start hif thread");
		goto err;
	}

	/* Post buffers for pktlog copy engine. */
	qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		/* cleanup is done in hif_ce_disable */
		hif_err("Failed to post buffers");
		return qdf_status;
	}
	scn->pktlog_init = true;
	return qdf_status != QDF_STATUS_SUCCESS;

err:
	hif_debug("X, ret = %d", qdf_status);
	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
}

#ifdef IPA_OFFLOAD
/**
 * hif_ce_ipa_get_ce_resource() - get uc resource on hif
 * @scn: bus context
 * @ce_sr: copyengine source ring base physical address
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * When the IPA micro controller data path offload feature is enabled,
 * HIF should release copy engine resource information to the IPA UC,
 * which will access the hardware resource using the released
 * information.
 *
 * Return: None
 */
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
			     qdf_shared_mem_t **ce_sr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;

	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
			    ce_reg_paddr);
}
#endif /* IPA_OFFLOAD */

#ifdef ADRASTEA_SHADOW_REGISTERS

/*
 * Current shadow register config
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *         0            |     0    |           src
 *         1     No Config - Doesn't point to anything
 *         2     No Config - Doesn't point to anything
 *         3            |     3    |           src
 *         4            |     4    |           src
 *         5            |     5    |           src
 *         6     No Config - Doesn't point to anything
 *         7            |     7    |           src
 *         8     No Config - Doesn't point to anything
 *         9     No Config - Doesn't point to anything
 *         10    No Config - Doesn't point to anything
 *         11    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *         12    No Config - Doesn't point to anything
 *         13           |     1    |           dst
 *         14           |     2    |           dst
 *         15    No Config - Doesn't point to anything
 *         16    No Config - Doesn't point to anything
 *         17    No Config - Doesn't point to anything
 *         18    No Config - Doesn't point to anything
 *         19           |     7    |           dst
 *         20           |     8    |           dst
 *         21    No Config - Doesn't point to anything
 *         22    No Config - Doesn't point to anything
 *         23    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *
 * ToDo - Move shadow register config to following in the future
 * This helps free up a block of shadow registers towards the end.
 * Can be used for other purposes
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *      0            |     0    |           src
 *      1            |     3    |           src
 *      2            |     4    |           src
 *      3            |     5    |           src
 *      4            |     7    |           src
 * -----------------------------------------------------------
 *      5            |     1    |           dst
 *      6            |     2    |           dst
 *      7            |     7    |           dst
 *      8            |     8    |           dst
 * -----------------------------------------------------------
 *      9     No Config - Doesn't point to anything
 *      12    No Config - Doesn't point to anything
 *      13    No Config - Doesn't point to anything
 *      14    No Config - Doesn't point to anything
 *      15    No Config - Doesn't point to anything
 *      16    No Config - Doesn't point to anything
 *      17    No Config - Doesn't point to anything
 *      18    No Config - Doesn't point to anything
 *      19    No Config - Doesn't point to anything
 *      20    No Config - Doesn't point to anything
 *      21    No Config - Doesn't point to anything
 *      22    No Config - Doesn't point to anything
 *      23    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 */
#ifndef QCN7605_SUPPORT
u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 3:
		addr = SHADOW_VALUE3;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	case 7:
		addr = SHADOW_VALUE7;
		break;
	default:
		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}
	return addr;
}

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 5:
		addr = SHADOW_VALUE17;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	case 9:
		addr = SHADOW_VALUE21;
		break;
	case 10:
		addr = SHADOW_VALUE22;
		break;
	case 11:
		addr = SHADOW_VALUE23;
		break;
	default:
		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}

	return addr;
}
#else
u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 3:
		addr = SHADOW_VALUE3;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	default:
		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}
	return addr;
}

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;
	u32 ce = COPY_ENGINE_ID(ctrl_addr);

	switch (ce) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 3:
		addr = SHADOW_VALUE15;
		break;
	case 5:
		addr = SHADOW_VALUE17;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	case 9:
		addr = SHADOW_VALUE21;
		break;
	case 10:
		addr = SHADOW_VALUE22;
		break;
	case 11:
		addr = SHADOW_VALUE23;
		break;
	default:
		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
		QDF_ASSERT(0);
	}

	return addr;
}
#endif
#endif

#if defined(FEATURE_LRO)
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
{
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	ce_state = scn->ce_id_to_state[ctx_id];

	return ce_state->lro_data;
}
#endif

/**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: hif_softc pointer.
 * @svc_id: Service ID for which the mapping is needed.
 * @ul_pipe: address of the container in which ul pipe is returned.
 * @dl_pipe: address of the container in which dl pipe is returned.
 * @ul_is_polled: address of the container in which a bool
 *			indicating if the UL CE for this service
 *			is polled is returned.
 * @dl_is_polled: address of the container in which a bool
 *			indicating if the DL CE for this service
 *			is polled is returned.
 *
 * Return: Indicates whether the service has been found in the table.
 *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
 *         There will be warning logs if either leg has not been updated
 *         because it missed the entry in the table (but this is not an err).
 */
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
			int *dl_is_polled)
{
	int status = -EINVAL;
	unsigned int i;
	struct service_to_pipe element;
	struct service_to_pipe *tgt_svc_map_to_use;
	uint32_t sz_tgt_svc_map_to_use;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	bool dl_updated = false;
	bool ul_updated = false;

	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
				       &sz_tgt_svc_map_to_use);

	*dl_is_polled = 0;  /* polling for received messages not supported */

	for (i = 0; i < (sz_tgt_svc_map_to_use / sizeof(element)); i++) {
		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
		if (element.service_id == svc_id) {
			if (element.pipedir == PIPEDIR_OUT) {
				*ul_pipe = element.pipenum;
				*ul_is_polled =
					(hif_state->host_ce_config[*ul_pipe].flags &
					 CE_ATTR_DISABLE_INTR) != 0;
				ul_updated = true;
			} else if (element.pipedir == PIPEDIR_IN) {
				*dl_pipe = element.pipenum;
				dl_updated = true;
			}
			status = 0;
		}
	}
	if (ul_updated == false)
		hif_debug("ul pipe is NOT updated for service %d", svc_id);
	if (dl_updated == false)
		hif_debug("dl pipe is NOT updated for service %d", svc_id);

	return status;
}
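
/*
 * Usage sketch (illustrative): callers typically resolve both legs of
 * a service in one shot; see hif_get_wake_ce_id() further below for an
 * in-tree example. Assuming a valid service id such as
 * HTC_CTRL_RSVD_SVC:
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (!hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
 *				     &ul_pipe, &dl_pipe,
 *				     &ul_polled, &dl_polled))
 *		// ul_pipe/dl_pipe now hold the CE ids for the service
 */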

#ifdef SHADOW_REG_DEBUG
inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, srri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);

	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != srri_from_ddr) {
		hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
		       srri_from_ddr, read_from_hw,
		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return srri_from_ddr;
}

inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, drri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);

	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != drri_from_ddr) {
		hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
		       drri_from_ddr, read_from_hw,
		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return drri_from_ddr;
}
#endif

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_opaque_softc pointer.
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
		if (!scn->ce_id_to_state[i]) {
			hif_debug("CE%d not used", i);
			continue;
		}

		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *)&ce_reg_values[0],
					   ce_reg_word_size * sizeof(uint32_t));

		if (status != QDF_STATUS_SUCCESS) {
			hif_err("Dumping CE register failed!");
			return -EACCES;
		}
		hif_debug("CE%d=>", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *)&ce_reg_values[0],
				   ce_reg_word_size * sizeof(uint32_t));
		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
				+ SR_WR_INDEX_ADDRESS),
				ce_reg_values[SR_WR_INDEX_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
				+ CURRENT_SRRI_ADDRESS),
				ce_reg_values[CURRENT_SRRI_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
				+ DST_WR_INDEX_ADDRESS),
				ce_reg_values[DST_WR_INDEX_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
				+ CURRENT_DRRI_ADDRESS),
				ce_reg_values[CURRENT_DRRI_ADDRESS / 4]);
		qdf_print("---");
	}
	return 0;
}
qdf_export_symbol(hif_dump_ce_registers);

#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;

	if (src_ring) {
		hif_info->ul_pipe.nentries = src_ring->nentries;
		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
		hif_info->ul_pipe.sw_index = src_ring->sw_index;
		hif_info->ul_pipe.write_index = src_ring->write_index;
		hif_info->ul_pipe.hw_index = src_ring->hw_index;
		hif_info->ul_pipe.base_addr_CE_space =
			src_ring->base_addr_CE_space;
		hif_info->ul_pipe.base_addr_owner_space =
			src_ring->base_addr_owner_space;
	}

	if (dest_ring) {
		hif_info->dl_pipe.nentries = dest_ring->nentries;
		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
		hif_info->dl_pipe.write_index = dest_ring->write_index;
		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
		hif_info->dl_pipe.base_addr_CE_space =
			dest_ring->base_addr_CE_space;
		hif_info->dl_pipe.base_addr_owner_space =
			dest_ring->base_addr_owner_space;
	}

	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
	hif_info->ctrl_addr = ce_state->ctrl_addr;

	return hif_info;
}
qdf_export_symbol(hif_get_addl_pipe_info);

uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->nss_wifi_ol_mode = mode;
	return 0;
}
qdf_export_symbol(hif_set_nss_wifiol_mode);
#endif

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->hif_attribute = hif_attrib;
}

/* disable interrupts (only applicable to the legacy copy engine currently) */
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
	uint32_t ctrl_addr = CE_state->ctrl_addr;

	Q_TARGET_ACCESS_BEGIN(scn);
	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}
qdf_export_symbol(hif_disable_interrupt);

/**
 * hif_fw_event_handler() - hif fw event handler
 * @hif_state: pointer to hif ce state structure
 *
 * Process fw events and raise HTC callback to process fw events.
 *
 * Return: none
 */
static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
{
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	if (!msg_callbacks->fwEventHandler)
		return;

	msg_callbacks->fwEventHandler(msg_callbacks->Context,
			QDF_STATUS_E_FAILURE);
}

#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer
 *
 * Called from the PCI interrupt handler when the Target raises a
 * firmware-generated interrupt to the Host.
 *
 * only registered for legacy ce devices
 *
 * Return: status of handled irq
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	struct hif_softc *scn = arg;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	uint32_t fw_indicator_address, fw_indicator;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;

	fw_indicator_address = hif_state->fw_indicator_address;
	/* For sudden unplug this will return ~0 */
	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
		/* ACK: clear Target-side pending event */
		A_TARGET_WRITE(scn, fw_indicator_address,
			       fw_indicator & ~FW_IND_EVENT_PENDING);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;

		if (hif_state->started) {
			hif_fw_event_handler(hif_state);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it.  Generally unexpected.
			 * fw_indicator used as bitmap, and defined as below:
			 *     FW_IND_EVENT_PENDING    0x1
			 *     FW_IND_INITIALIZED      0x2
			 *     FW_IND_NEEDRECOVER      0x4
			 */
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("%s: Early firmware event indicated 0x%x\n",
				 __func__, fw_indicator));
		}
	} else {
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}

	return ATH_ISR_SCHED;
}
#else
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	return ATH_ISR_SCHED;
}
#endif /* #ifndef QCA_WIFI_3_0 */
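
/*
 * Worked example (illustrative only): fw_indicator is a bitmap, so a
 * hypothetical read of 0x5 means FW_IND_EVENT_PENDING (0x1) plus
 * FW_IND_NEEDRECOVER (0x4). The ACK write in hif_fw_interrupt_handler()
 * clears only the pending bit:
 *
 *	fw_indicator = 0x5;
 *	A_TARGET_WRITE(scn, fw_indicator_address,
 *		       fw_indicator & ~FW_IND_EVENT_PENDING); // writes 0x4
 */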

/**
 * hif_wlan_disable(): call the platform driver to disable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode to the platform driver to disable
 * wlan.
 *
 * Return: void
 */
void hif_wlan_disable(struct hif_softc *scn)
{
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	if (scn->target_status == TARGET_STATUS_RESET)
		return;

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	pld_wlan_disable(scn->qdf_dev->dev, mode);
}

int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
{
	int status;
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
					 HTC_CTRL_RSVD_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		hif_err("Failed to map pipe: %d", status);
		return status;
	}

	*ce_id = dl_pipe;

	return 0;
}

int hif_get_fw_diag_ce_id(struct hif_softc *scn, uint8_t *ce_id)
{
	int status;
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	/* DL pipe for WMI_CONTROL_DIAG_SVC should map to the FW DIAG CE_ID */
	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
					 WMI_CONTROL_DIAG_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		hif_err("Failed to map pipe: %d", status);
		return status;
	}

	*ce_id = dl_pipe;

	return 0;
}

#ifdef HIF_CE_LOG_INFO
/**
 * ce_get_index_info(): Get CE index info
 * @scn: HIF Context
 * @ce_state: CE opaque handle
 * @info: CE info
 *
 * Return: 0 for success and non-zero for failure
 */
static
int ce_get_index_info(struct hif_softc *scn, void *ce_state,
		      struct ce_index *info)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_index_info(scn, ce_state, info);
}

void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset)
{
	struct hang_event_info info = {0};
	static uint32_t tracked_ce = BIT(CE_ID_1) | BIT(CE_ID_2) |
		BIT(CE_ID_3) | BIT(CE_ID_4) | BIT(CE_ID_9) | BIT(CE_ID_10);
	uint8_t curr_index = 0;
	uint8_t i;
	uint16_t size;

	info.active_tasklet_count = qdf_atomic_read(&scn->active_tasklet_cnt);
	info.active_grp_tasklet_cnt =
				qdf_atomic_read(&scn->active_grp_tasklet_cnt);

	for (i = 0; i < scn->ce_count; i++) {
		if (!(tracked_ce & BIT(i)) || !scn->ce_id_to_state[i])
			continue;

		if (ce_get_index_info(scn, scn->ce_id_to_state[i],
				      &info.ce_info[curr_index]))
			continue;

		curr_index++;
	}

	info.ce_count = curr_index;
	size = sizeof(info) -
		(CE_COUNT_MAX - info.ce_count) * sizeof(struct ce_index);

	if (*offset + size > QDF_WLAN_HANG_FW_OFFSET)
		return;

	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_CE_INFO,
			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);

	qdf_mem_copy(data + *offset, &info, size);
	*offset = *offset + size;
}
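
/*
 * Worked example (illustrative only): only the populated ce_info[]
 * slots are serialized into the hang event. Assuming, hypothetically,
 * CE_COUNT_MAX == 12 and info.ce_count == 6 after the loop:
 *
 *	size = sizeof(info) - (12 - 6) * sizeof(struct ce_index);
 *
 * i.e. the six unused trailing ce_index entries are trimmed before the
 * TLV header and payload are copied out at *offset.
 */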
#endif

#ifdef FEATURE_DIRECT_LINK
QDF_STATUS
hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
			   uint64_t addr, uint32_t data)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (hif_state->ce_services->ce_set_irq_config_by_ceid)
		return hif_state->ce_services->ce_set_irq_config_by_ceid(
									hif_ctx,
									ce_id,
									addr,
									data);

	return QDF_STATUS_E_NOSUPPORT;
}

uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn,
						  uint64_t **dma_addr,
						  uint32_t *buf_size)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct ce_ops *ce_services = hif_state->ce_services;

	if (ce_services->ce_get_direct_link_dest_buffers)
		return ce_services->ce_get_direct_link_dest_buffers(hif_ctx,
								    dma_addr,
								    buf_size);

	return 0;
}

QDF_STATUS
hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
				 struct hif_direct_link_ce_info *info,
				 uint8_t max_ce_info_len)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct ce_ops *ce_services = hif_state->ce_services;

	if (ce_services->ce_get_direct_link_ring_info)
		return ce_services->ce_get_direct_link_ring_info(hif_ctx,
							       info,
							       max_ce_info_len);

	return QDF_STATUS_E_NOSUPPORT;
}
#endif