1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * Copyright (c) 2014-2015, 2022 MediaTek Inc.
4   * Author: Chaotian.Jing <chaotian.jing@mediatek.com>
5   */
6  
7  #include <linux/module.h>
8  #include <linux/bitops.h>
9  #include <linux/clk.h>
10  #include <linux/delay.h>
11  #include <linux/dma-mapping.h>
12  #include <linux/iopoll.h>
13  #include <linux/ioport.h>
14  #include <linux/irq.h>
15  #include <linux/of.h>
16  #include <linux/pinctrl/consumer.h>
17  #include <linux/platform_device.h>
18  #include <linux/pm.h>
19  #include <linux/pm_runtime.h>
20  #include <linux/pm_wakeirq.h>
21  #include <linux/regulator/consumer.h>
22  #include <linux/slab.h>
23  #include <linux/spinlock.h>
24  #include <linux/interrupt.h>
25  #include <linux/reset.h>
26  
27  #include <linux/mmc/card.h>
28  #include <linux/mmc/core.h>
29  #include <linux/mmc/host.h>
30  #include <linux/mmc/mmc.h>
31  #include <linux/mmc/sd.h>
32  #include <linux/mmc/sdio.h>
33  #include <linux/mmc/slot-gpio.h>
34  
35  #include "cqhci.h"
36  
37  #define MAX_BD_NUM          1024
38  #define MSDC_NR_CLOCKS      3
39  
40  /*--------------------------------------------------------------------------*/
41  /* Common Definition                                                        */
42  /*--------------------------------------------------------------------------*/
43  #define MSDC_BUS_1BITS          0x0
44  #define MSDC_BUS_4BITS          0x1
45  #define MSDC_BUS_8BITS          0x2
46  
47  #define MSDC_BURST_64B          0x6
48  
49  /*--------------------------------------------------------------------------*/
50  /* Register Offset                                                          */
51  /*--------------------------------------------------------------------------*/
52  #define MSDC_CFG         0x0
53  #define MSDC_IOCON       0x04
54  #define MSDC_PS          0x08
55  #define MSDC_INT         0x0c
56  #define MSDC_INTEN       0x10
57  #define MSDC_FIFOCS      0x14
58  #define SDC_CFG          0x30
59  #define SDC_CMD          0x34
60  #define SDC_ARG          0x38
61  #define SDC_STS          0x3c
62  #define SDC_RESP0        0x40
63  #define SDC_RESP1        0x44
64  #define SDC_RESP2        0x48
65  #define SDC_RESP3        0x4c
66  #define SDC_BLK_NUM      0x50
67  #define SDC_ADV_CFG0     0x64
68  #define EMMC_IOCON       0x7c
69  #define SDC_ACMD_RESP    0x80
70  #define DMA_SA_H4BIT     0x8c
71  #define MSDC_DMA_SA      0x90
72  #define MSDC_DMA_CTRL    0x98
73  #define MSDC_DMA_CFG     0x9c
74  #define MSDC_PATCH_BIT   0xb0
75  #define MSDC_PATCH_BIT1  0xb4
76  #define MSDC_PATCH_BIT2  0xb8
77  #define MSDC_PAD_TUNE    0xec
78  #define MSDC_PAD_TUNE0   0xf0
79  #define PAD_DS_TUNE      0x188
80  #define PAD_CMD_TUNE     0x18c
81  #define EMMC51_CFG0	 0x204
82  #define EMMC50_CFG0      0x208
83  #define EMMC50_CFG1      0x20c
84  #define EMMC50_CFG3      0x220
85  #define SDC_FIFO_CFG     0x228
86  #define CQHCI_SETTING	 0x7fc
87  
88  /*--------------------------------------------------------------------------*/
89  /* Top Pad Register Offset                                                  */
90  /*--------------------------------------------------------------------------*/
91  #define EMMC_TOP_CONTROL	0x00
92  #define EMMC_TOP_CMD		0x04
93  #define EMMC50_PAD_DS_TUNE	0x0c
94  
95  /*--------------------------------------------------------------------------*/
96  /* Register Mask                                                            */
97  /*--------------------------------------------------------------------------*/
98  
99  /* MSDC_CFG mask */
100  #define MSDC_CFG_MODE           BIT(0)	/* RW */
101  #define MSDC_CFG_CKPDN          BIT(1)	/* RW */
102  #define MSDC_CFG_RST            BIT(2)	/* RW */
103  #define MSDC_CFG_PIO            BIT(3)	/* RW */
104  #define MSDC_CFG_CKDRVEN        BIT(4)	/* RW */
105  #define MSDC_CFG_BV18SDT        BIT(5)	/* RW */
106  #define MSDC_CFG_BV18PSS        BIT(6)	/* R  */
107  #define MSDC_CFG_CKSTB          BIT(7)	/* R  */
108  #define MSDC_CFG_CKDIV          GENMASK(15, 8)	/* RW */
109  #define MSDC_CFG_CKMOD          GENMASK(17, 16)	/* RW */
110  #define MSDC_CFG_HS400_CK_MODE  BIT(18)	/* RW */
111  #define MSDC_CFG_HS400_CK_MODE_EXTRA  BIT(22)	/* RW */
112  #define MSDC_CFG_CKDIV_EXTRA    GENMASK(19, 8)	/* RW */
113  #define MSDC_CFG_CKMOD_EXTRA    GENMASK(21, 20)	/* RW */
114  
115  /* MSDC_IOCON mask */
116  #define MSDC_IOCON_SDR104CKS    BIT(0)	/* RW */
117  #define MSDC_IOCON_RSPL         BIT(1)	/* RW */
118  #define MSDC_IOCON_DSPL         BIT(2)	/* RW */
119  #define MSDC_IOCON_DDLSEL       BIT(3)	/* RW */
120  #define MSDC_IOCON_DDR50CKD     BIT(4)	/* RW */
121  #define MSDC_IOCON_DSPLSEL      BIT(5)	/* RW */
122  #define MSDC_IOCON_W_DSPL       BIT(8)	/* RW */
123  #define MSDC_IOCON_D0SPL        BIT(16)	/* RW */
124  #define MSDC_IOCON_D1SPL        BIT(17)	/* RW */
125  #define MSDC_IOCON_D2SPL        BIT(18)	/* RW */
126  #define MSDC_IOCON_D3SPL        BIT(19)	/* RW */
127  #define MSDC_IOCON_D4SPL        BIT(20)	/* RW */
128  #define MSDC_IOCON_D5SPL        BIT(21)	/* RW */
129  #define MSDC_IOCON_D6SPL        BIT(22)	/* RW */
130  #define MSDC_IOCON_D7SPL        BIT(23)	/* RW */
131  #define MSDC_IOCON_RISCSZ       GENMASK(25, 24)	/* RW */
132  
133  /* MSDC_PS mask */
134  #define MSDC_PS_CDEN            BIT(0)	/* RW */
135  #define MSDC_PS_CDSTS           BIT(1)	/* R  */
136  #define MSDC_PS_CDDEBOUNCE      GENMASK(15, 12)	/* RW */
137  #define MSDC_PS_DAT             GENMASK(23, 16)	/* R  */
138  #define MSDC_PS_DATA1           BIT(17)	/* R  */
139  #define MSDC_PS_CMD             BIT(24)	/* R  */
140  #define MSDC_PS_WP              BIT(31)	/* R  */
141  
142  /* MSDC_INT mask */
143  #define MSDC_INT_MMCIRQ         BIT(0)	/* W1C */
144  #define MSDC_INT_CDSC           BIT(1)	/* W1C */
145  #define MSDC_INT_ACMDRDY        BIT(3)	/* W1C */
146  #define MSDC_INT_ACMDTMO        BIT(4)	/* W1C */
147  #define MSDC_INT_ACMDCRCERR     BIT(5)	/* W1C */
148  #define MSDC_INT_DMAQ_EMPTY     BIT(6)	/* W1C */
149  #define MSDC_INT_SDIOIRQ        BIT(7)	/* W1C */
150  #define MSDC_INT_CMDRDY         BIT(8)	/* W1C */
151  #define MSDC_INT_CMDTMO         BIT(9)	/* W1C */
152  #define MSDC_INT_RSPCRCERR      BIT(10)	/* W1C */
153  #define MSDC_INT_CSTA           BIT(11)	/* R */
154  #define MSDC_INT_XFER_COMPL     BIT(12)	/* W1C */
155  #define MSDC_INT_DXFER_DONE     BIT(13)	/* W1C */
156  #define MSDC_INT_DATTMO         BIT(14)	/* W1C */
157  #define MSDC_INT_DATCRCERR      BIT(15)	/* W1C */
158  #define MSDC_INT_ACMD19_DONE    BIT(16)	/* W1C */
159  #define MSDC_INT_DMA_BDCSERR    BIT(17)	/* W1C */
160  #define MSDC_INT_DMA_GPDCSERR   BIT(18)	/* W1C */
161  #define MSDC_INT_DMA_PROTECT    BIT(19)	/* W1C */
162  #define MSDC_INT_CMDQ           BIT(28)	/* W1C */
163  
164  /* MSDC_INTEN mask */
165  #define MSDC_INTEN_MMCIRQ       BIT(0)	/* RW */
166  #define MSDC_INTEN_CDSC         BIT(1)	/* RW */
167  #define MSDC_INTEN_ACMDRDY      BIT(3)	/* RW */
168  #define MSDC_INTEN_ACMDTMO      BIT(4)	/* RW */
169  #define MSDC_INTEN_ACMDCRCERR   BIT(5)	/* RW */
170  #define MSDC_INTEN_DMAQ_EMPTY   BIT(6)	/* RW */
171  #define MSDC_INTEN_SDIOIRQ      BIT(7)	/* RW */
172  #define MSDC_INTEN_CMDRDY       BIT(8)	/* RW */
173  #define MSDC_INTEN_CMDTMO       BIT(9)	/* RW */
174  #define MSDC_INTEN_RSPCRCERR    BIT(10)	/* RW */
175  #define MSDC_INTEN_CSTA         BIT(11)	/* RW */
176  #define MSDC_INTEN_XFER_COMPL   BIT(12)	/* RW */
177  #define MSDC_INTEN_DXFER_DONE   BIT(13)	/* RW */
178  #define MSDC_INTEN_DATTMO       BIT(14)	/* RW */
179  #define MSDC_INTEN_DATCRCERR    BIT(15)	/* RW */
180  #define MSDC_INTEN_ACMD19_DONE  BIT(16)	/* RW */
181  #define MSDC_INTEN_DMA_BDCSERR  BIT(17)	/* RW */
182  #define MSDC_INTEN_DMA_GPDCSERR BIT(18)	/* RW */
183  #define MSDC_INTEN_DMA_PROTECT  BIT(19)	/* RW */
184  
185  /* MSDC_FIFOCS mask */
186  #define MSDC_FIFOCS_RXCNT       GENMASK(7, 0)	/* R */
187  #define MSDC_FIFOCS_TXCNT       GENMASK(23, 16)	/* R */
188  #define MSDC_FIFOCS_CLR         BIT(31)	/* RW */
189  
190  /* SDC_CFG mask */
191  #define SDC_CFG_SDIOINTWKUP     BIT(0)	/* RW */
192  #define SDC_CFG_INSWKUP         BIT(1)	/* RW */
193  #define SDC_CFG_WRDTOC          GENMASK(14, 2)  /* RW */
194  #define SDC_CFG_BUSWIDTH        GENMASK(17, 16)	/* RW */
195  #define SDC_CFG_SDIO            BIT(19)	/* RW */
196  #define SDC_CFG_SDIOIDE         BIT(20)	/* RW */
197  #define SDC_CFG_INTATGAP        BIT(21)	/* RW */
198  #define SDC_CFG_DTOC            GENMASK(31, 24)	/* RW */
199  
200  /* SDC_STS mask */
201  #define SDC_STS_SDCBUSY         BIT(0)	/* RW */
202  #define SDC_STS_CMDBUSY         BIT(1)	/* RW */
203  #define SDC_STS_SWR_COMPL       BIT(31)	/* RW */
204  
205  #define SDC_DAT1_IRQ_TRIGGER	BIT(19)	/* RW */
206  /* SDC_ADV_CFG0 mask */
207  #define SDC_RX_ENHANCE_EN	BIT(20)	/* RW */
208  
209  /* DMA_SA_H4BIT mask */
210  #define DMA_ADDR_HIGH_4BIT      GENMASK(3, 0)	/* RW */
211  
212  /* MSDC_DMA_CTRL mask */
213  #define MSDC_DMA_CTRL_START     BIT(0)	/* W */
214  #define MSDC_DMA_CTRL_STOP      BIT(1)	/* W */
215  #define MSDC_DMA_CTRL_RESUME    BIT(2)	/* W */
216  #define MSDC_DMA_CTRL_MODE      BIT(8)	/* RW */
217  #define MSDC_DMA_CTRL_LASTBUF   BIT(10)	/* RW */
218  #define MSDC_DMA_CTRL_BRUSTSZ   GENMASK(14, 12)	/* RW */
219  
220  /* MSDC_DMA_CFG mask */
221  #define MSDC_DMA_CFG_STS        BIT(0)	/* R */
222  #define MSDC_DMA_CFG_DECSEN     BIT(1)	/* RW */
223  #define MSDC_DMA_CFG_AHBHPROT2  BIT(9)	/* RW */
224  #define MSDC_DMA_CFG_ACTIVEEN   BIT(13)	/* RW */
225  #define MSDC_DMA_CFG_CS12B16B   BIT(16)	/* RW */
226  
227  /* MSDC_PATCH_BIT mask */
228  #define MSDC_PATCH_BIT_ODDSUPP    BIT(1)	/* RW */
229  #define MSDC_INT_DAT_LATCH_CK_SEL GENMASK(9, 7)
230  #define MSDC_CKGEN_MSDC_DLY_SEL   GENMASK(14, 10)
231  #define MSDC_PATCH_BIT_IODSSEL    BIT(16)	/* RW */
232  #define MSDC_PATCH_BIT_IOINTSEL   BIT(17)	/* RW */
233  #define MSDC_PATCH_BIT_BUSYDLY    GENMASK(21, 18)	/* RW */
234  #define MSDC_PATCH_BIT_WDOD       GENMASK(25, 22)	/* RW */
235  #define MSDC_PATCH_BIT_IDRTSEL    BIT(26)	/* RW */
236  #define MSDC_PATCH_BIT_CMDFSEL    BIT(27)	/* RW */
237  #define MSDC_PATCH_BIT_INTDLSEL   BIT(28)	/* RW */
238  #define MSDC_PATCH_BIT_SPCPUSH    BIT(29)	/* RW */
239  #define MSDC_PATCH_BIT_DECRCTMO   BIT(30)	/* RW */
240  
241  #define MSDC_PATCH_BIT1_CMDTA     GENMASK(5, 3)    /* RW */
242  #define MSDC_PB1_BUSY_CHECK_SEL   BIT(7)    /* RW */
243  #define MSDC_PATCH_BIT1_STOP_DLY  GENMASK(11, 8)    /* RW */
244  
245  #define MSDC_PATCH_BIT2_CFGRESP   BIT(15)   /* RW */
246  #define MSDC_PATCH_BIT2_CFGCRCSTS BIT(28)   /* RW */
247  #define MSDC_PB2_SUPPORT_64G      BIT(1)    /* RW */
248  #define MSDC_PB2_RESPWAIT         GENMASK(3, 2)   /* RW */
249  #define MSDC_PB2_RESPSTSENSEL     GENMASK(18, 16) /* RW */
250  #define MSDC_PB2_CRCSTSENSEL      GENMASK(31, 29) /* RW */
251  
252  #define MSDC_PAD_TUNE_DATWRDLY	  GENMASK(4, 0)		/* RW */
253  #define MSDC_PAD_TUNE_DATRRDLY	  GENMASK(12, 8)	/* RW */
254  #define MSDC_PAD_TUNE_DATRRDLY2	  GENMASK(12, 8)	/* RW */
255  #define MSDC_PAD_TUNE_CMDRDLY	  GENMASK(20, 16)	/* RW */
256  #define MSDC_PAD_TUNE_CMDRDLY2	  GENMASK(20, 16)	/* RW */
257  #define MSDC_PAD_TUNE_CMDRRDLY	  GENMASK(26, 22)	/* RW */
258  #define MSDC_PAD_TUNE_CLKTDLY	  GENMASK(31, 27)	/* RW */
259  #define MSDC_PAD_TUNE_RXDLYSEL	  BIT(15)   /* RW */
260  #define MSDC_PAD_TUNE_RD_SEL	  BIT(13)   /* RW */
261  #define MSDC_PAD_TUNE_CMD_SEL	  BIT(21)   /* RW */
262  #define MSDC_PAD_TUNE_RD2_SEL	  BIT(13)   /* RW */
263  #define MSDC_PAD_TUNE_CMD2_SEL	  BIT(21)   /* RW */
264  
265  #define PAD_DS_TUNE_DLY_SEL       BIT(0)	  /* RW */
266  #define PAD_DS_TUNE_DLY1	  GENMASK(6, 2)   /* RW */
267  #define PAD_DS_TUNE_DLY2	  GENMASK(11, 7)  /* RW */
268  #define PAD_DS_TUNE_DLY3	  GENMASK(16, 12) /* RW */
269  
270  #define PAD_CMD_TUNE_RX_DLY3	  GENMASK(5, 1)   /* RW */
271  
272  /* EMMC51_CFG0 mask */
273  #define CMDQ_RDAT_CNT		  GENMASK(21, 12) /* RW */
274  
275  #define EMMC50_CFG_PADCMD_LATCHCK BIT(0)   /* RW */
276  #define EMMC50_CFG_CRCSTS_EDGE    BIT(3)   /* RW */
277  #define EMMC50_CFG_CFCSTS_SEL     BIT(4)   /* RW */
278  #define EMMC50_CFG_CMD_RESP_SEL   BIT(9)   /* RW */
279  
280  /* EMMC50_CFG1 mask */
281  #define EMMC50_CFG1_DS_CFG        BIT(28)  /* RW */
282  
283  #define EMMC50_CFG3_OUTS_WR       GENMASK(4, 0)  /* RW */
284  
285  #define SDC_FIFO_CFG_WRVALIDSEL   BIT(24)  /* RW */
286  #define SDC_FIFO_CFG_RDVALIDSEL   BIT(25)  /* RW */
287  
288  /* CQHCI_SETTING */
289  #define CQHCI_RD_CMD_WND_SEL	  BIT(14) /* RW */
290  #define CQHCI_WR_CMD_WND_SEL	  BIT(15) /* RW */
291  
292  /* EMMC_TOP_CONTROL mask */
293  #define PAD_RXDLY_SEL           BIT(0)      /* RW */
294  #define DELAY_EN                BIT(1)      /* RW */
295  #define PAD_DAT_RD_RXDLY2       GENMASK(6, 2)     /* RW */
296  #define PAD_DAT_RD_RXDLY        GENMASK(11, 7)    /* RW */
297  #define PAD_DAT_RD_RXDLY2_SEL   BIT(12)     /* RW */
298  #define PAD_DAT_RD_RXDLY_SEL    BIT(13)     /* RW */
299  #define DATA_K_VALUE_SEL        BIT(14)     /* RW */
#define SDC_RX_ENH_EN           BIT(15)     /* RW */
301  
302  /* EMMC_TOP_CMD mask */
303  #define PAD_CMD_RXDLY2          GENMASK(4, 0)	/* RW */
304  #define PAD_CMD_RXDLY           GENMASK(9, 5)	/* RW */
305  #define PAD_CMD_RD_RXDLY2_SEL   BIT(10)		/* RW */
306  #define PAD_CMD_RD_RXDLY_SEL    BIT(11)		/* RW */
307  #define PAD_CMD_TX_DLY          GENMASK(16, 12)	/* RW */
308  
309  /* EMMC50_PAD_DS_TUNE mask */
310  #define PAD_DS_DLY_SEL		BIT(16)	/* RW */
311  #define PAD_DS_DLY1		GENMASK(14, 10)	/* RW */
312  #define PAD_DS_DLY3		GENMASK(4, 0)	/* RW */
313  
314  #define REQ_CMD_EIO  BIT(0)
315  #define REQ_CMD_TMO  BIT(1)
316  #define REQ_DAT_ERR  BIT(2)
317  #define REQ_STOP_EIO BIT(3)
318  #define REQ_STOP_TMO BIT(4)
319  #define REQ_CMD_BUSY BIT(5)
320  
321  #define MSDC_PREPARE_FLAG BIT(0)
322  #define MSDC_ASYNC_FLAG BIT(1)
323  #define MSDC_MMAP_FLAG BIT(2)
324  
325  #define MTK_MMC_AUTOSUSPEND_DELAY	50
326  #define CMD_TIMEOUT         (HZ/10 * 5)	/* 100ms x5 */
327  #define DAT_TIMEOUT         (HZ    * 5)	/* 1000ms x5 */
328  
329  #define DEFAULT_DEBOUNCE	(8)	/* 8 cycles CD debounce */
330  
331  #define TUNING_REG2_FIXED_OFFEST	4
332  #define PAD_DELAY_HALF	32 /* PAD delay cells */
333  #define PAD_DELAY_FULL	64
334  /*--------------------------------------------------------------------------*/
335  /* Descriptor Structure                                                     */
336  /*--------------------------------------------------------------------------*/
/*
 * General Purpose Descriptor (GPD) in the hardware-defined DMA layout.
 * A single GPD heads each transfer and points at the BD chain; the
 * in-struct #defines name bit fields of the member they follow.
 */
struct mt_gpdma_desc {
	u32 gpd_info;
#define GPDMA_DESC_HWO		BIT(0)	/* hardware owns this descriptor */
#define GPDMA_DESC_BDP		BIT(1)	/* a BD chain is attached */
#define GPDMA_DESC_CHECKSUM	GENMASK(15, 8)	/* see msdc_dma_calcs() */
#define GPDMA_DESC_INT		BIT(16)
#define GPDMA_DESC_NEXT_H4	GENMASK(27, 24)	/* high 4 address bits of 'next' */
#define GPDMA_DESC_PTR_H4	GENMASK(31, 28)	/* high 4 address bits of 'ptr' */
	u32 next;	/* bus address of the next GPD (low 32 bits) */
	u32 ptr;	/* bus address of the first BD (low 32 bits) */
	u32 gpd_data_len;
#define GPDMA_DESC_BUFLEN	GENMASK(15, 0)
#define GPDMA_DESC_EXTLEN	GENMASK(23, 16)
	u32 arg;
	u32 blknum;
	u32 cmd;
};
354  
/*
 * Buffer Descriptor (BD): one per scatterlist segment, chained via 'next'.
 * Filled in by msdc_dma_setup(); layout is fixed by the DMA engine.
 */
struct mt_bdma_desc {
	u32 bd_info;
#define BDMA_DESC_EOL		BIT(0)	/* last BD of the chain */
#define BDMA_DESC_CHECKSUM	GENMASK(15, 8)	/* see msdc_dma_calcs() */
#define BDMA_DESC_BLKPAD	BIT(17)
#define BDMA_DESC_DWPAD		BIT(18)
#define BDMA_DESC_NEXT_H4	GENMASK(27, 24)	/* high 4 address bits of 'next' */
#define BDMA_DESC_PTR_H4	GENMASK(31, 28)	/* high 4 address bits of 'ptr' */
	u32 next;	/* bus address of the next BD (low 32 bits) */
	u32 ptr;	/* bus address of the data buffer (low 32 bits) */
	u32 bd_data_len;
#define BDMA_DESC_BUFLEN	GENMASK(15, 0)	/* length field, <= 4G-unaware parts */
#define BDMA_DESC_BUFLEN_EXT	GENMASK(23, 0)	/* wider length field on 64G parts */
};
369  
/* DMA bookkeeping: descriptor arrays plus their bus addresses. */
struct msdc_dma {
	struct scatterlist *sg;	/* I/O scatter list */
	struct mt_gpdma_desc *gpd;		/* pointer to gpd array */
	struct mt_bdma_desc *bd;		/* pointer to bd array */
	dma_addr_t gpd_addr;	/* the physical address of gpd array */
	dma_addr_t bd_addr;	/* the physical address of bd array */
};
377  
/*
 * Snapshot of controller registers preserved while the host clocks are
 * gated (see msdc_host.save_para), restored on resume.
 */
struct msdc_save_para {
	u32 msdc_cfg;
	u32 iocon;
	u32 sdc_cfg;
	u32 pad_tune;
	u32 patch_bit0;
	u32 patch_bit1;
	u32 patch_bit2;
	u32 pad_ds_tune;
	u32 pad_cmd_tune;
	u32 emmc50_cfg0;
	u32 emmc50_cfg3;
	u32 sdc_fifo_cfg;
	u32 emmc_top_control;
	u32 emmc_top_cmd;
	u32 emmc50_pad_ds_tune;
};
395  
/*
 * Per-SoC capabilities and quirks, selected by compatible string
 * (msdc_of_ids) and referenced as host->dev_comp.
 */
struct mtk_mmc_compatible {
	u8 clk_div_bits;	/* width of the clock divider: 8 or 12 bits */
	bool recheck_sdio_irq;
	bool hs400_tune; /* set for MT8173 and MT6795 */
	u32 pad_tune_reg;	/* MSDC_PAD_TUNE or MSDC_PAD_TUNE0 */
	bool async_fifo;
	bool data_tune;
	bool busy_check;
	bool stop_clk_fix;
	bool enhance_rx;
	bool support_64g;	/* DMA addresses above 32 bits (4-bit extension) */
	bool use_internal_cd;	/* card detect via controller, not a GPIO */
};
409  
/* Saved tuning-related register values (default vs. tuned, see msdc_host). */
struct msdc_tune_para {
	u32 iocon;
	u32 pad_tune;
	u32 pad_cmd_tune;
	u32 emmc_top_control;
	u32 emmc_top_cmd;
};
417  
/* Result of a delay-line scan: longest passing window and its midpoint. */
struct msdc_delay_phase {
	u8 maxlen;	/* length of the longest passing run */
	u8 start;	/* first delay cell of that run */
	u8 final_phase;	/* chosen delay value within the run */
};
423  
/* Per-controller driver state; allocated as mmc_host private data. */
struct msdc_host {
	struct device *dev;
	const struct mtk_mmc_compatible *dev_comp;	/* SoC quirks, from msdc_of_ids */
	int cmd_rsp;	/* response-type bookkeeping for the in-flight command */

	spinlock_t lock;	/* protects the in-flight request state below */
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_data *data;
	int error;	/* accumulated REQ_* error flags for the current request */

	void __iomem *base;		/* host base address */
	void __iomem *top_base;		/* host top register base address */

	struct msdc_dma dma;	/* dma channel */
	u64 dma_mask;

	u32 timeout_ns;		/* data timeout ns */
	u32 timeout_clks;	/* data timeout clks */

	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_default;
	struct pinctrl_state *pins_uhs;
	struct pinctrl_state *pins_eint;
	struct delayed_work req_timeout;	/* software watchdog for stuck requests */
	int irq;		/* host interrupt */
	int eint_irq;		/* interrupt from sdio device for waking up system */
	struct reset_control *reset;

	struct clk *src_clk;	/* msdc source clock */
	struct clk *h_clk;      /* msdc h_clk */
	struct clk *bus_clk;	/* bus clock which used to access register */
	struct clk *src_clk_cg; /* msdc source clock control gate */
	struct clk *sys_clk_cg;	/* msdc subsys clock control gate */
	struct clk *crypto_clk; /* msdc crypto clock control gate */
	struct clk_bulk_data bulk_clks[MSDC_NR_CLOCKS];
	u32 mclk;		/* mmc subsystem clock frequency */
	u32 src_clk_freq;	/* source clock frequency */
	unsigned char timing;
	bool vqmmc_enabled;
	u32 latch_ck;
	u32 hs400_ds_delay;
	u32 hs400_ds_dly3;
	u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */
	u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */
	u32 tuning_step;
	bool hs400_cmd_resp_sel_rising;
				 /* cmd response sample selection for HS400 */
	bool hs400_mode;	/* current eMMC will run at hs400 mode */
	bool hs400_tuning;	/* hs400 mode online tuning */
	bool internal_cd;	/* Use internal card-detect logic */
	bool cqhci;		/* support eMMC hw cmdq */
	struct msdc_save_para save_para; /* used when gate HCLK */
	struct msdc_tune_para def_tune_para; /* default tune setting */
	struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
	struct cqhci_host *cq_host;
	u32 cq_ssc1_time;
};
482  
/*
 * Per-SoC feature tables.  One of these is bound to host->dev_comp via the
 * compatible match in msdc_of_ids below.  Fields not listed default to
 * zero/false.
 */
static const struct mtk_mmc_compatible mt2701_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = false,
	.stop_clk_fix = false,
	.enhance_rx = false,
	.support_64g = false,
};

static const struct mtk_mmc_compatible mt2712_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = false,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.enhance_rx = true,
	.support_64g = true,
};

static const struct mtk_mmc_compatible mt6779_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = false,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.enhance_rx = true,
	.support_64g = true,
};

static const struct mtk_mmc_compatible mt6795_compat = {
	.clk_div_bits = 8,
	.recheck_sdio_irq = false,
	.hs400_tune = true,
	.pad_tune_reg = MSDC_PAD_TUNE,
	.async_fifo = false,
	.data_tune = false,
	.busy_check = false,
	.stop_clk_fix = false,
	.enhance_rx = false,
	.support_64g = false,
};

/* MT7620: no dedicated card-detect pin, uses the controller's internal CD. */
static const struct mtk_mmc_compatible mt7620_compat = {
	.clk_div_bits = 8,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE,
	.async_fifo = false,
	.data_tune = false,
	.busy_check = false,
	.stop_clk_fix = false,
	.enhance_rx = false,
	.use_internal_cd = true,
};

static const struct mtk_mmc_compatible mt7622_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.enhance_rx = true,
	.support_64g = false,
};

static const struct mtk_mmc_compatible mt7986_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.enhance_rx = true,
	.support_64g = true,
};

static const struct mtk_mmc_compatible mt8135_compat = {
	.clk_div_bits = 8,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE,
	.async_fifo = false,
	.data_tune = false,
	.busy_check = false,
	.stop_clk_fix = false,
	.enhance_rx = false,
	.support_64g = false,
};

static const struct mtk_mmc_compatible mt8173_compat = {
	.clk_div_bits = 8,
	.recheck_sdio_irq = true,
	.hs400_tune = true,
	.pad_tune_reg = MSDC_PAD_TUNE,
	.async_fifo = false,
	.data_tune = false,
	.busy_check = false,
	.stop_clk_fix = false,
	.enhance_rx = false,
	.support_64g = false,
};

static const struct mtk_mmc_compatible mt8183_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = false,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.enhance_rx = true,
	.support_64g = true,
};

/* MT8516: enhance_rx and support_64g intentionally left at their false defaults. */
static const struct mtk_mmc_compatible mt8516_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
};
623  
/* Device-tree match table; .data selects the per-SoC compat entry above. */
static const struct of_device_id msdc_of_ids[] = {
	{ .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
	{ .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
	{ .compatible = "mediatek,mt6779-mmc", .data = &mt6779_compat},
	{ .compatible = "mediatek,mt6795-mmc", .data = &mt6795_compat},
	{ .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat},
	{ .compatible = "mediatek,mt7622-mmc", .data = &mt7622_compat},
	{ .compatible = "mediatek,mt7986-mmc", .data = &mt7986_compat},
	{ .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
	{ .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
	{ .compatible = "mediatek,mt8183-mmc", .data = &mt8183_compat},
	{ .compatible = "mediatek,mt8516-mmc", .data = &mt8516_compat},

	{}	/* sentinel */
};
MODULE_DEVICE_TABLE(of, msdc_of_ids);
640  
/* Set the bits of @bs in MMIO register @reg (read-modify-write). */
static void sdr_set_bits(void __iomem *reg, u32 bs)
{
	writel(readl(reg) | bs, reg);
}
648  
/* Clear the bits of @bs in MMIO register @reg (read-modify-write). */
static void sdr_clr_bits(void __iomem *reg, u32 bs)
{
	writel(readl(reg) & ~bs, reg);
}
656  
/*
 * Replace the bit field described by mask @field in register @reg with
 * @val, shifted into position (lowest set bit of the mask).
 */
static void sdr_set_field(void __iomem *reg, u32 field, u32 val)
{
	unsigned int shift = ffs((unsigned int)field) - 1;
	unsigned int tv = readl(reg);

	writel((tv & ~field) | (val << shift), reg);
}
665  
/* Read the bit field described by mask @field from @reg into *@val. */
static void sdr_get_field(void __iomem *reg, u32 field, u32 *val)
{
	unsigned int shift = ffs((unsigned int)field) - 1;

	*val = (readl(reg) & field) >> shift;
}
672  
/* Soft-reset the controller, clear the FIFO, then ack all pending IRQs. */
static void msdc_reset_hw(struct msdc_host *host)
{
	u32 val;

	/* Trigger the reset; the bit self-clears when the reset completes
	 * (polled with no timeout, suitable for atomic context).
	 */
	sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
	readl_poll_timeout_atomic(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);

	/* Flush the FIFO the same way; MSDC_FIFOCS_CLR also self-clears. */
	sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
	readl_poll_timeout_atomic(host->base + MSDC_FIFOCS, val,
				  !(val & MSDC_FIFOCS_CLR), 0, 0);

	/* MSDC_INT is write-1-to-clear: acknowledge whatever is pending. */
	val = readl(host->base + MSDC_INT);
	writel(val, host->base + MSDC_INT);
}
687  
/* Forward declarations for the command completion / SDIO IRQ paths. */
static void msdc_cmd_next(struct msdc_host *host,
		struct mmc_request *mrq, struct mmc_command *cmd);
static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb);

/* Interrupt bits enabled while a command phase is outstanding. */
static const u32 cmd_ints_mask = MSDC_INTEN_CMDRDY | MSDC_INTEN_RSPCRCERR |
			MSDC_INTEN_CMDTMO | MSDC_INTEN_ACMDRDY |
			MSDC_INTEN_ACMDCRCERR | MSDC_INTEN_ACMDTMO;
/* Interrupt bits enabled while a data/DMA phase is outstanding. */
static const u32 data_ints_mask = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO |
			MSDC_INTEN_DATCRCERR | MSDC_INTEN_DMA_BDCSERR |
			MSDC_INTEN_DMA_GPDCSERR | MSDC_INTEN_DMA_PROTECT;
698  
msdc_dma_calcs(u8 * buf,u32 len)699  static u8 msdc_dma_calcs(u8 *buf, u32 len)
700  {
701  	u32 i, sum = 0;
702  
703  	for (i = 0; i < len; i++)
704  		sum += buf[i];
705  	return 0xff - (u8) sum;
706  }
707  
msdc_dma_setup(struct msdc_host * host,struct msdc_dma * dma,struct mmc_data * data)708  static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
709  		struct mmc_data *data)
710  {
711  	unsigned int j, dma_len;
712  	dma_addr_t dma_address;
713  	u32 dma_ctrl;
714  	struct scatterlist *sg;
715  	struct mt_gpdma_desc *gpd;
716  	struct mt_bdma_desc *bd;
717  
718  	sg = data->sg;
719  
720  	gpd = dma->gpd;
721  	bd = dma->bd;
722  
723  	/* modify gpd */
724  	gpd->gpd_info |= GPDMA_DESC_HWO;
725  	gpd->gpd_info |= GPDMA_DESC_BDP;
726  	/* need to clear first. use these bits to calc checksum */
727  	gpd->gpd_info &= ~GPDMA_DESC_CHECKSUM;
728  	gpd->gpd_info |= msdc_dma_calcs((u8 *) gpd, 16) << 8;
729  
730  	/* modify bd */
731  	for_each_sg(data->sg, sg, data->sg_count, j) {
732  		dma_address = sg_dma_address(sg);
733  		dma_len = sg_dma_len(sg);
734  
735  		/* init bd */
736  		bd[j].bd_info &= ~BDMA_DESC_BLKPAD;
737  		bd[j].bd_info &= ~BDMA_DESC_DWPAD;
738  		bd[j].ptr = lower_32_bits(dma_address);
739  		if (host->dev_comp->support_64g) {
740  			bd[j].bd_info &= ~BDMA_DESC_PTR_H4;
741  			bd[j].bd_info |= (upper_32_bits(dma_address) & 0xf)
742  					 << 28;
743  		}
744  
745  		if (host->dev_comp->support_64g) {
746  			bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN_EXT;
747  			bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN_EXT);
748  		} else {
749  			bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN;
750  			bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN);
751  		}
752  
753  		if (j == data->sg_count - 1) /* the last bd */
754  			bd[j].bd_info |= BDMA_DESC_EOL;
755  		else
756  			bd[j].bd_info &= ~BDMA_DESC_EOL;
757  
758  		/* checksum need to clear first */
759  		bd[j].bd_info &= ~BDMA_DESC_CHECKSUM;
760  		bd[j].bd_info |= msdc_dma_calcs((u8 *)(&bd[j]), 16) << 8;
761  	}
762  
763  	sdr_set_field(host->base + MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, 1);
764  	dma_ctrl = readl_relaxed(host->base + MSDC_DMA_CTRL);
765  	dma_ctrl &= ~(MSDC_DMA_CTRL_BRUSTSZ | MSDC_DMA_CTRL_MODE);
766  	dma_ctrl |= (MSDC_BURST_64B << 12 | BIT(8));
767  	writel_relaxed(dma_ctrl, host->base + MSDC_DMA_CTRL);
768  	if (host->dev_comp->support_64g)
769  		sdr_set_field(host->base + DMA_SA_H4BIT, DMA_ADDR_HIGH_4BIT,
770  			      upper_32_bits(dma->gpd_addr) & 0xf);
771  	writel(lower_32_bits(dma->gpd_addr), host->base + MSDC_DMA_SA);
772  }
773  
msdc_prepare_data(struct msdc_host * host,struct mmc_data * data)774  static void msdc_prepare_data(struct msdc_host *host, struct mmc_data *data)
775  {
776  	if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
777  		data->host_cookie |= MSDC_PREPARE_FLAG;
778  		data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
779  					    mmc_get_dma_dir(data));
780  	}
781  }
782  
msdc_unprepare_data(struct msdc_host * host,struct mmc_data * data)783  static void msdc_unprepare_data(struct msdc_host *host, struct mmc_data *data)
784  {
785  	if (data->host_cookie & MSDC_ASYNC_FLAG)
786  		return;
787  
788  	if (data->host_cookie & MSDC_PREPARE_FLAG) {
789  		dma_unmap_sg(host->dev, data->sg, data->sg_len,
790  			     mmc_get_dma_dir(data));
791  		data->host_cookie &= ~MSDC_PREPARE_FLAG;
792  	}
793  }
794  
/*
 * Convert a timeout given as (@ns nanoseconds + @clks clock cycles) into
 * hardware units of 1048576 (2^20) card-clock cycles, as programmed into
 * SDC_CFG.  Returns 0 when the card clock is off.
 */
static u64 msdc_timeout_cal(struct msdc_host *host, u64 ns, u64 clks)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	u64 timeout;
	u32 clk_ns, mode = 0;

	if (mmc->actual_clock == 0) {
		timeout = 0;
	} else {
		/* ns -> clock cycles, rounding up (clk_ns truncates, hence
		 * the "+ clk_ns - 1" before the division).
		 */
		clk_ns = 1000000000U / mmc->actual_clock;
		timeout = ns + clk_ns - 1;
		do_div(timeout, clk_ns);
		timeout += clks;
		/* in 1048576 sclk cycle unit */
		timeout = DIV_ROUND_UP(timeout, BIT(20));
		/* CKMOD field width depends on the divider generation. */
		if (host->dev_comp->clk_div_bits == 8)
			sdr_get_field(host->base + MSDC_CFG,
				      MSDC_CFG_CKMOD, &mode);
		else
			sdr_get_field(host->base + MSDC_CFG,
				      MSDC_CFG_CKMOD_EXTRA, &mode);
		/*DDR mode will double the clk cycles for data timeout */
		timeout = mode >= 2 ? timeout * 2 : timeout;
		/* Hardware counts from 0, so program one less. */
		timeout = timeout > 1 ? timeout - 1 : 0;
	}
	return timeout;
}
822  
823  /* clock control primitives */
msdc_set_timeout(struct msdc_host * host,u64 ns,u64 clks)824  static void msdc_set_timeout(struct msdc_host *host, u64 ns, u64 clks)
825  {
826  	u64 timeout;
827  
828  	host->timeout_ns = ns;
829  	host->timeout_clks = clks;
830  
831  	timeout = msdc_timeout_cal(host, ns, clks);
832  	sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC,
833  		      min_t(u32, timeout, 255));
834  }
835  
/*
 * Program the write-busy timeout (SDC_CFG_WRDTOC, 13-bit field) from an
 * (ns, clks) pair.
 */
static void msdc_set_busy_timeout(struct msdc_host *host, u64 ns, u64 clks)
{
	u64 cycles = msdc_timeout_cal(host, ns, clks);

	/* Field is 13 bits wide: clamp to its maximum. */
	sdr_set_field(host->base + SDC_CFG, SDC_CFG_WRDTOC,
		      min_t(u32, cycles, 8191));
}
844  
/* Disable all controller clocks, in the reverse order of msdc_ungate_clock(). */
static void msdc_gate_clock(struct msdc_host *host)
{
	clk_bulk_disable_unprepare(MSDC_NR_CLOCKS, host->bulk_clks);
	clk_disable_unprepare(host->crypto_clk);
	clk_disable_unprepare(host->src_clk_cg);
	clk_disable_unprepare(host->src_clk);
	clk_disable_unprepare(host->bus_clk);
	clk_disable_unprepare(host->h_clk);
}
854  
/*
 * Enable all controller clocks and wait (up to 20ms) for the card clock to
 * report stable (MSDC_CFG_CKSTB).  Returns 0 on success or a negative errno
 * from the bulk enable / stability poll.
 *
 * NOTE(review): the individual clk_prepare_enable() results are ignored —
 * presumably these clocks are optional (NULL) on some SoCs; verify against
 * the probe path.
 */
static int msdc_ungate_clock(struct msdc_host *host)
{
	u32 val;
	int ret;

	clk_prepare_enable(host->h_clk);
	clk_prepare_enable(host->bus_clk);
	clk_prepare_enable(host->src_clk);
	clk_prepare_enable(host->src_clk_cg);
	clk_prepare_enable(host->crypto_clk);
	ret = clk_bulk_prepare_enable(MSDC_NR_CLOCKS, host->bulk_clks);
	if (ret) {
		dev_err(host->dev, "Cannot enable pclk/axi/ahb clock gates\n");
		return ret;
	}

	return readl_poll_timeout(host->base + MSDC_CFG, val,
				  (val & MSDC_CFG_CKSTB), 1, 20000);
}
874  
msdc_set_mclk(struct msdc_host * host,unsigned char timing,u32 hz)875  static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
876  {
877  	struct mmc_host *mmc = mmc_from_priv(host);
878  	u32 mode;
879  	u32 flags;
880  	u32 div;
881  	u32 sclk;
882  	u32 tune_reg = host->dev_comp->pad_tune_reg;
883  	u32 val;
884  
885  	if (!hz) {
886  		dev_dbg(host->dev, "set mclk to 0\n");
887  		host->mclk = 0;
888  		mmc->actual_clock = 0;
889  		sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
890  		return;
891  	}
892  
893  	flags = readl(host->base + MSDC_INTEN);
894  	sdr_clr_bits(host->base + MSDC_INTEN, flags);
895  	if (host->dev_comp->clk_div_bits == 8)
896  		sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
897  	else
898  		sdr_clr_bits(host->base + MSDC_CFG,
899  			     MSDC_CFG_HS400_CK_MODE_EXTRA);
900  	if (timing == MMC_TIMING_UHS_DDR50 ||
901  	    timing == MMC_TIMING_MMC_DDR52 ||
902  	    timing == MMC_TIMING_MMC_HS400) {
903  		if (timing == MMC_TIMING_MMC_HS400)
904  			mode = 0x3;
905  		else
906  			mode = 0x2; /* ddr mode and use divisor */
907  
908  		if (hz >= (host->src_clk_freq >> 2)) {
909  			div = 0; /* mean div = 1/4 */
910  			sclk = host->src_clk_freq >> 2; /* sclk = clk / 4 */
911  		} else {
912  			div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2);
913  			sclk = (host->src_clk_freq >> 2) / div;
914  			div = (div >> 1);
915  		}
916  
917  		if (timing == MMC_TIMING_MMC_HS400 &&
918  		    hz >= (host->src_clk_freq >> 1)) {
919  			if (host->dev_comp->clk_div_bits == 8)
920  				sdr_set_bits(host->base + MSDC_CFG,
921  					     MSDC_CFG_HS400_CK_MODE);
922  			else
923  				sdr_set_bits(host->base + MSDC_CFG,
924  					     MSDC_CFG_HS400_CK_MODE_EXTRA);
925  			sclk = host->src_clk_freq >> 1;
926  			div = 0; /* div is ignore when bit18 is set */
927  		}
928  	} else if (hz >= host->src_clk_freq) {
929  		mode = 0x1; /* no divisor */
930  		div = 0;
931  		sclk = host->src_clk_freq;
932  	} else {
933  		mode = 0x0; /* use divisor */
934  		if (hz >= (host->src_clk_freq >> 1)) {
935  			div = 0; /* mean div = 1/2 */
936  			sclk = host->src_clk_freq >> 1; /* sclk = clk / 2 */
937  		} else {
938  			div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2);
939  			sclk = (host->src_clk_freq >> 2) / div;
940  		}
941  	}
942  	sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
943  
944  	clk_disable_unprepare(host->src_clk_cg);
945  	if (host->dev_comp->clk_div_bits == 8)
946  		sdr_set_field(host->base + MSDC_CFG,
947  			      MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
948  			      (mode << 8) | div);
949  	else
950  		sdr_set_field(host->base + MSDC_CFG,
951  			      MSDC_CFG_CKMOD_EXTRA | MSDC_CFG_CKDIV_EXTRA,
952  			      (mode << 12) | div);
953  
954  	clk_prepare_enable(host->src_clk_cg);
955  	readl_poll_timeout(host->base + MSDC_CFG, val, (val & MSDC_CFG_CKSTB), 0, 0);
956  	sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
957  	mmc->actual_clock = sclk;
958  	host->mclk = hz;
959  	host->timing = timing;
960  	/* need because clk changed. */
961  	msdc_set_timeout(host, host->timeout_ns, host->timeout_clks);
962  	sdr_set_bits(host->base + MSDC_INTEN, flags);
963  
964  	/*
965  	 * mmc_select_hs400() will drop to 50Mhz and High speed mode,
966  	 * tune result of hs200/200Mhz is not suitable for 50Mhz
967  	 */
968  	if (mmc->actual_clock <= 52000000) {
969  		writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
970  		if (host->top_base) {
971  			writel(host->def_tune_para.emmc_top_control,
972  			       host->top_base + EMMC_TOP_CONTROL);
973  			writel(host->def_tune_para.emmc_top_cmd,
974  			       host->top_base + EMMC_TOP_CMD);
975  		} else {
976  			writel(host->def_tune_para.pad_tune,
977  			       host->base + tune_reg);
978  		}
979  	} else {
980  		writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON);
981  		writel(host->saved_tune_para.pad_cmd_tune,
982  		       host->base + PAD_CMD_TUNE);
983  		if (host->top_base) {
984  			writel(host->saved_tune_para.emmc_top_control,
985  			       host->top_base + EMMC_TOP_CONTROL);
986  			writel(host->saved_tune_para.emmc_top_cmd,
987  			       host->top_base + EMMC_TOP_CMD);
988  		} else {
989  			writel(host->saved_tune_para.pad_tune,
990  			       host->base + tune_reg);
991  		}
992  	}
993  
994  	if (timing == MMC_TIMING_MMC_HS400 &&
995  	    host->dev_comp->hs400_tune)
996  		sdr_set_field(host->base + tune_reg,
997  			      MSDC_PAD_TUNE_CMDRRDLY,
998  			      host->hs400_cmd_int_delay);
999  	dev_dbg(host->dev, "sclk: %d, timing: %d\n", mmc->actual_clock,
1000  		timing);
1001  }
1002  
msdc_cmd_find_resp(struct msdc_host * host,struct mmc_command * cmd)1003  static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
1004  		struct mmc_command *cmd)
1005  {
1006  	u32 resp;
1007  
1008  	switch (mmc_resp_type(cmd)) {
1009  		/* Actually, R1, R5, R6, R7 are the same */
1010  	case MMC_RSP_R1:
1011  		resp = 0x1;
1012  		break;
1013  	case MMC_RSP_R1B:
1014  		resp = 0x7;
1015  		break;
1016  	case MMC_RSP_R2:
1017  		resp = 0x2;
1018  		break;
1019  	case MMC_RSP_R3:
1020  		resp = 0x3;
1021  		break;
1022  	case MMC_RSP_NONE:
1023  	default:
1024  		resp = 0x0;
1025  		break;
1026  	}
1027  
1028  	return resp;
1029  }
1030  
msdc_cmd_prepare_raw_cmd(struct msdc_host * host,struct mmc_request * mrq,struct mmc_command * cmd)1031  static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
1032  		struct mmc_request *mrq, struct mmc_command *cmd)
1033  {
1034  	struct mmc_host *mmc = mmc_from_priv(host);
1035  	/* rawcmd :
1036  	 * vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 |
1037  	 * stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
1038  	 */
1039  	u32 opcode = cmd->opcode;
1040  	u32 resp = msdc_cmd_find_resp(host, cmd);
1041  	u32 rawcmd = (opcode & 0x3f) | ((resp & 0x7) << 7);
1042  
1043  	host->cmd_rsp = resp;
1044  
1045  	if ((opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int) -1) ||
1046  	    opcode == MMC_STOP_TRANSMISSION)
1047  		rawcmd |= BIT(14);
1048  	else if (opcode == SD_SWITCH_VOLTAGE)
1049  		rawcmd |= BIT(30);
1050  	else if (opcode == SD_APP_SEND_SCR ||
1051  		 opcode == SD_APP_SEND_NUM_WR_BLKS ||
1052  		 (opcode == SD_SWITCH && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
1053  		 (opcode == SD_APP_SD_STATUS && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
1054  		 (opcode == MMC_SEND_EXT_CSD && mmc_cmd_type(cmd) == MMC_CMD_ADTC))
1055  		rawcmd |= BIT(11);
1056  
1057  	if (cmd->data) {
1058  		struct mmc_data *data = cmd->data;
1059  
1060  		if (mmc_op_multi(opcode)) {
1061  			if (mmc_card_mmc(mmc->card) && mrq->sbc &&
1062  			    !(mrq->sbc->arg & 0xFFFF0000))
1063  				rawcmd |= BIT(29); /* AutoCMD23 */
1064  		}
1065  
1066  		rawcmd |= ((data->blksz & 0xFFF) << 16);
1067  		if (data->flags & MMC_DATA_WRITE)
1068  			rawcmd |= BIT(13);
1069  		if (data->blocks > 1)
1070  			rawcmd |= BIT(12);
1071  		else
1072  			rawcmd |= BIT(11);
1073  		/* Always use dma mode */
1074  		sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_PIO);
1075  
1076  		if (host->timeout_ns != data->timeout_ns ||
1077  		    host->timeout_clks != data->timeout_clks)
1078  			msdc_set_timeout(host, data->timeout_ns,
1079  					data->timeout_clks);
1080  
1081  		writel(data->blocks, host->base + SDC_BLK_NUM);
1082  	}
1083  	return rawcmd;
1084  }
1085  
msdc_start_data(struct msdc_host * host,struct mmc_command * cmd,struct mmc_data * data)1086  static void msdc_start_data(struct msdc_host *host, struct mmc_command *cmd,
1087  		struct mmc_data *data)
1088  {
1089  	bool read;
1090  
1091  	WARN_ON(host->data);
1092  	host->data = data;
1093  	read = data->flags & MMC_DATA_READ;
1094  
1095  	mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
1096  	msdc_dma_setup(host, &host->dma, data);
1097  	sdr_set_bits(host->base + MSDC_INTEN, data_ints_mask);
1098  	sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1);
1099  	dev_dbg(host->dev, "DMA start\n");
1100  	dev_dbg(host->dev, "%s: cmd=%d DMA data: %d blocks; read=%d\n",
1101  			__func__, cmd->opcode, data->blocks, read);
1102  }
1103  
msdc_auto_cmd_done(struct msdc_host * host,int events,struct mmc_command * cmd)1104  static int msdc_auto_cmd_done(struct msdc_host *host, int events,
1105  		struct mmc_command *cmd)
1106  {
1107  	u32 *rsp = cmd->resp;
1108  
1109  	rsp[0] = readl(host->base + SDC_ACMD_RESP);
1110  
1111  	if (events & MSDC_INT_ACMDRDY) {
1112  		cmd->error = 0;
1113  	} else {
1114  		msdc_reset_hw(host);
1115  		if (events & MSDC_INT_ACMDCRCERR) {
1116  			cmd->error = -EILSEQ;
1117  			host->error |= REQ_STOP_EIO;
1118  		} else if (events & MSDC_INT_ACMDTMO) {
1119  			cmd->error = -ETIMEDOUT;
1120  			host->error |= REQ_STOP_TMO;
1121  		}
1122  		dev_err(host->dev,
1123  			"%s: AUTO_CMD%d arg=%08X; rsp %08X; cmd_error=%d\n",
1124  			__func__, cmd->opcode, cmd->arg, rsp[0], cmd->error);
1125  	}
1126  	return cmd->error;
1127  }
1128  
1129  /*
1130   * msdc_recheck_sdio_irq - recheck whether the SDIO irq is lost
1131   *
1132   * Host controller may lost interrupt in some special case.
1133   * Add SDIO irq recheck mechanism to make sure all interrupts
1134   * can be processed immediately
1135   */
msdc_recheck_sdio_irq(struct msdc_host * host)1136  static void msdc_recheck_sdio_irq(struct msdc_host *host)
1137  {
1138  	struct mmc_host *mmc = mmc_from_priv(host);
1139  	u32 reg_int, reg_inten, reg_ps;
1140  
1141  	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
1142  		reg_inten = readl(host->base + MSDC_INTEN);
1143  		if (reg_inten & MSDC_INTEN_SDIOIRQ) {
1144  			reg_int = readl(host->base + MSDC_INT);
1145  			reg_ps = readl(host->base + MSDC_PS);
1146  			if (!(reg_int & MSDC_INT_SDIOIRQ ||
1147  			      reg_ps & MSDC_PS_DATA1)) {
1148  				__msdc_enable_sdio_irq(host, 0);
1149  				sdio_signal_irq(mmc);
1150  			}
1151  		}
1152  	}
1153  }
1154  
msdc_track_cmd_data(struct msdc_host * host,struct mmc_command * cmd)1155  static void msdc_track_cmd_data(struct msdc_host *host, struct mmc_command *cmd)
1156  {
1157  	if (host->error &&
1158  	    ((!mmc_op_tuning(cmd->opcode) && !host->hs400_tuning) ||
1159  	     cmd->error == -ETIMEDOUT))
1160  		dev_warn(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n",
1161  			 __func__, cmd->opcode, cmd->arg, host->error);
1162  }
1163  
msdc_request_done(struct msdc_host * host,struct mmc_request * mrq)1164  static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
1165  {
1166  	unsigned long flags;
1167  
1168  	/*
1169  	 * No need check the return value of cancel_delayed_work, as only ONE
1170  	 * path will go here!
1171  	 */
1172  	cancel_delayed_work(&host->req_timeout);
1173  
1174  	spin_lock_irqsave(&host->lock, flags);
1175  	host->mrq = NULL;
1176  	spin_unlock_irqrestore(&host->lock, flags);
1177  
1178  	msdc_track_cmd_data(host, mrq->cmd);
1179  	if (mrq->data)
1180  		msdc_unprepare_data(host, mrq->data);
1181  	if (host->error)
1182  		msdc_reset_hw(host);
1183  	mmc_request_done(mmc_from_priv(host), mrq);
1184  	if (host->dev_comp->recheck_sdio_irq)
1185  		msdc_recheck_sdio_irq(host);
1186  }
1187  
1188  /* returns true if command is fully handled; returns false otherwise */
msdc_cmd_done(struct msdc_host * host,int events,struct mmc_request * mrq,struct mmc_command * cmd)1189  static bool msdc_cmd_done(struct msdc_host *host, int events,
1190  			  struct mmc_request *mrq, struct mmc_command *cmd)
1191  {
1192  	bool done = false;
1193  	bool sbc_error;
1194  	unsigned long flags;
1195  	u32 *rsp;
1196  
1197  	if (mrq->sbc && cmd == mrq->cmd &&
1198  	    (events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR
1199  				   | MSDC_INT_ACMDTMO)))
1200  		msdc_auto_cmd_done(host, events, mrq->sbc);
1201  
1202  	sbc_error = mrq->sbc && mrq->sbc->error;
1203  
1204  	if (!sbc_error && !(events & (MSDC_INT_CMDRDY
1205  					| MSDC_INT_RSPCRCERR
1206  					| MSDC_INT_CMDTMO)))
1207  		return done;
1208  
1209  	spin_lock_irqsave(&host->lock, flags);
1210  	done = !host->cmd;
1211  	host->cmd = NULL;
1212  	spin_unlock_irqrestore(&host->lock, flags);
1213  
1214  	if (done)
1215  		return true;
1216  	rsp = cmd->resp;
1217  
1218  	sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask);
1219  
1220  	if (cmd->flags & MMC_RSP_PRESENT) {
1221  		if (cmd->flags & MMC_RSP_136) {
1222  			rsp[0] = readl(host->base + SDC_RESP3);
1223  			rsp[1] = readl(host->base + SDC_RESP2);
1224  			rsp[2] = readl(host->base + SDC_RESP1);
1225  			rsp[3] = readl(host->base + SDC_RESP0);
1226  		} else {
1227  			rsp[0] = readl(host->base + SDC_RESP0);
1228  		}
1229  	}
1230  
1231  	if (!sbc_error && !(events & MSDC_INT_CMDRDY)) {
1232  		if ((events & MSDC_INT_CMDTMO && !host->hs400_tuning) ||
1233  		    (!mmc_op_tuning(cmd->opcode) && !host->hs400_tuning))
1234  			/*
1235  			 * should not clear fifo/interrupt as the tune data
1236  			 * may have already come when cmd19/cmd21 gets response
1237  			 * CRC error.
1238  			 */
1239  			msdc_reset_hw(host);
1240  		if (events & MSDC_INT_RSPCRCERR) {
1241  			cmd->error = -EILSEQ;
1242  			host->error |= REQ_CMD_EIO;
1243  		} else if (events & MSDC_INT_CMDTMO) {
1244  			cmd->error = -ETIMEDOUT;
1245  			host->error |= REQ_CMD_TMO;
1246  		}
1247  	}
1248  	if (cmd->error)
1249  		dev_dbg(host->dev,
1250  				"%s: cmd=%d arg=%08X; rsp %08X; cmd_error=%d\n",
1251  				__func__, cmd->opcode, cmd->arg, rsp[0],
1252  				cmd->error);
1253  
1254  	msdc_cmd_next(host, mrq, cmd);
1255  	return true;
1256  }
1257  
/* It is the core layer's responsibility to ensure card status
 * is correct before issue a request. but host design do below
 * checks recommended.
 *
 * Returns false (and fails the command as a timeout) if the CMD line,
 * or additionally the SDC engine for R1B/data commands, stays busy
 * beyond 20 ms.
 */
static inline bool msdc_cmd_is_ready(struct msdc_host *host,
		struct mmc_request *mrq, struct mmc_command *cmd)
{
	u32 val;
	int ret;

	/* The max busy time we can endure is 20ms */
	ret = readl_poll_timeout_atomic(host->base + SDC_STS, val,
					!(val & SDC_STS_CMDBUSY), 1, 20000);
	if (ret) {
		dev_err(host->dev, "CMD bus busy detected\n");
		host->error |= REQ_CMD_BUSY;
		/* Complete the command as if the hardware timed out. */
		msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
		return false;
	}

	if (mmc_resp_type(cmd) == MMC_RSP_R1B || cmd->data) {
		/* R1B or with data, should check SDCBUSY */
		ret = readl_poll_timeout_atomic(host->base + SDC_STS, val,
						!(val & SDC_STS_SDCBUSY), 1, 20000);
		if (ret) {
			dev_err(host->dev, "Controller busy detected\n");
			host->error |= REQ_CMD_BUSY;
			msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
			return false;
		}
	}
	return true;
}
1291  
msdc_start_command(struct msdc_host * host,struct mmc_request * mrq,struct mmc_command * cmd)1292  static void msdc_start_command(struct msdc_host *host,
1293  		struct mmc_request *mrq, struct mmc_command *cmd)
1294  {
1295  	u32 rawcmd;
1296  	unsigned long flags;
1297  
1298  	WARN_ON(host->cmd);
1299  	host->cmd = cmd;
1300  
1301  	mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
1302  	if (!msdc_cmd_is_ready(host, mrq, cmd))
1303  		return;
1304  
1305  	if ((readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_TXCNT) >> 16 ||
1306  	    readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_RXCNT) {
1307  		dev_err(host->dev, "TX/RX FIFO non-empty before start of IO. Reset\n");
1308  		msdc_reset_hw(host);
1309  	}
1310  
1311  	cmd->error = 0;
1312  	rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd);
1313  
1314  	spin_lock_irqsave(&host->lock, flags);
1315  	sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask);
1316  	spin_unlock_irqrestore(&host->lock, flags);
1317  
1318  	writel(cmd->arg, host->base + SDC_ARG);
1319  	writel(rawcmd, host->base + SDC_CMD);
1320  }
1321  
msdc_cmd_next(struct msdc_host * host,struct mmc_request * mrq,struct mmc_command * cmd)1322  static void msdc_cmd_next(struct msdc_host *host,
1323  		struct mmc_request *mrq, struct mmc_command *cmd)
1324  {
1325  	if ((cmd->error && !host->hs400_tuning &&
1326  	     !(cmd->error == -EILSEQ &&
1327  	     mmc_op_tuning(cmd->opcode))) ||
1328  	    (mrq->sbc && mrq->sbc->error))
1329  		msdc_request_done(host, mrq);
1330  	else if (cmd == mrq->sbc)
1331  		msdc_start_command(host, mrq, mrq->cmd);
1332  	else if (!cmd->data)
1333  		msdc_request_done(host, mrq);
1334  	else
1335  		msdc_start_data(host, cmd, cmd->data);
1336  }
1337  
msdc_ops_request(struct mmc_host * mmc,struct mmc_request * mrq)1338  static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
1339  {
1340  	struct msdc_host *host = mmc_priv(mmc);
1341  
1342  	host->error = 0;
1343  	WARN_ON(host->mrq);
1344  	host->mrq = mrq;
1345  
1346  	if (mrq->data)
1347  		msdc_prepare_data(host, mrq->data);
1348  
1349  	/* if SBC is required, we have HW option and SW option.
1350  	 * if HW option is enabled, and SBC does not have "special" flags,
1351  	 * use HW option,  otherwise use SW option
1352  	 */
1353  	if (mrq->sbc && (!mmc_card_mmc(mmc->card) ||
1354  	    (mrq->sbc->arg & 0xFFFF0000)))
1355  		msdc_start_command(host, mrq, mrq->sbc);
1356  	else
1357  		msdc_start_command(host, mrq, mrq->cmd);
1358  }
1359  
msdc_pre_req(struct mmc_host * mmc,struct mmc_request * mrq)1360  static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
1361  {
1362  	struct msdc_host *host = mmc_priv(mmc);
1363  	struct mmc_data *data = mrq->data;
1364  
1365  	if (!data)
1366  		return;
1367  
1368  	msdc_prepare_data(host, data);
1369  	data->host_cookie |= MSDC_ASYNC_FLAG;
1370  }
1371  
msdc_post_req(struct mmc_host * mmc,struct mmc_request * mrq,int err)1372  static void msdc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
1373  		int err)
1374  {
1375  	struct msdc_host *host = mmc_priv(mmc);
1376  	struct mmc_data *data = mrq->data;
1377  
1378  	if (!data)
1379  		return;
1380  
1381  	if (data->host_cookie) {
1382  		data->host_cookie &= ~MSDC_ASYNC_FLAG;
1383  		msdc_unprepare_data(host, data);
1384  	}
1385  }
1386  
msdc_data_xfer_next(struct msdc_host * host,struct mmc_request * mrq)1387  static void msdc_data_xfer_next(struct msdc_host *host, struct mmc_request *mrq)
1388  {
1389  	if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && !mrq->stop->error &&
1390  	    !mrq->sbc)
1391  		msdc_start_command(host, mrq, mrq->stop);
1392  	else
1393  		msdc_request_done(host, mrq);
1394  }
1395  
msdc_data_xfer_done(struct msdc_host * host,u32 events,struct mmc_request * mrq,struct mmc_data * data)1396  static void msdc_data_xfer_done(struct msdc_host *host, u32 events,
1397  				struct mmc_request *mrq, struct mmc_data *data)
1398  {
1399  	struct mmc_command *stop;
1400  	unsigned long flags;
1401  	bool done;
1402  	unsigned int check_data = events &
1403  	    (MSDC_INT_XFER_COMPL | MSDC_INT_DATCRCERR | MSDC_INT_DATTMO
1404  	     | MSDC_INT_DMA_BDCSERR | MSDC_INT_DMA_GPDCSERR
1405  	     | MSDC_INT_DMA_PROTECT);
1406  	u32 val;
1407  	int ret;
1408  
1409  	spin_lock_irqsave(&host->lock, flags);
1410  	done = !host->data;
1411  	if (check_data)
1412  		host->data = NULL;
1413  	spin_unlock_irqrestore(&host->lock, flags);
1414  
1415  	if (done)
1416  		return;
1417  	stop = data->stop;
1418  
1419  	if (check_data || (stop && stop->error)) {
1420  		dev_dbg(host->dev, "DMA status: 0x%8X\n",
1421  				readl(host->base + MSDC_DMA_CFG));
1422  		sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP,
1423  				1);
1424  
1425  		ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CTRL, val,
1426  						!(val & MSDC_DMA_CTRL_STOP), 1, 20000);
1427  		if (ret)
1428  			dev_dbg(host->dev, "DMA stop timed out\n");
1429  
1430  		ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CFG, val,
1431  						!(val & MSDC_DMA_CFG_STS), 1, 20000);
1432  		if (ret)
1433  			dev_dbg(host->dev, "DMA inactive timed out\n");
1434  
1435  		sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask);
1436  		dev_dbg(host->dev, "DMA stop\n");
1437  
1438  		if ((events & MSDC_INT_XFER_COMPL) && (!stop || !stop->error)) {
1439  			data->bytes_xfered = data->blocks * data->blksz;
1440  		} else {
1441  			dev_dbg(host->dev, "interrupt events: %x\n", events);
1442  			msdc_reset_hw(host);
1443  			host->error |= REQ_DAT_ERR;
1444  			data->bytes_xfered = 0;
1445  
1446  			if (events & MSDC_INT_DATTMO)
1447  				data->error = -ETIMEDOUT;
1448  			else if (events & MSDC_INT_DATCRCERR)
1449  				data->error = -EILSEQ;
1450  
1451  			dev_dbg(host->dev, "%s: cmd=%d; blocks=%d",
1452  				__func__, mrq->cmd->opcode, data->blocks);
1453  			dev_dbg(host->dev, "data_error=%d xfer_size=%d\n",
1454  				(int)data->error, data->bytes_xfered);
1455  		}
1456  
1457  		msdc_data_xfer_next(host, mrq);
1458  	}
1459  }
1460  
msdc_set_buswidth(struct msdc_host * host,u32 width)1461  static void msdc_set_buswidth(struct msdc_host *host, u32 width)
1462  {
1463  	u32 val = readl(host->base + SDC_CFG);
1464  
1465  	val &= ~SDC_CFG_BUSWIDTH;
1466  
1467  	switch (width) {
1468  	default:
1469  	case MMC_BUS_WIDTH_1:
1470  		val |= (MSDC_BUS_1BITS << 16);
1471  		break;
1472  	case MMC_BUS_WIDTH_4:
1473  		val |= (MSDC_BUS_4BITS << 16);
1474  		break;
1475  	case MMC_BUS_WIDTH_8:
1476  		val |= (MSDC_BUS_8BITS << 16);
1477  		break;
1478  	}
1479  
1480  	writel(val, host->base + SDC_CFG);
1481  	dev_dbg(host->dev, "Bus Width = %d", width);
1482  }
1483  
msdc_ops_switch_volt(struct mmc_host * mmc,struct mmc_ios * ios)1484  static int msdc_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios)
1485  {
1486  	struct msdc_host *host = mmc_priv(mmc);
1487  	int ret;
1488  
1489  	if (!IS_ERR(mmc->supply.vqmmc)) {
1490  		if (ios->signal_voltage != MMC_SIGNAL_VOLTAGE_330 &&
1491  		    ios->signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
1492  			dev_err(host->dev, "Unsupported signal voltage!\n");
1493  			return -EINVAL;
1494  		}
1495  
1496  		ret = mmc_regulator_set_vqmmc(mmc, ios);
1497  		if (ret < 0) {
1498  			dev_dbg(host->dev, "Regulator set error %d (%d)\n",
1499  				ret, ios->signal_voltage);
1500  			return ret;
1501  		}
1502  
1503  		/* Apply different pinctrl settings for different signal voltage */
1504  		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
1505  			pinctrl_select_state(host->pinctrl, host->pins_uhs);
1506  		else
1507  			pinctrl_select_state(host->pinctrl, host->pins_default);
1508  	}
1509  	return 0;
1510  }
1511  
msdc_card_busy(struct mmc_host * mmc)1512  static int msdc_card_busy(struct mmc_host *mmc)
1513  {
1514  	struct msdc_host *host = mmc_priv(mmc);
1515  	u32 status = readl(host->base + MSDC_PS);
1516  
1517  	/* only check if data0 is low */
1518  	return !(status & BIT(16));
1519  }
1520  
msdc_request_timeout(struct work_struct * work)1521  static void msdc_request_timeout(struct work_struct *work)
1522  {
1523  	struct msdc_host *host = container_of(work, struct msdc_host,
1524  			req_timeout.work);
1525  
1526  	/* simulate HW timeout status */
1527  	dev_err(host->dev, "%s: aborting cmd/data/mrq\n", __func__);
1528  	if (host->mrq) {
1529  		dev_err(host->dev, "%s: aborting mrq=%p cmd=%d\n", __func__,
1530  				host->mrq, host->mrq->cmd->opcode);
1531  		if (host->cmd) {
1532  			dev_err(host->dev, "%s: aborting cmd=%d\n",
1533  					__func__, host->cmd->opcode);
1534  			msdc_cmd_done(host, MSDC_INT_CMDTMO, host->mrq,
1535  					host->cmd);
1536  		} else if (host->data) {
1537  			dev_err(host->dev, "%s: abort data: cmd%d; %d blocks\n",
1538  					__func__, host->mrq->cmd->opcode,
1539  					host->data->blocks);
1540  			msdc_data_xfer_done(host, MSDC_INT_DATTMO, host->mrq,
1541  					host->data);
1542  		}
1543  	}
1544  }
1545  
__msdc_enable_sdio_irq(struct msdc_host * host,int enb)1546  static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb)
1547  {
1548  	if (enb) {
1549  		sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
1550  		sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
1551  		if (host->dev_comp->recheck_sdio_irq)
1552  			msdc_recheck_sdio_irq(host);
1553  	} else {
1554  		sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
1555  		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
1556  	}
1557  }
1558  
msdc_enable_sdio_irq(struct mmc_host * mmc,int enb)1559  static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enb)
1560  {
1561  	struct msdc_host *host = mmc_priv(mmc);
1562  	unsigned long flags;
1563  	int ret;
1564  
1565  	spin_lock_irqsave(&host->lock, flags);
1566  	__msdc_enable_sdio_irq(host, enb);
1567  	spin_unlock_irqrestore(&host->lock, flags);
1568  
1569  	if (mmc_card_enable_async_irq(mmc->card) && host->pins_eint) {
1570  		if (enb) {
1571  			/*
1572  			 * In dev_pm_set_dedicated_wake_irq_reverse(), eint pin will be set to
1573  			 * GPIO mode. We need to restore it to SDIO DAT1 mode after that.
1574  			 * Since the current pinstate is pins_uhs, to ensure pinctrl select take
1575  			 * affect successfully, we change the pinstate to pins_eint firstly.
1576  			 */
1577  			pinctrl_select_state(host->pinctrl, host->pins_eint);
1578  			ret = dev_pm_set_dedicated_wake_irq_reverse(host->dev, host->eint_irq);
1579  
1580  			if (ret) {
1581  				dev_err(host->dev, "Failed to register SDIO wakeup irq!\n");
1582  				host->pins_eint = NULL;
1583  				pm_runtime_get_noresume(host->dev);
1584  			} else {
1585  				dev_dbg(host->dev, "SDIO eint irq: %d!\n", host->eint_irq);
1586  			}
1587  
1588  			pinctrl_select_state(host->pinctrl, host->pins_uhs);
1589  		} else {
1590  			dev_pm_clear_wake_irq(host->dev);
1591  		}
1592  	} else {
1593  		if (enb) {
1594  			/* Ensure host->pins_eint is NULL */
1595  			host->pins_eint = NULL;
1596  			pm_runtime_get_noresume(host->dev);
1597  		} else {
1598  			pm_runtime_put_noidle(host->dev);
1599  		}
1600  	}
1601  }
1602  
msdc_cmdq_irq(struct msdc_host * host,u32 intsts)1603  static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts)
1604  {
1605  	struct mmc_host *mmc = mmc_from_priv(host);
1606  	int cmd_err = 0, dat_err = 0;
1607  
1608  	if (intsts & MSDC_INT_RSPCRCERR) {
1609  		cmd_err = -EILSEQ;
1610  		dev_err(host->dev, "%s: CMD CRC ERR", __func__);
1611  	} else if (intsts & MSDC_INT_CMDTMO) {
1612  		cmd_err = -ETIMEDOUT;
1613  		dev_err(host->dev, "%s: CMD TIMEOUT ERR", __func__);
1614  	}
1615  
1616  	if (intsts & MSDC_INT_DATCRCERR) {
1617  		dat_err = -EILSEQ;
1618  		dev_err(host->dev, "%s: DATA CRC ERR", __func__);
1619  	} else if (intsts & MSDC_INT_DATTMO) {
1620  		dat_err = -ETIMEDOUT;
1621  		dev_err(host->dev, "%s: DATA TIMEOUT ERR", __func__);
1622  	}
1623  
1624  	if (cmd_err || dat_err) {
1625  		dev_err(host->dev, "cmd_err = %d, dat_err = %d, intsts = 0x%x",
1626  			cmd_err, dat_err, intsts);
1627  	}
1628  
1629  	return cqhci_irq(mmc, 0, cmd_err, dat_err);
1630  }
1631  
msdc_irq(int irq,void * dev_id)1632  static irqreturn_t msdc_irq(int irq, void *dev_id)
1633  {
1634  	struct msdc_host *host = (struct msdc_host *) dev_id;
1635  	struct mmc_host *mmc = mmc_from_priv(host);
1636  
1637  	while (true) {
1638  		struct mmc_request *mrq;
1639  		struct mmc_command *cmd;
1640  		struct mmc_data *data;
1641  		u32 events, event_mask;
1642  
1643  		spin_lock(&host->lock);
1644  		events = readl(host->base + MSDC_INT);
1645  		event_mask = readl(host->base + MSDC_INTEN);
1646  		if ((events & event_mask) & MSDC_INT_SDIOIRQ)
1647  			__msdc_enable_sdio_irq(host, 0);
1648  		/* clear interrupts */
1649  		writel(events & event_mask, host->base + MSDC_INT);
1650  
1651  		mrq = host->mrq;
1652  		cmd = host->cmd;
1653  		data = host->data;
1654  		spin_unlock(&host->lock);
1655  
1656  		if ((events & event_mask) & MSDC_INT_SDIOIRQ)
1657  			sdio_signal_irq(mmc);
1658  
1659  		if ((events & event_mask) & MSDC_INT_CDSC) {
1660  			if (host->internal_cd)
1661  				mmc_detect_change(mmc, msecs_to_jiffies(20));
1662  			events &= ~MSDC_INT_CDSC;
1663  		}
1664  
1665  		if (!(events & (event_mask & ~MSDC_INT_SDIOIRQ)))
1666  			break;
1667  
1668  		if ((mmc->caps2 & MMC_CAP2_CQE) &&
1669  		    (events & MSDC_INT_CMDQ)) {
1670  			msdc_cmdq_irq(host, events);
1671  			/* clear interrupts */
1672  			writel(events, host->base + MSDC_INT);
1673  			return IRQ_HANDLED;
1674  		}
1675  
1676  		if (!mrq) {
1677  			dev_err(host->dev,
1678  				"%s: MRQ=NULL; events=%08X; event_mask=%08X\n",
1679  				__func__, events, event_mask);
1680  			WARN_ON(1);
1681  			break;
1682  		}
1683  
1684  		dev_dbg(host->dev, "%s: events=%08X\n", __func__, events);
1685  
1686  		if (cmd)
1687  			msdc_cmd_done(host, events, mrq, cmd);
1688  		else if (data)
1689  			msdc_data_xfer_done(host, events, mrq, data);
1690  	}
1691  
1692  	return IRQ_HANDLED;
1693  }
1694  
msdc_init_hw(struct msdc_host * host)1695  static void msdc_init_hw(struct msdc_host *host)
1696  {
1697  	u32 val;
1698  	u32 tune_reg = host->dev_comp->pad_tune_reg;
1699  	struct mmc_host *mmc = mmc_from_priv(host);
1700  
1701  	if (host->reset) {
1702  		reset_control_assert(host->reset);
1703  		usleep_range(10, 50);
1704  		reset_control_deassert(host->reset);
1705  	}
1706  
1707  	/* Configure to MMC/SD mode, clock free running */
1708  	sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN);
1709  
1710  	/* Reset */
1711  	msdc_reset_hw(host);
1712  
1713  	/* Disable and clear all interrupts */
1714  	writel(0, host->base + MSDC_INTEN);
1715  	val = readl(host->base + MSDC_INT);
1716  	writel(val, host->base + MSDC_INT);
1717  
1718  	/* Configure card detection */
1719  	if (host->internal_cd) {
1720  		sdr_set_field(host->base + MSDC_PS, MSDC_PS_CDDEBOUNCE,
1721  			      DEFAULT_DEBOUNCE);
1722  		sdr_set_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
1723  		sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
1724  		sdr_set_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
1725  	} else {
1726  		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
1727  		sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
1728  		sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
1729  	}
1730  
1731  	if (host->top_base) {
1732  		writel(0, host->top_base + EMMC_TOP_CONTROL);
1733  		writel(0, host->top_base + EMMC_TOP_CMD);
1734  	} else {
1735  		writel(0, host->base + tune_reg);
1736  	}
1737  	writel(0, host->base + MSDC_IOCON);
1738  	sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
1739  	writel(0x403c0046, host->base + MSDC_PATCH_BIT);
1740  	sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1);
1741  	writel(0xffff4089, host->base + MSDC_PATCH_BIT1);
1742  	sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);
1743  
1744  	if (host->dev_comp->stop_clk_fix) {
1745  		sdr_set_field(host->base + MSDC_PATCH_BIT1,
1746  			      MSDC_PATCH_BIT1_STOP_DLY, 3);
1747  		sdr_clr_bits(host->base + SDC_FIFO_CFG,
1748  			     SDC_FIFO_CFG_WRVALIDSEL);
1749  		sdr_clr_bits(host->base + SDC_FIFO_CFG,
1750  			     SDC_FIFO_CFG_RDVALIDSEL);
1751  	}
1752  
1753  	if (host->dev_comp->busy_check)
1754  		sdr_clr_bits(host->base + MSDC_PATCH_BIT1, BIT(7));
1755  
1756  	if (host->dev_comp->async_fifo) {
1757  		sdr_set_field(host->base + MSDC_PATCH_BIT2,
1758  			      MSDC_PB2_RESPWAIT, 3);
1759  		if (host->dev_comp->enhance_rx) {
1760  			if (host->top_base)
1761  				sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
1762  					     SDC_RX_ENH_EN);
1763  			else
1764  				sdr_set_bits(host->base + SDC_ADV_CFG0,
1765  					     SDC_RX_ENHANCE_EN);
1766  		} else {
1767  			sdr_set_field(host->base + MSDC_PATCH_BIT2,
1768  				      MSDC_PB2_RESPSTSENSEL, 2);
1769  			sdr_set_field(host->base + MSDC_PATCH_BIT2,
1770  				      MSDC_PB2_CRCSTSENSEL, 2);
1771  		}
1772  		/* use async fifo, then no need tune internal delay */
1773  		sdr_clr_bits(host->base + MSDC_PATCH_BIT2,
1774  			     MSDC_PATCH_BIT2_CFGRESP);
1775  		sdr_set_bits(host->base + MSDC_PATCH_BIT2,
1776  			     MSDC_PATCH_BIT2_CFGCRCSTS);
1777  	}
1778  
1779  	if (host->dev_comp->support_64g)
1780  		sdr_set_bits(host->base + MSDC_PATCH_BIT2,
1781  			     MSDC_PB2_SUPPORT_64G);
1782  	if (host->dev_comp->data_tune) {
1783  		if (host->top_base) {
1784  			sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
1785  				     PAD_DAT_RD_RXDLY_SEL);
1786  			sdr_clr_bits(host->top_base + EMMC_TOP_CONTROL,
1787  				     DATA_K_VALUE_SEL);
1788  			sdr_set_bits(host->top_base + EMMC_TOP_CMD,
1789  				     PAD_CMD_RD_RXDLY_SEL);
1790  			if (host->tuning_step > PAD_DELAY_HALF) {
1791  				sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
1792  					     PAD_DAT_RD_RXDLY2_SEL);
1793  				sdr_set_bits(host->top_base + EMMC_TOP_CMD,
1794  					     PAD_CMD_RD_RXDLY2_SEL);
1795  			}
1796  		} else {
1797  			sdr_set_bits(host->base + tune_reg,
1798  				     MSDC_PAD_TUNE_RD_SEL |
1799  				     MSDC_PAD_TUNE_CMD_SEL);
1800  			if (host->tuning_step > PAD_DELAY_HALF)
1801  				sdr_set_bits(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
1802  					     MSDC_PAD_TUNE_RD2_SEL |
1803  					     MSDC_PAD_TUNE_CMD2_SEL);
1804  		}
1805  	} else {
1806  		/* choose clock tune */
1807  		if (host->top_base)
1808  			sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
1809  				     PAD_RXDLY_SEL);
1810  		else
1811  			sdr_set_bits(host->base + tune_reg,
1812  				     MSDC_PAD_TUNE_RXDLYSEL);
1813  	}
1814  
1815  	if (mmc->caps2 & MMC_CAP2_NO_SDIO) {
1816  		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
1817  		sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
1818  		sdr_clr_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER);
1819  	} else {
1820  		/* Configure to enable SDIO mode, otherwise SDIO CMD5 fails */
1821  		sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
1822  
1823  		/* Config SDIO device detect interrupt function */
1824  		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
1825  		sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER);
1826  	}
1827  
1828  	/* Configure to default data timeout */
1829  	sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
1830  
1831  	host->def_tune_para.iocon = readl(host->base + MSDC_IOCON);
1832  	host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
1833  	if (host->top_base) {
1834  		host->def_tune_para.emmc_top_control =
1835  			readl(host->top_base + EMMC_TOP_CONTROL);
1836  		host->def_tune_para.emmc_top_cmd =
1837  			readl(host->top_base + EMMC_TOP_CMD);
1838  		host->saved_tune_para.emmc_top_control =
1839  			readl(host->top_base + EMMC_TOP_CONTROL);
1840  		host->saved_tune_para.emmc_top_cmd =
1841  			readl(host->top_base + EMMC_TOP_CMD);
1842  	} else {
1843  		host->def_tune_para.pad_tune = readl(host->base + tune_reg);
1844  		host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
1845  	}
1846  	dev_dbg(host->dev, "init hardware done!");
1847  }
1848  
msdc_deinit_hw(struct msdc_host * host)1849  static void msdc_deinit_hw(struct msdc_host *host)
1850  {
1851  	u32 val;
1852  
1853  	if (host->internal_cd) {
1854  		/* Disabled card-detect */
1855  		sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
1856  		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
1857  	}
1858  
1859  	/* Disable and clear all interrupts */
1860  	writel(0, host->base + MSDC_INTEN);
1861  
1862  	val = readl(host->base + MSDC_INT);
1863  	writel(val, host->base + MSDC_INT);
1864  }
1865  
/* init gpd and bd list in msdc_drv_probe */
static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma)
{
	struct mt_gpdma_desc *gpd = dma->gpd;
	struct mt_bdma_desc *bd = dma->bd;
	dma_addr_t dma_addr;
	int i;

	/* Two GPDs were allocated: the first is the active descriptor,
	 * the second only exists so that gpd->next has a valid target.
	 */
	memset(gpd, 0, sizeof(struct mt_gpdma_desc) * 2);

	dma_addr = dma->gpd_addr + sizeof(struct mt_gpdma_desc);
	gpd->gpd_info = GPDMA_DESC_BDP; /* hwo, cs, bd pointer */
	/* gpd->next is must set for desc DMA
	 * That's why must alloc 2 gpd structure.
	 */
	gpd->next = lower_32_bits(dma_addr);
	/* On >32-bit capable IP, address bits [35:32] of the next-GPD
	 * pointer live in gpd_info[27:24].
	 */
	if (host->dev_comp->support_64g)
		gpd->gpd_info |= (upper_32_bits(dma_addr) & 0xf) << 24;

	dma_addr = dma->bd_addr;
	gpd->ptr = lower_32_bits(dma->bd_addr); /* physical address */
	/* Bits [35:32] of the BD-list pointer live in gpd_info[31:28]. */
	if (host->dev_comp->support_64g)
		gpd->gpd_info |= (upper_32_bits(dma_addr) & 0xf) << 28;

	/* Link all but the last BD into a singly linked chain; bd_info
	 * carries the high address nibble of each next pointer.
	 */
	memset(bd, 0, sizeof(struct mt_bdma_desc) * MAX_BD_NUM);
	for (i = 0; i < (MAX_BD_NUM - 1); i++) {
		dma_addr = dma->bd_addr + sizeof(*bd) * (i + 1);
		bd[i].next = lower_32_bits(dma_addr);
		if (host->dev_comp->support_64g)
			bd[i].bd_info |= (upper_32_bits(dma_addr) & 0xf) << 24;
	}
}
1898  
msdc_ops_set_ios(struct mmc_host * mmc,struct mmc_ios * ios)1899  static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1900  {
1901  	struct msdc_host *host = mmc_priv(mmc);
1902  	int ret;
1903  
1904  	msdc_set_buswidth(host, ios->bus_width);
1905  
1906  	/* Suspend/Resume will do power off/on */
1907  	switch (ios->power_mode) {
1908  	case MMC_POWER_UP:
1909  		if (!IS_ERR(mmc->supply.vmmc)) {
1910  			msdc_init_hw(host);
1911  			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
1912  					ios->vdd);
1913  			if (ret) {
1914  				dev_err(host->dev, "Failed to set vmmc power!\n");
1915  				return;
1916  			}
1917  		}
1918  		break;
1919  	case MMC_POWER_ON:
1920  		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
1921  			ret = regulator_enable(mmc->supply.vqmmc);
1922  			if (ret)
1923  				dev_err(host->dev, "Failed to set vqmmc power!\n");
1924  			else
1925  				host->vqmmc_enabled = true;
1926  		}
1927  		break;
1928  	case MMC_POWER_OFF:
1929  		if (!IS_ERR(mmc->supply.vmmc))
1930  			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1931  
1932  		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
1933  			regulator_disable(mmc->supply.vqmmc);
1934  			host->vqmmc_enabled = false;
1935  		}
1936  		break;
1937  	default:
1938  		break;
1939  	}
1940  
1941  	if (host->mclk != ios->clock || host->timing != ios->timing)
1942  		msdc_set_mclk(host, ios->timing, ios->clock);
1943  }
1944  
test_delay_bit(u64 delay,u32 bit)1945  static u64 test_delay_bit(u64 delay, u32 bit)
1946  {
1947  	bit %= PAD_DELAY_FULL;
1948  	return delay & BIT_ULL(bit);
1949  }
1950  
get_delay_len(u64 delay,u32 start_bit)1951  static int get_delay_len(u64 delay, u32 start_bit)
1952  {
1953  	int i;
1954  
1955  	for (i = 0; i < (PAD_DELAY_FULL - start_bit); i++) {
1956  		if (test_delay_bit(delay, start_bit + i) == 0)
1957  			return i;
1958  	}
1959  	return PAD_DELAY_FULL - start_bit;
1960  }
1961  
get_best_delay(struct msdc_host * host,u64 delay)1962  static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u64 delay)
1963  {
1964  	int start = 0, len = 0;
1965  	int start_final = 0, len_final = 0;
1966  	u8 final_phase = 0xff;
1967  	struct msdc_delay_phase delay_phase = { 0, };
1968  
1969  	if (delay == 0) {
1970  		dev_err(host->dev, "phase error: [map:%016llx]\n", delay);
1971  		delay_phase.final_phase = final_phase;
1972  		return delay_phase;
1973  	}
1974  
1975  	while (start < PAD_DELAY_FULL) {
1976  		len = get_delay_len(delay, start);
1977  		if (len_final < len) {
1978  			start_final = start;
1979  			len_final = len;
1980  		}
1981  		start += len ? len : 1;
1982  		if (!upper_32_bits(delay) && len >= 12 && start_final < 4)
1983  			break;
1984  	}
1985  
1986  	/* The rule is that to find the smallest delay cell */
1987  	if (start_final == 0)
1988  		final_phase = (start_final + len_final / 3) % PAD_DELAY_FULL;
1989  	else
1990  		final_phase = (start_final + len_final / 2) % PAD_DELAY_FULL;
1991  	dev_dbg(host->dev, "phase: [map:%016llx] [maxlen:%d] [final:%d]\n",
1992  		delay, len_final, final_phase);
1993  
1994  	delay_phase.maxlen = len_final;
1995  	delay_phase.start = start_final;
1996  	delay_phase.final_phase = final_phase;
1997  	return delay_phase;
1998  }
1999  
msdc_set_cmd_delay(struct msdc_host * host,u32 value)2000  static inline void msdc_set_cmd_delay(struct msdc_host *host, u32 value)
2001  {
2002  	u32 tune_reg = host->dev_comp->pad_tune_reg;
2003  
2004  	if (host->top_base) {
2005  		if (value < PAD_DELAY_HALF) {
2006  			sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY, value);
2007  			sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY2, 0);
2008  		} else {
2009  			sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY,
2010  				      PAD_DELAY_HALF - 1);
2011  			sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY2,
2012  				      value - PAD_DELAY_HALF);
2013  		}
2014  	} else {
2015  		if (value < PAD_DELAY_HALF) {
2016  			sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, value);
2017  			sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
2018  				      MSDC_PAD_TUNE_CMDRDLY2, 0);
2019  		} else {
2020  			sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
2021  				      PAD_DELAY_HALF - 1);
2022  			sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
2023  				      MSDC_PAD_TUNE_CMDRDLY2, value - PAD_DELAY_HALF);
2024  		}
2025  	}
2026  }
2027  
msdc_set_data_delay(struct msdc_host * host,u32 value)2028  static inline void msdc_set_data_delay(struct msdc_host *host, u32 value)
2029  {
2030  	u32 tune_reg = host->dev_comp->pad_tune_reg;
2031  
2032  	if (host->top_base) {
2033  		if (value < PAD_DELAY_HALF) {
2034  			sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
2035  				      PAD_DAT_RD_RXDLY, value);
2036  			sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
2037  				      PAD_DAT_RD_RXDLY2, 0);
2038  		} else {
2039  			sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
2040  				      PAD_DAT_RD_RXDLY, PAD_DELAY_HALF - 1);
2041  			sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
2042  				      PAD_DAT_RD_RXDLY2, value - PAD_DELAY_HALF);
2043  		}
2044  	} else {
2045  		if (value < PAD_DELAY_HALF) {
2046  			sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, value);
2047  			sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
2048  				      MSDC_PAD_TUNE_DATRRDLY2, 0);
2049  		} else {
2050  			sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY,
2051  				      PAD_DELAY_HALF - 1);
2052  			sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
2053  				      MSDC_PAD_TUNE_DATRRDLY2, value - PAD_DELAY_HALF);
2054  		}
2055  	}
2056  }
2057  
msdc_tune_response(struct mmc_host * mmc,u32 opcode)2058  static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
2059  {
2060  	struct msdc_host *host = mmc_priv(mmc);
2061  	u64 rise_delay = 0, fall_delay = 0;
2062  	struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
2063  	struct msdc_delay_phase internal_delay_phase;
2064  	u8 final_delay, final_maxlen;
2065  	u32 internal_delay = 0;
2066  	u32 tune_reg = host->dev_comp->pad_tune_reg;
2067  	int cmd_err;
2068  	int i, j;
2069  
2070  	if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
2071  	    mmc->ios.timing == MMC_TIMING_UHS_SDR104)
2072  		sdr_set_field(host->base + tune_reg,
2073  			      MSDC_PAD_TUNE_CMDRRDLY,
2074  			      host->hs200_cmd_int_delay);
2075  
2076  	sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
2077  	for (i = 0; i < host->tuning_step; i++) {
2078  		msdc_set_cmd_delay(host, i);
2079  		/*
2080  		 * Using the same parameters, it may sometimes pass the test,
2081  		 * but sometimes it may fail. To make sure the parameters are
2082  		 * more stable, we test each set of parameters 3 times.
2083  		 */
2084  		for (j = 0; j < 3; j++) {
2085  			mmc_send_tuning(mmc, opcode, &cmd_err);
2086  			if (!cmd_err) {
2087  				rise_delay |= BIT_ULL(i);
2088  			} else {
2089  				rise_delay &= ~BIT_ULL(i);
2090  				break;
2091  			}
2092  		}
2093  	}
2094  	final_rise_delay = get_best_delay(host, rise_delay);
2095  	/* if rising edge has enough margin, then do not scan falling edge */
2096  	if (final_rise_delay.maxlen >= 12 ||
2097  	    (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
2098  		goto skip_fall;
2099  
2100  	sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
2101  	for (i = 0; i < host->tuning_step; i++) {
2102  		msdc_set_cmd_delay(host, i);
2103  		/*
2104  		 * Using the same parameters, it may sometimes pass the test,
2105  		 * but sometimes it may fail. To make sure the parameters are
2106  		 * more stable, we test each set of parameters 3 times.
2107  		 */
2108  		for (j = 0; j < 3; j++) {
2109  			mmc_send_tuning(mmc, opcode, &cmd_err);
2110  			if (!cmd_err) {
2111  				fall_delay |= BIT_ULL(i);
2112  			} else {
2113  				fall_delay &= ~BIT_ULL(i);
2114  				break;
2115  			}
2116  		}
2117  	}
2118  	final_fall_delay = get_best_delay(host, fall_delay);
2119  
2120  skip_fall:
2121  	final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
2122  	if (final_fall_delay.maxlen >= 12 && final_fall_delay.start < 4)
2123  		final_maxlen = final_fall_delay.maxlen;
2124  	if (final_maxlen == final_rise_delay.maxlen) {
2125  		sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
2126  		final_delay = final_rise_delay.final_phase;
2127  	} else {
2128  		sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
2129  		final_delay = final_fall_delay.final_phase;
2130  	}
2131  	msdc_set_cmd_delay(host, final_delay);
2132  
2133  	if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay)
2134  		goto skip_internal;
2135  
2136  	for (i = 0; i < host->tuning_step; i++) {
2137  		sdr_set_field(host->base + tune_reg,
2138  			      MSDC_PAD_TUNE_CMDRRDLY, i);
2139  		mmc_send_tuning(mmc, opcode, &cmd_err);
2140  		if (!cmd_err)
2141  			internal_delay |= BIT_ULL(i);
2142  	}
2143  	dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay);
2144  	internal_delay_phase = get_best_delay(host, internal_delay);
2145  	sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY,
2146  		      internal_delay_phase.final_phase);
2147  skip_internal:
2148  	dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay);
2149  	return final_delay == 0xff ? -EIO : 0;
2150  }
2151  
hs400_tune_response(struct mmc_host * mmc,u32 opcode)2152  static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)
2153  {
2154  	struct msdc_host *host = mmc_priv(mmc);
2155  	u32 cmd_delay = 0;
2156  	struct msdc_delay_phase final_cmd_delay = { 0,};
2157  	u8 final_delay;
2158  	int cmd_err;
2159  	int i, j;
2160  
2161  	/* select EMMC50 PAD CMD tune */
2162  	sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0));
2163  	sdr_set_field(host->base + MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMDTA, 2);
2164  
2165  	if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
2166  	    mmc->ios.timing == MMC_TIMING_UHS_SDR104)
2167  		sdr_set_field(host->base + MSDC_PAD_TUNE,
2168  			      MSDC_PAD_TUNE_CMDRRDLY,
2169  			      host->hs200_cmd_int_delay);
2170  
2171  	if (host->hs400_cmd_resp_sel_rising)
2172  		sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
2173  	else
2174  		sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
2175  
2176  	for (i = 0; i < PAD_DELAY_HALF; i++) {
2177  		sdr_set_field(host->base + PAD_CMD_TUNE,
2178  			      PAD_CMD_TUNE_RX_DLY3, i);
2179  		/*
2180  		 * Using the same parameters, it may sometimes pass the test,
2181  		 * but sometimes it may fail. To make sure the parameters are
2182  		 * more stable, we test each set of parameters 3 times.
2183  		 */
2184  		for (j = 0; j < 3; j++) {
2185  			mmc_send_tuning(mmc, opcode, &cmd_err);
2186  			if (!cmd_err) {
2187  				cmd_delay |= BIT(i);
2188  			} else {
2189  				cmd_delay &= ~BIT(i);
2190  				break;
2191  			}
2192  		}
2193  	}
2194  	final_cmd_delay = get_best_delay(host, cmd_delay);
2195  	sdr_set_field(host->base + PAD_CMD_TUNE, PAD_CMD_TUNE_RX_DLY3,
2196  		      final_cmd_delay.final_phase);
2197  	final_delay = final_cmd_delay.final_phase;
2198  
2199  	dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay);
2200  	return final_delay == 0xff ? -EIO : 0;
2201  }
2202  
msdc_tune_data(struct mmc_host * mmc,u32 opcode)2203  static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
2204  {
2205  	struct msdc_host *host = mmc_priv(mmc);
2206  	u64 rise_delay = 0, fall_delay = 0;
2207  	struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
2208  	u8 final_delay, final_maxlen;
2209  	int i, ret;
2210  
2211  	sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
2212  		      host->latch_ck);
2213  	sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
2214  	sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
2215  	for (i = 0; i < host->tuning_step; i++) {
2216  		msdc_set_data_delay(host, i);
2217  		ret = mmc_send_tuning(mmc, opcode, NULL);
2218  		if (!ret)
2219  			rise_delay |= BIT_ULL(i);
2220  	}
2221  	final_rise_delay = get_best_delay(host, rise_delay);
2222  	/* if rising edge has enough margin, then do not scan falling edge */
2223  	if (final_rise_delay.maxlen >= 12 ||
2224  	    (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
2225  		goto skip_fall;
2226  
2227  	sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
2228  	sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
2229  	for (i = 0; i < host->tuning_step; i++) {
2230  		msdc_set_data_delay(host, i);
2231  		ret = mmc_send_tuning(mmc, opcode, NULL);
2232  		if (!ret)
2233  			fall_delay |= BIT_ULL(i);
2234  	}
2235  	final_fall_delay = get_best_delay(host, fall_delay);
2236  
2237  skip_fall:
2238  	final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
2239  	if (final_maxlen == final_rise_delay.maxlen) {
2240  		sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
2241  		sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
2242  		final_delay = final_rise_delay.final_phase;
2243  	} else {
2244  		sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
2245  		sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
2246  		final_delay = final_fall_delay.final_phase;
2247  	}
2248  	msdc_set_data_delay(host, final_delay);
2249  
2250  	dev_dbg(host->dev, "Final data pad delay: %x\n", final_delay);
2251  	return final_delay == 0xff ? -EIO : 0;
2252  }
2253  
2254  /*
2255   * MSDC IP which supports data tune + async fifo can do CMD/DAT tune
2256   * together, which can save the tuning time.
2257   */
msdc_tune_together(struct mmc_host * mmc,u32 opcode)2258  static int msdc_tune_together(struct mmc_host *mmc, u32 opcode)
2259  {
2260  	struct msdc_host *host = mmc_priv(mmc);
2261  	u64 rise_delay = 0, fall_delay = 0;
2262  	struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
2263  	u8 final_delay, final_maxlen;
2264  	int i, ret;
2265  
2266  	sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
2267  		      host->latch_ck);
2268  
2269  	sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
2270  	sdr_clr_bits(host->base + MSDC_IOCON,
2271  		     MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
2272  	for (i = 0; i < host->tuning_step; i++) {
2273  		msdc_set_cmd_delay(host, i);
2274  		msdc_set_data_delay(host, i);
2275  		ret = mmc_send_tuning(mmc, opcode, NULL);
2276  		if (!ret)
2277  			rise_delay |= BIT_ULL(i);
2278  	}
2279  	final_rise_delay = get_best_delay(host, rise_delay);
2280  	/* if rising edge has enough margin, then do not scan falling edge */
2281  	if (final_rise_delay.maxlen >= 12 ||
2282  	    (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
2283  		goto skip_fall;
2284  
2285  	sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
2286  	sdr_set_bits(host->base + MSDC_IOCON,
2287  		     MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
2288  	for (i = 0; i < host->tuning_step; i++) {
2289  		msdc_set_cmd_delay(host, i);
2290  		msdc_set_data_delay(host, i);
2291  		ret = mmc_send_tuning(mmc, opcode, NULL);
2292  		if (!ret)
2293  			fall_delay |= BIT_ULL(i);
2294  	}
2295  	final_fall_delay = get_best_delay(host, fall_delay);
2296  
2297  skip_fall:
2298  	final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
2299  	if (final_maxlen == final_rise_delay.maxlen) {
2300  		sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
2301  		sdr_clr_bits(host->base + MSDC_IOCON,
2302  			     MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
2303  		final_delay = final_rise_delay.final_phase;
2304  	} else {
2305  		sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
2306  		sdr_set_bits(host->base + MSDC_IOCON,
2307  			     MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
2308  		final_delay = final_fall_delay.final_phase;
2309  	}
2310  
2311  	msdc_set_cmd_delay(host, final_delay);
2312  	msdc_set_data_delay(host, final_delay);
2313  
2314  	dev_dbg(host->dev, "Final pad delay: %x\n", final_delay);
2315  	return final_delay == 0xff ? -EIO : 0;
2316  }
2317  
msdc_execute_tuning(struct mmc_host * mmc,u32 opcode)2318  static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
2319  {
2320  	struct msdc_host *host = mmc_priv(mmc);
2321  	int ret;
2322  	u32 tune_reg = host->dev_comp->pad_tune_reg;
2323  
2324  	if (host->dev_comp->data_tune && host->dev_comp->async_fifo) {
2325  		ret = msdc_tune_together(mmc, opcode);
2326  		if (host->hs400_mode) {
2327  			sdr_clr_bits(host->base + MSDC_IOCON,
2328  				     MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
2329  			msdc_set_data_delay(host, 0);
2330  		}
2331  		goto tune_done;
2332  	}
2333  	if (host->hs400_mode &&
2334  	    host->dev_comp->hs400_tune)
2335  		ret = hs400_tune_response(mmc, opcode);
2336  	else
2337  		ret = msdc_tune_response(mmc, opcode);
2338  	if (ret == -EIO) {
2339  		dev_err(host->dev, "Tune response fail!\n");
2340  		return ret;
2341  	}
2342  	if (host->hs400_mode == false) {
2343  		ret = msdc_tune_data(mmc, opcode);
2344  		if (ret == -EIO)
2345  			dev_err(host->dev, "Tune data fail!\n");
2346  	}
2347  
2348  tune_done:
2349  	host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
2350  	host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
2351  	host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
2352  	if (host->top_base) {
2353  		host->saved_tune_para.emmc_top_control = readl(host->top_base +
2354  				EMMC_TOP_CONTROL);
2355  		host->saved_tune_para.emmc_top_cmd = readl(host->top_base +
2356  				EMMC_TOP_CMD);
2357  	}
2358  	return ret;
2359  }
2360  
msdc_prepare_hs400_tuning(struct mmc_host * mmc,struct mmc_ios * ios)2361  static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2362  {
2363  	struct msdc_host *host = mmc_priv(mmc);
2364  	host->hs400_mode = true;
2365  
2366  	if (host->top_base)
2367  		writel(host->hs400_ds_delay,
2368  		       host->top_base + EMMC50_PAD_DS_TUNE);
2369  	else
2370  		writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
2371  	/* hs400 mode must set it to 0 */
2372  	sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
2373  	/* to improve read performance, set outstanding to 2 */
2374  	sdr_set_field(host->base + EMMC50_CFG3, EMMC50_CFG3_OUTS_WR, 2);
2375  
2376  	return 0;
2377  }
2378  
msdc_execute_hs400_tuning(struct mmc_host * mmc,struct mmc_card * card)2379  static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card)
2380  {
2381  	struct msdc_host *host = mmc_priv(mmc);
2382  	struct msdc_delay_phase dly1_delay;
2383  	u32 val, result_dly1 = 0;
2384  	u8 *ext_csd;
2385  	int i, ret;
2386  
2387  	if (host->top_base) {
2388  		sdr_set_bits(host->top_base + EMMC50_PAD_DS_TUNE,
2389  			     PAD_DS_DLY_SEL);
2390  		if (host->hs400_ds_dly3)
2391  			sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
2392  				      PAD_DS_DLY3, host->hs400_ds_dly3);
2393  	} else {
2394  		sdr_set_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY_SEL);
2395  		if (host->hs400_ds_dly3)
2396  			sdr_set_field(host->base + PAD_DS_TUNE,
2397  				      PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
2398  	}
2399  
2400  	host->hs400_tuning = true;
2401  	for (i = 0; i < PAD_DELAY_HALF; i++) {
2402  		if (host->top_base)
2403  			sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
2404  				      PAD_DS_DLY1, i);
2405  		else
2406  			sdr_set_field(host->base + PAD_DS_TUNE,
2407  				      PAD_DS_TUNE_DLY1, i);
2408  		ret = mmc_get_ext_csd(card, &ext_csd);
2409  		if (!ret) {
2410  			result_dly1 |= BIT(i);
2411  			kfree(ext_csd);
2412  		}
2413  	}
2414  	host->hs400_tuning = false;
2415  
2416  	dly1_delay = get_best_delay(host, result_dly1);
2417  	if (dly1_delay.maxlen == 0) {
2418  		dev_err(host->dev, "Failed to get DLY1 delay!\n");
2419  		goto fail;
2420  	}
2421  	if (host->top_base)
2422  		sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
2423  			      PAD_DS_DLY1, dly1_delay.final_phase);
2424  	else
2425  		sdr_set_field(host->base + PAD_DS_TUNE,
2426  			      PAD_DS_TUNE_DLY1, dly1_delay.final_phase);
2427  
2428  	if (host->top_base)
2429  		val = readl(host->top_base + EMMC50_PAD_DS_TUNE);
2430  	else
2431  		val = readl(host->base + PAD_DS_TUNE);
2432  
2433  	dev_info(host->dev, "Final PAD_DS_TUNE: 0x%x\n", val);
2434  
2435  	return 0;
2436  
2437  fail:
2438  	dev_err(host->dev, "Failed to tuning DS pin delay!\n");
2439  	return -EIO;
2440  }
2441  
msdc_hw_reset(struct mmc_host * mmc)2442  static void msdc_hw_reset(struct mmc_host *mmc)
2443  {
2444  	struct msdc_host *host = mmc_priv(mmc);
2445  
2446  	sdr_set_bits(host->base + EMMC_IOCON, 1);
2447  	udelay(10); /* 10us is enough */
2448  	sdr_clr_bits(host->base + EMMC_IOCON, 1);
2449  }
2450  
msdc_ack_sdio_irq(struct mmc_host * mmc)2451  static void msdc_ack_sdio_irq(struct mmc_host *mmc)
2452  {
2453  	unsigned long flags;
2454  	struct msdc_host *host = mmc_priv(mmc);
2455  
2456  	spin_lock_irqsave(&host->lock, flags);
2457  	__msdc_enable_sdio_irq(host, 1);
2458  	spin_unlock_irqrestore(&host->lock, flags);
2459  }
2460  
msdc_get_cd(struct mmc_host * mmc)2461  static int msdc_get_cd(struct mmc_host *mmc)
2462  {
2463  	struct msdc_host *host = mmc_priv(mmc);
2464  	int val;
2465  
2466  	if (mmc->caps & MMC_CAP_NONREMOVABLE)
2467  		return 1;
2468  
2469  	if (!host->internal_cd)
2470  		return mmc_gpio_get_cd(mmc);
2471  
2472  	val = readl(host->base + MSDC_PS) & MSDC_PS_CDSTS;
2473  	if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
2474  		return !!val;
2475  	else
2476  		return !val;
2477  }
2478  
msdc_hs400_enhanced_strobe(struct mmc_host * mmc,struct mmc_ios * ios)2479  static void msdc_hs400_enhanced_strobe(struct mmc_host *mmc,
2480  				       struct mmc_ios *ios)
2481  {
2482  	struct msdc_host *host = mmc_priv(mmc);
2483  
2484  	if (ios->enhanced_strobe) {
2485  		msdc_prepare_hs400_tuning(mmc, ios);
2486  		sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_PADCMD_LATCHCK, 1);
2487  		sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_CMD_RESP_SEL, 1);
2488  		sdr_set_field(host->base + EMMC50_CFG1, EMMC50_CFG1_DS_CFG, 1);
2489  
2490  		sdr_clr_bits(host->base + CQHCI_SETTING, CQHCI_RD_CMD_WND_SEL);
2491  		sdr_clr_bits(host->base + CQHCI_SETTING, CQHCI_WR_CMD_WND_SEL);
2492  		sdr_clr_bits(host->base + EMMC51_CFG0, CMDQ_RDAT_CNT);
2493  	} else {
2494  		sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_PADCMD_LATCHCK, 0);
2495  		sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_CMD_RESP_SEL, 0);
2496  		sdr_set_field(host->base + EMMC50_CFG1, EMMC50_CFG1_DS_CFG, 0);
2497  
2498  		sdr_set_bits(host->base + CQHCI_SETTING, CQHCI_RD_CMD_WND_SEL);
2499  		sdr_set_bits(host->base + CQHCI_SETTING, CQHCI_WR_CMD_WND_SEL);
2500  		sdr_set_field(host->base + EMMC51_CFG0, CMDQ_RDAT_CNT, 0xb4);
2501  	}
2502  }
2503  
msdc_cqe_cit_cal(struct msdc_host * host,u64 timer_ns)2504  static void msdc_cqe_cit_cal(struct msdc_host *host, u64 timer_ns)
2505  {
2506  	struct mmc_host *mmc = mmc_from_priv(host);
2507  	struct cqhci_host *cq_host = mmc->cqe_private;
2508  	u8 itcfmul;
2509  	u64 hclk_freq, value;
2510  
2511  	/*
2512  	 * On MediaTek SoCs the MSDC controller's CQE uses msdc_hclk as ITCFVAL
2513  	 * so we multiply/divide the HCLK frequency by ITCFMUL to calculate the
2514  	 * Send Status Command Idle Timer (CIT) value.
2515  	 */
2516  	hclk_freq = (u64)clk_get_rate(host->h_clk);
2517  	itcfmul = CQHCI_ITCFMUL(cqhci_readl(cq_host, CQHCI_CAP));
2518  	switch (itcfmul) {
2519  	case 0x0:
2520  		do_div(hclk_freq, 1000);
2521  		break;
2522  	case 0x1:
2523  		do_div(hclk_freq, 100);
2524  		break;
2525  	case 0x2:
2526  		do_div(hclk_freq, 10);
2527  		break;
2528  	case 0x3:
2529  		break;
2530  	case 0x4:
2531  		hclk_freq = hclk_freq * 10;
2532  		break;
2533  	default:
2534  		host->cq_ssc1_time = 0x40;
2535  		return;
2536  	}
2537  
2538  	value = hclk_freq * timer_ns;
2539  	do_div(value, 1000000000);
2540  	host->cq_ssc1_time = value;
2541  }
2542  
msdc_cqe_enable(struct mmc_host * mmc)2543  static void msdc_cqe_enable(struct mmc_host *mmc)
2544  {
2545  	struct msdc_host *host = mmc_priv(mmc);
2546  	struct cqhci_host *cq_host = mmc->cqe_private;
2547  
2548  	/* enable cmdq irq */
2549  	writel(MSDC_INT_CMDQ, host->base + MSDC_INTEN);
2550  	/* enable busy check */
2551  	sdr_set_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
2552  	/* default write data / busy timeout 20s */
2553  	msdc_set_busy_timeout(host, 20 * 1000000000ULL, 0);
2554  	/* default read data timeout 1s */
2555  	msdc_set_timeout(host, 1000000000ULL, 0);
2556  
2557  	/* Set the send status command idle timer */
2558  	cqhci_writel(cq_host, host->cq_ssc1_time, CQHCI_SSC1);
2559  }
2560  
msdc_cqe_disable(struct mmc_host * mmc,bool recovery)2561  static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
2562  {
2563  	struct msdc_host *host = mmc_priv(mmc);
2564  	unsigned int val = 0;
2565  
2566  	/* disable cmdq irq */
2567  	sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INT_CMDQ);
2568  	/* disable busy check */
2569  	sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
2570  
2571  	val = readl(host->base + MSDC_INT);
2572  	writel(val, host->base + MSDC_INT);
2573  
2574  	if (recovery) {
2575  		sdr_set_field(host->base + MSDC_DMA_CTRL,
2576  			      MSDC_DMA_CTRL_STOP, 1);
2577  		if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CTRL, val,
2578  			!(val & MSDC_DMA_CTRL_STOP), 1, 3000)))
2579  			return;
2580  		if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CFG, val,
2581  			!(val & MSDC_DMA_CFG_STS), 1, 3000)))
2582  			return;
2583  		msdc_reset_hw(host);
2584  	}
2585  }
2586  
msdc_cqe_pre_enable(struct mmc_host * mmc)2587  static void msdc_cqe_pre_enable(struct mmc_host *mmc)
2588  {
2589  	struct cqhci_host *cq_host = mmc->cqe_private;
2590  	u32 reg;
2591  
2592  	reg = cqhci_readl(cq_host, CQHCI_CFG);
2593  	reg |= CQHCI_ENABLE;
2594  	cqhci_writel(cq_host, reg, CQHCI_CFG);
2595  }
2596  
msdc_cqe_post_disable(struct mmc_host * mmc)2597  static void msdc_cqe_post_disable(struct mmc_host *mmc)
2598  {
2599  	struct cqhci_host *cq_host = mmc->cqe_private;
2600  	u32 reg;
2601  
2602  	reg = cqhci_readl(cq_host, CQHCI_CFG);
2603  	reg &= ~CQHCI_ENABLE;
2604  	cqhci_writel(cq_host, reg, CQHCI_CFG);
2605  }
2606  
/* mmc_host_ops table wiring the MMC core callbacks to this driver. */
static const struct mmc_host_ops mt_msdc_ops = {
	.post_req = msdc_post_req,
	.pre_req = msdc_pre_req,
	.request = msdc_ops_request,
	.set_ios = msdc_ops_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = msdc_get_cd,
	.hs400_enhanced_strobe = msdc_hs400_enhanced_strobe,
	.enable_sdio_irq = msdc_enable_sdio_irq,
	.ack_sdio_irq = msdc_ack_sdio_irq,
	.start_signal_voltage_switch = msdc_ops_switch_volt,
	.card_busy = msdc_card_busy,
	.execute_tuning = msdc_execute_tuning,
	.prepare_hs400_tuning = msdc_prepare_hs400_tuning,
	.execute_hs400_tuning = msdc_execute_hs400_tuning,
	.card_hw_reset = msdc_hw_reset,
};
2624  
2625  static const struct cqhci_host_ops msdc_cmdq_ops = {
2626  	.enable         = msdc_cqe_enable,
2627  	.disable        = msdc_cqe_disable,
2628  	.pre_enable = msdc_cqe_pre_enable,
2629  	.post_disable = msdc_cqe_post_disable,
2630  };
2631  
msdc_of_property_parse(struct platform_device * pdev,struct msdc_host * host)2632  static void msdc_of_property_parse(struct platform_device *pdev,
2633  				   struct msdc_host *host)
2634  {
2635  	struct mmc_host *mmc = mmc_from_priv(host);
2636  
2637  	of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck",
2638  			     &host->latch_ck);
2639  
2640  	of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
2641  			     &host->hs400_ds_delay);
2642  
2643  	of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-ds-dly3",
2644  			     &host->hs400_ds_dly3);
2645  
2646  	of_property_read_u32(pdev->dev.of_node, "mediatek,hs200-cmd-int-delay",
2647  			     &host->hs200_cmd_int_delay);
2648  
2649  	of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-cmd-int-delay",
2650  			     &host->hs400_cmd_int_delay);
2651  
2652  	if (of_property_read_bool(pdev->dev.of_node,
2653  				  "mediatek,hs400-cmd-resp-sel-rising"))
2654  		host->hs400_cmd_resp_sel_rising = true;
2655  	else
2656  		host->hs400_cmd_resp_sel_rising = false;
2657  
2658  	if (of_property_read_u32(pdev->dev.of_node, "mediatek,tuning-step",
2659  				 &host->tuning_step)) {
2660  		if (mmc->caps2 & MMC_CAP2_NO_MMC)
2661  			host->tuning_step = PAD_DELAY_FULL;
2662  		else
2663  			host->tuning_step = PAD_DELAY_HALF;
2664  	}
2665  
2666  	if (of_property_read_bool(pdev->dev.of_node,
2667  				  "supports-cqe"))
2668  		host->cqhci = true;
2669  	else
2670  		host->cqhci = false;
2671  }
2672  
/*
 * Acquire all clocks used by the controller.
 *
 * "source" and "hclk" are mandatory; everything else is optional and
 * degrades gracefully when absent.  Returns 0 on success or a negative
 * errno from a failed mandatory/deferred clock lookup.
 */
static int msdc_of_clock_parse(struct platform_device *pdev,
			       struct msdc_host *host)
{
	int ret;

	host->src_clk = devm_clk_get(&pdev->dev, "source");
	if (IS_ERR(host->src_clk))
		return PTR_ERR(host->src_clk);

	host->h_clk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(host->h_clk))
		return PTR_ERR(host->h_clk);

	/* Optional bus clock: treat any lookup failure as "not present" */
	host->bus_clk = devm_clk_get_optional(&pdev->dev, "bus_clk");
	if (IS_ERR(host->bus_clk))
		host->bus_clk = NULL;

	/*source clock control gate is optional clock*/
	host->src_clk_cg = devm_clk_get_optional(&pdev->dev, "source_cg");
	if (IS_ERR(host->src_clk_cg))
		return PTR_ERR(host->src_clk_cg);

	/*
	 * Fallback for legacy device-trees: src_clk and HCLK use the same
	 * bit to control gating but they are parented to a different mux,
	 * hence if our intention is to gate only the source, required
	 * during a clk mode switch to avoid hw hangs, we need to gate
	 * its parent (specified as a different clock only on new DTs).
	 */
	if (!host->src_clk_cg) {
		host->src_clk_cg = clk_get_parent(host->src_clk);
		/*
		 * NOTE(review): clk_get_parent() returns NULL or a valid
		 * clk rather than an ERR_PTR, so this IS_ERR() check looks
		 * vacuous — confirm before relying on it for error handling.
		 */
		if (IS_ERR(host->src_clk_cg))
			return PTR_ERR(host->src_clk_cg);
	}

	/* If present, always enable for this clock gate */
	host->sys_clk_cg = devm_clk_get_optional_enabled(&pdev->dev, "sys_cg");
	if (IS_ERR(host->sys_clk_cg))
		host->sys_clk_cg = NULL;

	/* Bus-fabric gates fetched as a bulk set; all three are optional */
	host->bulk_clks[0].id = "pclk_cg";
	host->bulk_clks[1].id = "axi_cg";
	host->bulk_clks[2].id = "ahb_cg";
	ret = devm_clk_bulk_get_optional(&pdev->dev, MSDC_NR_CLOCKS,
					 host->bulk_clks);
	if (ret) {
		dev_err(&pdev->dev, "Cannot get pclk/axi/ahb clock gates\n");
		return ret;
	}

	return 0;
}
2725  
msdc_drv_probe(struct platform_device * pdev)2726  static int msdc_drv_probe(struct platform_device *pdev)
2727  {
2728  	struct mmc_host *mmc;
2729  	struct msdc_host *host;
2730  	struct resource *res;
2731  	int ret;
2732  
2733  	if (!pdev->dev.of_node) {
2734  		dev_err(&pdev->dev, "No DT found\n");
2735  		return -EINVAL;
2736  	}
2737  
2738  	/* Allocate MMC host for this device */
2739  	mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
2740  	if (!mmc)
2741  		return -ENOMEM;
2742  
2743  	host = mmc_priv(mmc);
2744  	ret = mmc_of_parse(mmc);
2745  	if (ret)
2746  		goto host_free;
2747  
2748  	host->base = devm_platform_ioremap_resource(pdev, 0);
2749  	if (IS_ERR(host->base)) {
2750  		ret = PTR_ERR(host->base);
2751  		goto host_free;
2752  	}
2753  
2754  	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2755  	if (res) {
2756  		host->top_base = devm_ioremap_resource(&pdev->dev, res);
2757  		if (IS_ERR(host->top_base))
2758  			host->top_base = NULL;
2759  	}
2760  
2761  	ret = mmc_regulator_get_supply(mmc);
2762  	if (ret)
2763  		goto host_free;
2764  
2765  	ret = msdc_of_clock_parse(pdev, host);
2766  	if (ret)
2767  		goto host_free;
2768  
2769  	host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev,
2770  								"hrst");
2771  	if (IS_ERR(host->reset)) {
2772  		ret = PTR_ERR(host->reset);
2773  		goto host_free;
2774  	}
2775  
2776  	/* only eMMC has crypto property */
2777  	if (!(mmc->caps2 & MMC_CAP2_NO_MMC)) {
2778  		host->crypto_clk = devm_clk_get_optional(&pdev->dev, "crypto");
2779  		if (IS_ERR(host->crypto_clk))
2780  			host->crypto_clk = NULL;
2781  		else
2782  			mmc->caps2 |= MMC_CAP2_CRYPTO;
2783  	}
2784  
2785  	host->irq = platform_get_irq(pdev, 0);
2786  	if (host->irq < 0) {
2787  		ret = host->irq;
2788  		goto host_free;
2789  	}
2790  
2791  	host->pinctrl = devm_pinctrl_get(&pdev->dev);
2792  	if (IS_ERR(host->pinctrl)) {
2793  		ret = PTR_ERR(host->pinctrl);
2794  		dev_err(&pdev->dev, "Cannot find pinctrl!\n");
2795  		goto host_free;
2796  	}
2797  
2798  	host->pins_default = pinctrl_lookup_state(host->pinctrl, "default");
2799  	if (IS_ERR(host->pins_default)) {
2800  		ret = PTR_ERR(host->pins_default);
2801  		dev_err(&pdev->dev, "Cannot find pinctrl default!\n");
2802  		goto host_free;
2803  	}
2804  
2805  	host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
2806  	if (IS_ERR(host->pins_uhs)) {
2807  		ret = PTR_ERR(host->pins_uhs);
2808  		dev_err(&pdev->dev, "Cannot find pinctrl uhs!\n");
2809  		goto host_free;
2810  	}
2811  
2812  	/* Support for SDIO eint irq ? */
2813  	if ((mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ) && (mmc->pm_caps & MMC_PM_KEEP_POWER)) {
2814  		host->eint_irq = platform_get_irq_byname_optional(pdev, "sdio_wakeup");
2815  		if (host->eint_irq > 0) {
2816  			host->pins_eint = pinctrl_lookup_state(host->pinctrl, "state_eint");
2817  			if (IS_ERR(host->pins_eint)) {
2818  				dev_err(&pdev->dev, "Cannot find pinctrl eint!\n");
2819  				host->pins_eint = NULL;
2820  			} else {
2821  				device_init_wakeup(&pdev->dev, true);
2822  			}
2823  		}
2824  	}
2825  
2826  	msdc_of_property_parse(pdev, host);
2827  
2828  	host->dev = &pdev->dev;
2829  	host->dev_comp = of_device_get_match_data(&pdev->dev);
2830  	host->src_clk_freq = clk_get_rate(host->src_clk);
2831  	/* Set host parameters to mmc */
2832  	mmc->ops = &mt_msdc_ops;
2833  	if (host->dev_comp->clk_div_bits == 8)
2834  		mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
2835  	else
2836  		mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095);
2837  
2838  	if (!(mmc->caps & MMC_CAP_NONREMOVABLE) &&
2839  	    !mmc_can_gpio_cd(mmc) &&
2840  	    host->dev_comp->use_internal_cd) {
2841  		/*
2842  		 * Is removable but no GPIO declared, so
2843  		 * use internal functionality.
2844  		 */
2845  		host->internal_cd = true;
2846  	}
2847  
2848  	if (mmc->caps & MMC_CAP_SDIO_IRQ)
2849  		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
2850  
2851  	mmc->caps |= MMC_CAP_CMD23;
2852  	if (host->cqhci)
2853  		mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
2854  	/* MMC core transfer sizes tunable parameters */
2855  	mmc->max_segs = MAX_BD_NUM;
2856  	if (host->dev_comp->support_64g)
2857  		mmc->max_seg_size = BDMA_DESC_BUFLEN_EXT;
2858  	else
2859  		mmc->max_seg_size = BDMA_DESC_BUFLEN;
2860  	mmc->max_blk_size = 2048;
2861  	mmc->max_req_size = 512 * 1024;
2862  	mmc->max_blk_count = mmc->max_req_size / 512;
2863  	if (host->dev_comp->support_64g)
2864  		host->dma_mask = DMA_BIT_MASK(36);
2865  	else
2866  		host->dma_mask = DMA_BIT_MASK(32);
2867  	mmc_dev(mmc)->dma_mask = &host->dma_mask;
2868  
2869  	host->timeout_clks = 3 * 1048576;
2870  	host->dma.gpd = dma_alloc_coherent(&pdev->dev,
2871  				2 * sizeof(struct mt_gpdma_desc),
2872  				&host->dma.gpd_addr, GFP_KERNEL);
2873  	host->dma.bd = dma_alloc_coherent(&pdev->dev,
2874  				MAX_BD_NUM * sizeof(struct mt_bdma_desc),
2875  				&host->dma.bd_addr, GFP_KERNEL);
2876  	if (!host->dma.gpd || !host->dma.bd) {
2877  		ret = -ENOMEM;
2878  		goto release_mem;
2879  	}
2880  	msdc_init_gpd_bd(host, &host->dma);
2881  	INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
2882  	spin_lock_init(&host->lock);
2883  
2884  	platform_set_drvdata(pdev, mmc);
2885  	ret = msdc_ungate_clock(host);
2886  	if (ret) {
2887  		dev_err(&pdev->dev, "Cannot ungate clocks!\n");
2888  		goto release_mem;
2889  	}
2890  	msdc_init_hw(host);
2891  
2892  	if (mmc->caps2 & MMC_CAP2_CQE) {
2893  		host->cq_host = devm_kzalloc(mmc->parent,
2894  					     sizeof(*host->cq_host),
2895  					     GFP_KERNEL);
2896  		if (!host->cq_host) {
2897  			ret = -ENOMEM;
2898  			goto host_free;
2899  		}
2900  		host->cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
2901  		host->cq_host->mmio = host->base + 0x800;
2902  		host->cq_host->ops = &msdc_cmdq_ops;
2903  		ret = cqhci_init(host->cq_host, mmc, true);
2904  		if (ret)
2905  			goto host_free;
2906  		mmc->max_segs = 128;
2907  		/* cqhci 16bit length */
2908  		/* 0 size, means 65536 so we don't have to -1 here */
2909  		mmc->max_seg_size = 64 * 1024;
2910  		/* Reduce CIT to 0x40 that corresponds to 2.35us */
2911  		msdc_cqe_cit_cal(host, 2350);
2912  	}
2913  
2914  	ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq,
2915  			       IRQF_TRIGGER_NONE, pdev->name, host);
2916  	if (ret)
2917  		goto release;
2918  
2919  	pm_runtime_set_active(host->dev);
2920  	pm_runtime_set_autosuspend_delay(host->dev, MTK_MMC_AUTOSUSPEND_DELAY);
2921  	pm_runtime_use_autosuspend(host->dev);
2922  	pm_runtime_enable(host->dev);
2923  	ret = mmc_add_host(mmc);
2924  
2925  	if (ret)
2926  		goto end;
2927  
2928  	return 0;
2929  end:
2930  	pm_runtime_disable(host->dev);
2931  release:
2932  	platform_set_drvdata(pdev, NULL);
2933  	msdc_deinit_hw(host);
2934  	msdc_gate_clock(host);
2935  release_mem:
2936  	if (host->dma.gpd)
2937  		dma_free_coherent(&pdev->dev,
2938  			2 * sizeof(struct mt_gpdma_desc),
2939  			host->dma.gpd, host->dma.gpd_addr);
2940  	if (host->dma.bd)
2941  		dma_free_coherent(&pdev->dev,
2942  			MAX_BD_NUM * sizeof(struct mt_bdma_desc),
2943  			host->dma.bd, host->dma.bd_addr);
2944  host_free:
2945  	mmc_free_host(mmc);
2946  
2947  	return ret;
2948  }
2949  
/*
 * Tear down one controller instance: unregister from the MMC core,
 * quiesce and gate the hardware, then release the DMA descriptor
 * memory and the host structure.  Runtime PM is resumed first so the
 * register accesses in msdc_deinit_hw() hit powered hardware.
 */
static void msdc_drv_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct msdc_host *host;

	mmc = platform_get_drvdata(pdev);
	host = mmc_priv(mmc);

	/* Make sure the device is awake before touching registers */
	pm_runtime_get_sync(host->dev);

	platform_set_drvdata(pdev, NULL);
	mmc_remove_host(mmc);
	msdc_deinit_hw(host);
	msdc_gate_clock(host);

	/* Balance the get_sync above without triggering another resume */
	pm_runtime_disable(host->dev);
	pm_runtime_put_noidle(host->dev);
	dma_free_coherent(&pdev->dev,
			2 * sizeof(struct mt_gpdma_desc),
			host->dma.gpd, host->dma.gpd_addr);
	dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc),
			host->dma.bd, host->dma.bd_addr);

	mmc_free_host(mmc);
}
2975  
/*
 * Snapshot the tuning/configuration registers that are lost while the
 * controller clocks are gated, so msdc_restore_reg() can reprogram them
 * on runtime resume.  When a "top" register block exists the pad tuning
 * lives there; otherwise the per-SoC pad_tune_reg offset is used.
 */
static void msdc_save_reg(struct msdc_host *host)
{
	u32 tune_reg = host->dev_comp->pad_tune_reg;

	host->save_para.msdc_cfg = readl(host->base + MSDC_CFG);
	host->save_para.iocon = readl(host->base + MSDC_IOCON);
	host->save_para.sdc_cfg = readl(host->base + SDC_CFG);
	host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
	host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
	host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2);
	host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE);
	host->save_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
	host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
	host->save_para.emmc50_cfg3 = readl(host->base + EMMC50_CFG3);
	host->save_para.sdc_fifo_cfg = readl(host->base + SDC_FIFO_CFG);
	if (host->top_base) {
		host->save_para.emmc_top_control =
			readl(host->top_base + EMMC_TOP_CONTROL);
		host->save_para.emmc_top_cmd =
			readl(host->top_base + EMMC_TOP_CMD);
		host->save_para.emmc50_pad_ds_tune =
			readl(host->top_base + EMMC50_PAD_DS_TUNE);
	} else {
		host->save_para.pad_tune = readl(host->base + tune_reg);
	}
}
3002  
/*
 * Counterpart of msdc_save_reg(): rewrite the saved configuration and
 * tuning registers after the clocks have been ungated on runtime
 * resume, and re-enable the in-band SDIO interrupt if a function
 * driver currently holds it.
 */
static void msdc_restore_reg(struct msdc_host *host)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	u32 tune_reg = host->dev_comp->pad_tune_reg;

	writel(host->save_para.msdc_cfg, host->base + MSDC_CFG);
	writel(host->save_para.iocon, host->base + MSDC_IOCON);
	writel(host->save_para.sdc_cfg, host->base + SDC_CFG);
	writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
	writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
	writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2);
	writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE);
	writel(host->save_para.pad_cmd_tune, host->base + PAD_CMD_TUNE);
	writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
	writel(host->save_para.emmc50_cfg3, host->base + EMMC50_CFG3);
	writel(host->save_para.sdc_fifo_cfg, host->base + SDC_FIFO_CFG);
	if (host->top_base) {
		writel(host->save_para.emmc_top_control,
		       host->top_base + EMMC_TOP_CONTROL);
		writel(host->save_para.emmc_top_cmd,
		       host->top_base + EMMC_TOP_CMD);
		writel(host->save_para.emmc50_pad_ds_tune,
		       host->top_base + EMMC50_PAD_DS_TUNE);
	} else {
		writel(host->save_para.pad_tune, host->base + tune_reg);
	}

	/* SDIO irq was masked in runtime suspend; unmask it again */
	if (sdio_irq_claimed(mmc))
		__msdc_enable_sdio_irq(host, 1);
}
3033  
/*
 * Runtime suspend: save register state, hand SDIO wakeup over to the
 * dedicated EINT pin if configured, then gate the clocks.  Always
 * returns 0.
 */
static int __maybe_unused msdc_runtime_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct msdc_host *host = mmc_priv(mmc);

	/* Must happen before the clocks are gated below */
	msdc_save_reg(host);

	if (sdio_irq_claimed(mmc)) {
		if (host->pins_eint) {
			/* Route DAT1 to the EINT so wakeups work with clocks off */
			disable_irq(host->irq);
			pinctrl_select_state(host->pinctrl, host->pins_eint);
		}

		/* Mask the controller's own SDIO interrupt while suspended */
		__msdc_enable_sdio_irq(host, 0);
	}
	msdc_gate_clock(host);
	return 0;
}
3052  
/*
 * Runtime resume: ungate the clocks, restore the saved register state,
 * and switch DAT1 back from the EINT wakeup pinmux to normal UHS
 * operation if SDIO wakeup was armed.  Returns 0 or the clock-enable
 * error.
 */
static int __maybe_unused msdc_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct msdc_host *host = mmc_priv(mmc);
	int ret;

	ret = msdc_ungate_clock(host);
	if (ret)
		return ret;

	/* Registers are only valid targets once clocks are running */
	msdc_restore_reg(host);

	if (sdio_irq_claimed(mmc) && host->pins_eint) {
		pinctrl_select_state(host->pinctrl, host->pins_uhs);
		enable_irq(host->irq);
	}
	return 0;
}
3071  
/*
 * System suspend: quiesce CQHCI first (acking any interrupts it left
 * pending), then force the device into runtime suspend.  Returns 0 or
 * a negative errno from cqhci_suspend()/pm_runtime_force_suspend().
 */
static int __maybe_unused msdc_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct msdc_host *host = mmc_priv(mmc);
	int ret;
	u32 val;

	if (mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_suspend(mmc);
		if (ret)
			return ret;
		/* MSDC_INT is write-1-to-clear: ack everything still pending */
		val = readl(host->base + MSDC_INT);
		writel(val, host->base + MSDC_INT);
	}

	/*
	 * Bump up runtime PM usage counter otherwise dev->power.needs_force_resume will
	 * not be marked as 1, pm_runtime_force_resume() will go out directly.
	 */
	if (sdio_irq_claimed(mmc) && host->pins_eint)
		pm_runtime_get_noresume(dev);

	return pm_runtime_force_suspend(dev);
}
3096  
msdc_resume(struct device * dev)3097  static int __maybe_unused msdc_resume(struct device *dev)
3098  {
3099  	struct mmc_host *mmc = dev_get_drvdata(dev);
3100  	struct msdc_host *host = mmc_priv(mmc);
3101  
3102  	if (sdio_irq_claimed(mmc) && host->pins_eint)
3103  		pm_runtime_put_noidle(dev);
3104  
3105  	return pm_runtime_force_resume(dev);
3106  }
3107  
/* System sleep is implemented on top of the runtime PM callbacks above. */
static const struct dev_pm_ops msdc_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msdc_suspend, msdc_resume)
	SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
};
3112  
/* Platform driver glue; matched against DT via msdc_of_ids. */
static struct platform_driver mt_msdc_driver = {
	.probe = msdc_drv_probe,
	.remove_new = msdc_drv_remove,
	.driver = {
		.name = "mtk-msdc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = msdc_of_ids,
		.pm = &msdc_dev_pm_ops,
	},
};

module_platform_driver(mt_msdc_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek SD/MMC Card Driver");
3127