// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * Author: Thierry Reding <treding@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>

#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>

#include "../pci.h"

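/* The AFI provides eight banks of 32 MSI vectors each */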
#define INT_PCI_MSI_NR (8 * 32)

/* register definitions */

#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_MSI_VEC(x)		(0x6c + ((x) * 4))
#define AFI_MSI_EN_VEC(x)	(0x8c + ((x) * 4))

#define AFI_CONFIGURATION		0xac
#define  AFI_CONFIGURATION_EN_FPCI		(1 << 0)
#define  AFI_CONFIGURATION_CLKEN_OVERRIDE	(1 << 31)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE			0xb8
#define  AFI_INTR_CODE_MASK		0xf
#define  AFI_INTR_INI_SLAVE_ERROR	1
#define  AFI_INTR_INI_DECODE_ERROR	2
#define  AFI_INTR_TARGET_ABORT		3
#define  AFI_INTR_MASTER_ABORT		4
#define  AFI_INTR_INVALID_WRITE		5
#define  AFI_INTR_LEGACY		6
#define  AFI_INTR_FPCI_DECODE_ERROR	7
#define  AFI_INTR_AXI_DECODE_ERROR	8
#define  AFI_INTR_FPCI_TIMEOUT		9
#define  AFI_INTR_PE_PRSNT_SENSE	10
#define  AFI_INTR_PE_CLKREQ_SENSE	11
#define  AFI_INTR_CLKCLAMP_SENSE	12
#define  AFI_INTR_RDY4PD_SENSE		13
#define  AFI_INTR_P2P_ERROR		14

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)

#define AFI_PCIE_PME		0xf0

#define AFI_PCIE_CONFIG					0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111	(0x2 << 20)
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x)		(1 << ((x) + 29))
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL		(0x7 << 29)

#define AFI_FUSE			0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define  AFI_PEX_CTRL_RST		(1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)

#define AFI_PLLE_CONTROL		0x160
#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0		0x168

#define RP_ECTL_2_R1	0x00000e84
#define  RP_ECTL_2_R1_RX_CTLE_1C_MASK		0xffff

#define RP_ECTL_4_R1	0x00000e8c
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R1	0x00000e90
#define  RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R1	0x00000e94
#define  RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_ECTL_2_R2	0x00000ea4
#define  RP_ECTL_2_R2_RX_CTLE_1C_MASK	0xffff

#define RP_ECTL_4_R2	0x00000eac
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R2	0x00000eb0
#define  RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R2	0x00000eb4
#define  RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_VEND_XP	0x00000f00
#define  RP_VEND_XP_DL_UP			(1 << 30)
#define  RP_VEND_XP_OPPORTUNISTIC_ACK		(1 << 27)
#define  RP_VEND_XP_OPPORTUNISTIC_UPDATEFC	(1 << 28)
#define  RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK	(0xff << 18)

#define RP_VEND_CTL0	0x00000f44
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK	(0xf << 12)
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH	(0x9 << 12)

#define RP_VEND_CTL1	0x00000f48
#define  RP_VEND_CTL1_ERPT	(1 << 13)

#define RP_VEND_XP_BIST	0x00000f4c
#define  RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE	(1 << 28)

#define RP_VEND_CTL2 0x00000fa8
#define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)

#define RP_PRIV_MISC	0x00000fe0
#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT		(0xe << 0)
#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT		(0xf << 0)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD		(0xf << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE		(1 << 23)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD		(0xf << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE		(1 << 31)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define RP_LINK_CONTROL_STATUS_2		0x000000b0

#define PADS_CTL_SEL		0x0000009c

#define PADS_CTL		0x000000a0
#define  PADS_CTL_IDDQ_1L	(1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL_TEGRA20			0x000000b8
#define PADS_PLL_CTL_TEGRA30			0x000000b4
#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0			0x000000c8
#define PADS_REFCLK_CFG1			0x000000cc
#define PADS_REFCLK_BIAS			0x000000d0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
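
/*
 * Illustrative sketch (not from the TRM): a per-port 16-bit entry would be
 * packed as
 *
 *   value = (drvi << PADS_REFCLK_CFG_DRVI_SHIFT) |
 *           (predi << PADS_REFCLK_CFG_PREDI_SHIFT) |
 *           (e_term << PADS_REFCLK_CFG_E_TERM_SHIFT) |
 *           (term << PADS_REFCLK_CFG_TERM_SHIFT);
 *
 * with the entries for ports 0 and 1 in PADS_REFCLK_CFG0 and the entry for
 * port 2 in PADS_REFCLK_CFG1 (see tegra_pcie_apply_pad_settings()).
 */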

#define PME_ACK_TIMEOUT 10000 /* in usec */
#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */

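/*
 * struct tegra_msi - MSI controller state
 * @used: bitmap of allocated MSI vectors
 * @domain: MSI IRQ domain, stacked on a parent domain for the vectors
 * @map_lock: protects @used
 * @mask_lock: protects the AFI_MSI_EN_VEC() mask registers
 * @virt: CPU address of the page backing the MSI target address
 * @phys: DMA address programmed as the MSI target address
 * @irq: parent interrupt that all MSI vectors are chained to
 */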
struct tegra_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct mutex map_lock;
	spinlock_t mask_lock;
	void *virt;
	dma_addr_t phys;
	int irq;
};

/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_port_soc {
	struct {
		u8 turnoff_bit;
		u8 ack_bit;
	} pme;
};

struct tegra_pcie_soc {
	unsigned int num_ports;
	const struct tegra_pcie_port_soc *ports;
	unsigned int msi_base_shift;
	unsigned long afi_pex2_ctrl;
	u32 pads_pll_ctl;
	u32 tx_ref_sel;
	u32 pads_refclk_cfg0;
	u32 pads_refclk_cfg1;
	u32 update_fc_threshold;
	bool has_pex_clkreq_en;
	bool has_pex_bias_ctrl;
	bool has_intr_prsnt_sense;
	bool has_cml_clk;
	bool has_gen2;
	bool force_pca_enable;
	bool program_uphy;
	bool update_clamp_threshold;
	bool program_deskew_time;
	bool update_fc_timer;
	bool has_cache_bars;
	struct {
		struct {
			u32 rp_ectl_2_r1;
			u32 rp_ectl_4_r1;
			u32 rp_ectl_5_r1;
			u32 rp_ectl_6_r1;
			u32 rp_ectl_2_r2;
			u32 rp_ectl_4_r2;
			u32 rp_ectl_5_r2;
			u32 rp_ectl_6_r2;
		} regs;
		bool enable;
	} ectl;
};

struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;
	void __iomem *afi;
	void __iomem *cfg;
	int irq;

	struct resource cs;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	bool legacy_phy;
	struct phy *phy;

	struct tegra_msi msi;

	struct list_head ports;
	u32 xbar_config;

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc *soc;
	struct dentry *debugfs;
};

static inline struct tegra_pcie *msi_to_pcie(struct tegra_msi *msi)
{
	return container_of(msi, struct tegra_pcie, msi);
}

struct tegra_pcie_port {
	struct tegra_pcie *pcie;
	struct device_node *np;
	struct list_head list;
	struct resource regs;
	void __iomem *base;
	unsigned int index;
	unsigned int lanes;

	struct phy **phys;

	struct gpio_desc *reset_gpio;
};

static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}

/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 *
 * To work around this, a 4 KiB region is used to generate the required
 * configuration transaction with relevant B:D:F and register offset values.
 * This is achieved by dynamically programming base address and size of
 * AFI_AXI_BAR used for end point config space mapping to make sure that the
 * address (access to which generates correct config transaction) falls in
 * this 4 KiB region.
 */
static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
					   unsigned int where)
{
	return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
	       (PCI_FUNC(devfn) << 8) | (where & 0xff);
}
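
/*
 * For example (purely illustrative), bus 5, device 2, function 0,
 * register 0x104 yields:
 *
 *   ((0x104 & 0xf00) << 16) | (5 << 16) | (2 << 11) | (0 << 8) | (0x104 & 0xff)
 *     = 0x01051004
 *
 * tegra_pcie_map_bus() below then points AFI_FPCI_BAR0 at the 4 KiB page
 * containing 0x01051000 and accesses offset 0x004 within the remapped window.
 */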

static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
					unsigned int devfn,
					int where)
{
	struct tegra_pcie *pcie = bus->sysdata;
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		unsigned int offset;
		u32 base;

		offset = tegra_pcie_conf_offset(bus->number, devfn, where);

		/* move 4 KiB window to offset within the FPCI region */
		base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
		afi_writel(pcie, base, AFI_FPCI_BAR0);

		/* move to correct offset within the 4 KiB page */
		addr = pcie->cfg + (offset & (SZ_4K - 1));
	}

	return addr;
}

static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *value)
{
	if (bus->number == 0)
		return pci_generic_config_read32(bus, devfn, where, size,
						 value);

	return pci_generic_config_read(bus, devfn, where, size, value);
}

static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 value)
{
	if (bus->number == 0)
		return pci_generic_config_write32(bus, devfn, where, size,
						  value);

	return pci_generic_config_write(bus, devfn, where, size, value);
}

static struct pci_ops tegra_pcie_ops = {
	.map_bus = tegra_pcie_map_bus,
	.read = tegra_pcie_config_read,
	.write = tegra_pcie_config_write,
};

static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long ret = 0;

	switch (port->index) {
	case 0:
		ret = AFI_PEX0_CTRL;
		break;

	case 1:
		ret = AFI_PEX1_CTRL;
		break;

	case 2:
		ret = soc->afi_pex2_ctrl;
		break;
	}

	return ret;
}

static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 1);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value &= ~AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}

	usleep_range(1000, 2000);

	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 0);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value |= AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}
}

static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/* Enable AER capability */
	value = readl(port->base + RP_VEND_CTL1);
	value |= RP_VEND_CTL1_ERPT;
	writel(value, port->base + RP_VEND_CTL1);

	/* Optimal settings to enhance bandwidth */
	value = readl(port->base + RP_VEND_XP);
	value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
	value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
	writel(value, port->base + RP_VEND_XP);

	/*
	 * LTSSM will wait for DLLP to finish before entering L1 or L2,
	 * to avoid truncation of PM messages which results in receiver errors
	 */
	value = readl(port->base + RP_VEND_XP_BIST);
	value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
	writel(value, port->base + RP_VEND_XP_BIST);

	value = readl(port->base + RP_PRIV_MISC);
	value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
	value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;

	if (soc->update_clamp_threshold) {
		value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
				RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
		value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
			RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
	}

	writel(value, port->base + RP_PRIV_MISC);
}

static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	value = readl(port->base + RP_ECTL_2_R1);
	value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r1;
	writel(value, port->base + RP_ECTL_2_R1);

	value = readl(port->base + RP_ECTL_4_R1);
	value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r1 <<
				RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R1);

	value = readl(port->base + RP_ECTL_5_R1);
	value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r1;
	writel(value, port->base + RP_ECTL_5_R1);

	value = readl(port->base + RP_ECTL_6_R1);
	value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r1;
	writel(value, port->base + RP_ECTL_6_R1);

	value = readl(port->base + RP_ECTL_2_R2);
	value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r2;
	writel(value, port->base + RP_ECTL_2_R2);

	value = readl(port->base + RP_ECTL_4_R2);
	value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r2 <<
				RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R2);

	value = readl(port->base + RP_ECTL_5_R2);
	value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r2;
	writel(value, port->base + RP_ECTL_5_R2);

	value = readl(port->base + RP_ECTL_6_R2);
	value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r2;
	writel(value, port->base + RP_ECTL_6_R2);
}

static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/*
	 * Sometimes link speed change from Gen2 to Gen1 fails due to
	 * instability in deskew logic on lane-0. Increase the deskew
	 * retry time to resolve this issue.
	 */
	if (soc->program_deskew_time) {
		value = readl(port->base + RP_VEND_CTL0);
		value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
		value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
		writel(value, port->base + RP_VEND_CTL0);
	}

	if (soc->update_fc_timer) {
		value = readl(port->base + RP_VEND_XP);
		value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
		value |= soc->update_fc_threshold;
		writel(value, port->base + RP_VEND_XP);
	}

	/*
	 * The PCIe link doesn't come up with a few legacy PCIe endpoints if
	 * the root port advertises both Gen-1 and Gen-2 speeds. Hence, the
	 * strategy followed here is to initially advertise only Gen-1 and,
	 * after the link is up, retrain to Gen-2 speed.
	 */
	value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
	value &= ~PCI_EXP_LNKSTA_CLS;
	value |= PCI_EXP_LNKSTA_CLS_2_5GB;
	writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
}

static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);

	if (soc->force_pca_enable) {
		value = readl(port->base + RP_VEND_CTL2);
		value |= RP_VEND_CTL2_PCA_ENABLE;
		writel(value, port->base + RP_VEND_CTL2);
	}

	tegra_pcie_enable_rp_features(port);

	if (soc->ectl.enable)
		tegra_pcie_program_ectl_settings(port);

	tegra_pcie_apply_sw_fixup(port);
}

static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);

	/* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
	value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
}

static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}

/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);

/* Tegra20 and Tegra30 PCIe require relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);

static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = pdev->bus->sysdata;
	int irq;

	tegra_cpuidle_pcie_irqs_in_use();

	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
	if (!irq)
		irq = pcie->irq;

	return irq;
}

static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
	static const char * const err_msg[] = {
		"Unknown",
		"AXI slave error",
		"AXI decode error",
		"Target abort",
		"Master abort",
		"Invalid write",
		"Legacy interrupt",
		"Response decoding error",
		"AXI response decoding error",
		"Transaction timeout",
		"Slot present pin change",
		"Slot clock request change",
		"TMS clock ramp change",
		"TMS ready for power down",
		"Peer2Peer error",
	};
	struct tegra_pcie *pcie = arg;
	struct device *dev = pcie->dev;
	u32 code, signature;

	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
	afi_writel(pcie, 0, AFI_INTR_CODE);

	if (code == AFI_INTR_LEGACY)
		return IRQ_NONE;

	if (code >= ARRAY_SIZE(err_msg))
		code = 0;

	/*
	 * do not pollute kernel log with master abort reports since they
	 * happen a lot during enumeration
	 */
	if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
	else
		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);

	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
	    code == AFI_INTR_FPCI_DECODE_ERROR) {
		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);

		if (code == AFI_INTR_MASTER_ABORT)
			dev_dbg(dev, "  FPCI address: %10llx\n", address);
		else
			dev_err(dev, "  FPCI address: %10llx\n", address);
	}

	return IRQ_HANDLED;
}

/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
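
/*
 * Note (as observed from the programming below): the AFI_FPCI_BAR* registers
 * take the FPCI base address shifted right by 8 bits; the bases are 4 KiB
 * aligned, so the resulting low nibble is free and is reused as an
 * access-type flag (0x1 selects memory transactions).
 */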
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 size;
	struct resource_entry *entry;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);

	/* Bar 0: type 1 extended configuration space */
	size = resource_size(&pcie->cs);
	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);

	resource_list_for_each_entry(entry, &bridge->windows) {
		u32 fpci_bar, axi_address;
		struct resource *res = entry->res;

		size = resource_size(res);

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			/* Bar 1: downstream IO bar */
			fpci_bar = 0xfdfc0000;
			axi_address = pci_pio_to_address(res->start);
			afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
			afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
			afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
			break;
		case IORESOURCE_MEM:
			fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1;
			axi_address = res->start;

			if (res->flags & IORESOURCE_PREFETCH) {
				/* Bar 2: prefetchable memory BAR */
				afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
				afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

			} else {
				/* Bar 3: non prefetchable memory BAR */
				afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
				afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
			}
			break;
		}
	}

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	if (pcie->soc->has_cache_bars) {
		/* map all upstream transactions as uncached */
		afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
	}

	/* MSI translations are set up only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}

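/* Wait for the PADS PLL to lock; the timeout is given in milliseconds */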
static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		if (value & PADS_PLL_CTL_LOCKDET)
			return 0;
	}

	return -ETIMEDOUT;
}

static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs: select PLLE output as refclock and set
	 * the TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset  */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}

static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* override IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}

static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_on(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_off(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
				err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;

	list_for_each_entry(port, &pcie->ports, list) {
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
		value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	}

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	/* Disable AFI dynamic clock gating and enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
}

static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->afi_rst);

	clk_disable_unprepare(pcie->pll_e);
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
	clk_disable_unprepare(pcie->afi_clk);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}

static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(dev, "failed to enable regulators: %d\n", err);

	if (!dev->pm_domain) {
		err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to power ungate: %d\n", err);
			goto regulator_disable;
		}
		err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to remove clamp: %d\n", err);
			goto powergate;
		}
	}

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(dev, "failed to enable AFI clock: %d\n", err);
		goto powergate;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(dev, "failed to enable CML clock: %d\n", err);
			goto disable_afi_clk;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
		goto disable_cml_clk;
	}

	reset_control_deassert(pcie->afi_rst);

	return 0;

disable_cml_clk:
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
disable_afi_clk:
	clk_disable_unprepare(pcie->afi_clk);
powergate:
	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
regulator_disable:
	regulator_bulk_disable(pcie->num_supplies, pcie->supplies);

	return err;
}

static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	/* Configure the reference clock driver */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
}

static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;

	pcie->pex_clk = devm_clk_get(dev, "pex");
	if (IS_ERR(pcie->pex_clk))
		return PTR_ERR(pcie->pex_clk);

	pcie->afi_clk = devm_clk_get(dev, "afi");
	if (IS_ERR(pcie->afi_clk))
		return PTR_ERR(pcie->afi_clk);

	pcie->pll_e = devm_clk_get(dev, "pll_e");
	if (IS_ERR(pcie->pll_e))
		return PTR_ERR(pcie->pll_e);

	if (soc->has_cml_clk) {
		pcie->cml_clk = devm_clk_get(dev, "cml");
		if (IS_ERR(pcie->cml_clk))
			return PTR_ERR(pcie->cml_clk);
	}

	return 0;
}

static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;

	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
	if (IS_ERR(pcie->pex_rst))
		return PTR_ERR(pcie->pex_rst);

	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
	if (IS_ERR(pcie->afi_rst))
		return PTR_ERR(pcie->afi_rst);

	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
	if (IS_ERR(pcie->pcie_xrst))
		return PTR_ERR(pcie->pcie_xrst);

	return 0;
}

static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	pcie->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	pcie->legacy_phy = true;

	return 0;
}

static struct phy *devm_of_phy_optional_get_index(struct device *dev,
						  struct device_node *np,
						  const char *consumer,
						  unsigned int index)
{
	struct phy *phy;
	char *name;

	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
	if (!name)
		return ERR_PTR(-ENOMEM);

	phy = devm_of_phy_optional_get(dev, np, name);
	kfree(name);

	return phy;
}

static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	struct phy *phy;
	unsigned int i;
	int err;

	port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
	if (!port->phys)
		return -ENOMEM;

	for (i = 0; i < port->lanes; i++) {
		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
		if (IS_ERR(phy)) {
			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
				PTR_ERR(phy));
			return PTR_ERR(phy);
		}

		err = phy_init(phy);
		if (err < 0) {
			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
				err);
			return err;
		}

		port->phys[i] = phy;
	}

	return 0;
}

static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct device_node *np = pcie->dev->of_node;
	struct tegra_pcie_port *port;
	int err;

	if (!soc->has_gen2 || of_property_present(np, "phys"))
		return tegra_pcie_phys_get_legacy(pcie);

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_get_phys(port);
		if (err < 0)
			return err;
	}

	return 0;
}

static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port;
	struct device *dev = pcie->dev;
	int err, i;

	if (pcie->legacy_phy) {
		err = phy_exit(pcie->phy);
		if (err < 0)
			dev_err(dev, "failed to teardown PHY: %d\n", err);
		return;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		for (i = 0; i < port->lanes; i++) {
			err = phy_exit(port->phys[i]);
			if (err < 0)
				dev_err(dev, "failed to teardown PHY#%u: %d\n",
					i, err);
		}
	}
}

static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(dev, "failed to get resets: %d\n", err);
		return err;
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phys_get(pcie);
		if (err < 0) {
			dev_err(dev, "failed to get PHYs: %d\n", err);
			return err;
		}
	}

	pcie->pads = devm_platform_ioremap_resource_byname(pdev, "pads");
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto phys_put;
	}

	pcie->afi = devm_platform_ioremap_resource_byname(pdev, "afi");
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto phys_put;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto phys_put;
	}

	pcie->cs = *res;

	/* constrain configuration space to 4 KiB */
	pcie->cs.end = pcie->cs.start + SZ_4K - 1;

	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
	if (IS_ERR(pcie->cfg)) {
		err = PTR_ERR(pcie->cfg);
		goto phys_put;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0)
		goto phys_put;

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(dev, "failed to register IRQ: %d\n", err);
		goto phys_put;
	}

	return 0;

phys_put:
	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);

	return err;
}

static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);

	return 0;
}

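/*
 * Broadcast a PME_Turn_Off message on the port and poll for the PME_TO_Ack
 * response so the link can settle in L2 before power is removed.
 */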
static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;
	u32 val;
	u8 ack_bit;

	val = afi_readl(pcie, AFI_PCIE_PME);
	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);

	ack_bit = soc->ports[port->index].pme.ack_bit;
	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
	if (err)
		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
			port->index);

	usleep_range(10000, 11000);

	val = afi_readl(pcie, AFI_PCIE_PME);
	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);
}

static void tegra_pcie_msi_irq(struct irq_desc *desc)
{
	struct tegra_pcie *pcie = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct tegra_msi *msi = &pcie->msi;
	struct device *dev = pcie->dev;
	unsigned int i;

	chained_irq_enter(chip, desc);

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC(i));

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			int ret;

			ret = generic_handle_domain_irq(msi->domain->parent, index);
			if (ret) {
				/*
				 * That's weird; who triggered this?
				 * Just clear it.
				 */
				dev_info(dev, "unexpected MSI\n");
				afi_writel(pcie, BIT(index % 32), AFI_MSI_VEC(i));
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC(i));
		}
	}

	chained_irq_exit(chip, desc);
}

static void tegra_msi_top_irq_ack(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void tegra_msi_top_irq_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void tegra_msi_top_irq_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip tegra_msi_top_chip = {
	.name		= "Tegra PCIe MSI",
	.irq_ack	= tegra_msi_top_irq_ack,
	.irq_mask	= tegra_msi_top_irq_mask,
	.irq_unmask	= tegra_msi_top_irq_unmask,
};

static void tegra_msi_irq_ack(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;

	/* clear the interrupt */
	afi_writel(pcie, BIT(d->hwirq % 32), AFI_MSI_VEC(index));
}

static void tegra_msi_irq_mask(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&msi->mask_lock, flags);
	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
	value &= ~BIT(d->hwirq % 32);
	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
	spin_unlock_irqrestore(&msi->mask_lock, flags);
}

static void tegra_msi_irq_unmask(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&msi->mask_lock, flags);
	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
	value |= BIT(d->hwirq % 32);
	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
	spin_unlock_irqrestore(&msi->mask_lock, flags);
}

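/*
 * The MSI message programmed into endpoints targets the dummy DMA page
 * allocated in tegra_pcie_msi_setup(); the message data carries the hwirq
 * number, which the AFI latches into the corresponding AFI_MSI_VEC() bit.
 */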
tegra_compose_msi_msg(struct irq_data * data,struct msi_msg * msg)1632  static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1633  {
1634  	struct tegra_msi *msi = irq_data_get_irq_chip_data(data);
1635  
1636  	msg->address_lo = lower_32_bits(msi->phys);
1637  	msg->address_hi = upper_32_bits(msi->phys);
1638  	msg->data = data->hwirq;
1639  }
1640  
1641  static struct irq_chip tegra_msi_bottom_chip = {
1642  	.name			= "Tegra MSI",
1643  	.irq_ack		= tegra_msi_irq_ack,
1644  	.irq_mask		= tegra_msi_irq_mask,
1645  	.irq_unmask		= tegra_msi_irq_unmask,
1646  	.irq_compose_msi_msg	= tegra_compose_msi_msg,
1647  };
1648  
tegra_msi_domain_alloc(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs,void * args)1649  static int tegra_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
1650  				  unsigned int nr_irqs, void *args)
1651  {
1652  	struct tegra_msi *msi = domain->host_data;
1653  	unsigned int i;
1654  	int hwirq;
1655  
1656  	mutex_lock(&msi->map_lock);
1657  
1658  	hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));
1659  
1660  	mutex_unlock(&msi->map_lock);
1661  
1662  	if (hwirq < 0)
1663  		return -ENOSPC;
1664  
1665  	for (i = 0; i < nr_irqs; i++)
1666  		irq_domain_set_info(domain, virq + i, hwirq + i,
1667  				    &tegra_msi_bottom_chip, domain->host_data,
1668  				    handle_edge_irq, NULL, NULL);
1669  
1670  	tegra_cpuidle_pcie_irqs_in_use();
1671  
1672  	return 0;
1673  }
1674  
tegra_msi_domain_free(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs)1675  static void tegra_msi_domain_free(struct irq_domain *domain, unsigned int virq,
1676  				  unsigned int nr_irqs)
1677  {
1678  	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
1679  	struct tegra_msi *msi = domain->host_data;
1680  
1681  	mutex_lock(&msi->map_lock);
1682  
1683  	bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));
1684  
1685  	mutex_unlock(&msi->map_lock);
1686  }
1687  
1688  static const struct irq_domain_ops tegra_msi_domain_ops = {
1689  	.alloc = tegra_msi_domain_alloc,
1690  	.free = tegra_msi_domain_free,
1691  };
1692  
1693  static struct msi_domain_info tegra_msi_info = {
1694  	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
1695  		  MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
1696  	.chip	= &tegra_msi_top_chip,
1697  };
1698  
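/*
 * Set up a two-level IRQ domain hierarchy: a linear "nexus" domain that
 * manages the INT_PCI_MSI_NR hardware vectors, with the generic PCI/MSI
 * domain stacked on top of it.
 */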
1699  static int tegra_allocate_domains(struct tegra_msi *msi)
1700  {
1701  	struct tegra_pcie *pcie = msi_to_pcie(msi);
1702  	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
1703  	struct irq_domain *parent;
1704  
1705  	parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
1706  					  &tegra_msi_domain_ops, msi);
1707  	if (!parent) {
1708  		dev_err(pcie->dev, "failed to create IRQ domain\n");
1709  		return -ENOMEM;
1710  	}
1711  	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
1712  
1713  	msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent);
1714  	if (!msi->domain) {
1715  		dev_err(pcie->dev, "failed to create MSI domain\n");
1716  		irq_domain_remove(parent);
1717  		return -ENOMEM;
1718  	}
1719  
1720  	return 0;
1721  }
1722  
1723  static void tegra_free_domains(struct tegra_msi *msi)
1724  {
1725  	struct irq_domain *parent = msi->domain->parent;
1726  
1727  	irq_domain_remove(msi->domain);
1728  	irq_domain_remove(parent);
1729  }
1730  
1731  static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
1732  {
1733  	struct platform_device *pdev = to_platform_device(pcie->dev);
1734  	struct tegra_msi *msi = &pcie->msi;
1735  	struct device *dev = pcie->dev;
1736  	int err;
1737  
1738  	mutex_init(&msi->map_lock);
1739  	spin_lock_init(&msi->mask_lock);
1740  
1741  	if (IS_ENABLED(CONFIG_PCI_MSI)) {
1742  		err = tegra_allocate_domains(msi);
1743  		if (err)
1744  			return err;
1745  	}
1746  
1747  	err = platform_get_irq_byname(pdev, "msi");
1748  	if (err < 0)
1749  		goto free_irq_domain;
1750  
1751  	msi->irq = err;
1752  
1753  	irq_set_chained_handler_and_data(msi->irq, tegra_pcie_msi_irq, pcie);
1754  
1755  	/*
1756  	 * Though the controller can address >32 bits, set a 32-bit coherent
1757  	 * DMA mask so that the MSI target address is always a 32-bit address,
1758  	 * to accommodate endpoints that only support 32-bit MSI targets.
1759  	 */
1760  	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
1761  	if (err < 0) {
1762  		dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
1763  		goto free_irq;
1764  	}
1765  
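	/*
	 * Allocate one page to serve as the MSI target. The CPU never
	 * accesses this page and MSI writes to it are intercepted by the
	 * controller, hence DMA_ATTR_NO_KERNEL_MAPPING.
	 */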
1766  	msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
1767  				    DMA_ATTR_NO_KERNEL_MAPPING);
1768  	if (!msi->virt) {
1769  		dev_err(dev, "failed to allocate DMA memory for MSI\n");
1770  		err = -ENOMEM;
1771  		goto free_irq;
1772  	}
1773  
1774  	return 0;
1775  
1776  free_irq:
1777  	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
1778  free_irq_domain:
1779  	if (IS_ENABLED(CONFIG_PCI_MSI))
1780  		tegra_free_domains(msi);
1781  
1782  	return err;
1783  }
1784  
1785  static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1786  {
1787  	const struct tegra_pcie_soc *soc = pcie->soc;
1788  	struct tegra_msi *msi = &pcie->msi;
1789  	u32 reg, msi_state[INT_PCI_MSI_NR / 32];
1790  	int i;
1791  
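	/* program the MSI target address into the FPCI and AXI apertures */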
1792  	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1793  	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
1794  	/* this register is in 4K increments */
1795  	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1796  
1797  	/* Restore the MSI allocation state */
1798  	bitmap_to_arr32(msi_state, msi->used, INT_PCI_MSI_NR);
1799  	for (i = 0; i < ARRAY_SIZE(msi_state); i++)
1800  		afi_writel(pcie, msi_state[i], AFI_MSI_EN_VEC(i));
1801  
1802  	/* and unmask the MSI interrupt */
1803  	reg = afi_readl(pcie, AFI_INTR_MASK);
1804  	reg |= AFI_INTR_MASK_MSI_MASK;
1805  	afi_writel(pcie, reg, AFI_INTR_MASK);
1806  }
1807  
1808  static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
1809  {
1810  	struct tegra_msi *msi = &pcie->msi;
1811  	unsigned int i, irq;
1812  
1813  	dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
1814  		       DMA_ATTR_NO_KERNEL_MAPPING);
1815  
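	/* dispose of any MSI vectors that are still mapped */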
1816  	for (i = 0; i < INT_PCI_MSI_NR; i++) {
1817  		irq = irq_find_mapping(msi->domain, i);
1818  		if (irq > 0)
1819  			irq_domain_free_irqs(irq, 1);
1820  	}
1821  
1822  	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
1823  
1824  	if (IS_ENABLED(CONFIG_PCI_MSI))
1825  		tegra_free_domains(msi);
1826  }
1827  
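/*
 * Only mask MSIs at the AFI level here; the per-vector enable state is
 * tracked in msi->used and restored by tegra_pcie_enable_msi() on resume.
 */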
1828  static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1829  {
1830  	u32 value;
1831  
1832  	/* mask the MSI interrupt */
1833  	value = afi_readl(pcie, AFI_INTR_MASK);
1834  	value &= ~AFI_INTR_MASK_MSI_MASK;
1835  	afi_writel(pcie, value, AFI_INTR_MASK);
1836  
1837  	return 0;
1838  }
1839  
1840  static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
1841  {
1842  	u32 value;
1843  
1844  	value = afi_readl(pcie, AFI_INTR_MASK);
1845  	value &= ~AFI_INTR_MASK_INT_MASK;
1846  	afi_writel(pcie, value, AFI_INTR_MASK);
1847  }
1848  
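/*
 * The "lanes" argument packs each root port's "nvidia,num-lanes" value
 * into one byte per port (assembled in tegra_pcie_parse_dt()). As an
 * illustrative (hypothetical) Tegra124 device tree fragment:
 *
 *	pci@1,0 { nvidia,num-lanes = <4>; };
 *	pci@2,0 { nvidia,num-lanes = <1>; };
 *
 * this yields lanes == 0x0104 and selects the X4_X1 crossbar
 * configuration below.
 */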
1849  static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1850  				      u32 *xbar)
1851  {
1852  	struct device *dev = pcie->dev;
1853  	struct device_node *np = dev->of_node;
1854  
1855  	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1856  		switch (lanes) {
1857  		case 0x010004:
1858  			dev_info(dev, "4x1, 1x1 configuration\n");
1859  			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
1860  			return 0;
1861  
1862  		case 0x010102:
1863  			dev_info(dev, "2x1, 1x1, 1x1 configuration\n");
1864  			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1865  			return 0;
1866  
1867  		case 0x010101:
1868  			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
1869  			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
1870  			return 0;
1871  
1872  		default:
1873  			dev_info(dev, "invalid lane configuration in DT, "
1874  				 "falling back to default 2x1, 1x1, 1x1 "
1875  				 "configuration\n");
1876  			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1877  			return 0;
1878  		}
1879  	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
1880  		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1881  		switch (lanes) {
1882  		case 0x0000104:
1883  			dev_info(dev, "4x1, 1x1 configuration\n");
1884  			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1885  			return 0;
1886  
1887  		case 0x0000102:
1888  			dev_info(dev, "2x1, 1x1 configuration\n");
1889  			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1890  			return 0;
1891  		}
1892  	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1893  		switch (lanes) {
1894  		case 0x00000204:
1895  			dev_info(dev, "4x1, 2x1 configuration\n");
1896  			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1897  			return 0;
1898  
1899  		case 0x00020202:
1900  			dev_info(dev, "2x3 configuration\n");
1901  			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1902  			return 0;
1903  
1904  		case 0x00010104:
1905  			dev_info(dev, "4x1, 1x2 configuration\n");
1906  			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1907  			return 0;
1908  		}
1909  	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1910  		switch (lanes) {
1911  		case 0x00000004:
1912  			dev_info(dev, "single-mode configuration\n");
1913  			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1914  			return 0;
1915  
1916  		case 0x00000202:
1917  			dev_info(dev, "dual-mode configuration\n");
1918  			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1919  			return 0;
1920  		}
1921  	}
1922  
1923  	return -EINVAL;
1924  }
1925  
1926  /*
1927   * Check whether a given set of supplies is available in a device tree node.
1928   * This is used to check whether the new or the legacy device tree bindings
1929   * should be used.
1930   */
1931  static bool of_regulator_bulk_available(struct device_node *np,
1932  					struct regulator_bulk_data *supplies,
1933  					unsigned int num_supplies)
1934  {
1935  	char property[32];
1936  	unsigned int i;
1937  
1938  	for (i = 0; i < num_supplies; i++) {
1939  		snprintf(property, sizeof(property), "%s-supply", supplies[i].supply);
1940  
1941  		if (!of_property_present(np, property))
1942  			return false;
1943  	}
1944  
1945  	return true;
1946  }
1947  
1948  /*
1949   * Old versions of the device tree binding for this device used a set of power
1950   * supplies that didn't match the hardware inputs. This happened to work for a
1951   * number of cases but is not future-proof. However, to preserve backwards-
1952   * compatibility with old device trees, this function will try to use the old
1953   * set of supplies.
1954   */
1955  static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1956  {
1957  	struct device *dev = pcie->dev;
1958  	struct device_node *np = dev->of_node;
1959  
1960  	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1961  		pcie->num_supplies = 3;
1962  	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1963  		pcie->num_supplies = 2;
1964  
1965  	if (pcie->num_supplies == 0) {
1966  		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
1967  		return -ENODEV;
1968  	}
1969  
1970  	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1971  				      sizeof(*pcie->supplies),
1972  				      GFP_KERNEL);
1973  	if (!pcie->supplies)
1974  		return -ENOMEM;
1975  
1976  	pcie->supplies[0].supply = "pex-clk";
1977  	pcie->supplies[1].supply = "vdd";
1978  
1979  	if (pcie->num_supplies > 2)
1980  		pcie->supplies[2].supply = "avdd";
1981  
1982  	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
1983  }
1984  
1985  /*
1986   * Obtains the list of regulators required for a particular generation of the
1987   * IP block.
1988   *
1989   * This would've been nice to do simply by providing static tables for use
1990   * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1991   * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1992   * and either seems to be optional depending on which ports are being used.
1993   */
1994  static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1995  {
1996  	struct device *dev = pcie->dev;
1997  	struct device_node *np = dev->of_node;
1998  	unsigned int i = 0;
1999  
2000  	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
2001  		pcie->num_supplies = 4;
2002  
2003  		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2004  					      sizeof(*pcie->supplies),
2005  					      GFP_KERNEL);
2006  		if (!pcie->supplies)
2007  			return -ENOMEM;
2008  
2009  		pcie->supplies[i++].supply = "dvdd-pex";
2010  		pcie->supplies[i++].supply = "hvdd-pex-pll";
2011  		pcie->supplies[i++].supply = "hvdd-pex";
2012  		pcie->supplies[i++].supply = "vddio-pexctl-aud";
2013  	} else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
2014  		pcie->num_supplies = 3;
2015  
2016  		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2017  					      sizeof(*pcie->supplies),
2018  					      GFP_KERNEL);
2019  		if (!pcie->supplies)
2020  			return -ENOMEM;
2021  
2022  		pcie->supplies[i++].supply = "hvddio-pex";
2023  		pcie->supplies[i++].supply = "dvddio-pex";
2024  		pcie->supplies[i++].supply = "vddio-pex-ctl";
2025  	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
2026  		pcie->num_supplies = 4;
2027  
2028  		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2029  					      sizeof(*pcie->supplies),
2030  					      GFP_KERNEL);
2031  		if (!pcie->supplies)
2032  			return -ENOMEM;
2033  
2034  		pcie->supplies[i++].supply = "avddio-pex";
2035  		pcie->supplies[i++].supply = "dvddio-pex";
2036  		pcie->supplies[i++].supply = "hvdd-pex";
2037  		pcie->supplies[i++].supply = "vddio-pex-ctl";
2038  	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
2039  		bool need_pexa = false, need_pexb = false;
2040  
2041  		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
2042  		if (lane_mask & 0x0f)
2043  			need_pexa = true;
2044  
2045  		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
2046  		if (lane_mask & 0x30)
2047  			need_pexb = true;
2048  
2049  		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
2050  					 (need_pexb ? 2 : 0);
2051  
2052  		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2053  					      sizeof(*pcie->supplies),
2054  					      GFP_KERNEL);
2055  		if (!pcie->supplies)
2056  			return -ENOMEM;
2057  
2058  		pcie->supplies[i++].supply = "avdd-pex-pll";
2059  		pcie->supplies[i++].supply = "hvdd-pex";
2060  		pcie->supplies[i++].supply = "vddio-pex-ctl";
2061  		pcie->supplies[i++].supply = "avdd-plle";
2062  
2063  		if (need_pexa) {
2064  			pcie->supplies[i++].supply = "avdd-pexa";
2065  			pcie->supplies[i++].supply = "vdd-pexa";
2066  		}
2067  
2068  		if (need_pexb) {
2069  			pcie->supplies[i++].supply = "avdd-pexb";
2070  			pcie->supplies[i++].supply = "vdd-pexb";
2071  		}
2072  	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
2073  		pcie->num_supplies = 5;
2074  
2075  		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2076  					      sizeof(*pcie->supplies),
2077  					      GFP_KERNEL);
2078  		if (!pcie->supplies)
2079  			return -ENOMEM;
2080  
2081  		pcie->supplies[0].supply = "avdd-pex";
2082  		pcie->supplies[1].supply = "vdd-pex";
2083  		pcie->supplies[2].supply = "avdd-pex-pll";
2084  		pcie->supplies[3].supply = "avdd-plle";
2085  		pcie->supplies[4].supply = "vddio-pex-clk";
2086  	}
2087  
2088  	if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
2089  					pcie->num_supplies))
2090  		return devm_regulator_bulk_get(dev, pcie->num_supplies,
2091  					       pcie->supplies);
2092  
2093  	/*
2094  	 * If not all regulators are available for this new scheme, assume
2095  	 * that the device tree complies with an older version of the device
2096  	 * tree binding.
2097  	 */
2098  	dev_info(dev, "using legacy DT binding for power supplies\n");
2099  
2100  	devm_kfree(dev, pcie->supplies);
2101  	pcie->num_supplies = 0;
2102  
2103  	return tegra_pcie_get_legacy_regulators(pcie);
2104  }
2105  
2106  static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2107  {
2108  	struct device *dev = pcie->dev;
2109  	struct device_node *np = dev->of_node, *port;
2110  	const struct tegra_pcie_soc *soc = pcie->soc;
2111  	u32 lanes = 0, mask = 0;
2112  	unsigned int lane = 0;
2113  	int err;
2114  
2115  	/* parse root ports */
2116  	for_each_child_of_node(np, port) {
2117  		struct tegra_pcie_port *rp;
2118  		unsigned int index;
2119  		u32 value;
2120  		char *label;
2121  
2122  		err = of_pci_get_devfn(port);
2123  		if (err < 0) {
2124  			dev_err(dev, "failed to parse address: %d\n", err);
2125  			goto err_node_put;
2126  		}
2127  
2128  		index = PCI_SLOT(err);
2129  
2130  		if (index < 1 || index > soc->num_ports) {
2131  			dev_err(dev, "invalid port number: %u\n", index);
2132  			err = -EINVAL;
2133  			goto err_node_put;
2134  		}
2135  
2136  		index--;
2137  
2138  		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
2139  		if (err < 0) {
2140  			dev_err(dev, "failed to parse # of lanes: %d\n",
2141  				err);
2142  			goto err_node_put;
2143  		}
2144  
2145  		if (value > 16) {
2146  			dev_err(dev, "invalid # of lanes: %u\n", value);
2147  			err = -EINVAL;
2148  			goto err_node_put;
2149  		}
2150  
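		/* pack this port's lane count into its per-port byte */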
2151  		lanes |= value << (index << 3);
2152  
2153  		if (!of_device_is_available(port)) {
2154  			lane += value;
2155  			continue;
2156  		}
2157  
2158  		mask |= ((1 << value) - 1) << lane;
2159  		lane += value;
2160  
2161  		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2162  		if (!rp) {
2163  			err = -ENOMEM;
2164  			goto err_node_put;
2165  		}
2166  
2167  		err = of_address_to_resource(port, 0, &rp->regs);
2168  		if (err < 0) {
2169  			dev_err(dev, "failed to parse address: %d\n", err);
2170  			goto err_node_put;
2171  		}
2172  
2173  		INIT_LIST_HEAD(&rp->list);
2174  		rp->index = index;
2175  		rp->lanes = value;
2176  		rp->pcie = pcie;
2177  		rp->np = port;
2178  
2179  		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
2180  		if (IS_ERR(rp->base)) {
2181  			err = PTR_ERR(rp->base);
2182  			goto err_node_put;
2183  		}
2184  
2185  		label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
2186  		if (!label) {
2187  			err = -ENOMEM;
2188  			goto err_node_put;
2189  		}
2190  
2191  		/*
2192  		 * devm_fwnode_gpiod_get() returns -ENOENT if the reset-gpios
2193  		 * property is not populated; in that case, fall back to using
2194  		 * the per-port AFI register to toggle the PERST# SFIO line.
2195  		 */
2196  		rp->reset_gpio = devm_fwnode_gpiod_get(dev,
2197  						       of_fwnode_handle(port),
2198  						       "reset",
2199  						       GPIOD_OUT_LOW,
2200  						       label);
2201  		if (IS_ERR(rp->reset_gpio)) {
2202  			if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
2203  				rp->reset_gpio = NULL;
2204  			} else {
2205  				dev_err(dev, "failed to get reset GPIO: %ld\n",
2206  					PTR_ERR(rp->reset_gpio));
2207  				err = PTR_ERR(rp->reset_gpio);
2208  				goto err_node_put;
2209  			}
2210  		}
2211  
2212  		list_add_tail(&rp->list, &pcie->ports);
2213  	}
2214  
2215  	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
2216  	if (err < 0) {
2217  		dev_err(dev, "invalid lane configuration\n");
2218  		return err;
2219  	}
2220  
2221  	err = tegra_pcie_get_regulators(pcie, mask);
2222  	if (err < 0)
2223  		return err;
2224  
2225  	return 0;
2226  
2227  err_node_put:
2228  	of_node_put(port);
2229  	return err;
2230  }
2231  
2232  /*
2233   * FIXME: If there are no PCIe cards attached, calling this function can
2234   * noticeably increase boot time because of its long timeout loops.
2236   */
2237  #define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
2238  static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
2239  {
2240  	struct device *dev = port->pcie->dev;
2241  	unsigned int retries = 3;
2242  	unsigned long value;
2243  
2244  	/* override presence detection */
2245  	value = readl(port->base + RP_PRIV_MISC);
2246  	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
2247  	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
2248  	writel(value, port->base + RP_PRIV_MISC);
2249  
2250  	do {
2251  		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2252  
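		/* wait for the data link layer to report up */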
2253  		do {
2254  			value = readl(port->base + RP_VEND_XP);
2255  
2256  			if (value & RP_VEND_XP_DL_UP)
2257  				break;
2258  
2259  			usleep_range(1000, 2000);
2260  		} while (--timeout);
2261  
2262  		if (!timeout) {
2263  			dev_dbg(dev, "link %u down, retrying\n", port->index);
2264  			goto retry;
2265  		}
2266  
2267  		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2268  
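		/* then wait for the link itself to become active */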
2269  		do {
2270  			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2271  
2272  			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2273  				return true;
2274  
2275  			usleep_range(1000, 2000);
2276  		} while (--timeout);
2277  
2278  retry:
2279  		tegra_pcie_port_reset(port);
2280  	} while (--retries);
2281  
2282  	return false;
2283  }
2284  
2285  static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
2286  {
2287  	struct device *dev = pcie->dev;
2288  	struct tegra_pcie_port *port;
2289  	ktime_t deadline;
2290  	u32 value;
2291  
2292  	list_for_each_entry(port, &pcie->ports, list) {
2293  		/*
2294  		 * "Supported Link Speeds Vector" in "Link Capabilities 2"
2295  		 * is not supported by Tegra. tegra_pcie_change_link_speed()
2296  		 * is called only for Tegra chips which support Gen2.
2297  		 * So there is no harm if the supported link speed is not verified.
2298  		 */
2299  		value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
2300  		value &= ~PCI_EXP_LNKSTA_CLS;
2301  		value |= PCI_EXP_LNKSTA_CLS_5_0GB;
2302  		writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
2303  
2304  		/*
2305  		 * Poll until link comes back from recovery to avoid race
2306  		 * Poll until the link comes back from recovery to avoid a race
2307  		 */
2308  		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2309  
2310  		while (ktime_before(ktime_get(), deadline)) {
2311  			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2312  			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2313  				break;
2314  
2315  			usleep_range(2000, 3000);
2316  		}
2317  
2318  		if (value & PCI_EXP_LNKSTA_LT)
2319  			dev_warn(dev, "PCIe port %u link is in recovery\n",
2320  				 port->index);
2321  
2322  		/* Retrain the link */
2323  		value = readl(port->base + RP_LINK_CONTROL_STATUS);
2324  		value |= PCI_EXP_LNKCTL_RL;
2325  		writel(value, port->base + RP_LINK_CONTROL_STATUS);
2326  
2327  		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2328  
2329  		while (ktime_before(ktime_get(), deadline)) {
2330  			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2331  			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2332  				break;
2333  
2334  			usleep_range(2000, 3000);
2335  		}
2336  
2337  		if (value & PCI_EXP_LNKSTA_LT)
2338  			dev_err(dev, "failed to retrain link of port %u\n",
2339  				port->index);
2340  	}
2341  }
2342  
2343  static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
2344  {
2345  	struct device *dev = pcie->dev;
2346  	struct tegra_pcie_port *port, *tmp;
2347  
2348  	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2349  		dev_info(dev, "probing port %u, using %u lanes\n",
2350  			 port->index, port->lanes);
2351  
2352  		tegra_pcie_port_enable(port);
2353  	}
2354  
2355  	/* Start LTSSM from Tegra side */
2356  	reset_control_deassert(pcie->pcie_xrst);
2357  
2358  	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2359  		if (tegra_pcie_port_check_link(port))
2360  			continue;
2361  
2362  		dev_info(dev, "link %u down, ignoring\n", port->index);
2363  
2364  		tegra_pcie_port_disable(port);
2365  		tegra_pcie_port_free(port);
2366  	}
2367  
2368  	if (pcie->soc->has_gen2)
2369  		tegra_pcie_change_link_speed(pcie);
2370  }
2371  
2372  static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
2373  {
2374  	struct tegra_pcie_port *port, *tmp;
2375  
2376  	reset_control_assert(pcie->pcie_xrst);
2377  
2378  	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2379  		tegra_pcie_port_disable(port);
2380  }
2381  
2382  static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
2383  	{ .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
2384  	{ .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
2385  };
2386  
2387  static const struct tegra_pcie_soc tegra20_pcie = {
2388  	.num_ports = 2,
2389  	.ports = tegra20_pcie_ports,
2390  	.msi_base_shift = 0,
2391  	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2392  	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2393  	.pads_refclk_cfg0 = 0xfa5cfa5c,
2394  	.has_pex_clkreq_en = false,
2395  	.has_pex_bias_ctrl = false,
2396  	.has_intr_prsnt_sense = false,
2397  	.has_cml_clk = false,
2398  	.has_gen2 = false,
2399  	.force_pca_enable = false,
2400  	.program_uphy = true,
2401  	.update_clamp_threshold = false,
2402  	.program_deskew_time = false,
2403  	.update_fc_timer = false,
2404  	.has_cache_bars = true,
2405  	.ectl.enable = false,
2406  };
2407  
2408  static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
2409  	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2410  	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2411  	{ .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
2412  };
2413  
2414  static const struct tegra_pcie_soc tegra30_pcie = {
2415  	.num_ports = 3,
2416  	.ports = tegra30_pcie_ports,
2417  	.msi_base_shift = 8,
2418  	.afi_pex2_ctrl = 0x128,
2419  	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2420  	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2421  	.pads_refclk_cfg0 = 0xfa5cfa5c,
2422  	.pads_refclk_cfg1 = 0xfa5cfa5c,
2423  	.has_pex_clkreq_en = true,
2424  	.has_pex_bias_ctrl = true,
2425  	.has_intr_prsnt_sense = true,
2426  	.has_cml_clk = true,
2427  	.has_gen2 = false,
2428  	.force_pca_enable = false,
2429  	.program_uphy = true,
2430  	.update_clamp_threshold = false,
2431  	.program_deskew_time = false,
2432  	.update_fc_timer = false,
2433  	.has_cache_bars = false,
2434  	.ectl.enable = false,
2435  };
2436  
2437  static const struct tegra_pcie_soc tegra124_pcie = {
2438  	.num_ports = 2,
2439  	.ports = tegra20_pcie_ports,
2440  	.msi_base_shift = 8,
2441  	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2442  	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2443  	.pads_refclk_cfg0 = 0x44ac44ac,
2444  	.has_pex_clkreq_en = true,
2445  	.has_pex_bias_ctrl = true,
2446  	.has_intr_prsnt_sense = true,
2447  	.has_cml_clk = true,
2448  	.has_gen2 = true,
2449  	.force_pca_enable = false,
2450  	.program_uphy = true,
2451  	.update_clamp_threshold = true,
2452  	.program_deskew_time = false,
2453  	.update_fc_timer = false,
2454  	.has_cache_bars = false,
2455  	.ectl.enable = false,
2456  };
2457  
2458  static const struct tegra_pcie_soc tegra210_pcie = {
2459  	.num_ports = 2,
2460  	.ports = tegra20_pcie_ports,
2461  	.msi_base_shift = 8,
2462  	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2463  	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2464  	.pads_refclk_cfg0 = 0x90b890b8,
2465  	/* FC threshold is bit[25:18] */
2466  	.update_fc_threshold = 0x01800000,
2467  	.has_pex_clkreq_en = true,
2468  	.has_pex_bias_ctrl = true,
2469  	.has_intr_prsnt_sense = true,
2470  	.has_cml_clk = true,
2471  	.has_gen2 = true,
2472  	.force_pca_enable = true,
2473  	.program_uphy = true,
2474  	.update_clamp_threshold = true,
2475  	.program_deskew_time = true,
2476  	.update_fc_timer = true,
2477  	.has_cache_bars = false,
2478  	.ectl = {
2479  		.regs = {
2480  			.rp_ectl_2_r1 = 0x0000000f,
2481  			.rp_ectl_4_r1 = 0x00000067,
2482  			.rp_ectl_5_r1 = 0x55010000,
2483  			.rp_ectl_6_r1 = 0x00000001,
2484  			.rp_ectl_2_r2 = 0x0000008f,
2485  			.rp_ectl_4_r2 = 0x000000c7,
2486  			.rp_ectl_5_r2 = 0x55010000,
2487  			.rp_ectl_6_r2 = 0x00000001,
2488  		},
2489  		.enable = true,
2490  	},
2491  };
2492  
2493  static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
2494  	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2495  	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2496  	{ .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
2497  };
2498  
2499  static const struct tegra_pcie_soc tegra186_pcie = {
2500  	.num_ports = 3,
2501  	.ports = tegra186_pcie_ports,
2502  	.msi_base_shift = 8,
2503  	.afi_pex2_ctrl = 0x19c,
2504  	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2505  	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2506  	.pads_refclk_cfg0 = 0x80b880b8,
2507  	.pads_refclk_cfg1 = 0x000480b8,
2508  	.has_pex_clkreq_en = true,
2509  	.has_pex_bias_ctrl = true,
2510  	.has_intr_prsnt_sense = true,
2511  	.has_cml_clk = false,
2512  	.has_gen2 = true,
2513  	.force_pca_enable = false,
2514  	.program_uphy = false,
2515  	.update_clamp_threshold = false,
2516  	.program_deskew_time = false,
2517  	.update_fc_timer = false,
2518  	.has_cache_bars = false,
2519  	.ectl.enable = false,
2520  };
2521  
2522  static const struct of_device_id tegra_pcie_of_match[] = {
2523  	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
2524  	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
2525  	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
2526  	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
2527  	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
2528  	{ },
2529  };
2530  MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
2531  
2532  static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2533  {
2534  	struct tegra_pcie *pcie = s->private;
2535  
2536  	if (list_empty(&pcie->ports))
2537  		return NULL;
2538  
2539  	seq_puts(s, "Index  Status\n");
2540  
2541  	return seq_list_start(&pcie->ports, *pos);
2542  }
2543  
2544  static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
2545  {
2546  	struct tegra_pcie *pcie = s->private;
2547  
2548  	return seq_list_next(v, &pcie->ports, pos);
2549  }
2550  
2551  static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
2552  {
2553  }
2554  
2555  static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2556  {
2557  	bool up = false, active = false;
2558  	struct tegra_pcie_port *port;
2559  	unsigned int value;
2560  
2561  	port = list_entry(v, struct tegra_pcie_port, list);
2562  
2563  	value = readl(port->base + RP_VEND_XP);
2564  
2565  	if (value & RP_VEND_XP_DL_UP)
2566  		up = true;
2567  
2568  	value = readl(port->base + RP_LINK_CONTROL_STATUS);
2569  
2570  	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2571  		active = true;
2572  
2573  	seq_printf(s, "%2u     ", port->index);
2574  
2575  	if (up)
2576  		seq_puts(s, "up");
2577  
2578  	if (active) {
2579  		if (up)
2580  			seq_puts(s, ", ");
2581  
2582  		seq_puts(s, "active");
2583  	}
2584  
2585  	seq_puts(s, "\n");
2586  	return 0;
2587  }
2588  
2589  static const struct seq_operations tegra_pcie_ports_sops = {
2590  	.start = tegra_pcie_ports_seq_start,
2591  	.next = tegra_pcie_ports_seq_next,
2592  	.stop = tegra_pcie_ports_seq_stop,
2593  	.show = tegra_pcie_ports_seq_show,
2594  };
2595  
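/* defines tegra_pcie_ports_fops on top of tegra_pcie_ports_sops */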
2596  DEFINE_SEQ_ATTRIBUTE(tegra_pcie_ports);
2597  
2598  static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
2599  {
2600  	debugfs_remove_recursive(pcie->debugfs);
2601  	pcie->debugfs = NULL;
2602  }
2603  
2604  static void tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2605  {
2606  	pcie->debugfs = debugfs_create_dir("pcie", NULL);
2607  
2608  	debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, pcie,
2609  			    &tegra_pcie_ports_fops);
2610  }
2611  
2612  static int tegra_pcie_probe(struct platform_device *pdev)
2613  {
2614  	struct device *dev = &pdev->dev;
2615  	struct pci_host_bridge *host;
2616  	struct tegra_pcie *pcie;
2617  	int err;
2618  
2619  	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
2620  	if (!host)
2621  		return -ENOMEM;
2622  
2623  	pcie = pci_host_bridge_priv(host);
2624  	host->sysdata = pcie;
2625  	platform_set_drvdata(pdev, pcie);
2626  
2627  	pcie->soc = of_device_get_match_data(dev);
2628  	INIT_LIST_HEAD(&pcie->ports);
2629  	pcie->dev = dev;
2630  
2631  	err = tegra_pcie_parse_dt(pcie);
2632  	if (err < 0)
2633  		return err;
2634  
2635  	err = tegra_pcie_get_resources(pcie);
2636  	if (err < 0) {
2637  		dev_err(dev, "failed to request resources: %d\n", err);
2638  		return err;
2639  	}
2640  
2641  	err = tegra_pcie_msi_setup(pcie);
2642  	if (err < 0) {
2643  		dev_err(dev, "failed to enable MSI support: %d\n", err);
2644  		goto put_resources;
2645  	}
2646  
2647  	pm_runtime_enable(pcie->dev);
2648  	err = pm_runtime_get_sync(pcie->dev);
2649  	if (err < 0) {
2650  		dev_err(dev, "failed to enable PCIe controller: %d\n", err);
2651  		goto pm_runtime_put;
2652  	}
2653  
2654  	host->ops = &tegra_pcie_ops;
2655  	host->map_irq = tegra_pcie_map_irq;
2656  
2657  	err = pci_host_probe(host);
2658  	if (err < 0) {
2659  		dev_err(dev, "failed to register host: %d\n", err);
2660  		goto pm_runtime_put;
2661  	}
2662  
2663  	if (IS_ENABLED(CONFIG_DEBUG_FS))
2664  		tegra_pcie_debugfs_init(pcie);
2665  
2666  	return 0;
2667  
2668  pm_runtime_put:
2669  	pm_runtime_put_sync(pcie->dev);
2670  	pm_runtime_disable(pcie->dev);
2671  	tegra_pcie_msi_teardown(pcie);
2672  put_resources:
2673  	tegra_pcie_put_resources(pcie);
2674  	return err;
2675  }
2676  
2677  static void tegra_pcie_remove(struct platform_device *pdev)
2678  {
2679  	struct tegra_pcie *pcie = platform_get_drvdata(pdev);
2680  	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
2681  	struct tegra_pcie_port *port, *tmp;
2682  
2683  	if (IS_ENABLED(CONFIG_DEBUG_FS))
2684  		tegra_pcie_debugfs_exit(pcie);
2685  
2686  	pci_stop_root_bus(host->bus);
2687  	pci_remove_root_bus(host->bus);
2688  	pm_runtime_put_sync(pcie->dev);
2689  	pm_runtime_disable(pcie->dev);
2690  
2691  	if (IS_ENABLED(CONFIG_PCI_MSI))
2692  		tegra_pcie_msi_teardown(pcie);
2693  
2694  	tegra_pcie_put_resources(pcie);
2695  
2696  	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2697  		tegra_pcie_port_free(port);
2698  }
2699  
2700  static int tegra_pcie_pm_suspend(struct device *dev)
2701  {
2702  	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2703  	struct tegra_pcie_port *port;
2704  	int err;
2705  
2706  	list_for_each_entry(port, &pcie->ports, list)
2707  		tegra_pcie_pme_turnoff(port);
2708  
2709  	tegra_pcie_disable_ports(pcie);
2710  
2711  	/*
2712  	 * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
2713  	 * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
2714  	 */
2715  	tegra_pcie_disable_interrupts(pcie);
2716  
2717  	if (pcie->soc->program_uphy) {
2718  		err = tegra_pcie_phy_power_off(pcie);
2719  		if (err < 0)
2720  			dev_err(dev, "failed to power off PHY(s): %d\n", err);
2721  	}
2722  
2723  	reset_control_assert(pcie->pex_rst);
2724  	clk_disable_unprepare(pcie->pex_clk);
2725  
2726  	if (IS_ENABLED(CONFIG_PCI_MSI))
2727  		tegra_pcie_disable_msi(pcie);
2728  
2729  	pinctrl_pm_select_idle_state(dev);
2730  	tegra_pcie_power_off(pcie);
2731  
2732  	return 0;
2733  }
2734  
2735  static int tegra_pcie_pm_resume(struct device *dev)
2736  {
2737  	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2738  	int err;
2739  
2740  	err = tegra_pcie_power_on(pcie);
2741  	if (err) {
2742  		dev_err(dev, "failed to power on PCIe: %d\n", err);
2743  		return err;
2744  	}
2745  
2746  	err = pinctrl_pm_select_default_state(dev);
2747  	if (err < 0) {
2748  		dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
2749  		goto poweroff;
2750  	}
2751  
2752  	tegra_pcie_enable_controller(pcie);
2753  	tegra_pcie_setup_translations(pcie);
2754  
2755  	if (IS_ENABLED(CONFIG_PCI_MSI))
2756  		tegra_pcie_enable_msi(pcie);
2757  
2758  	err = clk_prepare_enable(pcie->pex_clk);
2759  	if (err) {
2760  		dev_err(dev, "failed to enable PEX clock: %d\n", err);
2761  		goto pex_dpd_enable;
2762  	}
2763  
2764  	reset_control_deassert(pcie->pex_rst);
2765  
2766  	if (pcie->soc->program_uphy) {
2767  		err = tegra_pcie_phy_power_on(pcie);
2768  		if (err < 0) {
2769  			dev_err(dev, "failed to power on PHY(s): %d\n", err);
2770  			goto disable_pex_clk;
2771  		}
2772  	}
2773  
2774  	tegra_pcie_apply_pad_settings(pcie);
2775  	tegra_pcie_enable_ports(pcie);
2776  
2777  	return 0;
2778  
2779  disable_pex_clk:
2780  	reset_control_assert(pcie->pex_rst);
2781  	clk_disable_unprepare(pcie->pex_clk);
2782  pex_dpd_enable:
2783  	pinctrl_pm_select_idle_state(dev);
2784  poweroff:
2785  	tegra_pcie_power_off(pcie);
2786  
2787  	return err;
2788  }
2789  
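/*
 * The same suspend/resume implementation serves both runtime PM and the
 * noirq phase of system sleep.
 */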
2790  static const struct dev_pm_ops tegra_pcie_pm_ops = {
2791  	RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
2792  	NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume)
2793  };
2794  
2795  static struct platform_driver tegra_pcie_driver = {
2796  	.driver = {
2797  		.name = "tegra-pcie",
2798  		.of_match_table = tegra_pcie_of_match,
2799  		.suppress_bind_attrs = true,
2800  		.pm = &tegra_pcie_pm_ops,
2801  	},
2802  	.probe = tegra_pcie_probe,
2803  	.remove_new = tegra_pcie_remove,
2804  };
2805  module_platform_driver(tegra_pcie_driver);
2806