1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * TSA driver
4   *
5   * Copyright 2022 CS GROUP France
6   *
7   * Author: Herve Codina <herve.codina@bootlin.com>
8   */
9  
#include "tsa.h"
#include <dt-bindings/soc/cpm1-fsl,tsa.h>
#include <dt-bindings/soc/qe-fsl,tsa.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <soc/fsl/qe/ucc.h>
22  
23  /* TSA SI RAM routing tables entry (CPM1) */
24  #define TSA_CPM1_SIRAM_ENTRY_LAST	BIT(16)
25  #define TSA_CPM1_SIRAM_ENTRY_BYTE	BIT(17)
26  #define TSA_CPM1_SIRAM_ENTRY_CNT_MASK	GENMASK(21, 18)
27  #define TSA_CPM1_SIRAM_ENTRY_CNT(x)	FIELD_PREP(TSA_CPM1_SIRAM_ENTRY_CNT_MASK, x)
28  #define TSA_CPM1_SIRAM_ENTRY_CSEL_MASK	GENMASK(24, 22)
29  #define TSA_CPM1_SIRAM_ENTRY_CSEL_NU	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x0)
30  #define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC2	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x2)
31  #define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC3	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x3)
32  #define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC4	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x4)
33  #define TSA_CPM1_SIRAM_ENTRY_CSEL_SMC1	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x5)
34  #define TSA_CPM1_SIRAM_ENTRY_CSEL_SMC2	FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x6)
35  
36  /* TSA SI RAM routing tables entry (QE) */
37  #define TSA_QE_SIRAM_ENTRY_LAST		BIT(0)
38  #define TSA_QE_SIRAM_ENTRY_BYTE		BIT(1)
39  #define TSA_QE_SIRAM_ENTRY_CNT_MASK	GENMASK(4, 2)
40  #define TSA_QE_SIRAM_ENTRY_CNT(x)	FIELD_PREP(TSA_QE_SIRAM_ENTRY_CNT_MASK, x)
41  #define TSA_QE_SIRAM_ENTRY_CSEL_MASK	GENMASK(8, 5)
42  #define TSA_QE_SIRAM_ENTRY_CSEL_NU	FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x0)
43  #define TSA_QE_SIRAM_ENTRY_CSEL_UCC5	FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x1)
44  #define TSA_QE_SIRAM_ENTRY_CSEL_UCC1	FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x9)
45  #define TSA_QE_SIRAM_ENTRY_CSEL_UCC2	FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xa)
46  #define TSA_QE_SIRAM_ENTRY_CSEL_UCC3	FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xb)
47  #define TSA_QE_SIRAM_ENTRY_CSEL_UCC4	FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xc)
48  
49  /*
50   * SI mode register :
51   * - CPM1: 32bit register split in 2*16bit (16bit TDM)
52   * - QE: 4x16bit registers, one per TDM
53   */
54  #define TSA_CPM1_SIMODE		0x00
55  #define TSA_QE_SIAMR		0x00
56  #define TSA_QE_SIBMR		0x02
57  #define TSA_QE_SICMR		0x04
58  #define TSA_QE_SIDMR		0x06
59  #define   TSA_CPM1_SIMODE_SMC2			BIT(31)
60  #define   TSA_CPM1_SIMODE_SMC1			BIT(15)
61  #define   TSA_CPM1_SIMODE_TDMA_MASK		GENMASK(11, 0)
62  #define   TSA_CPM1_SIMODE_TDMA(x)		FIELD_PREP(TSA_CPM1_SIMODE_TDMA_MASK, x)
63  #define   TSA_CPM1_SIMODE_TDMB_MASK		GENMASK(27, 16)
64  #define   TSA_CPM1_SIMODE_TDMB(x)		FIELD_PREP(TSA_CPM1_SIMODE_TDMB_MASK, x)
65  #define     TSA_QE_SIMODE_TDM_SAD_MASK		GENMASK(15, 12)
66  #define     TSA_QE_SIMODE_TDM_SAD(x)		FIELD_PREP(TSA_QE_SIMODE_TDM_SAD_MASK, x)
67  #define     TSA_CPM1_SIMODE_TDM_MASK		GENMASK(11, 0)
68  #define     TSA_SIMODE_TDM_SDM_MASK		GENMASK(11, 10)
69  #define       TSA_SIMODE_TDM_SDM_NORM		FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x0)
70  #define       TSA_SIMODE_TDM_SDM_ECHO		FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x1)
71  #define       TSA_SIMODE_TDM_SDM_INTL_LOOP	FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x2)
72  #define       TSA_SIMODE_TDM_SDM_LOOP_CTRL	FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x3)
73  #define     TSA_SIMODE_TDM_RFSD_MASK		GENMASK(9, 8)
74  #define     TSA_SIMODE_TDM_RFSD(x)		FIELD_PREP(TSA_SIMODE_TDM_RFSD_MASK, x)
75  #define     TSA_SIMODE_TDM_DSC			BIT(7)
76  #define     TSA_SIMODE_TDM_CRT			BIT(6)
77  #define     TSA_CPM1_SIMODE_TDM_STZ		BIT(5) /* bit 5: STZ in CPM1 */
78  #define     TSA_QE_SIMODE_TDM_SL		BIT(5) /* bit 5: SL in QE */
79  #define     TSA_SIMODE_TDM_CE			BIT(4)
80  #define     TSA_SIMODE_TDM_FE			BIT(3)
81  #define     TSA_SIMODE_TDM_GM			BIT(2)
82  #define     TSA_SIMODE_TDM_TFSD_MASK		GENMASK(1, 0)
83  #define     TSA_SIMODE_TDM_TFSD(x)		FIELD_PREP(TSA_SIMODE_TDM_TFSD_MASK, x)
84  
85  /* CPM SI global mode register (8 bits) */
86  #define TSA_CPM1_SIGMR	0x04
87  #define TSA_CPM1_SIGMR_ENB			BIT(3)
88  #define TSA_CPM1_SIGMR_ENA			BIT(2)
89  #define TSA_CPM1_SIGMR_RDM_MASK			GENMASK(1, 0)
90  #define   TSA_CPM1_SIGMR_RDM_STATIC_TDMA	FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x0)
91  #define   TSA_CPM1_SIGMR_RDM_DYN_TDMA		FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x1)
92  #define   TSA_CPM1_SIGMR_RDM_STATIC_TDMAB	FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x2)
93  #define   TSA_CPM1_SIGMR_RDM_DYN_TDMAB		FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x3)
94  
95  /* QE SI global mode register high (8 bits) */
96  #define TSA_QE_SIGLMRH	0x08
97  #define TSA_QE_SIGLMRH_END	BIT(3)
98  #define TSA_QE_SIGLMRH_ENC	BIT(2)
99  #define TSA_QE_SIGLMRH_ENB	BIT(1)
100  #define TSA_QE_SIGLMRH_ENA	BIT(0)
101  
102  /* SI clock route register (32 bits) */
103  #define TSA_CPM1_SICR	0x0C
104  #define   TSA_CPM1_SICR_SCC2_MASK		GENMASK(15, 8)
105  #define   TSA_CPM1_SICR_SCC2(x)			FIELD_PREP(TSA_CPM1_SICR_SCC2_MASK, x)
106  #define   TSA_CPM1_SICR_SCC3_MASK		GENMASK(23, 16)
107  #define   TSA_CPM1_SICR_SCC3(x)			FIELD_PREP(TSA_CPM1_SICR_SCC3_MASK, x)
108  #define   TSA_CPM1_SICR_SCC4_MASK		GENMASK(31, 24)
109  #define   TSA_CPM1_SICR_SCC4(x)			FIELD_PREP(TSA_CPM1_SICR_SCC4_MASK, x)
110  #define     TSA_CPM1_SICR_SCC_MASK		GENMASK(7, 0)
111  #define     TSA_CPM1_SICR_SCC_GRX		BIT(7)
112  #define     TSA_CPM1_SICR_SCC_SCX_TSA		BIT(6)
113  #define     TSA_CPM1_SICR_SCC_RXCS_MASK		GENMASK(5, 3)
114  #define       TSA_CPM1_SICR_SCC_RXCS_BRG1	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x0)
115  #define       TSA_CPM1_SICR_SCC_RXCS_BRG2	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x1)
116  #define       TSA_CPM1_SICR_SCC_RXCS_BRG3	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x2)
117  #define       TSA_CPM1_SICR_SCC_RXCS_BRG4	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x3)
118  #define       TSA_CPM1_SICR_SCC_RXCS_CLK15	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x4)
119  #define       TSA_CPM1_SICR_SCC_RXCS_CLK26	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x5)
120  #define       TSA_CPM1_SICR_SCC_RXCS_CLK37	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x6)
121  #define       TSA_CPM1_SICR_SCC_RXCS_CLK48	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x7)
122  #define     TSA_CPM1_SICR_SCC_TXCS_MASK		GENMASK(2, 0)
123  #define       TSA_CPM1_SICR_SCC_TXCS_BRG1	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x0)
124  #define       TSA_CPM1_SICR_SCC_TXCS_BRG2	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x1)
125  #define       TSA_CPM1_SICR_SCC_TXCS_BRG3	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x2)
126  #define       TSA_CPM1_SICR_SCC_TXCS_BRG4	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x3)
127  #define       TSA_CPM1_SICR_SCC_TXCS_CLK15	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x4)
128  #define       TSA_CPM1_SICR_SCC_TXCS_CLK26	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x5)
129  #define       TSA_CPM1_SICR_SCC_TXCS_CLK37	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x6)
130  #define       TSA_CPM1_SICR_SCC_TXCS_CLK48	FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x7)
131  
/*
 * Region of SI RAM dedicated to one TDM direction (Rx or Tx).
 * Entries are written sequentially from entries_start up to (and excluding)
 * entries_next.
 */
struct tsa_entries_area {
	void __iomem *entries_start;	/* first entry of the area */
	void __iomem *entries_next;	/* first address past the area */
	void __iomem *last_entry;	/* last entry written, NULL if none yet */
};
137  
/* Per-TDM state: clocks and the SI mode value built from DT properties */
struct tsa_tdm {
	bool is_enable;			/* TDM declared in DT and fully set up */
	struct clk *l1rclk_clk;		/* Rx bit clock */
	struct clk *l1rsync_clk;	/* Rx frame sync clock */
	struct clk *l1tclk_clk;		/* Tx bit clock (unset with common Rx/Tx pins) */
	struct clk *l1tsync_clk;	/* Tx frame sync clock (unset with common Rx/Tx pins) */
	u32 simode_tdm;			/* accumulated TSA_SIMODE_TDM_* bits */
};
146  
147  #define TSA_TDMA	0
148  #define TSA_TDMB	1
149  #define TSA_TDMC	2 /* QE implementation only */
150  #define TSA_TDMD	3 /* QE implementation only */
151  
/* Controller flavor; 0 is avoided so an unset version field is detectable */
enum tsa_version {
	TSA_CPM1 = 1, /* Avoid 0 value */
	TSA_QE,
};
156  
/* Driver private data */
struct tsa {
	struct device *dev;
	void __iomem *si_regs;		/* SI register block */
	void __iomem *si_ram;		/* SI routing RAM */
	resource_size_t si_ram_sz;	/* size of si_ram, in bytes */
	spinlock_t	lock; /* Lock for read/modify/write sequence */
	enum tsa_version version;	/* TSA_CPM1 or TSA_QE */
	int tdms; /* TSA_TDMx ORed */
#if IS_ENABLED(CONFIG_QUICC_ENGINE)
	struct tsa_tdm tdm[4]; /* TDMa, TDMb, TDMc and TDMd */
#else
	struct tsa_tdm tdm[2]; /* TDMa and TDMb */
#endif
	/* Same number of serials for CPM1 and QE:
	 * CPM1: NU, 3 SCCs and 2 SMCs
	 * QE: NU and 5 UCCs
	 */
	struct tsa_serial {
		unsigned int id;	/* FSL_CPM_TSA_* or FSL_QE_TSA_* value */
		struct tsa_serial_info info;
	} serials[6];
};
179  
/* Retrieve the tsa instance that embeds the given serials[] entry */
static inline struct tsa *tsa_serial_get_tsa(struct tsa_serial *tsa_serial)
{
	/* The serials table is indexed by the serial id */
	return container_of(tsa_serial, struct tsa, serials[tsa_serial->id]);
}
185  
/* Write a 32bit value to a big-endian TSA register */
static inline void tsa_write32(void __iomem *addr, u32 val)
{
	iowrite32be(val, addr);
}
190  
/* Write a 16bit value to a big-endian TSA register */
static inline void tsa_write16(void __iomem *addr, u16 val)
{
	iowrite16be(val, addr);
}
195  
/* Write an 8bit value to a TSA register (no endianness involved) */
static inline void tsa_write8(void __iomem *addr, u8 val)
{
	iowrite8(val, addr);
}
200  
/* Read a 32bit value from a big-endian TSA register */
static inline u32 tsa_read32(void __iomem *addr)
{
	return ioread32be(addr);
}
205  
/* Read a 16bit value from a big-endian TSA register */
static inline u16 tsa_read16(void __iomem *addr)
{
	return ioread16be(addr);
}
210  
/* Clear bits in a 32bit register (non-atomic read/modify/write) */
static inline void tsa_clrbits32(void __iomem *addr, u32 clr)
{
	tsa_write32(addr, tsa_read32(addr) & ~clr);
}
215  
/* Clear bits in a 16bit register (non-atomic read/modify/write) */
static inline void tsa_clrbits16(void __iomem *addr, u16 clr)
{
	tsa_write16(addr, tsa_read16(addr) & ~clr);
}
220  
/* Clear then set bits in a 32bit register (non-atomic read/modify/write) */
static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set)
{
	tsa_write32(addr, (tsa_read32(addr) & ~clr) | set);
}
225  
/*
 * Return true when the controller is the QE flavor.
 * When only one of CONFIG_CPM / CONFIG_QUICC_ENGINE is enabled the result is
 * a compile-time constant and the runtime version field is never consulted,
 * which lets the compiler drop the unused code paths.
 */
static bool tsa_is_qe(const struct tsa *tsa)
{
	if (IS_ENABLED(CONFIG_QUICC_ENGINE) && IS_ENABLED(CONFIG_CPM))
		return tsa->version == TSA_QE;

	return IS_ENABLED(CONFIG_QUICC_ENGINE);
}
233  
tsa_qe_serial_get_num(struct tsa_serial * tsa_serial)234  static int tsa_qe_serial_get_num(struct tsa_serial *tsa_serial)
235  {
236  	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
237  
238  	switch (tsa_serial->id) {
239  	case FSL_QE_TSA_UCC1: return 0;
240  	case FSL_QE_TSA_UCC2: return 1;
241  	case FSL_QE_TSA_UCC3: return 2;
242  	case FSL_QE_TSA_UCC4: return 3;
243  	case FSL_QE_TSA_UCC5: return 4;
244  	default:
245  		break;
246  	}
247  
248  	dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
249  	return -EINVAL;
250  }
251  
/*
 * Return the zero-based serial (UCC) number on QE, or -EOPNOTSUPP on CPM1
 * where no such global numbering exists.
 */
int tsa_serial_get_num(struct tsa_serial *tsa_serial)
{
	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);

	/*
	 * There is no need to get the serial num out of the TSA driver in the
	 * CPM case.
	 * Further more, in CPM, we can have 2 types of serial SCCs and FCCs.
	 * What kind of numbering to use that can be global to both SCCs and
	 * FCCs ?
	 */
	return tsa_is_qe(tsa) ? tsa_qe_serial_get_num(tsa_serial) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(tsa_serial_get_num);
266  
/*
 * Connect or disconnect a CPM1 SCC to/from the TSA by updating its SICR
 * field. Only SCC2..SCC4 are routable; other ids are rejected with -EINVAL.
 */
static int tsa_cpm1_serial_connect(struct tsa_serial *tsa_serial, bool connect)
{
	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
	unsigned long flags;
	u32 clear;
	u32 set;

	switch (tsa_serial->id) {
	case FSL_CPM_TSA_SCC2:
		clear = TSA_CPM1_SICR_SCC2(TSA_CPM1_SICR_SCC_MASK);
		set = TSA_CPM1_SICR_SCC2(TSA_CPM1_SICR_SCC_SCX_TSA);
		break;
	case FSL_CPM_TSA_SCC3:
		clear = TSA_CPM1_SICR_SCC3(TSA_CPM1_SICR_SCC_MASK);
		set = TSA_CPM1_SICR_SCC3(TSA_CPM1_SICR_SCC_SCX_TSA);
		break;
	case FSL_CPM_TSA_SCC4:
		clear = TSA_CPM1_SICR_SCC4(TSA_CPM1_SICR_SCC_MASK);
		set = TSA_CPM1_SICR_SCC4(TSA_CPM1_SICR_SCC_SCX_TSA);
		break;
	default:
		dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
		return -EINVAL;
	}

	/* Lock protects the SICR read/modify/write sequence */
	spin_lock_irqsave(&tsa->lock, flags);
	tsa_clrsetbits32(tsa->si_regs + TSA_CPM1_SICR, clear,
			 connect ? set : 0);
	spin_unlock_irqrestore(&tsa->lock, flags);

	return 0;
}
299  
/*
 * Connect or disconnect a QE UCC to/from the TSA through the QE mux.
 * Returns 0 on success or a negative error code.
 */
static int tsa_qe_serial_connect(struct tsa_serial *tsa_serial, bool connect)
{
	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
	unsigned long flags;
	int ucc_num;
	int ret;

	ucc_num = tsa_qe_serial_get_num(tsa_serial);
	if (ucc_num < 0)
		return ucc_num;

	spin_lock_irqsave(&tsa->lock, flags);
	ret = ucc_set_qe_mux_tsa(ucc_num, connect);
	spin_unlock_irqrestore(&tsa->lock, flags);
	if (!ret)
		return 0;

	dev_err(tsa->dev, "Connect serial id %u to TSA failed (%d)\n",
		tsa_serial->id, ret);
	return ret;
}
321  
/* Connect a serial (SCC or UCC depending on the flavor) to the TSA */
int tsa_serial_connect(struct tsa_serial *tsa_serial)
{
	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);

	return tsa_is_qe(tsa) ?
		tsa_qe_serial_connect(tsa_serial, true) :
		tsa_cpm1_serial_connect(tsa_serial, true);
}
EXPORT_SYMBOL(tsa_serial_connect);
331  
/* Disconnect a serial (SCC or UCC depending on the flavor) from the TSA */
int tsa_serial_disconnect(struct tsa_serial *tsa_serial)
{
	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);

	return tsa_is_qe(tsa) ?
		tsa_qe_serial_connect(tsa_serial, false) :
		tsa_cpm1_serial_connect(tsa_serial, false);
}
EXPORT_SYMBOL(tsa_serial_disconnect);
341  
/* Copy the serial's routing information (rates, time-slot counts) to @info */
int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info)
{
	memcpy(info, &tsa_serial->info, sizeof(*info));
	return 0;
}
EXPORT_SYMBOL(tsa_serial_get_info);
348  
/*
 * Compute the SI RAM area assigned to one TDM/direction pair on CPM1.
 *
 * With only TDMA enabled, each direction gets one half of the SI RAM
 * (Rx first, Tx second). As soon as TDMB is involved, the RAM is split in
 * quarters: TDMA-Rx, TDMB-Rx, TDMA-Tx, TDMB-Tx in that order.
 */
static void tsa_cpm1_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
				       u32 tdms, u32 tdm_id, bool is_rx)
{
	resource_size_t size;
	resource_size_t offset;

	if (tdms == BIT(TSA_TDMA)) {
		/* Only TDMA: one half per direction */
		size = tsa->si_ram_sz / 2;
		offset = is_rx ? 0 : size;
	} else {
		/* Only TDMB or both TDMs: one quarter per TDM/direction */
		size = tsa->si_ram_sz / 4;
		if (tdm_id == TSA_TDMA)
			offset = is_rx ? 0 : 2 * size;
		else
			offset = is_rx ? size : 3 * size;
	}

	area->entries_start = tsa->si_ram + offset;
	area->entries_next = area->entries_start + size;
	area->last_entry = NULL;
}
400  
/*
 * Compute the SI RAM area assigned to one TDM/direction pair on QE.
 *
 * One half of the SI RAM is used for Tx (first half) and the other one for
 * Rx (second half). In each half, 1/4 of the area is assigned to each TDM.
 */
static void tsa_qe_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
				     u32 tdms, u32 tdm_id, bool is_rx)
{
	resource_size_t eighth = tsa->si_ram_sz / 8;
	resource_size_t offset = eighth * tdm_id;

	if (is_rx)
		offset += tsa->si_ram_sz / 2;	/* Rx lives in the second half */

	area->entries_start = tsa->si_ram + offset;
	area->entries_next = area->entries_start + eighth;
	area->last_entry = NULL;
}
426  
/* Dispatch SI RAM area computation to the CPM1 or QE implementation */
static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
				  u32 tdms, u32 tdm_id, bool is_rx)
{
	if (tsa_is_qe(tsa))
		tsa_qe_init_entries_area(tsa, area, tdms, tdm_id, is_rx);
	else
		tsa_cpm1_init_entries_area(tsa, area, tdms, tdm_id, is_rx);
}
435  
/* Human-readable name of a CPM1 serial id, or NULL if unknown */
static const char *tsa_cpm1_serial_id2name(struct tsa *tsa, u32 serial_id)
{
	if (serial_id == FSL_CPM_TSA_NU)
		return "Not used";
	if (serial_id == FSL_CPM_TSA_SCC2)
		return "SCC2";
	if (serial_id == FSL_CPM_TSA_SCC3)
		return "SCC3";
	if (serial_id == FSL_CPM_TSA_SCC4)
		return "SCC4";
	if (serial_id == FSL_CPM_TSA_SMC1)
		return "SMC1";
	if (serial_id == FSL_CPM_TSA_SMC2)
		return "SMC2";
	return NULL;
}
450  
/* Human-readable name of a QE serial id, or NULL if unknown */
static const char *tsa_qe_serial_id2name(struct tsa *tsa, u32 serial_id)
{
	if (serial_id == FSL_QE_TSA_NU)
		return "Not used";
	if (serial_id == FSL_QE_TSA_UCC1)
		return "UCC1";
	if (serial_id == FSL_QE_TSA_UCC2)
		return "UCC2";
	if (serial_id == FSL_QE_TSA_UCC3)
		return "UCC3";
	if (serial_id == FSL_QE_TSA_UCC4)
		return "UCC4";
	if (serial_id == FSL_QE_TSA_UCC5)
		return "UCC5";
	return NULL;
}
465  
/* Dispatch serial-id naming to the CPM1 or QE implementation */
static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
{
	if (tsa_is_qe(tsa))
		return tsa_qe_serial_id2name(tsa, serial_id);

	return tsa_cpm1_serial_id2name(tsa, serial_id);
}
472  
/*
 * Translate a CPM1 serial id to the SI RAM entry CSEL field value.
 * Unknown ids map to the "not used" selector.
 */
static u32 tsa_cpm1_serial_id2csel(struct tsa *tsa, u32 serial_id)
{
	if (serial_id == FSL_CPM_TSA_SCC2)
		return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC2;
	if (serial_id == FSL_CPM_TSA_SCC3)
		return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC3;
	if (serial_id == FSL_CPM_TSA_SCC4)
		return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC4;
	if (serial_id == FSL_CPM_TSA_SMC1)
		return TSA_CPM1_SIRAM_ENTRY_CSEL_SMC1;
	if (serial_id == FSL_CPM_TSA_SMC2)
		return TSA_CPM1_SIRAM_ENTRY_CSEL_SMC2;
	return TSA_CPM1_SIRAM_ENTRY_CSEL_NU;
}
486  
/*
 * Route @count time slots to @serial_id by appending 32bit entries to the
 * CPM1 SI RAM area. Each entry covers up to 16 time slots; the LAST flag is
 * moved from the previously-written entry to the new final one.
 * Returns -ENOSPC when the area cannot hold the required entries.
 */
static int tsa_cpm1_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
			      u32 count, u32 serial_id)
{
	void __iomem *addr;
	u32 left;
	u32 val;
	u32 cnt;
	u32 nb;

	/* Continue right after the previous entry, or at the area start */
	addr = area->last_entry ? area->last_entry + 4 : area->entries_start;

	/* NOTE(review): worst-case entry count uses 8 slots/entry although the
	 * loop below packs up to 16 per entry — conservative; confirm intent.
	 */
	nb = DIV_ROUND_UP(count, 8);
	if ((addr + (nb * 4)) > area->entries_next) {
		dev_err(tsa->dev, "si ram area full\n");
		return -ENOSPC;
	}

	if (area->last_entry) {
		/* Clear last flag */
		tsa_clrbits32(area->last_entry, TSA_CPM1_SIRAM_ENTRY_LAST);
	}

	left = count;
	while (left) {
		val = TSA_CPM1_SIRAM_ENTRY_BYTE | tsa_cpm1_serial_id2csel(tsa, serial_id);

		if (left > 16) {
			cnt = 16;
		} else {
			/* Final entry: set LAST and remember its address */
			cnt = left;
			val |= TSA_CPM1_SIRAM_ENTRY_LAST;
			area->last_entry = addr;
		}
		/* CNT field encodes count-1 */
		val |= TSA_CPM1_SIRAM_ENTRY_CNT(cnt - 1);

		tsa_write32(addr, val);
		addr += 4;
		left -= cnt;
	}

	return 0;
}
529  
/*
 * Translate a QE serial id to the SI RAM entry CSEL field value.
 * Unknown ids map to the "not used" selector.
 */
static u32 tsa_qe_serial_id2csel(struct tsa *tsa, u32 serial_id)
{
	if (serial_id == FSL_QE_TSA_UCC1)
		return TSA_QE_SIRAM_ENTRY_CSEL_UCC1;
	if (serial_id == FSL_QE_TSA_UCC2)
		return TSA_QE_SIRAM_ENTRY_CSEL_UCC2;
	if (serial_id == FSL_QE_TSA_UCC3)
		return TSA_QE_SIRAM_ENTRY_CSEL_UCC3;
	if (serial_id == FSL_QE_TSA_UCC4)
		return TSA_QE_SIRAM_ENTRY_CSEL_UCC4;
	if (serial_id == FSL_QE_TSA_UCC5)
		return TSA_QE_SIRAM_ENTRY_CSEL_UCC5;
	return TSA_QE_SIRAM_ENTRY_CSEL_NU;
}
543  
/*
 * Route @count time slots to @serial_id by appending 16bit entries to the
 * QE SI RAM area. Each entry covers up to 8 time slots; the LAST flag is
 * moved from the previously-written entry to the new final one.
 * Returns -ENOSPC when the area cannot hold the required entries.
 */
static int tsa_qe_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
			    u32 count, u32 serial_id)
{
	void __iomem *addr;
	u32 left;
	u32 val;
	u32 cnt;
	u32 nb;

	/* Continue right after the previous entry, or at the area start */
	addr = area->last_entry ? area->last_entry + 2 : area->entries_start;

	nb = DIV_ROUND_UP(count, 8);
	if ((addr + (nb * 2)) > area->entries_next) {
		dev_err(tsa->dev, "si ram area full\n");
		return -ENOSPC;
	}

	if (area->last_entry) {
		/* Clear last flag */
		tsa_clrbits16(area->last_entry, TSA_QE_SIRAM_ENTRY_LAST);
	}

	left = count;
	while (left) {
		val = TSA_QE_SIRAM_ENTRY_BYTE | tsa_qe_serial_id2csel(tsa, serial_id);

		if (left > 8) {
			cnt = 8;
		} else {
			/* Final entry: set LAST and remember its address */
			cnt = left;
			val |= TSA_QE_SIRAM_ENTRY_LAST;
			area->last_entry = addr;
		}
		/* CNT field encodes count-1 */
		val |= TSA_QE_SIRAM_ENTRY_CNT(cnt - 1);

		tsa_write16(addr, val);
		addr += 2;
		left -= cnt;
	}

	return 0;
}
586  
/* Dispatch SI RAM entry creation to the CPM1 or QE implementation */
static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
			 u32 count, u32 serial_id)
{
	if (tsa_is_qe(tsa))
		return tsa_qe_add_entry(tsa, area, count, serial_id);

	return tsa_cpm1_add_entry(tsa, area, count, serial_id);
}
594  
/*
 * Parse one direction's "fsl,{rx,tx}-ts-routes" property of a TDM node and
 * program the corresponding SI RAM entries.
 *
 * The property is a list of (count, serial_id) pairs. For each pair, @count
 * time slots are routed to @serial_id and the serial's rate/time-slot info
 * is updated from the TDM clocks.
 * Returns 0 on success or a negative error code.
 */
static int tsa_of_parse_tdm_route(struct tsa *tsa, struct device_node *tdm_np,
				  u32 tdms, u32 tdm_id, bool is_rx)
{
	struct tsa_entries_area area;
	const char *route_name;
	u32 serial_id;
	int len, i;
	u32 count;
	const char *serial_name;
	struct tsa_serial_info *serial_info;
	struct tsa_tdm *tdm;
	int ret;
	u32 ts;

	route_name = is_rx ? "fsl,rx-ts-routes" : "fsl,tx-ts-routes";

	len = of_property_count_u32_elems(tdm_np,  route_name);
	if (len < 0) {
		dev_err(tsa->dev, "%pOF: failed to read %s\n", tdm_np, route_name);
		return len;
	}
	/* Values must come in (count, serial_id) pairs */
	if (len % 2 != 0) {
		dev_err(tsa->dev, "%pOF: wrong %s format\n", tdm_np, route_name);
		return -EINVAL;
	}

	tsa_init_entries_area(tsa, &area, tdms, tdm_id, is_rx);
	ts = 0;	/* first time slot covered by the next pair (for logging) */
	for (i = 0; i < len; i += 2) {
		/* Reads cannot fail: indexes are below the checked length */
		of_property_read_u32_index(tdm_np, route_name, i, &count);
		of_property_read_u32_index(tdm_np, route_name, i + 1, &serial_id);

		if (serial_id >= ARRAY_SIZE(tsa->serials)) {
			dev_err(tsa->dev, "%pOF: invalid serial id (%u)\n",
				tdm_np, serial_id);
			return -EINVAL;
		}

		serial_name = tsa_serial_id2name(tsa, serial_id);
		if (!serial_name) {
			dev_err(tsa->dev, "%pOF: unsupported serial id (%u)\n",
				tdm_np, serial_id);
			return -EINVAL;
		}

		dev_dbg(tsa->dev, "tdm_id=%u, %s ts %u..%u -> %s\n",
			tdm_id, route_name, ts, ts + count - 1, serial_name);
		ts += count;

		ret = tsa_add_entry(tsa, &area, count, serial_id);
		if (ret)
			return ret;

		/* Record rates and time-slot counts for this serial */
		serial_info = &tsa->serials[serial_id].info;
		tdm = &tsa->tdm[tdm_id];
		if (is_rx) {
			serial_info->rx_fs_rate = clk_get_rate(tdm->l1rsync_clk);
			serial_info->rx_bit_rate = clk_get_rate(tdm->l1rclk_clk);
			serial_info->nb_rx_ts += count;
		} else {
			/* Fall back to Rx clocks when Rx/Tx pins are common */
			serial_info->tx_fs_rate = tdm->l1tsync_clk ?
				clk_get_rate(tdm->l1tsync_clk) :
				clk_get_rate(tdm->l1rsync_clk);
			serial_info->tx_bit_rate = tdm->l1tclk_clk ?
				clk_get_rate(tdm->l1tclk_clk) :
				clk_get_rate(tdm->l1rclk_clk);
			serial_info->nb_tx_ts += count;
		}
	}
	return 0;
}
666  
/* Parse the Rx time-slot routes of a TDM node */
static inline int tsa_of_parse_tdm_rx_route(struct tsa *tsa,
					    struct device_node *tdm_np,
					    u32 tdms, u32 tdm_id)
{
	return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, true);
}
673  
/* Parse the Tx time-slot routes of a TDM node */
static inline int tsa_of_parse_tdm_tx_route(struct tsa *tsa,
					    struct device_node *tdm_np,
					    u32 tdms, u32 tdm_id)
{
	return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, false);
}
680  
tsa_of_parse_tdms(struct tsa * tsa,struct device_node * np)681  static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
682  {
683  	struct device_node *tdm_np;
684  	struct tsa_tdm *tdm;
685  	struct clk *clk;
686  	u32 tdm_id, val;
687  	int ret;
688  	int i;
689  
690  	tsa->tdms = 0;
691  	for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++)
692  		tsa->tdm[i].is_enable = false;
693  
694  	for_each_available_child_of_node(np, tdm_np) {
695  		ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
696  		if (ret) {
697  			dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
698  			of_node_put(tdm_np);
699  			return ret;
700  		}
701  		switch (tdm_id) {
702  		case 0:
703  			tsa->tdms |= BIT(TSA_TDMA);
704  			break;
705  		case 1:
706  			tsa->tdms |= BIT(TSA_TDMB);
707  			break;
708  		case 2:
709  			if (!tsa_is_qe(tsa))
710  				goto invalid_tdm; /* Not available on CPM1 */
711  			tsa->tdms |= BIT(TSA_TDMC);
712  			break;
713  		case 3:
714  			if (!tsa_is_qe(tsa))
715  				goto invalid_tdm;  /* Not available on CPM1 */
716  			tsa->tdms |= BIT(TSA_TDMD);
717  			break;
718  		default:
719  invalid_tdm:
720  			dev_err(tsa->dev, "%pOF: Invalid tdm_id (%u)\n", tdm_np,
721  				tdm_id);
722  			of_node_put(tdm_np);
723  			return -EINVAL;
724  		}
725  	}
726  
727  	for_each_available_child_of_node(np, tdm_np) {
728  		ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
729  		if (ret) {
730  			dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
731  			of_node_put(tdm_np);
732  			return ret;
733  		}
734  
735  		tdm = &tsa->tdm[tdm_id];
736  		tdm->simode_tdm = TSA_SIMODE_TDM_SDM_NORM;
737  
738  		val = 0;
739  		ret = of_property_read_u32(tdm_np, "fsl,rx-frame-sync-delay-bits",
740  					   &val);
741  		if (ret && ret != -EINVAL) {
742  			dev_err(tsa->dev,
743  				"%pOF: failed to read fsl,rx-frame-sync-delay-bits\n",
744  				tdm_np);
745  			of_node_put(tdm_np);
746  			return ret;
747  		}
748  		if (val > 3) {
749  			dev_err(tsa->dev,
750  				"%pOF: Invalid fsl,rx-frame-sync-delay-bits (%u)\n",
751  				tdm_np, val);
752  			of_node_put(tdm_np);
753  			return -EINVAL;
754  		}
755  		tdm->simode_tdm |= TSA_SIMODE_TDM_RFSD(val);
756  
757  		val = 0;
758  		ret = of_property_read_u32(tdm_np, "fsl,tx-frame-sync-delay-bits",
759  					   &val);
760  		if (ret && ret != -EINVAL) {
761  			dev_err(tsa->dev,
762  				"%pOF: failed to read fsl,tx-frame-sync-delay-bits\n",
763  				tdm_np);
764  			of_node_put(tdm_np);
765  			return ret;
766  		}
767  		if (val > 3) {
768  			dev_err(tsa->dev,
769  				"%pOF: Invalid fsl,tx-frame-sync-delay-bits (%u)\n",
770  				tdm_np, val);
771  			of_node_put(tdm_np);
772  			return -EINVAL;
773  		}
774  		tdm->simode_tdm |= TSA_SIMODE_TDM_TFSD(val);
775  
776  		if (of_property_read_bool(tdm_np, "fsl,common-rxtx-pins"))
777  			tdm->simode_tdm |= TSA_SIMODE_TDM_CRT;
778  
779  		if (of_property_read_bool(tdm_np, "fsl,clock-falling-edge"))
780  			tdm->simode_tdm |= TSA_SIMODE_TDM_CE;
781  
782  		if (of_property_read_bool(tdm_np, "fsl,fsync-rising-edge"))
783  			tdm->simode_tdm |= TSA_SIMODE_TDM_FE;
784  
785  		if (tsa_is_qe(tsa) &&
786  		    of_property_read_bool(tdm_np, "fsl,fsync-active-low"))
787  			tdm->simode_tdm |= TSA_QE_SIMODE_TDM_SL;
788  
789  		if (of_property_read_bool(tdm_np, "fsl,double-speed-clock"))
790  			tdm->simode_tdm |= TSA_SIMODE_TDM_DSC;
791  
792  		clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "rsync" : "l1rsync");
793  		if (IS_ERR(clk)) {
794  			ret = PTR_ERR(clk);
795  			of_node_put(tdm_np);
796  			goto err;
797  		}
798  		ret = clk_prepare_enable(clk);
799  		if (ret) {
800  			clk_put(clk);
801  			of_node_put(tdm_np);
802  			goto err;
803  		}
804  		tdm->l1rsync_clk = clk;
805  
806  		clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "rclk" : "l1rclk");
807  		if (IS_ERR(clk)) {
808  			ret = PTR_ERR(clk);
809  			of_node_put(tdm_np);
810  			goto err;
811  		}
812  		ret = clk_prepare_enable(clk);
813  		if (ret) {
814  			clk_put(clk);
815  			of_node_put(tdm_np);
816  			goto err;
817  		}
818  		tdm->l1rclk_clk = clk;
819  
820  		if (!(tdm->simode_tdm & TSA_SIMODE_TDM_CRT)) {
821  			clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "tsync" : "l1tsync");
822  			if (IS_ERR(clk)) {
823  				ret = PTR_ERR(clk);
824  				of_node_put(tdm_np);
825  				goto err;
826  			}
827  			ret = clk_prepare_enable(clk);
828  			if (ret) {
829  				clk_put(clk);
830  				of_node_put(tdm_np);
831  				goto err;
832  			}
833  			tdm->l1tsync_clk = clk;
834  
835  			clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "tclk" : "l1tclk");
836  			if (IS_ERR(clk)) {
837  				ret = PTR_ERR(clk);
838  				of_node_put(tdm_np);
839  				goto err;
840  			}
841  			ret = clk_prepare_enable(clk);
842  			if (ret) {
843  				clk_put(clk);
844  				of_node_put(tdm_np);
845  				goto err;
846  			}
847  			tdm->l1tclk_clk = clk;
848  		}
849  
850  		if (tsa_is_qe(tsa)) {
851  			/*
852  			 * The starting address for TSA table must be set.
853  			 * 512 entries for Tx and 512 entries for Rx are
854  			 * available for 4 TDMs.
855  			 * We assign entries equally -> 128 Rx/Tx entries per
856  			 * TDM. In other words, 4 blocks of 32 entries per TDM.
857  			 */
858  			tdm->simode_tdm |= TSA_QE_SIMODE_TDM_SAD(4 * tdm_id);
859  		}
860  
861  		ret = tsa_of_parse_tdm_rx_route(tsa, tdm_np, tsa->tdms, tdm_id);
862  		if (ret) {
863  			of_node_put(tdm_np);
864  			goto err;
865  		}
866  
867  		ret = tsa_of_parse_tdm_tx_route(tsa, tdm_np, tsa->tdms, tdm_id);
868  		if (ret) {
869  			of_node_put(tdm_np);
870  			goto err;
871  		}
872  
873  		tdm->is_enable = true;
874  	}
875  	return 0;
876  
877  err:
878  	for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
879  		if (tsa->tdm[i].l1rsync_clk) {
880  			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
881  			clk_put(tsa->tdm[i].l1rsync_clk);
882  		}
883  		if (tsa->tdm[i].l1rclk_clk) {
884  			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
885  			clk_put(tsa->tdm[i].l1rclk_clk);
886  		}
887  		if (tsa->tdm[i].l1tsync_clk) {
888  			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
889  			clk_put(tsa->tdm[i].l1rsync_clk);
890  		}
891  		if (tsa->tdm[i].l1tclk_clk) {
892  			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
893  			clk_put(tsa->tdm[i].l1rclk_clk);
894  		}
895  	}
896  	return ret;
897  }
898  
tsa_init_si_ram(struct tsa * tsa)899  static void tsa_init_si_ram(struct tsa *tsa)
900  {
901  	resource_size_t i;
902  
903  	/* Fill all entries as the last one */
904  	if (tsa_is_qe(tsa)) {
905  		for (i = 0; i < tsa->si_ram_sz; i += 2)
906  			tsa_write16(tsa->si_ram + i, TSA_QE_SIRAM_ENTRY_LAST);
907  	} else {
908  		for (i = 0; i < tsa->si_ram_sz; i += 4)
909  			tsa_write32(tsa->si_ram + i, TSA_CPM1_SIRAM_ENTRY_LAST);
910  	}
911  }
912  
tsa_cpm1_setup(struct tsa * tsa)913  static int tsa_cpm1_setup(struct tsa *tsa)
914  {
915  	u32 val;
916  
917  	/* Set SIMODE */
918  	val = 0;
919  	if (tsa->tdm[0].is_enable)
920  		val |= TSA_CPM1_SIMODE_TDMA(tsa->tdm[0].simode_tdm);
921  	if (tsa->tdm[1].is_enable)
922  		val |= TSA_CPM1_SIMODE_TDMB(tsa->tdm[1].simode_tdm);
923  
924  	tsa_clrsetbits32(tsa->si_regs + TSA_CPM1_SIMODE,
925  			 TSA_CPM1_SIMODE_TDMA(TSA_CPM1_SIMODE_TDM_MASK) |
926  			 TSA_CPM1_SIMODE_TDMB(TSA_CPM1_SIMODE_TDM_MASK),
927  			 val);
928  
929  	/* Set SIGMR */
930  	val = (tsa->tdms == BIT(TSA_TDMA)) ?
931  		TSA_CPM1_SIGMR_RDM_STATIC_TDMA : TSA_CPM1_SIGMR_RDM_STATIC_TDMAB;
932  	if (tsa->tdms & BIT(TSA_TDMA))
933  		val |= TSA_CPM1_SIGMR_ENA;
934  	if (tsa->tdms & BIT(TSA_TDMB))
935  		val |= TSA_CPM1_SIGMR_ENB;
936  	tsa_write8(tsa->si_regs + TSA_CPM1_SIGMR, val);
937  
938  	return 0;
939  }
940  
tsa_qe_setup(struct tsa * tsa)941  static int tsa_qe_setup(struct tsa *tsa)
942  {
943  	unsigned int sixmr;
944  	u8 siglmrh = 0;
945  	unsigned int i;
946  
947  	for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
948  		if (!tsa->tdm[i].is_enable)
949  			continue;
950  
951  		switch (i) {
952  		case 0:
953  			sixmr = TSA_QE_SIAMR;
954  			siglmrh |= TSA_QE_SIGLMRH_ENA;
955  			break;
956  		case 1:
957  			sixmr = TSA_QE_SIBMR;
958  			siglmrh |= TSA_QE_SIGLMRH_ENB;
959  			break;
960  		case 2:
961  			sixmr = TSA_QE_SICMR;
962  			siglmrh |= TSA_QE_SIGLMRH_ENC;
963  			break;
964  		case 3:
965  			sixmr = TSA_QE_SIDMR;
966  			siglmrh |= TSA_QE_SIGLMRH_END;
967  			break;
968  		default:
969  			return -EINVAL;
970  		}
971  
972  		/* Set SI mode register */
973  		tsa_write16(tsa->si_regs + sixmr, tsa->tdm[i].simode_tdm);
974  	}
975  
976  	/* Enable TDMs */
977  	tsa_write8(tsa->si_regs + TSA_QE_SIGLMRH, siglmrh);
978  
979  	return 0;
980  }
981  
/* Dispatch hardware setup to the QE or CPM1 implementation. */
static int tsa_setup(struct tsa *tsa)
{
	if (tsa_is_qe(tsa))
		return tsa_qe_setup(tsa);

	return tsa_cpm1_setup(tsa);
}
986  
tsa_probe(struct platform_device * pdev)987  static int tsa_probe(struct platform_device *pdev)
988  {
989  	struct device_node *np = pdev->dev.of_node;
990  	struct resource *res;
991  	struct tsa *tsa;
992  	unsigned int i;
993  	int ret;
994  
995  	tsa = devm_kzalloc(&pdev->dev, sizeof(*tsa), GFP_KERNEL);
996  	if (!tsa)
997  		return -ENOMEM;
998  
999  	tsa->dev = &pdev->dev;
1000  	tsa->version = (enum tsa_version)(uintptr_t)of_device_get_match_data(&pdev->dev);
1001  	switch (tsa->version) {
1002  	case TSA_CPM1:
1003  		dev_info(tsa->dev, "CPM1 version\n");
1004  		break;
1005  	case TSA_QE:
1006  		dev_info(tsa->dev, "QE version\n");
1007  		break;
1008  	default:
1009  		dev_err(tsa->dev, "Unknown version (%d)\n", tsa->version);
1010  		return -EINVAL;
1011  	}
1012  
1013  	for (i = 0; i < ARRAY_SIZE(tsa->serials); i++)
1014  		tsa->serials[i].id = i;
1015  
1016  	spin_lock_init(&tsa->lock);
1017  
1018  	tsa->si_regs = devm_platform_ioremap_resource_byname(pdev, "si_regs");
1019  	if (IS_ERR(tsa->si_regs))
1020  		return PTR_ERR(tsa->si_regs);
1021  
1022  	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "si_ram");
1023  	if (!res) {
1024  		dev_err(tsa->dev, "si_ram resource missing\n");
1025  		return -EINVAL;
1026  	}
1027  	tsa->si_ram_sz = resource_size(res);
1028  	tsa->si_ram = devm_ioremap_resource(&pdev->dev, res);
1029  	if (IS_ERR(tsa->si_ram))
1030  		return PTR_ERR(tsa->si_ram);
1031  
1032  	tsa_init_si_ram(tsa);
1033  
1034  	ret = tsa_of_parse_tdms(tsa, np);
1035  	if (ret)
1036  		return ret;
1037  
1038  	ret = tsa_setup(tsa);
1039  	if (ret)
1040  		return ret;
1041  
1042  	platform_set_drvdata(pdev, tsa);
1043  
1044  	return 0;
1045  }
1046  
tsa_remove(struct platform_device * pdev)1047  static void tsa_remove(struct platform_device *pdev)
1048  {
1049  	struct tsa *tsa = platform_get_drvdata(pdev);
1050  	int i;
1051  
1052  	for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
1053  		if (tsa->tdm[i].l1rsync_clk) {
1054  			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
1055  			clk_put(tsa->tdm[i].l1rsync_clk);
1056  		}
1057  		if (tsa->tdm[i].l1rclk_clk) {
1058  			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
1059  			clk_put(tsa->tdm[i].l1rclk_clk);
1060  		}
1061  		if (tsa->tdm[i].l1tsync_clk) {
1062  			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
1063  			clk_put(tsa->tdm[i].l1rsync_clk);
1064  		}
1065  		if (tsa->tdm[i].l1tclk_clk) {
1066  			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
1067  			clk_put(tsa->tdm[i].l1rclk_clk);
1068  		}
1069  	}
1070  }
1071  
/*
 * OF match table. The match data carries the controller generation
 * (enum tsa_version), decoded in tsa_probe(). Each entry is compiled in
 * only when the corresponding SoC support is enabled.
 */
static const struct of_device_id tsa_id_table[] = {
#if IS_ENABLED(CONFIG_CPM1)
	{ .compatible = "fsl,cpm1-tsa", .data = (void *)TSA_CPM1 },
#endif
#if IS_ENABLED(CONFIG_QUICC_ENGINE)
	{ .compatible = "fsl,qe-tsa", .data = (void *)TSA_QE },
#endif
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, tsa_id_table);
1082  
/* Platform driver glue; clocks taken at probe time are released in remove */
static struct platform_driver tsa_driver = {
	.driver = {
		.name = "fsl-tsa",
		.of_match_table = of_match_ptr(tsa_id_table),
	},
	.probe = tsa_probe,
	.remove_new = tsa_remove,
};
module_platform_driver(tsa_driver);
1092  
tsa_serial_get_byphandle(struct device_node * np,const char * phandle_name)1093  struct tsa_serial *tsa_serial_get_byphandle(struct device_node *np,
1094  					    const char *phandle_name)
1095  {
1096  	struct of_phandle_args out_args;
1097  	struct platform_device *pdev;
1098  	struct tsa_serial *tsa_serial;
1099  	struct tsa *tsa;
1100  	int ret;
1101  
1102  	ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0, &out_args);
1103  	if (ret < 0)
1104  		return ERR_PTR(ret);
1105  
1106  	if (!of_match_node(tsa_driver.driver.of_match_table, out_args.np)) {
1107  		of_node_put(out_args.np);
1108  		return ERR_PTR(-EINVAL);
1109  	}
1110  
1111  	pdev = of_find_device_by_node(out_args.np);
1112  	of_node_put(out_args.np);
1113  	if (!pdev)
1114  		return ERR_PTR(-ENODEV);
1115  
1116  	tsa = platform_get_drvdata(pdev);
1117  	if (!tsa) {
1118  		platform_device_put(pdev);
1119  		return ERR_PTR(-EPROBE_DEFER);
1120  	}
1121  
1122  	if (out_args.args_count != 1) {
1123  		platform_device_put(pdev);
1124  		return ERR_PTR(-EINVAL);
1125  	}
1126  
1127  	if (out_args.args[0] >= ARRAY_SIZE(tsa->serials)) {
1128  		platform_device_put(pdev);
1129  		return ERR_PTR(-EINVAL);
1130  	}
1131  
1132  	tsa_serial = &tsa->serials[out_args.args[0]];
1133  
1134  	/*
1135  	 * Be sure that the serial id matches the phandle arg.
1136  	 * The tsa_serials table is indexed by serial ids. The serial id is set
1137  	 * during the probe() call and needs to be coherent.
1138  	 */
1139  	if (WARN_ON(tsa_serial->id != out_args.args[0])) {
1140  		platform_device_put(pdev);
1141  		return ERR_PTR(-EINVAL);
1142  	}
1143  
1144  	return tsa_serial;
1145  }
1146  EXPORT_SYMBOL(tsa_serial_get_byphandle);
1147  
tsa_serial_put(struct tsa_serial * tsa_serial)1148  void tsa_serial_put(struct tsa_serial *tsa_serial)
1149  {
1150  	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
1151  
1152  	put_device(tsa->dev);
1153  }
1154  EXPORT_SYMBOL(tsa_serial_put);
1155  
/* devres destructor: @res stores a struct tsa_serial * to release. */
static void devm_tsa_serial_release(struct device *dev, void *res)
{
	tsa_serial_put(*(struct tsa_serial **)res);
}
1162  
devm_tsa_serial_get_byphandle(struct device * dev,struct device_node * np,const char * phandle_name)1163  struct tsa_serial *devm_tsa_serial_get_byphandle(struct device *dev,
1164  						 struct device_node *np,
1165  						 const char *phandle_name)
1166  {
1167  	struct tsa_serial *tsa_serial;
1168  	struct tsa_serial **dr;
1169  
1170  	dr = devres_alloc(devm_tsa_serial_release, sizeof(*dr), GFP_KERNEL);
1171  	if (!dr)
1172  		return ERR_PTR(-ENOMEM);
1173  
1174  	tsa_serial = tsa_serial_get_byphandle(np, phandle_name);
1175  	if (!IS_ERR(tsa_serial)) {
1176  		*dr = tsa_serial;
1177  		devres_add(dev, dr);
1178  	} else {
1179  		devres_free(dr);
1180  	}
1181  
1182  	return tsa_serial;
1183  }
1184  EXPORT_SYMBOL(devm_tsa_serial_get_byphandle);
1185  
1186  MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
1187  MODULE_DESCRIPTION("CPM/QE TSA driver");
1188  MODULE_LICENSE("GPL");
1189