// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Cadence Design Systems Inc.
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define DEV_ID 0x0
#define DEV_ID_I3C_MASTER 0x5034

#define CONF_STATUS0 0x4
#define CONF_STATUS0_CMDR_DEPTH(x) (4 << (((x) & GENMASK(31, 29)) >> 29))
#define CONF_STATUS0_ECC_CHK BIT(28)
#define CONF_STATUS0_INTEG_CHK BIT(27)
#define CONF_STATUS0_CSR_DAP_CHK BIT(26)
#define CONF_STATUS0_TRANS_TOUT_CHK BIT(25)
#define CONF_STATUS0_PROT_FAULTS_CHK BIT(24)
#define CONF_STATUS0_GPO_NUM(x) (((x) & GENMASK(23, 16)) >> 16)
#define CONF_STATUS0_GPI_NUM(x) (((x) & GENMASK(15, 8)) >> 8)
#define CONF_STATUS0_IBIR_DEPTH(x) (4 << (((x) & GENMASK(7, 6)) >> 7))
#define CONF_STATUS0_SUPPORTS_DDR BIT(5)
#define CONF_STATUS0_SEC_MASTER BIT(4)
#define CONF_STATUS0_DEVS_NUM(x) ((x) & GENMASK(3, 0))

#define CONF_STATUS1 0x8
#define CONF_STATUS1_IBI_HW_RES(x) ((((x) & GENMASK(31, 28)) >> 28) + 1)
#define CONF_STATUS1_CMD_DEPTH(x) (4 << (((x) & GENMASK(27, 26)) >> 26))
#define CONF_STATUS1_SLVDDR_RX_DEPTH(x) (8 << (((x) & GENMASK(25, 21)) >> 21))
#define CONF_STATUS1_SLVDDR_TX_DEPTH(x) (8 << (((x) & GENMASK(20, 16)) >> 16))
#define CONF_STATUS1_IBI_DEPTH(x) (2 << (((x) & GENMASK(12, 10)) >> 10))
#define CONF_STATUS1_RX_DEPTH(x) (8 << (((x) & GENMASK(9, 5)) >> 5))
#define CONF_STATUS1_TX_DEPTH(x) (8 << ((x) & GENMASK(4, 0)))

#define REV_ID 0xc
#define REV_ID_VID(id) (((id) & GENMASK(31, 20)) >> 20)
#define REV_ID_PID(id) (((id) & GENMASK(19, 8)) >> 8)
#define REV_ID_REV_MAJOR(id) (((id) & GENMASK(7, 4)) >> 4)
#define REV_ID_REV_MINOR(id) ((id) & GENMASK(3, 0))

#define CTRL 0x10
#define CTRL_DEV_EN BIT(31)
#define CTRL_HALT_EN BIT(30)
#define CTRL_MCS BIT(29)
#define CTRL_MCS_EN BIT(28)
#define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24))
#define CTRL_HJ_DISEC BIT(8)
#define CTRL_MST_ACK BIT(7)
#define CTRL_HJ_ACK BIT(6)
#define CTRL_HJ_INIT BIT(5)
#define CTRL_MST_INIT BIT(4)
#define CTRL_AHDR_OPT BIT(3)
#define CTRL_PURE_BUS_MODE 0
#define CTRL_MIXED_FAST_BUS_MODE 2
#define CTRL_MIXED_SLOW_BUS_MODE 3
#define CTRL_BUS_MODE_MASK GENMASK(1, 0)
#define THD_DELAY_MAX 3

#define PRESCL_CTRL0 0x14
#define PRESCL_CTRL0_I2C(x) ((x) << 16)
#define PRESCL_CTRL0_I3C(x) (x)
#define PRESCL_CTRL0_I3C_MAX GENMASK(9, 0)
#define PRESCL_CTRL0_I2C_MAX GENMASK(15, 0)

#define PRESCL_CTRL1 0x18
#define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8)
#define PRESCL_CTRL1_PP_LOW(x) ((x) << 8)
#define PRESCL_CTRL1_OD_LOW_MASK GENMASK(7, 0)
#define PRESCL_CTRL1_OD_LOW(x) (x)

#define MST_IER 0x20
#define MST_IDR 0x24
#define MST_IMR 0x28
#define MST_ICR 0x2c
#define MST_ISR 0x30
#define MST_INT_HALTED BIT(18)
#define MST_INT_MR_DONE BIT(17)
#define MST_INT_IMM_COMP BIT(16)
#define MST_INT_TX_THR BIT(15)
#define MST_INT_TX_OVF BIT(14)
#define MST_INT_IBID_THR BIT(12)
#define MST_INT_IBID_UNF BIT(11)
#define MST_INT_IBIR_THR BIT(10)
#define MST_INT_IBIR_UNF BIT(9)
#define MST_INT_IBIR_OVF BIT(8)
#define MST_INT_RX_THR BIT(7)
#define MST_INT_RX_UNF BIT(6)
#define MST_INT_CMDD_EMP BIT(5)
#define MST_INT_CMDD_THR BIT(4)
#define MST_INT_CMDD_OVF BIT(3)
#define MST_INT_CMDR_THR BIT(2)
#define MST_INT_CMDR_UNF BIT(1)
#define MST_INT_CMDR_OVF BIT(0)

#define MST_STATUS0 0x34
#define MST_STATUS0_IDLE BIT(18)
#define MST_STATUS0_HALTED BIT(17)
#define MST_STATUS0_MASTER_MODE BIT(16)
#define MST_STATUS0_TX_FULL BIT(13)
#define MST_STATUS0_IBID_FULL BIT(12)
#define MST_STATUS0_IBIR_FULL BIT(11)
#define MST_STATUS0_RX_FULL BIT(10)
#define MST_STATUS0_CMDD_FULL BIT(9)
#define MST_STATUS0_CMDR_FULL BIT(8)
#define MST_STATUS0_TX_EMP BIT(5)
#define MST_STATUS0_IBID_EMP BIT(4)
#define MST_STATUS0_IBIR_EMP BIT(3)
#define MST_STATUS0_RX_EMP BIT(2)
#define MST_STATUS0_CMDD_EMP BIT(1)
#define MST_STATUS0_CMDR_EMP BIT(0)

#define CMDR 0x38
#define CMDR_NO_ERROR 0
#define CMDR_DDR_PREAMBLE_ERROR 1
#define CMDR_DDR_PARITY_ERROR 2
#define CMDR_DDR_RX_FIFO_OVF 3
#define CMDR_DDR_TX_FIFO_UNF 4
#define CMDR_M0_ERROR 5
#define CMDR_M1_ERROR 6
#define CMDR_M2_ERROR 7
#define CMDR_MST_ABORT 8
#define CMDR_NACK_RESP 9
#define CMDR_INVALID_DA 10
#define CMDR_DDR_DROPPED 11
#define CMDR_ERROR(x) (((x) & GENMASK(27, 24)) >> 24)
#define CMDR_XFER_BYTES(x) (((x) & GENMASK(19, 8)) >> 8)
#define CMDR_CMDID_HJACK_DISEC 0xfe
#define CMDR_CMDID_HJACK_ENTDAA 0xff
#define CMDR_CMDID(x) ((x) & GENMASK(7, 0))

#define IBIR 0x3c
#define IBIR_ACKED BIT(12)
#define IBIR_SLVID(x) (((x) & GENMASK(11, 8)) >> 8)
#define IBIR_ERROR BIT(7)
#define IBIR_XFER_BYTES(x) (((x) & GENMASK(6, 2)) >> 2)
#define IBIR_TYPE_IBI 0
#define IBIR_TYPE_HJ 1
#define IBIR_TYPE_MR 2
#define IBIR_TYPE(x) ((x) & GENMASK(1, 0))

#define SLV_IER 0x40
#define SLV_IDR 0x44
#define SLV_IMR 0x48
#define SLV_ICR 0x4c
#define SLV_ISR 0x50
#define SLV_INT_TM BIT(20)
#define SLV_INT_ERROR BIT(19)
#define SLV_INT_EVENT_UP BIT(18)
#define SLV_INT_HJ_DONE BIT(17)
#define SLV_INT_MR_DONE BIT(16)
#define SLV_INT_DA_UPD BIT(15)
#define SLV_INT_SDR_FAIL BIT(14)
#define SLV_INT_DDR_FAIL BIT(13)
#define SLV_INT_M_RD_ABORT BIT(12)
#define SLV_INT_DDR_RX_THR BIT(11)
#define SLV_INT_DDR_TX_THR BIT(10)
#define SLV_INT_SDR_RX_THR BIT(9)
#define SLV_INT_SDR_TX_THR BIT(8)
#define SLV_INT_DDR_RX_UNF BIT(7)
#define SLV_INT_DDR_TX_OVF BIT(6)
#define SLV_INT_SDR_RX_UNF BIT(5)
#define SLV_INT_SDR_TX_OVF BIT(4)
#define SLV_INT_DDR_RD_COMP BIT(3)
#define SLV_INT_DDR_WR_COMP BIT(2)
#define SLV_INT_SDR_RD_COMP BIT(1)
#define SLV_INT_SDR_WR_COMP BIT(0)

#define SLV_STATUS0 0x54
#define SLV_STATUS0_REG_ADDR(s) (((s) & GENMASK(23, 16)) >> 16)
#define SLV_STATUS0_XFRD_BYTES(s) ((s) & GENMASK(15, 0))

#define SLV_STATUS1 0x58
#define SLV_STATUS1_AS(s) (((s) & GENMASK(21, 20)) >> 20)
#define SLV_STATUS1_VEN_TM BIT(19)
#define SLV_STATUS1_HJ_DIS BIT(18)
#define SLV_STATUS1_MR_DIS BIT(17)
#define SLV_STATUS1_PROT_ERR BIT(16)
#define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
#define SLV_STATUS1_HAS_DA BIT(8)
#define SLV_STATUS1_DDR_RX_FULL BIT(7)
#define SLV_STATUS1_DDR_TX_FULL BIT(6)
#define SLV_STATUS1_DDR_RX_EMPTY BIT(5)
#define SLV_STATUS1_DDR_TX_EMPTY BIT(4)
#define SLV_STATUS1_SDR_RX_FULL BIT(3)
#define SLV_STATUS1_SDR_TX_FULL BIT(2)
#define SLV_STATUS1_SDR_RX_EMPTY BIT(1)
#define SLV_STATUS1_SDR_TX_EMPTY BIT(0)

#define CMD0_FIFO 0x60
#define CMD0_FIFO_IS_DDR BIT(31)
#define CMD0_FIFO_IS_CCC BIT(30)
#define CMD0_FIFO_BCH BIT(29)
#define XMIT_BURST_STATIC_SUBADDR 0
#define XMIT_SINGLE_INC_SUBADDR 1
#define XMIT_SINGLE_STATIC_SUBADDR 2
#define XMIT_BURST_WITHOUT_SUBADDR 3
#define CMD0_FIFO_PRIV_XMIT_MODE(m) ((m) << 27)
#define CMD0_FIFO_SBCA BIT(26)
#define CMD0_FIFO_RSBC BIT(25)
#define CMD0_FIFO_IS_10B BIT(24)
#define CMD0_FIFO_PL_LEN(l) ((l) << 12)
#define CMD0_FIFO_PL_LEN_MAX 4095
#define CMD0_FIFO_DEV_ADDR(a) ((a) << 1)
#define CMD0_FIFO_RNW BIT(0)

#define CMD1_FIFO 0x64
#define CMD1_FIFO_CMDID(id) ((id) << 24)
#define CMD1_FIFO_CSRADDR(a) (a)
#define CMD1_FIFO_CCC(id) (id)

#define TX_FIFO 0x68

#define IMD_CMD0 0x70
#define IMD_CMD0_PL_LEN(l) ((l) << 12)
#define IMD_CMD0_DEV_ADDR(a) ((a) << 1)
#define IMD_CMD0_RNW BIT(0)

#define IMD_CMD1 0x74
#define IMD_CMD1_CCC(id) (id)

#define IMD_DATA 0x78
#define RX_FIFO 0x80
#define IBI_DATA_FIFO 0x84
#define SLV_DDR_TX_FIFO 0x88
#define SLV_DDR_RX_FIFO 0x8c

#define CMD_IBI_THR_CTRL 0x90
#define IBIR_THR(t) ((t) << 24)
#define CMDR_THR(t) ((t) << 16)
#define IBI_THR(t) ((t) << 8)
#define CMD_THR(t) (t)

#define TX_RX_THR_CTRL 0x94
#define RX_THR(t) ((t) << 16)
#define TX_THR(t) (t)

#define SLV_DDR_TX_RX_THR_CTRL 0x98
#define SLV_DDR_RX_THR(t) ((t) << 16)
#define SLV_DDR_TX_THR(t) (t)

#define FLUSH_CTRL 0x9c
#define FLUSH_IBI_RESP BIT(23)
#define FLUSH_CMD_RESP BIT(22)
#define FLUSH_SLV_DDR_RX_FIFO BIT(22)
#define FLUSH_SLV_DDR_TX_FIFO BIT(21)
#define FLUSH_IMM_FIFO BIT(20)
#define FLUSH_IBI_FIFO BIT(19)
#define FLUSH_RX_FIFO BIT(18)
#define FLUSH_TX_FIFO BIT(17)
#define FLUSH_CMD_FIFO BIT(16)

#define TTO_PRESCL_CTRL0 0xb0
#define TTO_PRESCL_CTRL0_DIVB(x) ((x) << 16)
#define TTO_PRESCL_CTRL0_DIVA(x) (x)

#define TTO_PRESCL_CTRL1 0xb4
#define TTO_PRESCL_CTRL1_DIVB(x) ((x) << 16)
#define TTO_PRESCL_CTRL1_DIVA(x) (x)

#define DEVS_CTRL 0xb8
#define DEVS_CTRL_DEV_CLR_SHIFT 16
#define DEVS_CTRL_DEV_CLR_ALL GENMASK(31, 16)
#define DEVS_CTRL_DEV_CLR(dev) BIT(16 + (dev))
#define DEVS_CTRL_DEV_ACTIVE(dev) BIT(dev)
#define DEVS_CTRL_DEVS_ACTIVE_MASK GENMASK(15, 0)
#define MAX_DEVS 16

#define DEV_ID_RR0(d) (0xc0 + ((d) * 0x10))
#define DEV_ID_RR0_LVR_EXT_ADDR BIT(11)
#define DEV_ID_RR0_HDR_CAP BIT(10)
#define DEV_ID_RR0_IS_I3C BIT(9)
#define DEV_ID_RR0_DEV_ADDR_MASK (GENMASK(6, 0) | GENMASK(15, 13))
#define DEV_ID_RR0_SET_DEV_ADDR(a) (((a) & GENMASK(6, 0)) | \
				    (((a) & GENMASK(9, 7)) << 6))
#define DEV_ID_RR0_GET_DEV_ADDR(x) ((((x) >> 1) & GENMASK(6, 0)) | \
				    (((x) >> 6) & GENMASK(9, 7)))

#define DEV_ID_RR1(d) (0xc4 + ((d) * 0x10))
#define DEV_ID_RR1_PID_MSB(pid) (pid)

#define DEV_ID_RR2(d) (0xc8 + ((d) * 0x10))
#define DEV_ID_RR2_PID_LSB(pid) ((pid) << 16)
#define DEV_ID_RR2_BCR(bcr) ((bcr) << 8)
#define DEV_ID_RR2_DCR(dcr) (dcr)
#define DEV_ID_RR2_LVR(lvr) (lvr)

#define SIR_MAP(x) (0x180 + ((x) * 4))
#define SIR_MAP_DEV_REG(d) SIR_MAP((d) / 2)
#define SIR_MAP_DEV_SHIFT(d, fs) ((fs) + (((d) % 2) ? 16 : 0))
#define SIR_MAP_DEV_CONF_MASK(d) (GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
#define SIR_MAP_DEV_CONF(d, c) ((c) << (((d) % 2) ? 16 : 0))
#define DEV_ROLE_SLAVE 0
#define DEV_ROLE_MASTER 1
#define SIR_MAP_DEV_ROLE(role) ((role) << 14)
#define SIR_MAP_DEV_SLOW BIT(13)
#define SIR_MAP_DEV_PL(l) ((l) << 8)
#define SIR_MAP_PL_MAX GENMASK(4, 0)
#define SIR_MAP_DEV_DA(a) ((a) << 1)
#define SIR_MAP_DEV_ACK BIT(0)

#define GPIR_WORD(x) (0x200 + ((x) * 4))
#define GPI_REG(val, id) \
	(((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))

#define GPOR_WORD(x) (0x220 + ((x) * 4))
#define GPO_REG(val, id) \
	(((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))

#define ASF_INT_STATUS 0x300
#define ASF_INT_RAW_STATUS 0x304
#define ASF_INT_MASK 0x308
#define ASF_INT_TEST 0x30c
#define ASF_INT_FATAL_SELECT 0x310
#define ASF_INTEGRITY_ERR BIT(6)
#define ASF_PROTOCOL_ERR BIT(5)
#define ASF_TRANS_TIMEOUT_ERR BIT(4)
#define ASF_CSR_ERR BIT(3)
#define ASF_DAP_ERR BIT(2)
#define ASF_SRAM_UNCORR_ERR BIT(1)
#define ASF_SRAM_CORR_ERR BIT(0)

#define ASF_SRAM_CORR_FAULT_STATUS 0x320
#define ASF_SRAM_UNCORR_FAULT_STATUS 0x324
#define ASF_SRAM_CORR_FAULT_INSTANCE(x) ((x) >> 24)
#define ASF_SRAM_CORR_FAULT_ADDR(x) ((x) & GENMASK(23, 0))

#define ASF_SRAM_FAULT_STATS 0x328
#define ASF_SRAM_FAULT_UNCORR_STATS(x) ((x) >> 16)
#define ASF_SRAM_FAULT_CORR_STATS(x) ((x) & GENMASK(15, 0))

#define ASF_TRANS_TOUT_CTRL 0x330
#define ASF_TRANS_TOUT_EN BIT(31)
#define ASF_TRANS_TOUT_VAL(x) (x)

#define ASF_TRANS_TOUT_FAULT_MASK 0x334
#define ASF_TRANS_TOUT_FAULT_STATUS 0x338
#define ASF_TRANS_TOUT_FAULT_APB BIT(3)
#define ASF_TRANS_TOUT_FAULT_SCL_LOW BIT(2)
#define ASF_TRANS_TOUT_FAULT_SCL_HIGH BIT(1)
#define ASF_TRANS_TOUT_FAULT_FSCL_HIGH BIT(0)

#define ASF_PROTO_FAULT_MASK 0x340
#define ASF_PROTO_FAULT_STATUS 0x344
#define ASF_PROTO_FAULT_SLVSDR_RD_ABORT BIT(31)
#define ASF_PROTO_FAULT_SLVDDR_FAIL BIT(30)
#define ASF_PROTO_FAULT_S(x) BIT(16 + (x))
#define ASF_PROTO_FAULT_MSTSDR_RD_ABORT BIT(15)
#define ASF_PROTO_FAULT_MSTDDR_FAIL BIT(14)
#define ASF_PROTO_FAULT_M(x) BIT(x)

struct cdns_i3c_master_caps {
	u32 cmdfifodepth;
	u32 cmdrfifodepth;
	u32 txfifodepth;
	u32 rxfifodepth;
	u32 ibirfifodepth;
};

struct cdns_i3c_cmd {
	u32 cmd0;
	u32 cmd1;
	u32 tx_len;
	const void *tx_buf;
	u32 rx_len;
	void *rx_buf;
	u32 error;
};

struct cdns_i3c_xfer {
	struct list_head node;
	struct completion comp;
	int ret;
	unsigned int ncmds;
	struct cdns_i3c_cmd cmds[] __counted_by(ncmds);
};

struct cdns_i3c_data {
	u8 thd_delay_ns;
};

struct cdns_i3c_master {
	struct work_struct hj_work;
	struct i3c_master_controller base;
	u32 free_rr_slots;
	unsigned int maxdevs;
	struct {
		unsigned int num_slots;
		struct i3c_dev_desc **slots;
		spinlock_t lock;
	} ibi;
	struct {
		struct list_head list;
		struct cdns_i3c_xfer *cur;
		spinlock_t lock;
	} xferqueue;
	void __iomem *regs;
	struct clk *sysclk;
	struct clk *pclk;
	struct cdns_i3c_master_caps caps;
	unsigned long i3c_scl_lim;
	const struct cdns_i3c_data *devdata;
};

static inline struct cdns_i3c_master *
to_cdns_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct cdns_i3c_master, base);
}

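/*
 * The TX/RX FIFOs are accessed one 32-bit word at a time: full words go
 * through writesl()/readsl(), and a trailing partial word, if any, is
 * staged through a temporary u32.
 */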
static void cdns_i3c_master_wr_to_tx_fifo(struct cdns_i3c_master *master,
					  const u8 *bytes, int nbytes)
{
	writesl(master->regs + TX_FIFO, bytes, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp = 0;

		memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
		writesl(master->regs + TX_FIFO, &tmp, 1);
	}
}

static void cdns_i3c_master_rd_from_rx_fifo(struct cdns_i3c_master *master,
					    u8 *bytes, int nbytes)
{
	readsl(master->regs + RX_FIFO, bytes, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp;

		readsl(master->regs + RX_FIFO, &tmp, 1);
		memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
	}
}

static bool cdns_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
					     const struct i3c_ccc_cmd *cmd)
{
	if (cmd->ndests > 1)
		return false;

	switch (cmd->id) {
	case I3C_CCC_ENEC(true):
	case I3C_CCC_ENEC(false):
	case I3C_CCC_DISEC(true):
	case I3C_CCC_DISEC(false):
	case I3C_CCC_ENTAS(0, true):
	case I3C_CCC_ENTAS(0, false):
	case I3C_CCC_RSTDAA(true):
	case I3C_CCC_RSTDAA(false):
	case I3C_CCC_ENTDAA:
	case I3C_CCC_SETMWL(true):
	case I3C_CCC_SETMWL(false):
	case I3C_CCC_SETMRL(true):
	case I3C_CCC_SETMRL(false):
	case I3C_CCC_DEFSLVS:
	case I3C_CCC_ENTHDR(0):
	case I3C_CCC_SETDASA:
	case I3C_CCC_SETNEWDA:
	case I3C_CCC_GETMWL:
	case I3C_CCC_GETMRL:
	case I3C_CCC_GETPID:
	case I3C_CCC_GETBCR:
	case I3C_CCC_GETDCR:
	case I3C_CCC_GETSTATUS:
	case I3C_CCC_GETACCMST:
	case I3C_CCC_GETMXDS:
	case I3C_CCC_GETHDRCAP:
		return true;
	default:
		break;
	}

	return false;
}

static int cdns_i3c_master_disable(struct cdns_i3c_master *master)
{
	u32 status;

	writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN, master->regs + CTRL);

	return readl_poll_timeout(master->regs + MST_STATUS0, status,
				  status & MST_STATUS0_IDLE, 10, 1000000);
}

static void cdns_i3c_master_enable(struct cdns_i3c_master *master)
{
	writel(readl(master->regs + CTRL) | CTRL_DEV_EN, master->regs + CTRL);
}

static struct cdns_i3c_xfer *
cdns_i3c_master_alloc_xfer(struct cdns_i3c_master *master, unsigned int ncmds)
{
	struct cdns_i3c_xfer *xfer;

	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
	if (!xfer)
		return NULL;

	INIT_LIST_HEAD(&xfer->node);
	xfer->ncmds = ncmds;
	xfer->ret = -ETIMEDOUT;

	return xfer;
}

static void cdns_i3c_master_free_xfer(struct cdns_i3c_xfer *xfer)
{
	kfree(xfer);
}

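/*
 * Must be called with xferqueue.lock held. Preloads the TX FIFO with the
 * payload of every command in the current transfer, pushes the commands to
 * the CMD FIFOs, kicks the command scheduler (CTRL_MCS) and unmasks the
 * "command descriptor FIFO empty" interrupt to get notified of completion.
 */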
static void cdns_i3c_master_start_xfer_locked(struct cdns_i3c_master *master)
{
	struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
	unsigned int i;

	if (!xfer)
		return;

	writel(MST_INT_CMDD_EMP, master->regs + MST_ICR);
	for (i = 0; i < xfer->ncmds; i++) {
		struct cdns_i3c_cmd *cmd = &xfer->cmds[i];

		cdns_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf,
					      cmd->tx_len);
	}

	for (i = 0; i < xfer->ncmds; i++) {
		struct cdns_i3c_cmd *cmd = &xfer->cmds[i];

		writel(cmd->cmd1 | CMD1_FIFO_CMDID(i),
		       master->regs + CMD1_FIFO);
		writel(cmd->cmd0, master->regs + CMD0_FIFO);
	}

	writel(readl(master->regs + CTRL) | CTRL_MCS,
	       master->regs + CTRL);
	writel(MST_INT_CMDD_EMP, master->regs + MST_IER);
}

static void cdns_i3c_master_end_xfer_locked(struct cdns_i3c_master *master,
					    u32 isr)
{
	struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
	int i, ret = 0;
	u32 status0;

	if (!xfer)
		return;

	if (!(isr & MST_INT_CMDD_EMP))
		return;

	writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);

	for (status0 = readl(master->regs + MST_STATUS0);
	     !(status0 & MST_STATUS0_CMDR_EMP);
	     status0 = readl(master->regs + MST_STATUS0)) {
		struct cdns_i3c_cmd *cmd;
		u32 cmdr, rx_len, id;

		cmdr = readl(master->regs + CMDR);
		id = CMDR_CMDID(cmdr);
		if (id == CMDR_CMDID_HJACK_DISEC ||
		    id == CMDR_CMDID_HJACK_ENTDAA ||
		    WARN_ON(id >= xfer->ncmds))
			continue;

		cmd = &xfer->cmds[CMDR_CMDID(cmdr)];
		rx_len = min_t(u32, CMDR_XFER_BYTES(cmdr), cmd->rx_len);
		cdns_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
		cmd->error = CMDR_ERROR(cmdr);
	}

	for (i = 0; i < xfer->ncmds; i++) {
		switch (xfer->cmds[i].error) {
		case CMDR_NO_ERROR:
			break;

		case CMDR_DDR_PREAMBLE_ERROR:
		case CMDR_DDR_PARITY_ERROR:
		case CMDR_M0_ERROR:
		case CMDR_M1_ERROR:
		case CMDR_M2_ERROR:
		case CMDR_MST_ABORT:
		case CMDR_NACK_RESP:
		case CMDR_DDR_DROPPED:
			ret = -EIO;
			break;

		case CMDR_DDR_RX_FIFO_OVF:
		case CMDR_DDR_TX_FIFO_UNF:
			ret = -ENOSPC;
			break;

		case CMDR_INVALID_DA:
		default:
			ret = -EINVAL;
			break;
		}
	}

	xfer->ret = ret;
	complete(&xfer->comp);

	xfer = list_first_entry_or_null(&master->xferqueue.list,
					struct cdns_i3c_xfer, node);
	if (xfer)
		list_del_init(&xfer->node);

	master->xferqueue.cur = xfer;
	cdns_i3c_master_start_xfer_locked(master);
}

static void cdns_i3c_master_queue_xfer(struct cdns_i3c_master *master,
				       struct cdns_i3c_xfer *xfer)
{
	unsigned long flags;

	init_completion(&xfer->comp);
	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur) {
		list_add_tail(&xfer->node, &master->xferqueue.list);
	} else {
		master->xferqueue.cur = xfer;
		cdns_i3c_master_start_xfer_locked(master);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

static void cdns_i3c_master_unqueue_xfer(struct cdns_i3c_master *master,
					 struct cdns_i3c_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur == xfer) {
		u32 status;

		writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN,
		       master->regs + CTRL);
		readl_poll_timeout_atomic(master->regs + MST_STATUS0, status,
					  status & MST_STATUS0_IDLE, 10,
					  1000000);
		master->xferqueue.cur = NULL;
		writel(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO |
		       FLUSH_CMD_RESP,
		       master->regs + FLUSH_CTRL);
		writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
		writel(readl(master->regs + CTRL) | CTRL_DEV_EN,
		       master->regs + CTRL);
	} else {
		list_del_init(&xfer->node);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

static enum i3c_error_code cdns_i3c_cmd_get_err(struct cdns_i3c_cmd *cmd)
{
	switch (cmd->error) {
	case CMDR_M0_ERROR:
		return I3C_ERROR_M0;

	case CMDR_M1_ERROR:
		return I3C_ERROR_M1;

	case CMDR_M2_ERROR:
	case CMDR_NACK_RESP:
		return I3C_ERROR_M2;

	default:
		break;
	}

	return I3C_ERROR_UNKNOWN;
}

static int cdns_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
					struct i3c_ccc_cmd *cmd)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_xfer *xfer;
	struct cdns_i3c_cmd *ccmd;
	int ret;

	xfer = cdns_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	ccmd = xfer->cmds;
	ccmd->cmd1 = CMD1_FIFO_CCC(cmd->id);
	ccmd->cmd0 = CMD0_FIFO_IS_CCC |
		     CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len);

	if (cmd->id & I3C_CCC_DIRECT)
		ccmd->cmd0 |= CMD0_FIFO_DEV_ADDR(cmd->dests[0].addr);

	if (cmd->rnw) {
		ccmd->cmd0 |= CMD0_FIFO_RNW;
		ccmd->rx_buf = cmd->dests[0].payload.data;
		ccmd->rx_len = cmd->dests[0].payload.len;
	} else {
		ccmd->tx_buf = cmd->dests[0].payload.data;
		ccmd->tx_len = cmd->dests[0].payload.len;
	}

	cdns_i3c_master_queue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		cdns_i3c_master_unqueue_xfer(master, xfer);

	ret = xfer->ret;
	cmd->err = cdns_i3c_cmd_get_err(&xfer->cmds[0]);
	cdns_i3c_master_free_xfer(xfer);

	return ret;
}

static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
				      struct i3c_priv_xfer *xfers,
				      int nxfers)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	int txslots = 0, rxslots = 0, i, ret;
	struct cdns_i3c_xfer *cdns_xfer;

	for (i = 0; i < nxfers; i++) {
		if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
			return -ENOTSUPP;
	}

	if (!nxfers)
		return 0;

	if (nxfers > master->caps.cmdfifodepth ||
	    nxfers > master->caps.cmdrfifodepth)
		return -ENOTSUPP;

	/*
	 * First make sure that all transactions (block of transfers separated
	 * by a STOP marker) fit in the FIFOs.
	 */
	for (i = 0; i < nxfers; i++) {
		if (xfers[i].rnw)
			rxslots += DIV_ROUND_UP(xfers[i].len, 4);
		else
			txslots += DIV_ROUND_UP(xfers[i].len, 4);
	}

	if (rxslots > master->caps.rxfifodepth ||
	    txslots > master->caps.txfifodepth)
		return -ENOTSUPP;

	cdns_xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
	if (!cdns_xfer)
		return -ENOMEM;

	for (i = 0; i < nxfers; i++) {
		struct cdns_i3c_cmd *ccmd = &cdns_xfer->cmds[i];
		u32 pl_len = xfers[i].len;

		ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(dev->info.dyn_addr) |
			CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);

		if (xfers[i].rnw) {
			ccmd->cmd0 |= CMD0_FIFO_RNW;
			ccmd->rx_buf = xfers[i].data.in;
			ccmd->rx_len = xfers[i].len;
			pl_len++;
		} else {
			ccmd->tx_buf = xfers[i].data.out;
			ccmd->tx_len = xfers[i].len;
		}

		ccmd->cmd0 |= CMD0_FIFO_PL_LEN(pl_len);

		if (i < nxfers - 1)
			ccmd->cmd0 |= CMD0_FIFO_RSBC;

		if (!i)
			ccmd->cmd0 |= CMD0_FIFO_BCH;
	}

	cdns_i3c_master_queue_xfer(master, cdns_xfer);
	if (!wait_for_completion_timeout(&cdns_xfer->comp,
					 msecs_to_jiffies(1000)))
		cdns_i3c_master_unqueue_xfer(master, cdns_xfer);

	ret = cdns_xfer->ret;

	for (i = 0; i < nxfers; i++)
		xfers[i].err = cdns_i3c_cmd_get_err(&cdns_xfer->cmds[i]);

	cdns_i3c_master_free_xfer(cdns_xfer);

	return ret;
}

static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
				     const struct i2c_msg *xfers, int nxfers)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	unsigned int nrxwords = 0, ntxwords = 0;
	struct cdns_i3c_xfer *xfer;
	int i, ret = 0;

	if (nxfers > master->caps.cmdfifodepth)
		return -ENOTSUPP;

	for (i = 0; i < nxfers; i++) {
		if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
			return -ENOTSUPP;

		if (xfers[i].flags & I2C_M_RD)
			nrxwords += DIV_ROUND_UP(xfers[i].len, 4);
		else
			ntxwords += DIV_ROUND_UP(xfers[i].len, 4);
	}

	if (ntxwords > master->caps.txfifodepth ||
	    nrxwords > master->caps.rxfifodepth)
		return -ENOTSUPP;

	xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
	if (!xfer)
		return -ENOMEM;

	for (i = 0; i < nxfers; i++) {
		struct cdns_i3c_cmd *ccmd = &xfer->cmds[i];

		ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(xfers[i].addr) |
			CMD0_FIFO_PL_LEN(xfers[i].len) |
			CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);

		if (xfers[i].flags & I2C_M_TEN)
			ccmd->cmd0 |= CMD0_FIFO_IS_10B;

		if (xfers[i].flags & I2C_M_RD) {
			ccmd->cmd0 |= CMD0_FIFO_RNW;
			ccmd->rx_buf = xfers[i].buf;
			ccmd->rx_len = xfers[i].len;
		} else {
			ccmd->tx_buf = xfers[i].buf;
			ccmd->tx_len = xfers[i].len;
		}
	}

	cdns_i3c_master_queue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		cdns_i3c_master_unqueue_xfer(master, xfer);

	ret = xfer->ret;
	cdns_i3c_master_free_xfer(xfer);

	return ret;
}

struct cdns_i3c_i2c_dev_data {
	u16 id;
	s16 ibi;
	struct i3c_generic_ibi_pool *ibi_pool;
};

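/*
 * Encode a device address in the DEV_ID_RR0 layout: addr[6:0] in RR0[7:1],
 * addr[9:7] in RR0[15:13], and the inverted XOR (parity) of addr[6:0] in
 * RR0[0].
 */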
static u32 prepare_rr0_dev_address(u32 addr)
{
	u32 ret = (addr << 1) & 0xff;

	/* RR0[7:1] = addr[6:0] */
	ret |= (addr & GENMASK(6, 0)) << 1;

	/* RR0[15:13] = addr[9:7] */
	ret |= (addr & GENMASK(9, 7)) << 6;

	/* RR0[0] = ~XOR(addr[6:0]) */
	if (!(hweight8(addr & 0x7f) & 1))
		ret |= 1;

	return ret;
}

static void cdns_i3c_master_upd_i3c_addr(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	u32 rr;

	rr = prepare_rr0_dev_address(dev->info.dyn_addr ?
				     dev->info.dyn_addr :
				     dev->info.static_addr);
	writel(DEV_ID_RR0_IS_I3C | rr, master->regs + DEV_ID_RR0(data->id));
}

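/*
 * Pick a retaining register (RR) slot: when no dynamic address is given,
 * return the first free slot; otherwise return the slot already holding
 * that I3C dynamic address, or -EINVAL if none matches.
 */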
static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
				       u8 dyn_addr)
{
	unsigned long activedevs;
	u32 rr;
	int i;

	if (!dyn_addr) {
		if (!master->free_rr_slots)
			return -ENOSPC;

		return ffs(master->free_rr_slots) - 1;
	}

	activedevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
	activedevs &= ~BIT(0);

	for_each_set_bit(i, &activedevs, master->maxdevs + 1) {
		rr = readl(master->regs + DEV_ID_RR0(i));
		if (!(rr & DEV_ID_RR0_IS_I3C) ||
		    DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr)
			continue;

		return i;
	}

	return -EINVAL;
}

static int cdns_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
					    u8 old_dyn_addr)
{
	cdns_i3c_master_upd_i3c_addr(dev);

	return 0;
}

static int cdns_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data;
	int slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	slot = cdns_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
	if (slot < 0) {
		kfree(data);
		return slot;
	}

	data->ibi = -1;
	data->id = slot;
	i3c_dev_set_master_data(dev, data);
	master->free_rr_slots &= ~BIT(slot);

	if (!dev->info.dyn_addr) {
		cdns_i3c_master_upd_i3c_addr(dev);
		writel(readl(master->regs + DEVS_CTRL) |
		       DEVS_CTRL_DEV_ACTIVE(data->id),
		       master->regs + DEVS_CTRL);
	}

	return 0;
}

static void cdns_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_CLR(data->id),
	       master->regs + DEVS_CTRL);

	i3c_dev_set_master_data(dev, NULL);
	master->free_rr_slots |= BIT(data->id);
	kfree(data);
}

static int cdns_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data;
	int slot;

	slot = cdns_i3c_master_get_rr_slot(master, 0);
	if (slot < 0)
		return slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->id = slot;
	master->free_rr_slots &= ~BIT(slot);
	i2c_dev_set_master_data(dev, data);

	writel(prepare_rr0_dev_address(dev->addr),
	       master->regs + DEV_ID_RR0(data->id));
	writel(dev->lvr, master->regs + DEV_ID_RR2(data->id));
	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_ACTIVE(data->id),
	       master->regs + DEVS_CTRL);

	return 0;
}

static void cdns_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);

	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_CLR(data->id),
	       master->regs + DEVS_CTRL);
	master->free_rr_slots |= BIT(data->id);

	i2c_dev_set_master_data(dev, NULL);
	kfree(data);
}

static void cdns_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);

	cdns_i3c_master_disable(master);
}

static void cdns_i3c_master_dev_rr_to_info(struct cdns_i3c_master *master,
					   unsigned int slot,
					   struct i3c_device_info *info)
{
	u32 rr;

	memset(info, 0, sizeof(*info));
	rr = readl(master->regs + DEV_ID_RR0(slot));
	info->dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(rr);
	rr = readl(master->regs + DEV_ID_RR2(slot));
	info->dcr = rr;
	info->bcr = rr >> 8;
	info->pid = rr >> 16;
	info->pid |= (u64)readl(master->regs + DEV_ID_RR1(slot)) << 16;
}

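/*
 * Walk the I3C devices on the bus and derive the most constraining SDR
 * frequency limit from their GETMXDS (max read/write data speed)
 * information, then stretch the push-pull low period (PRESCL_CTRL1_PP_LOW)
 * so that limit is respected.
 */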
static void cdns_i3c_master_upd_i3c_scl_lim(struct cdns_i3c_master *master)
{
	struct i3c_master_controller *m = &master->base;
	unsigned long i3c_lim_period, pres_step, ncycles;
	struct i3c_bus *bus = i3c_master_get_bus(m);
	unsigned long new_i3c_scl_lim = 0;
	struct i3c_dev_desc *dev;
	u32 prescl1, ctrl;

	i3c_bus_for_each_i3cdev(bus, dev) {
		unsigned long max_fscl;

		max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
			       I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
		switch (max_fscl) {
		case I3C_SDR1_FSCL_8MHZ:
			max_fscl = 8000000;
			break;
		case I3C_SDR2_FSCL_6MHZ:
			max_fscl = 6000000;
			break;
		case I3C_SDR3_FSCL_4MHZ:
			max_fscl = 4000000;
			break;
		case I3C_SDR4_FSCL_2MHZ:
			max_fscl = 2000000;
			break;
		case I3C_SDR0_FSCL_MAX:
		default:
			max_fscl = 0;
			break;
		}

		if (max_fscl &&
		    (new_i3c_scl_lim > max_fscl || !new_i3c_scl_lim))
			new_i3c_scl_lim = max_fscl;
	}

	/* Only update PRESCL_CTRL1 if the I3C SCL limitation has changed. */
	if (new_i3c_scl_lim == master->i3c_scl_lim)
		return;
	master->i3c_scl_lim = new_i3c_scl_lim;
	if (!new_i3c_scl_lim)
		return;
	pres_step = 1000000000UL / (bus->scl_rate.i3c * 4);

	/* Configure PP_LOW to meet I3C slave limitations. */
	prescl1 = readl(master->regs + PRESCL_CTRL1) &
		  ~PRESCL_CTRL1_PP_LOW_MASK;
	ctrl = readl(master->regs + CTRL);

	i3c_lim_period = DIV_ROUND_UP(1000000000, master->i3c_scl_lim);
	ncycles = DIV_ROUND_UP(i3c_lim_period, pres_step);
	if (ncycles < 4)
		ncycles = 0;
	else
		ncycles -= 4;

	prescl1 |= PRESCL_CTRL1_PP_LOW(ncycles);

	/* Disable I3C master before updating PRESCL_CTRL1. */
	if (ctrl & CTRL_DEV_EN)
		cdns_i3c_master_disable(master);

	writel(prescl1, master->regs + PRESCL_CTRL1);

	if (ctrl & CTRL_DEV_EN)
		cdns_i3c_master_enable(master);
}

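/*
 * Dynamic Address Assignment: pre-program every free RR slot with a
 * candidate address, run ENTDAA, register the devices that showed up,
 * clear the slots left unused, broadcast DEFSLVS and finally re-enable
 * Hot-Join and Mastership request events.
 */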
static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	unsigned long olddevs, newdevs;
	int ret, slot;
	u8 addrs[MAX_DEVS] = { };
	u8 last_addr = 0;

	olddevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
	olddevs |= BIT(0);

	/* Prepare RR slots before launching DAA. */
	for_each_clear_bit(slot, &olddevs, master->maxdevs + 1) {
		ret = i3c_master_get_free_addr(m, last_addr + 1);
		if (ret < 0)
			return -ENOSPC;

		last_addr = ret;
		addrs[slot] = last_addr;
		writel(prepare_rr0_dev_address(last_addr) | DEV_ID_RR0_IS_I3C,
		       master->regs + DEV_ID_RR0(slot));
		writel(0, master->regs + DEV_ID_RR1(slot));
		writel(0, master->regs + DEV_ID_RR2(slot));
	}

	ret = i3c_master_entdaa_locked(&master->base);
	if (ret && ret != I3C_ERROR_M2)
		return ret;

	newdevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
	newdevs &= ~olddevs;

	/*
	 * Add the devices discovered during DAA. We already have the
	 * addresses assigned to them in the addrs array.
	 */
	for_each_set_bit(slot, &newdevs, master->maxdevs + 1)
		i3c_master_add_i3c_dev_locked(m, addrs[slot]);

	/*
	 * Clear slots that ended up not being used. Can be caused by I3C
	 * device creation failure or when the I3C device was already known
	 * by the system but with a different address (in this case the device
	 * already has a slot and does not need a new one).
	 */
	writel(readl(master->regs + DEVS_CTRL) |
	       master->free_rr_slots << DEVS_CTRL_DEV_CLR_SHIFT,
	       master->regs + DEVS_CTRL);

	i3c_master_defslvs_locked(&master->base);

	cdns_i3c_master_upd_i3c_scl_lim(master);

	/* Unmask Hot-Join and Mastership request interrupts. */
	i3c_master_enec_locked(m, I3C_BROADCAST_ADDR,
			       I3C_CCC_EVENT_HJ | I3C_CCC_EVENT_MR);

	return 0;
}

static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master)
{
	unsigned long sysclk_rate = clk_get_rate(master->sysclk);
	u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns,
				    (NSEC_PER_SEC / sysclk_rate));

	/* Every value greater than 3 is not valid. */
	if (thd_delay > THD_DELAY_MAX)
		thd_delay = THD_DELAY_MAX;

	/* The CTRL_THD_DELAY value is encoded. */
	return (THD_DELAY_MAX - thd_delay);
}

static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	unsigned long pres_step, sysclk_rate, max_i2cfreq;
	struct i3c_bus *bus = i3c_master_get_bus(m);
	u32 ctrl, prescl0, prescl1, pres, low;
	struct i3c_device_info info = { };
	int ret, ncycles;

	switch (bus->mode) {
	case I3C_BUS_MODE_PURE:
		ctrl = CTRL_PURE_BUS_MODE;
		break;

	case I3C_BUS_MODE_MIXED_FAST:
		ctrl = CTRL_MIXED_FAST_BUS_MODE;
		break;

	case I3C_BUS_MODE_MIXED_SLOW:
		ctrl = CTRL_MIXED_SLOW_BUS_MODE;
		break;

	default:
		return -EINVAL;
	}

	sysclk_rate = clk_get_rate(master->sysclk);
	if (!sysclk_rate)
		return -EINVAL;

	pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
	if (pres > PRESCL_CTRL0_I3C_MAX)
		return -ERANGE;

	bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);

	prescl0 = PRESCL_CTRL0_I3C(pres);

	low = ((I3C_BUS_TLOW_OD_MIN_NS * sysclk_rate) / (pres + 1)) - 2;
	prescl1 = PRESCL_CTRL1_OD_LOW(low);

	max_i2cfreq = bus->scl_rate.i2c;

	pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
	if (pres > PRESCL_CTRL0_I2C_MAX)
		return -ERANGE;

	bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);

	prescl0 |= PRESCL_CTRL0_I2C(pres);
	writel(prescl0, master->regs + PRESCL_CTRL0);

	/* Calculate OD and PP low. */
	pres_step = 1000000000 / (bus->scl_rate.i3c * 4);
	ncycles = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;
	if (ncycles < 0)
		ncycles = 0;
	prescl1 = PRESCL_CTRL1_OD_LOW(ncycles);
	writel(prescl1, master->regs + PRESCL_CTRL1);

	/* Get an address for the master. */
	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		return ret;

	writel(prepare_rr0_dev_address(ret) | DEV_ID_RR0_IS_I3C,
	       master->regs + DEV_ID_RR0(0));

	cdns_i3c_master_dev_rr_to_info(master, 0, &info);
	if (info.bcr & I3C_BCR_HDR_CAP)
		info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR);

	ret = i3c_master_set_info(&master->base, &info);
	if (ret)
		return ret;

	/*
	 * Enable Hot-Join, and, when a Hot-Join request happens, disable all
	 * events coming from this device.
	 *
	 * We will issue ENTDAA afterwards from the threaded IRQ handler.
	 */
	ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;

	/*
	 * Configure data hold delay based on device-specific data.
	 *
	 * MIPI I3C Specification 1.0 defines a non-zero minimal tHD_PP timing
	 * on the master output. This setting makes it possible to meet this
	 * timing on the master's SoC outputs, regardless of PCB balancing.
	 */
	ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master));
	writel(ctrl, master->regs + CTRL);

	cdns_i3c_master_enable(master);

	return 0;
}

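/*
 * Handle one In-Band Interrupt: copy its payload from the IBI data FIFO
 * into a slot from the device's generic IBI pool and queue it to the I3C
 * core. The FIFO is always drained, even when the IBI is dropped.
 */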
static void cdns_i3c_master_handle_ibi(struct cdns_i3c_master *master,
				       u32 ibir)
{
	struct cdns_i3c_i2c_dev_data *data;
	bool data_consumed = false;
	struct i3c_ibi_slot *slot;
	u32 id = IBIR_SLVID(ibir);
	struct i3c_dev_desc *dev;
	size_t nbytes;
	u8 *buf;

	/*
	 * FIXME: maybe we should report the FIFO OVF errors to the upper
	 * layer.
	 */
	if (id >= master->ibi.num_slots || (ibir & IBIR_ERROR))
		goto out;

	dev = master->ibi.slots[id];
	spin_lock(&master->ibi.lock);

	data = i3c_dev_get_master_data(dev);
	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
	if (!slot)
		goto out_unlock;

	buf = slot->data;

	nbytes = IBIR_XFER_BYTES(ibir);
	readsl(master->regs + IBI_DATA_FIFO, buf, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp = __raw_readl(master->regs + IBI_DATA_FIFO);

		memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
	}

	slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir),
			  dev->ibi->max_payload_len);
	i3c_master_queue_ibi(dev, slot);
	data_consumed = true;

out_unlock:
	spin_unlock(&master->ibi.lock);

out:
	/* Consume data from the FIFO if it's not been done already. */
	if (!data_consumed) {
		int i;

		for (i = 0; i < IBIR_XFER_BYTES(ibir); i += 4)
			readl(master->regs + IBI_DATA_FIFO);
	}
}

static void cnds_i3c_master_demux_ibis(struct cdns_i3c_master *master)
{
	u32 status0;

	writel(MST_INT_IBIR_THR, master->regs + MST_ICR);

	for (status0 = readl(master->regs + MST_STATUS0);
	     !(status0 & MST_STATUS0_IBIR_EMP);
	     status0 = readl(master->regs + MST_STATUS0)) {
		u32 ibir = readl(master->regs + IBIR);

		switch (IBIR_TYPE(ibir)) {
		case IBIR_TYPE_IBI:
			cdns_i3c_master_handle_ibi(master, ibir);
			break;

		case IBIR_TYPE_HJ:
			WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
			queue_work(master->base.wq, &master->hj_work);
			break;

		case IBIR_TYPE_MR:
			WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
			break;

		default:
			break;
		}
	}
}

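/*
 * Top-level interrupt handler: complete the transfer currently on the
 * command FIFOs and, when the IBI response threshold is reached, demux the
 * pending IBI/Hot-Join/Mastership requests.
 */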
static irqreturn_t cdns_i3c_master_interrupt(int irq, void *data)
{
	struct cdns_i3c_master *master = data;
	u32 status;

	status = readl(master->regs + MST_ISR);
	if (!(status & readl(master->regs + MST_IMR)))
		return IRQ_NONE;

	spin_lock(&master->xferqueue.lock);
	cdns_i3c_master_end_xfer_locked(master, status);
	spin_unlock(&master->xferqueue.lock);

	if (status & MST_INT_IBIR_THR)
		cnds_i3c_master_demux_ibis(master);

	return IRQ_HANDLED;
}

static int cdns_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	u32 sirmap;
	int ret;

	ret = i3c_master_disec_locked(m, dev->info.dyn_addr,
				      I3C_CCC_EVENT_SIR);
	if (ret)
		return ret;

	spin_lock_irqsave(&master->ibi.lock, flags);
	sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
	sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
	sirmap |= SIR_MAP_DEV_CONF(data->ibi,
				   SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
	writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	return ret;
}

static int cdns_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	u32 sircfg, sirmap;
	int ret;

	spin_lock_irqsave(&master->ibi.lock, flags);
	sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
	sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
	sircfg = SIR_MAP_DEV_ROLE(dev->info.bcr >> 6) |
		 SIR_MAP_DEV_DA(dev->info.dyn_addr) |
		 SIR_MAP_DEV_PL(dev->info.max_ibi_len) |
		 SIR_MAP_DEV_ACK;

	if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM)
		sircfg |= SIR_MAP_DEV_SLOW;

	sirmap |= SIR_MAP_DEV_CONF(data->ibi, sircfg);
	writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	ret = i3c_master_enec_locked(m, dev->info.dyn_addr,
				     I3C_CCC_EVENT_SIR);
	if (ret) {
		spin_lock_irqsave(&master->ibi.lock, flags);
		sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
		sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
		sirmap |= SIR_MAP_DEV_CONF(data->ibi,
					   SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
		writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
		spin_unlock_irqrestore(&master->ibi.lock, flags);
	}

	return ret;
}

static int cdns_i3c_master_request_ibi(struct i3c_dev_desc *dev,
				       const struct i3c_ibi_setup *req)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	unsigned int i;

	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(data->ibi_pool))
		return PTR_ERR(data->ibi_pool);

	spin_lock_irqsave(&master->ibi.lock, flags);
	for (i = 0; i < master->ibi.num_slots; i++) {
		if (!master->ibi.slots[i]) {
			data->ibi = i;
			master->ibi.slots[i] = dev;
			break;
		}
	}
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	if (i < master->ibi.num_slots)
		return 0;

	i3c_generic_ibi_free_pool(data->ibi_pool);
	data->ibi_pool = NULL;

	return -ENOSPC;
}

static void cdns_i3c_master_free_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;

	spin_lock_irqsave(&master->ibi.lock, flags);
	master->ibi.slots[data->ibi] = NULL;
	data->ibi = -1;
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	i3c_generic_ibi_free_pool(data->ibi_pool);
}

static void cdns_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
					     struct i3c_ibi_slot *slot)
{
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
}

static const struct i3c_master_controller_ops cdns_i3c_master_ops = {
	.bus_init = cdns_i3c_master_bus_init,
	.bus_cleanup = cdns_i3c_master_bus_cleanup,
	.do_daa = cdns_i3c_master_do_daa,
	.attach_i3c_dev = cdns_i3c_master_attach_i3c_dev,
	.reattach_i3c_dev = cdns_i3c_master_reattach_i3c_dev,
	.detach_i3c_dev = cdns_i3c_master_detach_i3c_dev,
	.attach_i2c_dev = cdns_i3c_master_attach_i2c_dev,
	.detach_i2c_dev = cdns_i3c_master_detach_i2c_dev,
	.supports_ccc_cmd = cdns_i3c_master_supports_ccc_cmd,
	.send_ccc_cmd = cdns_i3c_master_send_ccc_cmd,
	.priv_xfers = cdns_i3c_master_priv_xfers,
	.i2c_xfers = cdns_i3c_master_i2c_xfers,
	.enable_ibi = cdns_i3c_master_enable_ibi,
	.disable_ibi = cdns_i3c_master_disable_ibi,
	.request_ibi = cdns_i3c_master_request_ibi,
	.free_ibi = cdns_i3c_master_free_ibi,
	.recycle_ibi_slot = cdns_i3c_master_recycle_ibi_slot,
};

static void cdns_i3c_master_hj(struct work_struct *work)
{
	struct cdns_i3c_master *master = container_of(work,
						      struct cdns_i3c_master,
						      hj_work);

	i3c_master_do_daa(&master->base);
}

static struct cdns_i3c_data cdns_i3c_devdata = {
	.thd_delay_ns = 10,
};

static const struct of_device_id cdns_i3c_master_of_ids[] = {
	{ .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cdns_i3c_master_of_ids);

static int cdns_i3c_master_probe(struct platform_device *pdev)
{
	struct cdns_i3c_master *master;
	int ret, irq;
	u32 val;

	master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->devdata = of_device_get_match_data(&pdev->dev);
	if (!master->devdata)
		return -EINVAL;

	master->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(master->regs))
		return PTR_ERR(master->regs);

	master->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(master->pclk))
		return PTR_ERR(master->pclk);

	master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
	if (IS_ERR(master->sysclk))
		return PTR_ERR(master->sysclk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = clk_prepare_enable(master->pclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(master->sysclk);
	if (ret)
		goto err_disable_pclk;

	if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER) {
		ret = -EINVAL;
		goto err_disable_sysclk;
	}

	spin_lock_init(&master->xferqueue.lock);
	INIT_LIST_HEAD(&master->xferqueue.list);

	INIT_WORK(&master->hj_work, cdns_i3c_master_hj);
	writel(0xffffffff, master->regs + MST_IDR);
	writel(0xffffffff, master->regs + SLV_IDR);
	ret = devm_request_irq(&pdev->dev, irq, cdns_i3c_master_interrupt, 0,
			       dev_name(&pdev->dev), master);
	if (ret)
		goto err_disable_sysclk;

	platform_set_drvdata(pdev, master);

	val = readl(master->regs + CONF_STATUS0);

	/* Device ID0 is reserved to describe this master. */
	master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
	master->free_rr_slots = GENMASK(master->maxdevs, 1);
	master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
	master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);

	val = readl(master->regs + CONF_STATUS1);
	master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
	master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
	master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);

	spin_lock_init(&master->ibi.lock);
	master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
					 sizeof(*master->ibi.slots),
					 GFP_KERNEL);
	if (!master->ibi.slots) {
		ret = -ENOMEM;
		goto err_disable_sysclk;
	}

	writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
	writel(MST_INT_IBIR_THR, master->regs + MST_IER);
	writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL);

	ret = i3c_master_register(&master->base, &pdev->dev,
				  &cdns_i3c_master_ops, false);
	if (ret)
		goto err_disable_sysclk;

	return 0;

err_disable_sysclk:
	clk_disable_unprepare(master->sysclk);

err_disable_pclk:
	clk_disable_unprepare(master->pclk);

	return ret;
}

static void cdns_i3c_master_remove(struct platform_device *pdev)
{
	struct cdns_i3c_master *master = platform_get_drvdata(pdev);

	cancel_work_sync(&master->hj_work);
	i3c_master_unregister(&master->base);

	clk_disable_unprepare(master->sysclk);
	clk_disable_unprepare(master->pclk);
}

static struct platform_driver cdns_i3c_master = {
	.probe = cdns_i3c_master_probe,
	.remove_new = cdns_i3c_master_remove,
	.driver = {
		.name = "cdns-i3c-master",
		.of_match_table = cdns_i3c_master_of_ids,
	},
};
module_platform_driver(cdns_i3c_master);

MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
MODULE_DESCRIPTION("Cadence I3C master driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:cdns-i3c-master");