1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Silvaco dual-role I3C master driver
4  *
5  * Copyright (C) 2020 Silvaco
6  * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
7  * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/completion.h>
13 #include <linux/errno.h>
14 #include <linux/i3c/master.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23 
24 /* Master Mode Registers */
25 #define SVC_I3C_MCONFIG      0x000
26 #define   SVC_I3C_MCONFIG_MASTER_EN BIT(0)
27 #define   SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
28 #define   SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
29 #define   SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
30 #define   SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
31 #define   SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
32 #define   SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
33 #define   SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
34 #define   SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
35 #define   SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
36 
37 #define SVC_I3C_MCTRL        0x084
38 #define   SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
39 #define   SVC_I3C_MCTRL_REQUEST_NONE 0
40 #define   SVC_I3C_MCTRL_REQUEST_START_ADDR 1
41 #define   SVC_I3C_MCTRL_REQUEST_STOP 2
42 #define   SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
43 #define   SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
44 #define   SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
45 #define   SVC_I3C_MCTRL_TYPE_I3C 0
46 #define   SVC_I3C_MCTRL_TYPE_I2C BIT(4)
47 #define   SVC_I3C_MCTRL_IBIRESP_AUTO 0
48 #define   SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
49 #define   SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
50 #define   SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
51 #define   SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
52 #define   SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
53 #define   SVC_I3C_MCTRL_DIR_WRITE 0
54 #define   SVC_I3C_MCTRL_DIR_READ 1
55 #define   SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
56 #define   SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
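/*
 * Illustrative sketch only (the address and length below are made-up values):
 * a private-read START request for a target at dynamic address 0x09, limited
 * to 8 bytes, would typically be composed as
 *
 *   SVC_I3C_MCTRL_REQUEST_START_ADDR | SVC_I3C_MCTRL_TYPE_I3C |
 *   SVC_I3C_MCTRL_IBIRESP_NACK | SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_READ) |
 *   SVC_I3C_MCTRL_ADDR(0x09) | SVC_I3C_MCTRL_RDTERM(8)
 *
 * mirroring how svc_i3c_master_xfer() builds its requests below.
 */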
57 
58 #define SVC_I3C_MSTATUS      0x088
59 #define   SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
60 #define   SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
61 #define   SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
62 #define   SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
63 #define   SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
64 #define   SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
65 #define   SVC_I3C_MSTATUS_IBITYPE_IBI 1
66 #define   SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
67 #define   SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
68 #define   SVC_I3C_MINT_SLVSTART BIT(8)
69 #define   SVC_I3C_MINT_MCTRLDONE BIT(9)
70 #define   SVC_I3C_MINT_COMPLETE BIT(10)
71 #define   SVC_I3C_MINT_RXPEND BIT(11)
72 #define   SVC_I3C_MINT_TXNOTFULL BIT(12)
73 #define   SVC_I3C_MINT_IBIWON BIT(13)
74 #define   SVC_I3C_MINT_ERRWARN BIT(15)
75 #define   SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
76 #define   SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
77 #define   SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
78 #define   SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
79 #define   SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
80 #define   SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
81 #define   SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
82 #define   SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
83 
84 #define SVC_I3C_IBIRULES     0x08C
85 #define   SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
86 						       ((addr) & 0x3F) << ((slot) * 6))
87 #define   SVC_I3C_IBIRULES_ADDRS 5
88 #define   SVC_I3C_IBIRULES_MSB0 BIT(30)
89 #define   SVC_I3C_IBIRULES_NOBYTE BIT(31)
90 #define   SVC_I3C_IBIRULES_MANDBYTE 0
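/*
 * Packing sketch (example addresses): SVC_I3C_IBIRULES_ADDR(0, 0x0a) places
 * 0x0a in bits 5:0, SVC_I3C_IBIRULES_ADDR(1, 0x0b) places 0x0b in bits 11:6,
 * and so on, so up to SVC_I3C_IBIRULES_ADDRS (5) addresses fit in bits 29:0.
 * Only the low six address bits are stored, which is why
 * svc_i3c_update_ibirules() cannot cover devices whose dynamic address has
 * BIT(7) set.
 */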
91 #define SVC_I3C_MINTSET      0x090
92 #define SVC_I3C_MINTCLR      0x094
93 #define SVC_I3C_MINTMASKED   0x098
94 #define SVC_I3C_MERRWARN     0x09C
95 #define   SVC_I3C_MERRWARN_NACK BIT(2)
96 #define   SVC_I3C_MERRWARN_TIMEOUT BIT(20)
97 #define SVC_I3C_MDMACTRL     0x0A0
98 #define SVC_I3C_MDATACTRL    0x0AC
99 #define   SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
100 #define   SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
101 #define   SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
102 #define   SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
103 #define   SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
104 #define   SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
105 #define   SVC_I3C_MDATACTRL_TXFULL BIT(30)
106 #define   SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
107 
108 #define SVC_I3C_MWDATAB      0x0B0
109 #define   SVC_I3C_MWDATAB_END BIT(8)
110 
111 #define SVC_I3C_MWDATABE     0x0B4
112 #define SVC_I3C_MWDATAH      0x0B8
113 #define SVC_I3C_MWDATAHE     0x0BC
114 #define SVC_I3C_MRDATAB      0x0C0
115 #define SVC_I3C_MRDATAH      0x0C8
116 #define SVC_I3C_MWMSG_SDR    0x0D0
117 #define SVC_I3C_MRMSG_SDR    0x0D4
118 #define SVC_I3C_MWMSG_DDR    0x0D8
119 #define SVC_I3C_MRMSG_DDR    0x0DC
120 
121 #define SVC_I3C_MDYNADDR     0x0E4
122 #define   SVC_MDYNADDR_VALID BIT(0)
123 #define   SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
124 
125 #define SVC_I3C_MAX_DEVS 32
126 #define SVC_I3C_PM_TIMEOUT_MS 1000
127 
128 /* This parameter depends on the implementation and may be tuned */
129 #define SVC_I3C_FIFO_SIZE 16
130 #define SVC_I3C_PPBAUD_MAX 15
131 #define SVC_I3C_QUICK_I2C_CLK 4170000
132 
133 #define SVC_I3C_EVENT_IBI	BIT(0)
134 #define SVC_I3C_EVENT_HOTJOIN	BIT(1)
135 
136 struct svc_i3c_cmd {
137 	u8 addr;
138 	bool rnw;
139 	u8 *in;
140 	const void *out;
141 	unsigned int len;
142 	unsigned int actual_len;
143 	struct i3c_priv_xfer *xfer;
144 	bool continued;
145 };
146 
147 struct svc_i3c_xfer {
148 	struct list_head node;
149 	struct completion comp;
150 	int ret;
151 	unsigned int type;
152 	unsigned int ncmds;
153 	struct svc_i3c_cmd cmds[] __counted_by(ncmds);
154 };
155 
156 struct svc_i3c_regs_save {
157 	u32 mconfig;
158 	u32 mdynaddr;
159 };
160 
161 /**
162  * struct svc_i3c_master - Silvaco I3C Master structure
163  * @base: I3C master controller
164  * @dev: Corresponding device
165  * @regs: Memory mapping
166  * @saved_regs: Volatile values for PM operations
167  * @free_slots: Bit array of available slots
168  * @addrs: Array containing the dynamic addresses of each attached device
169  * @descs: Array of descriptors, one per attached device
170  * @hj_work: Hot-join work
171  * @ibi_work: IBI work
172  * @irq: Main interrupt
173  * @pclk: System clock
174  * @fclk: Fast clock (bus)
175  * @sclk: Slow clock (other events)
176  * @xferqueue: Transfer queue structure
177  * @xferqueue.list: List member
178  * @xferqueue.cur: Current ongoing transfer
179  * @xferqueue.lock: Queue lock
180  * @ibi: IBI structure
181  * @ibi.num_slots: Number of slots available in @ibi.slots
182  * @ibi.slots: Available IBI slots
183  * @ibi.tbq_slot: To be queued IBI slot
184  * @ibi.lock: IBI lock
185  * @lock: Transfer lock, protects against races between the IBI work thread and master callbacks
186  * @enabled_events: Bitmask of currently enabled events (IBI, HotJoin).
187  * @mctrl_config: Saved SVC_I3C_MCONFIG value used to restore the bus speed.
188  */
189 struct svc_i3c_master {
190 	struct i3c_master_controller base;
191 	struct device *dev;
192 	void __iomem *regs;
193 	struct svc_i3c_regs_save saved_regs;
194 	u32 free_slots;
195 	u8 addrs[SVC_I3C_MAX_DEVS];
196 	struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
197 	struct work_struct hj_work;
198 	struct work_struct ibi_work;
199 	int irq;
200 	struct clk *pclk;
201 	struct clk *fclk;
202 	struct clk *sclk;
203 	struct {
204 		struct list_head list;
205 		struct svc_i3c_xfer *cur;
206 		/* Prevent races between transfers */
207 		spinlock_t lock;
208 	} xferqueue;
209 	struct {
210 		unsigned int num_slots;
211 		struct i3c_dev_desc **slots;
212 		struct i3c_ibi_slot *tbq_slot;
213 		/* Prevent races within IBI handlers */
214 		spinlock_t lock;
215 	} ibi;
216 	struct mutex lock;
217 	int enabled_events;
218 	u32 mctrl_config;
219 };
220 
221 /**
222  * struct svc_i3c_i2c_dev_data - Device specific data
223  * @index: Index in the master tables corresponding to this device
224  * @ibi: IBI slot index in the master structure
225  * @ibi_pool: IBI pool associated to this device
226  */
227 struct svc_i3c_i2c_dev_data {
228 	u8 index;
229 	int ibi;
230 	struct i3c_generic_ibi_pool *ibi_pool;
231 };
232 
233 static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask)
234 {
235 	return !!(master->enabled_events & mask);
236 }
237 
238 static bool svc_i3c_master_error(struct svc_i3c_master *master)
239 {
240 	u32 mstatus, merrwarn;
241 
242 	mstatus = readl(master->regs + SVC_I3C_MSTATUS);
243 	if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
244 		merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
245 		writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
246 
247 		/* Ignore timeout error */
248 		if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
249 			dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
250 				mstatus, merrwarn);
251 			return false;
252 		}
253 
254 		dev_err(master->dev,
255 			"Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
256 			mstatus, merrwarn);
257 
258 		return true;
259 	}
260 
261 	return false;
262 }
263 
264 static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
265 {
266 	writel(mask, master->regs + SVC_I3C_MINTSET);
267 }
268 
269 static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
270 {
271 	u32 mask = readl(master->regs + SVC_I3C_MINTSET);
272 
273 	writel(mask, master->regs + SVC_I3C_MINTCLR);
274 }
275 
276 static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
277 {
278 	/* Clear pending warnings */
279 	writel(readl(master->regs + SVC_I3C_MERRWARN),
280 	       master->regs + SVC_I3C_MERRWARN);
281 }
282 
283 static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
284 {
285 	/* Flush FIFOs */
286 	writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
287 	       master->regs + SVC_I3C_MDATACTRL);
288 }
289 
290 static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
291 {
292 	u32 reg;
293 
294 	/* Set RX and TX trigger levels, flush FIFOs */
295 	reg = SVC_I3C_MDATACTRL_FLUSHTB |
296 	      SVC_I3C_MDATACTRL_FLUSHRB |
297 	      SVC_I3C_MDATACTRL_UNLOCK_TRIG |
298 	      SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
299 	      SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
300 	writel(reg, master->regs + SVC_I3C_MDATACTRL);
301 }
302 
303 static void svc_i3c_master_reset(struct svc_i3c_master *master)
304 {
305 	svc_i3c_master_clear_merrwarn(master);
306 	svc_i3c_master_reset_fifo_trigger(master);
307 	svc_i3c_master_disable_interrupts(master);
308 }
309 
310 static inline struct svc_i3c_master *
311 to_svc_i3c_master(struct i3c_master_controller *master)
312 {
313 	return container_of(master, struct svc_i3c_master, base);
314 }
315 
316 static void svc_i3c_master_hj_work(struct work_struct *work)
317 {
318 	struct svc_i3c_master *master;
319 
320 	master = container_of(work, struct svc_i3c_master, hj_work);
321 	i3c_master_do_daa(&master->base);
322 }
323 
324 static struct i3c_dev_desc *
325 svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
326 			     unsigned int ibiaddr)
327 {
328 	int i;
329 
330 	for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
331 		if (master->addrs[i] == ibiaddr)
332 			break;
333 
334 	if (i == SVC_I3C_MAX_DEVS)
335 		return NULL;
336 
337 	return master->descs[i];
338 }
339 
340 static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
341 {
342 	writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);
343 
344 	/*
345 	 * This delay is necessary after the emission of a stop, otherwise eg.
346 	 * repeating IBIs do not get detected. There is a note in the manual
347 	 * about it, stating that the stop condition might not be settled
348 	 * correctly if a start condition follows too rapidly.
349 	 */
350 	udelay(1);
351 }
352 
353 static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
354 				     struct i3c_dev_desc *dev)
355 {
356 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
357 	struct i3c_ibi_slot *slot;
358 	unsigned int count;
359 	u32 mdatactrl;
360 	int ret, val;
361 	u8 *buf;
362 
363 	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
364 	if (!slot)
365 		return -ENOSPC;
366 
367 	slot->len = 0;
368 	buf = slot->data;
369 
370 	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
371 						SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
372 	if (ret) {
373 		dev_err(master->dev, "Timeout when polling for COMPLETE\n");
374 		return ret;
375 	}
376 
377 	while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS))  &&
378 	       slot->len < SVC_I3C_FIFO_SIZE) {
379 		mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
380 		count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
381 		readsl(master->regs + SVC_I3C_MRDATAB, buf, count);
382 		slot->len += count;
383 		buf += count;
384 	}
385 
386 	master->ibi.tbq_slot = slot;
387 
388 	return 0;
389 }
390 
391 static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
392 				   bool mandatory_byte)
393 {
394 	unsigned int ibi_ack_nack;
395 
396 	ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
397 	if (mandatory_byte)
398 		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
399 	else
400 		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;
401 
402 	writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
403 }
404 
405 static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
406 {
407 	writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
408 	       SVC_I3C_MCTRL_IBIRESP_NACK,
409 	       master->regs + SVC_I3C_MCTRL);
410 }
411 
412 static void svc_i3c_master_ibi_work(struct work_struct *work)
413 {
414 	struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
415 	struct svc_i3c_i2c_dev_data *data;
416 	unsigned int ibitype, ibiaddr;
417 	struct i3c_dev_desc *dev;
418 	u32 status, val;
419 	int ret;
420 
421 	mutex_lock(&master->lock);
422 	/*
423 	 * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
424 	 * readl_relaxed_poll_timeout() to return immediately. Consequently,
425 	 * ibitype will be 0, since it is only updated after the 8th SCL
426 	 * cycle, and the client IBI handler would be missed.
427 	 *
428 	 * A typical scenario is when IBIWON occurs and bus arbitration is lost
429 	 * at svc_i3c_master_priv_xfers().
430 	 *
431 	 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
432 	 */
433 	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
434 
435 	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
436 	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
437 	       SVC_I3C_MCTRL_IBIRESP_AUTO,
438 	       master->regs + SVC_I3C_MCTRL);
439 
440 	/* Wait for IBIWON, should take approximately 100us */
441 	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
442 					 SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
443 	if (ret) {
444 		dev_err(master->dev, "Timeout when polling for IBIWON\n");
445 		svc_i3c_master_emit_stop(master);
446 		goto reenable_ibis;
447 	}
448 
449 	status = readl(master->regs + SVC_I3C_MSTATUS);
450 	ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
451 	ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
452 
453 	/* Handle the critical responses to IBIs */
454 	switch (ibitype) {
455 	case SVC_I3C_MSTATUS_IBITYPE_IBI:
456 		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
457 		if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
458 			svc_i3c_master_nack_ibi(master);
459 		else
460 			svc_i3c_master_handle_ibi(master, dev);
461 		break;
462 	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
463 		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
464 			svc_i3c_master_ack_ibi(master, false);
465 		else
466 			svc_i3c_master_nack_ibi(master);
467 		break;
468 	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
469 		svc_i3c_master_nack_ibi(master);
470 		break;
471 	default:
472 		break;
473 	}
474 
475 	/*
476 	 * If an error happened, we probably got interrupted and the exchange
477 	 * timed out. In this case we just drop everything, emit a stop and wait
478 	 * for the slave to interrupt again.
479 	 */
480 	if (svc_i3c_master_error(master)) {
481 		if (master->ibi.tbq_slot) {
482 			data = i3c_dev_get_master_data(dev);
483 			i3c_generic_ibi_recycle_slot(data->ibi_pool,
484 						     master->ibi.tbq_slot);
485 			master->ibi.tbq_slot = NULL;
486 		}
487 
488 		svc_i3c_master_emit_stop(master);
489 
490 		goto reenable_ibis;
491 	}
492 
493 	/* Handle the non-critical tasks */
494 	switch (ibitype) {
495 	case SVC_I3C_MSTATUS_IBITYPE_IBI:
496 		if (dev) {
497 			i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
498 			master->ibi.tbq_slot = NULL;
499 		}
500 		svc_i3c_master_emit_stop(master);
501 		break;
502 	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
503 		svc_i3c_master_emit_stop(master);
504 		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
505 			queue_work(master->base.wq, &master->hj_work);
506 		break;
507 	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
508 	default:
509 		break;
510 	}
511 
512 reenable_ibis:
513 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
514 	mutex_unlock(&master->lock);
515 }
516 
517 static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
518 {
519 	struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
520 	u32 active = readl(master->regs + SVC_I3C_MSTATUS);
521 
522 	if (!SVC_I3C_MSTATUS_SLVSTART(active))
523 		return IRQ_NONE;
524 
525 	/* Clear the interrupt status */
526 	writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
527 
528 	svc_i3c_master_disable_interrupts(master);
529 
530 	/* Handle the interrupt in a non-atomic context */
531 	queue_work(master->base.wq, &master->ibi_work);
532 
533 	return IRQ_HANDLED;
534 }
535 
536 static int svc_i3c_master_set_speed(struct i3c_master_controller *m,
537 				     enum i3c_open_drain_speed speed)
538 {
539 	struct svc_i3c_master *master = to_svc_i3c_master(m);
540 	struct i3c_bus *bus = i3c_master_get_bus(&master->base);
541 	u32 ppbaud, odbaud, odhpp, mconfig;
542 	unsigned long fclk_rate;
543 	int ret;
544 
545 	ret = pm_runtime_resume_and_get(master->dev);
546 	if (ret < 0) {
547 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
548 		return ret;
549 	}
550 
551 	switch (speed) {
552 	case I3C_OPEN_DRAIN_SLOW_SPEED:
553 		fclk_rate = clk_get_rate(master->fclk);
554 		if (!fclk_rate) {
555 			ret = -EINVAL;
556 			goto rpm_out;
557 		}
558 		/*
559 		 * Set the I3C open-drain frequency to the I2C speed with a 50%
560 		 * duty cycle, so the first broadcast address is visible to all
561 		 * I2C/I3C devices on the I3C bus. An I3C device acting as an I2C
562 		 * device will then turn off its 50ns spike filter to switch to I3C mode.
563 		 */
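		/*
		 * Minimal worked example (assuming fclk = 100 MHz, PPBAUD = 3,
		 * i.e. a 40ns push-pull high period, and a 100 kHz I2C rate):
		 * ODBAUD = DIV_ROUND_UP(100000000, 100000 * (2 + 2 * 3)) - 1 = 124,
		 * so each open-drain half period lasts (124 + 1) * 40ns = 5us,
		 * giving a 100 kHz open-drain clock with a 50% duty cycle.
		 */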
564 		mconfig = master->mctrl_config;
565 		ppbaud = FIELD_GET(GENMASK(11, 8), mconfig);
566 		odhpp = 0;
567 		odbaud = DIV_ROUND_UP(fclk_rate, bus->scl_rate.i2c * (2 + 2 * ppbaud)) - 1;
568 		mconfig &= ~GENMASK(24, 16);
569 		mconfig |= SVC_I3C_MCONFIG_ODBAUD(odbaud) | SVC_I3C_MCONFIG_ODHPP(odhpp);
570 		writel(mconfig, master->regs + SVC_I3C_MCONFIG);
571 		break;
572 	case I3C_OPEN_DRAIN_NORMAL_SPEED:
573 		writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);
574 		break;
575 	}
576 
577 rpm_out:
578 	pm_runtime_mark_last_busy(master->dev);
579 	pm_runtime_put_autosuspend(master->dev);
580 
581 	return ret;
582 }
583 
584 static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
585 {
586 	struct svc_i3c_master *master = to_svc_i3c_master(m);
587 	struct i3c_bus *bus = i3c_master_get_bus(m);
588 	struct i3c_device_info info = {};
589 	unsigned long fclk_rate, fclk_period_ns;
590 	unsigned long i2c_period_ns, i2c_scl_rate, i3c_scl_rate;
591 	unsigned int high_period_ns, od_low_period_ns;
592 	u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
593 	int ret;
594 
595 	ret = pm_runtime_resume_and_get(master->dev);
596 	if (ret < 0) {
597 		dev_err(master->dev,
598 			"<%s> cannot resume i3c bus master, err: %d\n",
599 			__func__, ret);
600 		return ret;
601 	}
602 
603 	/* Timings derivation */
604 	fclk_rate = clk_get_rate(master->fclk);
605 	if (!fclk_rate) {
606 		ret = -EINVAL;
607 		goto rpm_out;
608 	}
609 
610 	fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
611 	i2c_period_ns = DIV_ROUND_UP(1000000000, bus->scl_rate.i2c);
612 	i2c_scl_rate = bus->scl_rate.i2c;
613 	i3c_scl_rate = bus->scl_rate.i3c;
614 
615 	/*
616 	 * In I3C Push-Pull mode the target is a 12.5MHz/80ns period.
617 	 * The simplest configuration uses a 50% duty-cycle, i.e. 40ns high/low.
618 	 */
619 	ppbaud = DIV_ROUND_UP(fclk_rate / 2, i3c_scl_rate) - 1;
620 	pplow = 0;
621 
622 	/*
623 	 * In I3C Open-Drain mode the target is 4.17MHz/240ns, with the
624 	 * duty-cycle tuned so that high levels are filtered out by the
625 	 * legacy I2C 50ns spike filter (the high period targets 40ns).
626 	 */
627 	odhpp = 1;
628 	high_period_ns = (ppbaud + 1) * fclk_period_ns;
629 	odbaud = DIV_ROUND_UP(fclk_rate, SVC_I3C_QUICK_I2C_CLK * (1 + ppbaud)) - 2;
630 	od_low_period_ns = (odbaud + 1) * high_period_ns;
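	/*
	 * Worked example (a sketch assuming fclk = 100 MHz, i.e. a 10ns period,
	 * and i3c_scl_rate = 12.5 MHz): ppbaud = 100 / 2 / 12.5 - 1 = 3, so the
	 * push-pull high and low periods are (3 + 1) * 10ns = 40ns (12.5 MHz).
	 * odbaud = DIV_ROUND_UP(100000000, 4170000 * 4) - 2 = 4, so the
	 * open-drain low period is (4 + 1) * 40ns = 200ns; with odhpp = 1 the
	 * high period stays at 40ns, matching the 240ns/4.17MHz target above.
	 */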
631 
632 	switch (bus->mode) {
633 	case I3C_BUS_MODE_PURE:
634 		i2cbaud = 0;
635 		odstop = 0;
636 		break;
637 	case I3C_BUS_MODE_MIXED_FAST:
638 		/*
639 		 * Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
640 		 * between the high and low period does not really matter.
641 		 */
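		/*
		 * For instance, still assuming the 200ns open-drain low period
		 * from the example above, I2C Fm+ at 1MHz/1000ns gives
		 * i2cbaud = DIV_ROUND_UP(1000, 200) - 2 = 3.
		 */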
642 		i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
643 		odstop = 1;
644 		break;
645 	case I3C_BUS_MODE_MIXED_LIMITED:
646 	case I3C_BUS_MODE_MIXED_SLOW:
647 		/* I3C PP, I3C OD and I2C OD all use the i2c clk rate */
648 		if (ppbaud > SVC_I3C_PPBAUD_MAX) {
649 			ppbaud = SVC_I3C_PPBAUD_MAX;
650 			pplow =  DIV_ROUND_UP(fclk_rate, i3c_scl_rate) - (2 + 2 * ppbaud);
651 		}
652 
653 		high_period_ns = (ppbaud + 1) * fclk_period_ns;
654 		odhpp = 0;
655 		odbaud = DIV_ROUND_UP(fclk_rate, i2c_scl_rate * (2 + 2 * ppbaud)) - 1;
656 
657 		od_low_period_ns = (odbaud + 1) * high_period_ns;
658 		i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
659 		odstop = 1;
660 		break;
661 	default:
662 		goto rpm_out;
663 	}
664 
665 	reg = SVC_I3C_MCONFIG_MASTER_EN |
666 	      SVC_I3C_MCONFIG_DISTO(0) |
667 	      SVC_I3C_MCONFIG_HKEEP(0) |
668 	      SVC_I3C_MCONFIG_ODSTOP(odstop) |
669 	      SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
670 	      SVC_I3C_MCONFIG_PPLOW(pplow) |
671 	      SVC_I3C_MCONFIG_ODBAUD(odbaud) |
672 	      SVC_I3C_MCONFIG_ODHPP(odhpp) |
673 	      SVC_I3C_MCONFIG_SKEW(0) |
674 	      SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
675 	writel(reg, master->regs + SVC_I3C_MCONFIG);
676 
677 	master->mctrl_config = reg;
678 	/* Master core's registration */
679 	ret = i3c_master_get_free_addr(m, 0);
680 	if (ret < 0)
681 		goto rpm_out;
682 
683 	info.dyn_addr = ret;
684 
685 	writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
686 	       master->regs + SVC_I3C_MDYNADDR);
687 
688 	ret = i3c_master_set_info(&master->base, &info);
689 	if (ret)
690 		goto rpm_out;
691 
692 rpm_out:
693 	pm_runtime_mark_last_busy(master->dev);
694 	pm_runtime_put_autosuspend(master->dev);
695 
696 	return ret;
697 }
698 
699 static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
700 {
701 	struct svc_i3c_master *master = to_svc_i3c_master(m);
702 	int ret;
703 
704 	ret = pm_runtime_resume_and_get(master->dev);
705 	if (ret < 0) {
706 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
707 		return;
708 	}
709 
710 	svc_i3c_master_disable_interrupts(master);
711 
712 	/* Disable master */
713 	writel(0, master->regs + SVC_I3C_MCONFIG);
714 
715 	pm_runtime_mark_last_busy(master->dev);
716 	pm_runtime_put_autosuspend(master->dev);
717 }
718 
719 static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
720 {
721 	unsigned int slot;
722 
723 	if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
724 		return -ENOSPC;
725 
726 	slot = ffs(master->free_slots) - 1;
727 
728 	master->free_slots &= ~BIT(slot);
729 
730 	return slot;
731 }
732 
733 static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
734 					unsigned int slot)
735 {
736 	master->free_slots |= BIT(slot);
737 }
738 
739 static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
740 {
741 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
742 	struct svc_i3c_master *master = to_svc_i3c_master(m);
743 	struct svc_i3c_i2c_dev_data *data;
744 	int slot;
745 
746 	slot = svc_i3c_master_reserve_slot(master);
747 	if (slot < 0)
748 		return slot;
749 
750 	data = kzalloc(sizeof(*data), GFP_KERNEL);
751 	if (!data) {
752 		svc_i3c_master_release_slot(master, slot);
753 		return -ENOMEM;
754 	}
755 
756 	data->ibi = -1;
757 	data->index = slot;
758 	master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
759 						   dev->info.static_addr;
760 	master->descs[slot] = dev;
761 
762 	i3c_dev_set_master_data(dev, data);
763 
764 	return 0;
765 }
766 
767 static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
768 					   u8 old_dyn_addr)
769 {
770 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
771 	struct svc_i3c_master *master = to_svc_i3c_master(m);
772 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
773 
774 	master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
775 							  dev->info.static_addr;
776 
777 	return 0;
778 }
779 
780 static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
781 {
782 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
783 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
784 	struct svc_i3c_master *master = to_svc_i3c_master(m);
785 
786 	master->addrs[data->index] = 0;
787 	svc_i3c_master_release_slot(master, data->index);
788 
789 	kfree(data);
790 }
791 
792 static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
793 {
794 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
795 	struct svc_i3c_master *master = to_svc_i3c_master(m);
796 	struct svc_i3c_i2c_dev_data *data;
797 	int slot;
798 
799 	slot = svc_i3c_master_reserve_slot(master);
800 	if (slot < 0)
801 		return slot;
802 
803 	data = kzalloc(sizeof(*data), GFP_KERNEL);
804 	if (!data) {
805 		svc_i3c_master_release_slot(master, slot);
806 		return -ENOMEM;
807 	}
808 
809 	data->index = slot;
810 	master->addrs[slot] = dev->addr;
811 
812 	i2c_dev_set_master_data(dev, data);
813 
814 	return 0;
815 }
816 
817 static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
818 {
819 	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
820 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
821 	struct svc_i3c_master *master = to_svc_i3c_master(m);
822 
823 	svc_i3c_master_release_slot(master, data->index);
824 
825 	kfree(data);
826 }
827 
828 static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
829 				unsigned int len)
830 {
831 	int ret, i;
832 	u32 reg;
833 
834 	for (i = 0; i < len; i++) {
835 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
836 						reg,
837 						SVC_I3C_MSTATUS_RXPEND(reg),
838 						0, 1000);
839 		if (ret)
840 			return ret;
841 
842 		dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
843 	}
844 
845 	return 0;
846 }
847 
848 static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
849 					u8 *addrs, unsigned int *count)
850 {
851 	u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
852 	unsigned int dev_nb = 0, last_addr = 0;
853 	u32 reg;
854 	int ret, i;
855 
856 	while (true) {
857 		/* SVC_I3C_MCTRL_REQUEST_PROC_DAA has two modes: ENTER DAA and PROCESS DAA.
858 		 *
859 		 * ENTER DAA:
860 		 *   1. Issues START, 7E, ENTDAA, then emits 7E/R to process the first target.
861 		 *   2. Stops just before the new Dynamic Address (DA) is to be emitted.
862 		 *
863 		 * PROCESS DAA:
864 		 *   1. The DA is written using MWDATAB or ADDR bits 6:0.
865 		 *   2. ProcessDAA is requested again to write the new address, and then starts
866 		 *      the next (START, 7E, ENTDAA) unless marked to STOP; an MSTATUS NACK
867 		 *      means the DA was not accepted (e.g. parity error). If PROCESSDAA is
868 		 *      NACKed on the 7E/R, meaning no more targets need a DA, a COMPLETE is
869 		 *      signaled (along with DONE) and a STOP is issued automatically.
870 		 */
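		/*
		 * In short, each iteration below requests PROC_DAA, then either
		 * reads the responding target's 6-byte PID (plus BCR/DCR) and
		 * writes its new dynamic address through MWDATAB, or sees
		 * MCTRLDONE and decides whether DAA completed, a target NACKed
		 * its address, or an error occurred.
		 */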
871 		writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
872 		       SVC_I3C_MCTRL_TYPE_I3C |
873 		       SVC_I3C_MCTRL_IBIRESP_NACK |
874 		       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
875 		       master->regs + SVC_I3C_MCTRL);
876 
877 		/*
878 		 * Either one slave will send its ID, or the assignment process
879 		 * is done.
880 		 */
881 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
882 						reg,
883 						SVC_I3C_MSTATUS_RXPEND(reg) |
884 						SVC_I3C_MSTATUS_MCTRLDONE(reg),
885 						1, 1000);
886 		if (ret)
887 			break;
888 
889 		if (SVC_I3C_MSTATUS_RXPEND(reg)) {
890 			u8 data[6];
891 
892 			/*
893 			 * For now we only care about the 48-bit provisioned ID,
894 			 * so we can detect a device nacking an address twice.
895 			 * Otherwise, we would just need to flush the RX FIFO.
896 			 */
897 			ret = svc_i3c_master_readb(master, data, 6);
898 			if (ret)
899 				break;
900 
901 			for (i = 0; i < 6; i++)
902 				prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));
903 
904 			/* We do not care about the BCR and DCR yet */
905 			ret = svc_i3c_master_readb(master, data, 2);
906 			if (ret)
907 				break;
908 		} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
909 			if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
910 			    SVC_I3C_MSTATUS_COMPLETE(reg)) {
911 				/*
912 				 * All devices received and acked their dynamic
913 				 * address; this is the natural end of the DAA
914 				 * procedure.
915 				 *
916 				 * The hardware emits the STOP automatically here.
917 				 */
918 				*count = dev_nb;
919 				return 0;
920 
921 			} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
922 				/* No I3C devices attached */
923 				if (dev_nb == 0) {
924 					/*
925 					 * The hardware cannot treat the first NACK of ENTDAA as a
926 					 * normal COMPLETE, so a STOP must be emitted manually.
927 					 */
928 					ret = 0;
929 					*count = 0;
930 					break;
931 				}
932 
933 				/*
934 				 * A slave device nacked the address, this is
935 				 * allowed only once, DAA will be stopped and
936 				 * then resumed. The same device is supposed to
937 				 * answer again immediately and shall ack the
938 				 * address this time.
939 				 */
940 				if (prov_id[dev_nb] == nacking_prov_id) {
941 					ret = -EIO;
942 					break;
943 				}
944 
945 				dev_nb--;
946 				nacking_prov_id = prov_id[dev_nb];
947 				svc_i3c_master_emit_stop(master);
948 
949 				continue;
950 			} else {
951 				break;
952 			}
953 		}
954 
955 		/* Wait for the slave to be ready to receive its address */
956 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
957 						reg,
958 						SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
959 						SVC_I3C_MSTATUS_STATE_DAA(reg) &&
960 						SVC_I3C_MSTATUS_BETWEEN(reg),
961 						0, 1000);
962 		if (ret)
963 			break;
964 
965 		/* Give the slave device a suitable dynamic address */
966 		ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
967 		if (ret < 0)
968 			break;
969 
970 		addrs[dev_nb] = ret;
971 		dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
972 			dev_nb, addrs[dev_nb]);
973 
974 		writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
975 		last_addr = addrs[dev_nb++];
976 	}
977 
978 	/* A STOP must be issued manually, except on the COMPLETE condition */
979 	svc_i3c_master_emit_stop(master);
980 	return ret;
981 }
982 
983 static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
984 {
985 	struct i3c_dev_desc *dev;
986 	u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
987 	unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
988 		nobyte_addr_ko = 0;
989 	bool list_mbyte = false, list_nobyte = false;
990 
991 	/* Create the IBIRULES register for both cases */
992 	i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
993 		if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
994 			continue;
995 
996 		if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
997 			reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
998 							   dev->info.dyn_addr);
999 
1000 			/* IBI rules cannot be applied to devices with MSb=1 */
1001 			if (dev->info.dyn_addr & BIT(7))
1002 				mbyte_addr_ko++;
1003 			else
1004 				mbyte_addr_ok++;
1005 		} else {
1006 			reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
1007 							    dev->info.dyn_addr);
1008 
1009 			/* IBI rules cannot be applied to devices with MSb=1 */
1010 			if (dev->info.dyn_addr & BIT(7))
1011 				nobyte_addr_ko++;
1012 			else
1013 				nobyte_addr_ok++;
1014 		}
1015 	}
1016 
1017 	/* Check whether each device list can be handled by the hardware */
1018 	if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
1019 		list_mbyte = true;
1020 
1021 	if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
1022 		list_nobyte = true;
1023 
1024 	/* No list can be properly handled, return an error */
1025 	if (!list_mbyte && !list_nobyte)
1026 		return -ERANGE;
1027 
1028 	/* Pick a list the hardware can handle, preferring the mandatory-byte one */
1029 	if (list_mbyte)
1030 		writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
1031 	else
1032 		writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);
1033 
1034 	return 0;
1035 }
1036 
1037 static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
1038 {
1039 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1040 	u8 addrs[SVC_I3C_MAX_DEVS];
1041 	unsigned long flags;
1042 	unsigned int dev_nb;
1043 	int ret, i;
1044 
1045 	ret = pm_runtime_resume_and_get(master->dev);
1046 	if (ret < 0) {
1047 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1048 		return ret;
1049 	}
1050 
1051 	spin_lock_irqsave(&master->xferqueue.lock, flags);
1052 	ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
1053 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1054 
1055 	svc_i3c_master_clear_merrwarn(master);
1056 	if (ret)
1057 		goto rpm_out;
1058 
1059 	/* Register with the core all devices that took part in DAA */
1060 	for (i = 0; i < dev_nb; i++) {
1061 		ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
1062 		if (ret)
1063 			goto rpm_out;
1064 	}
1065 
1066 	/* Configure IBI auto-rules */
1067 	ret = svc_i3c_update_ibirules(master);
1068 	if (ret)
1069 		dev_err(master->dev, "Cannot handle such a list of devices\n");
1070 
1071 rpm_out:
1072 	pm_runtime_mark_last_busy(master->dev);
1073 	pm_runtime_put_autosuspend(master->dev);
1074 
1075 	return ret;
1076 }
1077 
1078 static int svc_i3c_master_read(struct svc_i3c_master *master,
1079 			       u8 *in, unsigned int len)
1080 {
1081 	int offset = 0, i;
1082 	u32 mdctrl, mstatus;
1083 	bool completed = false;
1084 	unsigned int count;
1085 	unsigned long start = jiffies;
1086 
1087 	while (!completed) {
1088 		mstatus = readl(master->regs + SVC_I3C_MSTATUS);
1089 		if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
1090 			completed = true;
1091 
1092 		if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
1093 			dev_dbg(master->dev, "I3C read timeout\n");
1094 			return -ETIMEDOUT;
1095 		}
1096 
1097 		mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
1098 		count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
1099 		if (offset + count > len) {
1100 			dev_err(master->dev, "I3C receive length too long!\n");
1101 			return -EINVAL;
1102 		}
1103 		for (i = 0; i < count; i++)
1104 			in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);
1105 
1106 		offset += count;
1107 	}
1108 
1109 	return offset;
1110 }
1111 
1112 static int svc_i3c_master_write(struct svc_i3c_master *master,
1113 				const u8 *out, unsigned int len)
1114 {
1115 	int offset = 0, ret;
1116 	u32 mdctrl;
1117 
1118 	while (offset < len) {
1119 		ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
1120 					 mdctrl,
1121 					 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
1122 					 0, 1000);
1123 		if (ret)
1124 			return ret;
1125 
1126 		/*
1127 		 * The last byte to be sent over the bus must either have the
1128 		 * "end" bit set or be written in MWDATABE.
1129 		 */
1130 		if (likely(offset < (len - 1)))
1131 			writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
1132 		else
1133 			writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
1134 	}
1135 
1136 	return 0;
1137 }
1138 
1139 static int svc_i3c_master_xfer(struct svc_i3c_master *master,
1140 			       bool rnw, unsigned int xfer_type, u8 addr,
1141 			       u8 *in, const u8 *out, unsigned int xfer_len,
1142 			       unsigned int *actual_len, bool continued)
1143 {
1144 	int retry = 2;
1145 	u32 reg;
1146 	int ret;
1147 
1148 	/* Clear the SVC_I3C_MINT_IBIWON write-1-to-clear bit */
1149 	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
1150 
1151 
1152 	while (retry--) {
1153 		writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
1154 		       xfer_type |
1155 		       SVC_I3C_MCTRL_IBIRESP_NACK |
1156 		       SVC_I3C_MCTRL_DIR(rnw) |
1157 		       SVC_I3C_MCTRL_ADDR(addr) |
1158 		       SVC_I3C_MCTRL_RDTERM(*actual_len),
1159 		       master->regs + SVC_I3C_MCTRL);
1160 
1161 		ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1162 				 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
1163 		if (ret)
1164 			goto emit_stop;
1165 
1166 		if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
1167 			/*
1168 			 * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3.
1169 			 * If the Controller chooses to start an I3C Message with an I3C Dynamic
1170 			 * Address, then special provisions shall be made because that same I3C
1171 			 * Target may be initiating an IBI or a Controller Role Request. So, one of
1172 			 * three things may happen: (skip 1, 2)
1173 			 *
1174 			 * 3. The Addresses match and the RnW bits also match, and so neither
1175 			 * Controller nor Target will ACK since both are expecting the other side to
1176 			 * provide ACK. As a result, each side might think it had "won" arbitration,
1177 			 * but neither side would continue, as each would subsequently see that the
1178 			 * other did not provide ACK.
1179 			 * ...
1180 			 * For either value of RnW: Due to the NACK, the Controller shall defer the
1181 			 * Private Write or Private Read, and should typically transmit the Target
1182 			 * Address again after a Repeated START (i.e., the next one or any one prior
1183 			 * to a STOP in the Frame). Since the Address Header following a Repeated
1184 			 * START is not arbitrated, the Controller will always win (see Section
1185 			 * 5.1.2.2.4).
1186 			 */
1187 			if (retry && addr != 0x7e) {
1188 				writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN);
1189 			} else {
1190 				ret = -ENXIO;
1191 				*actual_len = 0;
1192 				goto emit_stop;
1193 			}
1194 		} else {
1195 			break;
1196 		}
1197 	}
1198 
1199 	/*
1200 	 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
1201 	 * with I3C Target Address.
1202 	 *
1203 	 * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
1204 	 * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
1205 	 * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
1206 	 * a Hot-Join Request has been made.
1207 	 *
1208 	 * If the IBIWON check is missed, wrong data will be returned. When IBIWON
1209 	 * happens, return a failure and yield to the event handlers above.
1210 	 */
1211 	if (SVC_I3C_MSTATUS_IBIWON(reg)) {
1212 		ret = -EAGAIN;
1213 		*actual_len = 0;
1214 		goto emit_stop;
1215 	}
1216 
1217 	if (rnw)
1218 		ret = svc_i3c_master_read(master, in, xfer_len);
1219 	else
1220 		ret = svc_i3c_master_write(master, out, xfer_len);
1221 	if (ret < 0)
1222 		goto emit_stop;
1223 
1224 	if (rnw)
1225 		*actual_len = ret;
1226 
1227 	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1228 				 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
1229 	if (ret)
1230 		goto emit_stop;
1231 
1232 	writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);
1233 
1234 	if (!continued) {
1235 		svc_i3c_master_emit_stop(master);
1236 
1237 		/* Wait for the bus to return to idle once the stop is sent. */
1238 		readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1239 				   SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
1240 	}
1241 
1242 	return 0;
1243 
1244 emit_stop:
1245 	svc_i3c_master_emit_stop(master);
1246 	svc_i3c_master_clear_merrwarn(master);
1247 
1248 	return ret;
1249 }
1250 
1251 static struct svc_i3c_xfer *
1252 svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
1253 {
1254 	struct svc_i3c_xfer *xfer;
1255 
1256 	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
1257 	if (!xfer)
1258 		return NULL;
1259 
1260 	INIT_LIST_HEAD(&xfer->node);
1261 	xfer->ncmds = ncmds;
1262 	xfer->ret = -ETIMEDOUT;
1263 
1264 	return xfer;
1265 }
1266 
1267 static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
1268 {
1269 	kfree(xfer);
1270 }
1271 
1272 static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
1273 					       struct svc_i3c_xfer *xfer)
1274 {
1275 	if (master->xferqueue.cur == xfer)
1276 		master->xferqueue.cur = NULL;
1277 	else
1278 		list_del_init(&xfer->node);
1279 }
1280 
1281 static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
1282 					struct svc_i3c_xfer *xfer)
1283 {
1284 	unsigned long flags;
1285 
1286 	spin_lock_irqsave(&master->xferqueue.lock, flags);
1287 	svc_i3c_master_dequeue_xfer_locked(master, xfer);
1288 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1289 }
1290 
1291 static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
1292 {
1293 	struct svc_i3c_xfer *xfer = master->xferqueue.cur;
1294 	int ret, i;
1295 
1296 	if (!xfer)
1297 		return;
1298 
1299 	svc_i3c_master_clear_merrwarn(master);
1300 	svc_i3c_master_flush_fifo(master);
1301 
1302 	for (i = 0; i < xfer->ncmds; i++) {
1303 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1304 
1305 		ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
1306 					  cmd->addr, cmd->in, cmd->out,
1307 					  cmd->len, &cmd->actual_len,
1308 					  cmd->continued);
1309 		/* cmd->xfer is NULL if I2C or CCC transfer */
1310 		if (cmd->xfer)
1311 			cmd->xfer->actual_len = cmd->actual_len;
1312 
1313 		if (ret)
1314 			break;
1315 	}
1316 
1317 	xfer->ret = ret;
1318 	complete(&xfer->comp);
1319 
1320 	if (ret < 0)
1321 		svc_i3c_master_dequeue_xfer_locked(master, xfer);
1322 
1323 	xfer = list_first_entry_or_null(&master->xferqueue.list,
1324 					struct svc_i3c_xfer,
1325 					node);
1326 	if (xfer)
1327 		list_del_init(&xfer->node);
1328 
1329 	master->xferqueue.cur = xfer;
1330 	svc_i3c_master_start_xfer_locked(master);
1331 }
1332 
1333 static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
1334 					struct svc_i3c_xfer *xfer)
1335 {
1336 	unsigned long flags;
1337 	int ret;
1338 
1339 	ret = pm_runtime_resume_and_get(master->dev);
1340 	if (ret < 0) {
1341 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1342 		return;
1343 	}
1344 
1345 	init_completion(&xfer->comp);
1346 	spin_lock_irqsave(&master->xferqueue.lock, flags);
1347 	if (master->xferqueue.cur) {
1348 		list_add_tail(&xfer->node, &master->xferqueue.list);
1349 	} else {
1350 		master->xferqueue.cur = xfer;
1351 		svc_i3c_master_start_xfer_locked(master);
1352 	}
1353 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1354 
1355 	pm_runtime_mark_last_busy(master->dev);
1356 	pm_runtime_put_autosuspend(master->dev);
1357 }
1358 
1359 static bool
1360 svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
1361 				const struct i3c_ccc_cmd *cmd)
1362 {
1363 	/* No software support for CCC commands targeting more than one slave */
1364 	return (cmd->ndests == 1);
1365 }
1366 
1367 static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
1368 					      struct i3c_ccc_cmd *ccc)
1369 {
1370 	unsigned int xfer_len = ccc->dests[0].payload.len + 1;
1371 	struct svc_i3c_xfer *xfer;
1372 	struct svc_i3c_cmd *cmd;
1373 	u8 *buf;
1374 	int ret;
1375 
1376 	xfer = svc_i3c_master_alloc_xfer(master, 1);
1377 	if (!xfer)
1378 		return -ENOMEM;
1379 
1380 	buf = kmalloc(xfer_len, GFP_KERNEL);
1381 	if (!buf) {
1382 		svc_i3c_master_free_xfer(xfer);
1383 		return -ENOMEM;
1384 	}
1385 
1386 	buf[0] = ccc->id;
1387 	memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);
1388 
1389 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1390 
1391 	cmd = &xfer->cmds[0];
1392 	cmd->addr = ccc->dests[0].addr;
1393 	cmd->rnw = ccc->rnw;
1394 	cmd->in = NULL;
1395 	cmd->out = buf;
1396 	cmd->len = xfer_len;
1397 	cmd->actual_len = 0;
1398 	cmd->continued = false;
1399 
1400 	mutex_lock(&master->lock);
1401 	svc_i3c_master_enqueue_xfer(master, xfer);
1402 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1403 		svc_i3c_master_dequeue_xfer(master, xfer);
1404 	mutex_unlock(&master->lock);
1405 
1406 	ret = xfer->ret;
1407 	kfree(buf);
1408 	svc_i3c_master_free_xfer(xfer);
1409 
1410 	return ret;
1411 }
1412 
1413 static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
1414 					      struct i3c_ccc_cmd *ccc)
1415 {
1416 	unsigned int xfer_len = ccc->dests[0].payload.len;
1417 	unsigned int actual_len = ccc->rnw ? xfer_len : 0;
1418 	struct svc_i3c_xfer *xfer;
1419 	struct svc_i3c_cmd *cmd;
1420 	int ret;
1421 
1422 	xfer = svc_i3c_master_alloc_xfer(master, 2);
1423 	if (!xfer)
1424 		return -ENOMEM;
1425 
1426 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1427 
1428 	/* Broadcasted message */
1429 	cmd = &xfer->cmds[0];
1430 	cmd->addr = I3C_BROADCAST_ADDR;
1431 	cmd->rnw = 0;
1432 	cmd->in = NULL;
1433 	cmd->out = &ccc->id;
1434 	cmd->len = 1;
1435 	cmd->actual_len = 0;
1436 	cmd->continued = true;
1437 
1438 	/* Directed message */
1439 	cmd = &xfer->cmds[1];
1440 	cmd->addr = ccc->dests[0].addr;
1441 	cmd->rnw = ccc->rnw;
1442 	cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
1443 	cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
1444 	cmd->len = xfer_len;
1445 	cmd->actual_len = actual_len;
1446 	cmd->continued = false;
1447 
1448 	mutex_lock(&master->lock);
1449 	svc_i3c_master_enqueue_xfer(master, xfer);
1450 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1451 		svc_i3c_master_dequeue_xfer(master, xfer);
1452 	mutex_unlock(&master->lock);
1453 
1454 	if (cmd->actual_len != xfer_len)
1455 		ccc->dests[0].payload.len = cmd->actual_len;
1456 
1457 	ret = xfer->ret;
1458 	svc_i3c_master_free_xfer(xfer);
1459 
1460 	return ret;
1461 }
1462 
1463 static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
1464 				       struct i3c_ccc_cmd *cmd)
1465 {
1466 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1467 	bool broadcast = cmd->id < 0x80;
1468 	int ret;
1469 
1470 	if (broadcast)
1471 		ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
1472 	else
1473 		ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
1474 
1475 	if (ret)
1476 		cmd->err = I3C_ERROR_M2;
1477 
1478 	return ret;
1479 }
1480 
1481 static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
1482 				     struct i3c_priv_xfer *xfers,
1483 				     int nxfers)
1484 {
1485 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1486 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1487 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1488 	struct svc_i3c_xfer *xfer;
1489 	int ret, i;
1490 
1491 	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1492 	if (!xfer)
1493 		return -ENOMEM;
1494 
1495 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1496 
1497 	for (i = 0; i < nxfers; i++) {
1498 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1499 
1500 		cmd->xfer = &xfers[i];
1501 		cmd->addr = master->addrs[data->index];
1502 		cmd->rnw = xfers[i].rnw;
1503 		cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
1504 		cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
1505 		cmd->len = xfers[i].len;
1506 		cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0;
1507 		cmd->continued = (i + 1) < nxfers;
1508 	}
1509 
1510 	mutex_lock(&master->lock);
1511 	svc_i3c_master_enqueue_xfer(master, xfer);
1512 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1513 		svc_i3c_master_dequeue_xfer(master, xfer);
1514 	mutex_unlock(&master->lock);
1515 
1516 	ret = xfer->ret;
1517 	svc_i3c_master_free_xfer(xfer);
1518 
1519 	return ret;
1520 }
1521 
1522 static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
1523 				    const struct i2c_msg *xfers,
1524 				    int nxfers)
1525 {
1526 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
1527 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1528 	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
1529 	struct svc_i3c_xfer *xfer;
1530 	int ret, i;
1531 
1532 	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1533 	if (!xfer)
1534 		return -ENOMEM;
1535 
1536 	xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
1537 
1538 	for (i = 0; i < nxfers; i++) {
1539 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1540 
1541 		cmd->addr = master->addrs[data->index];
1542 		cmd->rnw = xfers[i].flags & I2C_M_RD;
1543 		cmd->in = cmd->rnw ? xfers[i].buf : NULL;
1544 		cmd->out = cmd->rnw ? NULL : xfers[i].buf;
1545 		cmd->len = xfers[i].len;
1546 		cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
1547 		cmd->continued = (i + 1 < nxfers);
1548 	}
1549 
1550 	mutex_lock(&master->lock);
1551 	svc_i3c_master_enqueue_xfer(master, xfer);
1552 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1553 		svc_i3c_master_dequeue_xfer(master, xfer);
1554 	mutex_unlock(&master->lock);
1555 
1556 	ret = xfer->ret;
1557 	svc_i3c_master_free_xfer(xfer);
1558 
1559 	return ret;
1560 }
1561 
1562 static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
1563 				      const struct i3c_ibi_setup *req)
1564 {
1565 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1566 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1567 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1568 	unsigned long flags;
1569 	unsigned int i;
1570 
1571 	if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
1572 		dev_err(master->dev, "IBI max payload %d should not exceed %d\n",
1573 			dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
1574 		return -ERANGE;
1575 	}
1576 
1577 	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
1578 	if (IS_ERR(data->ibi_pool))
1579 		return PTR_ERR(data->ibi_pool);
1580 
1581 	spin_lock_irqsave(&master->ibi.lock, flags);
1582 	for (i = 0; i < master->ibi.num_slots; i++) {
1583 		if (!master->ibi.slots[i]) {
1584 			data->ibi = i;
1585 			master->ibi.slots[i] = dev;
1586 			break;
1587 		}
1588 	}
1589 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1590 
1591 	if (i < master->ibi.num_slots)
1592 		return 0;
1593 
1594 	i3c_generic_ibi_free_pool(data->ibi_pool);
1595 	data->ibi_pool = NULL;
1596 
1597 	return -ENOSPC;
1598 }
1599 
1600 static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
1601 {
1602 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1603 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1604 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1605 	unsigned long flags;
1606 
1607 	spin_lock_irqsave(&master->ibi.lock, flags);
1608 	master->ibi.slots[data->ibi] = NULL;
1609 	data->ibi = -1;
1610 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1611 
1612 	i3c_generic_ibi_free_pool(data->ibi_pool);
1613 }
1614 
1615 static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
1616 {
1617 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1618 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1619 	int ret;
1620 
1621 	ret = pm_runtime_resume_and_get(master->dev);
1622 	if (ret < 0) {
1623 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1624 		return ret;
1625 	}
1626 
1627 	master->enabled_events |= SVC_I3C_EVENT_IBI;
1628 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1629 
1630 	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1631 }
1632 
1633 static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
1634 {
1635 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1636 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1637 	int ret;
1638 
1639 	master->enabled_events &= ~SVC_I3C_EVENT_IBI;
1640 	if (!master->enabled_events)
1641 		svc_i3c_master_disable_interrupts(master);
1642 
1643 	ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1644 
1645 	pm_runtime_mark_last_busy(master->dev);
1646 	pm_runtime_put_autosuspend(master->dev);
1647 
1648 	return ret;
1649 }
1650 
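/*
 * Keep the controller resumed and unmask the slave-start interrupt so that
 * incoming Hot-Join requests are detected and handled.
 */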
1651 static int svc_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
1652 {
1653 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1654 	int ret;
1655 
1656 	ret = pm_runtime_resume_and_get(master->dev);
1657 	if (ret < 0) {
1658 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1659 		return ret;
1660 	}
1661 
1662 	master->enabled_events |= SVC_I3C_EVENT_HOTJOIN;
1663 
1664 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1665 
1666 	return 0;
1667 }
1668 
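/*
 * Stop handling Hot-Join requests: mask the interrupts if no event source is
 * left enabled and let the controller runtime suspend.
 */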
1669 static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
1670 {
1671 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1672 
1673 	master->enabled_events &= ~SVC_I3C_EVENT_HOTJOIN;
1674 
1675 	if (!master->enabled_events)
1676 		svc_i3c_master_disable_interrupts(master);
1677 
1678 	pm_runtime_mark_last_busy(master->dev);
1679 	pm_runtime_put_autosuspend(master->dev);
1680 
1681 	return 0;
1682 }
1683 
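/* Give a processed IBI slot back to the generic pool of @dev. */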
1684 static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
1685 					    struct i3c_ibi_slot *slot)
1686 {
1687 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1688 
1689 	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
1690 }
1691 
1692 static const struct i3c_master_controller_ops svc_i3c_master_ops = {
1693 	.bus_init = svc_i3c_master_bus_init,
1694 	.bus_cleanup = svc_i3c_master_bus_cleanup,
1695 	.attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
1696 	.detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
1697 	.reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
1698 	.attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
1699 	.detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
1700 	.do_daa = svc_i3c_master_do_daa,
1701 	.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
1702 	.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
1703 	.priv_xfers = svc_i3c_master_priv_xfers,
1704 	.i2c_xfers = svc_i3c_master_i2c_xfers,
1705 	.request_ibi = svc_i3c_master_request_ibi,
1706 	.free_ibi = svc_i3c_master_free_ibi,
1707 	.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
1708 	.enable_ibi = svc_i3c_master_enable_ibi,
1709 	.disable_ibi = svc_i3c_master_disable_ibi,
1710 	.enable_hotjoin = svc_i3c_master_enable_hotjoin,
1711 	.disable_hotjoin = svc_i3c_master_disable_hotjoin,
1712 	.set_speed = svc_i3c_master_set_speed,
1713 };
1714 
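/*
 * Enable the pclk, fast_clk and slow_clk clocks in that order, unwinding the
 * ones already enabled if a later clock fails to enable.
 */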
1715 static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
1716 {
1717 	int ret = 0;
1718 
1719 	ret = clk_prepare_enable(master->pclk);
1720 	if (ret)
1721 		return ret;
1722 
1723 	ret = clk_prepare_enable(master->fclk);
1724 	if (ret) {
1725 		clk_disable_unprepare(master->pclk);
1726 		return ret;
1727 	}
1728 
1729 	ret = clk_prepare_enable(master->sclk);
1730 	if (ret) {
1731 		clk_disable_unprepare(master->pclk);
1732 		clk_disable_unprepare(master->fclk);
1733 		return ret;
1734 	}
1735 
1736 	return 0;
1737 }
1738 
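/* Counterpart of svc_i3c_master_prepare_clks(): disable all three clocks. */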
1739 static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
1740 {
1741 	clk_disable_unprepare(master->pclk);
1742 	clk_disable_unprepare(master->fclk);
1743 	clk_disable_unprepare(master->sclk);
1744 }
1745 
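/*
 * Grab the register space, clocks and interrupt line, initialize the transfer
 * queue and IBI slot table, set up runtime PM with autosuspend, reset the
 * controller and finally register it with the I3C core.
 */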
1746 static int svc_i3c_master_probe(struct platform_device *pdev)
1747 {
1748 	struct device *dev = &pdev->dev;
1749 	struct svc_i3c_master *master;
1750 	int ret;
1751 
1752 	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
1753 	if (!master)
1754 		return -ENOMEM;
1755 
1756 	master->regs = devm_platform_ioremap_resource(pdev, 0);
1757 	if (IS_ERR(master->regs))
1758 		return PTR_ERR(master->regs);
1759 
1760 	master->pclk = devm_clk_get(dev, "pclk");
1761 	if (IS_ERR(master->pclk))
1762 		return PTR_ERR(master->pclk);
1763 
1764 	master->fclk = devm_clk_get(dev, "fast_clk");
1765 	if (IS_ERR(master->fclk))
1766 		return PTR_ERR(master->fclk);
1767 
1768 	master->sclk = devm_clk_get(dev, "slow_clk");
1769 	if (IS_ERR(master->sclk))
1770 		return PTR_ERR(master->sclk);
1771 
1772 	master->irq = platform_get_irq(pdev, 0);
1773 	if (master->irq < 0)
1774 		return master->irq;
1775 
1776 	master->dev = dev;
1777 
1778 	ret = svc_i3c_master_prepare_clks(master);
1779 	if (ret)
1780 		return ret;
1781 
1782 	INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
1783 	INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
1784 	mutex_init(&master->lock);
1785 
1786 	ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
1787 			       IRQF_NO_SUSPEND, "svc-i3c-irq", master);
1788 	if (ret)
1789 		goto err_disable_clks;
1790 
1791 	master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);
1792 
1793 	spin_lock_init(&master->xferqueue.lock);
1794 	INIT_LIST_HEAD(&master->xferqueue.list);
1795 
1796 	spin_lock_init(&master->ibi.lock);
1797 	master->ibi.num_slots = SVC_I3C_MAX_DEVS;
1798 	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
1799 					 sizeof(*master->ibi.slots),
1800 					 GFP_KERNEL);
1801 	if (!master->ibi.slots) {
1802 		ret = -ENOMEM;
1803 		goto err_disable_clks;
1804 	}
1805 
1806 	platform_set_drvdata(pdev, master);
1807 
1808 	pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
1809 	pm_runtime_use_autosuspend(&pdev->dev);
1810 	pm_runtime_get_noresume(&pdev->dev);
1811 	pm_runtime_set_active(&pdev->dev);
1812 	pm_runtime_enable(&pdev->dev);
1813 
1814 	svc_i3c_master_reset(master);
1815 
1816 	/* Register the master */
1817 	ret = i3c_master_register(&master->base, &pdev->dev,
1818 				  &svc_i3c_master_ops, false);
1819 	if (ret)
1820 		goto rpm_disable;
1821 
1822 	pm_runtime_mark_last_busy(&pdev->dev);
1823 	pm_runtime_put_autosuspend(&pdev->dev);
1824 
1825 	return 0;
1826 
1827 rpm_disable:
1828 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1829 	pm_runtime_put_noidle(&pdev->dev);
1830 	pm_runtime_set_suspended(&pdev->dev);
1831 	pm_runtime_disable(&pdev->dev);
1832 
1833 err_disable_clks:
1834 	svc_i3c_master_unprepare_clks(master);
1835 
1836 	return ret;
1837 }
1838 
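/*
 * Flush the Hot-Join work, unregister the controller from the I3C core and
 * disable runtime PM.
 */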
1839 static void svc_i3c_master_remove(struct platform_device *pdev)
1840 {
1841 	struct svc_i3c_master *master = platform_get_drvdata(pdev);
1842 
1843 	cancel_work_sync(&master->hj_work);
1844 	i3c_master_unregister(&master->base);
1845 
1846 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1847 	pm_runtime_disable(&pdev->dev);
1848 }
1849 
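/*
 * Save MCONFIG and MDYNADDR so they can be reprogrammed if the controller
 * loses its state while suspended.
 */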
1850 static void svc_i3c_save_regs(struct svc_i3c_master *master)
1851 {
1852 	master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
1853 	master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
1854 }
1855 
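/*
 * Restore MCONFIG and MDYNADDR, but only if MDYNADDR no longer matches the
 * saved value, i.e. the controller actually lost its state while suspended.
 */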
1856 static void svc_i3c_restore_regs(struct svc_i3c_master *master)
1857 {
1858 	if (readl(master->regs + SVC_I3C_MDYNADDR) !=
1859 	    master->saved_regs.mdynaddr) {
1860 		writel(master->saved_regs.mconfig,
1861 		       master->regs + SVC_I3C_MCONFIG);
1862 		writel(master->saved_regs.mdynaddr,
1863 		       master->regs + SVC_I3C_MDYNADDR);
1864 	}
1865 }
1866 
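/* Save the controller registers, gate the clocks and switch pins to sleep state. */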
1867 static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
1868 {
1869 	struct svc_i3c_master *master = dev_get_drvdata(dev);
1870 
1871 	svc_i3c_save_regs(master);
1872 	svc_i3c_master_unprepare_clks(master);
1873 	pinctrl_pm_select_sleep_state(dev);
1874 
1875 	return 0;
1876 }
1877 
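/*
 * Switch pins back to their default state, re-enable the clocks and restore
 * the controller registers if they were lost during suspend.
 */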
1878 static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
1879 {
1880 	struct svc_i3c_master *master = dev_get_drvdata(dev);
1881 
1882 	pinctrl_pm_select_default_state(dev);
1883 	svc_i3c_master_prepare_clks(master);
1884 
1885 	svc_i3c_restore_regs(master);
1886 
1887 	return 0;
1888 }
1889 
1890 static const struct dev_pm_ops svc_i3c_pm_ops = {
1891 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1892 				      pm_runtime_force_resume)
1893 	SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
1894 			   svc_i3c_runtime_resume, NULL)
1895 };
1896 
1897 static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
1898 	{ .compatible = "silvaco,i3c-master-v1"},
1899 	{ /* sentinel */ },
1900 };
1901 MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
1902 
1903 static struct platform_driver svc_i3c_master = {
1904 	.probe = svc_i3c_master_probe,
1905 	.remove_new = svc_i3c_master_remove,
1906 	.driver = {
1907 		.name = "silvaco-i3c-master",
1908 		.of_match_table = svc_i3c_master_of_match_tbl,
1909 		.pm = &svc_i3c_pm_ops,
1910 	},
1911 };
1912 module_platform_driver(svc_i3c_master);
1913 
1914 MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
1915 MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
1916 MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
1917 MODULE_LICENSE("GPL v2");
1918