// SPDX-License-Identifier: GPL-2.0
/*
 * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
 *
 * Copyright (C) 2011 Weinmann Medical GmbH
 * Author: Nikolaus Voss <n.voss@weinmann.de>
 *
 * Evolved from original work by:
 * Copyright (C) 2004 Rick Bronson
 * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
 *
 * Borrowed heavily from original work by:
 * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "i2c-at91.h"

void at91_init_twi_bus_master(struct at91_twi_dev *dev)
{
	struct at91_twi_pdata *pdata = dev->pdata;
	u32 filtr = 0;

	/* FIFO should be enabled immediately after the software reset */
	if (dev->fifo_size)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
	at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);

	/* enable digital filter */
	if (pdata->has_dig_filtr && dev->enable_dig_filt)
		filtr |= AT91_TWI_FILTR_FILT;

	/* enable advanced digital filter */
	if (pdata->has_adv_dig_filtr && dev->enable_dig_filt)
		filtr |= AT91_TWI_FILTR_FILT |
			 (AT91_TWI_FILTR_THRES(dev->filter_width) &
			  AT91_TWI_FILTR_THRES_MASK);

	/* enable analog filter */
	if (pdata->has_ana_filtr && dev->enable_ana_filt)
		filtr |= AT91_TWI_FILTR_PADFEN;

	if (filtr)
		at91_twi_write(dev, AT91_TWI_FILTR, filtr);
}

/*
 * Calculate symmetric clock as stated in datasheet:
 * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
 */
static void at91_calc_twi_clock(struct at91_twi_dev *dev)
{
	int ckdiv, cdiv, div, hold = 0, filter_width = 0;
	struct at91_twi_pdata *pdata = dev->pdata;
	int offset = pdata->clk_offset;
	int max_ckdiv = pdata->clk_max_div;
	struct i2c_timings timings, *t = &timings;

	i2c_parse_fw_timings(dev->dev, t, true);

	div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
				       2 * t->bus_freq_hz) - offset);
	ckdiv = fls(div >> 8);
	cdiv = div >> ckdiv;
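	/*
	 * Illustrative example (assumed values, not from a datasheet): with
	 * a 66 MHz peripheral clock, a 400 kHz bus and offset = 4,
	 * div = DIV_ROUND_UP(66000000, 800000) - 4 = 79, so div >> 8 = 0,
	 * ckdiv = 0 and cdiv = 79, i.e. the divider fits entirely in CDIV.
	 */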

	if (ckdiv > max_ckdiv) {
		dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
			 ckdiv, max_ckdiv);
		ckdiv = max_ckdiv;
		cdiv = 255;
	}

	if (pdata->has_hold_field) {
		/*
		 * hold time = HOLD + 3 x T_peripheral_clock
		 * Use clk rate in kHz to prevent overflows when computing
		 * hold.
		 */
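		/*
		 * Illustrative example (assumed values): with
		 * sda_hold_ns = 300 and an 82 MHz peripheral clock,
		 * hold = DIV_ROUND_UP(300 * 82000, 1000000) - 3 = 25 - 3 = 22.
		 */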
		hold = DIV_ROUND_UP(t->sda_hold_ns
				    * (clk_get_rate(dev->clk) / 1000), 1000000);
		hold -= 3;
		if (hold < 0)
			hold = 0;
		if (hold > AT91_TWI_CWGR_HOLD_MAX) {
			dev_warn(dev->dev,
				 "HOLD field set to its maximum value (%d instead of %d)\n",
				 AT91_TWI_CWGR_HOLD_MAX, hold);
			hold = AT91_TWI_CWGR_HOLD_MAX;
		}
	}

	if (pdata->has_adv_dig_filtr) {
		/*
		 * filter width = 0 to AT91_TWI_FILTR_THRES_MAX
		 * peripheral clocks
		 */
		filter_width = DIV_ROUND_UP(t->digital_filter_width_ns
				* (clk_get_rate(dev->clk) / 1000), 1000000);
		if (filter_width > AT91_TWI_FILTR_THRES_MAX) {
			dev_warn(dev->dev,
				 "Filter threshold set to its maximum value (%d instead of %d)\n",
				 AT91_TWI_FILTR_THRES_MAX, filter_width);
			filter_width = AT91_TWI_FILTR_THRES_MAX;
		}
	}

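	/* The high and low dividers are both set to cdiv, giving a symmetric clock. */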
	dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv
			    | AT91_TWI_CWGR_HOLD(hold);

	dev->filter_width = filter_width;

	dev_dbg(dev->dev, "cdiv %d ckdiv %d hold %d (%d ns), filter_width %d (%d ns)\n",
		cdiv, ckdiv, hold, t->sda_hold_ns, filter_width,
		t->digital_filter_width_ns);
}

static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
{
	struct at91_twi_dma *dma = &dev->dma;

	at91_twi_irq_save(dev);

	if (dma->xfer_in_progress) {
		if (dma->direction == DMA_FROM_DEVICE)
			dmaengine_terminate_sync(dma->chan_rx);
		else
			dmaengine_terminate_sync(dma->chan_tx);
		dma->xfer_in_progress = false;
	}
	if (dma->buf_mapped) {
		dma_unmap_single(dev->dev, sg_dma_address(&dma->sg[0]),
				 dev->buf_len, dma->direction);
		dma->buf_mapped = false;
	}

	at91_twi_irq_restore(dev);
}

static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
	if (!dev->buf_len)
		return;

	/* 8bit write works with and without FIFO */
	writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);

	/* send stop when last byte has been written */
	if (--dev->buf_len == 0) {
		if (!dev->use_alt_cmd)
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
		at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
	}

	dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_write_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_TO_DEVICE);

	/*
	 * When this callback is called, THR/TX FIFO is likely not to be empty
	 * yet. So we have to wait for the TXCOMP or NACK bits to be set in the
	 * Status Register to be sure that the STOP bit has been sent and the
	 * transfer is completed. The NACK interrupt has already been enabled,
	 * so we only have to enable the TXCOMP one here.
	 */
	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	if (!dev->use_alt_cmd)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}

static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *txdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_tx = dma->chan_tx;
	unsigned int sg_len = 1;

	if (!dev->buf_len)
		return;

	dma->direction = DMA_TO_DEVICE;

	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size) {
		size_t part1_len, part2_len;
		struct scatterlist *sg;
		unsigned fifo_mr;

		sg_len = 0;

		part1_len = dev->buf_len & ~0x3;
		if (part1_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part1_len;
			sg_dma_address(sg) = dma_addr;
		}

		part2_len = dev->buf_len & 0x3;
		if (part2_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part2_len;
			sg_dma_address(sg) = dma_addr + part1_len;
		}
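		/*
		 * For example, a 10-byte buffer is split into an 8-byte,
		 * word-aligned first entry and a 2-byte remainder entry.
		 */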

		/*
		 * DMA controller is triggered when at least 4 data can be
		 * written into the TX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_TXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	} else {
		sg_dma_len(&dma->sg[0]) = dev->buf_len;
		sg_dma_address(&dma->sg[0]) = dma_addr;
	}

	txdesc = dmaengine_prep_slave_sg(chan_tx, dma->sg, sg_len,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	txdesc->callback = at91_twi_write_data_dma_callback;
	txdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(txdesc);
	dma_async_issue_pending(chan_tx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
	/*
	 * If we are in this case, it means there is garbage data in RHR, so
	 * discard it.
	 */
	if (!dev->buf_len) {
		at91_twi_read(dev, AT91_TWI_RHR);
		return;
	}

	/* 8bit read works with and without FIFO */
	*dev->buf = readb_relaxed(dev->base + AT91_TWI_RHR);
	--dev->buf_len;

	/* return if aborting, we only needed to read RHR to clear RXRDY */
	if (dev->recv_len_abort)
		return;

	/* handle I2C_SMBUS_BLOCK_DATA */
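	/*
	 * For example, for an I2C_SMBUS_BLOCK_DATA read where the length
	 * byte just received is 4, buf_len becomes 4 (four more data bytes
	 * to read) and msg->len is updated to 5 (length byte plus data).
	 */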
	if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
		/* ensure length byte is a valid value */
		if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
			dev->msg->flags &= ~I2C_M_RECV_LEN;
			dev->buf_len += *dev->buf;
			dev->msg->len = dev->buf_len + 1;
			dev_dbg(dev->dev, "received block length %zu\n",
				dev->buf_len);
		} else {
			/* abort and send the stop by reading one more byte */
			dev->recv_len_abort = true;
			dev->buf_len = 1;
		}
	}

	/* send stop if the second-to-last byte has been read */
	if (!dev->use_alt_cmd && dev->buf_len == 1)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "read 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_read_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
	unsigned ier = AT91_TWI_TXCOMP;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_FROM_DEVICE);

	if (!dev->use_alt_cmd) {
		/* The last two bytes have to be read without using dma */
		dev->buf += dev->buf_len - 2;
		dev->buf_len = 2;
		ier |= AT91_TWI_RXRDY;
	}
	at91_twi_write(dev, AT91_TWI_IER, ier);
}

static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *rxdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_rx = dma->chan_rx;
	size_t buf_len;

	buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
	dma->direction = DMA_FROM_DEVICE;

	/* Keep in mind that we won't use dma to read the last two bytes */
	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size && IS_ALIGNED(buf_len, 4)) {
		unsigned fifo_mr;

		/*
		 * DMA controller is triggered when at least 4 data can be
		 * read from the RX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_RXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	}

	sg_dma_len(&dma->sg[0]) = buf_len;
	sg_dma_address(&dma->sg[0]) = dma_addr;

	rxdesc = dmaengine_prep_slave_sg(chan_rx, dma->sg, 1, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	rxdesc->callback = at91_twi_read_data_dma_callback;
	rxdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(rxdesc);
	dma_async_issue_pending(dma->chan_rx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
	struct at91_twi_dev *dev = dev_id;
	const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
	const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);

	if (!irqstatus)
		return IRQ_NONE;
	/*
	 * In reception, the behavior of the twi device (before sama5d2) is
	 * weird. There is some magic about the RXRDY flag! When a byte has
	 * almost been received, the reception of a new one is anticipated if
	 * there is no stop command to send. That is why we ask to send the
	 * stop command on the second-to-last byte instead of on the last one.
	 *
	 * Unfortunately, we could still have the RXRDY flag set even if the
	 * transfer is done and we have read the last data. It might happen
	 * when the i2c slave device sends data too quickly after receiving
	 * the ack from the master. The data has almost been received before
	 * the order to send stop arrives. In this case, sending the stop
	 * command could cause an RXRDY interrupt together with a TXCOMP one.
	 * It is better to manage the RXRDY interrupt first in order to not
	 * keep garbage data in the Receive Holding Register for the next
	 * transfer.
	 */
	if (irqstatus & AT91_TWI_RXRDY) {
		/*
		 * Read all available bytes at once by polling RXRDY usable w/
		 * and w/o FIFO. With FIFO enabled we could also read RXFL and
		 * avoid polling RXRDY.
		 */
		do {
			at91_twi_read_next_byte(dev);
		} while (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY);
	}

	/*
	 * When a NACK condition is detected, the I2C controller sets the NACK,
	 * TXCOMP and TXRDY bits all together in the Status Register (SR).
	 *
	 * 1 - Handling NACK errors with CPU write transfer.
	 *
	 * In such case, we should not write the next byte into the Transmit
	 * Holding Register (THR) otherwise the I2C controller would start a new
	 * transfer and the I2C slave is likely to reply by another NACK.
	 *
	 * 2 - Handling NACK errors with DMA write transfer.
	 *
	 * By setting the TXRDY bit in the SR, the I2C controller also triggers
	 * the DMA controller to write the next data into the THR. Then the
	 * result depends on the hardware version of the I2C controller.
	 *
	 * 2a - Without support of the Alternative Command mode.
	 *
	 * This is the worst case: the DMA controller is triggered to write the
	 * next data into the THR, hence starting a new transfer: the I2C slave
	 * is likely to reply by another NACK.
	 * Concurrently, this interrupt handler is likely to be called to manage
	 * the first NACK before the I2C controller detects the second NACK and
	 * sets once again the NACK bit into the SR.
	 * When handling the first NACK, this interrupt handler disables the I2C
	 * controller interrupts, especially the NACK interrupt.
	 * Hence, the NACK bit is left pending in the SR. This is why we should
	 * read the SR to clear all pending interrupts at the beginning of
	 * at91_do_twi_transfer() before actually starting a new transfer.
	 *
	 * 2b - With support of the Alternative Command mode.
	 *
	 * When a NACK condition is detected, the I2C controller also locks the
	 * THR (and sets the LOCK bit in the SR): even though the DMA controller
	 * is triggered by the TXRDY bit to write the next data into the THR,
	 * this data actually won't go on the I2C bus hence a second NACK is not
	 * generated.
	 */
	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
		at91_disable_twi_interrupts(dev);
		complete(&dev->cmd_complete);
	} else if (irqstatus & AT91_TWI_TXRDY) {
		at91_twi_write_next_byte(dev);
	}

	/* catch error flags */
	dev->transfer_status |= status;

	return IRQ_HANDLED;
}

static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
	int ret;
	unsigned long time_left;
	bool has_unre_flag = dev->pdata->has_unre_flag;
	bool has_alt_cmd = dev->pdata->has_alt_cmd;

	/*
	 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
	 * read flag but shows the state of the transmission at the time the
	 * Status Register is read. According to the programmer datasheet,
	 * TXCOMP is set when both holding register and internal shifter are
	 * empty and STOP condition has been sent.
	 * Consequently, we should enable the NACK interrupt rather than TXCOMP
	 * to detect transmission failure.
	 * Indeed let's take the case of an i2c write command using DMA.
	 * Whenever the slave doesn't acknowledge a byte, the LOCK, NACK and
	 * TXCOMP bits are set together into the Status Register.
	 * LOCK is a clear on write bit, which is set to prevent the DMA
	 * controller from sending new data on the i2c bus after a NACK
	 * condition has happened. Once locked, this i2c peripheral stops
	 * triggering the DMA controller for new data but it is more than
	 * likely that a new DMA transaction is already in progress, writing
	 * into the Transmit Holding Register. Since the peripheral is locked,
	 * these new data won't be sent to the i2c bus but they will remain
	 * in the Transmit Holding Register, so the TXCOMP bit is cleared.
	 * Then when the interrupt handler is called, the Status Register is
	 * read: the TXCOMP bit is clear but the NACK bit is still set. The
	 * driver manages the error properly, without waiting for timeout.
	 * This case can be reproduced easily when writing into an at24 eeprom.
	 *
	 * Besides, the TXCOMP bit is already set before the i2c transaction
	 * has been started. For read transactions, this bit is cleared when
	 * writing the START bit into the Control Register. So the
	 * corresponding interrupt can safely be enabled just after.
	 * However for write transactions managed by the CPU, we first write
	 * into THR, so TXCOMP is cleared. Then we can safely enable the TXCOMP
	 * interrupt. If the TXCOMP interrupt were enabled before writing into
	 * THR, the interrupt handler would be called immediately and the i2c
	 * command would be reported as completed.
	 * Also when a write transaction is managed by the DMA controller,
	 * enabling the TXCOMP interrupt in this function may lead to a race
	 * condition since we don't know whether the TXCOMP interrupt is enabled
	 * before or after the DMA has started to write into THR. So the TXCOMP
	 * interrupt is enabled later by at91_twi_write_data_dma_callback().
	 * Immediately after in that DMA callback, if the alternative command
	 * mode is not used, we still need to send the STOP condition manually
	 * by writing the corresponding bit into the Control Register.
	 */

	dev_dbg(dev->dev, "transfer: %s %zu bytes.\n",
		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

	reinit_completion(&dev->cmd_complete);
	dev->transfer_status = 0;

	/* Clear pending interrupts, such as NACK. */
	at91_twi_read(dev, AT91_TWI_SR);

	if (dev->fifo_size) {
		unsigned fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);

		/* Reset FIFO mode register */
		fifo_mr &= ~(AT91_TWI_FMR_TXRDYM_MASK |
			     AT91_TWI_FMR_RXRDYM_MASK);
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_ONE_DATA);
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_ONE_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);

		/* Flush FIFOs */
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_RHRCLR);
	}

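	/* A zero-length message is sent as an address-only (Quick command) transfer. */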
	if (!dev->buf_len) {
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	} else if (dev->msg->flags & I2C_M_RD) {
		unsigned start_flags = AT91_TWI_START;

		/* if only one byte is to be read, immediately stop transfer */
		if (!dev->use_alt_cmd && dev->buf_len <= 1 &&
		    !(dev->msg->flags & I2C_M_RECV_LEN))
			start_flags |= AT91_TWI_STOP;
		at91_twi_write(dev, AT91_TWI_CR, start_flags);
		/*
		 * When using dma without alternative command mode, the last
		 * byte has to be read manually in order to not send the stop
		 * command too late and then to receive extra data.
		 * In practice, there are some issues if you use the dma to
		 * read n-1 bytes because of latency.
		 * Reading n-2 bytes with dma and the two last ones manually
		 * seems to be the best solution.
		 */
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_read_data_dma(dev);
		} else {
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP |
				       AT91_TWI_NACK |
				       AT91_TWI_RXRDY);
		}
	} else {
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_write_data_dma(dev);
		} else {
			at91_twi_write_next_byte(dev);
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP | AT91_TWI_NACK |
				       (dev->buf_len ? AT91_TWI_TXRDY : 0));
		}
	}

	time_left = wait_for_completion_timeout(&dev->cmd_complete,
						dev->adapter.timeout);
	if (time_left == 0) {
		dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
		at91_init_twi_bus(dev);
		ret = -ETIMEDOUT;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_NACK) {
		dev_dbg(dev->dev, "received nack\n");
		ret = -EREMOTEIO;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_OVRE) {
		dev_err(dev->dev, "overrun while reading\n");
		ret = -EIO;
		goto error;
	}
	if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
		dev_err(dev->dev, "underrun while writing\n");
		ret = -EIO;
		goto error;
	}
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_err(dev->dev, "tx locked\n");
		ret = -EIO;
		goto error;
	}
	if (dev->recv_len_abort) {
		dev_err(dev->dev, "invalid smbus block length recvd\n");
		ret = -EPROTO;
		goto error;
	}

	dev_dbg(dev->dev, "transfer complete\n");

	return 0;

error:
	/* first stop DMA transfer if still in progress */
	at91_twi_dma_cleanup(dev);
	/* then flush THR/FIFO and unlock TX if locked */
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_dbg(dev->dev, "unlock tx\n");
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_LOCKCLR);
	}

	/*
	 * some faulty I2C slave devices might hold SDA down;
	 * we can send a bus clear command, hoping that the pins will be
	 * released
	 */
	i2c_recover_bus(&dev->adapter);

	return ret;
}

static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);
	int ret;
	unsigned int_addr_flag = 0;
	struct i2c_msg *m_start = msg;
	bool is_read;
	u8 *dma_buf = NULL;

	dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	if (num == 2) {
		int internal_address = 0;
		int i;

		/* 1st msg is put into the internal address, start with 2nd */
		m_start = &msg[1];
		for (i = 0; i < msg->len; ++i) {
			const unsigned addr = msg->buf[msg->len - 1 - i];

			internal_address |= addr << (8 * i);
			int_addr_flag += AT91_TWI_IADRSZ_1;
		}
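		/*
		 * For example, a two-byte first message {0x12, 0x34} is
		 * programmed as IADR = 0x1234 with an internal address size
		 * of two bytes.
		 */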
		at91_twi_write(dev, AT91_TWI_IADR, internal_address);
	}

	dev->use_alt_cmd = false;
	is_read = (m_start->flags & I2C_M_RD);
	if (dev->pdata->has_alt_cmd) {
		if (m_start->len > 0 &&
		    m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
			at91_twi_write(dev, AT91_TWI_ACR,
				       AT91_TWI_ACR_DATAL(m_start->len) |
				       ((is_read) ? AT91_TWI_ACR_DIR : 0));
			dev->use_alt_cmd = true;
		} else {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
		}
	}

	at91_twi_write(dev, AT91_TWI_MMR,
		       (m_start->addr << 16) |
		       int_addr_flag |
		       ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));

	dev->buf_len = m_start->len;
	dev->buf = m_start->buf;
	dev->msg = m_start;
	dev->recv_len_abort = false;

	if (dev->use_dma) {
		dma_buf = i2c_get_dma_safe_msg_buf(m_start, 1);
		if (!dma_buf) {
			ret = -ENOMEM;
			goto out;
		}
		dev->buf = dma_buf;
	}

	ret = at91_do_twi_transfer(dev);
	i2c_put_dma_safe_msg_buf(dma_buf, m_start, !ret);

	ret = (ret < 0) ? ret : num;
out:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

/*
 * The hardware can handle at most two messages concatenated by a
 * repeated start via its internal address feature; the internal
 * address register is at most three bytes wide, hence the
 * max_comb_1st_msg_len limit below.
 */
static const struct i2c_adapter_quirks at91_twi_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR,
	.max_comb_1st_msg_len = 3,
};

static u32 at91_twi_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
		| I2C_FUNC_SMBUS_READ_BLOCK_DATA;
}

static const struct i2c_algorithm at91_twi_algorithm = {
	.master_xfer = at91_twi_xfer,
	.functionality = at91_twi_func,
};

static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
	int ret = 0;
	struct dma_slave_config slave_config;
	struct at91_twi_dma *dma = &dev->dma;
	enum dma_slave_buswidth addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	/*
	 * The actual width of the access will be chosen in
	 * dmaengine_prep_slave_sg():
	 * for each buffer in the scatter-gather list, if its size is aligned
	 * to addr_width then addr_width accesses will be performed to transfer
	 * the buffer. On the other hand, if the buffer size is not aligned to
	 * addr_width then the buffer is transferred using single byte accesses.
	 * Please refer to the Atmel eXtended DMA controller driver.
	 * When FIFOs are used, the TXRDYM threshold can always be set to
	 * trigger the XDMAC when at least 4 data can be written into the TX
	 * FIFO, even if single byte accesses are performed.
	 * However the RXRDYM threshold must be set to fit the access width,
	 * deduced from buffer length, so the XDMAC is triggered properly to
	 * read data from the RX FIFO.
	 */
	if (dev->fifo_size)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	memset(&slave_config, 0, sizeof(slave_config));
	slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
	slave_config.src_addr_width = addr_width;
	slave_config.src_maxburst = 1;
	slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
	slave_config.dst_addr_width = addr_width;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	dma->chan_tx = dma_request_chan(dev->dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = PTR_ERR(dma->chan_tx);
		dma->chan_tx = NULL;
		goto error;
	}

	dma->chan_rx = dma_request_chan(dev->dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		ret = PTR_ERR(dma->chan_rx);
		dma->chan_rx = NULL;
		goto error;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
		dev_err(dev->dev, "failed to configure tx channel\n");
		ret = -EINVAL;
		goto error;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
		dev_err(dev->dev, "failed to configure rx channel\n");
		ret = -EINVAL;
		goto error;
	}

	sg_init_table(dma->sg, 2);
	dma->buf_mapped = false;
	dma->xfer_in_progress = false;
	dev->use_dma = true;

	dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
		 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));

	return ret;

error:
	if (ret != -EPROBE_DEFER)
		dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n");
	if (dma->chan_rx)
		dma_release_channel(dma->chan_rx);
	if (dma->chan_tx)
		dma_release_channel(dma->chan_tx);
	return ret;
}

static int at91_init_twi_recovery_gpio(struct platform_device *pdev,
				       struct at91_twi_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;

	rinfo->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (!rinfo->pinctrl) {
		dev_info(dev->dev, "pinctrl unavailable, bus recovery not supported\n");
		return 0;
	}
	if (IS_ERR(rinfo->pinctrl)) {
		dev_info(dev->dev, "can't get pinctrl, bus recovery not supported\n");
		return PTR_ERR(rinfo->pinctrl);
	}
	dev->adapter.bus_recovery_info = rinfo;

	return 0;
}

static int at91_twi_recover_bus_cmd(struct i2c_adapter *adap)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);

	dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
	if (!(dev->transfer_status & AT91_TWI_SDA)) {
		dev_dbg(dev->dev, "SDA is down; sending bus clear command\n");
		if (dev->use_alt_cmd) {
			unsigned int acr;

			acr = at91_twi_read(dev, AT91_TWI_ACR);
			acr &= ~AT91_TWI_ACR_DATAL_MASK;
			at91_twi_write(dev, AT91_TWI_ACR, acr);
		}
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_CLEAR);
	}

	return 0;
}

static int at91_init_twi_recovery_info(struct platform_device *pdev,
				       struct at91_twi_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
	bool has_clear_cmd = dev->pdata->has_clear_cmd;

	if (!has_clear_cmd)
		return at91_init_twi_recovery_gpio(pdev, dev);

	rinfo->recover_bus = at91_twi_recover_bus_cmd;
	dev->adapter.bus_recovery_info = rinfo;

	return 0;
}

int at91_twi_probe_master(struct platform_device *pdev,
			  u32 phy_addr, struct at91_twi_dev *dev)
{
	int rc;

	init_completion(&dev->cmd_complete);

	rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
			      dev_name(dev->dev), dev);
	if (rc) {
		dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
		return rc;
	}

	if (dev->dev->of_node) {
		rc = at91_twi_configure_dma(dev, phy_addr);
		if (rc == -EPROBE_DEFER)
			return rc;
	}

	if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
				  &dev->fifo_size)) {
		dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size);
	}

	dev->enable_dig_filt = of_property_read_bool(pdev->dev.of_node,
						     "i2c-digital-filter");

	dev->enable_ana_filt = of_property_read_bool(pdev->dev.of_node,
						     "i2c-analog-filter");
	at91_calc_twi_clock(dev);

	rc = at91_init_twi_recovery_info(pdev, dev);
	if (rc == -EPROBE_DEFER)
		return rc;

	dev->adapter.algo = &at91_twi_algorithm;
	dev->adapter.quirks = &at91_twi_quirks;

	return 0;
}