// SPDX-License-Identifier: GPL-2.0+
/*
 * ***************************************************************************
 * Marvell Armada-3700 Serial Driver
 * Author: Wilson Ding <dingwei@marvell.com>
 * Copyright (C) 2015 Marvell International Ltd.
 * ***************************************************************************
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Register Map */
#define UART_STD_RBR		0x00
#define UART_EXT_RBR		0x18

#define UART_STD_TSH		0x04
#define UART_EXT_TSH		0x1C

#define UART_STD_CTRL1		0x08
#define UART_EXT_CTRL1		0x04
#define CTRL_SOFT_RST		BIT(31)
#define CTRL_TXFIFO_RST		BIT(15)
#define CTRL_RXFIFO_RST		BIT(14)
#define CTRL_SND_BRK_SEQ	BIT(11)
#define CTRL_BRK_DET_INT	BIT(3)
#define CTRL_FRM_ERR_INT	BIT(2)
#define CTRL_PAR_ERR_INT	BIT(1)
#define CTRL_OVR_ERR_INT	BIT(0)
#define CTRL_BRK_INT		(CTRL_BRK_DET_INT | CTRL_FRM_ERR_INT | \
				 CTRL_PAR_ERR_INT | CTRL_OVR_ERR_INT)

#define UART_STD_CTRL2		UART_STD_CTRL1
#define UART_EXT_CTRL2		0x20
#define CTRL_STD_TX_RDY_INT	BIT(5)
#define CTRL_EXT_TX_RDY_INT	BIT(6)
#define CTRL_STD_RX_RDY_INT	BIT(4)
#define CTRL_EXT_RX_RDY_INT	BIT(5)

#define UART_STAT		0x0C
#define STAT_TX_FIFO_EMP	BIT(13)
#define STAT_TX_FIFO_FUL	BIT(11)
#define STAT_TX_EMP		BIT(6)
#define STAT_STD_TX_RDY		BIT(5)
#define STAT_EXT_TX_RDY		BIT(15)
#define STAT_STD_RX_RDY		BIT(4)
#define STAT_EXT_RX_RDY		BIT(14)
#define STAT_BRK_DET		BIT(3)
#define STAT_FRM_ERR		BIT(2)
#define STAT_PAR_ERR		BIT(1)
#define STAT_OVR_ERR		BIT(0)
#define STAT_BRK_ERR		(STAT_BRK_DET | STAT_FRM_ERR \
				 | STAT_PAR_ERR | STAT_OVR_ERR)

/*
 * Marvell Armada 3700 Functional Specifications describes that bit 21 of UART
 * Clock Control register controls UART1 and bit 20 controls UART2. But in
 * reality bit 21 controls UART2 and bit 20 controls UART1. This seems to be an
 * error in Marvell's documentation. Hence following CLK_DIS macros are swapped.
 */

#define UART_BRDV		0x10
/* These bits are located in UART1 address space and control UART2 */
#define UART2_CLK_DIS		BIT(21)
/* These bits are located in UART1 address space and control UART1 */
#define UART1_CLK_DIS		BIT(20)
/* These bits are located in UART1 address space and control both UARTs */
#define CLK_NO_XTAL		BIT(19)
#define CLK_TBG_DIV1_SHIFT	15
#define CLK_TBG_DIV1_MASK	0x7
#define CLK_TBG_DIV1_MAX	6
#define CLK_TBG_DIV2_SHIFT	12
#define CLK_TBG_DIV2_MASK	0x7
#define CLK_TBG_DIV2_MAX	6
#define CLK_TBG_SEL_SHIFT	10
#define CLK_TBG_SEL_MASK	0x3
/* These bits are located in both UARTs address space */
#define BRDV_BAUD_MASK		0x3FF
#define BRDV_BAUD_MAX		BRDV_BAUD_MASK

#define UART_OSAMP		0x14
#define OSAMP_DEFAULT_DIVISOR	16
#define OSAMP_DIVISORS_MASK	0x3F3F3F3F
#define OSAMP_MAX_DIVISOR	63

#define MVEBU_NR_UARTS		2

#define MVEBU_UART_TYPE		"mvebu-uart"
#define DRIVER_NAME		"mvebu_serial"

enum {
	/* Either there is only one summed IRQ... */
	UART_IRQ_SUM = 0,
	/* ...or there are two separate IRQs for RX and TX */
	UART_RX_IRQ = 0,
	UART_TX_IRQ,
	UART_IRQ_COUNT
};

/* Diverging register offsets */
struct uart_regs_layout {
	unsigned int rbr;
	unsigned int tsh;
	unsigned int ctrl;
	unsigned int intr;
};

/* Diverging flags */
struct uart_flags {
	unsigned int ctrl_tx_rdy_int;
	unsigned int ctrl_rx_rdy_int;
	unsigned int stat_tx_rdy;
	unsigned int stat_rx_rdy;
};

/* Driver data, a structure for each UART port */
struct mvebu_uart_driver_data {
	bool is_ext;
	struct uart_regs_layout regs;
	struct uart_flags flags;
};

/* Saved registers during suspend */
struct mvebu_uart_pm_regs {
	unsigned int rbr;
	unsigned int tsh;
	unsigned int ctrl;
	unsigned int intr;
	unsigned int stat;
	unsigned int brdv;
	unsigned int osamp;
};

/* MVEBU UART driver structure */
struct mvebu_uart {
	struct uart_port *port;
	struct clk *clk;
	int irq[UART_IRQ_COUNT];
	struct mvebu_uart_driver_data *data;
#if defined(CONFIG_PM)
	struct mvebu_uart_pm_regs pm_regs;
#endif /* CONFIG_PM */
};

static struct mvebu_uart *to_mvuart(struct uart_port *port)
{
	return (struct mvebu_uart *)port->private_data;
}

#define IS_EXTENDED(port) (to_mvuart(port)->data->is_ext)

#define UART_RBR(port) (to_mvuart(port)->data->regs.rbr)
#define UART_TSH(port) (to_mvuart(port)->data->regs.tsh)
#define UART_CTRL(port) (to_mvuart(port)->data->regs.ctrl)
#define UART_INTR(port) (to_mvuart(port)->data->regs.intr)

#define CTRL_TX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_tx_rdy_int)
#define CTRL_RX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_rx_rdy_int)
#define STAT_TX_RDY(port) (to_mvuart(port)->data->flags.stat_tx_rdy)
#define STAT_RX_RDY(port) (to_mvuart(port)->data->flags.stat_rx_rdy)

static struct uart_port mvebu_uart_ports[MVEBU_NR_UARTS];

static DEFINE_SPINLOCK(mvebu_uart_lock);

/* Core UART Driver Operations */
static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
{
	unsigned long flags;
	unsigned int st;

	uart_port_lock_irqsave(port, &flags);
	st = readl(port->membase + UART_STAT);
	uart_port_unlock_irqrestore(port, flags);

	return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
}

static unsigned int mvebu_uart_get_mctrl(struct uart_port *port)
{
	return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
}

static void mvebu_uart_set_mctrl(struct uart_port *port,
				 unsigned int mctrl)
{
	/*
	 * Even if we do not support configuring the modem control lines,
	 * this function must be provided to the serial core
	 */
}

static void mvebu_uart_stop_tx(struct uart_port *port)
{
	unsigned int ctl = readl(port->membase + UART_INTR(port));

	ctl &= ~CTRL_TX_RDY_INT(port);
	writel(ctl, port->membase + UART_INTR(port));
}

static void mvebu_uart_start_tx(struct uart_port *port)
{
	unsigned int ctl;
	unsigned char c;

	if (IS_EXTENDED(port) && uart_fifo_get(port, &c))
		writel(c, port->membase + UART_TSH(port));

	ctl = readl(port->membase + UART_INTR(port));
	ctl |= CTRL_TX_RDY_INT(port);
	writel(ctl, port->membase + UART_INTR(port));
}

static void mvebu_uart_stop_rx(struct uart_port *port)
{
	unsigned int ctl;

	ctl = readl(port->membase + UART_CTRL(port));
	ctl &= ~CTRL_BRK_INT;
	writel(ctl, port->membase + UART_CTRL(port));

	ctl = readl(port->membase + UART_INTR(port));
	ctl &= ~CTRL_RX_RDY_INT(port);
	writel(ctl, port->membase + UART_INTR(port));
}

static void mvebu_uart_break_ctl(struct uart_port *port, int brk)
{
	unsigned int ctl;
	unsigned long flags;

	uart_port_lock_irqsave(port, &flags);
	ctl = readl(port->membase + UART_CTRL(port));
	if (brk == -1)
		ctl |= CTRL_SND_BRK_SEQ;
	else
		ctl &= ~CTRL_SND_BRK_SEQ;
	writel(ctl, port->membase + UART_CTRL(port));
	uart_port_unlock_irqrestore(port, flags);
}

static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
{
	struct tty_port *tport = &port->state->port;
	unsigned char ch = 0;
	char flag = 0;
	int ret;

	do {
		if (status & STAT_RX_RDY(port)) {
			ch = readl(port->membase + UART_RBR(port));
			ch &= 0xff;
			flag = TTY_NORMAL;
			port->icount.rx++;

			if (status & STAT_PAR_ERR)
				port->icount.parity++;
		}

		/*
		 * For UART2, error bits are not cleared on buffer read.
		 * This causes an interrupt loop and a system hang.
		 */
		if (IS_EXTENDED(port) && (status & STAT_BRK_ERR)) {
			ret = readl(port->membase + UART_STAT);
			ret |= STAT_BRK_ERR;
			writel(ret, port->membase + UART_STAT);
		}

		if (status & STAT_BRK_DET) {
			port->icount.brk++;
			status &= ~(STAT_FRM_ERR | STAT_PAR_ERR);
			if (uart_handle_break(port))
				goto ignore_char;
		}

		if (status & STAT_OVR_ERR)
			port->icount.overrun++;

		if (status & STAT_FRM_ERR)
			port->icount.frame++;

		if (uart_handle_sysrq_char(port, ch))
			goto ignore_char;

		if (status & port->ignore_status_mask & STAT_PAR_ERR)
			status &= ~STAT_RX_RDY(port);

		status &= port->read_status_mask;

		if (status & STAT_PAR_ERR)
			flag = TTY_PARITY;

		status &= ~port->ignore_status_mask;

		if (status & STAT_RX_RDY(port))
			tty_insert_flip_char(tport, ch, flag);

		if (status & STAT_BRK_DET)
			tty_insert_flip_char(tport, 0, TTY_BREAK);

		if (status & STAT_FRM_ERR)
			tty_insert_flip_char(tport, 0, TTY_FRAME);

		if (status & STAT_OVR_ERR)
			tty_insert_flip_char(tport, 0, TTY_OVERRUN);

ignore_char:
		status = readl(port->membase + UART_STAT);
	} while (status & (STAT_RX_RDY(port) | STAT_BRK_DET));

	tty_flip_buffer_push(tport);
}

static void mvebu_uart_tx_chars(struct uart_port *port, unsigned int status)
{
	u8 ch;

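	/*
	 * Roughly: uart_port_tx_limited() pulls up to port->fifosize
	 * characters from the xmit buffer. The third argument is the
	 * transmit budget, the fourth is the "TX ready" test evaluated
	 * before each character, the fifth writes one character to the TX
	 * holding register, and the last (empty) statement expression is
	 * the tx_done hook.
	 */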
	uart_port_tx_limited(port, ch, port->fifosize,
		!(readl(port->membase + UART_STAT) & STAT_TX_FIFO_FUL),
		writel(ch, port->membase + UART_TSH(port)),
		({}));
}

static irqreturn_t mvebu_uart_isr(int irq, void *dev_id)
{
	struct uart_port *port = (struct uart_port *)dev_id;
	unsigned int st = readl(port->membase + UART_STAT);

	if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR |
		  STAT_BRK_DET))
		mvebu_uart_rx_chars(port, st);

	if (st & STAT_TX_RDY(port))
		mvebu_uart_tx_chars(port, st);

	return IRQ_HANDLED;
}

static irqreturn_t mvebu_uart_rx_isr(int irq, void *dev_id)
{
	struct uart_port *port = (struct uart_port *)dev_id;
	unsigned int st = readl(port->membase + UART_STAT);

	if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR |
		  STAT_BRK_DET))
		mvebu_uart_rx_chars(port, st);

	return IRQ_HANDLED;
}

static irqreturn_t mvebu_uart_tx_isr(int irq, void *dev_id)
{
	struct uart_port *port = (struct uart_port *)dev_id;
	unsigned int st = readl(port->membase + UART_STAT);

	if (st & STAT_TX_RDY(port))
		mvebu_uart_tx_chars(port, st);

	return IRQ_HANDLED;
}

static int mvebu_uart_startup(struct uart_port *port)
{
	struct mvebu_uart *mvuart = to_mvuart(port);
	unsigned int ctl;
	int ret;

	writel(CTRL_TXFIFO_RST | CTRL_RXFIFO_RST,
	       port->membase + UART_CTRL(port));
	udelay(1);

	/* Clear the error bits of state register before IRQ request */
	ret = readl(port->membase + UART_STAT);
	ret |= STAT_BRK_ERR;
	writel(ret, port->membase + UART_STAT);

	writel(CTRL_BRK_INT, port->membase + UART_CTRL(port));

	ctl = readl(port->membase + UART_INTR(port));
	ctl |= CTRL_RX_RDY_INT(port);
	writel(ctl, port->membase + UART_INTR(port));

	if (!mvuart->irq[UART_TX_IRQ]) {
		/* Old bindings with just one interrupt (UART0 only) */
		ret = devm_request_irq(port->dev, mvuart->irq[UART_IRQ_SUM],
				       mvebu_uart_isr, port->irqflags,
				       dev_name(port->dev), port);
		if (ret) {
			dev_err(port->dev, "unable to request IRQ %d\n",
				mvuart->irq[UART_IRQ_SUM]);
			return ret;
		}
	} else {
		/* New bindings with an IRQ for RX and TX (both UARTs) */
		ret = devm_request_irq(port->dev, mvuart->irq[UART_RX_IRQ],
				       mvebu_uart_rx_isr, port->irqflags,
				       dev_name(port->dev), port);
		if (ret) {
			dev_err(port->dev, "unable to request IRQ %d\n",
				mvuart->irq[UART_RX_IRQ]);
			return ret;
		}

		ret = devm_request_irq(port->dev, mvuart->irq[UART_TX_IRQ],
				       mvebu_uart_tx_isr, port->irqflags,
				       dev_name(port->dev),
				       port);
		if (ret) {
			dev_err(port->dev, "unable to request IRQ %d\n",
				mvuart->irq[UART_TX_IRQ]);
			devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ],
				      port);
			return ret;
		}
	}

	return 0;
}

static void mvebu_uart_shutdown(struct uart_port *port)
{
	struct mvebu_uart *mvuart = to_mvuart(port);

	writel(0, port->membase + UART_INTR(port));

	if (!mvuart->irq[UART_TX_IRQ]) {
		devm_free_irq(port->dev, mvuart->irq[UART_IRQ_SUM], port);
	} else {
		devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ], port);
		devm_free_irq(port->dev, mvuart->irq[UART_TX_IRQ], port);
	}
}

static unsigned int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
{
	unsigned int d_divisor, m_divisor;
	unsigned long flags;
	u32 brdv, osamp;

	if (!port->uartclk)
		return 0;

	/*
	 * The baudrate is derived from the UART clock thanks to divisors:
	 * > d1 * d2 ("TBG divisors"): can divide only TBG clock from 1 to 6
	 * > D ("baud generator"): can divide the clock from 1 to 1023
	 * > M ("fractional divisor"): allows a better accuracy (from 1 to 63)
	 *
	 * Exact formulas for calculating baudrate:
	 *
	 * with default x16 scheme:
	 *   baudrate = xtal / (d * 16)
	 *   baudrate = tbg / (d1 * d2 * d * 16)
	 *
	 * with fractional divisor:
	 *   baudrate = 10 * xtal / (d * (3 * (m1 + m2) + 2 * (m3 + m4)))
	 *   baudrate = 10 * tbg / (d1*d2 * d * (3 * (m1 + m2) + 2 * (m3 + m4)))
	 *
	 * Oversampling value:
	 *   osamp = (m1 << 0) | (m2 << 8) | (m3 << 16) | (m4 << 24);
	 *
	 * Where m1 controls number of clock cycles per bit for bits 1,2,3;
	 * m2 for bits 4,5,6; m3 for bits 7,8 and m4 for bits 9,10.
	 *
	 * To simplify baudrate setup set all the M prescalers to the same
	 * value. For baudrates 9600 Bd and higher, it is enough to use the
	 * default (x16) divisor or fractional divisor with M = 63, so there
	 * is no need to use real fractional support (where the M prescalers
	 * are not equal).
	 *
	 * When all the M prescalers are zeroed then default (x16) divisor is
	 * used. Default x16 scheme is more stable than M (fractional divisor),
	 * so use M only when D divisor is not enough to derive baudrate.
	 *
	 * Member port->uartclk is either xtal clock rate or TBG clock rate
	 * divided by (d1 * d2). So d1 and d2 are already set by the UART clock
	 * driver (and UART driver itself cannot change them). Moreover they are
	 * shared between both UARTs.
	 */

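	/*
	 * A worked example (values are illustrative; they assume the common
	 * 25 MHz xtal reference, i.e. port->uartclk == 25000000):
	 * requesting 9600 Bd with the default x16 scheme gives
	 *   d = DIV_ROUND_CLOSEST(25000000, 9600 * 16) = 163
	 * and an effective rate of 25000000 / (163 * 16) ~= 9586 Bd,
	 * i.e. well within the usual UART tolerance.
	 */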
	m_divisor = OSAMP_DEFAULT_DIVISOR;
	d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor);

	if (d_divisor > BRDV_BAUD_MAX) {
		/*
		 * Experiments show that small M divisors are unstable.
		 * Use maximal possible M = 63 and calculate D divisor.
		 */
		m_divisor = OSAMP_MAX_DIVISOR;
		d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor);
	}

	if (d_divisor < 1)
		d_divisor = 1;
	else if (d_divisor > BRDV_BAUD_MAX)
		d_divisor = BRDV_BAUD_MAX;

	spin_lock_irqsave(&mvebu_uart_lock, flags);
	brdv = readl(port->membase + UART_BRDV);
	brdv &= ~BRDV_BAUD_MASK;
	brdv |= d_divisor;
	writel(brdv, port->membase + UART_BRDV);
	spin_unlock_irqrestore(&mvebu_uart_lock, flags);

	osamp = readl(port->membase + UART_OSAMP);
	osamp &= ~OSAMP_DIVISORS_MASK;
	if (m_divisor != OSAMP_DEFAULT_DIVISOR)
		osamp |= (m_divisor << 0) | (m_divisor << 8) |
			 (m_divisor << 16) | (m_divisor << 24);
	writel(osamp, port->membase + UART_OSAMP);

	return DIV_ROUND_CLOSEST(port->uartclk, d_divisor * m_divisor);
}

static void mvebu_uart_set_termios(struct uart_port *port,
				   struct ktermios *termios,
				   const struct ktermios *old)
{
	unsigned long flags;
	unsigned int baud, min_baud, max_baud;

	uart_port_lock_irqsave(port, &flags);

	port->read_status_mask = STAT_RX_RDY(port) | STAT_OVR_ERR |
		STAT_TX_RDY(port) | STAT_TX_FIFO_FUL;

	if (termios->c_iflag & INPCK)
		port->read_status_mask |= STAT_FRM_ERR | STAT_PAR_ERR;

	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |=
			STAT_FRM_ERR | STAT_PAR_ERR | STAT_OVR_ERR;

	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= STAT_RX_RDY(port) | STAT_BRK_ERR;

	/*
	 * Maximal divisor is 1023 and maximal fractional divisor is 63. And
	 * experiments show that baudrates above 1/80 of parent clock rate are
	 * not stable. So disallow baudrates above 1/80 of the parent clock
	 * rate. If port->uartclk is not available, then
	 * mvebu_uart_baud_rate_set() fails, so values min_baud and max_baud
	 * in this case do not matter.
	 */
	min_baud = DIV_ROUND_UP(port->uartclk, BRDV_BAUD_MAX *
				OSAMP_MAX_DIVISOR);
	max_baud = port->uartclk / 80;
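	/*
	 * For instance, with the (assumed) 25 MHz xtal reference this works
	 * out to min_baud = DIV_ROUND_UP(25000000, 1023 * 63) = 388 Bd and
	 * max_baud = 25000000 / 80 = 312500 Bd.
	 */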

	baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud);
	baud = mvebu_uart_baud_rate_set(port, baud);

	/* In case baudrate cannot be changed, report the old value */
	if (baud == 0 && old)
		baud = tty_termios_baud_rate(old);

	/* Only the following flag changes are supported */
	if (old) {
		termios->c_iflag &= INPCK | IGNPAR;
		termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
		termios->c_cflag &= CREAD | CBAUD;
		termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
		termios->c_cflag |= CS8;
	}

	if (baud != 0) {
		tty_termios_encode_baud_rate(termios, baud, baud);
		uart_update_timeout(port, termios->c_cflag, baud);
	}

	uart_port_unlock_irqrestore(port, flags);
}

static const char *mvebu_uart_type(struct uart_port *port)
{
	return MVEBU_UART_TYPE;
}

static void mvebu_uart_release_port(struct uart_port *port)
{
	/* Nothing to do here */
}

static int mvebu_uart_request_port(struct uart_port *port)
{
	return 0;
}

#ifdef CONFIG_CONSOLE_POLL
static int mvebu_uart_get_poll_char(struct uart_port *port)
{
	unsigned int st = readl(port->membase + UART_STAT);

	if (!(st & STAT_RX_RDY(port)))
		return NO_POLL_CHAR;

	return readl(port->membase + UART_RBR(port));
}

static void mvebu_uart_put_poll_char(struct uart_port *port, unsigned char c)
{
	unsigned int st;

	for (;;) {
		st = readl(port->membase + UART_STAT);

		if (!(st & STAT_TX_FIFO_FUL))
			break;

		udelay(1);
	}

	writel(c, port->membase + UART_TSH(port));
}
#endif

static const struct uart_ops mvebu_uart_ops = {
	.tx_empty	= mvebu_uart_tx_empty,
	.set_mctrl	= mvebu_uart_set_mctrl,
	.get_mctrl	= mvebu_uart_get_mctrl,
	.stop_tx	= mvebu_uart_stop_tx,
	.start_tx	= mvebu_uart_start_tx,
	.stop_rx	= mvebu_uart_stop_rx,
	.break_ctl	= mvebu_uart_break_ctl,
	.startup	= mvebu_uart_startup,
	.shutdown	= mvebu_uart_shutdown,
	.set_termios	= mvebu_uart_set_termios,
	.type		= mvebu_uart_type,
	.release_port	= mvebu_uart_release_port,
	.request_port	= mvebu_uart_request_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= mvebu_uart_get_poll_char,
	.poll_put_char	= mvebu_uart_put_poll_char,
#endif
};

/* Console Driver Operations */

#ifdef CONFIG_SERIAL_MVEBU_CONSOLE
/* Early Console */
static void mvebu_uart_putc(struct uart_port *port, unsigned char c)
{
	unsigned int st;

	for (;;) {
		st = readl(port->membase + UART_STAT);
		if (!(st & STAT_TX_FIFO_FUL))
			break;
	}

	/* At early stage, DT is not parsed yet, only use UART0 */
	writel(c, port->membase + UART_STD_TSH);

	for (;;) {
		st = readl(port->membase + UART_STAT);
		if (st & STAT_TX_FIFO_EMP)
			break;
	}
}

static void mvebu_uart_putc_early_write(struct console *con,
					const char *s,
					unsigned int n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, mvebu_uart_putc);
}

static int __init
mvebu_uart_early_console_setup(struct earlycon_device *device,
			       const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = mvebu_uart_putc_early_write;

	return 0;
}

EARLYCON_DECLARE(ar3700_uart, mvebu_uart_early_console_setup);
OF_EARLYCON_DECLARE(ar3700_uart, "marvell,armada-3700-uart",
		    mvebu_uart_early_console_setup);

static void wait_for_xmitr(struct uart_port *port)
{
	u32 val;

	readl_poll_timeout_atomic(port->membase + UART_STAT, val,
				  (val & STAT_TX_RDY(port)), 1, 10000);
}

static void wait_for_xmite(struct uart_port *port)
{
	u32 val;

	readl_poll_timeout_atomic(port->membase + UART_STAT, val,
				  (val & STAT_TX_EMP), 1, 10000);
}

static void mvebu_uart_console_putchar(struct uart_port *port, unsigned char ch)
{
	wait_for_xmitr(port);
	writel(ch, port->membase + UART_TSH(port));
}

static void mvebu_uart_console_write(struct console *co, const char *s,
				     unsigned int count)
{
	struct uart_port *port = &mvebu_uart_ports[co->index];
	unsigned long flags;
	unsigned int ier, intr, ctl;
	int locked = 1;

	if (oops_in_progress)
		locked = uart_port_trylock_irqsave(port, &flags);
	else
		uart_port_lock_irqsave(port, &flags);

	ier = readl(port->membase + UART_CTRL(port)) & CTRL_BRK_INT;
	intr = readl(port->membase + UART_INTR(port)) &
		(CTRL_RX_RDY_INT(port) | CTRL_TX_RDY_INT(port));
	writel(0, port->membase + UART_CTRL(port));
	writel(0, port->membase + UART_INTR(port));

	uart_console_write(port, s, count, mvebu_uart_console_putchar);

	wait_for_xmite(port);

	if (ier)
		writel(ier, port->membase + UART_CTRL(port));

	if (intr) {
		ctl = intr | readl(port->membase + UART_INTR(port));
		writel(ctl, port->membase + UART_INTR(port));
	}

	if (locked)
		uart_port_unlock_irqrestore(port, flags);
}

static int mvebu_uart_console_setup(struct console *co, char *options)
{
	struct uart_port *port;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (co->index < 0 || co->index >= MVEBU_NR_UARTS)
		return -EINVAL;

	port = &mvebu_uart_ports[co->index];

	if (!port->mapbase || !port->membase) {
		pr_debug("console on ttyMV%i not present\n", co->index);
		return -ENODEV;
	}

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(port, co, baud, parity, bits, flow);
}

static struct uart_driver mvebu_uart_driver;

static struct console mvebu_uart_console = {
	.name	= "ttyMV",
	.write	= mvebu_uart_console_write,
	.device	= uart_console_device,
	.setup	= mvebu_uart_console_setup,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
	.data	= &mvebu_uart_driver,
};

static int __init mvebu_uart_console_init(void)
{
	register_console(&mvebu_uart_console);
	return 0;
}

console_initcall(mvebu_uart_console_init);

#endif /* CONFIG_SERIAL_MVEBU_CONSOLE */

static struct uart_driver mvebu_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= DRIVER_NAME,
	.dev_name	= "ttyMV",
	.nr		= MVEBU_NR_UARTS,
#ifdef CONFIG_SERIAL_MVEBU_CONSOLE
	.cons		= &mvebu_uart_console,
#endif
};

#if defined(CONFIG_PM)
static int mvebu_uart_suspend(struct device *dev)
{
	struct mvebu_uart *mvuart = dev_get_drvdata(dev);
	struct uart_port *port = mvuart->port;
	unsigned long flags;

	uart_suspend_port(&mvebu_uart_driver, port);

	mvuart->pm_regs.rbr = readl(port->membase + UART_RBR(port));
	mvuart->pm_regs.tsh = readl(port->membase + UART_TSH(port));
	mvuart->pm_regs.ctrl = readl(port->membase + UART_CTRL(port));
	mvuart->pm_regs.intr = readl(port->membase + UART_INTR(port));
	mvuart->pm_regs.stat = readl(port->membase + UART_STAT);
	spin_lock_irqsave(&mvebu_uart_lock, flags);
	mvuart->pm_regs.brdv = readl(port->membase + UART_BRDV);
	spin_unlock_irqrestore(&mvebu_uart_lock, flags);
	mvuart->pm_regs.osamp = readl(port->membase + UART_OSAMP);

	device_set_wakeup_enable(dev, true);

	return 0;
}

static int mvebu_uart_resume(struct device *dev)
{
	struct mvebu_uart *mvuart = dev_get_drvdata(dev);
	struct uart_port *port = mvuart->port;
	unsigned long flags;

	writel(mvuart->pm_regs.rbr, port->membase + UART_RBR(port));
	writel(mvuart->pm_regs.tsh, port->membase + UART_TSH(port));
	writel(mvuart->pm_regs.ctrl, port->membase + UART_CTRL(port));
	writel(mvuart->pm_regs.intr, port->membase + UART_INTR(port));
	writel(mvuart->pm_regs.stat, port->membase + UART_STAT);
	spin_lock_irqsave(&mvebu_uart_lock, flags);
	writel(mvuart->pm_regs.brdv, port->membase + UART_BRDV);
	spin_unlock_irqrestore(&mvebu_uart_lock, flags);
	writel(mvuart->pm_regs.osamp, port->membase + UART_OSAMP);

	uart_resume_port(&mvebu_uart_driver, port);

	return 0;
}

static const struct dev_pm_ops mvebu_uart_pm_ops = {
	.suspend	= mvebu_uart_suspend,
	.resume		= mvebu_uart_resume,
};
#endif /* CONFIG_PM */

static const struct of_device_id mvebu_uart_of_match[];

/* Counter to keep track of each UART port id when not using CONFIG_OF */
static int uart_num_counter;

static int mvebu_uart_probe(struct platform_device *pdev)
{
	const struct of_device_id *match = of_match_device(mvebu_uart_of_match,
							   &pdev->dev);
	struct uart_port *port;
	struct mvebu_uart *mvuart;
	struct resource *reg;
	int id, irq;

	/* Assume that all UART ports have a DT alias or none has */
	id = of_alias_get_id(pdev->dev.of_node, "serial");
	if (!pdev->dev.of_node || id < 0)
		pdev->id = uart_num_counter++;
	else
		pdev->id = id;

	if (pdev->id >= MVEBU_NR_UARTS) {
		dev_err(&pdev->dev, "cannot have more than %d UART ports\n",
			MVEBU_NR_UARTS);
		return -EINVAL;
	}

	port = &mvebu_uart_ports[pdev->id];

	spin_lock_init(&port->lock);

	port->dev = &pdev->dev;
	port->type = PORT_MVEBU;
	port->ops = &mvebu_uart_ops;
	port->regshift = 0;

	port->fifosize = 32;
	port->iotype = UPIO_MEM32;
	port->flags = UPF_FIXED_PORT;
	port->line = pdev->id;

	/*
	 * The IRQ number is not stored in this structure because we may have
	 * two of them per port (RX and TX). Instead, they are kept in the
	 * ->irq[] array of the driver's mvebu_uart structure.
	 */
	port->irq = 0;
	port->irqflags = 0;

	port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &reg);
	if (IS_ERR(port->membase))
		return PTR_ERR(port->membase);
	port->mapbase = reg->start;

	mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart),
			      GFP_KERNEL);
	if (!mvuart)
		return -ENOMEM;

	/* Get controller data depending on the compatible string */
	mvuart->data = (struct mvebu_uart_driver_data *)match->data;
	mvuart->port = port;

	port->private_data = mvuart;
	platform_set_drvdata(pdev, mvuart);

	/* Get fixed clock frequency */
	mvuart->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mvuart->clk)) {
		if (PTR_ERR(mvuart->clk) == -EPROBE_DEFER)
			return PTR_ERR(mvuart->clk);

		if (IS_EXTENDED(port)) {
			dev_err(&pdev->dev, "unable to get UART clock\n");
			return PTR_ERR(mvuart->clk);
		}
	} else {
		if (!clk_prepare_enable(mvuart->clk))
			port->uartclk = clk_get_rate(mvuart->clk);
	}

	/* Manage interrupts */
	if (platform_irq_count(pdev) == 1) {
		/* Old bindings: a single unnamed IRQ (UART0 only) */
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		mvuart->irq[UART_IRQ_SUM] = irq;
	} else {
		/*
		 * New bindings: named interrupts (RX, TX) for both UARTs;
		 * only make use of the uart-rx and uart-tx interrupts, do not
		 * use the uart-sum interrupt of the UART0 port.
		 */
		irq = platform_get_irq_byname(pdev, "uart-rx");
		if (irq < 0)
			return irq;

		mvuart->irq[UART_RX_IRQ] = irq;

		irq = platform_get_irq_byname(pdev, "uart-tx");
		if (irq < 0)
			return irq;

		mvuart->irq[UART_TX_IRQ] = irq;
	}

	/* UART Soft Reset */
	writel(CTRL_SOFT_RST, port->membase + UART_CTRL(port));
	udelay(1);
	writel(0, port->membase + UART_CTRL(port));

	return uart_add_one_port(&mvebu_uart_driver, port);
}

static struct mvebu_uart_driver_data uart_std_driver_data = {
	.is_ext = false,
	.regs.rbr = UART_STD_RBR,
	.regs.tsh = UART_STD_TSH,
	.regs.ctrl = UART_STD_CTRL1,
	.regs.intr = UART_STD_CTRL2,
	.flags.ctrl_tx_rdy_int = CTRL_STD_TX_RDY_INT,
	.flags.ctrl_rx_rdy_int = CTRL_STD_RX_RDY_INT,
	.flags.stat_tx_rdy = STAT_STD_TX_RDY,
	.flags.stat_rx_rdy = STAT_STD_RX_RDY,
};

static struct mvebu_uart_driver_data uart_ext_driver_data = {
	.is_ext = true,
	.regs.rbr = UART_EXT_RBR,
	.regs.tsh = UART_EXT_TSH,
	.regs.ctrl = UART_EXT_CTRL1,
	.regs.intr = UART_EXT_CTRL2,
	.flags.ctrl_tx_rdy_int = CTRL_EXT_TX_RDY_INT,
	.flags.ctrl_rx_rdy_int = CTRL_EXT_RX_RDY_INT,
	.flags.stat_tx_rdy = STAT_EXT_TX_RDY,
	.flags.stat_rx_rdy = STAT_EXT_RX_RDY,
};

/* Match table for of_platform binding */
static const struct of_device_id mvebu_uart_of_match[] = {
	{
		.compatible = "marvell,armada-3700-uart",
		.data = (void *)&uart_std_driver_data,
	},
	{
		.compatible = "marvell,armada-3700-uart-ext",
		.data = (void *)&uart_ext_driver_data,
	},
	{}
};

static struct platform_driver mvebu_uart_platform_driver = {
	.probe = mvebu_uart_probe,
	.driver = {
		.name = "mvebu-uart",
		.of_match_table = of_match_ptr(mvebu_uart_of_match),
		.suppress_bind_attrs = true,
#if defined(CONFIG_PM)
		.pm = &mvebu_uart_pm_ops,
#endif /* CONFIG_PM */
	},
};

/* This code is based on clk-fixed-factor.c driver and modified. */

struct mvebu_uart_clock {
	struct clk_hw clk_hw;
	int clock_idx;
	u32 pm_context_reg1;
	u32 pm_context_reg2;
};

struct mvebu_uart_clock_base {
	struct mvebu_uart_clock clocks[2];
	unsigned int parent_rates[5];
	int parent_idx;
	unsigned int div;
	void __iomem *reg1;
	void __iomem *reg2;
	bool configured;
};

#define PARENT_CLOCK_XTAL 4

#define to_uart_clock(hw) container_of(hw, struct mvebu_uart_clock, clk_hw)
#define to_uart_clock_base(uart_clock) container_of(uart_clock, \
	struct mvebu_uart_clock_base, clocks[uart_clock->clock_idx])

static int mvebu_uart_clock_prepare(struct clk_hw *hw)
{
	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
	struct mvebu_uart_clock_base *uart_clock_base =
		to_uart_clock_base(uart_clock);
	unsigned int prev_clock_idx, prev_clock_rate, prev_d1d2;
	unsigned int parent_clock_idx, parent_clock_rate;
	unsigned long flags;
	unsigned int d1, d2;
	u64 divisor;
	u32 val;

	/*
	 * This function just reconfigures the UART Clock Control register
	 * (located in the UART1 address space, where it controls both UART1
	 * and UART2) to the selected UART base clock, and recalculates the
	 * current UART1/UART2 divisors in their respective address spaces, so
	 * that the final baudrate is not changed by switching the UART parent
	 * clock. This is required because otherwise the kernel's boot log
	 * stops working - we need to ensure that the UART baudrate does not
	 * change during this setup. It is a one time operation: it executes
	 * only once, sets `configured` to true and is skipped on subsequent
	 * calls. Because this UART Clock Control register (UART_BRDV) is
	 * shared between the UART1 baudrate function, the UART1 clock
	 * selector and the UART2 clock selector, every access to UART_BRDV
	 * (reg1) needs to be protected by a lock.
	 */
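
	/*
	 * Sketch of the divisor rescaling done below: with the x16 scheme the
	 * baudrate is base / (D * 16), where base = parent_rate / (d1 * d2).
	 * Keeping the baudrate constant across the parent switch therefore
	 * requires new_D = old_D * new_base / old_base, which is exactly
	 *   new_D = old_D * parent_clock_rate * prev_d1d2 /
	 *           (prev_clock_rate * d1 * d2)
	 * rounded to the closest integer and clamped to [1, BRDV_BAUD_MAX].
	 */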

	spin_lock_irqsave(&mvebu_uart_lock, flags);

	if (uart_clock_base->configured) {
		spin_unlock_irqrestore(&mvebu_uart_lock, flags);
		return 0;
	}

	parent_clock_idx = uart_clock_base->parent_idx;
	parent_clock_rate = uart_clock_base->parent_rates[parent_clock_idx];

	val = readl(uart_clock_base->reg1);

	if (uart_clock_base->div > CLK_TBG_DIV1_MAX) {
		d1 = CLK_TBG_DIV1_MAX;
		d2 = uart_clock_base->div / CLK_TBG_DIV1_MAX;
	} else {
		d1 = uart_clock_base->div;
		d2 = 1;
	}

	if (val & CLK_NO_XTAL) {
		prev_clock_idx = (val >> CLK_TBG_SEL_SHIFT) & CLK_TBG_SEL_MASK;
		prev_d1d2 = ((val >> CLK_TBG_DIV1_SHIFT) & CLK_TBG_DIV1_MASK) *
			    ((val >> CLK_TBG_DIV2_SHIFT) & CLK_TBG_DIV2_MASK);
	} else {
		prev_clock_idx = PARENT_CLOCK_XTAL;
		prev_d1d2 = 1;
	}

	/* Note that uart_clock_base->parent_rates[i] may not be available */
	prev_clock_rate = uart_clock_base->parent_rates[prev_clock_idx];

	/* Recalculate UART1 divisor so UART1 baudrate does not change */
	if (prev_clock_rate) {
		divisor = DIV_U64_ROUND_CLOSEST((u64)(val & BRDV_BAUD_MASK) *
						parent_clock_rate * prev_d1d2,
						prev_clock_rate * d1 * d2);
		if (divisor < 1)
			divisor = 1;
		else if (divisor > BRDV_BAUD_MAX)
			divisor = BRDV_BAUD_MAX;
		val = (val & ~BRDV_BAUD_MASK) | divisor;
	}

	if (parent_clock_idx != PARENT_CLOCK_XTAL) {
		/* Do not use XTAL, select TBG clock and TBG d1 * d2 divisors */
		val |= CLK_NO_XTAL;
		val &= ~(CLK_TBG_DIV1_MASK << CLK_TBG_DIV1_SHIFT);
		val |= d1 << CLK_TBG_DIV1_SHIFT;
		val &= ~(CLK_TBG_DIV2_MASK << CLK_TBG_DIV2_SHIFT);
		val |= d2 << CLK_TBG_DIV2_SHIFT;
		val &= ~(CLK_TBG_SEL_MASK << CLK_TBG_SEL_SHIFT);
		val |= parent_clock_idx << CLK_TBG_SEL_SHIFT;
	} else {
		/* Use XTAL, TBG bits are then ignored */
		val &= ~CLK_NO_XTAL;
	}

	writel(val, uart_clock_base->reg1);

	/* Recalculate UART2 divisor so UART2 baudrate does not change */
	if (prev_clock_rate) {
		val = readl(uart_clock_base->reg2);
		divisor = DIV_U64_ROUND_CLOSEST((u64)(val & BRDV_BAUD_MASK) *
						parent_clock_rate * prev_d1d2,
						prev_clock_rate * d1 * d2);
		if (divisor < 1)
			divisor = 1;
		else if (divisor > BRDV_BAUD_MAX)
			divisor = BRDV_BAUD_MAX;
		val = (val & ~BRDV_BAUD_MASK) | divisor;
		writel(val, uart_clock_base->reg2);
	}

	uart_clock_base->configured = true;

	spin_unlock_irqrestore(&mvebu_uart_lock, flags);

	return 0;
}

static int mvebu_uart_clock_enable(struct clk_hw *hw)
{
	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
	struct mvebu_uart_clock_base *uart_clock_base =
		to_uart_clock_base(uart_clock);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&mvebu_uart_lock, flags);

	val = readl(uart_clock_base->reg1);

	if (uart_clock->clock_idx == 0)
		val &= ~UART1_CLK_DIS;
	else
		val &= ~UART2_CLK_DIS;

	writel(val, uart_clock_base->reg1);

	spin_unlock_irqrestore(&mvebu_uart_lock, flags);

	return 0;
}

static void mvebu_uart_clock_disable(struct clk_hw *hw)
{
	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
	struct mvebu_uart_clock_base *uart_clock_base =
		to_uart_clock_base(uart_clock);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&mvebu_uart_lock, flags);

	val = readl(uart_clock_base->reg1);

	if (uart_clock->clock_idx == 0)
		val |= UART1_CLK_DIS;
	else
		val |= UART2_CLK_DIS;

	writel(val, uart_clock_base->reg1);

	spin_unlock_irqrestore(&mvebu_uart_lock, flags);
}

static int mvebu_uart_clock_is_enabled(struct clk_hw *hw)
{
	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
	struct mvebu_uart_clock_base *uart_clock_base =
		to_uart_clock_base(uart_clock);
	u32 val;

	val = readl(uart_clock_base->reg1);

	if (uart_clock->clock_idx == 0)
		return !(val & UART1_CLK_DIS);
	else
		return !(val & UART2_CLK_DIS);
}

static int mvebu_uart_clock_save_context(struct clk_hw *hw)
{
	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
	struct mvebu_uart_clock_base *uart_clock_base =
		to_uart_clock_base(uart_clock);
	unsigned long flags;

	spin_lock_irqsave(&mvebu_uart_lock, flags);
	uart_clock->pm_context_reg1 = readl(uart_clock_base->reg1);
	uart_clock->pm_context_reg2 = readl(uart_clock_base->reg2);
	spin_unlock_irqrestore(&mvebu_uart_lock, flags);

	return 0;
}

static void mvebu_uart_clock_restore_context(struct clk_hw *hw)
{
	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
	struct mvebu_uart_clock_base *uart_clock_base =
		to_uart_clock_base(uart_clock);
	unsigned long flags;

	spin_lock_irqsave(&mvebu_uart_lock, flags);
	writel(uart_clock->pm_context_reg1, uart_clock_base->reg1);
	writel(uart_clock->pm_context_reg2, uart_clock_base->reg2);
	spin_unlock_irqrestore(&mvebu_uart_lock, flags);
}

static unsigned long mvebu_uart_clock_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
	struct mvebu_uart_clock_base *uart_clock_base =
		to_uart_clock_base(uart_clock);

	return parent_rate / uart_clock_base->div;
}

static long mvebu_uart_clock_round_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long *parent_rate)
{
	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
	struct mvebu_uart_clock_base *uart_clock_base =
		to_uart_clock_base(uart_clock);

	return *parent_rate / uart_clock_base->div;
}

static int mvebu_uart_clock_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	/*
	 * We must report success but we can do so unconditionally because
	 * mvebu_uart_clock_round_rate returns values that ensure this call is
	 * a nop.
	 */

	return 0;
}

static const struct clk_ops mvebu_uart_clock_ops = {
	.prepare = mvebu_uart_clock_prepare,
	.enable = mvebu_uart_clock_enable,
	.disable = mvebu_uart_clock_disable,
	.is_enabled = mvebu_uart_clock_is_enabled,
	.save_context = mvebu_uart_clock_save_context,
	.restore_context = mvebu_uart_clock_restore_context,
	.round_rate = mvebu_uart_clock_round_rate,
	.set_rate = mvebu_uart_clock_set_rate,
	.recalc_rate = mvebu_uart_clock_recalc_rate,
};

static int mvebu_uart_clock_register(struct device *dev,
				     struct mvebu_uart_clock *uart_clock,
				     const char *name,
				     const char *parent_name)
{
	struct clk_init_data init = { };

	uart_clock->clk_hw.init = &init;

	init.name = name;
	init.ops = &mvebu_uart_clock_ops;
	init.flags = 0;
	init.num_parents = 1;
	init.parent_names = &parent_name;

	return devm_clk_hw_register(dev, &uart_clock->clk_hw);
}

static int mvebu_uart_clock_probe(struct platform_device *pdev)
{
	static const char *const uart_clk_names[] = { "uart_1", "uart_2" };
	static const char *const parent_clk_names[] = { "TBG-A-P", "TBG-B-P",
							"TBG-A-S", "TBG-B-S",
							"xtal" };
	struct clk *parent_clks[ARRAY_SIZE(parent_clk_names)];
	struct mvebu_uart_clock_base *uart_clock_base;
	struct clk_hw_onecell_data *hw_clk_data;
	struct device *dev = &pdev->dev;
	int i, parent_clk_idx, ret;
	unsigned long div, rate;
	struct resource *res;
	unsigned int d1, d2;

	BUILD_BUG_ON(ARRAY_SIZE(uart_clk_names) !=
		     ARRAY_SIZE(uart_clock_base->clocks));
	BUILD_BUG_ON(ARRAY_SIZE(parent_clk_names) !=
		     ARRAY_SIZE(uart_clock_base->parent_rates));

	uart_clock_base = devm_kzalloc(dev,
				       sizeof(*uart_clock_base),
				       GFP_KERNEL);
	if (!uart_clock_base)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Couldn't get first register\n");
		return -ENOENT;
	}

	/*
	 * The UART Clock Control register (reg1 / UART_BRDV) is in the
	 * address space of UART1 (standard UART variant), controls the parent
	 * clock and dividers for both UART1 and UART2 and is supplied via DT
	 * as the first resource. Therefore use ioremap() rather than
	 * ioremap_resource() to avoid conflicts with the UART1 driver. Access
	 * to UART_BRDV is protected by a lock shared between the clock and
	 * UART drivers.
	 */
	uart_clock_base->reg1 = devm_ioremap(dev, res->start,
					     resource_size(res));
	if (!uart_clock_base->reg1)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(dev, "Couldn't get second register\n");
		return -ENOENT;
	}

	/*
	 * The UART 2 Baud Rate Divisor register (reg2 / UART_BRDV) is in the
	 * address space of UART2 (extended UART variant), controls only one
	 * UART2 specific divider and is supplied via DT as the second
	 * resource. Therefore use ioremap() rather than ioremap_resource() to
	 * avoid conflicts with the UART2 driver. Access to UART_BRDV is
	 * protected by a lock shared between the clock and UART drivers.
	 */
	uart_clock_base->reg2 = devm_ioremap(dev, res->start,
					     resource_size(res));
	if (!uart_clock_base->reg2)
		return -ENOMEM;

	hw_clk_data = devm_kzalloc(dev,
				   struct_size(hw_clk_data, hws,
					       ARRAY_SIZE(uart_clk_names)),
				   GFP_KERNEL);
	if (!hw_clk_data)
		return -ENOMEM;

	hw_clk_data->num = ARRAY_SIZE(uart_clk_names);
	for (i = 0; i < ARRAY_SIZE(uart_clk_names); i++) {
		hw_clk_data->hws[i] = &uart_clock_base->clocks[i].clk_hw;
		uart_clock_base->clocks[i].clock_idx = i;
	}

	parent_clk_idx = -1;

	for (i = 0; i < ARRAY_SIZE(parent_clk_names); i++) {
		parent_clks[i] = devm_clk_get(dev, parent_clk_names[i]);
		if (IS_ERR(parent_clks[i])) {
			if (PTR_ERR(parent_clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			dev_warn(dev, "Couldn't get the parent clock %s: %ld\n",
				 parent_clk_names[i], PTR_ERR(parent_clks[i]));
			continue;
		}

		ret = clk_prepare_enable(parent_clks[i]);
		if (ret) {
			dev_warn(dev, "Couldn't enable parent clock %s: %d\n",
				 parent_clk_names[i], ret);
			continue;
		}
		rate = clk_get_rate(parent_clks[i]);
		uart_clock_base->parent_rates[i] = rate;

		if (i != PARENT_CLOCK_XTAL) {
			/*
			 * Calculate the smallest TBG d1 and d2 divisors that
			 * still can provide 9600 baudrate.
			 */
			d1 = DIV_ROUND_UP(rate, 9600 * OSAMP_MAX_DIVISOR *
					  BRDV_BAUD_MAX);
			if (d1 < 1)
				d1 = 1;
			else if (d1 > CLK_TBG_DIV1_MAX)
				d1 = CLK_TBG_DIV1_MAX;

			d2 = DIV_ROUND_UP(rate, 9600 * OSAMP_MAX_DIVISOR *
					  BRDV_BAUD_MAX * d1);
			if (d2 < 1)
				d2 = 1;
			else if (d2 > CLK_TBG_DIV2_MAX)
				d2 = CLK_TBG_DIV2_MAX;
		} else {
			/*
			 * When the UART clock uses XTAL clock as a source
			 * then it is not possible to use d1 and d2 divisors.
			 */
			d1 = d2 = 1;
		}
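
		/*
		 * For instance (hypothetical numbers), an 800 MHz TBG parent
		 * gives d1 = DIV_ROUND_UP(800000000, 9600 * 63 * 1023) = 2
		 * and d2 = 1, i.e. div = 2, which still leaves 9600 Bd
		 * reachable through the D and M divisors.
		 */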

		/* Skip clock source which cannot provide 9600 baudrate */
		if (rate > 9600 * OSAMP_MAX_DIVISOR * BRDV_BAUD_MAX * d1 * d2)
			continue;

		/*
		 * Choose TBG clock source with the smallest divisors. Use XTAL
		 * clock source only in case TBG is not available as XTAL cannot
		 * be used for baudrates higher than 230400.
		 */
		if (parent_clk_idx == -1 ||
		    (i != PARENT_CLOCK_XTAL && div > d1 * d2)) {
			parent_clk_idx = i;
			div = d1 * d2;
		}
	}

	for (i = 0; i < ARRAY_SIZE(parent_clk_names); i++) {
		if (i == parent_clk_idx || IS_ERR(parent_clks[i]))
			continue;
		clk_disable_unprepare(parent_clks[i]);
		devm_clk_put(dev, parent_clks[i]);
	}

	if (parent_clk_idx == -1) {
		dev_err(dev, "No usable parent clock\n");
		return -ENOENT;
	}

	uart_clock_base->parent_idx = parent_clk_idx;
	uart_clock_base->div = div;

	dev_notice(dev, "Using parent clock %s as base UART clock\n",
		   __clk_get_name(parent_clks[parent_clk_idx]));

	for (i = 0; i < ARRAY_SIZE(uart_clk_names); i++) {
		ret = mvebu_uart_clock_register(dev,
				&uart_clock_base->clocks[i],
				uart_clk_names[i],
				__clk_get_name(parent_clks[parent_clk_idx]));
		if (ret) {
			dev_err(dev, "Can't register UART clock %d: %d\n",
				i, ret);
			return ret;
		}
	}

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					   hw_clk_data);
}

static const struct of_device_id mvebu_uart_clock_of_match[] = {
	{ .compatible = "marvell,armada-3700-uart-clock", },
	{ }
};

static struct platform_driver mvebu_uart_clock_platform_driver = {
	.probe = mvebu_uart_clock_probe,
	.driver = {
		.name = "mvebu-uart-clock",
		.of_match_table = mvebu_uart_clock_of_match,
	},
};

static int __init mvebu_uart_init(void)
{
	int ret;

	ret = uart_register_driver(&mvebu_uart_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&mvebu_uart_clock_platform_driver);
	if (ret) {
		uart_unregister_driver(&mvebu_uart_driver);
		return ret;
	}

	ret = platform_driver_register(&mvebu_uart_platform_driver);
	if (ret) {
		platform_driver_unregister(&mvebu_uart_clock_platform_driver);
		uart_unregister_driver(&mvebu_uart_driver);
		return ret;
	}

	return 0;
}
arch_initcall(mvebu_uart_init);