2 * Driver for Atmel AT91 Serial ports
3 * Copyright (C) 2003 Rick Bronson
5 * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
6 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
8 * DMA support added by Chip Coldwell.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/tty.h>
26 #include <linux/ioport.h>
27 #include <linux/slab.h>
28 #include <linux/init.h>
29 #include <linux/serial.h>
30 #include <linux/clk.h>
31 #include <linux/console.h>
32 #include <linux/sysrq.h>
33 #include <linux/tty_flip.h>
34 #include <linux/platform_device.h>
36 #include <linux/of_device.h>
37 #include <linux/of_gpio.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/dmaengine.h>
40 #include <linux/atmel_pdc.h>
41 #include <linux/uaccess.h>
42 #include <linux/platform_data/atmel.h>
43 #include <linux/timer.h>
44 #include <linux/gpio.h>
45 #include <linux/gpio/consumer.h>
46 #include <linux/err.h>
47 #include <linux/irq.h>
48 #include <linux/suspend.h>
52 #include <asm/ioctls.h>
54 #define PDC_BUFFER_SIZE 512
55 /* Revisit: We should calculate this based on the actual port settings */
56 #define PDC_RX_TIMEOUT (3 * 10) /* 3 bytes */
58 /* The minimum number of data the FIFOs should be able to contain */
59 #define ATMEL_MIN_FIFO_SIZE 8
61 * These two offsets are subtracted from the RX FIFO size to define the RTS
62 * high and low thresholds
64 #define ATMEL_RTS_HIGH_OFFSET 16
65 #define ATMEL_RTS_LOW_OFFSET 20
67 #if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
71 #include <linux/serial_core.h>
73 #include "serial_mctrl_gpio.h"
74 #include "atmel_serial.h"
76 static void atmel_start_rx(struct uart_port *port);
77 static void atmel_stop_rx(struct uart_port *port);
79 #ifdef CONFIG_SERIAL_ATMEL_TTYAT
81 /* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we
82 * should coexist with the 8250 driver, such as if we have an external 16C550
84 #define SERIAL_ATMEL_MAJOR 204
85 #define MINOR_START 154
86 #define ATMEL_DEVICENAME "ttyAT"
90 /* Use device name ttyS, major 4, minor 64-68. This is the usual serial port
91 * name, but it is legally reserved for the 8250 driver. */
92 #define SERIAL_ATMEL_MAJOR TTY_MAJOR
93 #define MINOR_START 64
94 #define ATMEL_DEVICENAME "ttyS"
98 #define ATMEL_ISR_PASS_LIMIT 256
100 struct atmel_dma_buffer {
	/* NOTE(review): buffer pointer / dma_addr members are missing from this
	 * extraction; dma_size is the length in bytes of the mapped buffer. */
103 	unsigned int dma_size;
/* One slot of the PIO RX ring: received character plus its CSR status
 * (members not visible in this extraction). */
107 struct atmel_uart_char {
113 * Be careful, the real size of the ring buffer is
114 * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. It means that ring buffer
115 * can contain up to 1024 characters in PIO mode and up to 4096 characters in
118 #define ATMEL_SERIAL_RINGSIZE 1024
121 * at91: 6 USARTs and one DBGU port (SAM9260)
122 * samx7: 3 USARTs and 5 UARTs
124 #define ATMEL_MAX_UART 8
127 * We wrap our port structure around the generic uart_port.
/*
 * Per-port driver state wrapping the generic uart_port.  Holds clocking,
 * PDC and dmaengine transfer state, deferred-work (tasklet/timer) handles
 * and the suspend/resume bookkeeping for one USART instance.
 */
129 struct atmel_uart_port {
130 struct uart_port uart; /* uart */
131 struct clk *clk; /* uart clock */
132 int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */
133 u32 backup_imr; /* IMR saved during suspend */
134 int break_active; /* break being received */
136 bool use_dma_rx; /* enable DMA receiver */
137 bool use_pdc_rx; /* enable PDC receiver */
138 short pdc_rx_idx; /* current PDC RX buffer */
139 struct atmel_dma_buffer pdc_rx[2]; /* PDC receiver */
141 bool use_dma_tx; /* enable DMA transmitter */
142 bool use_pdc_tx; /* enable PDC transmitter */
143 struct atmel_dma_buffer pdc_tx; /* PDC transmitter */
145 spinlock_t lock_tx; /* port lock */
146 spinlock_t lock_rx; /* port lock */
147 struct dma_chan *chan_tx;
148 struct dma_chan *chan_rx;
149 struct dma_async_tx_descriptor *desc_tx;
150 struct dma_async_tx_descriptor *desc_rx;
151 dma_cookie_t cookie_tx;
152 dma_cookie_t cookie_rx;
153 struct scatterlist sg_tx;
154 struct scatterlist sg_rx;
155 struct tasklet_struct tasklet_rx;
156 struct tasklet_struct tasklet_tx;
157 atomic_t tasklet_shutdown; /* non-zero once tasklets must no longer run */
158 unsigned int irq_status_prev; /* last CSR snapshot, for modem-status deltas */
161 struct circ_buf rx_ring; /* PIO-mode RX ring of atmel_uart_char */
163 struct mctrl_gpios *gpios;
164 unsigned int tx_done_mask; /* TXRDY/TXEMPTY/ENDTX bits for the current mode */
169 u32 rtor; /* address of receiver timeout register if it exists */
170 bool has_frac_baudrate;
172 struct timer_list uart_timer; /* RX poll fallback when no timeout IRQ */
175 unsigned int pending; /* IRQ bits latched while suspended */
176 unsigned int pending_status;
177 spinlock_t lock_suspended;
179 bool hd_start_rx; /* can start RX during half-duplex operation */
	/* Transfer-backend hooks, selected at startup (PIO, PDC or dmaengine). */
194 int (*prepare_rx)(struct uart_port *port);
195 int (*prepare_tx)(struct uart_port *port);
196 void (*schedule_rx)(struct uart_port *port);
197 void (*schedule_tx)(struct uart_port *port);
198 void (*release_rx)(struct uart_port *port);
199 void (*release_tx)(struct uart_port *port);
202 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
203 static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
206 static struct console atmel_console;
209 #if defined(CONFIG_OF)
210 static const struct of_device_id atmel_serial_dt_ids[] = {
211 { .compatible = "atmel,at91rm9200-usart" },
212 { .compatible = "atmel,at91sam9260-usart" },
217 static inline struct atmel_uart_port *
218 to_atmel_uart_port(struct uart_port *uart)
220 return container_of(uart, struct atmel_uart_port, uart);
223 static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
225 return __raw_readl(port->membase + reg);
228 static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
230 __raw_writel(value, port->membase + reg);
233 static inline u8 atmel_uart_read_char(struct uart_port *port)
235 return __raw_readb(port->membase + ATMEL_US_RHR);
238 static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
240 __raw_writeb(value, port->membase + ATMEL_US_THR);
243 static inline int atmel_uart_is_half_duplex(struct uart_port *port)
245 return (port->rs485.flags & SER_RS485_ENABLED) &&
246 !(port->rs485.flags & SER_RS485_RX_DURING_TX);
249 #ifdef CONFIG_SERIAL_ATMEL_PDC
250 static bool atmel_use_pdc_rx(struct uart_port *port)
252 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
254 return atmel_port->use_pdc_rx;
257 static bool atmel_use_pdc_tx(struct uart_port *port)
259 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
261 return atmel_port->use_pdc_tx;
264 static bool atmel_use_pdc_rx(struct uart_port *port)
269 static bool atmel_use_pdc_tx(struct uart_port *port)
275 static bool atmel_use_dma_tx(struct uart_port *port)
277 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
279 return atmel_port->use_dma_tx;
282 static bool atmel_use_dma_rx(struct uart_port *port)
284 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
286 return atmel_port->use_dma_rx;
289 static bool atmel_use_fifo(struct uart_port *port)
291 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
293 return atmel_port->fifo_size;
296 static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
297 struct tasklet_struct *t)
299 if (!atomic_read(&atmel_port->tasklet_shutdown))
/*
 * Snapshot CSR and overlay any modem-status lines that are routed through
 * GPIOs instead of the USART pins: for each signal with a valid gpiod, the
 * hardware CSR bit is replaced by the GPIO level returned in @ret.
 */
303 static unsigned int atmel_get_lines_status(struct uart_port *port)
305 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
306 unsigned int status, ret = 0;
308 status = atmel_uart_readl(port, ATMEL_US_CSR);
310 mctrl_gpio_get(atmel_port->gpios, &ret);
	/* CTS from GPIO overrides the CSR bit when the gpiod exists. */
312 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
315 status &= ~ATMEL_US_CTS;
317 status |= ATMEL_US_CTS;
	/* Same override for DSR. */
320 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
323 status &= ~ATMEL_US_DSR;
325 status |= ATMEL_US_DSR;
	/* Same override for RI. */
328 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
331 status &= ~ATMEL_US_RI;
333 status |= ATMEL_US_RI;
	/* Same override for DCD. */
336 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
339 status &= ~ATMEL_US_DCD;
341 status |= ATMEL_US_DCD;
347 /* Enable or disable the rs485 support */
/*
 * Switch the USART between RS232 and RS485 mode per @rs485conf.
 * TX-done interrupts are masked around the mode change; in RS485 mode
 * completion is tracked with TXEMPTY (shifter drained) rather than TXRDY
 * so the RTS turnaround delay (TTGR) is honoured.
 */
348 static int atmel_config_rs485(struct uart_port *port,
349 struct serial_rs485 *rs485conf)
351 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
354 /* Disable interrupts */
355 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
357 mode = atmel_uart_readl(port, ATMEL_US_MR);
359 /* Resetting serial mode to RS232 (0x0) */
360 mode &= ~ATMEL_US_USMODE;
362 port->rs485 = *rs485conf;
364 if (rs485conf->flags & SER_RS485_ENABLED) {
365 dev_dbg(port->dev, "Setting UART to RS485\n");
366 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
367 atmel_uart_writel(port, ATMEL_US_TTGR,
368 rs485conf->delay_rts_after_send);
369 mode |= ATMEL_US_USMODE_RS485;
371 dev_dbg(port->dev, "Setting UART to RS232\n");
	/* PDC TX completes on ENDTX; PIO/DMA TX completes on TXRDY. */
372 if (atmel_use_pdc_tx(port))
373 atmel_port->tx_done_mask = ATMEL_US_ENDTX |
376 atmel_port->tx_done_mask = ATMEL_US_TXRDY;
378 atmel_uart_writel(port, ATMEL_US_MR, mode);
380 /* Enable interrupts */
381 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
387 * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty.
389 static u_int atmel_tx_empty(struct uart_port *port)
391 return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
397 * Set state of the modem control output lines
/*
 * uart_ops.set_mctrl: drive RTS/DTR through the control register, mirror
 * the state onto any mctrl GPIOs, and honour loopback via the channel-mode
 * field.  The meaning of RTSEN/RTSDIS flips in hardware-handshake mode,
 * hence the rts_paused/rts_ready indirection below.
 */
399 static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
401 unsigned int control = 0;
402 unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
403 unsigned int rts_paused, rts_ready;
404 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
406 /* override mode to RS485 if needed, otherwise keep the current mode */
407 if (port->rs485.flags & SER_RS485_ENABLED) {
408 atmel_uart_writel(port, ATMEL_US_TTGR,
409 port->rs485.delay_rts_after_send);
410 mode &= ~ATMEL_US_USMODE;
411 mode |= ATMEL_US_USMODE_RS485;
414 /* set the RTS line state according to the mode */
415 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
416 /* force RTS line to high level */
417 rts_paused = ATMEL_US_RTSEN;
419 /* give the control of the RTS line back to the hardware */
420 rts_ready = ATMEL_US_RTSDIS;
422 /* force RTS line to high level */
423 rts_paused = ATMEL_US_RTSDIS;
425 /* force RTS line to low level */
426 rts_ready = ATMEL_US_RTSEN;
429 if (mctrl & TIOCM_RTS)
430 control |= rts_ready;
432 control |= rts_paused;
434 if (mctrl & TIOCM_DTR)
435 control |= ATMEL_US_DTREN;
437 control |= ATMEL_US_DTRDIS;
439 atmel_uart_writel(port, ATMEL_US_CR, control);
	/* Keep GPIO-routed modem lines in sync with the requested state. */
441 mctrl_gpio_set(atmel_port->gpios, mctrl);
443 /* Local loopback mode? */
444 mode &= ~ATMEL_US_CHMODE;
445 if (mctrl & TIOCM_LOOP)
446 mode |= ATMEL_US_CHMODE_LOC_LOOP;
448 mode |= ATMEL_US_CHMODE_NORMAL;
450 atmel_uart_writel(port, ATMEL_US_MR, mode);
454 * Get state of the modem control input lines
/*
 * uart_ops.get_mctrl: translate the active-low CSR input bits into TIOCM_*
 * flags, then let mctrl_gpio_get() override signals routed through GPIOs.
 */
456 static u_int atmel_get_mctrl(struct uart_port *port)
458 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
459 unsigned int ret = 0, status;
461 status = atmel_uart_readl(port, ATMEL_US_CSR);
464 * The control signals are active low.
466 if (!(status & ATMEL_US_DCD))
468 if (!(status & ATMEL_US_CTS))
470 if (!(status & ATMEL_US_DSR))
472 if (!(status & ATMEL_US_RI))
475 return mctrl_gpio_get(atmel_port->gpios, &ret);
/*
 * uart_ops.stop_tx: halt transmission.  Stops the PDC channel if in use,
 * disables the transmitter and masks TX-done interrupts; in RS485
 * half-duplex mode the receiver is restarted so incoming data is not lost.
 */
481 static void atmel_stop_tx(struct uart_port *port)
483 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
485 if (atmel_use_pdc_tx(port)) {
486 /* disable PDC transmit */
487 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
491 * Disable the transmitter.
492 * This is mandatory when DMA is used, otherwise the DMA buffer
493 * is fully transmitted.
495 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
497 /* Disable interrupts */
498 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
	/* Half-duplex RS485: hand the line back to the receiver, unless the
	 * port is being torn down (tasklet_shutdown set). */
500 if (atmel_uart_is_half_duplex(port))
501 if (!atomic_read(&atmel_port->tasklet_shutdown))
502 atmel_start_rx(port);
507 * Start transmitting.
/*
 * uart_ops.start_tx: (re)enable transmission.  In half-duplex RS485 the
 * receiver is stopped first; PDC transfers are re-armed and the TX-done
 * interrupt unmasked so the tasklet/PDC path keeps feeding data.
 */
509 static void atmel_start_tx(struct uart_port *port)
511 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
513 if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
515 /* The transmitter is already running. Yes, we
519 if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
520 if (atmel_uart_is_half_duplex(port))
523 if (atmel_use_pdc_tx(port))
524 /* re-enable PDC transmit */
525 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
527 /* Enable interrupts */
528 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
530 /* re-enable the transmitter */
531 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
535 * start receiving - port is in process of being opened.
/*
 * Enable reception: clear stale status, enable the receiver, then unmask
 * either the PDC end/timeout interrupts or plain RXRDY depending on the
 * active RX backend.
 */
537 static void atmel_start_rx(struct uart_port *port)
539 /* reset status and receiver */
540 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
542 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);
544 if (atmel_use_pdc_rx(port)) {
545 /* enable PDC controller */
546 atmel_uart_writel(port, ATMEL_US_IER,
547 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
548 port->read_status_mask);
549 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
551 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
556 * Stop receiving - port is in process of being closed.
/*
 * Disable reception: mirror image of atmel_start_rx() — stop the receiver
 * and mask the RX interrupts of whichever backend (PDC or PIO) is active.
 */
558 static void atmel_stop_rx(struct uart_port *port)
560 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);
562 if (atmel_use_pdc_rx(port)) {
563 /* disable PDC receive */
564 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
565 atmel_uart_writel(port, ATMEL_US_IDR,
566 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
567 port->read_status_mask);
569 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
574 * Enable modem status interrupts
/*
 * uart_ops.enable_ms: enable modem-status change interrupts exactly once.
 * Signals routed through GPIOs get GPIO interrupts (mctrl_gpio_enable_ms);
 * only the lines still wired to the USART get their IER change bits set.
 */
576 static void atmel_enable_ms(struct uart_port *port)
578 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
582 * Interrupt should not be enabled twice
584 if (atmel_port->ms_irq_enabled)
587 atmel_port->ms_irq_enabled = true;
589 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
590 ier |= ATMEL_US_CTSIC;
592 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
593 ier |= ATMEL_US_DSRIC;
595 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
596 ier |= ATMEL_US_RIIC;
598 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
599 ier |= ATMEL_US_DCDIC;
601 atmel_uart_writel(port, ATMEL_US_IER, ier);
603 mctrl_gpio_enable_ms(atmel_port->gpios);
607 * Disable modem status interrupts
/*
 * Disable modem-status change interrupts; exact inverse of
 * atmel_enable_ms(), guarded by the same ms_irq_enabled flag.
 */
609 static void atmel_disable_ms(struct uart_port *port)
611 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
615 * Interrupt should not be disabled twice
617 if (!atmel_port->ms_irq_enabled)
620 atmel_port->ms_irq_enabled = false;
622 mctrl_gpio_disable_ms(atmel_port->gpios);
624 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
625 idr |= ATMEL_US_CTSIC;
627 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
628 idr |= ATMEL_US_DSRIC;
630 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
631 idr |= ATMEL_US_RIIC;
633 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
634 idr |= ATMEL_US_DCDIC;
636 atmel_uart_writel(port, ATMEL_US_IDR, idr);
640 * Control the transmission of a break signal
/*
 * uart_ops.break_ctl: start (STTBRK) or stop (STPBRK) transmitting a
 * break condition on the line.
 */
642 static void atmel_break_ctl(struct uart_port *port, int break_state)
644 if (break_state != 0)
646 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK)
649 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
653 * Stores the incoming character in the ring buffer
/*
 * Store one received character (with its CSR status) in the PIO RX ring
 * from interrupt context; the RX tasklet drains it later.  On a full ring
 * the character is silently dropped.
 */
656 atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
659 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
660 struct circ_buf *ring = &atmel_port->rx_ring;
661 struct atmel_uart_char *c;
663 if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
664 /* Buffer overflow, ignore char */
667 c = &((struct atmel_uart_char *)ring->buf)[ring->head];
671 /* Make sure the character is stored before we update head. */
	/* Ring size is a power of two, so the mask wraps the index. */
674 ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
678 * Deal with parity, framing and overrun errors.
/*
 * Account parity/framing/overrun errors reported during PDC reception and
 * clear the sticky status bits.  A break suppresses the bogus parity and
 * framing bits it raises as a side effect.
 */
680 static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
683 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
685 if (status & ATMEL_US_RXBRK) {
686 /* ignore side-effect */
687 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
690 if (status & ATMEL_US_PARE)
691 port->icount.parity++;
692 if (status & ATMEL_US_FRAME)
693 port->icount.frame++;
694 if (status & ATMEL_US_OVRE)
695 port->icount.overrun++;
699 * Characters received (called from interrupt handler)
/*
 * PIO receive path, called from the interrupt handler: drain RHR while
 * RXRDY is set, buffering each character (with status) into the RX ring,
 * then kick the RX tasklet to push the data to the tty layer.  Break
 * handling toggles break_active so start- and end-of-break are paired.
 */
701 static void atmel_rx_chars(struct uart_port *port)
703 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
704 unsigned int status, ch;
706 status = atmel_uart_readl(port, ATMEL_US_CSR);
707 while (status & ATMEL_US_RXRDY) {
708 ch = atmel_uart_read_char(port);
711 * note that the error handling code is
712 * out of the main execution path
714 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
715 | ATMEL_US_OVRE | ATMEL_US_RXBRK)
716 || atmel_port->break_active)) {
	/* Clear the sticky error bits before classifying the event. */
719 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
721 if (status & ATMEL_US_RXBRK
722 && !atmel_port->break_active) {
723 atmel_port->break_active = 1;
724 atmel_uart_writel(port, ATMEL_US_IER,
728 * This is either the end-of-break
729 * condition or we've received at
730 * least one character without RXBRK
731 * being set. In both cases, the next
732 * RXBRK will indicate start-of-break.
734 atmel_uart_writel(port, ATMEL_US_IDR,
736 status &= ~ATMEL_US_RXBRK;
737 atmel_port->break_active = 0;
741 atmel_buffer_rx_char(port, status, ch);
742 status = atmel_uart_readl(port, ATMEL_US_CSR);
	/* Defer tty push to the tasklet; IRQ context stays short. */
745 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
749 * Transmit characters (called from tasklet with TXRDY interrupt
/*
 * PIO transmit path (runs from the TX tasklet with the TX-done interrupt
 * masked): send the pending x_char first, then drain the circular xmit
 * buffer while the transmitter reports ready.  Wakes writers below
 * WAKEUP_CHARS and re-enables the TX interrupt if data remains.
 */
752 static void atmel_tx_chars(struct uart_port *port)
754 struct circ_buf *xmit = &port->state->xmit;
755 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
758 (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) {
759 atmel_uart_write_char(port, port->x_char);
763 if (uart_circ_empty(xmit) || uart_tx_stopped(port))
766 while (atmel_uart_readl(port, ATMEL_US_CSR) &
767 atmel_port->tx_done_mask) {
768 atmel_uart_write_char(port, xmit->buf[xmit->tail]);
769 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
771 if (uart_circ_empty(xmit))
775 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
776 uart_write_wakeup(port);
778 if (!uart_circ_empty(xmit))
779 /* Enable interrupts */
780 atmel_uart_writel(port, ATMEL_US_IER,
781 atmel_port->tx_done_mask);
/*
 * dmaengine TX completion callback: retire the finished descriptor,
 * advance the xmit tail by the transferred length, wake writers, and
 * either schedule the next chunk (circular buffer wrap) or, in RS485
 * half-duplex, arm hd_start_rx so the IRQ handler restarts reception
 * once the FIFO fully drains.
 */
784 static void atmel_complete_tx_dma(void *arg)
786 struct atmel_uart_port *atmel_port = arg;
787 struct uart_port *port = &atmel_port->uart;
788 struct circ_buf *xmit = &port->state->xmit;
789 struct dma_chan *chan = atmel_port->chan_tx;
792 spin_lock_irqsave(&port->lock, flags);
795 dmaengine_terminate_all(chan);
796 xmit->tail += atmel_port->tx_len;
797 xmit->tail &= UART_XMIT_SIZE - 1;
799 port->icount.tx += atmel_port->tx_len;
	/* Release the descriptor under lock_tx so atmel_tx_dma() sees an
	 * idle channel before submitting the next transfer. */
801 spin_lock(&atmel_port->lock_tx);
802 async_tx_ack(atmel_port->desc_tx);
803 atmel_port->cookie_tx = -EINVAL;
804 atmel_port->desc_tx = NULL;
805 spin_unlock(&atmel_port->lock_tx);
807 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
808 uart_write_wakeup(port);
811 * xmit is a circular buffer so, if we have just send data from
812 * xmit->tail to the end of xmit->buf, now we have to transmit the
813 * remaining data from the beginning of xmit->buf to xmit->head.
815 if (!uart_circ_empty(xmit))
816 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
817 else if (atmel_uart_is_half_duplex(port)) {
819 * DMA done, re-enable TXEMPTY and signal that we can stop
820 * TX and start RX for RS485
822 atmel_port->hd_start_rx = true;
823 atmel_uart_writel(port, ATMEL_US_IER,
824 atmel_port->tx_done_mask);
827 spin_unlock_irqrestore(&port->lock, flags);
/*
 * Tear down the TX dmaengine resources: abort outstanding transfers,
 * release the channel, unmap the xmit scatterlist and reset the per-port
 * DMA bookkeeping so a later prepare can start clean.
 */
830 static void atmel_release_tx_dma(struct uart_port *port)
832 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
833 struct dma_chan *chan = atmel_port->chan_tx;
836 dmaengine_terminate_all(chan);
837 dma_release_channel(chan);
838 dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
842 atmel_port->desc_tx = NULL;
843 atmel_port->chan_tx = NULL;
844 atmel_port->cookie_tx = -EINVAL;
848 * Called from tasklet with TXRDY interrupt is disabled.
/*
 * Build and submit one dmaengine TX transfer covering the contiguous tail
 * of the circular xmit buffer (tail..end; the wrap is handled by the next
 * invocation from the completion callback).  With hardware FIFOs the bulk
 * is sent with 32-bit accesses and the 0-3 remainder bytes with byte
 * accesses, hence the two-entry scatterlist.  No-op while a descriptor is
 * already in flight.
 */
850 static void atmel_tx_dma(struct uart_port *port)
852 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
853 struct circ_buf *xmit = &port->state->xmit;
854 struct dma_chan *chan = atmel_port->chan_tx;
855 struct dma_async_tx_descriptor *desc;
856 struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
857 unsigned int tx_len, part1_len, part2_len, sg_len;
858 dma_addr_t phys_addr;
860 /* Make sure we have an idle channel */
861 if (atmel_port->desc_tx != NULL)
864 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
867 * Port xmit buffer is already mapped,
868 * and it is one page... Just adjust
869 * offsets and lengths. Since it is a circular buffer,
870 * we have to transmit till the end, and then the rest.
871 * Take the port lock to get a
872 * consistent xmit buffer state.
874 tx_len = CIRC_CNT_TO_END(xmit->head,
878 if (atmel_port->fifo_size) {
879 /* multi data mode */
880 part1_len = (tx_len & ~0x3); /* DWORD access */
881 part2_len = (tx_len & 0x3); /* BYTE access */
883 /* single data (legacy) mode */
885 part2_len = tx_len; /* BYTE access only */
888 sg_init_table(sgl, 2);
890 phys_addr = sg_dma_address(sg_tx) + xmit->tail;
893 sg_dma_address(sg) = phys_addr;
894 sg_dma_len(sg) = part1_len;
896 phys_addr += part1_len;
901 sg_dma_address(sg) = phys_addr;
902 sg_dma_len(sg) = part2_len;
906 * save tx_len so atmel_complete_tx_dma() will increase
907 * xmit->tail correctly
909 atmel_port->tx_len = tx_len;
911 desc = dmaengine_prep_slave_sg(chan,
918 dev_err(port->dev, "Failed to send via dma!\n");
	/* Flush CPU writes to the buffer before the device reads it. */
922 dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE);
924 atmel_port->desc_tx = desc;
925 desc->callback = atmel_complete_tx_dma;
926 desc->callback_param = atmel_port;
927 atmel_port->cookie_tx = dmaengine_submit(desc);
928 if (dma_submit_error(atmel_port->cookie_tx)) {
929 dev_err(port->dev, "dma_submit_error %d\n",
930 atmel_port->cookie_tx);
934 dma_async_issue_pending(chan);
937 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
938 uart_write_wakeup(port);
/*
 * Acquire and configure the "tx" dmaengine channel: map the page-aligned
 * xmit buffer as a single scatterlist entry and program the slave config
 * (THR destination; 4-byte bus width when FIFOs exist, else 1 byte).
 * On any failure the port falls back to PIO by clearing use_dma_tx.
 */
941 static int atmel_prepare_tx_dma(struct uart_port *port)
943 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
945 struct dma_slave_config config;
949 dma_cap_set(DMA_SLAVE, mask);
951 atmel_port->chan_tx = dma_request_slave_channel(port->dev, "tx");
952 if (atmel_port->chan_tx == NULL)
954 dev_info(port->dev, "using %s for tx DMA transfers\n",
955 dma_chan_name(atmel_port->chan_tx));
957 spin_lock_init(&atmel_port->lock_tx);
958 sg_init_table(&atmel_port->sg_tx, 1);
959 /* UART circular tx buffer is an aligned page. */
960 BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf));
961 sg_set_page(&atmel_port->sg_tx,
962 virt_to_page(port->state->xmit.buf),
964 offset_in_page(port->state->xmit.buf));
965 nent = dma_map_sg(port->dev,
971 dev_dbg(port->dev, "need to release resource of dma\n");
974 dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
975 sg_dma_len(&atmel_port->sg_tx),
976 port->state->xmit.buf,
977 &sg_dma_address(&atmel_port->sg_tx));
980 /* Configure the slave DMA */
981 memset(&config, 0, sizeof(config));
982 config.direction = DMA_MEM_TO_DEV;
983 config.dst_addr_width = (atmel_port->fifo_size) ?
984 DMA_SLAVE_BUSWIDTH_4_BYTES :
985 DMA_SLAVE_BUSWIDTH_1_BYTE;
986 config.dst_addr = port->mapbase + ATMEL_US_THR;
987 config.dst_maxburst = 1;
989 ret = dmaengine_slave_config(atmel_port->chan_tx,
992 dev_err(port->dev, "DMA tx slave configuration failed\n");
	/* Error path: revert to PIO and free whatever was acquired. */
999 dev_err(port->dev, "TX channel not available, switch to pio\n");
1000 atmel_port->use_dma_tx = 0;
1001 if (atmel_port->chan_tx)
1002 atmel_release_tx_dma(port);
1006 static void atmel_complete_rx_dma(void *arg)
1008 struct uart_port *port = arg;
1009 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1011 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
/*
 * Tear down the RX dmaengine resources; mirror of atmel_release_tx_dma()
 * for the receive channel and ring-buffer scatterlist.
 */
1014 static void atmel_release_rx_dma(struct uart_port *port)
1016 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1017 struct dma_chan *chan = atmel_port->chan_rx;
1020 dmaengine_terminate_all(chan);
1021 dma_release_channel(chan);
1022 dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1,
1026 atmel_port->desc_rx = NULL;
1027 atmel_port->chan_rx = NULL;
1028 atmel_port->cookie_rx = -EINVAL;
/*
 * RX tasklet body for the cyclic dmaengine path: compute how far the DMA
 * has written (buffer length minus residue), copy the new bytes to the
 * tty flip buffer — in two chunks when the ring wraps — then hand the
 * buffer back to the device and re-enable the receive timeout interrupt.
 * Called with port->lock held.
 */
1031 static void atmel_rx_from_dma(struct uart_port *port)
1033 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1034 struct tty_port *tport = &port->state->port;
1035 struct circ_buf *ring = &atmel_port->rx_ring;
1036 struct dma_chan *chan = atmel_port->chan_rx;
1037 struct dma_tx_state state;
1038 enum dma_status dmastat;
1042 /* Reset the UART timeout early so that we don't miss one */
1043 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1044 dmastat = dmaengine_tx_status(chan,
1045 atmel_port->cookie_rx,
1047 /* Restart a new tasklet if DMA status is error */
1048 if (dmastat == DMA_ERROR) {
1049 dev_dbg(port->dev, "Get residue error, restart tasklet\n");
1050 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1051 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
1055 /* CPU claims ownership of RX DMA buffer */
1056 dma_sync_sg_for_cpu(port->dev,
1062 * ring->head points to the end of data already written by the DMA.
1063 * ring->tail points to the beginning of data to be read by the
1065 * The current transfer size should not be larger than the dma buffer
1068 ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
1069 BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
1071 * At this point ring->head may point to the first byte right after the
1072 * last byte of the dma buffer:
1073 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
1075 * However ring->tail must always points inside the dma buffer:
1076 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
1078 * Since we use a ring buffer, we have to handle the case
1079 * where head is lower than tail. In such a case, we first read from
1080 * tail to the end of the buffer then reset tail.
1082 if (ring->head < ring->tail) {
1083 count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;
1085 tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1087 port->icount.rx += count;
1090 /* Finally we read data from tail to head */
1091 if (ring->tail < ring->head) {
1092 count = ring->head - ring->tail;
1094 tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1095 /* Wrap ring->head if needed */
1096 if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
1098 ring->tail = ring->head;
1099 port->icount.rx += count;
1102 /* USART retrieves ownership of RX DMA buffer */
1103 dma_sync_sg_for_device(port->dev,
1109 * Drop the lock here since it might end up calling
1110 * uart_start(), which takes the lock.
1112 spin_unlock(&port->lock);
1113 tty_flip_buffer_push(tport);
1114 spin_lock(&port->lock);
1116 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
/*
 * Acquire and configure the "rx" dmaengine channel: map the page-aligned
 * rx_ring buffer, program the slave config (RHR source, 1-byte width),
 * and submit a cyclic transfer with two periods of half the buffer each
 * so completions interleave with the consumer.  On any failure the port
 * falls back to PIO by clearing use_dma_rx.
 */
1119 static int atmel_prepare_rx_dma(struct uart_port *port)
1121 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1122 struct dma_async_tx_descriptor *desc;
1123 dma_cap_mask_t mask;
1124 struct dma_slave_config config;
1125 struct circ_buf *ring;
1128 ring = &atmel_port->rx_ring;
1131 dma_cap_set(DMA_CYCLIC, mask);
1133 atmel_port->chan_rx = dma_request_slave_channel(port->dev, "rx");
1134 if (atmel_port->chan_rx == NULL)
1136 dev_info(port->dev, "using %s for rx DMA transfers\n",
1137 dma_chan_name(atmel_port->chan_rx));
1139 spin_lock_init(&atmel_port->lock_rx);
1140 sg_init_table(&atmel_port->sg_rx, 1);
1141 /* UART circular rx buffer is an aligned page. */
1142 BUG_ON(!PAGE_ALIGNED(ring->buf));
1143 sg_set_page(&atmel_port->sg_rx,
1144 virt_to_page(ring->buf),
1145 sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
1146 offset_in_page(ring->buf));
1147 nent = dma_map_sg(port->dev,
1153 dev_dbg(port->dev, "need to release resource of dma\n");
1156 dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
1157 sg_dma_len(&atmel_port->sg_rx),
1159 &sg_dma_address(&atmel_port->sg_rx));
1162 /* Configure the slave DMA */
1163 memset(&config, 0, sizeof(config));
1164 config.direction = DMA_DEV_TO_MEM;
1165 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1166 config.src_addr = port->mapbase + ATMEL_US_RHR;
1167 config.src_maxburst = 1;
1169 ret = dmaengine_slave_config(atmel_port->chan_rx,
1172 dev_err(port->dev, "DMA rx slave configuration failed\n");
1176 * Prepare a cyclic dma transfer, assign 2 descriptors,
1177 * each one is half ring buffer size
1179 desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
1180 sg_dma_address(&atmel_port->sg_rx),
1181 sg_dma_len(&atmel_port->sg_rx),
1182 sg_dma_len(&atmel_port->sg_rx)/2,
1184 DMA_PREP_INTERRUPT);
1186 dev_err(port->dev, "Preparing DMA cyclic failed\n");
1189 desc->callback = atmel_complete_rx_dma;
1190 desc->callback_param = port;
1191 atmel_port->desc_rx = desc;
1192 atmel_port->cookie_rx = dmaengine_submit(desc);
1193 if (dma_submit_error(atmel_port->cookie_rx)) {
1194 dev_err(port->dev, "dma_submit_error %d\n",
1195 atmel_port->cookie_rx);
1199 dma_async_issue_pending(atmel_port->chan_rx);
	/* Error path: revert to PIO and free whatever was acquired. */
1204 dev_err(port->dev, "RX channel not available, switch to pio\n");
1205 atmel_port->use_dma_rx = 0;
1206 if (atmel_port->chan_rx)
1207 atmel_release_rx_dma(port);
/*
 * Periodic RX poll timer (used when no receive-timeout interrupt exists):
 * kick the RX tasklet and re-arm itself, unless the port is shutting down.
 */
1211 static void atmel_uart_timer_callback(unsigned long data)
1213 struct uart_port *port = (void *)data;
1214 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1216 if (!atomic_read(&atmel_port->tasklet_shutdown)) {
1217 tasklet_schedule(&atmel_port->tasklet_rx);
1218 mod_timer(&atmel_port->uart_timer,
1219 jiffies + uart_poll_timeout(port));
1224 * receive interrupt handler.
/*
 * IRQ-side RX dispatch: depending on the active backend, either mask the
 * PDC/DMA interrupts and defer to the RX tasklet, or drain characters
 * inline via atmel_rx_chars().  Also clears an end-of-break that arrived
 * without an accompanying character.
 */
1227 atmel_handle_receive(struct uart_port *port, unsigned int pending)
1229 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1231 if (atmel_use_pdc_rx(port)) {
1233 * PDC receive. Just schedule the tasklet and let it
1234 * figure out the details.
1236 * TODO: We're not handling error flags correctly at
1239 if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
1240 atmel_uart_writel(port, ATMEL_US_IDR,
1241 (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
1242 atmel_tasklet_schedule(atmel_port,
1243 &atmel_port->tasklet_rx);
1246 if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
1247 ATMEL_US_FRAME | ATMEL_US_PARE))
1248 atmel_pdc_rxerr(port, pending);
1251 if (atmel_use_dma_rx(port)) {
1252 if (pending & ATMEL_US_TIMEOUT) {
1253 atmel_uart_writel(port, ATMEL_US_IDR,
1255 atmel_tasklet_schedule(atmel_port,
1256 &atmel_port->tasklet_rx);
1260 /* Interrupt receive */
1261 if (pending & ATMEL_US_RXRDY)
1262 atmel_rx_chars(port);
1263 else if (pending & ATMEL_US_RXBRK) {
1265 * End of break detected. If it came along with a
1266 * character, atmel_rx_chars will handle it.
1268 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
1269 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
1270 atmel_port->break_active = 0;
1275 * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
/*
 * IRQ-side TX dispatch: on a TX-done event, mask the interrupt, perform
 * the deferred RS485 half-duplex TX->RX turnaround once the FIFO is truly
 * empty (hd_start_rx), then schedule the TX tasklet to refill.
 */
1278 atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1280 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1282 if (pending & atmel_port->tx_done_mask) {
1283 atmel_uart_writel(port, ATMEL_US_IDR,
1284 atmel_port->tx_done_mask);
1286 /* Start RX if flag was set and FIFO is empty */
1287 if (atmel_port->hd_start_rx) {
1288 if (!(atmel_uart_readl(port, ATMEL_US_CSR)
1289 & ATMEL_US_TXEMPTY))
1290 dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
1292 atmel_port->hd_start_rx = false;
1293 atmel_start_rx(port);
1296 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
/*
 * Modem-status interrupt handler: diffs the current line state against
 * the previously seen state and notifies the serial core of any RI /
 * DSR / DCD / CTS transitions.
 */
1301 * status flags interrupt handler.
1304 atmel_handle_status(struct uart_port *port, unsigned int pending,
1305 unsigned int status)
1307 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1308 unsigned int status_change;
1310 if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
1311 | ATMEL_US_CTSIC)) {
/* XOR against the cached CSR to find which lines actually changed. */
1312 status_change = status ^ atmel_port->irq_status_prev;
1313 atmel_port->irq_status_prev = status;
1315 if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
1316 | ATMEL_US_DCD | ATMEL_US_CTS)) {
1317 /* TODO: All reads to CSR will clear these interrupts! */
1318 if (status_change & ATMEL_US_RI)
1320 if (status_change & ATMEL_US_DSR)
/* CSR bits are active-low here, hence the negation. */
1322 if (status_change & ATMEL_US_DCD)
1323 uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
1324 if (status_change & ATMEL_US_CTS)
1325 uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
1327 wake_up_interruptible(&port->state->port.delta_msr_wait);
/*
 * Top-level shared interrupt handler: reads CSR/IMR, and while any
 * enabled interrupt is pending dispatches it to the receive, status and
 * transmit sub-handlers. If the port is suspended, the pending flags
 * are stashed and the interrupts masked until resume.
 */
1335 static irqreturn_t atmel_interrupt(int irq, void *dev_id)
1337 struct uart_port *port = dev_id;
1338 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1339 unsigned int status, pending, mask, pass_counter = 0;
1341 spin_lock(&atmel_port->lock_suspended);
1344 status = atmel_get_lines_status(port);
1345 mask = atmel_uart_readl(port, ATMEL_US_IMR);
1346 pending = status & mask;
1350 if (atmel_port->suspended) {
/* Defer handling: remember what fired and silence the sources. */
1351 atmel_port->pending |= pending;
1352 atmel_port->pending_status = status;
1353 atmel_uart_writel(port, ATMEL_US_IDR, mask);
1358 atmel_handle_receive(port, pending);
1359 atmel_handle_status(port, pending, status);
1360 atmel_handle_transmit(port, pending);
/* Bounded loop so a stuck interrupt source cannot livelock us. */
1361 } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
1363 spin_unlock(&atmel_port->lock_suspended);
1365 return pass_counter ? IRQ_HANDLED : IRQ_NONE;
/* Tear down the PDC TX path: unmap the DMA buffer used for transmit. */
1368 static void atmel_release_tx_pdc(struct uart_port *port)
1370 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1371 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1373 dma_unmap_single(port->dev,
/*
 * PDC transmit tasklet work: account for the bytes the PDC already
 * sent, then either program the next contiguous chunk of the circular
 * buffer into the PDC or, when nothing is left, start RX for RS485
 * half-duplex operation.
 */
1380 * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
1382 static void atmel_tx_pdc(struct uart_port *port)
1384 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1385 struct circ_buf *xmit = &port->state->xmit;
1386 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1389 /* nothing left to transmit? */
1390 if (atmel_uart_readl(port, ATMEL_PDC_TCR))
/* Consume what the previous transfer sent (pdc->ofs bytes). */
1393 xmit->tail += pdc->ofs;
1394 xmit->tail &= UART_XMIT_SIZE - 1;
1396 port->icount.tx += pdc->ofs;
1399 /* more to transmit - setup next transfer */
1401 /* disable PDC transmit */
1402 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
1404 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
1405 dma_sync_single_for_device(port->dev,
/* Only up to the buffer end: the PDC needs one linear region. */
1410 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
1413 atmel_uart_writel(port, ATMEL_PDC_TPR,
1414 pdc->dma_addr + xmit->tail);
1415 atmel_uart_writel(port, ATMEL_PDC_TCR, count);
1416 /* re-enable PDC transmit */
1417 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1418 /* Enable interrupts */
1419 atmel_uart_writel(port, ATMEL_US_IER,
1420 atmel_port->tx_done_mask);
1422 if (atmel_uart_is_half_duplex(port)) {
1423 /* DMA done, stop TX, start RX for RS485 */
1424 atmel_start_rx(port);
/* Wake writers once the circular buffer has drained enough. */
1428 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1429 uart_write_wakeup(port);
/*
 * Set up the PDC TX path: map the whole circular transmit buffer for
 * DMA so atmel_tx_pdc() can point the PDC straight into it.
 */
1432 static int atmel_prepare_tx_pdc(struct uart_port *port)
1434 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1435 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1436 struct circ_buf *xmit = &port->state->xmit;
1438 pdc->buf = xmit->buf;
1439 pdc->dma_addr = dma_map_single(port->dev,
1443 pdc->dma_size = UART_XMIT_SIZE;
/*
 * Drain characters queued in the software RX ring (filled from the
 * interrupt handler) into the tty layer, translating hardware error
 * flags into icount statistics and tty flags.
 */
1449 static void atmel_rx_from_ring(struct uart_port *port)
1451 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1452 struct circ_buf *ring = &atmel_port->rx_ring;
1454 unsigned int status;
1456 while (ring->head != ring->tail) {
1457 struct atmel_uart_char c;
1459 /* Make sure c is loaded after head. */
1462 c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
1464 ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
1471 * note that the error handling code is
1472 * out of the main execution path
1474 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
1475 | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
1476 if (status & ATMEL_US_RXBRK) {
1477 /* ignore side-effect */
1478 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
1481 if (uart_handle_break(port))
1484 if (status & ATMEL_US_PARE)
1485 port->icount.parity++;
1486 if (status & ATMEL_US_FRAME)
1487 port->icount.frame++;
1488 if (status & ATMEL_US_OVRE)
1489 port->icount.overrun++;
/* Drop bits the caller asked us not to report. */
1491 status &= port->read_status_mask;
1493 if (status & ATMEL_US_RXBRK)
1495 else if (status & ATMEL_US_PARE)
1497 else if (status & ATMEL_US_FRAME)
1502 if (uart_handle_sysrq_char(port, c.ch))
1505 uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
1509 * Drop the lock here since it might end up calling
1510 * uart_start(), which takes the lock.
1512 spin_unlock(&port->lock);
1513 tty_flip_buffer_push(&port->state->port);
1514 spin_lock(&port->lock);
/* Tear down the PDC RX path: unmap both ping-pong DMA buffers. */
1517 static void atmel_release_rx_pdc(struct uart_port *port)
1519 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1522 for (i = 0; i < 2; i++) {
1523 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1525 dma_unmap_single(port->dev,
/*
 * PDC receive tasklet work: figure out how far the PDC has written into
 * the current ping-pong buffer (via RPR), push the new bytes to the tty
 * layer, recycle full buffers back to the PDC, then re-enable the
 * ENDRX/TIMEOUT interrupts masked by the interrupt handler.
 */
1533 static void atmel_rx_from_pdc(struct uart_port *port)
1535 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1536 struct tty_port *tport = &port->state->port;
1537 struct atmel_dma_buffer *pdc;
1538 int rx_idx = atmel_port->pdc_rx_idx;
1544 /* Reset the UART timeout early so that we don't miss one */
1545 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1547 pdc = &atmel_port->pdc_rx[rx_idx];
/* RPR is the PDC's next write address; offset from buffer base. */
1548 head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
1551 /* If the PDC has switched buffers, RPR won't contain
1552 * any address within the current buffer. Since head
1553 * is unsigned, we just need a one-way comparison to
1556 * In this case, we just need to consume the entire
1557 * buffer and resubmit it for DMA. This will clear the
1558 * ENDRX bit as well, so that we can safely re-enable
1559 * all interrupts below.
1561 head = min(head, pdc->dma_size);
1563 if (likely(head != tail)) {
1564 dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
1565 pdc->dma_size, DMA_FROM_DEVICE);
1568 * head will only wrap around when we recycle
1569 * the DMA buffer, and when that happens, we
1570 * explicitly set tail to 0. So head will
1571 * always be greater than tail.
1573 count = head - tail;
1575 tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
1578 dma_sync_single_for_device(port->dev, pdc->dma_addr,
1579 pdc->dma_size, DMA_FROM_DEVICE);
1581 port->icount.rx += count;
1586 * If the current buffer is full, we need to check if
1587 * the next one contains any additional data.
1589 if (head >= pdc->dma_size) {
/* Hand the drained buffer back to the PDC as the "next" buffer. */
1591 atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
1592 atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
1595 atmel_port->pdc_rx_idx = rx_idx;
1597 } while (head >= pdc->dma_size);
1600 * Drop the lock here since it might end up calling
1601 * uart_start(), which takes the lock.
1603 spin_unlock(&port->lock);
1604 tty_flip_buffer_push(tport);
1605 spin_lock(&port->lock);
1607 atmel_uart_writel(port, ATMEL_US_IER,
1608 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
/*
 * Set up the PDC RX path: allocate and DMA-map two ping-pong buffers,
 * then program the PDC with the current (RPR/RCR) and next (RNPR/RNCR)
 * buffer. On allocation failure the already-mapped buffer is released
 * and the port falls back to non-PDC receive.
 */
1611 static int atmel_prepare_rx_pdc(struct uart_port *port)
1613 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1616 for (i = 0; i < 2; i++) {
1617 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1619 pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
1620 if (pdc->buf == NULL) {
/* Second allocation failed: undo the first buffer's map+alloc. */
1622 dma_unmap_single(port->dev,
1623 atmel_port->pdc_rx[0].dma_addr,
1626 kfree(atmel_port->pdc_rx[0].buf);
1628 atmel_port->use_pdc_rx = 0;
1631 pdc->dma_addr = dma_map_single(port->dev,
1635 pdc->dma_size = PDC_BUFFER_SIZE;
1639 atmel_port->pdc_rx_idx = 0;
1641 atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
1642 atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
1644 atmel_uart_writel(port, ATMEL_PDC_RNPR,
1645 atmel_port->pdc_rx[1].dma_addr);
1646 atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
/*
 * RX tasklet entry point: takes the port lock (the interrupt handler
 * does not) and runs whichever schedule_rx handler atmel_set_ops()
 * selected for this port.
 */
1652 * tasklet handling tty stuff outside the interrupt handler.
1654 static void atmel_tasklet_rx_func(unsigned long data)
1656 struct uart_port *port = (struct uart_port *)data;
1657 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1659 /* The interrupt handler does not take the lock */
1660 spin_lock(&port->lock);
1661 atmel_port->schedule_rx(port);
1662 spin_unlock(&port->lock);
/*
 * TX tasklet entry point: takes the port lock and runs the
 * schedule_tx handler selected for this port by atmel_set_ops().
 */
1665 static void atmel_tasklet_tx_func(unsigned long data)
1667 struct uart_port *port = (struct uart_port *)data;
1668 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1670 /* The interrupt handler does not take the lock */
1671 spin_lock(&port->lock);
1672 atmel_port->schedule_tx(port);
1673 spin_unlock(&port->lock);
/*
 * Read the DT properties that select the RX/TX data-transfer engine:
 * "atmel,use-dma-rx/tx" requests DMA, and the presence of a "dmas"
 * property picks the dmaengine path over the legacy PDC. With neither,
 * the port uses plain PIO.
 */
1676 static void atmel_init_property(struct atmel_uart_port *atmel_port,
1677 struct platform_device *pdev)
1679 struct device_node *np = pdev->dev.of_node;
1681 /* DMA/PDC usage specification */
1682 if (of_property_read_bool(np, "atmel,use-dma-rx")) {
1683 if (of_property_read_bool(np, "dmas")) {
1684 atmel_port->use_dma_rx = true;
1685 atmel_port->use_pdc_rx = false;
1687 atmel_port->use_dma_rx = false;
1688 atmel_port->use_pdc_rx = true;
1691 atmel_port->use_dma_rx = false;
1692 atmel_port->use_pdc_rx = false;
1695 if (of_property_read_bool(np, "atmel,use-dma-tx")) {
1696 if (of_property_read_bool(np, "dmas")) {
1697 atmel_port->use_dma_tx = true;
1698 atmel_port->use_pdc_tx = false;
1700 atmel_port->use_dma_tx = false;
1701 atmel_port->use_pdc_tx = true;
1704 atmel_port->use_dma_tx = false;
1705 atmel_port->use_pdc_tx = false;
/*
 * Populate port->rs485 from device-tree properties: RTS delays,
 * RX-during-TX behaviour, and whether RS485 mode is enabled at boot.
 */
1709 static void atmel_init_rs485(struct uart_port *port,
1710 struct platform_device *pdev)
1712 struct device_node *np = pdev->dev.of_node;
1714 struct serial_rs485 *rs485conf = &port->rs485;
1717 /* rs485 properties */
1718 if (of_property_read_u32_array(np, "rs485-rts-delay",
1719 rs485_delay, 2) == 0) {
1720 rs485conf->delay_rts_before_send = rs485_delay[0];
1721 rs485conf->delay_rts_after_send = rs485_delay[1];
1722 rs485conf->flags = 0;
1725 if (of_get_property(np, "rs485-rx-during-tx", NULL))
1726 rs485conf->flags |= SER_RS485_RX_DURING_TX;
1728 if (of_get_property(np, "linux,rs485-enabled-at-boot-time", NULL))
1729 rs485conf->flags |= SER_RS485_ENABLED;
/*
 * Bind the prepare/schedule/release RX and TX callbacks to the engine
 * chosen by atmel_init_property(): dmaengine, PDC, or PIO (the PIO
 * paths need no prepare/release, hence the NULLs).
 */
1732 static void atmel_set_ops(struct uart_port *port)
1734 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1736 if (atmel_use_dma_rx(port)) {
1737 atmel_port->prepare_rx = &atmel_prepare_rx_dma;
1738 atmel_port->schedule_rx = &atmel_rx_from_dma;
1739 atmel_port->release_rx = &atmel_release_rx_dma;
1740 } else if (atmel_use_pdc_rx(port)) {
1741 atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
1742 atmel_port->schedule_rx = &atmel_rx_from_pdc;
1743 atmel_port->release_rx = &atmel_release_rx_pdc;
1745 atmel_port->prepare_rx = NULL;
1746 atmel_port->schedule_rx = &atmel_rx_from_ring;
1747 atmel_port->release_rx = NULL;
1750 if (atmel_use_dma_tx(port)) {
1751 atmel_port->prepare_tx = &atmel_prepare_tx_dma;
1752 atmel_port->schedule_tx = &atmel_tx_dma;
1753 atmel_port->release_tx = &atmel_release_tx_dma;
1754 } else if (atmel_use_pdc_tx(port)) {
1755 atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
1756 atmel_port->schedule_tx = &atmel_tx_pdc;
1757 atmel_port->release_tx = &atmel_release_tx_pdc;
1759 atmel_port->prepare_tx = NULL;
1760 atmel_port->schedule_tx = &atmel_tx_chars;
1761 atmel_port->release_tx = NULL;
/*
 * Identify the IP block behind this port (USART, new UART, or DBGU
 * UART) from the ATMEL_US_NAME register's ASCII signature, and set the
 * per-port capability flags (fractional baud rate, hardware RX timeout)
 * and the RTOR register offset accordingly. Older SoCs without a NAME
 * register fall back to decoding ATMEL_US_VERSION.
 */
1766 * Get ip name usart or uart
1768 static void atmel_get_ip_name(struct uart_port *port)
1770 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1771 int name = atmel_uart_readl(port, ATMEL_US_NAME);
1773 u32 usart, dbgu_uart, new_uart;
1774 /* ASCII decoding for IP version */
1775 usart = 0x55534152; /* USAR(T) */
1776 dbgu_uart = 0x44424755; /* DBGU */
1777 new_uart = 0x55415254; /* UART */
1780 * Only USART devices from at91sam9260 SOC implement fractional
1781 * baudrate. It is available for all asynchronous modes, with the
1782 * following restriction: the sampling clock's duty cycle is not
1785 atmel_port->has_frac_baudrate = false;
1786 atmel_port->has_hw_timer = false;
1788 if (name == new_uart) {
1789 dev_dbg(port->dev, "Uart with hw timer");
1790 atmel_port->has_hw_timer = true;
1791 atmel_port->rtor = ATMEL_UA_RTOR;
1792 } else if (name == usart) {
1793 dev_dbg(port->dev, "Usart\n");
1794 atmel_port->has_frac_baudrate = true;
1795 atmel_port->has_hw_timer = true;
1796 atmel_port->rtor = ATMEL_US_RTOR;
1797 } else if (name == dbgu_uart) {
1798 dev_dbg(port->dev, "Dbgu or uart without hw timer\n");
1800 /* fallback for older SoCs: use version field */
1801 version = atmel_uart_readl(port, ATMEL_US_VERSION);
1806 dev_dbg(port->dev, "This version is usart\n");
1807 atmel_port->has_frac_baudrate = true;
1808 atmel_port->has_hw_timer = true;
1809 atmel_port->rtor = ATMEL_US_RTOR;
1813 dev_dbg(port->dev, "This version is uart\n");
1816 dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
/*
 * uart_ops.startup: bring the port up for use. Masks all interrupts,
 * requests the (shared) IRQ, initializes the RX/TX tasklets, prepares
 * the selected DMA/PDC engines, configures the FIFO thresholds when a
 * FIFO is present, enables the transmitter/receiver, and finally arms
 * the RX timeout (hardware RTOR when available, otherwise a kernel
 * timer) for the PDC/DMA receive paths.
 */
1822 * Perform initialization and enable port for reception
1824 static int atmel_startup(struct uart_port *port)
1826 struct platform_device *pdev = to_platform_device(port->dev);
1827 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1831 * Ensure that no interrupts are enabled otherwise when
1832 * request_irq() is called we could get stuck trying to
1833 * handle an unexpected interrupt
1835 atmel_uart_writel(port, ATMEL_US_IDR, -1);
1836 atmel_port->ms_irq_enabled = false;
1841 retval = request_irq(port->irq, atmel_interrupt,
1842 IRQF_SHARED | IRQF_COND_SUSPEND,
1843 dev_name(&pdev->dev), port);
1845 dev_err(port->dev, "atmel_startup - Can't get irq\n");
1849 atomic_set(&atmel_port->tasklet_shutdown, 0);
1850 tasklet_init(&atmel_port->tasklet_rx, atmel_tasklet_rx_func,
1851 (unsigned long)port);
1852 tasklet_init(&atmel_port->tasklet_tx, atmel_tasklet_tx_func,
1853 (unsigned long)port);
1856 * Initialize DMA (if necessary)
1858 atmel_init_property(atmel_port, pdev);
1859 atmel_set_ops(port);
1861 if (atmel_port->prepare_rx) {
1862 retval = atmel_port->prepare_rx(port);
/* prepare_rx failed: re-resolve ops for the PIO fallback path. */
1864 atmel_set_ops(port);
1867 if (atmel_port->prepare_tx) {
1868 retval = atmel_port->prepare_tx(port);
1870 atmel_set_ops(port);
1874 * Enable FIFO when available
1876 if (atmel_port->fifo_size) {
1877 unsigned int txrdym = ATMEL_US_ONE_DATA;
1878 unsigned int rxrdym = ATMEL_US_ONE_DATA;
1881 atmel_uart_writel(port, ATMEL_US_CR,
1886 if (atmel_use_dma_tx(port))
1887 txrdym = ATMEL_US_FOUR_DATA;
1889 fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
/* With both thresholds set, let the FIFO drive RTS automatically. */
1890 if (atmel_port->rts_high &&
1891 atmel_port->rts_low)
1892 fmr |= ATMEL_US_FRTSC |
1893 ATMEL_US_RXFTHRES(atmel_port->rts_high) |
1894 ATMEL_US_RXFTHRES2(atmel_port->rts_low);
1896 atmel_uart_writel(port, ATMEL_US_FMR, fmr);
1899 /* Save current CSR for comparison in atmel_tasklet_func() */
1900 atmel_port->irq_status_prev = atmel_get_lines_status(port);
1903 * Finally, enable the serial port
1905 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
1906 /* enable xmit & rcvr */
1907 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
1909 setup_timer(&atmel_port->uart_timer,
1910 atmel_uart_timer_callback,
1911 (unsigned long)port);
1913 if (atmel_use_pdc_rx(port)) {
1914 /* set UART timeout */
1915 if (!atmel_port->has_hw_timer) {
/* No hardware RX timeout: poll via a kernel timer instead. */
1916 mod_timer(&atmel_port->uart_timer,
1917 jiffies + uart_poll_timeout(port));
1918 /* set USART timeout */
1920 atmel_uart_writel(port, atmel_port->rtor,
1922 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1924 atmel_uart_writel(port, ATMEL_US_IER,
1925 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1927 /* enable PDC controller */
1928 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1929 } else if (atmel_use_dma_rx(port)) {
1930 /* set UART timeout */
1931 if (!atmel_port->has_hw_timer) {
1932 mod_timer(&atmel_port->uart_timer,
1933 jiffies + uart_poll_timeout(port));
1934 /* set USART timeout */
1936 atmel_uart_writel(port, atmel_port->rtor,
1938 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1940 atmel_uart_writel(port, ATMEL_US_IER,
1944 /* enable receive only */
1945 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
/*
 * uart_ops.flush_buffer: discard any TX data already handed to the
 * PDC/DMA engines after the core has cleared the circular buffer.
 */
1952 * Flush any TX data submitted for DMA. Called when the TX circular
1955 static void atmel_flush_buffer(struct uart_port *port)
1957 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1959 if (atmel_use_pdc_tx(port)) {
/* Zero the PDC transfer counter and forget the in-flight offset. */
1960 atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
1961 atmel_port->pdc_tx.ofs = 0;
1964 * in uart_flush_buffer(), the xmit circular buffer has just
1965 * been cleared, so we have to reset tx_len accordingly.
1967 atmel_port->tx_len = 0;
/*
 * uart_ops.shutdown: the inverse of atmel_startup(). Ordering matters:
 * interrupts are masked and tasklet scheduling is blocked *before* the
 * timer/tasklets are killed and the DMA resources are released, so no
 * deferred work can touch freed buffers.
 */
1973 static void atmel_shutdown(struct uart_port *port)
1975 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1977 /* Disable modem control lines interrupts */
1978 atmel_disable_ms(port);
1980 /* Disable interrupts at device level */
1981 atmel_uart_writel(port, ATMEL_US_IDR, -1);
1983 /* Prevent spurious interrupts from scheduling the tasklet */
1984 atomic_inc(&atmel_port->tasklet_shutdown);
1987 * Prevent any tasklets being scheduled during
1990 del_timer_sync(&atmel_port->uart_timer);
1992 /* Make sure that no interrupt is on the fly */
1993 synchronize_irq(port->irq);
1996 * Clear out any scheduled tasklets before
1997 * we destroy the buffers
1999 tasklet_kill(&atmel_port->tasklet_rx);
2000 tasklet_kill(&atmel_port->tasklet_tx);
2003 * Ensure everything is stopped and
2004 * disable port and break condition.
2006 atmel_stop_rx(port);
2007 atmel_stop_tx(port);
2009 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
2012 * Shut-down the DMA.
2014 if (atmel_port->release_rx)
2015 atmel_port->release_rx(port);
2016 if (atmel_port->release_tx)
2017 atmel_port->release_tx(port);
2020 * Reset ring buffer pointers
2022 atmel_port->rx_ring.head = 0;
2023 atmel_port->rx_ring.tail = 0;
2026 * Free the interrupts
2028 free_irq(port->irq, port);
2030 atmel_flush_buffer(port);
/*
 * uart_ops.pm: gate the peripheral clock on open/resume and off on
 * close/suspend, saving and restoring the interrupt mask across the
 * power-down so re-enabled interrupts match the pre-suspend state.
 */
2034 * Power / Clock management.
2036 static void atmel_serial_pm(struct uart_port *port, unsigned int state,
2037 unsigned int oldstate)
2039 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2044 * Enable the peripheral clock for this serial port.
2045 * This is called on uart_open() or a resume event.
2047 clk_prepare_enable(atmel_port->clk);
2049 /* re-enable interrupts if we disabled some on suspend */
2050 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
2053 /* Back up the interrupt mask and disable all interrupts */
2054 atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
2055 atmel_uart_writel(port, ATMEL_US_IDR, -1);
2058 * Disable the peripheral clock for this serial port.
2059 * This is called on uart_close() or a suspend event.
2061 clk_disable_unprepare(atmel_port->clk);
2064 dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
/*
 * uart_ops.set_termios: translate the requested termios settings into
 * the USART mode register (character size, stop bits, parity, USART
 * mode), compute the baud-rate divisor (with fractional part when the
 * IP supports it), update the read/ignore status masks, and reprogram
 * the controller with interrupts and the transceiver briefly disabled.
 */
2069 * Change the port parameters
2071 static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
2072 struct ktermios *old)
2074 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2075 unsigned long flags;
2076 unsigned int old_mode, mode, imr, quot, baud, div, cd, fp = 0;
2078 /* save the current mode register */
2079 mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
2081 /* reset the mode, clock divisor, parity, stop bits and data size */
2082 mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
2083 ATMEL_US_PAR | ATMEL_US_USMODE);
2085 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
2088 switch (termios->c_cflag & CSIZE) {
2090 mode |= ATMEL_US_CHRL_5;
2093 mode |= ATMEL_US_CHRL_6;
2096 mode |= ATMEL_US_CHRL_7;
2099 mode |= ATMEL_US_CHRL_8;
2104 if (termios->c_cflag & CSTOPB)
2105 mode |= ATMEL_US_NBSTOP_2;
2108 if (termios->c_cflag & PARENB) {
2109 /* Mark or Space parity */
2110 if (termios->c_cflag & CMSPAR) {
2111 if (termios->c_cflag & PARODD)
2112 mode |= ATMEL_US_PAR_MARK;
2114 mode |= ATMEL_US_PAR_SPACE;
2115 } else if (termios->c_cflag & PARODD)
2116 mode |= ATMEL_US_PAR_ODD;
2118 mode |= ATMEL_US_PAR_EVEN;
2120 mode |= ATMEL_US_PAR_NONE;
2122 spin_lock_irqsave(&port->lock, flags);
2124 port->read_status_mask = ATMEL_US_OVRE;
2125 if (termios->c_iflag & INPCK)
2126 port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2127 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2128 port->read_status_mask |= ATMEL_US_RXBRK;
2130 if (atmel_use_pdc_rx(port))
2131 /* need to enable error interrupts */
2132 atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);
2135 * Characters to ignore
2137 port->ignore_status_mask = 0;
2138 if (termios->c_iflag & IGNPAR)
2139 port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2140 if (termios->c_iflag & IGNBRK) {
2141 port->ignore_status_mask |= ATMEL_US_RXBRK;
2143 * If we're ignoring parity and break indicators,
2144 * ignore overruns too (for real raw support).
2146 if (termios->c_iflag & IGNPAR)
2147 port->ignore_status_mask |= ATMEL_US_OVRE;
2149 /* TODO: Ignore all characters if CREAD is set.*/
2151 /* update the per-port timeout */
2152 uart_update_timeout(port, termios->c_cflag, baud);
2155 * save/disable interrupts. The tty layer will ensure that the
2156 * transmitter is empty if requested by the caller, so there's
2157 * no need to wait for it here.
2159 imr = atmel_uart_readl(port, ATMEL_US_IMR);
2160 atmel_uart_writel(port, ATMEL_US_IDR, -1);
2162 /* disable receiver and transmitter */
2163 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
2166 if (port->rs485.flags & SER_RS485_ENABLED) {
2167 atmel_uart_writel(port, ATMEL_US_TTGR,
2168 port->rs485.delay_rts_after_send);
2169 mode |= ATMEL_US_USMODE_RS485;
2170 } else if (termios->c_cflag & CRTSCTS) {
2171 /* RS232 with hardware handshake (RTS/CTS) */
2172 if (atmel_use_fifo(port) &&
2173 !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
2175 * with ATMEL_US_USMODE_HWHS set, the controller will
2176 * be able to drive the RTS pin high/low when the RX
2177 * FIFO is above RXFTHRES/below RXFTHRES2.
2178 * It will also disable the transmitter when the CTS
2180 * This mode is not activated if CTS pin is a GPIO
2181 * because in this case, the transmitter is always
2182 * disabled (there must be an internal pull-up
2183 * responsible for this behaviour).
2184 * If the RTS pin is a GPIO, the controller won't be
2185 * able to drive it according to the FIFO thresholds,
2186 * but it will be handled by the driver.
2188 mode |= ATMEL_US_USMODE_HWHS;
2191 * For platforms without FIFO, the flow control is
2192 * handled by the driver.
2194 mode |= ATMEL_US_USMODE_NORMAL;
2197 /* RS232 without hardware handshake */
2198 mode |= ATMEL_US_USMODE_NORMAL;
2202 * Set the baud rate:
2203 * Fractional baudrate allows to setup output frequency more
2204 * accurately. This feature is enabled only when using normal mode.
2205 * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8))
2206 * Currently, OVER is always set to 0 so we get
2207 * baudrate = selected clock / (16 * (CD + FP / 8))
2209 * 8 CD + FP = selected clock / (2 * baudrate)
2211 if (atmel_port->has_frac_baudrate) {
2212 div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2);
2214 fp = div & ATMEL_US_FP_MASK;
2216 cd = uart_get_divisor(port, baud);
2219 if (cd > 65535) { /* BRGR is 16-bit, so switch to slower clock */
2221 mode |= ATMEL_US_USCLKS_MCK_DIV8;
2223 quot = cd | fp << ATMEL_US_FP_OFFSET;
2225 atmel_uart_writel(port, ATMEL_US_BRGR, quot);
2227 /* set the mode, clock divisor, parity, stop bits and data size */
2228 atmel_uart_writel(port, ATMEL_US_MR, mode);
2231 * when switching the mode, set the RTS line state according to the
2232 * new mode, otherwise keep the former state
2234 if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
2235 unsigned int rts_state;
2237 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
2238 /* let the hardware control the RTS line */
2239 rts_state = ATMEL_US_RTSDIS;
2241 /* force RTS line to low level */
2242 rts_state = ATMEL_US_RTSEN;
2245 atmel_uart_writel(port, ATMEL_US_CR, rts_state);
2248 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2249 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2251 /* restore interrupts */
2252 atmel_uart_writel(port, ATMEL_US_IER, imr);
2254 /* CTS flow-control and modem-status interrupts */
2255 if (UART_ENABLE_MS(port, termios->c_cflag))
2256 atmel_enable_ms(port);
2258 atmel_disable_ms(port);
2260 spin_unlock_irqrestore(&port->lock, flags);
/*
 * uart_ops.set_ldisc: when the PPS line discipline is selected, enable
 * hard DCD-based PPS handling and modem-status interrupts; otherwise
 * drop them again (unless termios still needs modem status).
 */
2263 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
2265 if (termios->c_line == N_PPS) {
2266 port->flags |= UPF_HARDPPS_CD;
2267 spin_lock_irq(&port->lock);
2268 atmel_enable_ms(port);
2269 spin_unlock_irq(&port->lock);
2271 port->flags &= ~UPF_HARDPPS_CD;
2272 if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2273 spin_lock_irq(&port->lock);
2274 atmel_disable_ms(port);
2275 spin_unlock_irq(&port->lock);
/* uart_ops.type: human-readable port type for /proc and TIOCGSERIAL. */
2281 * Return string describing the specified port
2283 static const char *atmel_type(struct uart_port *port)
2285 return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
/*
 * uart_ops.release_port: release the MMIO region claimed in
 * atmel_request_port() and undo the ioremap when we own the mapping.
 */
2289 * Release the memory region(s) being used by 'port'.
2291 static void atmel_release_port(struct uart_port *port)
2293 struct platform_device *pdev = to_platform_device(port->dev);
2294 int size = pdev->resource[0].end - pdev->resource[0].start + 1;
2296 release_mem_region(port->mapbase, size);
2298 if (port->flags & UPF_IOREMAP) {
2299 iounmap(port->membase);
2300 port->membase = NULL;
2305 * Request the memory region(s) being used by 'port'.
2307 static int atmel_request_port(struct uart_port *port)
2309 struct platform_device *pdev = to_platform_device(port->dev);
2310 int size = pdev->resource[0].end - pdev->resource[0].start + 1;
2312 if (!request_mem_region(port->mapbase, size, "atmel_serial"))
2315 if (port->flags & UPF_IOREMAP) {
2316 port->membase = ioremap(port->mapbase, size);
2317 if (port->membase == NULL) {
2318 release_mem_region(port->mapbase, size);
/* uart_ops.config_port: claim resources and mark the port as ATMEL. */
2327 * Configure/autoconfigure the port.
2329 static void atmel_config_port(struct uart_port *port, int flags)
2331 if (flags & UART_CONFIG_TYPE) {
2332 port->type = PORT_ATMEL;
2333 atmel_request_port(port);
/*
 * uart_ops.verify_port: sanity-check a TIOCSSERIAL request against the
 * port's fixed hardware parameters (type, irq, iomem, clocking).
 */
2338 * Verify the new serial_struct (for TIOCSSERIAL).
2340 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
2343 if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
2345 if (port->irq != ser->irq)
2347 if (ser->io_type != SERIAL_IO_MEM)
2349 if (port->uartclk / 16 != ser->baud_base)
2351 if (port->mapbase != (unsigned long)ser->iomem_base)
2353 if (port->iobase != ser->port)
/* kgdb/console-poll: busy-wait for a received character and return it. */
2360 #ifdef CONFIG_CONSOLE_POLL
2361 static int atmel_poll_get_char(struct uart_port *port)
2363 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
2366 return atmel_uart_read_char(port);
/* kgdb/console-poll: busy-wait for TX ready, then emit one character. */
2369 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
2371 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2374 atmel_uart_write_char(port, ch);
/* uart_ops vtable binding this driver's callbacks into serial_core. */
2378 static const struct uart_ops atmel_pops = {
2379 .tx_empty = atmel_tx_empty,
2380 .set_mctrl = atmel_set_mctrl,
2381 .get_mctrl = atmel_get_mctrl,
2382 .stop_tx = atmel_stop_tx,
2383 .start_tx = atmel_start_tx,
2384 .stop_rx = atmel_stop_rx,
2385 .enable_ms = atmel_enable_ms,
2386 .break_ctl = atmel_break_ctl,
2387 .startup = atmel_startup,
2388 .shutdown = atmel_shutdown,
2389 .flush_buffer = atmel_flush_buffer,
2390 .set_termios = atmel_set_termios,
2391 .set_ldisc = atmel_set_ldisc,
2393 .release_port = atmel_release_port,
2394 .request_port = atmel_request_port,
2395 .config_port = atmel_config_port,
2396 .verify_port = atmel_verify_port,
2397 .pm = atmel_serial_pm,
2398 #ifdef CONFIG_CONSOLE_POLL
2399 .poll_get_char = atmel_poll_get_char,
2400 .poll_put_char = atmel_poll_put_char,
/*
 * One-time port initialization from platform-device resources: DT
 * properties, RS485 config, MMIO/IRQ resources, the "usart" clock
 * (which may already be held by the console), and the TX-done interrupt
 * mask matching the selected transmit engine.
 */
2405 * Configure the port from the platform device resource info.
2407 static int atmel_init_port(struct atmel_uart_port *atmel_port,
2408 struct platform_device *pdev)
2411 struct uart_port *port = &atmel_port->uart;
2413 atmel_init_property(atmel_port, pdev);
2414 atmel_set_ops(port);
2416 atmel_init_rs485(port, pdev);
2418 port->iotype = UPIO_MEM;
2419 port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
2420 port->ops = &atmel_pops;
2422 port->dev = &pdev->dev;
2423 port->mapbase = pdev->resource[0].start;
2424 port->irq = pdev->resource[1].start;
2425 port->rs485_config = atmel_config_rs485;
2426 port->membase = NULL;
2428 memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
2430 /* for console, the clock could already be configured */
2431 if (!atmel_port->clk) {
2432 atmel_port->clk = clk_get(&pdev->dev, "usart");
2433 if (IS_ERR(atmel_port->clk)) {
2434 ret = PTR_ERR(atmel_port->clk);
2435 atmel_port->clk = NULL;
2438 ret = clk_prepare_enable(atmel_port->clk);
2440 clk_put(atmel_port->clk);
2441 atmel_port->clk = NULL;
/* Clock only needed briefly here to read its rate. */
2444 port->uartclk = clk_get_rate(atmel_port->clk);
2445 clk_disable_unprepare(atmel_port->clk);
2446 /* only enable clock when USART is in use */
2449 /* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */
2450 if (port->rs485.flags & SER_RS485_ENABLED)
2451 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
2452 else if (atmel_use_pdc_tx(port)) {
2453 port->fifosize = PDC_BUFFER_SIZE;
2454 atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
2456 atmel_port->tx_done_mask = ATMEL_US_TXRDY;
/* Console helper: busy-wait for TX ready, then write one character. */
2462 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2463 static void atmel_console_putchar(struct uart_port *port, int ch)
2465 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2467 atmel_uart_write_char(port, ch);
/*
 * console.write: emit a kernel message over polled TX. TX interrupts
 * and any in-flight PDC transmit are paused around the write and
 * restored afterwards so normal driver operation is not disturbed.
 */
2471 * Interrupts are disabled on entering
2473 static void atmel_console_write(struct console *co, const char *s, u_int count)
2475 struct uart_port *port = &atmel_ports[co->index].uart;
2476 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2477 unsigned int status, imr;
2478 unsigned int pdc_tx;
2481 * First, save IMR and then disable interrupts
2483 imr = atmel_uart_readl(port, ATMEL_US_IMR);
2484 atmel_uart_writel(port, ATMEL_US_IDR,
2485 ATMEL_US_RXRDY | atmel_port->tx_done_mask);
2487 /* Store PDC transmit status and disable it */
2488 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
2489 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
2491 /* Make sure that tx path is actually able to send characters */
2492 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
2494 uart_console_write(port, s, count, atmel_console_putchar);
2497 * Finally, wait for transmitter to become empty
2501 status = atmel_uart_readl(port, ATMEL_US_CSR);
2502 } while (!(status & ATMEL_US_TXRDY));
2504 /* Restore PDC transmit status */
2506 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
2508 /* set interrupts back the way they were */
2509 atmel_uart_writel(port, ATMEL_US_IER, imr);
/*
 * Recover baud rate, parity and word length from the hardware when a
 * boot loader already initialized the port (BRGR running).
 */
2513 * If the port was already initialised (eg, by a boot loader),
2514 * try to determine the current setup.
2516 static void __init atmel_console_get_options(struct uart_port *port, int *baud,
2517 int *parity, int *bits)
2519 unsigned int mr, quot;
2522 * If the baud rate generator isn't running, the port wasn't
2523 * initialized by the boot loader.
2525 quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
2529 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
2530 if (mr == ATMEL_US_CHRL_8)
2535 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
2536 if (mr == ATMEL_US_PAR_EVEN)
2538 else if (mr == ATMEL_US_PAR_ODD)
2541 *baud = port->uartclk / (16 * quot);
/*
 * console.setup: enable the clock, reset and enable the port, then
 * apply either the user-supplied console options or values probed from
 * the hardware. Defers (returns early) if the port isn't mapped yet.
 */
2544 static int __init atmel_console_setup(struct console *co, char *options)
2547 struct uart_port *port = &atmel_ports[co->index].uart;
2553 if (port->membase == NULL) {
2554 /* Port not initialized yet - delay setup */
2558 ret = clk_prepare_enable(atmel_ports[co->index].clk);
2562 atmel_uart_writel(port, ATMEL_US_IDR, -1);
2563 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2564 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2567 uart_parse_options(options, &baud, &parity, &bits, &flow);
2569 atmel_console_get_options(port, &baud, &parity, &bits);
2571 return uart_set_options(port, co, baud, parity, bits, flow);
/* Forward declaration: the console structure below points at the driver. */
2574 static struct uart_driver atmel_uart;
/*
 * Console descriptor registered with the console core.  CON_PRINTBUFFER
 * makes the kernel replay the log buffer when this console is enabled.
 */
2576 static struct console atmel_console = {
2577 .name = ATMEL_DEVICENAME,
2578 .write = atmel_console_write,
2579 .device = uart_console_device,
2580 .setup = atmel_console_setup,
2581 .flags = CON_PRINTBUFFER,
2583 .data = &atmel_uart,
/* Plugged into atmel_uart.cons when console support is compiled in. */
2586 #define ATMEL_CONSOLE_DEVICE (&atmel_console)
/*
 * atmel_is_console_port() - true if @port is the active system console.
 * NOTE(review): the #ifdef/#else/#endif lines separating the console and
 * no-console variants are not visible in this extract; the second variant
 * presumably returns false.
 */
2588 static inline bool atmel_is_console_port(struct uart_port *port)
2590 return port->cons && port->cons->index == port->line;
/* No-console build: there is no console device at all. */
2594 #define ATMEL_CONSOLE_DEVICE NULL
2596 static inline bool atmel_is_console_port(struct uart_port *port)
/*
 * High-level uart_driver shared by every Atmel USART/UART port; registered
 * once in atmel_serial_init() and referenced by suspend/resume and probe.
 */
2602 static struct uart_driver atmel_uart = {
2603 .owner = THIS_MODULE,
2604 .driver_name = "atmel_serial",
2605 .dev_name = ATMEL_DEVICENAME,
2606 .major = SERIAL_ATMEL_MAJOR,
2607 .minor = MINOR_START,
2608 .nr = ATMEL_MAX_UART,
/* NULL when console support is compiled out (see ATMEL_CONSOLE_DEVICE). */
2609 .cons = ATMEL_CONSOLE_DEVICE,
/*
 * atmel_serial_clk_will_stop() - true when entering suspend will stop the
 * peripheral clock (AT91 slow-clock mode), in which case the UART cannot
 * act as a wakeup source.  NOTE(review): the non-AT91 fallback (presumably
 * "return false;") and the #endif are not visible in this extract.
 */
2613 static bool atmel_serial_clk_will_stop(void)
2615 #ifdef CONFIG_ARCH_AT91
2616 return at91_suspend_entering_slow_clock();
/*
 * atmel_serial_suspend() - platform_driver legacy suspend hook.
 *
 * Drains the console TX path, caches registers for a console port that will
 * not go through a full shutdown/startup, records whether wakeup is allowed
 * (impossible when the slow clock stops), and finally hands the port to the
 * serial core via uart_suspend_port().
 */
2622 static int atmel_serial_suspend(struct platform_device *pdev,
2625 struct uart_port *port = platform_get_drvdata(pdev);
2626 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2628 if (atmel_is_console_port(port) && console_suspend_enabled) {
2629 /* Drain the TX shifter */
/* Busy-wait on CSR; the full condition (presumably TXEMPTY) is cut
 * off in this extract. */
2630 while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
/* Console kept alive across suspend: snapshot the registers a normal
 * shutdown/startup cycle would otherwise reprogram. */
2635 if (atmel_is_console_port(port) && !console_suspend_enabled) {
2636 /* Cache register values as we won't get a full shutdown/startup
2639 atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR);
2640 atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR);
2641 atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
2642 atmel_port->cache.rtor = atmel_uart_readl(port,
2644 atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR);
2645 atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR);
2646 atmel_port->cache.fimr = atmel_uart_readl(port, ATMEL_US_FIMR);
2649 /* we can not wake up if we're running on slow clock */
2650 atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
2651 if (atmel_serial_clk_will_stop()) {
2652 unsigned long flags;
/* Mark suspended under the lock shared with the IRQ handler so late
 * interrupts are parked in ->pending until resume. */
2654 spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2655 atmel_port->suspended = true;
2656 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2657 device_set_wakeup_enable(&pdev->dev, 0);
2660 uart_suspend_port(&atmel_uart, port);
/*
 * atmel_serial_resume() - platform_driver legacy resume hook.
 *
 * Mirror of atmel_serial_suspend(): restores the cached register set for a
 * console that stayed "live", replays any interrupt work that arrived while
 * suspended, then resumes the port through the serial core and re-enables
 * wakeup as recorded at suspend time.
 */
2665 static int atmel_serial_resume(struct platform_device *pdev)
2667 struct uart_port *port = platform_get_drvdata(pdev);
2668 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2669 unsigned long flags;
/* Console was not suspended: write back the snapshot taken in suspend. */
2671 if (atmel_is_console_port(port) && !console_suspend_enabled) {
2672 atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr);
2673 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr);
2674 atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr);
/* rtor is a per-IP register offset stored in atmel_port. */
2675 atmel_uart_writel(port, atmel_port->rtor,
2676 atmel_port->cache.rtor);
2677 atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr);
/* FIFO-capable IP: re-enable and clear FIFOs before restoring their
 * mode and interrupt-mask registers. */
2679 if (atmel_port->fifo_size) {
2680 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_FIFOEN |
2681 ATMEL_US_RXFCLR | ATMEL_US_TXFLCLR);
2682 atmel_uart_writel(port, ATMEL_US_FMR,
2683 atmel_port->cache.fmr);
2684 atmel_uart_writel(port, ATMEL_US_FIER,
2685 atmel_port->cache.fimr);
2687 atmel_start_rx(port);
/* Replay interrupt causes parked by the IRQ handler while suspended. */
2690 spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2691 if (atmel_port->pending) {
2692 atmel_handle_receive(port, atmel_port->pending);
2693 atmel_handle_status(port, atmel_port->pending,
2694 atmel_port->pending_status);
2695 atmel_handle_transmit(port, atmel_port->pending);
2696 atmel_port->pending = 0;
2698 atmel_port->suspended = false;
2699 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2701 uart_resume_port(&atmel_uart, port);
2702 device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
/* No PM support compiled in: leave the platform_driver hooks unset. */
2707 #define atmel_serial_suspend NULL
2708 #define atmel_serial_resume NULL
/*
 * atmel_serial_probe_fifos() - read the optional FIFO size from the device
 * tree and derive the RTS high/low watermarks used for hardware flow
 * control.  All three fields stay 0 when no (valid) FIFO is described.
 *
 * @atmel_port: port whose fifo_size/rts_high/rts_low fields are filled in
 * @pdev:       platform device carrying the of_node to query
 */
2711 static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
2712 struct platform_device *pdev)
2714 atmel_port->fifo_size = 0;
2715 atmel_port->rts_low = 0;
2716 atmel_port->rts_high = 0;
/* Property name is truncated in this extract (presumably
 * "atmel,fifo-size") — TODO confirm against the binding. */
2718 if (of_property_read_u32(pdev->dev.of_node,
2720 &atmel_port->fifo_size))
2723 if (!atmel_port->fifo_size)
/* Reject FIFOs smaller than the driver can meaningfully use. */
2726 if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
2727 atmel_port->fifo_size = 0;
2728 dev_err(&pdev->dev, "Invalid FIFO size\n");
2733 * 0 <= rts_low <= rts_high <= fifo_size
2734 * Once their CTS line is asserted by the remote peer, some x86 UARTs tend
2735 * to flush their internal TX FIFO, commonly up to 16 data, before
2736 * actually stopping to send new data. So we try to set the RTS High
2737 * Threshold to a reasonably high value respecting this 16 data
2738 * empirical rule when possible.
2740 atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1,
2741 atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
2742 atmel_port->rts_low = max_t(int, atmel_port->fifo_size >> 2,
2743 atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET);
2745 dev_info(&pdev->dev, "Using FIFO (%u data)\n",
2746 atmel_port->fifo_size);
2747 dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
2748 atmel_port->rts_high);
2749 dev_dbg(&pdev->dev, "RTS Low Threshold : %2u data\n",
2750 atmel_port->rts_low);
/*
 * atmel_serial_probe() - platform_driver probe: allocate a port slot,
 * initialise clocks/registers/GPIOs, and register the port with the serial
 * core.  Error paths unwind via the goto labels at the bottom (several
 * label lines are not visible in this extract).
 */
2753 static int atmel_serial_probe(struct platform_device *pdev)
2755 struct atmel_uart_port *atmel_port;
2756 struct device_node *np = pdev->dev.of_node;
/* Ring size must be a power of two (index masking relies on it). */
2761 BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
/* Prefer the DT "serial" alias as the port number... */
2763 ret = of_alias_get_id(np, "serial");
2765 /* port id not found in platform data nor device-tree aliases:
2766 * auto-enumerate it */
2767 ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
2769 if (ret >= ATMEL_MAX_UART) {
/* ...and claim it atomically so two probes can't share a slot. */
2774 if (test_and_set_bit(ret, atmel_ports_in_use)) {
2775 /* port already in use */
2780 atmel_port = &atmel_ports[ret];
2781 atmel_port->backup_imr = 0;
2782 atmel_port->uart.line = ret;
2783 atmel_serial_probe_fifos(atmel_port, pdev);
2785 atomic_set(&atmel_port->tasklet_shutdown, 0);
2786 spin_lock_init(&atmel_port->lock_suspended);
2788 ret = atmel_init_port(atmel_port, pdev);
2792 atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
2793 if (IS_ERR(atmel_port->gpios)) {
2794 ret = PTR_ERR(atmel_port->gpios);
/* Non-PDC RX uses a software ring buffer for received characters. */
2798 if (!atmel_use_pdc_rx(&atmel_port->uart)) {
2800 data = kmalloc(sizeof(struct atmel_uart_char)
2801 * ATMEL_SERIAL_RINGSIZE, GFP_KERNEL);
2803 goto err_alloc_ring;
2804 atmel_port->rx_ring.buf = data;
/* Remember RS485 state before uart_add_one_port() may alter it. */
2807 rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED;
2809 ret = uart_add_one_port(&atmel_uart, &atmel_port->uart);
2813 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2814 if (atmel_is_console_port(&atmel_port->uart)
2815 && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
2817 * The serial core enabled the clock for us, so undo
2818 * the clk_prepare_enable() in atmel_console_setup()
2820 clk_disable_unprepare(atmel_port->clk);
2824 device_init_wakeup(&pdev->dev, 1);
2825 platform_set_drvdata(pdev, atmel_port);
2828 * The peripheral clock has been disabled by atmel_init_port():
2829 * enable it before accessing I/O registers
2831 clk_prepare_enable(atmel_port->clk);
/* RS485 ports start in normal mode with the transmitter reset. */
2833 if (rs485_enabled) {
2834 atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
2835 ATMEL_US_USMODE_NORMAL);
2836 atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR,
2841 * Get port name of usart or uart
2843 atmel_get_ip_name(&atmel_port->uart);
2846 * The peripheral clock can now safely be disabled till the port
2849 clk_disable_unprepare(atmel_port->clk);
/* --- error unwinding (labels stripped in this extract) --- */
2854 kfree(atmel_port->rx_ring.buf);
2855 atmel_port->rx_ring.buf = NULL;
/* The console keeps its clk reference; only drop it otherwise. */
2857 if (!atmel_is_console_port(&atmel_port->uart)) {
2858 clk_put(atmel_port->clk);
2859 atmel_port->clk = NULL;
2862 clear_bit(atmel_port->uart.line, atmel_ports_in_use);
2868 * Even if the driver is not modular, it makes sense to be able to
2869 * unbind a device: there can be many bound devices, and there are
2870 * situations where dynamic binding and unbinding can be useful.
2872 * For example, a connected device can require a specific firmware update
2873 * protocol that needs bitbanging on IO lines, but use the regular serial
2874 * port in the normal case.
/*
 * atmel_serial_remove() - platform_driver remove: tear down tasklets,
 * unregister the port from the serial core, free the RX ring and release
 * the slot and clock taken in probe.  The uart_port itself lives in the
 * static atmel_ports[] array and is never freed.
 */
2876 static int atmel_serial_remove(struct platform_device *pdev)
2878 struct uart_port *port = platform_get_drvdata(pdev);
2879 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
/* Stop deferred work before the port structures go away. */
2882 tasklet_kill(&atmel_port->tasklet_rx);
2883 tasklet_kill(&atmel_port->tasklet_tx);
2885 device_init_wakeup(&pdev->dev, 0);
2887 ret = uart_remove_one_port(&atmel_uart, port);
2889 kfree(atmel_port->rx_ring.buf);
2891 /* "port" is allocated statically, so we shouldn't free it */
/* Release the port number so a future probe can reuse it. */
2893 clear_bit(port->line, atmel_ports_in_use);
2895 clk_put(atmel_port->clk);
2896 atmel_port->clk = NULL;
/*
 * Platform driver glue.  Uses the legacy platform suspend/resume hooks
 * (NULL when PM support is compiled out, see the stubs above).
 */
2901 static struct platform_driver atmel_serial_driver = {
2902 .probe = atmel_serial_probe,
2903 .remove = atmel_serial_remove,
2904 .suspend = atmel_serial_suspend,
2905 .resume = atmel_serial_resume,
2907 .name = "atmel_usart",
2908 .of_match_table = of_match_ptr(atmel_serial_dt_ids),
/*
 * atmel_serial_init() - register the uart_driver first, then the platform
 * driver; unregister the former if the latter fails so nothing leaks.
 */
2912 static int __init atmel_serial_init(void)
2916 ret = uart_register_driver(&atmel_uart);
2920 ret = platform_driver_register(&atmel_serial_driver);
2922 uart_unregister_driver(&atmel_uart);
/* device_initcall: built-in only; no module exit path is provided. */
2926 device_initcall(atmel_serial_init);