// SPDX-License-Identifier: GPL-2.0+
/*
 * 8250_dma.c - DMA Engine API support for 8250.c
 *
 * Copyright (C) 2013 Intel Corporation
 */
8 #include <linux/tty_flip.h>
9 #include <linux/serial_reg.h>
10 #include <linux/dma-mapping.h>
14 static void __dma_tx_complete(void *param)
16 struct uart_8250_port *p = param;
17 struct uart_8250_dma *dma = p->dma;
18 struct circ_buf *xmit = &p->port.state->xmit;
22 dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
23 UART_XMIT_SIZE, DMA_TO_DEVICE);
25 spin_lock_irqsave(&p->port.lock, flags);
29 xmit->tail += dma->tx_size;
30 xmit->tail &= UART_XMIT_SIZE - 1;
31 p->port.icount.tx += dma->tx_size;
33 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
34 uart_write_wakeup(&p->port);
36 ret = serial8250_tx_dma(p);
38 p->ier |= UART_IER_THRI;
39 serial_port_out(&p->port, UART_IER, p->ier);
42 spin_unlock_irqrestore(&p->port.lock, flags);
45 static void __dma_rx_complete(void *param)
47 struct uart_8250_port *p = param;
48 struct uart_8250_dma *dma = p->dma;
49 struct tty_port *tty_port = &p->port.state->port;
50 struct dma_tx_state state;
51 enum dma_status dma_status;
55 * New DMA Rx can be started during the completion handler before it
56 * could acquire port's lock and it might still be ongoing. Don't to
57 * anything in such case.
59 dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
60 if (dma_status == DMA_IN_PROGRESS)
63 count = dma->rx_size - state.residue;
65 tty_insert_flip_string(tty_port, dma->rx_buf, count);
66 p->port.icount.rx += count;
69 tty_flip_buffer_push(tty_port);
72 static void dma_rx_complete(void *param)
74 struct uart_8250_port *p = param;
75 struct uart_8250_dma *dma = p->dma;
78 spin_lock_irqsave(&p->port.lock, flags);
81 spin_unlock_irqrestore(&p->port.lock, flags);
84 int serial8250_tx_dma(struct uart_8250_port *p)
86 struct uart_8250_dma *dma = p->dma;
87 struct circ_buf *xmit = &p->port.state->xmit;
88 struct dma_async_tx_descriptor *desc;
94 if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
95 /* We have been called from __dma_tx_complete() */
96 serial8250_rpm_put_tx(p);
100 dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
102 desc = dmaengine_prep_slave_single(dma->txchan,
103 dma->tx_addr + xmit->tail,
104 dma->tx_size, DMA_MEM_TO_DEV,
105 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
112 desc->callback = __dma_tx_complete;
113 desc->callback_param = p;
115 dma->tx_cookie = dmaengine_submit(desc);
117 dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
118 UART_XMIT_SIZE, DMA_TO_DEVICE);
120 dma_async_issue_pending(dma->txchan);
123 if (p->ier & UART_IER_THRI) {
124 p->ier &= ~UART_IER_THRI;
125 serial_out(p, UART_IER, p->ier);
134 int serial8250_rx_dma(struct uart_8250_port *p)
136 struct uart_8250_dma *dma = p->dma;
137 struct dma_async_tx_descriptor *desc;
142 desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
143 dma->rx_size, DMA_DEV_TO_MEM,
144 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
149 desc->callback = dma_rx_complete;
150 desc->callback_param = p;
152 dma->rx_cookie = dmaengine_submit(desc);
154 dma_async_issue_pending(dma->rxchan);
159 void serial8250_rx_dma_flush(struct uart_8250_port *p)
161 struct uart_8250_dma *dma = p->dma;
163 if (dma->rx_running) {
164 dmaengine_pause(dma->rxchan);
165 __dma_rx_complete(p);
166 dmaengine_terminate_async(dma->rxchan);
169 EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);
171 int serial8250_request_dma(struct uart_8250_port *p)
173 struct uart_8250_dma *dma = p->dma;
174 phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
175 dma->rx_dma_addr : p->port.mapbase;
176 phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
177 dma->tx_dma_addr : p->port.mapbase;
179 struct dma_slave_caps caps;
182 /* Default slave configuration parameters */
183 dma->rxconf.direction = DMA_DEV_TO_MEM;
184 dma->rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
185 dma->rxconf.src_addr = rx_dma_addr + UART_RX;
187 dma->txconf.direction = DMA_MEM_TO_DEV;
188 dma->txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
189 dma->txconf.dst_addr = tx_dma_addr + UART_TX;
192 dma_cap_set(DMA_SLAVE, mask);
194 /* Get a channel for RX */
195 dma->rxchan = dma_request_slave_channel_compat(mask,
196 dma->fn, dma->rx_param,
201 /* 8250 rx dma requires dmaengine driver to support pause/terminate */
202 ret = dma_get_slave_caps(dma->rxchan, &caps);
205 if (!caps.cmd_pause || !caps.cmd_terminate ||
206 caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
211 dmaengine_slave_config(dma->rxchan, &dma->rxconf);
213 /* Get a channel for TX */
214 dma->txchan = dma_request_slave_channel_compat(mask,
215 dma->fn, dma->tx_param,
222 /* 8250 tx dma requires dmaengine driver to support terminate */
223 ret = dma_get_slave_caps(dma->txchan, &caps);
226 if (!caps.cmd_terminate) {
231 dmaengine_slave_config(dma->txchan, &dma->txconf);
235 dma->rx_size = PAGE_SIZE;
237 dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
238 &dma->rx_addr, GFP_KERNEL);
245 dma->tx_addr = dma_map_single(dma->txchan->device->dev,
246 p->port.state->xmit.buf,
249 if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
250 dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
251 dma->rx_buf, dma->rx_addr);
256 dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");
260 dma_release_channel(dma->txchan);
262 dma_release_channel(dma->rxchan);
265 EXPORT_SYMBOL_GPL(serial8250_request_dma);
267 void serial8250_release_dma(struct uart_8250_port *p)
269 struct uart_8250_dma *dma = p->dma;
274 /* Release RX resources */
275 dmaengine_terminate_sync(dma->rxchan);
276 dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
278 dma_release_channel(dma->rxchan);
281 /* Release TX resources */
282 dmaengine_terminate_sync(dma->txchan);
283 dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
284 UART_XMIT_SIZE, DMA_TO_DEVICE);
285 dma_release_channel(dma->txchan);
289 dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
291 EXPORT_SYMBOL_GPL(serial8250_release_dma);