[PATCH 6/6] Serial: AT91: Enable rx cyclic DMA transfer

Elen Song elen.song at atmel.com
Mon Oct 29 05:11:08 EDT 2012


To prevent receive overruns, use a cyclic DMA transfer for RX.

atmel_allocate_desc prepares a cyclic DMA descriptor and submits it to
obtain a cookie, once the channel has been requested. The UART timeout
interrupt is then enabled in the startup stage. When data has been
received, the timeout handler checks the residue reported by the DMA
engine and inserts the newly arrived bytes into the tty layer between
transfers.

When the current period of the transfer completes, the DMA completion
callback performs the same residue check and flips the received data
to the tty layer.

Signed-off-by: Elen Song <elen.song at atmel.com>
---
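A minimal sketch of the cyclic-descriptor setup pattern this patch
relies on, as a note for reviewers. prepare_cyclic_rx() is a
hypothetical helper; chan, sg, port and rx_complete stand in for the
driver's channel, scatterlist, uart_port and callback:

/*
 * Sketch only: prepare one cyclic RX descriptor over the whole ring
 * buffer, split into two periods so the callback fires at each half.
 * The engine wraps back to the buffer start automatically.
 */
static int prepare_cyclic_rx(struct dma_chan *chan, struct scatterlist *sg,
			     void *port, dma_async_tx_callback rx_complete)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_dma_cyclic(chan,
					 sg_dma_address(sg),  /* ring buffer bus address */
					 sg_dma_len(sg),      /* total buffer size */
					 sg_dma_len(sg) / 2,  /* period: half the buffer */
					 DMA_DEV_TO_MEM);     /* peripheral -> memory */
	if (!desc)
		return -EBUSY;		/* controller refused the transfer */

	desc->callback = rx_complete;	/* runs at the end of each period */
	desc->callback_param = port;

	/* the cookie is what dmaengine_tx_status() is queried with later */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;
	return 0;
}

Splitting the buffer into two periods means the completion callback
fires while the other half is still being filled, which is why the
completion and timeout paths can share the same residue check.
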
 drivers/tty/serial/atmel_serial.c |  129 ++++++++++++++++++++++++++++++++++++-
 1 file changed, 128 insertions(+), 1 deletion(-)
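
The residue arithmetic that both paths perform can be illustrated as
follows; the byte counts in the comments are hypothetical, and
atmel_port, ring, state and tty refer to the same objects as in the
hunks below:

	/*
	 * Sketch of the bookkeeping in atmel_rx_from_dma() below.
	 * state.residue counts the bytes the engine has NOT yet written
	 * in the current cycle, so its write position is
	 * buffer_size - residue. Example with a 1024-byte ring buffer:
	 * residue == 824 means 200 bytes are available; if ring->head
	 * is already 150, only the 50 bytes at offsets 150..199 are new.
	 */
	pending = sg_dma_len(&atmel_port->sg_rx) - state.residue;
	if (pending > ring->head) {
		count = pending - ring->head;	/* bytes not yet pushed */
		tty_insert_flip_string(tty, ring->buf + ring->head, count);
		ring->head += count;
		if (ring->head == sg_dma_len(&atmel_port->sg_rx))
			ring->head = 0;		/* consumed to the end: wrap */
	}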

diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 8ee9023..d3ba9ab 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -788,6 +788,40 @@ static void atmel_tx_request_dma(struct atmel_uart_port *atmel_port)
 	}
 }
 
+static void atmel_rx_dma_flip_buffer(struct uart_port *port,
+					char *buf, size_t count)
+{
+	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+	struct tty_struct *tty = port->state->port.tty;
+
+	dma_sync_sg_for_cpu(port->dev,
+				&atmel_port->sg_rx,
+				1,
+				DMA_FROM_DEVICE);
+
+	tty_insert_flip_string(tty, buf, count);
+
+	dma_sync_sg_for_device(port->dev,
+				&atmel_port->sg_rx,
+				1,
+				DMA_FROM_DEVICE);
+	/*
+	 * Drop the lock here since it might end up calling
+	 * uart_start(), which takes the lock.
+	 */
+	spin_unlock(&port->lock);
+	tty_flip_buffer_push(tty);
+	spin_lock(&port->lock);
+}
+
+static void atmel_dma_rx_complete(void *arg)
+{
+	struct uart_port *port = arg;
+	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+	tasklet_schedule(&atmel_port->tasklet);
+}
+
 static void atmel_rx_dma_release(struct atmel_uart_port *atmel_port)
 {
 	struct dma_chan *chan = atmel_port->chan_rx;
@@ -804,6 +838,46 @@ static void atmel_rx_dma_release(struct atmel_uart_port *atmel_port)
 	}
 }
 
+static void atmel_rx_from_dma(struct uart_port *port)
+{
+	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+	struct circ_buf *ring = &atmel_port->rx_ring;
+	struct dma_chan *chan = atmel_port->chan_rx;
+	struct dma_tx_state state;
+	enum dma_status dmastat;
+	size_t pending, count;
+
+	/* Reset the UART timeout early so that we don't miss one */
+	UART_PUT_CR(port, ATMEL_US_STTTO);
+	dmastat = dmaengine_tx_status(chan,
+				atmel_port->cookie_rx,
+				&state);
+	if (dmastat == DMA_ERROR)
+		dev_err(port->dev, "DMA status err\n");
+	/* The transferred size can never exceed the DMA buffer size */
+	pending = sg_dma_len(&atmel_port->sg_rx) - state.residue;
+	BUG_ON(pending > sg_dma_len(&atmel_port->sg_rx));
+
+	/*
+	 * Take the chars received so far: ring->head records how much
+	 * has already been pushed to the tty layer, so only newly
+	 * arrived bytes are inserted.
+	 */
+	if (pending > ring->head) {
+		count = pending - ring->head;
+
+		atmel_rx_dma_flip_buffer(port, ring->buf + ring->head, count);
+
+		ring->head += count;
+		if (ring->head == sg_dma_len(&atmel_port->sg_rx))
+			ring->head = 0;
+
+		port->icount.rx += count;
+	}
+
+	UART_PUT_IER(port, ATMEL_US_TIMEOUT);
+}
+
 static void atmel_rx_request_dma(struct atmel_uart_port *atmel_port)
 {
 	struct uart_port	*port;
@@ -880,6 +954,41 @@ static void atmel_rx_request_dma(struct atmel_uart_port *atmel_port)
 	}
 }
 
+static int atmel_allocate_desc(struct uart_port *port)
+{
+	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+	struct dma_async_tx_descriptor *desc;
+	struct dma_chan *chan = atmel_port->chan_rx;
+
+	if (!chan) {
+		dev_warn(port->dev, "No channel available\n");
+		goto err_dma;
+	}
+	/*
+	 * Prepare a cyclic DMA transfer with two periods, each
+	 * covering half of the ring buffer.
+	 */
+	desc = dmaengine_prep_dma_cyclic(chan,
+				sg_dma_address(&atmel_port->sg_rx),
+				sg_dma_len(&atmel_port->sg_rx),
+				sg_dma_len(&atmel_port->sg_rx) / 2,
+				DMA_DEV_TO_MEM);
+	if (!desc) {
+		dev_err(port->dev, "Failed to prepare cyclic DMA\n");
+		goto err_dma;
+	}
+	desc->callback = atmel_dma_rx_complete;
+	desc->callback_param = port;
+	atmel_port->desc_rx = desc;
+	atmel_port->cookie_rx = dmaengine_submit(desc);
+
+	return 0;
+
+err_dma:
+	atmel_rx_dma_release(atmel_port);
+	return -EINVAL;
+}
+
 /*
  * receive interrupt handler.
  */
@@ -907,6 +1016,13 @@ atmel_handle_receive(struct uart_port *port, unsigned int pending)
 			atmel_pdc_rxerr(port, pending);
 	}
 
+	if (atmel_use_dma_rx(port)) {
+		if (pending & ATMEL_US_TIMEOUT) {
+			UART_PUT_IDR(port, ATMEL_US_TIMEOUT);
+			tasklet_schedule(&atmel_port->tasklet);
+		}
+	}
+
 	/* Interrupt receive */
 	if (pending & ATMEL_US_RXRDY)
 		atmel_rx_chars(port);
@@ -1212,6 +1328,8 @@ static void atmel_tasklet_func(unsigned long data)
 
 	if (atmel_use_pdc_rx(port))
 		atmel_rx_from_pdc(port);
+	else if (atmel_use_dma_rx(port))
+		atmel_rx_from_dma(port);
 	else
 		atmel_rx_from_ring(port);
 
@@ -1297,8 +1415,11 @@ static int atmel_startup(struct uart_port *port)
 	if (atmel_use_dma_tx(port))
 		atmel_tx_request_dma(atmel_port);
 
-	if (atmel_use_dma_rx(port))
+	if (atmel_use_dma_rx(port)) {
 		atmel_rx_request_dma(atmel_port);
+		if (atmel_allocate_desc(port))
+			return -EINVAL;
+	}
 	/*
 	 * If there is a specific "open" function (to register
 	 * control line interrupts)
@@ -1330,6 +1451,12 @@ static int atmel_startup(struct uart_port *port)
 		UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
 		/* enable PDC controller */
 		UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
+	} else if (atmel_use_dma_rx(port)) {
+		/* set UART timeout */
+		UART_PUT_RTOR(port, PDC_RX_TIMEOUT);
+		UART_PUT_CR(port, ATMEL_US_STTTO);
+
+		UART_PUT_IER(port, ATMEL_US_TIMEOUT);
 	} else {
 		/* enable receive only */
 		UART_PUT_IER(port, ATMEL_US_RXRDY);
-- 
1.7.9.5