aboutsummaryrefslogtreecommitdiffstats
path: root/target/linux/layerscape/patches-5.4/819-uart-0005-tty-serial-fsl_lpuart-enable-dma-mode-for-imx8qxp.patch
diff options
context:
space:
mode:
Diffstat (limited to 'target/linux/layerscape/patches-5.4/819-uart-0005-tty-serial-fsl_lpuart-enable-dma-mode-for-imx8qxp.patch')
-rw-r--r--target/linux/layerscape/patches-5.4/819-uart-0005-tty-serial-fsl_lpuart-enable-dma-mode-for-imx8qxp.patch551
1 files changed, 551 insertions, 0 deletions
diff --git a/target/linux/layerscape/patches-5.4/819-uart-0005-tty-serial-fsl_lpuart-enable-dma-mode-for-imx8qxp.patch b/target/linux/layerscape/patches-5.4/819-uart-0005-tty-serial-fsl_lpuart-enable-dma-mode-for-imx8qxp.patch
new file mode 100644
index 0000000000..1a2d6c2dfe
--- /dev/null
+++ b/target/linux/layerscape/patches-5.4/819-uart-0005-tty-serial-fsl_lpuart-enable-dma-mode-for-imx8qxp.patch
@@ -0,0 +1,551 @@
+From 0d6e214f5a257f9b53619ef8aa3b6e767189bdcf Mon Sep 17 00:00:00 2001
+From: Fugang Duan <fugang.duan@nxp.com>
+Date: Wed, 11 Sep 2019 16:21:06 +0800
+Subject: [PATCH] tty: serial: fsl_lpuart: enable dma mode for imx8qxp
+
+imx8qxp lpuart supports eDMA for dma mode, including the EOP
+(end-of-packet) feature. But eDMA cannot detect the correct DADDR for the
+current major loop in cyclic mode, so cyclic mode is not supported.
+
+The patch is to enable lpuart prep slave sg dma mode for imx8qxp.
+
+Signed-off-by: Fugang Duan <fugang.duan@nxp.com>
+---
+ drivers/tty/serial/fsl_lpuart.c | 280 +++++++++++++++++++++++++++++++---------
+ 1 file changed, 219 insertions(+), 61 deletions(-)
+
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -131,6 +131,7 @@
+ #define UARTBAUD_M10 0x20000000
+ #define UARTBAUD_TDMAE 0x00800000
+ #define UARTBAUD_RDMAE 0x00200000
++#define UARTBAUD_RIDMAE 0x00100000
+ #define UARTBAUD_MATCFG 0x00400000
+ #define UARTBAUD_BOTHEDGE 0x00020000
+ #define UARTBAUD_RESYNCDIS 0x00010000
+@@ -179,7 +180,7 @@
+ #define UARTCTRL_SBK 0x00010000
+ #define UARTCTRL_MA1IE 0x00008000
+ #define UARTCTRL_MA2IE 0x00004000
+-#define UARTCTRL_IDLECFG 0x00000100
++#define UARTCTRL_IDLECFG_OFF 0x8
+ #define UARTCTRL_LOOPS 0x00000080
+ #define UARTCTRL_DOZEEN 0x00000040
+ #define UARTCTRL_RSRC 0x00000020
+@@ -197,6 +198,7 @@
+ #define UARTDATA_MASK 0x3ff
+
+ #define UARTMODIR_IREN 0x00020000
++#define UARTMODIR_RTSWATER_S 0x8
+ #define UARTMODIR_TXCTSSRC 0x00000020
+ #define UARTMODIR_TXCTSC 0x00000010
+ #define UARTMODIR_RXRTSE 0x00000008
+@@ -210,6 +212,8 @@
+ #define UARTFIFO_RXUF 0x00010000
+ #define UARTFIFO_TXFLUSH 0x00008000
+ #define UARTFIFO_RXFLUSH 0x00004000
++#define UARTFIFO_RXIDEN_MASK 0x7
++#define UARTFIFO_RXIDEN_OFF 10
+ #define UARTFIFO_TXOFE 0x00000200
+ #define UARTFIFO_RXUFE 0x00000100
+ #define UARTFIFO_TXFE 0x00000080
+@@ -226,6 +230,9 @@
+ #define UARTWATER_TXWATER_OFF 0
+ #define UARTWATER_RXWATER_OFF 16
+
++#define UARTFIFO_RXIDEN_RDRF 0x3
++#define UARTCTRL_IDLECFG 0x7
++
+ /* Rx DMA timeout in ms, which is used to calculate Rx ring buffer size */
+ #define DMA_RX_TIMEOUT (10)
+
+@@ -253,6 +260,9 @@ struct lpuart_port {
+ unsigned int txfifo_size;
+ unsigned int rxfifo_size;
+
++ u8 rx_watermark;
++ bool dma_eeop;
++ bool rx_dma_cyclic;
+ bool lpuart_dma_tx_use;
+ bool lpuart_dma_rx_use;
+ struct dma_chan *dma_tx_chan;
+@@ -278,28 +288,38 @@ struct lpuart_soc_data {
+ enum lpuart_type devtype;
+ char iotype;
+ u8 reg_off;
++ u8 rx_watermark;
++ bool rx_dma_cyclic;
+ };
+
+ static const struct lpuart_soc_data vf_data = {
+ .devtype = VF610_LPUART,
+ .iotype = UPIO_MEM,
++ .rx_watermark = 1,
++ .rx_dma_cyclic = true,
+ };
+
+ static const struct lpuart_soc_data ls_data = {
+ .devtype = LS1021A_LPUART,
+ .iotype = UPIO_MEM32BE,
++ .rx_watermark = 0,
++ .rx_dma_cyclic = true,
+ };
+
+ static struct lpuart_soc_data imx7ulp_data = {
+ .devtype = IMX7ULP_LPUART,
+ .iotype = UPIO_MEM32,
+ .reg_off = IMX_REG_OFF,
++ .rx_watermark = 0,
++ .rx_dma_cyclic = true,
+ };
+
+ static struct lpuart_soc_data imx8qxp_data = {
+ .devtype = IMX8QXP_LPUART,
+ .iotype = UPIO_MEM32,
+ .reg_off = IMX_REG_OFF,
++ .rx_watermark = 31,
++ .rx_dma_cyclic = false,
+ };
+
+ static const struct of_device_id lpuart_dt_ids[] = {
+@@ -313,6 +333,7 @@ MODULE_DEVICE_TABLE(of, lpuart_dt_ids);
+
+ /* Forward declare this for the dma callbacks*/
+ static void lpuart_dma_tx_complete(void *arg);
++static int lpuart_sched_rx_dma(struct lpuart_port *sport);
+
+ static inline bool is_imx8qxp_lpuart(struct lpuart_port *sport)
+ {
+@@ -1000,19 +1021,15 @@ static irqreturn_t lpuart32_int(int irq,
+ if ((sts & UARTSTAT_TDRE) && !sport->lpuart_dma_tx_use)
+ lpuart32_txint(sport);
+
++ if (sport->lpuart_dma_rx_use && sport->dma_eeop)
++ sts &= ~UARTSTAT_IDLE;
++
+ lpuart32_write(&sport->port, sts, UARTSTAT);
+ return IRQ_HANDLED;
+ }
+
+-static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
++static void lpuart_rx_error_stat(struct lpuart_port *sport)
+ {
+- struct tty_port *port = &sport->port.state->port;
+- struct dma_tx_state state;
+- enum dma_status dmastat;
+- struct circ_buf *ring = &sport->rx_ring;
+- unsigned long flags;
+- int count = 0;
+-
+ if (lpuart_is_32(sport)) {
+ unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);
+
+@@ -1064,8 +1081,21 @@ static void lpuart_copy_rx_to_tty(struct
+ writeb(cr2, sport->port.membase + UARTCR2);
+ }
+ }
++}
++
++static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
++{
++ struct tty_port *port = &sport->port.state->port;
++ struct dma_tx_state state;
++ enum dma_status dmastat;
++ struct circ_buf *ring = &sport->rx_ring;
++ unsigned long flags;
++ int count = 0;
+
+- async_tx_ack(sport->dma_rx_desc);
++ if (!is_imx8qxp_lpuart(sport)) {
++ lpuart_rx_error_stat(sport);
++ async_tx_ack(sport->dma_rx_desc);
++ }
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+@@ -1128,7 +1158,33 @@ static void lpuart_copy_rx_to_tty(struct
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ tty_flip_buffer_push(port);
+- mod_timer(&sport->lpuart_timer, jiffies + sport->dma_rx_timeout);
++
++ if (!sport->dma_eeop)
++ mod_timer(&sport->lpuart_timer,
++ jiffies + sport->dma_rx_timeout);
++}
++
++static void lpuart_dma_rx_post_handler(struct lpuart_port *sport)
++{
++ unsigned long flags;
++ unsigned long rxcount;
++
++ spin_lock_irqsave(&sport->port.lock, flags);
++
++	/* For end of packet, clear the idle flag to avoid triggering
++	 * the next transfer. Only i.MX8x lpuart supports EEOP.
++	 */
++ if (sport->dma_eeop && lpuart_is_32(sport)) {
++ rxcount = lpuart32_read(&sport->port, UARTWATER);
++ rxcount = rxcount >> UARTWATER_RXCNT_OFF;
++ if (!rxcount)
++ lpuart32_write(&sport->port, UARTSTAT_IDLE, UARTSTAT);
++ }
++
++ lpuart_sched_rx_dma(sport);
++
++ spin_unlock_irqrestore(&sport->port.lock, flags);
++
+ }
+
+ static void lpuart_dma_rx_complete(void *arg)
+@@ -1136,6 +1192,8 @@ static void lpuart_dma_rx_complete(void
+ struct lpuart_port *sport = arg;
+
+ lpuart_copy_rx_to_tty(sport);
++ if (!sport->rx_dma_cyclic)
++ lpuart_dma_rx_post_handler(sport);
+ }
+
+ static void lpuart_timer_func(struct timer_list *t)
+@@ -1143,13 +1201,78 @@ static void lpuart_timer_func(struct tim
+ struct lpuart_port *sport = from_timer(sport, t, lpuart_timer);
+
+ lpuart_copy_rx_to_tty(sport);
++ if (!sport->rx_dma_cyclic) {
++ dmaengine_terminate_async(sport->dma_rx_chan);
++ lpuart_dma_rx_post_handler(sport);
++ }
+ }
+
+-static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
++static int lpuart_sched_rxdma_cyclic(struct lpuart_port *sport)
++{
++ sport->dma_rx_desc = dmaengine_prep_dma_cyclic(sport->dma_rx_chan,
++ sg_dma_address(&sport->rx_sgl),
++ sport->rx_sgl.length,
++ sport->rx_sgl.length / 2,
++ DMA_DEV_TO_MEM,
++ DMA_PREP_INTERRUPT);
++ if (!sport->dma_rx_desc) {
++ dev_err(sport->port.dev, "Cannot prepare cyclic DMA\n");
++ return -EFAULT;
++ }
++
++ return 0;
++}
++
++static int lpuart_sched_rxdma_slave_sg(struct lpuart_port *sport)
++{
++ dma_sync_sg_for_device(sport->port.dev, &sport->rx_sgl, 1,
++ DMA_FROM_DEVICE);
++ sport->dma_rx_desc = dmaengine_prep_slave_sg(sport->dma_rx_chan,
++ &sport->rx_sgl,
++ 1,
++ DMA_DEV_TO_MEM,
++ DMA_PREP_INTERRUPT);
++ if (!sport->dma_rx_desc) {
++ dev_err(sport->port.dev, "Cannot prepare slave_sg DMA\n");
++ return -EFAULT;
++ }
++ sport->rx_ring.tail = 0;
++ sport->rx_ring.head = 0;
++
++ return 0;
++}
++
++static int lpuart_sched_rx_dma(struct lpuart_port *sport)
++{
++ unsigned long temp;
++ int ret;
++
++ if (sport->rx_dma_cyclic)
++ ret = lpuart_sched_rxdma_cyclic(sport);
++ else
++ ret = lpuart_sched_rxdma_slave_sg(sport);
++
++ sport->dma_rx_desc->callback = lpuart_dma_rx_complete;
++ sport->dma_rx_desc->callback_param = sport;
++ sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc);
++ dma_async_issue_pending(sport->dma_rx_chan);
++
++ if (lpuart_is_32(sport)) {
++ temp = lpuart32_read(&sport->port, UARTBAUD);
++ if (sport->dma_eeop)
++ temp |= UARTBAUD_RIDMAE;
++ temp |= UARTBAUD_RDMAE;
++ lpuart32_write(&sport->port, temp, UARTBAUD);
++ } else {
++ writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_RDMAS,
++ sport->port.membase + UARTCR5);
++ }
++
++ return ret;
++}
++
++static void lpuart_get_rx_dma_rng_len(struct lpuart_port *sport)
+ {
+- struct dma_slave_config dma_rx_sconfig = {};
+- struct circ_buf *ring = &sport->rx_ring;
+- int ret, nent;
+ int bits, baud;
+ struct tty_port *port = &sport->port.state->port;
+ struct tty_struct *tty = port->tty;
+@@ -1169,6 +1292,18 @@ static inline int lpuart_start_rx_dma(st
+ sport->rx_dma_rng_buf_len = (1 << (fls(sport->rx_dma_rng_buf_len) - 1));
+ if (sport->rx_dma_rng_buf_len < 16)
+ sport->rx_dma_rng_buf_len = 16;
++}
++
++static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
++{
++ struct dma_slave_config dma_rx_sconfig = {};
++ struct circ_buf *ring = &sport->rx_ring;
++ int ret, nent;
++
++ if (!sport->dma_eeop)
++ lpuart_get_rx_dma_rng_len(sport);
++ else
++ sport->rx_dma_rng_buf_len = PAGE_SIZE;
+
+ ring->buf = kzalloc(sport->rx_dma_rng_buf_len, GFP_ATOMIC);
+ if (!ring->buf)
+@@ -1194,32 +1329,7 @@ static inline int lpuart_start_rx_dma(st
+ return ret;
+ }
+
+- sport->dma_rx_desc = dmaengine_prep_dma_cyclic(sport->dma_rx_chan,
+- sg_dma_address(&sport->rx_sgl),
+- sport->rx_sgl.length,
+- sport->rx_sgl.length / 2,
+- DMA_DEV_TO_MEM,
+- DMA_PREP_INTERRUPT);
+- if (!sport->dma_rx_desc) {
+- dev_err(sport->port.dev, "Cannot prepare cyclic DMA\n");
+- return -EFAULT;
+- }
+-
+- sport->dma_rx_desc->callback = lpuart_dma_rx_complete;
+- sport->dma_rx_desc->callback_param = sport;
+- sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc);
+- dma_async_issue_pending(sport->dma_rx_chan);
+-
+- if (lpuart_is_32(sport)) {
+- unsigned long temp = lpuart32_read(&sport->port, UARTBAUD);
+-
+- lpuart32_write(&sport->port, temp | UARTBAUD_RDMAE, UARTBAUD);
+- } else {
+- writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_RDMAS,
+- sport->port.membase + UARTCR5);
+- }
+-
+- return 0;
++ return lpuart_sched_rx_dma(sport);
+ }
+
+ static void lpuart_dma_rx_free(struct uart_port *port)
+@@ -1405,8 +1515,10 @@ static void lpuart_setup_watermark(struc
+ writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO);
+ }
+
++ if (uart_console(&sport->port))
++ sport->rx_watermark = 1;
+ writeb(0, sport->port.membase + UARTTWFIFO);
+- writeb(1, sport->port.membase + UARTRWFIFO);
++ writeb(sport->rx_watermark, sport->port.membase + UARTRWFIFO);
+
+ /* Restore cr2 */
+ writeb(cr2_saved, sport->port.membase + UARTCR2);
+@@ -1427,6 +1539,7 @@ static void lpuart32_setup_watermark(str
+ {
+ unsigned long val, ctrl;
+ unsigned long ctrl_saved;
++ unsigned long rxiden_cnt;
+
+ ctrl = lpuart32_read(&sport->port, UARTCTRL);
+ ctrl_saved = ctrl;
+@@ -1438,12 +1551,26 @@ static void lpuart32_setup_watermark(str
+ val = lpuart32_read(&sport->port, UARTFIFO);
+ val |= UARTFIFO_TXFE | UARTFIFO_RXFE;
+ val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
++ val &= ~(UARTFIFO_RXIDEN_MASK << UARTFIFO_RXIDEN_OFF);
++ rxiden_cnt = sport->dma_eeop ? 0 : UARTFIFO_RXIDEN_RDRF;
++ val |= ((rxiden_cnt & UARTFIFO_RXIDEN_MASK) <<
++ UARTFIFO_RXIDEN_OFF);
+ lpuart32_write(&sport->port, val, UARTFIFO);
+
+ /* set the watermark */
+- val = (0x1 << UARTWATER_RXWATER_OFF) | (0x0 << UARTWATER_TXWATER_OFF);
++ if (uart_console(&sport->port))
++ sport->rx_watermark = 1;
++ val = (sport->rx_watermark << UARTWATER_RXWATER_OFF) |
++ (0x0 << UARTWATER_TXWATER_OFF);
+ lpuart32_write(&sport->port, val, UARTWATER);
+
++ /* set RTS watermark */
++ if (!uart_console(&sport->port)) {
++ val = lpuart32_read(&sport->port, UARTMODIR);
++ val = (sport->rxfifo_size >> 1) << UARTMODIR_RTSWATER_S;
++ lpuart32_write(&sport->port, val, UARTMODIR);
++ }
++
+ /* Restore cr2 */
+ lpuart32_write(&sport->port, ctrl_saved, UARTCTRL);
+ }
+@@ -1455,17 +1582,29 @@ static void lpuart32_setup_watermark_ena
+ lpuart32_setup_watermark(sport);
+
+ temp = lpuart32_read(&sport->port, UARTCTRL);
+- temp |= UARTCTRL_RE | UARTCTRL_TE | UARTCTRL_ILIE;
++ temp |= UARTCTRL_RE | UARTCTRL_TE;
++ temp |= UARTCTRL_IDLECFG << UARTCTRL_IDLECFG_OFF;
+ lpuart32_write(&sport->port, temp, UARTCTRL);
+ }
+
+ static void rx_dma_timer_init(struct lpuart_port *sport)
+ {
++ if (sport->dma_eeop)
++ return;
++
+ timer_setup(&sport->lpuart_timer, lpuart_timer_func, 0);
+ sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
+ add_timer(&sport->lpuart_timer);
+ }
+
++static void lpuart_del_timer_sync(struct lpuart_port *sport)
++{
++ if (sport->dma_eeop)
++ return;
++
++ del_timer_sync(&sport->lpuart_timer);
++}
++
+ static void lpuart_tx_dma_startup(struct lpuart_port *sport)
+ {
+ u32 uartbaud;
+@@ -1529,19 +1668,23 @@ static int lpuart_startup(struct uart_po
+ return 0;
+ }
+
++static void lpuart32_hw_disable(struct lpuart_port *sport)
++{
++ unsigned long temp;
++
++ temp = lpuart32_read(&sport->port, UARTCTRL);
++ temp &= ~(UARTCTRL_RIE | UARTCTRL_ILIE | UARTCTRL_RE |
++ UARTCTRL_TIE | UARTCTRL_TE);
++ lpuart32_write(&sport->port, temp, UARTCTRL);
++}
++
+ static void lpuart32_configure(struct lpuart_port *sport)
+ {
+ unsigned long temp;
+
+- if (sport->lpuart_dma_rx_use) {
+- /* RXWATER must be 0 */
+- temp = lpuart32_read(&sport->port, UARTWATER);
+- temp &= ~(UARTWATER_WATER_MASK << UARTWATER_RXWATER_OFF);
+- lpuart32_write(&sport->port, temp, UARTWATER);
+- }
+ temp = lpuart32_read(&sport->port, UARTCTRL);
+ if (!sport->lpuart_dma_rx_use)
+- temp |= UARTCTRL_RIE;
++ temp |= UARTCTRL_RIE | UARTCTRL_ILIE;
+ if (!sport->lpuart_dma_tx_use)
+ temp |= UARTCTRL_TIE;
+ lpuart32_write(&sport->port, temp, UARTCTRL);
+@@ -1574,12 +1717,12 @@ static int lpuart32_startup(struct uart_
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+- lpuart32_setup_watermark_enable(sport);
+-
++ lpuart32_hw_disable(sport);
+
+ lpuart_rx_dma_startup(sport);
+ lpuart_tx_dma_startup(sport);
+
++ lpuart32_setup_watermark_enable(sport);
+ lpuart32_configure(sport);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+@@ -1589,7 +1732,7 @@ static int lpuart32_startup(struct uart_
+ static void lpuart_dma_shutdown(struct lpuart_port *sport)
+ {
+ if (sport->lpuart_dma_rx_use) {
+- del_timer_sync(&sport->lpuart_timer);
++ lpuart_del_timer_sync(sport);
+ lpuart_dma_rx_free(&sport->port);
+ }
+
+@@ -1630,11 +1773,22 @@ static void lpuart32_shutdown(struct uar
+
+ spin_lock_irqsave(&port->lock, flags);
+
++	/* clear status */
++ temp = lpuart32_read(&sport->port, UARTSTAT);
++ lpuart32_write(&sport->port, temp, UARTSTAT);
++
++ /* disable Rx/Tx DMA */
++ temp = lpuart32_read(port, UARTBAUD);
++ temp &= ~(UARTBAUD_TDMAE | UARTBAUD_RDMAE | UARTBAUD_RIDMAE);
++ lpuart32_write(port, temp, UARTBAUD);
++
+ /* disable Rx/Tx and interrupts */
+ temp = lpuart32_read(port, UARTCTRL);
+- temp &= ~(UARTCTRL_TE | UARTCTRL_RE |
+- UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE);
++ temp &= ~(UARTCTRL_TE | UARTCTRL_RE | UARTCTRL_TIE |
++ UARTCTRL_TCIE | UARTCTRL_RIE | UARTCTRL_ILIE |
++ UARTCTRL_LOOPS);
+ lpuart32_write(port, temp, UARTCTRL);
++ lpuart32_write(port, 0, UARTMODIR);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+@@ -1731,10 +1885,10 @@ lpuart_set_termios(struct uart_port *por
+ * baud rate and restart Rx DMA path.
+ *
+ * Since timer function acqures sport->port.lock, need to stop before
+- * acquring same lock because otherwise del_timer_sync() can deadlock.
++	 * acquiring same lock because otherwise lpuart_del_timer_sync() can deadlock.
+ */
+ if (old && sport->lpuart_dma_rx_use) {
+- del_timer_sync(&sport->lpuart_timer);
++ lpuart_del_timer_sync(sport);
+ lpuart_dma_rx_free(&sport->port);
+ }
+
+@@ -1946,10 +2100,10 @@ lpuart32_set_termios(struct uart_port *p
+ * baud rate and restart Rx DMA path.
+ *
+ * Since timer function acqures sport->port.lock, need to stop before
+- * acquring same lock because otherwise del_timer_sync() can deadlock.
++	 * acquiring same lock because otherwise lpuart_del_timer_sync() can deadlock.
+ */
+ if (old && sport->lpuart_dma_rx_use) {
+- del_timer_sync(&sport->lpuart_timer);
++ lpuart_del_timer_sync(sport);
+ lpuart_dma_rx_free(&sport->port);
+ }
+
+@@ -2458,6 +2612,10 @@ static int lpuart_probe(struct platform_
+ sport->port.dev = &pdev->dev;
+ sport->port.type = PORT_LPUART;
+ sport->devtype = sdata->devtype;
++ sport->rx_dma_cyclic = sdata->rx_dma_cyclic;
++ sport->rx_watermark = sdata->rx_watermark;
++ sport->dma_eeop = is_imx8qxp_lpuart(sport);
++
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+@@ -2620,7 +2778,7 @@ static int lpuart_suspend(struct device
+ * Rx DMA path before suspend and start Rx DMA path on resume.
+ */
+ if (irq_wake) {
+- del_timer_sync(&sport->lpuart_timer);
++ lpuart_del_timer_sync(sport);
+ lpuart_dma_rx_free(&sport->port);
+ }
+