author     John Crispin <john@phrozen.org>  2017-02-16 12:25:25 +0100
committer  John Crispin <john@phrozen.org>  2017-02-16 20:25:32 +0100
commit     bb255f74290d889b65a563bac7a4be0427fdbec8
tree       94eba9e702b4ed6203ee4aecb1bcd5778dde01b1 /target/linux/ipq806x/patches-4.9/0016-spi-qup-allow-mulitple-DMA-transactions-per-spi-xfer.patch
parent     04c4b6f7fd1c48c09333ba0051ad24a6905491a0
ipq806x: add v4.9 support
Signed-off-by: John Crispin <john@phrozen.org>
Diffstat (limited to 'target/linux/ipq806x/patches-4.9/0016-spi-qup-allow-mulitple-DMA-transactions-per-spi-xfer.patch')
-rw-r--r--  target/linux/ipq806x/patches-4.9/0016-spi-qup-allow-mulitple-DMA-transactions-per-spi-xfer.patch  166
1 file changed, 166 insertions, 0 deletions
diff --git a/target/linux/ipq806x/patches-4.9/0016-spi-qup-allow-mulitple-DMA-transactions-per-spi-xfer.patch b/target/linux/ipq806x/patches-4.9/0016-spi-qup-allow-mulitple-DMA-transactions-per-spi-xfer.patch
new file mode 100644
index 0000000000..de324ffd6e
--- /dev/null
+++ b/target/linux/ipq806x/patches-4.9/0016-spi-qup-allow-mulitple-DMA-transactions-per-spi-xfer.patch
@@ -0,0 +1,166 @@
+From 6b2bb8803f19116bad41a271f9035d4c853f4553 Mon Sep 17 00:00:00 2001
+From: Matthew McClintock <mmcclint@codeaurora.org>
+Date: Thu, 5 May 2016 10:07:11 -0500
+Subject: [PATCH 16/37] spi: qup: allow mulitple DMA transactions per spi xfer
+
+Much like the block mode changes, we are breaking up DMA transactions
+into 64K chunks so we can reset the QUP engine.
+
+Signed-off-by: Matthew McClintock <mmcclint@codeaurora.org>
+---
+ drivers/spi/spi-qup.c | 120 +++++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 86 insertions(+), 34 deletions(-)
+
+--- a/drivers/spi/spi-qup.c
++++ b/drivers/spi/spi-qup.c
+@@ -566,6 +566,21 @@ static int spi_qup_io_config(struct spi_
+ 	return 0;
+ }
+ 
++static unsigned int spi_qup_sgl_get_size(struct scatterlist *sgl, unsigned int nents)
++{
++	struct scatterlist *sg;
++	int i;
++	unsigned int length = 0;
++
++	if (!nents)
++		return 0;
++
++	for_each_sg(sgl, sg, nents, i)
++		length += sg_dma_len(sg);
++
++	return length;
++}
++
+ static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
+ 			  unsigned long timeout)
+ {
+@@ -573,53 +588,90 @@ unsigned long timeout)
+ 	struct spi_qup *qup = spi_master_get_devdata(master);
+ 	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
+ 	int ret;
++	struct scatterlist *tx_sgl, *rx_sgl;
+ 
+-	ret = spi_qup_io_config(spi, xfer);
+-	if (ret)
+-		return ret;
+-
+-	/* before issuing the descriptors, set the QUP to run */
+-	ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+-	if (ret) {
+-		dev_warn(qup->dev, "cannot set RUN state\n");
+-		return ret;
+-	}
+-
+-	if (!qup->qup_v1) {
+-		if (xfer->rx_buf)
+-			rx_done = spi_qup_dma_done;
+-
+-		if (xfer->tx_buf)
+-			tx_done = spi_qup_dma_done;
+-	}
+-
+-	if (xfer->rx_buf) {
+-		ret = spi_qup_prep_sg(master, xfer->rx_sg.sgl,
+-				      xfer->rx_sg.nents, DMA_DEV_TO_MEM,
+-				      rx_done, &qup->done);
+-		if (ret)
+-			return ret;
++	rx_sgl = xfer->rx_sg.sgl;
++	tx_sgl = xfer->tx_sg.sgl;
+ 
+-		dma_async_issue_pending(master->dma_rx);
+-	}
++	do {
++		int rx_nents = 0, tx_nents = 0;
+ 
+-	if (xfer->tx_buf) {
+-		ret = spi_qup_prep_sg(master, xfer->tx_sg.sgl,
+-				      xfer->tx_sg.nents, DMA_MEM_TO_DEV,
+-				      tx_done, &qup->dma_tx_done);
++		if (rx_sgl) {
++			rx_nents = sg_nents_for_len(rx_sgl, SPI_MAX_XFER);
++			if (rx_nents < 0)
++				rx_nents = sg_nents(rx_sgl);
++
++			qup->n_words = spi_qup_sgl_get_size(rx_sgl, rx_nents) /
++						qup->w_size;
++		}
++
++		if (tx_sgl) {
++			tx_nents = sg_nents_for_len(tx_sgl, SPI_MAX_XFER);
++			if (tx_nents < 0)
++				tx_nents = sg_nents(tx_sgl);
++
++			qup->n_words = spi_qup_sgl_get_size(tx_sgl, tx_nents) /
++						qup->w_size;
++		}
++
++
++		ret = spi_qup_io_config(spi, xfer);
+ 		if (ret)
+ 			return ret;
+ 
+-		dma_async_issue_pending(master->dma_tx);
+-	}
++		/* before issuing the descriptors, set the QUP to run */
++		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
++		if (ret) {
++			dev_warn(qup->dev, "cannot set RUN state\n");
++			return ret;
++		}
++
++		if (!qup->qup_v1) {
++			if (rx_sgl) {
++				rx_done = spi_qup_dma_done;
++			}
++
++			if (tx_sgl) {
++				tx_done = spi_qup_dma_done;
++			}
++		}
++
++		if (rx_sgl) {
++			ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
++					      DMA_DEV_TO_MEM, rx_done,
++					      &qup->done);
++			if (ret)
++				return ret;
++
++			dma_async_issue_pending(master->dma_rx);
++		}
++
++		if (tx_sgl) {
++			ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
++					      DMA_MEM_TO_DEV, tx_done,
++					      &qup->dma_tx_done);
++			if (ret)
++				return ret;
++
++			dma_async_issue_pending(master->dma_tx);
++		}
++
++		if (rx_sgl && !wait_for_completion_timeout(&qup->done, timeout)) {
++			pr_emerg(" rx timed out");
++			return -ETIMEDOUT;
++		}
++
++		if (tx_sgl && !wait_for_completion_timeout(&qup->dma_tx_done, timeout)) {
++			pr_emerg(" tx timed out\n");
++			return -ETIMEDOUT;
++		}
+ 
+-	if (xfer->rx_buf && !wait_for_completion_timeout(&qup->done, timeout))
+-		return -ETIMEDOUT;
++		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl));
++		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl));
+ 
+-	if (xfer->tx_buf && !wait_for_completion_timeout(&qup->dma_tx_done, timeout))
+-		ret = -ETIMEDOUT;
++	} while (rx_sgl || tx_sgl);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
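
The loop this patch introduces walks the rx and tx scatter-gather lists in lock-step: each pass bounds one DMA transaction with sg_nents_for_len(), derives qup->n_words from the summed segment lengths, reconfigures and runs the QUP, then advances both lists with sg_next(). The following user-space sketch illustrates only that chunking pattern; it is not the driver itself, and the SPI_MAX_XFER value, the mock_sg type, and the segment sizes are assumptions made for illustration.

#include <stdio.h>

#define SPI_MAX_XFER (65536 - 64)  /* assumed per-pass cap, standing in for the driver's 64K limit */

/* Mock scatterlist entry: just a DMA length, standing in for sg_dma_len(). */
struct mock_sg { unsigned int len; };

int main(void)
{
	/* hypothetical scatter-gather list for one large spi_transfer */
	struct mock_sg sgl[] = { { 40000 }, { 50000 }, { 60000 }, { 30000 } };
	unsigned int nsg = sizeof(sgl) / sizeof(sgl[0]);
	unsigned int w_size = 4;  /* bytes per FIFO word, as in qup->w_size */
	unsigned int i = 0, pass = 0;

	while (i < nsg) {
		unsigned int n_bytes = 0, nents = 0;

		/* Count entries until SPI_MAX_XFER is covered, in the spirit of
		 * sg_nents_for_len(): the entry that crosses the cap is still
		 * counted whole. */
		while (i + nents < nsg && n_bytes < SPI_MAX_XFER)
			n_bytes += sgl[i + nents++].len;

		/* One DMA transaction: the driver would program n_words, set the
		 * QUP to RUN, issue the descriptors and wait for completion here. */
		printf("pass %u: %u entries, %u bytes = %u words\n",
		       pass++, nents, n_bytes, n_bytes / w_size);

		i += nents;  /* advance past the consumed entries, like the sg_next() loops */
	}
	return 0;
}

Note that because whole entries are consumed per pass, a transaction can exceed the cap when a segment straddles the boundary; the patch shows the same behavior, since spi_qup_sgl_get_size() sums the full length of every entry that sg_nents_for_len() counted.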