commit    2819732219904a81205abe0fa3fbe9c06884f119
tree      e8b946f62b0d16ec44cbe41d07e9a271b6d16ceb
parent    18e9ed2482d6988b5962e856ec0792d5794ad93f
author    Stefan Lippers-Hollmann <s.l-h@gmx.de>  2018-05-18 04:50:09 +0200
committer John Crispin <john@phrozen.org>  2018-05-22 20:34:14 +0200
ipq806x: drop linux 4.9 support
Signed-off-by: Stefan Lippers-Hollmann <s.l-h@gmx.de>
Diffstat (limited to 'target/linux/ipq806x/patches-4.9/0013-spi-qup-allow-mulitple-DMA-transactions-per-spi-xfer.patch')
-rw-r--r-- | target/linux/ipq806x/patches-4.9/0013-spi-qup-allow-mulitple-DMA-transactions-per-spi-xfer.patch | 166
1 file changed, 0 insertions(+), 166 deletions(-)
diff --git a/target/linux/ipq806x/patches-4.9/0013-spi-qup-allow-mulitple-DMA-transactions-per-spi-xfer.patch b/target/linux/ipq806x/patches-4.9/0013-spi-qup-allow-mulitple-DMA-transactions-per-spi-xfer.patch
deleted file mode 100644
index 13e199c52d..0000000000
--- a/target/linux/ipq806x/patches-4.9/0013-spi-qup-allow-mulitple-DMA-transactions-per-spi-xfer.patch
+++ /dev/null
@@ -1,166 +0,0 @@
-From 028f915b20ec343dda88f1bcc99f07f6b428b4aa Mon Sep 17 00:00:00 2001
-From: Matthew McClintock <mmcclint@codeaurora.org>
-Date: Thu, 5 May 2016 10:07:11 -0500
-Subject: [PATCH 13/69] spi: qup: allow mulitple DMA transactions per spi xfer
-
-Much like the block mode changes, we are breaking up DMA transactions
-into 64K chunks so we can reset the QUP engine.
-
-Signed-off-by: Matthew McClintock <mmcclint@codeaurora.org>
----
- drivers/spi/spi-qup.c | 120 ++++++++++++++++++++++++++++++++++++--------------
- 1 file changed, 86 insertions(+), 34 deletions(-)
-
---- a/drivers/spi/spi-qup.c
-+++ b/drivers/spi/spi-qup.c
-@@ -566,6 +566,21 @@ static int spi_qup_io_config(struct spi_
- 	return 0;
- }
- 
-+static unsigned int spi_qup_sgl_get_size(struct scatterlist *sgl, unsigned int nents)
-+{
-+	struct scatterlist *sg;
-+	int i;
-+	unsigned int length = 0;
-+
-+	if (!nents)
-+		return 0;
-+
-+	for_each_sg(sgl, sg, nents, i)
-+		length += sg_dma_len(sg);
-+
-+	return length;
-+}
-+
- static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
- 			  unsigned long timeout)
- {
-@@ -573,53 +588,90 @@ unsigned long timeout)
- 	struct spi_qup *qup = spi_master_get_devdata(master);
- 	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
- 	int ret;
-+	struct scatterlist *tx_sgl, *rx_sgl;
- 
--	ret = spi_qup_io_config(spi, xfer);
--	if (ret)
--		return ret;
--
--	/* before issuing the descriptors, set the QUP to run */
--	ret = spi_qup_set_state(qup, QUP_STATE_RUN);
--	if (ret) {
--		dev_warn(qup->dev, "cannot set RUN state\n");
--		return ret;
--	}
--
--	if (!qup->qup_v1) {
--		if (xfer->rx_buf)
--			rx_done = spi_qup_dma_done;
--
--		if (xfer->tx_buf)
--			tx_done = spi_qup_dma_done;
--	}
--
--	if (xfer->rx_buf) {
--		ret = spi_qup_prep_sg(master, xfer->rx_sg.sgl,
--				      xfer->rx_sg.nents, DMA_DEV_TO_MEM,
--				      rx_done, &qup->done);
--		if (ret)
--			return ret;
-+	rx_sgl = xfer->rx_sg.sgl;
-+	tx_sgl = xfer->tx_sg.sgl;
- 
--		dma_async_issue_pending(master->dma_rx);
--	}
-+	do {
-+		int rx_nents = 0, tx_nents = 0;
- 
--	if (xfer->tx_buf) {
--		ret = spi_qup_prep_sg(master, xfer->tx_sg.sgl,
--				      xfer->tx_sg.nents, DMA_MEM_TO_DEV,
--				      tx_done, &qup->dma_tx_done);
-+		if (rx_sgl) {
-+			rx_nents = sg_nents_for_len(rx_sgl, SPI_MAX_XFER);
-+			if (rx_nents < 0)
-+				rx_nents = sg_nents(rx_sgl);
-+
-+			qup->n_words = spi_qup_sgl_get_size(rx_sgl, rx_nents) /
-+				qup->w_size;
-+		}
-+
-+		if (tx_sgl) {
-+			tx_nents = sg_nents_for_len(tx_sgl, SPI_MAX_XFER);
-+			if (tx_nents < 0)
-+				tx_nents = sg_nents(tx_sgl);
-+
-+			qup->n_words = spi_qup_sgl_get_size(tx_sgl, tx_nents) /
-+				qup->w_size;
-+		}
-+
-+
-+		ret = spi_qup_io_config(spi, xfer);
- 		if (ret)
- 			return ret;
- 
--		dma_async_issue_pending(master->dma_tx);
--	}
-+		/* before issuing the descriptors, set the QUP to run */
-+		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
-+		if (ret) {
-+			dev_warn(qup->dev, "cannot set RUN state\n");
-+			return ret;
-+		}
-+
-+		if (!qup->qup_v1) {
-+			if (rx_sgl) {
-+				rx_done = spi_qup_dma_done;
-+			}
-+
-+			if (tx_sgl) {
-+				tx_done = spi_qup_dma_done;
-+			}
-+		}
-+
-+		if (rx_sgl) {
-+			ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
-+					      DMA_DEV_TO_MEM, rx_done,
-+					      &qup->done);
-+			if (ret)
-+				return ret;
-+
-+			dma_async_issue_pending(master->dma_rx);
-+		}
-+
-+		if (tx_sgl) {
-+			ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
-+					      DMA_MEM_TO_DEV, tx_done,
-+					      &qup->dma_tx_done);
-+			if (ret)
-+				return ret;
-+
-+			dma_async_issue_pending(master->dma_tx);
-+		}
-+
-+		if (rx_sgl && !wait_for_completion_timeout(&qup->done, timeout)) {
-+			pr_emerg(" rx timed out");
-+			return -ETIMEDOUT;
-+		}
-+
-+		if (tx_sgl && !wait_for_completion_timeout(&qup->dma_tx_done, timeout)) {
-+			pr_emerg(" tx timed out\n");
-+			return -ETIMEDOUT;
-+		}
- 
--	if (xfer->rx_buf && !wait_for_completion_timeout(&qup->done, timeout))
--		return -ETIMEDOUT;
-+		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl));
-+		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl));
- 
--	if (xfer->tx_buf && !wait_for_completion_timeout(&qup->dma_tx_done, timeout))
--		ret = -ETIMEDOUT;
-+	} while (rx_sgl || tx_sgl);
- 
--	return ret;
-+	return 0;
- }
- 
- static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
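For context on what the removed patch implemented: it walks each scatter-gather list in pieces of at most SPI_MAX_XFER, reprogramming and re-running the QUP engine for every piece, instead of issuing one oversized DMA transaction. The following is a minimal standalone sketch of that chunking pattern only, not kernel code; the names MAX_XFER_BYTES, process_chunk, and do_transfer are invented for illustration and do not appear in the driver.

```c
#include <stddef.h>
#include <stdio.h>

/* Hypothetical per-transaction limit, mirroring the 64K cap the patch
 * works around (the driver uses SPI_MAX_XFER for this). */
#define MAX_XFER_BYTES (64 * 1024)

/* Stand-in for "program the DMA engine and run the controller once". */
static void process_chunk(const unsigned char *buf, size_t len)
{
	printf("chunk at %p, %zu bytes\n", (const void *)buf, len);
}

/* Split one logical transfer into chunks of at most MAX_XFER_BYTES,
 * the same looping structure the patch wraps around spi_qup_prep_sg(). */
static void do_transfer(const unsigned char *buf, size_t total)
{
	while (total) {
		size_t len = total > MAX_XFER_BYTES ? MAX_XFER_BYTES : total;

		process_chunk(buf, len);
		buf += len;
		total -= len;
	}
}

int main(void)
{
	static unsigned char payload[200 * 1024];

	/* 200K payload -> three 64K chunks plus one 8K remainder. */
	do_transfer(payload, sizeof(payload));
	return 0;
}
```

The driver version does the same walk over scatterlists rather than a flat buffer, using sg_nents_for_len() to bound each round and sg_next() to advance, as visible in the deleted hunk above.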