Diffstat (limited to 'target/linux/ipq806x/patches-4.9/0015-spi-qup-refactor-spi_qup_prep_sg-to-be-more-take-spe.patch')
-rw-r--r--  target/linux/ipq806x/patches-4.9/0015-spi-qup-refactor-spi_qup_prep_sg-to-be-more-take-spe.patch  73
1 file changed, 73 insertions, 0 deletions
diff --git a/target/linux/ipq806x/patches-4.9/0015-spi-qup-refactor-spi_qup_prep_sg-to-be-more-take-spe.patch b/target/linux/ipq806x/patches-4.9/0015-spi-qup-refactor-spi_qup_prep_sg-to-be-more-take-spe.patch
new file mode 100644
index 0000000000..90ffe863e1
--- /dev/null
+++ b/target/linux/ipq806x/patches-4.9/0015-spi-qup-refactor-spi_qup_prep_sg-to-be-more-take-spe.patch
@@ -0,0 +1,73 @@
+From a24914d34a4c6df4323c6d98950166600da79bc6 Mon Sep 17 00:00:00 2001
+From: Matthew McClintock <mmcclint@codeaurora.org>
+Date: Wed, 4 May 2016 16:33:42 -0500
+Subject: [PATCH 15/37] spi: qup: refactor spi_qup_prep_sg to be more take
+ specific sgl and nent
+
+This is in preparation for splitting DMA into multiple transactions;
+this contains no code changes, just refactoring.
+
+Signed-off-by: Matthew McClintock <mmcclint@codeaurora.org>
+---
+ drivers/spi/spi-qup.c | 28 +++++++++++-----------------
+ 1 file changed, 11 insertions(+), 17 deletions(-)
+
+--- a/drivers/spi/spi-qup.c
++++ b/drivers/spi/spi-qup.c
+@@ -379,27 +379,19 @@ static void spi_qup_write(struct spi_qup
+ } while (remainder);
+ }
+
+-static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
+- enum dma_transfer_direction dir,
+- dma_async_tx_callback callback,
+- void *data)
++static int spi_qup_prep_sg(struct spi_master *master, struct scatterlist *sgl,
++ unsigned int nents, enum dma_transfer_direction dir,
++ dma_async_tx_callback callback, void *data)
+ {
+ unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
+ struct dma_async_tx_descriptor *desc;
+- struct scatterlist *sgl;
+ struct dma_chan *chan;
+ dma_cookie_t cookie;
+- unsigned int nents;
+
+- if (dir == DMA_MEM_TO_DEV) {
++ if (dir == DMA_MEM_TO_DEV)
+ chan = master->dma_tx;
+- nents = xfer->tx_sg.nents;
+- sgl = xfer->tx_sg.sgl;
+- } else {
++ else
+ chan = master->dma_rx;
+- nents = xfer->rx_sg.nents;
+- sgl = xfer->rx_sg.sgl;
+- }
+
+ desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
+ if (IS_ERR_OR_NULL(desc))
+@@ -602,8 +594,9 @@ unsigned long timeout)
+ }
+
+ if (xfer->rx_buf) {
+- ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done,
+- &qup->done);
++ ret = spi_qup_prep_sg(master, xfer->rx_sg.sgl,
++ xfer->rx_sg.nents, DMA_DEV_TO_MEM,
++ rx_done, &qup->done);
+ if (ret)
+ return ret;
+
+@@ -611,8 +604,9 @@ unsigned long timeout)
+ }
+
+ if (xfer->tx_buf) {
+- ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done,
+- &qup->dma_tx_done);
++ ret = spi_qup_prep_sg(master, xfer->tx_sg.sgl,
++ xfer->tx_sg.nents, DMA_MEM_TO_DEV,
++ tx_done, &qup->dma_tx_done);
+ if (ret)
+ return ret;
+
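
For readers who just want the shape of the refactor: spi_qup_prep_sg() no longer digs the scatterlist out of the spi_transfer itself; its callers now pass the sgl pointer and nents count explicitly, which is what later allows a single transfer to be split across several DMA submissions. The sketch below is illustrative only; the demo_* types and prep_sg_* functions are stand-ins invented for this note, not the real spi-qup or dmaengine API, and it builds as plain user-space C.

#include <stddef.h>
#include <stdio.h>

/* Stand-in types: the real code uses struct spi_transfer and struct sg_table. */
struct demo_sg_table { void *sgl; unsigned int nents; };
struct demo_transfer { struct demo_sg_table tx_sg, rx_sg; };

/* Old shape: the helper receives the whole transfer and unpacks the list itself. */
static int prep_sg_old(struct demo_transfer *xfer, int to_device)
{
	struct demo_sg_table *t = to_device ? &xfer->tx_sg : &xfer->rx_sg;

	printf("old: helper picked %u entries out of the transfer\n", t->nents);
	return 0;
}

/* New shape: the caller hands over sgl and nents directly. */
static int prep_sg_new(void *sgl, unsigned int nents, int to_device)
{
	(void)sgl;
	printf("new: caller passed the %s list with %u entries\n",
	       to_device ? "tx" : "rx", nents);
	return 0;
}

int main(void)
{
	struct demo_transfer xfer = {
		.tx_sg = { .sgl = NULL, .nents = 4 },
		.rx_sg = { .sgl = NULL, .nents = 2 },
	};

	/* Old call site: pass the transfer, direction decides which list is used. */
	prep_sg_old(&xfer, 1);

	/* New call site: the list travels as explicit arguments, so a later
	 * change can call this several times per transfer with partial lists. */
	prep_sg_new(xfer.tx_sg.sgl, xfer.tx_sg.nents, 1);
	return 0;
}

Passing the list explicitly keeps the helper indifferent to how the caller sliced it, which is the property the "splitting DMA into multiple transactions" follow-up mentioned in the commit message relies on.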