author     Hauke Mehrtens <hauke@hauke-m.de>    2017-02-11 15:52:41 +0100
committer  Hauke Mehrtens <hauke@hauke-m.de>    2017-02-11 23:44:13 +0100
commit     863e79f8d5544a8a884375d7e867f350fddca9b9 (patch)
tree       6c95efffd7d7db4df17dbfae7d108fdd20b99926 /target/linux/lantiq/patches-4.9/0170-MIPS-lantiq-lock-DMA-register-accesses-for-SMP.patch
parent     ca9b9969fb3f3853f7c6beb43bd496d90c7efe80 (diff)
lantiq: add support for kernel 4.9
The following patches were dropped because they are already applied upstream:
0012-pinctrl-lantiq-fix-up-pinmux.patch
0013-MTD-lantiq-xway-fix-invalid-operator.patch
0014-MTD-lantiq-xway-the-latched-command-should-be-persis.patch
0015-MTD-lantiq-xway-remove-endless-loop.patch
0016-MTD-lantiq-xway-add-missing-write_buf-and-read_buf-t.patch
0017-MTD-xway-fix-nand-locking.patch
0044-pinctrl-lantiq-introduce-new-dedicated-devicetree-bi.patch
0045-pinctrl-lantiq-Fix-GPIO-Setup-of-GPIO-Port3.patch
0046-pinctrl-lantiq-2-pins-have-the-wrong-mux-list.patch
0047-irq-fixes.patch
0047-mtd-plat-nand-pass-of-node.patch
0060-usb-dwc2-Add-support-for-Lantiq-ARX-and-XRX-SoCs.patch
0120-MIPS-lantiq-add-support-for-device-tree-file-from-bo.patch
0121-MIPS-lantiq-make-it-possible-to-build-in-no-device-t.patch
122-MIPS-store-the-appended-dtb-address-in-a-variable.patch

The PHY driver was reduced to the code adding the LED configuration; the rest is already upstream:
0023-NET-PHY-adds-driver-for-lantiq-PHY11G.patch

The SPI driver was replaced with the version pending for upstream inclusion:
New driver:
0090-spi-add-transfer_status-callback.patch
0091-spi-lantiq-ssc-add-support-for-Lantiq-SSC-SPI-controller.patch
Old driver:
0100-spi-add-support-for-Lantiq-SPI-controller.patch

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Diffstat (limited to 'target/linux/lantiq/patches-4.9/0170-MIPS-lantiq-lock-DMA-register-accesses-for-SMP.patch')
-rw-r--r--  target/linux/lantiq/patches-4.9/0170-MIPS-lantiq-lock-DMA-register-accesses-for-SMP.patch  152
1 file changed, 152 insertions(+), 0 deletions(-)
diff --git a/target/linux/lantiq/patches-4.9/0170-MIPS-lantiq-lock-DMA-register-accesses-for-SMP.patch b/target/linux/lantiq/patches-4.9/0170-MIPS-lantiq-lock-DMA-register-accesses-for-SMP.patch
new file mode 100644
index 0000000000..234a2527fc
--- /dev/null
+++ b/target/linux/lantiq/patches-4.9/0170-MIPS-lantiq-lock-DMA-register-accesses-for-SMP.patch
@@ -0,0 +1,152 @@
+From 58078a30038b578c26c532545448fe3746648390 Mon Sep 17 00:00:00 2001
+From: Hauke Mehrtens <hauke@hauke-m.de>
+Date: Thu, 29 Dec 2016 21:02:57 +0100
+Subject: [PATCH] MIPS: lantiq: lock DMA register accesses for SMP
+
+The DMA controller channel and port configuration is changed by
+selecting the port or channel in one register and then updating the
+configuration in other registers. This has to be done as an atomic
+operation. Previously only the local interrupts were deactivated, which
+works for single-CPU systems. If the system supports SMP, better
+locking is needed, so use spinlocks instead.
+On more recent SoCs (at least xrx200 and later) there are two memory
+regions to change the configuration; there we could use one area per
+CPU and would not have to synchronize between the CPUs any more.
+
+Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
+---
+ arch/mips/lantiq/xway/dma.c | 38 ++++++++++++++++++++------------------
+ 1 file changed, 20 insertions(+), 18 deletions(-)
+
+--- a/arch/mips/lantiq/xway/dma.c
++++ b/arch/mips/lantiq/xway/dma.c
+@@ -20,6 +20,7 @@
+ #include <linux/io.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/module.h>
++#include <linux/spinlock.h>
+ #include <linux/clk.h>
+ #include <linux/err.h>
+
+@@ -59,16 +60,17 @@
+ ltq_dma_membase + (z))
+
+ static void __iomem *ltq_dma_membase;
++static DEFINE_SPINLOCK(ltq_dma_lock);
+
+ void
+ ltq_dma_enable_irq(struct ltq_dma_channel *ch)
+ {
+ unsigned long flags;
+
+- local_irq_save(flags);
++ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);
+
+@@ -77,10 +79,10 @@ ltq_dma_disable_irq(struct ltq_dma_chann
+ {
+ unsigned long flags;
+
+- local_irq_save(flags);
++ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);
+
+@@ -89,10 +91,10 @@ ltq_dma_ack_irq(struct ltq_dma_channel *
+ {
+ unsigned long flags;
+
+- local_irq_save(flags);
++ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);
+
+@@ -101,11 +103,11 @@ ltq_dma_open(struct ltq_dma_channel *ch)
+ {
+ unsigned long flag;
+
+- local_irq_save(flag);
++ spin_lock_irqsave(&ltq_dma_lock, flag);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
+- ltq_dma_enable_irq(ch);
+- local_irq_restore(flag);
++ ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
++ spin_unlock_irqrestore(&ltq_dma_lock, flag);
+ }
+ EXPORT_SYMBOL_GPL(ltq_dma_open);
+
+@@ -114,11 +116,11 @@ ltq_dma_close(struct ltq_dma_channel *ch
+ {
+ unsigned long flag;
+
+- local_irq_save(flag);
++ spin_lock_irqsave(&ltq_dma_lock, flag);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
+- ltq_dma_disable_irq(ch);
+- local_irq_restore(flag);
++ ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
++ spin_unlock_irqrestore(&ltq_dma_lock, flag);
+ }
+ EXPORT_SYMBOL_GPL(ltq_dma_close);
+
+@@ -133,7 +135,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch
+ &ch->phys, GFP_ATOMIC);
+ memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE);
+
+- local_irq_save(flags);
++ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);
+ ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN);
+@@ -142,7 +144,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch
+ ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
+ while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
+ ;
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+ }
+
+ void
+@@ -152,11 +154,11 @@ ltq_dma_alloc_tx(struct ltq_dma_channel
+
+ ltq_dma_alloc(ch);
+
+- local_irq_save(flags);
++ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
+ ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+ ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);
+
+@@ -167,11 +169,11 @@ ltq_dma_alloc_rx(struct ltq_dma_channel
+
+ ltq_dma_alloc(ch);
+
+- local_irq_save(flags);
++ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
+ ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+ ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);
+
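
For context, the pattern the patch serializes looks like the sketch below: every configuration access first selects a channel in LTQ_DMA_CS and only then touches the channel-specific registers, so the select and the follow-up write must not be interleaved by another CPU. local_irq_save() only keeps the local CPU from being interrupted; the spinlock additionally excludes the other core. This is a minimal illustrative sketch, not the actual arch/mips/lantiq/xway/dma.c code: the lock, the register names and the overall sequence follow the diff above, while the helper ltq_dma_update_irnen(), the simplified read-modify-write and the register offsets are assumptions made for the example.

	/*
	 * Illustrative sketch of the select-then-configure critical section
	 * guarded by ltq_dma_lock. Register offsets are assumed values for
	 * this sketch; ltq_dma_update_irnen() is a made-up helper and not
	 * part of the driver.
	 */
	#include <linux/io.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	#define LTQ_DMA_CS	0x18	/* channel select (offset assumed here) */
	#define LTQ_DMA_IRNEN	0xf4	/* IRQ enable mask (offset assumed here) */

	static void __iomem *ltq_dma_membase;
	static DEFINE_SPINLOCK(ltq_dma_lock);

	#define ltq_dma_r32(x)		__raw_readl(ltq_dma_membase + (x))
	#define ltq_dma_w32(x, y)	__raw_writel(x, ltq_dma_membase + (y))

	/* Enable or disable the interrupt of one DMA channel. */
	static void ltq_dma_update_irnen(int chan, bool enable)
	{
		unsigned long flags;
		u32 irnen;

		/*
		 * local_irq_save() would only block interrupts on this CPU;
		 * a second core could still slip in between the channel
		 * select and the IRNEN update. The spinlock (taken with IRQs
		 * disabled locally) excludes both.
		 */
		spin_lock_irqsave(&ltq_dma_lock, flags);

		ltq_dma_w32(chan, LTQ_DMA_CS);		/* 1. select the channel */
		irnen = ltq_dma_r32(LTQ_DMA_IRNEN);	/* 2. update the register */
		if (enable)
			irnen |= 1u << chan;		/*    that depends on the */
		else
			irnen &= ~(1u << chan);		/*    selected channel    */
		ltq_dma_w32(irnen, LTQ_DMA_IRNEN);

		spin_unlock_irqrestore(&ltq_dma_lock, flags);
	}

The same reasoning explains why the patch inlines the IRNEN update in ltq_dma_open() and ltq_dma_close() instead of keeping the calls to ltq_dma_enable_irq() and ltq_dma_disable_irq(): those helpers now take ltq_dma_lock themselves, and acquiring a non-recursive spinlock twice on the same CPU would deadlock.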