-rw-r--r--  target/linux/mvebu/patches-3.18/025-ARM-mvebu-Use-arm_coherent_dma_ops.patch  |  111
1 file changed, 111 insertions(+), 0 deletions(-)
diff --git a/target/linux/mvebu/patches-3.18/025-ARM-mvebu-Use-arm_coherent_dma_ops.patch b/target/linux/mvebu/patches-3.18/025-ARM-mvebu-Use-arm_coherent_dma_ops.patch
new file mode 100644
index 0000000000..e3d1415ede
--- /dev/null
+++ b/target/linux/mvebu/patches-3.18/025-ARM-mvebu-Use-arm_coherent_dma_ops.patch
@@ -0,0 +1,111 @@
+From 1bd4d8a6de5cda605e8b99fbf081be2ea2959380 Mon Sep 17 00:00:00 2001
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Date: Fri, 16 Jan 2015 17:11:29 +0100
+Subject: ARM: mvebu: use arm_coherent_dma_ops and re-enable hardware I/O
+ coherency
+
+Now that we have enabled automatic I/O synchronization barriers, we no
+longer need any explicit barriers. We can therefore simplify
+arch/arm/mach-mvebu/coherency.c by using the existing
+arm_coherent_dma_ops instead of our custom mvebu_hwcc_dma_ops, and
+re-enable hardware I/O coherency support.
+
+Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+[Andrew Lunn <andrew@lunn.ch>: Remove forgotten comment]
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+
+--- a/arch/arm/mach-mvebu/coherency.c
++++ b/arch/arm/mach-mvebu/coherency.c
+@@ -33,6 +33,7 @@
+ #include <asm/smp_plat.h>
+ #include <asm/cacheflush.h>
+ #include <asm/mach/map.h>
++#include <asm/dma-mapping.h>
+ #include "armada-370-xp.h"
+ #include "coherency.h"
+ #include "mvebu-soc-id.h"
+@@ -223,59 +224,6 @@ static void __init armada_375_coherency_
+ coherency_wa_enabled = true;
+ }
+
+-static inline void mvebu_hwcc_sync_io_barrier(void)
+-{
+- if (coherency_wa_enabled) {
+- mvebu_hwcc_armada375_sync_io_barrier_wa();
+- return;
+- }
+-
+- writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);
+- while (readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET) & 0x1);
+-}
+-
+-static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page,
+- unsigned long offset, size_t size,
+- enum dma_data_direction dir,
+- struct dma_attrs *attrs)
+-{
+- if (dir != DMA_TO_DEVICE)
+- mvebu_hwcc_sync_io_barrier();
+- return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+-}
+-
+-
+-static void mvebu_hwcc_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+- size_t size, enum dma_data_direction dir,
+- struct dma_attrs *attrs)
+-{
+- if (dir != DMA_TO_DEVICE)
+- mvebu_hwcc_sync_io_barrier();
+-}
+-
+-static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
+- size_t size, enum dma_data_direction dir)
+-{
+- if (dir != DMA_TO_DEVICE)
+- mvebu_hwcc_sync_io_barrier();
+-}
+-
+-static struct dma_map_ops mvebu_hwcc_dma_ops = {
+- .alloc = arm_dma_alloc,
+- .free = arm_dma_free,
+- .mmap = arm_dma_mmap,
+- .map_page = mvebu_hwcc_dma_map_page,
+- .unmap_page = mvebu_hwcc_dma_unmap_page,
+- .get_sgtable = arm_dma_get_sgtable,
+- .map_sg = arm_dma_map_sg,
+- .unmap_sg = arm_dma_unmap_sg,
+- .sync_single_for_cpu = mvebu_hwcc_dma_sync,
+- .sync_single_for_device = mvebu_hwcc_dma_sync,
+- .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
+- .sync_sg_for_device = arm_dma_sync_sg_for_device,
+- .set_dma_mask = arm_dma_set_mask,
+-};
+-
+ static int mvebu_hwcc_notifier(struct notifier_block *nb,
+ unsigned long event, void *__dev)
+ {
+@@ -283,7 +231,7 @@ static int mvebu_hwcc_notifier(struct no
+
+ if (event != BUS_NOTIFY_ADD_DEVICE)
+ return NOTIFY_DONE;
+- set_dma_ops(dev, &mvebu_hwcc_dma_ops);
++ set_dma_ops(dev, &arm_coherent_dma_ops);
+
+ return NOTIFY_OK;
+ }
+@@ -405,14 +353,9 @@ static int coherency_type(void)
+ return type;
+ }
+
+-/*
+- * As a precaution, we currently completely disable hardware I/O
+- * coherency, until enough testing is done with automatic I/O
+- * synchronization barriers to validate that it is a proper solution.
+- */
+ int coherency_available(void)
+ {
+- return false;
++ return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
+ }
+
+ int __init coherency_init(void)
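
For reference, the net effect of the patch is that the bus notifier hands every newly added device the generic arm_coherent_dma_ops instead of the removed mvebu_hwcc_dma_ops, so the map/unmap/sync paths reduce to plain address translation with no explicit I/O synchronization barrier. Below is a minimal sketch of that pattern, assuming 3.18-era ARM kernel APIs (set_dma_ops(), arm_coherent_dma_ops, BUS_NOTIFY_ADD_DEVICE, bus_register_notifier()); the example_* names are hypothetical and the init wiring is simplified compared to the real coherency_init() path.

#include <linux/init.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <asm/dma-mapping.h>

/* Bus notifier: when a device is added, point its dma_map_ops at the
 * generic coherent implementation. With hardware I/O coherency active,
 * no per-operation snoop barrier is issued on map, unmap or sync. */
static int example_coherent_notifier(struct notifier_block *nb,
				     unsigned long event, void *__dev)
{
	struct device *dev = __dev;

	if (event != BUS_NOTIFY_ADD_DEVICE)
		return NOTIFY_DONE;
	set_dma_ops(dev, &arm_coherent_dma_ops);

	return NOTIFY_OK;
}

static struct notifier_block example_coherent_nb = {
	.notifier_call = example_coherent_notifier,
};

static int __init example_coherent_init(void)
{
	/* Simplified wiring (hypothetical): the real code only registers
	 * the notifier once the coherency fabric has been probed. */
	bus_register_notifier(&platform_bus_type, &example_coherent_nb);
	return 0;
}
postcore_initcall(example_coherent_init);

Note why the removed barrier was conditional: mvebu_hwcc_sync_io_barrier() ran only when dir != DMA_TO_DEVICE, i.e. before the CPU consumes data written by the device. Per the commit message, that ordering is now enforced by the automatic I/O synchronization barriers, which is also why coherency_available() can again report the fabric type instead of a hard-coded false.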