Diffstat (limited to 'arch/arm/include/asm/cache.h')
-rw-r--r--  arch/arm/include/asm/cache.h | 28
1 file changed, 28 insertions, 0 deletions
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
new file mode 100644
index 00000000..75fe66bc
--- /dev/null
+++ b/arch/arm/include/asm/cache.h
@@ -0,0 +1,28 @@
+/*
+ * arch/arm/include/asm/cache.h
+ */
+#ifndef __ASMARM_CACHE_H
+#define __ASMARM_CACHE_H
+
+#define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+/*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+ * sure that all such allocations are cache aligned. Otherwise,
+ * unrelated code may cause parts of the buffer to be read into the
+ * cache before the transfer is done, causing old data to be seen by
+ * the CPU.
+ */
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+/*
+ * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
+ */
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+#define ARCH_SLAB_MINALIGN 8
+#endif
+
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+#endif
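
Notes on the definitions above follow; none of the code below is part of the patch.

L1_CACHE_BYTES is derived from the Kconfig-selected shift. As a worked example, assuming CONFIG_ARM_L1_CACHE_SHIFT = 5 (a common value; the real value is chosen per platform), the macros expand to 32-byte cache lines. A minimal standalone sketch:

/* Sketch under the assumption CONFIG_ARM_L1_CACHE_SHIFT = 5. */
#include <stdio.h>

#define L1_CACHE_SHIFT 5                     /* assumed Kconfig value */
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) /* 1 << 5 = 32 */

int main(void)
{
	printf("L1_CACHE_BYTES = %d\n", L1_CACHE_BYTES); /* prints 32 */
	return 0;
}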
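ARCH_DMA_MINALIGN matters when a DMA-bound buffer is embedded in a larger structure: if another field shares the buffer's cache line, a read of that field can pull stale buffer contents into the cache mid-transfer. A hedged sketch of how a driver might lay out such a structure (the struct and field names are illustrative, not from this patch):

/* Hypothetical driver state: keep the DMA buffer on its own cache lines. */
#include <linux/cache.h>   /* ARCH_DMA_MINALIGN */
#include <linux/types.h>

struct example_dev_state {
	u32 flags;
	/* Start the buffer on a cache-line boundary; with a size that is a
	 * multiple of ARCH_DMA_MINALIGN (assumed <= 64 here), fields placed
	 * after it do not share the buffer's cache lines.
	 */
	u8  rx_buf[64] __attribute__((__aligned__(ARCH_DMA_MINALIGN)));
	u32 rx_count;
};

Note that kmalloc() itself returns ARCH_DMA_MINALIGN-aligned memory on this architecture, which is exactly what the comment in the header is guaranteeing.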
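ARCH_SLAB_MINALIGN = 8 exists because under EABI a 64-bit type such as u64 must be 8-byte aligned (e.g. for LDRD/STRD), so slab allocations must meet that alignment for code like the following sketch to be safe (function name is illustrative):

/* Sketch: storing a u64 through a kmalloc'd pointer relies on
 * ARCH_SLAB_MINALIGN-aligned allocations.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

static int example_alloc(void)
{
	u64 *counter = kmalloc(sizeof(*counter), GFP_KERNEL);

	if (!counter)
		return -ENOMEM;
	*counter = 0;	/* safe: pointer is at least 8-byte aligned */
	kfree(counter);
	return 0;
}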
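Finally, __read_mostly places a variable in the .data..read_mostly section, grouping frequently-read, rarely-written globals away from write-hot data so their cache lines are not invalidated by unrelated stores. A minimal usage sketch (the variable name is hypothetical):

/* Sketch: read on every hot-path invocation, written only at init time. */
#include <linux/cache.h>

static unsigned int example_poll_interval __read_mostly = 100;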