From 849369d6c66d3054688672f97d31fceb8e8230fb Mon Sep 17 00:00:00 2001
From: root
Date: Fri, 25 Dec 2015 04:40:36 +0000
Subject: initial_commit

---
 arch/arm/include/asm/cache.h | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)
 create mode 100644 arch/arm/include/asm/cache.h

diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
new file mode 100644
index 00000000..75fe66bc
--- /dev/null
+++ b/arch/arm/include/asm/cache.h
@@ -0,0 +1,28 @@
+/*
+ * arch/arm/include/asm/cache.h
+ */
+#ifndef __ASMARM_CACHE_H
+#define __ASMARM_CACHE_H
+
+#define L1_CACHE_SHIFT		CONFIG_ARM_L1_CACHE_SHIFT
+#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+
+/*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+ * sure that all such allocations are cache aligned. Otherwise,
+ * unrelated code may cause parts of the buffer to be read into the
+ * cache before the transfer is done, causing old data to be seen by
+ * the CPU.
+ */
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
+
+/*
+ * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
+ */
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+#define ARCH_SLAB_MINALIGN	8
+#endif
+
+#define __read_mostly	__attribute__((__section__(".data..read_mostly")))
+
+#endif
--
cgit v1.2.3
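
A minimal userspace sketch of the cache-line rounding that ARCH_DMA_MINALIGN
implies, assuming a 64-byte line (i.e. CONFIG_ARM_L1_CACHE_SHIFT = 6); inside
the kernel, kmalloc() already returns memory aligned this way, so this only
illustrates the arithmetic behind the header's DMA comment:

    /* Illustration only: the value 64 is an assumption standing in for
     * L1_CACHE_BYTES (1 << L1_CACHE_SHIFT); real kernel code takes it
     * from <asm/cache.h> rather than defining it by hand. */
    #include <stdio.h>
    #include <stddef.h>

    #define L1_CACHE_BYTES    64                /* assumed: 1 << 6 */
    #define ARCH_DMA_MINALIGN L1_CACHE_BYTES

    /* Round a length up to a whole number of cache lines, so no
     * unrelated data shares the final line of a DMA buffer. */
    static size_t dma_safe_len(size_t len)
    {
        return (len + ARCH_DMA_MINALIGN - 1) &
               ~(size_t)(ARCH_DMA_MINALIGN - 1);
    }

    int main(void)
    {
        printf("%zu\n", dma_safe_len(100));     /* prints 128 */
        return 0;
    }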