author    Ian Campbell <ian.campbell@citrix.com>    2013-09-09 17:45:51 +0100
committer Ian Campbell <ian.campbell@citrix.com>    2013-09-21 16:27:35 +0100
commit    204cb1ec9f2456725492d44d724d3c9d6ba865f1 (patch)
tree      0ae90057fdf24822d8c1822dafc714a03987ce11 /xen/include/asm-arm
parent    45b187020098a9872b05a96c86d3f65d2404ec0c (diff)
xen/arm: replace io{read,write}{l,b} with {read,write}{l,b}
We appear to have invented the io versions ourselves for Xen on ARM, while x86
has the plain read/write variants (and so does Linux, FWIW). read/write are
used in common driver code (specifically ns16550), so instead of keeping our
own variant around let's replace it with the more standard ones.

At the same time resync with Linux, making the "based on" comment in both sets
of io.h somewhat true (they don't look to have been very based on before...).
Our io.h is now consistent with Linux v3.11.

Note that iowrite and write take their arguments in the opposite order.

Also make asm-arm/io.h useful and include it where necessary instead of
picking up the include from mm.h. Remove the include from mm.h.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Julien Grall <julien.grall@linaro.org>
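For callers, the visible change is the argument order: the Xen-private iowritel() took the address first, while the standard writel() takes the value first (reads keep a single address argument). A minimal sketch of the conversion at a hypothetical ns16550-style call site (the register offsets and names below are illustrative, not part of this patch):

    #define UART_THR  0x00            /* illustrative register offsets only */
    #define UART_LSR  0x14
    #define LSR_THRE  (1u << 5)

    static void uart_putc(volatile void __iomem *regs, uint8_t c)
    {
        /* Before this patch (Xen-private accessors, address first):
         *     while ( !(ioreadl(regs + UART_LSR) & LSR_THRE) )
         *         ;
         *     iowritel(regs + UART_THR, c);
         */

        /* After this patch (Linux-style accessors, value first): */
        while ( !(readl(regs + UART_LSR) & LSR_THRE) )
            ;
        writel(c, regs + UART_THR);
    }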
Diffstat (limited to 'xen/include/asm-arm')
-rw-r--r--  xen/include/asm-arm/arm32/io.h  74
-rw-r--r--  xen/include/asm-arm/arm64/io.h  92
-rw-r--r--  xen/include/asm-arm/io.h         8
-rw-r--r--  xen/include/asm-arm/mm.h         8
4 files changed, 153 insertions(+), 29 deletions(-)
diff --git a/xen/include/asm-arm/arm32/io.h b/xen/include/asm-arm/arm32/io.h
index ec7e0ff9a2..73a879e9fb 100644
--- a/xen/include/asm-arm/arm32/io.h
+++ b/xen/include/asm-arm/arm32/io.h
@@ -22,25 +22,75 @@
#define _ARM_ARM32_IO_H
#include <asm/system.h>
+#include <asm/byteorder.h>
-static inline uint32_t ioreadl(const volatile void __iomem *addr)
+static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
- uint32_t val;
+ asm volatile("strb %1, %0"
+ : "+Qo" (*(volatile u8 __force *)addr)
+ : "r" (val));
+}
+
+static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+{
+ asm volatile("strh %1, %0"
+ : "+Q" (*(volatile u16 __force *)addr)
+ : "r" (val));
+}
- asm volatile("ldr %1, %0"
- : "+Qo" (*(volatile uint32_t __force *)addr),
- "=r" (val));
- dsb();
+static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+{
+ asm volatile("str %1, %0"
+ : "+Qo" (*(volatile u32 __force *)addr)
+ : "r" (val));
+}
- return val;
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 val;
+ asm volatile("ldrb %1, %0"
+ : "+Qo" (*(volatile u8 __force *)addr),
+ "=r" (val));
+ return val;
}
-static inline void iowritel(const volatile void __iomem *addr, uint32_t val)
+static inline u16 __raw_readw(const volatile void __iomem *addr)
{
- dsb();
- asm volatile("str %1, %0"
- : "+Qo" (*(volatile uint32_t __force *)addr)
- : "r" (val));
+ u16 val;
+ asm volatile("ldrh %1, %0"
+ : "+Q" (*(volatile u16 __force *)addr),
+ "=r" (val));
+ return val;
}
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 val;
+ asm volatile("ldr %1, %0"
+ : "+Qo" (*(volatile u32 __force *)addr),
+ "=r" (val));
+ return val;
+}
+
+#define __iormb() rmb()
+#define __iowmb() wmb()
+
+#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; })
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
+ __raw_readw(c)); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+ __raw_readl(c)); __r; })
+
+#define writeb_relaxed(v,c) __raw_writeb(v,c)
+#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
+
+#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
+#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+
+#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
+#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
+#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
+
#endif /* _ARM_ARM32_IO_H */
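The net effect of the arm32 hunk above: the old accessors folded a dsb() into every access, whereas the new Linux-style set splits into _relaxed variants (no explicit barrier, relying only on the ordering the architecture already gives accesses to the same Device-memory peripheral) and ordered readl()/writel() that add __iormb()/__iowmb(). A hedged sketch of how a caller might choose between them (the device layout and function below are hypothetical, not from this patch):

    /* Hypothetical MMIO producer; register layout is illustrative only. */
    struct ring_regs {
        u32 data;     /* FIFO data register                     */
        u32 head;     /* producer index consumed by the device  */
    };

    static void ring_push(volatile struct ring_regs __iomem *r,
                          u32 word, u32 new_head)
    {
        /* Accesses to the same Device-memory peripheral are already ordered
         * with respect to each other, so the relaxed form is sufficient. */
        writel_relaxed(word, &r->data);

        /* Publishing the new head must also be ordered against earlier
         * Normal-memory writes (e.g. a buffer filled in RAM), so use the
         * ordered writel(), which issues __iowmb() first. */
        writel(new_head, &r->head);
    }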
diff --git a/xen/include/asm-arm/arm64/io.h b/xen/include/asm-arm/arm64/io.h
index ec041cdbc4..37abc4788f 100644
--- a/xen/include/asm-arm/arm64/io.h
+++ b/xen/include/asm-arm/arm64/io.h
@@ -1,5 +1,6 @@
/*
- * Based on linux arch/arm64/include/asm/io.h
+ * Based on linux arch/arm64/include/asm/io.h which is in turn
+ * Based on arch/arm/include/asm/io.h
*
* Copyright (C) 1996-2000 Russell King
* Copyright (C) 2012 ARM Ltd.
@@ -19,20 +20,93 @@
#ifndef _ARM_ARM64_IO_H
#define _ARM_ARM64_IO_H
-static inline uint32_t ioreadl(const volatile void __iomem *addr)
+#include <asm/byteorder.h>
+
+/*
+ * Generic IO read/write. These perform native-endian accesses.
+ */
+static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
- uint32_t val;
+ asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr));
+}
- asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
- dsb();
+static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+{
+ asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr));
+}
- return val;
+static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+{
+ asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr));
}
-static inline void iowritel(const volatile void __iomem *addr, uint32_t val)
+static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
- dsb();
- asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr));
+ asm volatile("str %0, [%1]" : : "r" (val), "r" (addr));
}
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 val;
+ asm volatile("ldrb %w0, [%1]" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 val;
+ asm volatile("ldrh %w0, [%1]" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 val;
+ asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+static inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+ u64 val;
+ asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+/* IO barriers */
+#define __iormb() rmb()
+#define __iowmb() wmb()
+
+#define mmiowb() do { } while (0)
+
+/*
+ * Relaxed I/O memory access primitives. These follow the Device memory
+ * ordering rules but do not guarantee any ordering relative to Normal memory
+ * accesses.
+ */
+#define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; })
+#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; })
+#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; })
+#define readq_relaxed(c) ({ u64 __v = le64_to_cpu((__force __le64)__raw_readq(c)); __v; })
+
+#define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c)))
+#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
+#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
+#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
+
+/*
+ * I/O memory access primitives. Reads are ordered relative to any
+ * following Normal memory access. Writes are ordered relative to any prior
+ * Normal memory access.
+ */
+#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
+#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; })
+
+#define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); })
+#define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); })
+#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); })
+#define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c)); })
+
#endif /* _ARM_ARM64_IO_H */
diff --git a/xen/include/asm-arm/io.h b/xen/include/asm-arm/io.h
index aea5233d59..e426804424 100644
--- a/xen/include/asm-arm/io.h
+++ b/xen/include/asm-arm/io.h
@@ -1,6 +1,14 @@
#ifndef _ASM_IO_H
#define _ASM_IO_H
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/io.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/io.h>
+#else
+# error "unknown ARM variant"
+#endif
+
#endif
/*
* Local variables:
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 97c2ee0302..173db1b9c7 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -6,14 +6,6 @@
#include <asm/page.h>
#include <public/xen.h>
-#if defined(CONFIG_ARM_32)
-# include <asm/arm32/io.h>
-#elif defined(CONFIG_ARM_64)
-# include <asm/arm64/io.h>
-#else
-# error "unknown ARM variant"
-#endif
-
/* Align Xen to a 2 MiB boundary. */
#define XEN_PADDR_ALIGN (1 << 21)
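Taken together, the last two hunks mean the io.h accessors are no longer pulled in implicitly through asm/mm.h; any file that touches MMIO now includes <asm/io.h> itself and picks up the arm32 or arm64 implementation via the dispatch above. A hypothetical consumer (the symbol names are illustrative, not from this patch):

    #include <asm/io.h>          /* was previously inherited via <asm/mm.h> */

    static u32 read_device_id(volatile void __iomem *base)
    {
        return readl(base);      /* resolves to the arm32 or arm64 variant */
    }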