author     Ian Campbell <ian.campbell@citrix.com>   2013-02-22 08:57:47 +0000
committer  Ian Campbell <ian.campbell@citrix.com>   2013-02-22 12:14:51 +0000
commit     d2654a5568358ff0a08a1003df33087489b84d43 (patch)
tree       92d8ccd8d252f3d219ce7212e3e386bd22ceb6b6 /xen/include/asm-arm/atomic.h
parent     c52721848106c5c060c9130d8d3353bd4285a2f5 (diff)
xen: arm64: atomics
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
Diffstat (limited to 'xen/include/asm-arm/atomic.h')
-rw-r--r--  xen/include/asm-arm/atomic.h  186
1 file changed, 33 insertions(+), 153 deletions(-)
diff --git a/xen/include/asm-arm/atomic.h b/xen/include/asm-arm/atomic.h
index 3dc242741d..69c8f3f8ad 100644
--- a/xen/include/asm-arm/atomic.h
+++ b/xen/include/asm-arm/atomic.h
@@ -1,48 +1,49 @@
-/*
- * arch/arm/include/asm/atomic.h
- *
- * Copyright (C) 1996 Russell King.
- * Copyright (C) 2002 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
#ifndef __ARCH_ARM_ATOMIC__
#define __ARCH_ARM_ATOMIC__
#include <xen/config.h>
#include <asm/system.h>
-#define build_atomic_read(name, size, type, reg) \
+#define build_atomic_read(name, size, width, type, reg) \
static inline type name(const volatile type *addr) \
{ \
type ret; \
- asm volatile("ldr" size " %0,%1" \
+ asm volatile("ldr" size " %" width "0,%1" \
: reg (ret) \
: "m" (*(volatile type *)addr)); \
return ret; \
}
-#define build_atomic_write(name, size, type, reg) \
+#define build_atomic_write(name, size, width, type, reg) \
static inline void name(volatile type *addr, type val) \
{ \
- asm volatile("str" size " %1,%0" \
+ asm volatile("str" size " %"width"1,%0" \
: "=m" (*(volatile type *)addr) \
: reg (val)); \
}
-build_atomic_read(read_u8_atomic, "b", uint8_t, "=q")
-build_atomic_read(read_u16_atomic, "h", uint16_t, "=r")
-build_atomic_read(read_u32_atomic, "", uint32_t, "=r")
-//build_atomic_read(read_u64_atomic, "d", uint64_t, "=r")
-build_atomic_read(read_int_atomic, "", int, "=r")
-
-build_atomic_write(write_u8_atomic, "b", uint8_t, "q")
-build_atomic_write(write_u16_atomic, "h", uint16_t, "r")
-build_atomic_write(write_u32_atomic, "", uint32_t, "r")
-//build_atomic_write(write_u64_atomic, "d", uint64_t, "r")
-build_atomic_write(write_int_atomic, "", int, "r")
+#if defined (CONFIG_ARM_32)
+#define BYTE ""
+#define WORD ""
+#elif defined (CONFIG_ARM_64)
+#define BYTE "w"
+#define WORD "w"
+#endif
+
+build_atomic_read(read_u8_atomic, "b", BYTE, uint8_t, "=r")
+build_atomic_read(read_u16_atomic, "h", WORD, uint16_t, "=r")
+build_atomic_read(read_u32_atomic, "", WORD, uint32_t, "=r")
+build_atomic_read(read_int_atomic, "", WORD, int, "=r")
+
+build_atomic_write(write_u8_atomic, "b", BYTE, uint8_t, "r")
+build_atomic_write(write_u16_atomic, "h", WORD, uint16_t, "r")
+build_atomic_write(write_u32_atomic, "", WORD, uint32_t, "r")
+build_atomic_write(write_int_atomic, "", WORD, int, "r")
+
+#if 0 /* defined (CONFIG_ARM_64) */
+build_atomic_read(read_u64_atomic, "x", "", uint64_t, "=r")
+build_atomic_write(write_u64_atomic, "x", "", uint64_t, "r")
+#endif
void __bad_atomic_size(void);
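
[Editor's note, not part of the patch: the new "width" parameter exists because
on AArch64 the inline-asm template "%0" prints the 64-bit x view of a register,
so sub-64-bit loads and stores must use the "w" operand modifier ("%w0") to get
the 32-bit w view. A minimal free-standing sketch of what the arm64 expansion
of build_atomic_read(read_u16_atomic, "h", WORD, ...) amounts to, using a
hypothetical name (read_u16_example) and compiling only on aarch64:

#include <stdint.h>

static inline uint16_t read_u16_example(const volatile uint16_t *addr)
{
    uint16_t ret;
    /* token pasting: "ldr" "h" " %" "w" "0,%1"  ==>  "ldrh %w0,%1" */
    asm volatile("ldrh %w0,%1"   /* ldrh needs the 32-bit w register view */
                 : "=r" (ret)
                 : "m" (*(volatile uint16_t *)addr));
    return ret;
}

On CONFIG_ARM_32 both BYTE and WORD expand to "", so the template degenerates
to the old "ldrh %0,%1" form and the generated code is unchanged.]
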
@@ -88,134 +89,13 @@ typedef struct { int counter; } atomic_t;
#define _atomic_set(v,i) (((v).counter) = (i))
#define atomic_set(v,i) (((v)->counter) = (i))
-/*
- * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
- * store exclusive to ensure that these are atomic. We may loop
- * to ensure that the update happens.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- __asm__ __volatile__("@ atomic_add\n"
-"1: ldrex %0, [%3]\n"
-" add %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- smp_mb();
-
- __asm__ __volatile__("@ atomic_add_return\n"
-"1: ldrex %0, [%3]\n"
-" add %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
-
- smp_mb();
-
- return result;
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- __asm__ __volatile__("@ atomic_sub\n"
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- smp_mb();
-
- __asm__ __volatile__("@ atomic_sub_return\n"
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
-
- smp_mb();
-
- return result;
-}
-
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
-{
- unsigned long oldval, res;
-
- smp_mb();
-
- do {
- __asm__ __volatile__("@ atomic_cmpxchg\n"
- "ldrex %1, [%3]\n"
- "mov %0, #0\n"
- "teq %1, %4\n"
- "strexeq %0, %5, [%3]\n"
- : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
- : "r" (&ptr->counter), "Ir" (old), "r" (new)
- : "cc");
- } while (res);
-
- smp_mb();
-
- return oldval;
-}
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
- unsigned long tmp, tmp2;
-
- __asm__ __volatile__("@ atomic_clear_mask\n"
-"1: ldrex %0, [%3]\n"
-" bic %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
- : "r" (addr), "Ir" (mask)
- : "cc");
-}
-
-#define atomic_inc(v) atomic_add(1, v)
-#define atomic_dec(v) atomic_sub(1, v)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v) (atomic_add_return(1, v))
-#define atomic_dec_return(v) (atomic_sub_return(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/atomic.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/atomic.h>
+#else
+# error "unknown ARM variant"
+#endif
static inline atomic_t atomic_compareandswap(
atomic_t old, atomic_t new, atomic_t *v)
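
[Editor's note, not part of the patch: the ldrex/strex-based operations removed
above are not dropped; per the new include switch they presumably move into
asm/arm32/atomic.h, while asm/arm64/atomic.h supplies AArch64 versions built on
the ldxr/stxr exclusive pair. As a sketch only, assuming the arm64 header
follows the Linux-derived code of this era (the actual file may differ), the
counterpart of the removed atomic_add would look like:

static inline void atomic_add(int i, atomic_t *v)
{
    unsigned long tmp;
    int result;

    asm volatile("// atomic_add\n"
"1:     ldxr    %w0, %2\n"           /* load-exclusive the counter */
"       add     %w0, %w0, %w3\n"
"       stxr    %w1, %w0, %2\n"      /* store-exclusive; %w1 == 0 on success */
"       cbnz    %w1, 1b"             /* retry if the exclusive was lost */
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        : "Ir" (i)
        : "cc");
}

As in the removed arm32 code, the loop retries until the store-exclusive
succeeds, making the read-modify-write atomic without a lock.]
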