about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Keir Fraser <keir@xen.org>    2011-11-25 13:31:58 +0000
committer Keir Fraser <keir@xen.org>    2011-11-25 13:31:58 +0000
commit   5e0f79980e4ace10de74923f604ba523ede0ef90 (patch)
tree     7bff8f2967eeb1e1ebc2264d54306309a7a924a4
parent   f73d1538994a1042069648a069e6688f0e290dbc (diff)
download xen-5e0f79980e4ace10de74923f604ba523ede0ef90.tar.gz
download xen-5e0f79980e4ace10de74923f604ba523ede0ef90.tar.bz2
download xen-5e0f79980e4ace10de74923f604ba523ede0ef90.zip
atomic: Define {read,write}_atomic() for reading/writing memory atomically.
Signed-off-by: Keir Fraser <keir@xen.org>
-rw-r--r--  xen/arch/x86/mm.c                              2
-rw-r--r--  xen/arch/x86/mm/p2m-ept.c                      4
-rw-r--r--  xen/arch/x86/x86_32/seg_fixup.c                2
-rw-r--r--  xen/common/timer.c                            14
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/atomic.h   65
-rw-r--r--  xen/include/asm-x86/atomic.h                  60
-rw-r--r--  xen/include/asm-x86/x86_32/page.h             18
-rw-r--r--  xen/include/asm-x86/x86_64/page.h              6
8 files changed, 110 insertions(+), 61 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 8b4ab90d21..b00c277ac9 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4629,7 +4629,7 @@ long do_update_descriptor(u64 pa, u64 desc)
/* All is good so make the update. */
gdt_pent = map_domain_page(mfn);
- atomic_write64((uint64_t *)&gdt_pent[offset], *(uint64_t *)&d);
+ write_atomic((uint64_t *)&gdt_pent[offset], *(uint64_t *)&d);
unmap_domain_page(gdt_pent);
put_page_type(page);
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 9ccf14617f..a994364cfd 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -35,9 +35,9 @@
#include "mm-locks.h"
#define atomic_read_ept_entry(__pepte) \
- ( (ept_entry_t) { .epte = atomic_read64(&(__pepte)->epte) } )
+ ( (ept_entry_t) { .epte = read_atomic(&(__pepte)->epte) } )
#define atomic_write_ept_entry(__pepte, __epte) \
- atomic_write64(&(__pepte)->epte, (__epte).epte)
+ write_atomic(&(__pepte)->epte, (__epte).epte)
#define is_epte_present(ept_entry) ((ept_entry)->epte & 0x7)
#define is_epte_superpage(ept_entry) ((ept_entry)->sp)
diff --git a/xen/arch/x86/x86_32/seg_fixup.c b/xen/arch/x86/x86_32/seg_fixup.c
index 13cf50e688..7baa963afb 100644
--- a/xen/arch/x86/x86_32/seg_fixup.c
+++ b/xen/arch/x86/x86_32/seg_fixup.c
@@ -314,7 +314,7 @@ static int fixup_seg(u16 seg, unsigned long offset)
b &= ~0xf0000; b |= limit & 0xf0000;
b ^= _SEGMENT_EC; /* grows-up <-> grows-down */
/* NB. This can't fault. Checked readable above; must also be writable. */
- atomic_write64((uint64_t *)&table[2*idx], ((uint64_t)b<<32) | a);
+ write_atomic((uint64_t *)&table[2*idx], ((uint64_t)b<<32) | a);
return 1;
}
diff --git a/xen/common/timer.c b/xen/common/timer.c
index 0547ea31a7..0dd2476483 100644
--- a/xen/common/timer.c
+++ b/xen/common/timer.c
@@ -239,7 +239,7 @@ static inline bool_t timer_lock(struct timer *timer)
for ( ; ; )
{
- cpu = atomic_read16(&timer->cpu);
+ cpu = read_atomic(&timer->cpu);
if ( unlikely(cpu == TIMER_CPU_status_killed) )
{
rcu_read_unlock(&timer_cpu_read_lock);
@@ -292,7 +292,7 @@ void init_timer(
memset(timer, 0, sizeof(*timer));
timer->function = function;
timer->data = data;
- atomic_write16(&timer->cpu, cpu);
+ write_atomic(&timer->cpu, cpu);
timer->status = TIMER_STATUS_inactive;
if ( !timer_lock_irqsave(timer, flags) )
BUG();
@@ -343,7 +343,7 @@ void migrate_timer(struct timer *timer, unsigned int new_cpu)
for ( ; ; )
{
- old_cpu = atomic_read16(&timer->cpu);
+ old_cpu = read_atomic(&timer->cpu);
if ( (old_cpu == new_cpu) || (old_cpu == TIMER_CPU_status_killed) )
{
rcu_read_unlock(&timer_cpu_read_lock);
@@ -375,7 +375,7 @@ void migrate_timer(struct timer *timer, unsigned int new_cpu)
deactivate_timer(timer);
list_del(&timer->inactive);
- atomic_write16(&timer->cpu, new_cpu);
+ write_atomic(&timer->cpu, new_cpu);
list_add(&timer->inactive, &per_cpu(timers, new_cpu).inactive);
if ( active )
@@ -402,7 +402,7 @@ void kill_timer(struct timer *timer)
list_del(&timer->inactive);
timer->status = TIMER_STATUS_killed;
old_cpu = timer->cpu;
- atomic_write16(&timer->cpu, TIMER_CPU_status_killed);
+ write_atomic(&timer->cpu, TIMER_CPU_status_killed);
spin_unlock_irqrestore(&per_cpu(timers, old_cpu).lock, flags);
@@ -573,7 +573,7 @@ static void migrate_timers_from_cpu(unsigned int old_cpu)
? old_ts->heap[1] : old_ts->list) != NULL )
{
remove_entry(t);
- atomic_write16(&t->cpu, new_cpu);
+ write_atomic(&t->cpu, new_cpu);
notify |= add_entry(t);
}
@@ -581,7 +581,7 @@ static void migrate_timers_from_cpu(unsigned int old_cpu)
{
t = list_entry(old_ts->inactive.next, struct timer, inactive);
list_del(&t->inactive);
- atomic_write16(&t->cpu, new_cpu);
+ write_atomic(&t->cpu, new_cpu);
list_add(&t->inactive, &new_ts->inactive);
}
diff --git a/xen/include/asm-ia64/linux-xen/asm/atomic.h b/xen/include/asm-ia64/linux-xen/asm/atomic.h
index 6849181eb2..c006ae28ac 100644
--- a/xen/include/asm-ia64/linux-xen/asm/atomic.h
+++ b/xen/include/asm-ia64/linux-xen/asm/atomic.h
@@ -39,8 +39,8 @@ typedef struct { volatile __s64 counter; } atomic64_t;
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
-#define build_atomic_read(tag, type) \
-static inline type atomic_read##tag(const volatile type *addr) \
+#define build_read_atomic(tag, type) \
+static inline type read_##tag##_atomic(const volatile type *addr) \
{ \
type ret; \
asm volatile("ld%2.acq %0 = %1" \
@@ -49,37 +49,62 @@ static inline type atomic_read##tag(const volatile type *addr) \
return ret; \
}
-#define build_atomic_write(tag, type) \
-static inline void atomic_write##tag(volatile type *addr, type val) \
+#define build_write_atomic(tag, type) \
+static inline void write_##tag##_atomic(volatile type *addr, type val) \
{ \
asm volatile("st%2.rel %0 = %1" \
: "=m" (*addr) \
: "r" (val), "i" (sizeof(type))); \
}
-build_atomic_read(8, uint8_t)
-build_atomic_read(16, uint16_t)
-build_atomic_read(32, uint32_t)
-build_atomic_read(64, uint64_t)
-build_atomic_read(_int, int)
-build_atomic_read(_long, long)
+build_read_atomic(u8, uint8_t)
+build_read_atomic(u16, uint16_t)
+build_read_atomic(u32, uint32_t)
+build_read_atomic(u64, uint64_t)
+
+build_write_atomic(u8, uint8_t)
+build_write_atomic(u16, uint16_t)
+build_write_atomic(u32, uint32_t)
+build_write_atomic(u64, uint64_t)
+
+#undef build_read_atomic
+#undef build_write_atomic
+
+void __bad_atomic_size(void);
+
+#define read_atomic(p) ({ \
+ typeof(*p) __x; \
+ switch ( sizeof(*p) ) { \
+ case 1: __x = (typeof(*p))read_u8_atomic((uint8_t *)p); break; \
+ case 2: __x = (typeof(*p))read_u16_atomic((uint16_t *)p); break; \
+ case 4: __x = (typeof(*p))read_u32_atomic((uint32_t *)p); break; \
+ case 8: __x = (typeof(*p))read_u64_atomic((uint64_t *)p); break; \
+ default: __x = 0; __bad_atomic_size(); break; \
+ } \
+ __x; \
+})
-build_atomic_write(8, uint8_t)
-build_atomic_write(16, uint16_t)
-build_atomic_write(32, uint32_t)
-build_atomic_write(64, uint64_t)
-build_atomic_write(_int, int)
-build_atomic_write(_long, long)
+#define write_atomic(p, x) ({ \
+ typeof(*p) __x = (x); \
+ switch ( sizeof(*p) ) { \
+ case 1: write_u8_atomic((uint8_t *)p, (uint8_t)__x); break; \
+ case 2: write_u16_atomic((uint16_t *)p, (uint16_t)__x); break; \
+ case 4: write_u32_atomic((uint32_t *)p, (uint32_t)__x); break; \
+ case 8: write_u64_atomic((uint64_t *)p, (uint64_t)__x); break; \
+ default: __bad_atomic_size(); break; \
+ } \
+ __x; \
+})
#define _atomic_read(v) ((v).counter)
#define _atomic64_read(v) ((v).counter)
-#define atomic_read(v) atomic_read_int(&((v)->counter))
-#define atomic64_read(v) atomic_read_long(&((v)->counter))
+#define atomic_read(v) read_atomic(&((v)->counter))
+#define atomic64_read(v) read_atomic(&((v)->counter))
#define _atomic_set(v,i) (((v).counter) = (i))
#define _atomic64_set(v,i) (((v).counter) = (i))
-#define atomic_set(v,i) atomic_write_int(&((v)->counter), i)
-#define atomic64_set(v,l) atomic_write_long(&((v)->counter), l)
+#define atomic_set(v,i) write_atomic(&((v)->counter), i)
+#define atomic64_set(v,l) write_atomic(&((v)->counter), l)
#endif
diff --git a/xen/include/asm-x86/atomic.h b/xen/include/asm-x86/atomic.h
index 614a2aef51..d13277931b 100644
--- a/xen/include/asm-x86/atomic.h
+++ b/xen/include/asm-x86/atomic.h
@@ -4,36 +4,34 @@
#include <xen/config.h>
#include <asm/system.h>
-#define build_atomic_read(name, size, type, reg, barrier) \
+#define build_read_atomic(name, size, type, reg, barrier) \
static inline type name(const volatile type *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
:"m" (*(volatile type *)addr) barrier); return ret; }
-#define build_atomic_write(name, size, type, reg, barrier) \
+#define build_write_atomic(name, size, type, reg, barrier) \
static inline void name(volatile type *addr, type val) \
{ asm volatile("mov" size " %1,%0": "=m" (*(volatile type *)addr) \
:reg (val) barrier); }
-build_atomic_read(atomic_read8, "b", uint8_t, "=q", )
-build_atomic_read(atomic_read16, "w", uint16_t, "=r", )
-build_atomic_read(atomic_read32, "l", uint32_t, "=r", )
-build_atomic_read(atomic_read_int, "l", int, "=r", )
+build_read_atomic(read_u8_atomic, "b", uint8_t, "=q", )
+build_read_atomic(read_u16_atomic, "w", uint16_t, "=r", )
+build_read_atomic(read_u32_atomic, "l", uint32_t, "=r", )
-build_atomic_write(atomic_write8, "b", uint8_t, "q", )
-build_atomic_write(atomic_write16, "w", uint16_t, "r", )
-build_atomic_write(atomic_write32, "l", uint32_t, "r", )
-build_atomic_write(atomic_write_int, "l", int, "r", )
+build_write_atomic(write_u8_atomic, "b", uint8_t, "q", )
+build_write_atomic(write_u16_atomic, "w", uint16_t, "r", )
+build_write_atomic(write_u32_atomic, "l", uint32_t, "r", )
#ifdef __x86_64__
-build_atomic_read(atomic_read64, "q", uint64_t, "=r", )
-build_atomic_write(atomic_write64, "q", uint64_t, "r", )
+build_read_atomic(read_u64_atomic, "q", uint64_t, "=r", )
+build_write_atomic(write_u64_atomic, "q", uint64_t, "r", )
#else
-static inline uint64_t atomic_read64(const volatile uint64_t *addr)
+static inline uint64_t read_u64_atomic(const volatile uint64_t *addr)
{
uint64_t *__addr = (uint64_t *)addr;
return __cmpxchg8b(__addr, 0, 0);
}
-static inline void atomic_write64(volatile uint64_t *addr, uint64_t val)
+static inline void write_u64_atomic(volatile uint64_t *addr, uint64_t val)
{
uint64_t old = *addr, new, *__addr = (uint64_t *)addr;
while ( (new = __cmpxchg8b(__addr, old, val)) != old )
@@ -41,8 +39,34 @@ static inline void atomic_write64(volatile uint64_t *addr, uint64_t val)
}
#endif
-#undef build_atomic_read
-#undef build_atomic_write
+#undef build_read_atomic
+#undef build_write_atomic
+
+void __bad_atomic_size(void);
+
+#define read_atomic(p) ({ \
+ typeof(*p) __x; \
+ switch ( sizeof(*p) ) { \
+ case 1: __x = (typeof(*p))read_u8_atomic((uint8_t *)p); break; \
+ case 2: __x = (typeof(*p))read_u16_atomic((uint16_t *)p); break; \
+ case 4: __x = (typeof(*p))read_u32_atomic((uint32_t *)p); break; \
+ case 8: __x = (typeof(*p))read_u64_atomic((uint64_t *)p); break; \
+ default: __x = 0; __bad_atomic_size(); break; \
+ } \
+ __x; \
+})
+
+#define write_atomic(p, x) ({ \
+ typeof(*p) __x = (x); \
+ switch ( sizeof(*p) ) { \
+ case 1: write_u8_atomic((uint8_t *)p, (uint8_t)__x); break; \
+ case 2: write_u16_atomic((uint16_t *)p, (uint16_t)__x); break; \
+ case 4: write_u32_atomic((uint32_t *)p, (uint32_t)__x); break; \
+ case 8: write_u64_atomic((uint64_t *)p, (uint64_t)__x); break; \
+ default: __bad_atomic_size(); break; \
+ } \
+ __x; \
+})
/*
* NB. I've pushed the volatile qualifier into the operations. This allows
@@ -60,7 +84,7 @@ typedef struct { int counter; } atomic_t;
* Atomically reads the value of @v.
*/
#define _atomic_read(v) ((v).counter)
-#define atomic_read(v) atomic_read_int(&((v)->counter))
+#define atomic_read(v) read_atomic(&((v)->counter))
/**
* atomic_set - set atomic variable
@@ -70,7 +94,7 @@ typedef struct { int counter; } atomic_t;
* Atomically sets the value of @v to @i.
*/
#define _atomic_set(v,i) (((v).counter) = (i))
-#define atomic_set(v,i) atomic_write_int(&((v)->counter), (i))
+#define atomic_set(v,i) write_atomic(&((v)->counter), (i))
/**
* atomic_add - add integer to atomic variable
diff --git a/xen/include/asm-x86/x86_32/page.h b/xen/include/asm-x86/x86_32/page.h
index 8dd4310b27..9b2c7733cf 100644
--- a/xen/include/asm-x86/x86_32/page.h
+++ b/xen/include/asm-x86/x86_32/page.h
@@ -85,15 +85,15 @@ extern unsigned int PAGE_HYPERVISOR_NOCACHE;
#endif
-#define pte_read_atomic(ptep) atomic_read64(ptep)
-#define pte_write_atomic(ptep, pte) atomic_write64(ptep, pte)
-#define pte_write(ptep, pte) do { \
- u32 *__ptep_words = (u32 *)(ptep); \
- atomic_write32(&__ptep_words[0], 0); \
- wmb(); \
- atomic_write32(&__ptep_words[1], (pte) >> 32); \
- wmb(); \
- atomic_write32(&__ptep_words[0], (pte) >> 0); \
+#define pte_read_atomic(ptep) read_atomic(ptep)
+#define pte_write_atomic(ptep, pte) write_atomic(ptep, pte)
+#define pte_write(ptep, pte) do { \
+ u32 *__ptep_words = (u32 *)(ptep); \
+ write_atomic(&__ptep_words[0], 0); \
+ wmb(); \
+ write_atomic(&__ptep_words[1], (pte) >> 32); \
+ wmb(); \
+ write_atomic(&__ptep_words[0], (pte) >> 0); \
} while ( 0 )
/* root table */
diff --git a/xen/include/asm-x86/x86_64/page.h b/xen/include/asm-x86/x86_64/page.h
index 82397c4d60..2f46ba2c5a 100644
--- a/xen/include/asm-x86/x86_64/page.h
+++ b/xen/include/asm-x86/x86_64/page.h
@@ -116,9 +116,9 @@ typedef l4_pgentry_t root_pgentry_t;
#endif /* !__ASSEMBLY__ */
-#define pte_read_atomic(ptep) atomic_read64(ptep)
-#define pte_write_atomic(ptep, pte) atomic_write64(ptep, pte)
-#define pte_write(ptep, pte) atomic_write64(ptep, pte)
+#define pte_read_atomic(ptep) read_atomic(ptep)
+#define pte_write_atomic(ptep, pte) write_atomic(ptep, pte)
+#define pte_write(ptep, pte) write_atomic(ptep, pte)
/* Given a virtual address, get an entry offset into a linear page table. */
#define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> L1_PAGETABLE_SHIFT)