about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2007-11-21 14:36:07 +0000
committerKeir Fraser <keir.fraser@citrix.com>2007-11-21 14:36:07 +0000
commit539c329f1f77c98963bbc88bb3144d0128261bfa (patch)
tree78dff2ab2967c358696cf9aa0410e258165560d5
parent07dd618798bf97a19e5632329f68422378e0ea99 (diff)
downloadxen-539c329f1f77c98963bbc88bb3144d0128261bfa.tar.gz
xen-539c329f1f77c98963bbc88bb3144d0128261bfa.tar.bz2
xen-539c329f1f77c98963bbc88bb3144d0128261bfa.zip
x86: rmb() can be weakened according to new Intel spec.
Both Intel and AMD agree that, from a programmer's viewpoint: Loads cannot be reordered relative to other loads. Stores cannot be reordered relative to other stores. Intel64 Architecture Memory Ordering White Paper <http://developer.intel.com/products/processor/manuals/318147.pdf> AMD64 Architecture Programmer's Manual, Volume 2: System Programming <http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/24593.pdf> Signed-off-by: Keir Fraser <keir.fraser@eu.citrix.com>
-rw-r--r--xen/include/asm-x86/system.h15
-rw-r--r--xen/include/asm-x86/x86_32/system.h5
-rw-r--r--xen/include/asm-x86/x86_64/system.h5
3 files changed, 19 insertions, 6 deletions
diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index 1217002643..c257513dad 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -135,6 +135,21 @@ static always_inline unsigned long __cmpxchg(
#define __HAVE_ARCH_CMPXCHG
+/*
+ * Both Intel and AMD agree that, from a programmer's viewpoint:
+ * Loads cannot be reordered relative to other loads.
+ * Stores cannot be reordered relative to other stores.
+ *
+ * Intel64 Architecture Memory Ordering White Paper
+ * <http://developer.intel.com/products/processor/manuals/318147.pdf>
+ *
+ * AMD64 Architecture Programmer's Manual, Volume 2: System Programming
+ * <http://www.amd.com/us-en/assets/content_type/\
+ * white_papers_and_tech_docs/24593.pdf>
+ */
+#define rmb() barrier()
+#define wmb() barrier()
+
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
diff --git a/xen/include/asm-x86/x86_32/system.h b/xen/include/asm-x86/x86_32/system.h
index cf28258d39..5707af8e86 100644
--- a/xen/include/asm-x86/x86_32/system.h
+++ b/xen/include/asm-x86/x86_32/system.h
@@ -98,9 +98,8 @@ static inline void atomic_write64(uint64_t *p, uint64_t v)
w = x;
}
-#define mb() asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
-#define rmb() asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
-#define wmb() asm volatile ( "" : : : "memory" )
+#define mb() \
+ asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
#define __save_flags(x) \
asm volatile ( "pushfl ; popl %0" : "=g" (x) : )
diff --git a/xen/include/asm-x86/x86_64/system.h b/xen/include/asm-x86/x86_64/system.h
index 30902a01bc..229fc15292 100644
--- a/xen/include/asm-x86/x86_64/system.h
+++ b/xen/include/asm-x86/x86_64/system.h
@@ -52,9 +52,8 @@ static inline void atomic_write64(uint64_t *p, uint64_t v)
*p = v;
}
-#define mb() asm volatile ( "mfence" : : : "memory" )
-#define rmb() asm volatile ( "lfence" : : : "memory" )
-#define wmb() asm volatile ( "" : : : "memory" )
+#define mb() \
+ asm volatile ( "mfence" : : : "memory" )
#define __save_flags(x) \
asm volatile ( "pushfq ; popq %q0" : "=g" (x) : :"memory" )