author     kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2007-08-29 14:40:00 +0100
committer  kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2007-08-29 14:40:00 +0100
commit     201b1e702b7b8ec39e18f7e0f82e9a117604628a (patch)
tree       89f3d38de0d6f69e85c41087ba65d21b13585dd4 /xen/include/asm-x86/system.h
parent     4e8e84110921c9129738c22dc3d539cb5e5efa36 (diff)
x86: Remove (most) Centaur CPU support. Only VIA C7 can work, as it
has CMOV support. Leave a small amount of centaur.c around to support
that. MTRR code goes entirely, as 686-class Centaur CPUs have generic
MTRR support.

Signed-off-by: Keir Fraser <keir@xensource.com>
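The "has CMOV support" distinction the message draws is the CMOV feature
flag reported by CPUID (leaf 1, EDX bit 15). As a minimal illustrative
sketch only (not the Xen detection code, and cpu_has_cmov is a hypothetical
helper), such a check could look like this:

/* Illustrative sketch, not from the patch: query the CMOV feature
 * flag (CPUID leaf 1, EDX bit 15) that separates the VIA C7 from
 * earlier, pre-CMOV Centaur parts. */
#include <cpuid.h>
#include <stdio.h>

static int cpu_has_cmov(void)
{
    unsigned int eax, ebx, ecx, edx;

    if ( !__get_cpuid(1, &eax, &ebx, &ecx, &edx) )
        return 0;               /* CPUID leaf 1 unavailable */
    return (edx >> 15) & 1;     /* EDX bit 15 == CMOV */
}

int main(void)
{
    printf("CMOV %ssupported\n", cpu_has_cmov() ? "" : "not ");
    return 0;
}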
Diffstat (limited to 'xen/include/asm-x86/system.h')
-rw-r--r--  xen/include/asm-x86/system.h | 26 --------------------------
1 file changed, 0 insertions(+), 26 deletions(-)
diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index 07ecdb2d54..44a2af5813 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -253,40 +253,14 @@ static always_inline unsigned long long __cmpxchg8b(
 })
 #endif
 
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- *
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPU's follow what Intel calls a *Processor Order*,
- * in which all writes are seen in the program order even
- * outside the CPU.
- *
- * I expect future Intel CPU's to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- *
- * Some non intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
 #if defined(__i386__)
 #define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
 #define rmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#ifdef CONFIG_X86_OOSTORE
-#define wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#endif
 #elif defined(__x86_64__)
 #define mb() __asm__ __volatile__ ("mfence":::"memory")
 #define rmb() __asm__ __volatile__ ("lfence":::"memory")
-#ifdef CONFIG_X86_OOSTORE
-#define wmb() __asm__ __volatile__ ("sfence":::"memory")
 #endif
-#endif
-
-#ifndef CONFIG_X86_OOSTORE
 #define wmb() __asm__ __volatile__ ("": : :"memory")
-#endif
 
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
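
For context, a minimal sketch (not part of the patch; producer/consumer
are hypothetical functions) of the publish/consume pattern these macros
support. On x86 a plain compiler barrier suffices for wmb() because
stores are not reordered against other stores, which is why the
CONFIG_X86_OOSTORE variants removed above are no longer needed:

/* Illustrative sketch, not from the patch: the classic message-passing
 * pattern that mb()/rmb()/wmb() order. */
static int payload;             /* data being handed over           */
static volatile int ready;      /* flag that publishes the data     */

void producer(void)
{
    payload = 42;               /* write the data...                */
    wmb();                      /* ...and order it before the flag; */
    ready = 1;                  /* on x86 this wmb() is a pure      */
}                               /* compiler barrier                 */

int consumer(void)
{
    while ( !ready )
        ;                       /* wait for the flag                */
    rmb();                      /* order the flag read before the   */
    return payload;             /* data read: guaranteed to see 42  */
}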