path: root/xen/include/asm-x86/system.h
author     Keir Fraser <keir.fraser@citrix.com>    2009-12-01 14:02:00 +0000
committer  Keir Fraser <keir.fraser@citrix.com>    2009-12-01 14:02:00 +0000
commit     95b697252f765f9433c45f7115ed9ce0abcbf170
tree       20d54c39a5b0fe47b65e9c5f94382faa3f7bfc37   /xen/include/asm-x86/system.h
parent     22b984ae9a41041ecbab3eb487de75e3f1ffb592
x86: fix MCE/NMI injection
This attempts to address all the concerns raised in
http://lists.xensource.com/archives/html/xen-devel/2009-11/msg01195.html,
but I'm nevertheless still not convinced that all aspects of the injection
handling really work reliably.

In particular, while the patch here, on top of the fixes for the problems
mentioned in the referenced mail, also adds code to keep send_guest_trap()
from injecting multiple events at a time, I don't think this is the right
mechanism - it should be possible to handle NMI/MCE nested within each other.

Another fix on top of the ones for the earlier described problems is that
the vCPU affinity restore logic didn't account for software-injected NMIs -
these never set cpu_affinity_tmp, but since that value most likely differs
from cpu_affinity, the affinity would nevertheless have been restored (to a
potentially random value).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
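As an aside for readers of the description above, the sketch below models the
serialization idea in plain C11. All names here (vcpu_async_event,
claim_injection, complete_injection) are invented for illustration and are not
part of Xen; this is only a minimal model of "claim a per-vCPU slot and refuse
a second injection while one is in flight", under the assumption that a
compare-and-exchange on a pointer is used as the claim.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct vcpu_async_event {
    _Atomic(void *) pending_source;   /* NULL while no injection is in flight */
};

/* Returns true iff the caller won the right to inject on this vCPU. */
static bool claim_injection(struct vcpu_async_event *ev, void *source)
{
    void *expected = NULL;
    /* Succeeds only if no event is currently pending. */
    return atomic_compare_exchange_strong(&ev->pending_source,
                                          &expected, source);
}

/* The winner clears the slot once the guest has consumed the event. */
static void complete_injection(struct vcpu_async_event *ev)
{
    atomic_store(&ev->pending_source, NULL);
}

The header change shown in the diff below adds a cmpxchgptr() wrapper around
the existing __cmpxchg() primitive for pointer operands; plausibly that is the
building block used for this kind of serialization, though only the header
part of the patch is shown here.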
Diffstat (limited to 'xen/include/asm-x86/system.h')
-rw-r--r--  xen/include/asm-x86/system.h  |  7 +++++++
1 file changed, 7 insertions, 0 deletions
diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index ced68d0633..52816da1c9 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -134,6 +134,13 @@ static always_inline unsigned long __cmpxchg(
#define __HAVE_ARCH_CMPXCHG
+#define cmpxchgptr(ptr,o,n) ({ \
+ const __typeof__(**(ptr)) *__o = (o); \
+ __typeof__(**(ptr)) *__n = (n); \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)__o, \
+ (unsigned long)__n,sizeof(*(ptr)))); \
+})
+
/*
* Both Intel and AMD agree that, from a programmer's viewpoint:
* Loads cannot be reordered relative to other loads.
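A hedged usage sketch of the new cmpxchgptr() macro follows. The struct and
function names are invented for illustration and assume a Xen source file that
already includes this header; the point is simply that cmpxchgptr() returns
the previous pointer value, so comparing it against the expected old value
tells the caller whether the exchange took place.

/* Illustrative only: install "buf" as the active buffer iff none is set. */
struct log_buffer {
    char data[64];
};

struct logger {
    struct log_buffer *active;   /* NULL while no buffer is installed */
};

static struct log_buffer *install_buffer(struct logger *lg,
                                         struct log_buffer *buf)
{
    /* A NULL return value means our buffer was installed; any other
     * value is the buffer that was already active. */
    return cmpxchgptr(&lg->active, NULL, buf);
}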