author     Keir Fraser <keir.fraser@citrix.com>    2009-12-01 14:02:00 +0000
committer  Keir Fraser <keir.fraser@citrix.com>    2009-12-01 14:02:00 +0000
commit     95b697252f765f9433c45f7115ed9ce0abcbf170 (patch)
tree       20d54c39a5b0fe47b65e9c5f94382faa3f7bfc37 /xen/arch/x86/x86_64/entry.S
parent     22b984ae9a41041ecbab3eb487de75e3f1ffb592 (diff)
x86: fix MCE/NMI injection
This attempts to address all the concerns raised in http://lists.xensource.com/archives/html/xen-devel/2009-11/msg01195.html, but I'm nevertheless still not convinced that all aspects of the injection handling really work reliably.

In particular, while the patch here, on top of the fixes for the problems mentioned in the referenced mail, also adds code to keep send_guest_trap() from injecting multiple events at a time, I don't think this is the right mechanism - it should be possible to handle NMI/MCE nested within each other.

Another fix on top of the ones for the earlier described problems is that the vCPU affinity restore logic didn't account for software-injected NMIs - these never set cpu_affinity_tmp, but since that field was most likely different from cpu_affinity, the affinity would nevertheless have been restored (to a potentially random value).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
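For context on the entry.S changes below: the patch replaces the old single trap-priority word with a per-vCPU byte, async_exception_mask, holding one bit per asynchronous event type, plus a saved copy of the prior mask that the iret hypercall later restores. The C fragment below is only an illustrative sketch of that scheme; the struct layout and the try_inject() helper are invented for illustration and are not code from this patch.

/* Illustrative sketch only -- not taken from the patch. */
#include <stdbool.h>
#include <stdint.h>

#define VCPU_TRAP_NMI 1
#define VCPU_TRAP_MCE 2

struct async_state {
    uint8_t async_exception_mask;  /* bit (1 << trapnr) set while that event is masked */
    uint8_t nmi_old_mask;          /* mask saved at NMI injection, for the iret hypercall */
    uint8_t mce_old_mask;          /* mask saved at MCE injection, for the iret hypercall */
    bool nmi_pending, mce_pending;
};

/* Roughly what the new process_nmi/process_mce paths do, expressed in C. */
static bool try_inject(struct async_state *s, int trapnr)
{
    uint8_t bit = 1 << trapnr;

    if ( s->async_exception_mask & bit )     /* testb ...; jnz test_guest_events */
        return false;                        /* event of this type still masked */

    /*
     * The real assembly also calls set_guest_{nmi,machinecheck}_trapbounce()
     * here and only updates the mask if that setup succeeds.
     */
    if ( trapnr == VCPU_TRAP_NMI )
    {
        s->nmi_pending = false;              /* movb $0,VCPU_nmi_pending(%rbx) */
        s->nmi_old_mask = s->async_exception_mask;
    }
    else
    {
        s->mce_pending = false;              /* movb $0,VCPU_mce_pending(%rbx) */
        s->mce_old_mask = s->async_exception_mask;
    }

    s->async_exception_mask |= bit;          /* orl $1 << trapnr into the mask */
    return true;                             /* fall through to process_trap */
}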
Diffstat (limited to 'xen/arch/x86/x86_64/entry.S')
-rw-r--r--  xen/arch/x86/x86_64/entry.S  |  22
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index f0924d5783..4cb902ec09 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -234,31 +234,33 @@ process_softirqs:
         ALIGN
 /* %rbx: struct vcpu */
 process_mce:
-        cmpw $VCPU_TRAP_MCE,VCPU_trap_priority(%rbx)
-        jae  test_guest_events
+        testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
+        jnz  test_guest_events
         sti
         movb $0,VCPU_mce_pending(%rbx)
         call set_guest_machinecheck_trapbounce
         test %eax,%eax
         jz   test_all_events
-        movw VCPU_trap_priority(%rbx),%dx           # safe priority for the
-        movw %dx,VCPU_old_trap_priority(%rbx)       # iret hypercall
-        movw $VCPU_TRAP_MCE,VCPU_trap_priority(%rbx)
+        movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
+        movb %dl,VCPU_mce_old_mask(%rbx)            # iret hypercall
+        orl  $1 << VCPU_TRAP_MCE,%edx
+        movb %dl,VCPU_async_exception_mask(%rbx)
         jmp  process_trap
 
         ALIGN
 /* %rbx: struct vcpu */
 process_nmi:
-        cmpw $VCPU_TRAP_NMI,VCPU_trap_priority(%rbx)
-        jae  test_guest_events
+        testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
+        jnz  test_guest_events
         sti
         movb $0,VCPU_nmi_pending(%rbx)
         call set_guest_nmi_trapbounce
         test %eax,%eax
         jz   test_all_events
-        movw VCPU_trap_priority(%rbx),%dx           # safe priority for the
-        movw %dx,VCPU_old_trap_priority(%rbx)       # iret hypercall
-        movw $VCPU_TRAP_NMI,VCPU_trap_priority(%rbx)
+        movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
+        movb %dl,VCPU_nmi_old_mask(%rbx)            # iret hypercall
+        orl  $1 << VCPU_TRAP_NMI,%edx
+        movb %dl,VCPU_async_exception_mask(%rbx)
         /* FALLTHROUGH */
 process_trap:
         leaq VCPU_trap_bounce(%rbx),%rdx
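The VCPU_nmi_old_mask / VCPU_mce_old_mask slots written above are consumed when the guest's NMI or MCE handler returns via the iret hypercall, which must undo only the mask bit this injection set. That restore path lives outside this file; the fragment below is a hedged sketch of the intended behaviour, with restore_async_mask() being a hypothetical name used for illustration rather than a function this patch touches.

/* Hypothetical sketch of the iret-side restore -- not the patch's code. */
#include <stdint.h>

static void restore_async_mask(uint8_t *mask, uint8_t old_mask, int trapnr)
{
    uint8_t bit = 1 << trapnr;

    /*
     * Drop only the bit this injection set, then fold back its saved value.
     * Copying old_mask wholesale could clobber bits that a nested NMI/MCE
     * delivery set in the meantime -- the commit message's point about
     * nesting not yet being handled cleanly.
     */
    *mask = (*mask & ~bit) | (old_mask & bit);
}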