about summary refs log tree commit diff stats
path: root/xen/arch/x86/x86_64
diff options
context:
space:
mode:
author: Jan Beulich <jbeulich@suse.com> 2013-03-04 10:19:34 +0100
committer: Jan Beulich <jbeulich@suse.com> 2013-03-04 10:19:34 +0100
commit: d463b005bbd6475ed930a302821efe239e1b2cf9 (patch)
tree: 32515ad83274734890878d6f995b288c35b26efd /xen/arch/x86/x86_64
parent: e6a6fd63652814e5c36a0016c082032f798ced1f (diff)
download: xen-d463b005bbd6475ed930a302821efe239e1b2cf9.tar.gz
          xen-d463b005bbd6475ed930a302821efe239e1b2cf9.tar.bz2
          xen-d463b005bbd6475ed930a302821efe239e1b2cf9.zip
x86: defer processing events on the NMI exit path
Otherwise, we may end up in the scheduler, keeping NMIs masked for a possibly unbounded period of time (until whenever the next IRET gets executed). Enforce timely event processing by sending a self IPI.

Of course it's open for discussion whether to always use the straight exit path from handle_ist_exception.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
Diffstat (limited to 'xen/arch/x86/x86_64')
-rw-r--r-- xen/arch/x86/x86_64/compat/entry.S |  2
-rw-r--r-- xen/arch/x86/x86_64/entry.S        | 27
2 files changed, 23 insertions, 6 deletions
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 7051f90213..7769019e27 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -171,7 +171,7 @@ compat_bad_hypercall:
jmp compat_test_all_events
/* %rbx: struct vcpu, interrupts disabled */
-compat_restore_all_guest:
+ENTRY(compat_restore_all_guest)
ASSERT_INTERRUPTS_DISABLED
RESTORE_ALL adj=8 compat=1
.Lft0: iretq
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 408c348696..82d9881945 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -11,6 +11,7 @@
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>
+#include <irq_vectors.h>
ALIGN
/* %rbx: struct vcpu */
@@ -615,6 +616,9 @@ ENTRY(early_page_fault)
jmp restore_all_xen
.popsection
+ENTRY(nmi)
+ pushq $0
+ movl $TRAP_nmi,4(%rsp)
handle_ist_exception:
SAVE_ALL
testb $3,UREGS_cs(%rsp)
@@ -629,12 +633,25 @@ handle_ist_exception:
movzbl UREGS_entry_vector(%rsp),%eax
leaq exception_table(%rip),%rdx
callq *(%rdx,%rax,8)
- jmp ret_from_intr
+ cmpb $TRAP_nmi,UREGS_entry_vector(%rsp)
+ jne ret_from_intr
-ENTRY(nmi)
- pushq $0
- movl $TRAP_nmi,4(%rsp)
- jmp handle_ist_exception
+ /* We want to get straight to the IRET on the NMI exit path. */
+ testb $3,UREGS_cs(%rsp)
+ jz restore_all_xen
+ GET_CURRENT(%rbx)
+ /* Send an IPI to ourselves to cover for the lack of event checking. */
+ movl VCPU_processor(%rbx),%eax
+ shll $IRQSTAT_shift,%eax
+ leaq irq_stat(%rip),%rcx
+ cmpl $0,(%rcx,%rax,1)
+ je 1f
+ movl $EVENT_CHECK_VECTOR,%edi
+ call send_IPI_self
+1: movq VCPU_domain(%rbx),%rax
+ cmpb $0,DOMAIN_is_32bit_pv(%rax)
+ je restore_all_guest
+ jmp compat_restore_all_guest
ENTRY(nmi_crash)
pushq $0