about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorKeir Fraser <keir@xensource.com>2007-10-22 13:04:32 +0100
committerKeir Fraser <keir@xensource.com>2007-10-22 13:04:32 +0100
commita2308fa704a40f23916a176d9e06bbc0e3469caf (patch)
tree6e7e9254544315716f14e9bebb86879787a92d32
parentd420576eb51d53affb6aeda89c43d7d83b4d77d8 (diff)
downloadxen-a2308fa704a40f23916a176d9e06bbc0e3469caf.tar.gz
xen-a2308fa704a40f23916a176d9e06bbc0e3469caf.tar.bz2
xen-a2308fa704a40f23916a176d9e06bbc0e3469caf.zip
x86: Allow NMI callback CS to be specified via set_trap_table()
hypercall. Based on a patch by Jan Beulich. Signed-off-by: Keir Fraser <keir@xensource.com>
-rw-r--r--xen/arch/x86/traps.c37
-rw-r--r--xen/arch/x86/x86_32/asm-offsets.c5
-rw-r--r--xen/arch/x86/x86_32/entry.S6
-rw-r--r--xen/arch/x86/x86_64/asm-offsets.c5
-rw-r--r--xen/arch/x86/x86_64/compat/entry.S6
-rw-r--r--xen/arch/x86/x86_64/compat/traps.c6
-rw-r--r--xen/common/kernel.c34
-rw-r--r--xen/include/asm-ia64/linux-null/asm/nmi.h8
-rw-r--r--xen/include/asm-powerpc/nmi.h3
-rw-r--r--xen/include/asm-x86/nmi.h15
-rw-r--r--xen/include/xen/nmi.h15
-rw-r--r--xen/include/xen/sched.h2
12 files changed, 84 insertions(+), 58 deletions(-)
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 6d085784e2..8ccbeee2c6 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2229,6 +2229,37 @@ void __init trap_init(void)
open_softirq(NMI_SOFTIRQ, nmi_softirq);
}
+long register_guest_nmi_callback(unsigned long address)
+{
+ struct vcpu *v = current;
+ struct domain *d = current->domain;
+ struct trap_info *t = &v->arch.guest_context.trap_ctxt[TRAP_nmi];
+
+ t->vector = TRAP_nmi;
+ t->flags = 0;
+ t->cs = !IS_COMPAT(d) ? FLAT_KERNEL_CS : FLAT_COMPAT_KERNEL_CS;
+ t->address = address;
+ TI_SET_IF(t, 1);
+
+ /*
+ * If no handler was registered we can 'lose the NMI edge'. Re-assert it
+ * now.
+ */
+ if ( (v->vcpu_id == 0) && (arch_get_nmi_reason(d) != 0) )
+ v->nmi_pending = 1;
+
+ return 0;
+}
+
+long unregister_guest_nmi_callback(void)
+{
+ struct vcpu *v = current;
+ struct trap_info *t = &v->arch.guest_context.trap_ctxt[TRAP_nmi];
+
+ memset(t, 0, sizeof(*t));
+
+ return 0;
+}
long do_set_trap_table(XEN_GUEST_HANDLE(trap_info_t) traps)
{
@@ -2262,6 +2293,12 @@ long do_set_trap_table(XEN_GUEST_HANDLE(trap_info_t) traps)
if ( cur.address == 0 )
break;
+ if ( (cur.vector == TRAP_nmi) && !TI_GET_IF(&cur) )
+ {
+ rc = -EINVAL;
+ break;
+ }
+
fixup_guest_code_selector(current->domain, cur.cs);
memcpy(&dst[cur.vector], &cur, sizeof(cur));
diff --git a/xen/arch/x86/x86_32/asm-offsets.c b/xen/arch/x86/x86_32/asm-offsets.c
index 86e3682e0a..bb59330fbf 100644
--- a/xen/arch/x86/x86_32/asm-offsets.c
+++ b/xen/arch/x86/x86_32/asm-offsets.c
@@ -66,7 +66,10 @@ void __dummy__(void)
arch.guest_context.kernel_sp);
OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
OFFSET(VCPU_arch_guest_fpu_ctxt, struct vcpu, arch.guest_context.fpu_ctxt);
- OFFSET(VCPU_nmi_addr, struct vcpu, nmi_addr);
+ OFFSET(VCPU_nmi_cs, struct vcpu,
+ arch.guest_context.trap_ctxt[TRAP_nmi].cs);
+ OFFSET(VCPU_nmi_addr, struct vcpu,
+ arch.guest_context.trap_ctxt[TRAP_nmi].address);
OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
OFFSET(VCPU_nmi_masked, struct vcpu, nmi_masked);
DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
diff --git a/xen/arch/x86/x86_32/entry.S b/xen/arch/x86/x86_32/entry.S
index 37ed92d612..bff00eb5c7 100644
--- a/xen/arch/x86/x86_32/entry.S
+++ b/xen/arch/x86/x86_32/entry.S
@@ -257,13 +257,15 @@ process_nmi:
testb $1,VCPU_nmi_masked(%ebx)
jnz test_guest_events
movb $0,VCPU_nmi_pending(%ebx)
- movl VCPU_nmi_addr(%ebx),%eax
+ movzwl VCPU_nmi_cs(%ebx),%eax
+ movl VCPU_nmi_addr(%ebx),%ecx
test %eax,%eax
jz test_guest_events
movb $1,VCPU_nmi_masked(%ebx)
sti
leal VCPU_trap_bounce(%ebx),%edx
- movl %eax,TRAPBOUNCE_eip(%edx)
+ movw %ax,TRAPBOUNCE_cs(%edx)
+ movl %ecx,TRAPBOUNCE_eip(%edx)
- movw $FLAT_KERNEL_CS,TRAPBOUNCE_cs(%edx)
movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
call create_bounce_frame
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index 120f0d0e3d..365f227b8d 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -75,7 +75,10 @@ void __dummy__(void)
OFFSET(VCPU_kernel_ss, struct vcpu, arch.guest_context.kernel_ss);
OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
OFFSET(VCPU_arch_guest_fpu_ctxt, struct vcpu, arch.guest_context.fpu_ctxt);
- OFFSET(VCPU_nmi_addr, struct vcpu, nmi_addr);
+ OFFSET(VCPU_nmi_cs, struct vcpu,
+ arch.guest_context.trap_ctxt[TRAP_nmi].cs);
+ OFFSET(VCPU_nmi_addr, struct vcpu,
+ arch.guest_context.trap_ctxt[TRAP_nmi].address);
OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
OFFSET(VCPU_nmi_masked, struct vcpu, nmi_masked);
DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 3504e26058..b1e762bdd0 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -131,13 +131,15 @@ compat_process_nmi:
testb $1,VCPU_nmi_masked(%rbx)
jnz compat_test_guest_events
movb $0,VCPU_nmi_pending(%rbx)
- movl VCPU_nmi_addr(%rbx),%eax
+ movzwl VCPU_nmi_cs(%rbx),%eax
+ movl VCPU_nmi_addr(%rbx),%ecx
testl %eax,%eax
jz compat_test_guest_events
movb $1,VCPU_nmi_masked(%rbx)
sti
leaq VCPU_trap_bounce(%rbx),%rdx
- movl %eax,TRAPBOUNCE_eip(%rdx)
+ movw %ax,TRAPBOUNCE_cs(%rdx)
+ movl %ecx,TRAPBOUNCE_eip(%rdx)
- movw $FLAT_COMPAT_KERNEL_CS,TRAPBOUNCE_cs(%rdx)
movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
call compat_create_bounce_frame
diff --git a/xen/arch/x86/x86_64/compat/traps.c b/xen/arch/x86/x86_64/compat/traps.c
index 4c6298dd58..0c704b942a 100644
--- a/xen/arch/x86/x86_64/compat/traps.c
+++ b/xen/arch/x86/x86_64/compat/traps.c
@@ -294,6 +294,12 @@ int compat_set_trap_table(XEN_GUEST_HANDLE(trap_info_compat_t) traps)
if ( cur.address == 0 )
break;
+ if ( (cur.vector == TRAP_nmi) && !TI_GET_IF(&cur) )
+ {
+ rc = -EINVAL;
+ break;
+ }
+
fixup_guest_code_selector(current->domain, cur.cs);
XLAT_trap_info(dst + cur.vector, &cur);
diff --git a/xen/common/kernel.c b/xen/common/kernel.c
index 034ff211d3..1ce1720afc 100644
--- a/xen/common/kernel.c
+++ b/xen/common/kernel.c
@@ -247,40 +247,6 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDLE(void) arg)
return -ENOSYS;
}
-#ifndef COMPAT
-
-long register_guest_nmi_callback(unsigned long address)
-{
- struct vcpu *v = current;
- struct domain *d = current->domain;
-
- if ( (d->domain_id != 0) || (v->vcpu_id != 0) )
- return -EINVAL;
-
- v->nmi_addr = address;
-#ifdef CONFIG_X86
- /*
- * If no handler was registered we can 'lose the NMI edge'. Re-assert it
- * now.
- */
- if ( arch_get_nmi_reason(d) != 0 )
- v->nmi_pending = 1;
-#endif
-
- return 0;
-}
-
-long unregister_guest_nmi_callback(void)
-{
- struct vcpu *v = current;
-
- v->nmi_addr = 0;
-
- return 0;
-}
-
-#endif
-
DO(nmi_op)(unsigned int cmd, XEN_GUEST_HANDLE(void) arg)
{
struct xennmi_callback cb;
diff --git a/xen/include/asm-ia64/linux-null/asm/nmi.h b/xen/include/asm-ia64/linux-null/asm/nmi.h
index 29f4f4cd69..8463250968 100644
--- a/xen/include/asm-ia64/linux-null/asm/nmi.h
+++ b/xen/include/asm-ia64/linux-null/asm/nmi.h
@@ -1 +1,7 @@
-/* This file is intentionally left empty. */
+#ifndef __IA64_NMI_H__
+#define __IA64_NMI_H__
+
+#define register_guest_nmi_callback(a) (-ENOSYS)
+#define unregister_guest_nmi_callback() (-ENOSYS)
+
+#endif /* __IA64_NMI_H__ */
diff --git a/xen/include/asm-powerpc/nmi.h b/xen/include/asm-powerpc/nmi.h
index 4618b4b140..edbc554e80 100644
--- a/xen/include/asm-powerpc/nmi.h
+++ b/xen/include/asm-powerpc/nmi.h
@@ -3,4 +3,7 @@
#include <public/nmi.h>
+#define register_guest_nmi_callback(a) (-ENOSYS)
+#define unregister_guest_nmi_callback() (-ENOSYS)
+
#endif /* ASM_NMI_H */
diff --git a/xen/include/asm-x86/nmi.h b/xen/include/asm-x86/nmi.h
index d79b823ee2..af1ff2e008 100644
--- a/xen/include/asm-x86/nmi.h
+++ b/xen/include/asm-x86/nmi.h
@@ -23,4 +23,19 @@ void set_nmi_callback(nmi_callback_t callback);
*/
void unset_nmi_callback(void);
+/**
+ * register_guest_nmi_callback
+ *
+ * The default NMI handler passes the NMI to a guest callback. This
+ * function registers the address of that callback.
+ */
+long register_guest_nmi_callback(unsigned long address);
+
+/**
+ * unregister_guest_nmi_callback
+ *
+ * Unregister a guest NMI handler.
+ */
+long unregister_guest_nmi_callback(void);
+
#endif /* ASM_NMI_H */
diff --git a/xen/include/xen/nmi.h b/xen/include/xen/nmi.h
index a13e797273..e526b6ab6f 100644
--- a/xen/include/xen/nmi.h
+++ b/xen/include/xen/nmi.h
@@ -11,19 +11,4 @@
#include <asm/nmi.h>
-/**
- * register_guest_nmi_callback
- *
- * The default NMI handler passes the NMI to a guest callback. This
- * function registers the address of that callback.
- */
-extern long register_guest_nmi_callback(unsigned long address);
-
-/**
- * unregister_guest_nmi_callback
- *
- * Unregister a guest NMI handler.
- */
-extern long unregister_guest_nmi_callback(void);
-
#endif /* __XEN_NMI_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 67a203b969..85cdb6e9a5 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -131,8 +131,6 @@ struct vcpu
/* Bitmask of CPUs on which this VCPU may run. */
cpumask_t cpu_affinity;
- unsigned long nmi_addr; /* NMI callback address. */
-
/* Bitmask of CPUs which are holding onto this VCPU's state. */
cpumask_t vcpu_dirty_cpumask;