 xen/arch/x86/traps.c       |  8 +-------
 xen/arch/x86/vmx.c         | 23 +++++++++++++++++++++++
 xen/arch/x86/vmx_io.c      |  1 +
 xen/arch/x86/vmx_vmcs.c    |  3 +++
 xen/include/asm-x86/i387.h | 12 ++++++++++++
 xen/include/asm-x86/vmx.h  | 16 ++++++++++++++++
 6 files changed, 56 insertions(+), 7 deletions(-)
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 52bd64ac9b..99de9d335a 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -919,13 +919,7 @@ asmlinkage int math_state_restore(struct cpu_user_regs *regs)
/* Prevent recursion. */
clts();
- if ( !test_and_set_bit(EDF_USEDFPU, &current->flags) )
- {
- if ( test_bit(EDF_DONEFPUINIT, &current->flags) )
- restore_fpu(current);
- else
- init_fpu();
- }
+ setup_fpu(current);
if ( test_and_clear_bit(EDF_GUEST_STTS, &current->flags) )
{
diff --git a/xen/arch/x86/vmx.c b/xen/arch/x86/vmx.c
index 7fa433c0c1..75714157a8 100644
--- a/xen/arch/x86/vmx.c
+++ b/xen/arch/x86/vmx.c
@@ -154,6 +154,21 @@ static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs)
return result;
}
+static void vmx_do_no_device_fault(void)
+{
+ unsigned long cr0;
+
+ clts();
+ setup_fpu(current);
+ __vmread(CR0_READ_SHADOW, &cr0);
+ if (!(cr0 & X86_CR0_TS)) {
+ __vmread(GUEST_CR0, &cr0);
+ cr0 &= ~X86_CR0_TS;
+ __vmwrite(GUEST_CR0, cr0);
+ }
+ __vmwrite(EXCEPTION_BITMAP, MONITOR_DEFAULT_EXCEPTION_BITMAP);
+}
+
static void vmx_do_general_protection_fault(struct cpu_user_regs *regs)
{
unsigned long eip, error_code;
@@ -894,6 +909,9 @@ static int vmx_cr_access(unsigned long exit_qualification, struct cpu_user_regs
mov_from_cr(cr, gp, regs);
break;
case TYPE_CLTS:
+ clts();
+ setup_fpu(current);
+
__vmread(GUEST_CR0, &value);
value &= ~X86_CR0_TS; /* clear TS */
__vmwrite(GUEST_CR0, value);
@@ -1093,6 +1111,11 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
break;
}
#endif
+ case TRAP_no_device:
+ {
+ vmx_do_no_device_fault();
+ break;
+ }
case TRAP_gp_fault:
{
vmx_do_general_protection_fault(&regs);
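Taken together, the vmx.c hunks cover both ways a VMX guest can start
using the FPU. A hypothetical ring-0 guest fragment (GCC inline asm,
not part of the patch) that would exercise each path:

    /* Hypothetical guest code, for illustration only. */
    static void guest_touch_fpu(void)
    {
        /* Path 1: the guest clears TS itself.  CLTS takes a CR-access
         * VM exit; vmx_cr_access() now loads the FPU eagerly via
         * setup_fpu() and clears TS in GUEST_CR0, so the fld1 below
         * runs natively. */
        __asm__ __volatile__ ( "clts" );
        __asm__ __volatile__ ( "fld1" );

        /* Path 2: on a later timeslice vmx_do_resume() has re-set TS
         * via vmx_stts().  The first FP instruction then takes a #NM
         * VM exit, and vmx_do_no_device_fault() reloads the guest's
         * FPU state lazily before letting the instruction restart. */
        __asm__ __volatile__ ( "fldpi" );
    }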
diff --git a/xen/arch/x86/vmx_io.c b/xen/arch/x86/vmx_io.c
index 05780a2157..49742286c5 100644
--- a/xen/arch/x86/vmx_io.c
+++ b/xen/arch/x86/vmx_io.c
@@ -429,6 +429,7 @@ void vmx_intr_assist(struct exec_domain *d)
void vmx_do_resume(struct exec_domain *d)
{
+ vmx_stts();
if ( test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state) )
__vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
else
diff --git a/xen/arch/x86/vmx_vmcs.c b/xen/arch/x86/vmx_vmcs.c
index 90cc88122c..375d20da48 100644
--- a/xen/arch/x86/vmx_vmcs.c
+++ b/xen/arch/x86/vmx_vmcs.c
@@ -164,6 +164,9 @@ void vmx_do_launch(struct exec_domain *ed)
struct pfn_info *page;
struct cpu_user_regs *regs = get_cpu_user_regs();
+ vmx_stts();
+ set_bit(EDF_GUEST_STTS, &ed->flags);
+
cpu = smp_processor_id();
page = (struct pfn_info *) alloc_domheap_page(NULL);
diff --git a/xen/include/asm-x86/i387.h b/xen/include/asm-x86/i387.h
index ca81778089..01039ab648 100644
--- a/xen/include/asm-x86/i387.h
+++ b/xen/include/asm-x86/i387.h
@@ -28,4 +28,16 @@ extern void restore_fpu(struct exec_domain *tsk);
__asm__ __volatile__ ( "ldmxcsr %0" : : "m" (__mxcsr) ); \
} while ( 0 )
+/* Make domain the FPU owner */
+static inline void setup_fpu(struct exec_domain *ed)
+{
+ if ( !test_and_set_bit(EDF_USEDFPU, &ed->flags) )
+ {
+ if ( test_bit(EDF_DONEFPUINIT, &ed->flags) )
+ restore_fpu(ed);
+ else
+ init_fpu();
+ }
+}
+
#endif /* __ASM_I386_I387_H */
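setup_fpu() centralises the flag protocol that math_state_restore()
previously open-coded: EDF_USEDFPU marks the domain as the current FPU
owner (set on first FP use after a reschedule, so only the first fault
does any work), while EDF_DONEFPUINIT distinguishes a domain with saved
state to restore from one that has never touched the FPU. A stand-alone
model of that protocol (hypothetical user-space sketch; Xen's bitops
are replaced with a GCC atomic builtin, domains with a plain struct):

    #include <stdbool.h>
    #include <stdio.h>

    struct domain_model {
        bool used_fpu;      /* models EDF_USEDFPU */
        bool done_fpu_init; /* models EDF_DONEFPUINIT */
    };

    static void model_setup_fpu(struct domain_model *d)
    {
        /* Like test_and_set_bit(): returns the old value, sets the bit. */
        if ( !__atomic_test_and_set(&d->used_fpu, __ATOMIC_SEQ_CST) )
        {
            if ( d->done_fpu_init )
                puts("restore_fpu(): reload previously saved FP state");
            else
            {
                puts("init_fpu(): fninit fresh state");
                d->done_fpu_init = true;
            }
        }
        /* else: already the FPU owner; the fault cost is paid only once. */
    }

    int main(void)
    {
        struct domain_model d = { false, false };
        model_setup_fpu(&d);   /* first ever use -> init_fpu() path */
        d.used_fpu = false;    /* a context switch clears EDF_USEDFPU */
        model_setup_fpu(&d);   /* next use -> restore_fpu() path */
        model_setup_fpu(&d);   /* same timeslice again -> no-op */
        return 0;
    }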
diff --git a/xen/include/asm-x86/vmx.h b/xen/include/asm-x86/vmx.h
index 334afcf6be..3041a334f1 100644
--- a/xen/include/asm-x86/vmx.h
+++ b/xen/include/asm-x86/vmx.h
@@ -24,6 +24,7 @@
#include <asm/regs.h>
#include <asm/processor.h>
#include <asm/vmx_vmcs.h>
+#include <asm/i387.h>
extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
extern void vmx_asm_do_resume(void);
@@ -251,4 +252,19 @@ static inline int __vmxon (u64 addr)
return 0;
}
+/* Make sure that xen intercepts any FP accesses from current */
+static inline void vmx_stts(void)
+{
+ unsigned long cr0;
+
+ __vmread(GUEST_CR0, &cr0);
+ if (!(cr0 & X86_CR0_TS))
+ __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
+
+ __vmread(CR0_READ_SHADOW, &cr0);
+ if (!(cr0 & X86_CR0_TS))
+ __vmwrite(EXCEPTION_BITMAP, MONITOR_DEFAULT_EXCEPTION_BITMAP |
+ EXCEPTION_BITMAP_NM);
+}
+
#endif /* __ASM_X86_VMX_H__ */
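vmx_stts() and vmx_do_no_device_fault() form a small state machine:
every launch/resume forces GUEST_CR0.TS on, and #NM is intercepted only
when the guest's own view of TS (the CR0 read shadow) is clear, i.e.
when the fault would be an artifact of Xen's lazy switching rather than
one the guest expects to see. A stand-alone model of that machine
(hypothetical user-space sketch; VMCS fields become plain variables and
the VM exit becomes a function call):

    #include <stdbool.h>
    #include <stdio.h>

    #define X86_CR0_TS 0x8

    static unsigned long guest_cr0;        /* models GUEST_CR0 */
    static unsigned long cr0_read_shadow;  /* models CR0_READ_SHADOW */
    static bool intercept_nm;              /* models EXCEPTION_BITMAP_NM */
    static bool fpu_loaded;                /* models setup_fpu() having run */

    /* Mirrors vmx_stts(): run on every launch/resume of the guest. */
    static void model_stts(void)
    {
        guest_cr0 |= X86_CR0_TS;           /* force #NM on the next FP use */
        if ( !(cr0_read_shadow & X86_CR0_TS) )
            intercept_nm = true;           /* the fault is Xen's to handle */
    }

    /* Mirrors vmx_do_no_device_fault(): run on a #NM VM exit. */
    static void model_no_device_fault(void)
    {
        fpu_loaded = true;                 /* setup_fpu(current) */
        if ( !(cr0_read_shadow & X86_CR0_TS) )
            guest_cr0 &= ~X86_CR0_TS;      /* restarted FP ops run natively */
        intercept_nm = false;              /* back to the default bitmap */
    }

    int main(void)
    {
        model_stts();                      /* vmx_do_resume() */
        printf("after resume: TS=%d intercept_nm=%d\n",
               !!(guest_cr0 & X86_CR0_TS), intercept_nm);
        model_no_device_fault();           /* guest touches the FPU */
        printf("after #NM:    TS=%d intercept_nm=%d fpu_loaded=%d\n",
               !!(guest_cr0 & X86_CR0_TS), intercept_nm, fpu_loaded);
        return 0;
    }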