 xen/arch/x86/acpi/suspend.c |   2 +-
 xen/arch/x86/domain.c       |   2 +-
 xen/arch/x86/i387.c         | 160 +++++++++++++++++++++++++-----------------
 xen/include/asm-x86/i387.h  |   2 +-
 4 files changed, 91 insertions(+), 75 deletions(-)
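
Taken together, this patch renames save_init_fpu() to vcpu_save_fpu() and splits the three hardware save paths into dedicated helpers. Condensed from the i387.c hunk that follows (a summary sketch, not a literal copy of the new code), the refactored control flow is:

/* Summary sketch of the refactored save path (see the i387.c hunk below). */
void vcpu_save_fpu(struct vcpu *v)
{
    if ( !v->fpu_dirtied )      /* the vcpu never touched the FPU: skip  */
        return;

    clts();                     /* CR0.TS must be clear or the saves fault */

    if ( xsave_enabled(v) )
        fpu_xsave(v);           /* XSAVE:  full extended state      */
    else if ( cpu_has_fxsr )
        fpu_fxsave(v);          /* FXSAVE: x87/MMX/SSE/SSE2         */
    else
        fpu_fsave(v);           /* FNSAVE: legacy x87 only          */

    v->fpu_dirtied = 0;
    stts();                     /* set CR0.TS to re-arm lazy restore */
}
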
diff --git a/xen/arch/x86/acpi/suspend.c b/xen/arch/x86/acpi/suspend.c
index 66035d8ba4..d13bed0c6b 100644
--- a/xen/arch/x86/acpi/suspend.c
+++ b/xen/arch/x86/acpi/suspend.c
@@ -24,7 +24,7 @@ static uint16_t saved_segs[4];
void save_rest_processor_state(void)
{
- save_init_fpu(current);
+ vcpu_save_fpu(current);
#if defined(CONFIG_X86_64)
asm volatile (
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 538d497306..8c19b9e7d3 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1548,7 +1548,7 @@ static void __context_switch(void)
if ( !is_idle_vcpu(p) )
{
memcpy(&p->arch.user_regs, stack_regs, CTXT_SWITCH_STACK_BYTES);
- save_init_fpu(p);
+ vcpu_save_fpu(p);
p->arch.ctxt_switch_from(p);
}
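
Note that vcpu_save_fpu() deliberately leaves CR0.TS set via stts(), so the state saved at context-switch time is restored lazily: the next FPU instruction the vcpu executes raises #NM (device-not-available), and the trap handler reloads the context through setup_fpu(). A minimal sketch of that other half (handler body simplified; of the names used here, only setup_fpu() comes from this patch's header):

/* Lazy-restore sketch (simplified; the real Xen handler also has to
 * respect a PV guest's own CR0.TS setting). */
void do_device_not_available(struct cpu_user_regs *regs)
{
    setup_fpu(current);   /* clears CR0.TS and reloads the saved context */
}
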
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 255e1e51dd..d57c847dcc 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -66,78 +66,6 @@ static void init_fpu(void)
load_mxcsr(0x1f80);
}
-void save_init_fpu(struct vcpu *v)
-{
- unsigned long cr0;
- char *fpu_ctxt;
-
- if ( !v->fpu_dirtied )
- return;
-
- ASSERT(!is_idle_vcpu(v));
-
- cr0 = read_cr0();
- fpu_ctxt = v->arch.fpu_ctxt;
-
- /* This can happen, if a paravirtualised guest OS has set its CR0.TS. */
- if ( cr0 & X86_CR0_TS )
- clts();
-
- if ( xsave_enabled(v) )
- {
- /* XCR0 normally represents what guest OS set. In case of Xen itself,
- * we set all accumulated feature mask before doing save/restore.
- */
- set_xcr0(v->arch.xcr0_accum);
- xsave(v);
- set_xcr0(v->arch.xcr0);
- }
- else if ( cpu_has_fxsr )
- {
-#ifdef __i386__
- asm volatile (
- "fxsave %0"
- : "=m" (*fpu_ctxt) );
-#else /* __x86_64__ */
- /*
- * The only way to force fxsaveq on a wide range of gas versions. On
- * older versions the rex64 prefix works only if we force an
- * addressing mode that doesn't require extended registers.
- */
- asm volatile (
- REX64_PREFIX "fxsave (%1)"
- : "=m" (*fpu_ctxt) : "cdaSDb" (fpu_ctxt) );
-#endif
-
- /* Clear exception flags if FSW.ES is set. */
- if ( unlikely(fpu_ctxt[2] & 0x80) )
- asm volatile ("fnclex");
-
- /*
- * AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
- * is pending. Clear the x87 state here by setting it to fixed
- * values. The hypervisor data segment can be sometimes 0 and
- * sometimes new user value. Both should be ok. Use the FPU saved
- * data block as a safe address because it should be in L1.
- */
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
- {
- asm volatile (
- "emms\n\t" /* clear stack tags */
- "fildl %0" /* load to clear state */
- : : "m" (*fpu_ctxt) );
- }
- }
- else
- {
- /* FWAIT is required to make FNSAVE synchronous. */
- asm volatile ( "fnsave %0 ; fwait" : "=m" (*fpu_ctxt) );
- }
-
- v->fpu_dirtied = 0;
- write_cr0(cr0|X86_CR0_TS);
-}
-
static void restore_fpu(struct vcpu *v)
{
const char *fpu_ctxt = v->arch.fpu_ctxt;
@@ -185,8 +113,96 @@ static void restore_fpu(struct vcpu *v)
}
/*******************************/
+/* FPU Save Functions */
+/*******************************/
+/* Save x87 extended state */
+static inline void fpu_xsave(struct vcpu *v)
+{
+ /* XCR0 normally represents what the guest OS has set. For Xen itself,
+ * we set the full accumulated feature mask before doing the save/restore.
+ */
+ set_xcr0(v->arch.xcr0_accum);
+ xsave(v);
+ set_xcr0(v->arch.xcr0);
+}
+
+/* Save x87 FPU, MMX, SSE and SSE2 state */
+static inline void fpu_fxsave(struct vcpu *v)
+{
+ char *fpu_ctxt = v->arch.fpu_ctxt;
+
+#ifdef __i386__
+ asm volatile (
+ "fxsave %0"
+ : "=m" (*fpu_ctxt) );
+#else /* __x86_64__ */
+ /*
+ * This is the only way to force fxsaveq on a wide range of gas
+ * versions. On older versions the rex64 prefix works only if we force
+ * an addressing mode that doesn't require extended registers.
+ */
+ asm volatile (
+ REX64_PREFIX "fxsave (%1)"
+ : "=m" (*fpu_ctxt) : "cdaSDb" (fpu_ctxt) );
+#endif
+
+ /* Clear exception flags if FSW.ES is set. */
+ if ( unlikely(fpu_ctxt[2] & 0x80) )
+ asm volatile ("fnclex");
+
+ /*
+ * AMD CPUs don't save/restore FDP/FIP/FOP unless an exception is
+ * pending. Clear that x87 state here by loading known fixed values.
+ * The hypervisor data segment can be sometimes 0 and sometimes the new
+ * user value; either should be fine. Use the FPU save block as a safe
+ * address because it should be in L1.
+ */
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+ {
+ asm volatile (
+ "emms\n\t" /* clear stack tags */
+ "fildl %0" /* load to clear state */
+ : : "m" (*fpu_ctxt) );
+ }
+}
+
+/* Save x87 FPU state */
+static inline void fpu_fsave(struct vcpu *v)
+{
+ char *fpu_ctxt = v->arch.fpu_ctxt;
+
+ /* FWAIT is required to make FNSAVE synchronous. */
+ asm volatile ( "fnsave %0 ; fwait" : "=m" (*fpu_ctxt) );
+}
+
+/*******************************/
/* VCPU FPU Functions */
/*******************************/
+/*
+ * On each context switch, save the FPU state of the VCPU being switched
+ * out, dispatching the save operation on the CPU's capabilities.
+ */
+void vcpu_save_fpu(struct vcpu *v)
+{
+ if ( !v->fpu_dirtied )
+ return;
+
+ ASSERT(!is_idle_vcpu(v));
+
+ /* Clear CR0.TS, which a paravirtualised guest OS may have set. */
+ clts();
+
+ if ( xsave_enabled(v) )
+ fpu_xsave(v);
+ else if ( cpu_has_fxsr )
+ fpu_fxsave(v);
+ else
+ fpu_fsave(v);
+
+ v->fpu_dirtied = 0;
+ stts();
+}
+
/* Initialize FPU's context save area */
int vcpu_init_fpu(struct vcpu *v)
{
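
The same three-way fallback can be exercised outside the hypervisor. Below is a stand-alone user-space illustration (not Xen code; every name in it is local to the example) that probes CPUID leaf 1 and saves FPU state with the widest instruction available, mirroring the xsave -> fxsave -> fnsave order above:

/* fpu_save_demo.c -- user-space illustration of the dispatch above.
 * Not Xen code. Build (x86-64): gcc -O2 fpu_save_demo.c -o fpu_save_demo */
#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>

#define CPUID1_ECX_XSAVE    (1u << 26)  /* XSAVE supported by the CPU */
#define CPUID1_ECX_OSXSAVE  (1u << 27)  /* XSAVE enabled by the OS    */
#define CPUID1_EDX_FXSR     (1u << 24)  /* FXSAVE/FXRSTOR support     */

/* XSAVE needs a 64-byte-aligned area; FXSAVE needs 16. A static array
 * is zero-initialised, which also gives XSAVE a valid empty header. */
static uint8_t save_area[4096] __attribute__((aligned(64)));

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if ( !__get_cpuid(1, &eax, &ebx, &ecx, &edx) )
        return 1;

    if ( (ecx & CPUID1_ECX_XSAVE) && (ecx & CPUID1_ECX_OSXSAVE) )
    {
        /* EDX:EAX selects which state components to save; ask for all. */
        asm volatile ( "xsave %0"
                       : "=m" (save_area) : "a" (~0u), "d" (~0u) );
        puts("saved with xsave");
    }
    else if ( edx & CPUID1_EDX_FXSR )
    {
        asm volatile ( "fxsave %0" : "=m" (save_area) );
        puts("saved with fxsave");
    }
    else
    {
        /* FWAIT makes FNSAVE synchronous, as in fpu_fsave() above. */
        asm volatile ( "fnsave %0 ; fwait" : "=m" (save_area) );
        puts("saved with fnsave");
    }
    return 0;
}
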
diff --git a/xen/include/asm-x86/i387.h b/xen/include/asm-x86/i387.h
index f2bd78d680..d85713a89c 100644
--- a/xen/include/asm-x86/i387.h
+++ b/xen/include/asm-x86/i387.h
@@ -15,7 +15,7 @@
#include <xen/percpu.h>
void setup_fpu(struct vcpu *v);
-void save_init_fpu(struct vcpu *v);
+void vcpu_save_fpu(struct vcpu *v);
int vcpu_init_fpu(struct vcpu *v);
void vcpu_destroy_fpu(struct vcpu *v);
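
For orientation, the four declarations above span a vcpu's whole FPU lifetime. A hedged sketch of how they pair up (illustrative only; callers condensed, error handling elided):

/* Pairing of the declared API over a vcpu's lifetime (illustrative;
 * vcpu_init_fpu()'s allocation-failure return is ignored here). */
static void vcpu_fpu_lifetime(struct vcpu *v)
{
    vcpu_init_fpu(v);      /* at vcpu creation: allocate the save area */

    /* ... repeated while the vcpu is scheduled ... */
    vcpu_save_fpu(v);      /* on each context switch away from v       */
    setup_fpu(v);          /* on its next FPU use, from the #NM path   */

    vcpu_destroy_fpu(v);   /* at vcpu teardown: free the save area     */
}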