path: root/xen/arch/x86/i387.c
author    Wei Huang <wei.huang2@amd.com>  2011-05-09 11:38:30 +0100
committer Wei Huang <wei.huang2@amd.com>  2011-05-09 11:38:30 +0100
commit    3eb0faa4f80653d56e0e74d68872f6d73de23937 (patch)
tree      7f3f898fa69324d8e1f4e1ef91de82725f0c17dc /xen/arch/x86/i387.c
parent    09014a12a9a2e820bc2ac2f82ff9018a69208c8c (diff)
x86/fpu: clean up FPU context restore function
This patch cleans up the FPU context restore function. It renames the function to vcpu_restore_fpu() and extracts the FPU restore code (frstor, fxrstor, xrstor) into separate functions. vcpu_restore_fpu() dispatches to these functions depending on the CPU's capability.

Signed-off-by: Wei Huang <wei.huang2@amd.com>
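For orientation, the dispatch that results from this cleanup can be restated as the short sketch below. It simply paraphrases the vcpu_restore_fpu() hunk in the diff that follows; the explanatory comments are added here for illustration and are not part of the patch. fpu_xrstor(), fpu_fxrstor(), fpu_frstor() and fpu_init() are the helpers this change introduces, and the function is the one invoked when #NM is raised.

    void vcpu_restore_fpu(struct vcpu *v)
    {
        ASSERT(!is_idle_vcpu(v));

        /* Clear CR0.TS so the restore instructions below do not fault again. */
        clts();

        /* This vCPU's FPU state is already live on the CPU; nothing to do. */
        if ( v->fpu_dirtied )
            return;

        if ( xsave_enabled(v) )
            fpu_xrstor(v);          /* restore full extended state via xrstor */
        else if ( v->fpu_initialised )
        {
            if ( cpu_has_fxsr )
                fpu_fxrstor(v);     /* restore x87/MMX/SSE/SSE2 state via fxrstor */
            else
                fpu_frstor(v);      /* legacy x87-only restore via frstor */
        }
        else
            fpu_init();             /* first FPU use by this vCPU: start clean */

        v->fpu_initialised = 1;
        v->fpu_dirtied = 1;
    }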
Diffstat (limited to 'xen/arch/x86/i387.c')
-rw-r--r--    xen/arch/x86/i387.c    155
1 file changed, 83 insertions, 72 deletions
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index d57c847dcc..fc0c1c45f4 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -17,56 +17,37 @@
#include <asm/xstate.h>
#include <asm/asm_defns.h>
-static void load_mxcsr(unsigned long val)
+#define MXCSR_DEFAULT 0x1f80
+static void fpu_init(void)
{
- val &= 0xffbf;
- asm volatile ( "ldmxcsr %0" : : "m" (val) );
-}
-
-static void init_fpu(void);
-static void restore_fpu(struct vcpu *v);
-
-void setup_fpu(struct vcpu *v)
-{
- ASSERT(!is_idle_vcpu(v));
-
- /* Avoid recursion. */
- clts();
-
- if ( v->fpu_dirtied )
- return;
-
- if ( xsave_enabled(v) )
- {
- /*
- * XCR0 normally represents what guest OS set. In case of Xen itself,
- * we set all supported feature mask before doing save/restore.
- */
- set_xcr0(v->arch.xcr0_accum);
- xrstor(v);
- set_xcr0(v->arch.xcr0);
- }
- else if ( v->fpu_initialised )
- {
- restore_fpu(v);
- }
- else
+ unsigned long val;
+
+ asm volatile ( "fninit" );
+ if ( cpu_has_xmm )
{
- init_fpu();
+ /* load default value into MXCSR control/status register */
+ val = MXCSR_DEFAULT;
+ asm volatile ( "ldmxcsr %0" : : "m" (val) );
}
-
- v->fpu_initialised = 1;
- v->fpu_dirtied = 1;
}
-static void init_fpu(void)
+/*******************************/
+/* FPU Restore Functions */
+/*******************************/
+/* Restore x87 extended state */
+static inline void fpu_xrstor(struct vcpu *v)
{
- asm volatile ( "fninit" );
- if ( cpu_has_xmm )
- load_mxcsr(0x1f80);
+ /*
+ * XCR0 normally represents what the guest OS has set. In the case of Xen
+ * itself, we set the full supported feature mask before doing the save/restore.
+ */
+ set_xcr0(v->arch.xcr0_accum);
+ xrstor(v);
+ set_xcr0(v->arch.xcr0);
}
-static void restore_fpu(struct vcpu *v)
+/* Restore x87 FPU, MMX, SSE and SSE2 state */
+static inline void fpu_fxrstor(struct vcpu *v)
{
const char *fpu_ctxt = v->arch.fpu_ctxt;
@@ -75,41 +56,42 @@ static void restore_fpu(struct vcpu *v)
* possibility, which may occur if the block was passed to us by control
* tools, by silently clearing the block.
*/
- if ( cpu_has_fxsr )
- {
- asm volatile (
+ asm volatile (
#ifdef __i386__
- "1: fxrstor %0 \n"
+ "1: fxrstor %0 \n"
#else /* __x86_64__ */
- /* See above for why the operands/constraints are this way. */
- "1: " REX64_PREFIX "fxrstor (%2)\n"
+ /* See above for why the operands/constraints are this way. */
+ "1: " REX64_PREFIX "fxrstor (%2)\n"
#endif
- ".section .fixup,\"ax\" \n"
- "2: push %%"__OP"ax \n"
- " push %%"__OP"cx \n"
- " push %%"__OP"di \n"
- " lea %0,%%"__OP"di \n"
- " mov %1,%%ecx \n"
- " xor %%eax,%%eax \n"
- " rep ; stosl \n"
- " pop %%"__OP"di \n"
- " pop %%"__OP"cx \n"
- " pop %%"__OP"ax \n"
- " jmp 1b \n"
- ".previous \n"
- _ASM_EXTABLE(1b, 2b)
- :
- : "m" (*fpu_ctxt),
- "i" (sizeof(v->arch.xsave_area->fpu_sse)/4)
+ ".section .fixup,\"ax\" \n"
+ "2: push %%"__OP"ax \n"
+ " push %%"__OP"cx \n"
+ " push %%"__OP"di \n"
+ " lea %0,%%"__OP"di \n"
+ " mov %1,%%ecx \n"
+ " xor %%eax,%%eax \n"
+ " rep ; stosl \n"
+ " pop %%"__OP"di \n"
+ " pop %%"__OP"cx \n"
+ " pop %%"__OP"ax \n"
+ " jmp 1b \n"
+ ".previous \n"
+ _ASM_EXTABLE(1b, 2b)
+ :
+ : "m" (*fpu_ctxt),
+ "i" (sizeof(v->arch.xsave_area->fpu_sse)/4)
#ifdef __x86_64__
- ,"cdaSDb" (fpu_ctxt)
+ ,"cdaSDb" (fpu_ctxt)
#endif
- );
- }
- else
- {
- asm volatile ( "frstor %0" : : "m" (*fpu_ctxt) );
- }
+ );
+}
+
+/* Restore x87 FPU state */
+static inline void fpu_frstor(struct vcpu *v)
+{
+ const char *fpu_ctxt = v->arch.fpu_ctxt;
+
+ asm volatile ( "frstor %0" : : "m" (*fpu_ctxt) );
}
/*******************************/
@@ -179,6 +161,35 @@ static inline void fpu_fsave(struct vcpu *v)
/* VCPU FPU Functions */
/*******************************/
/*
+ * Restore FPU state when #NM is triggered.
+ */
+void vcpu_restore_fpu(struct vcpu *v)
+{
+ ASSERT(!is_idle_vcpu(v));
+
+ /* Avoid recursion. */
+ clts();
+
+ if ( v->fpu_dirtied )
+ return;
+
+ if ( xsave_enabled(v) )
+ fpu_xrstor(v);
+ else if ( v->fpu_initialised )
+ {
+ if ( cpu_has_fxsr )
+ fpu_fxrstor(v);
+ else
+ fpu_frstor(v);
+ }
+ else
+ fpu_init();
+
+ v->fpu_initialised = 1;
+ v->fpu_dirtied = 1;
+}
+
+/*
* On each context switch, save the necessary FPU info of the VCPU being
* switched out. It dispatches the saving operation based on the CPU's capability.
*/