author    Wei Huang <wei.huang2@amd.com>  2011-05-09 11:39:23 +0100
committer Wei Huang <wei.huang2@amd.com>  2011-05-09 11:39:23 +0100
commit    5968755d6dab2b3e193bdaf1d23658d3f1328a25 (patch)
tree      f4fb78f9015766fc70f665080abfa20daa9a91db /xen/arch/x86/i387.c
parent    0217c28adbb1656e72ce68239eb6092fdc7cde0c (diff)
x86/fpu: create lazy and non-lazy FPU restore functions
Currently Xen relies on #NM (via CR0.TS) to trigger FPU context restore, but not all FPU state is tracked by the TS bit. This patch therefore creates two FPU restore functions: vcpu_restore_fpu_lazy() and vcpu_restore_fpu_eager(). vcpu_restore_fpu_lazy() is still used when #NM is triggered; vcpu_restore_fpu_eager(), by contrast, is called for a vcpu being scheduled in on every context switch. To minimize restore overhead, the patch adds a flag, nonlazy_xstate_used, to control non-lazy restore.

Signed-off-by: Wei Huang <wei.huang2@amd.com>
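To illustrate the split, a minimal standalone sketch of the intended call pattern follows. Only vcpu_restore_fpu_lazy(), vcpu_restore_fpu_eager() and the nonlazy_xstate_used flag come from this patch; the struct layout, the xrstor_stub() helper and the call sites are illustrative assumptions, not Xen code:

#include <stdbool.h>
#include <stdio.h>

/* Toy model only: mimics the lazy/eager split, not Xen's real types. */
struct vcpu {
    bool nonlazy_xstate_used;   /* set once the guest touches non-TS-tracked state */
};

/* Stand-in for the real xrstor-based restore helpers. */
static void xrstor_stub(const char *mask) { printf("xrstor mask=%s\n", mask); }

/* Called on every context switch in: reload only what CR0.TS cannot trap on. */
static void vcpu_restore_fpu_eager(struct vcpu *v)
{
    if ( v->nonlazy_xstate_used )
        xrstor_stub("XSTATE_NONLAZY");
    /* Otherwise nothing to do; the #NM path restores the rest lazily. */
}

/* Called from the #NM handler: reload the TS-tracked (lazy) state. */
static void vcpu_restore_fpu_lazy(struct vcpu *v)
{
    (void)v;                    /* the real code restores from v's save area */
    xrstor_stub("XSTATE_LAZY");
}

int main(void)
{
    struct vcpu v = { .nonlazy_xstate_used = true };
    vcpu_restore_fpu_eager(&v); /* vcpu scheduled in */
    vcpu_restore_fpu_lazy(&v);  /* later: first FPU use raises #NM */
    return 0;
}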
Diffstat (limited to 'xen/arch/x86/i387.c')
-rw-r--r--  xen/arch/x86/i387.c | 25 ++++++++++++++++++++-----
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 9350f734d5..e5780ef81b 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -98,13 +98,13 @@ static inline void fpu_frstor(struct vcpu *v)
 /*******************************/
 /*      FPU Save Functions     */
 /*******************************/
 /* Save x87 extended state */
-static inline void fpu_xsave(struct vcpu *v, uint64_t mask)
+static inline void fpu_xsave(struct vcpu *v)
 {
     /* XCR0 normally represents what guest OS set. In case of Xen itself,
      * we set all accumulated feature mask before doing save/restore.
      */
     set_xcr0(v->arch.xcr0_accum);
-    xsave(v, mask);
+    xsave(v, v->arch.nonlazy_xstate_used ? XSTATE_ALL : XSTATE_LAZY);
     set_xcr0(v->arch.xcr0);
 }
@@ -160,10 +160,25 @@ static inline void fpu_fsave(struct vcpu *v)
 /*******************************/
 /*      VCPU FPU Functions     */
 /*******************************/
+/* Restore FPU state whenever VCPU is scheduled in. */
+void vcpu_restore_fpu_eager(struct vcpu *v)
+{
+    ASSERT(!is_idle_vcpu(v));
+
+    /* Restore the nonlazy extended state which is not tracked by the CR0.TS bit. */
+    if ( v->arch.nonlazy_xstate_used )
+    {
+        /* Avoid recursion */
+        clts();
+        fpu_xrstor(v, XSTATE_NONLAZY);
+        stts();
+    }
+}
+
 /*
  * Restore FPU state when #NM is triggered.
  */
-void vcpu_restore_fpu(struct vcpu *v)
+void vcpu_restore_fpu_lazy(struct vcpu *v)
 {
     ASSERT(!is_idle_vcpu(v));

@@ -174,7 +189,7 @@ void vcpu_restore_fpu(struct vcpu *v)
         return;

     if ( xsave_enabled(v) )
-        fpu_xrstor(v, XSTATE_ALL);
+        fpu_xrstor(v, XSTATE_LAZY);
     else if ( v->fpu_initialised )
     {
         if ( cpu_has_fxsr )
@@ -204,7 +219,7 @@ void vcpu_save_fpu(struct vcpu *v)
     clts();

     if ( xsave_enabled(v) )
-        fpu_xsave(v, XSTATE_ALL);
+        fpu_xsave(v);
     else if ( cpu_has_fxsr )
         fpu_fxsave(v);
     else
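For reference, the masks above are expected to partition the xsave feature set. A minimal sketch of the assumed relationship follows; the real constants live in Xen's asm-x86 headers, and the exact bits (e.g. AMD LWP standing in for the non-lazy state) are assumptions of this sketch, not taken from the patch:

#include <stdint.h>

/* Assumed mask relationship, for illustration only. */
#define XSTATE_LWP      (1ULL << 62)   /* assumed example of non-TS-tracked state */
#define XSTATE_ALL      (~0ULL)        /* every xsave component */
#define XSTATE_NONLAZY  XSTATE_LWP     /* state CR0.TS cannot guard */
#define XSTATE_LAZY     (XSTATE_ALL & ~XSTATE_NONLAZY)

Under such a partition, vcpu_save_fpu() saves XSTATE_ALL whenever non-lazy state is in use, while each restore path reloads exactly its own half, so no component is missed or reloaded twice.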