author    Keir Fraser <keir@xen.org>  2011-01-14 08:34:53 +0000
committer Keir Fraser <keir@xen.org>  2011-01-14 08:34:53 +0000
commit    5a96732bd4101281bc1c609a08688cc87de61b26 (patch)
tree      69738083a4658df044494c7fef153ca0d1cf9a90 /xen/arch/x86/i387.c
parent    91fa20635ad51008527345579467687a2ccb168e (diff)
x86: Avoid calling xsave_alloc_save_area before xsave_init
Currently, xsave_alloc_save_area() is called via init_idle_domain -> scheduler_init -> alloc_vcpu -> vcpu_initialise with xsave_cntxt_size=0, i.e. earlier than xsave_init(), which runs from identify_cpu(). This may cause a buffer overflow on xmem_pool.

The idle domain does not use FPU, SSE, AVX or any such extended state and does not need it saved. xsave_{alloc,free}_save_area() should test-and-exit on is_idle_vcpu(), and our context switch code should not be doing XSAVE when switching out an idle vcpu.

Signed-off-by: Wei Gang <gang.wei@intel.com>
Signed-off-by: Keir Fraser <keir@xen.org>
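To illustrate the failure mode and the guard this patch adds, here is a minimal, hypothetical C sketch (simplified types; xsave_cntxt_size, is_idle_vcpu() and XSAVE_AREA_MIN_SIZE mirror names from the patch, everything else is a stand-in, not Xen's actual code):

    #include <assert.h>
    #include <stdlib.h>

    struct vcpu { int is_idle; void *xsave_area; };

    static size_t xsave_cntxt_size;        /* stays 0 until xsave_init() runs */
    #define XSAVE_AREA_MIN_SIZE (512 + 64) /* FP/SSE + XSAVE.HEADER */

    static int is_idle_vcpu(const struct vcpu *v) { return v->is_idle; }

    int xsave_alloc_save_area(struct vcpu *v)
    {
        /* Idle vcpus never touch FPU/SSE/AVX state, so skip the allocation;
         * this also covers calls made before xsave_init() has run. */
        if ( is_idle_vcpu(v) )
            return 0;

        /* Allocating with an uninitialised (zero) size would yield an
         * undersized buffer that a later XSAVE would overflow. */
        assert(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);

        /* XSAVE/XRSTOR require 64-byte alignment; round the size up to a
         * multiple of the alignment, as C11 aligned_alloc() expects. */
        v->xsave_area = aligned_alloc(64, (xsave_cntxt_size + 63) & ~(size_t)63);
        return v->xsave_area ? 0 : -1;
    }

With xsave_cntxt_size still zero (the pre-patch situation), the assert trips instead of the allocator silently handing back an undersized block, which is the same protection the patch's BUG_ON() provides.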
Diffstat (limited to 'xen/arch/x86/i387.c')
-rw-r--r--  xen/arch/x86/i387.c | 44
1 file changed, 42 insertions(+), 2 deletions(-)
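For context on the new setup_fpu() in the diff below: before restoring with XRSTOR, Xen temporarily programs XCR0 with the accumulated feature mask (everything the guest has ever enabled) so the restore may touch all saved components, then switches back to the guest's current mask. A rough sketch of that bracketing, with hypothetical stand-ins for the real primitives:

    #include <stdint.h>

    struct vcpu_state { uint64_t xcr0, xcr0_accum; };

    extern void set_xcr0(uint64_t mask);       /* stand-in: write XCR0 */
    extern void xrstor(struct vcpu_state *v);  /* stand-in: XRSTOR from the save area */

    void restore_with_full_xcr0(struct vcpu_state *v)
    {
        set_xcr0(v->xcr0_accum);  /* enable every feature ever accumulated */
        xrstor(v);                /* restore may legally touch all of them */
        set_xcr0(v->xcr0);        /* back to what the guest currently set */
    }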
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 477efec973..59dbecf9aa 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -16,6 +16,39 @@
 #include <asm/i387.h>
 #include <asm/asm_defns.h>
 
+void setup_fpu(struct vcpu *v)
+{
+    ASSERT(!is_idle_vcpu(v));
+
+    /* Avoid recursion. */
+    clts();
+
+    if ( !v->fpu_dirtied )
+    {
+        v->fpu_dirtied = 1;
+        if ( cpu_has_xsave )
+        {
+            if ( !v->fpu_initialised )
+                v->fpu_initialised = 1;
+
+            /* XCR0 normally represents what the guest OS set. For Xen
+             * itself, we set the full supported feature mask before doing
+             * the save/restore.
+             */
+            set_xcr0(v->arch.xcr0_accum);
+            xrstor(v);
+            set_xcr0(v->arch.xcr0);
+        }
+        else
+        {
+            if ( v->fpu_initialised )
+                restore_fpu(v);
+            else
+                init_fpu();
+        }
+    }
+}
+
 void init_fpu(void)
 {
     asm volatile ( "fninit" );
@@ -29,6 +62,8 @@ void save_init_fpu(struct vcpu *v)
     unsigned long cr0 = read_cr0();
     char *fpu_ctxt = v->arch.guest_context.fpu_ctxt.x;
 
+    ASSERT(!is_idle_vcpu(v));
+
     /* This can happen, if a paravirtualised guest OS has set its CR0.TS. */
     if ( cr0 & X86_CR0_TS )
         clts();
@@ -138,6 +173,7 @@ void restore_fpu(struct vcpu *v)
 }
 
 #define XSTATE_CPUID 0xd
+#define XSAVE_AREA_MIN_SIZE (512 + 64) /* FP/SSE + XSAVE.HEADER */
 
 /*
  * Maximum size (in byte) of the XSAVE/XRSTOR save area required by all
@@ -177,7 +213,9 @@ void xsave_init(void)
     }
 
     /* FP/SSE, XSAVE.HEADER, YMM */
-    min_size = 512 + 64 + ((eax & XSTATE_YMM) ? XSTATE_YMM_SIZE : 0);
+    min_size = XSAVE_AREA_MIN_SIZE;
+    if ( eax & XSTATE_YMM )
+        min_size += XSTATE_YMM_SIZE;
     BUG_ON(ecx < min_size);
 
     /*
@@ -214,9 +252,11 @@ int xsave_alloc_save_area(struct vcpu *v)
 {
     void *save_area;
 
-    if ( !cpu_has_xsave )
+    if ( !cpu_has_xsave || is_idle_vcpu(v) )
         return 0;
 
+    BUG_ON(xsave_cntxt_size < XSAVE_AREA_MIN_SIZE);
+
     /* XSAVE/XRSTOR requires the save area be 64-byte-boundary aligned. */
     save_area = _xmalloc(xsave_cntxt_size, 64);
     if ( save_area == NULL )