-rw-r--r--  xen/arch/x86/hvm/hvm.c          | 10
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c      |  7
-rw-r--r--  xen/arch/x86/i387.c             | 16
-rw-r--r--  xen/include/asm-x86/hvm/vcpu.h  |  2
-rw-r--r--  xen/include/asm-x86/i387.h      | 24
5 files changed, 25 insertions, 34 deletions
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 94190d3079..472df2f82e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -814,7 +814,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
xsave_init_save_area(xsave_area);
v->arch.hvm_vcpu.xsave_area = xsave_area;
- v->arch.hvm_vcpu.xfeature_mask = XSTATE_FP_SSE;
+ v->arch.hvm_vcpu.xcr0 = XSTATE_FP_SSE;
}
if ( (rc = vlapic_init(v)) != 0 )
@@ -2002,8 +2002,8 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
if ( cpu_has_xsave )
{
/*
- * Fix up "Processor Extended State Enumeration". We only present
- * FPU(bit0) and SSE(bit1) to HVM guest for now.
+ * Fix up "Processor Extended State Enumeration". We present
+ * FPU(bit0), SSE(bit1) and YMM(bit2) to HVM guest for now.
*/
*eax = *ebx = *ecx = *edx = 0;
switch ( count )
@@ -2012,14 +2012,14 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
/* No HW defines bit in EDX yet. */
*edx = 0;
/* We only enable the features we know. */
- *eax = xfeature_low;
+ *eax = xfeature_mask;
/* FP/SSE + XSAVE.HEADER + YMM. */
*ecx = 512 + 64 + ((*eax & XSTATE_YMM) ? XSTATE_YMM_SIZE : 0);
/* Let ebx equal ecx at present. */
*ebx = *ecx;
break;
case 2:
- if ( !(xfeature_low & XSTATE_YMM) )
+ if ( !(xfeature_mask & XSTATE_YMM) )
break;
*eax = XSTATE_YMM_SIZE;
*ebx = XSTATE_YMM_OFFSET;
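
For context: CPUID leaf 0xD ("Processor Extended State Enumeration") reports the
supported state-component bits in EDX:EAX of sub-leaf 0, and a size/offset pair for
each component in the higher sub-leaves. The hunk above hard-codes the layout Xen
exposes: a 512-byte legacy FXSAVE region, a 64-byte XSAVE header, and the YMM block
when bit 2 is offered, which is why sub-leaf 2 can report XSTATE_YMM_OFFSET = 512 + 64.
A minimal sketch of the same size computation; the names below are hypothetical, with
values mirroring the patch's constants:

    #include <stdint.h>

    #define XSTATE_FP      (1ULL << 0)
    #define XSTATE_SSE     (1ULL << 1)
    #define XSTATE_YMM     (1ULL << 2)
    #define XSAVE_HDR_SIZE 64    /* XSAVE header follows the 512-byte legacy area */
    #define YMM_AREA_SIZE  256   /* 16 registers x 16 bytes of high YMM halves */

    /* XSAVE area size needed for a given enabled-feature mask (FP/SSE/YMM only). */
    static uint32_t xsave_area_size(uint64_t xcr0)
    {
        uint32_t size = 512 + XSAVE_HDR_SIZE;   /* the "512 + 64" in the hunk */

        if ( xcr0 & XSTATE_YMM )
            size += YMM_AREA_SIZE;              /* YMM block starts at offset 576 */

        return size;
    }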
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 750d0fba8b..24c2331f47 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2203,20 +2203,19 @@ static int vmx_handle_eoi_write(void)
static int vmx_handle_xsetbv(u64 new_bv)
{
struct vcpu *v = current;
- u64 xfeature = (((u64)xfeature_high) << 32) | xfeature_low;
struct segment_register sreg;
hvm_get_segment_register(v, x86_seg_ss, &sreg);
if ( sreg.attr.fields.dpl != 0 )
goto err;
- if ( ((new_bv ^ xfeature) & ~xfeature) || !(new_bv & 1) )
+ if ( ((new_bv ^ xfeature_mask) & ~xfeature_mask) || !(new_bv & 1) )
goto err;
- if ( (xfeature & XSTATE_YMM & new_bv) && !(new_bv & XSTATE_SSE) )
+ if ( (xfeature_mask & XSTATE_YMM & new_bv) && !(new_bv & XSTATE_SSE) )
goto err;
- v->arch.hvm_vcpu.xfeature_mask = new_bv;
+ v->arch.hvm_vcpu.xcr0 = new_bv;
set_xcr0(new_bv);
return 0;
err:
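
The XSETBV checks above encode the architectural XCR0 rules: the guest may not enable
any state Xen itself does not offer, bit 0 (x87) must always be set, and YMM may only
be enabled together with SSE. Note that ((new_bv ^ xfeature_mask) & ~xfeature_mask)
reduces to new_bv & ~xfeature_mask. The field rename from xfeature_mask to xcr0 also
reflects what is stored: the guest's current XCR0 value, distinct from the host-wide
xfeature_mask. A standalone restatement of the validation, as a hypothetical helper
rather than Xen's API:

    #include <stdbool.h>
    #include <stdint.h>

    #define XSTATE_FP  (1ULL << 0)
    #define XSTATE_SSE (1ULL << 1)
    #define XSTATE_YMM (1ULL << 2)

    /* Would XSETBV accept this XCR0 value, given the host-supported mask? */
    static bool xcr0_is_valid(uint64_t new_bv, uint64_t host_mask)
    {
        if ( new_bv & ~host_mask )       /* no bits beyond what Xen offers */
            return false;
        if ( !(new_bv & XSTATE_FP) )     /* x87 state can never be disabled */
            return false;
        if ( (new_bv & XSTATE_YMM) && !(new_bv & XSTATE_SSE) )
            return false;                /* YMM depends on SSE (XMM) state */
        return true;
    }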
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 65fee7ac72..fa16fa9c5c 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -142,7 +142,7 @@ void restore_fpu(struct vcpu *v)
u32 xsave_cntxt_size;
/* A 64-bit bitmask of the XSAVE/XRSTOR features supported by processor. */
-u32 xfeature_low, xfeature_high;
+u64 xfeature_mask;
void xsave_init(void)
{
@@ -186,15 +186,15 @@ void xsave_init(void)
* We know FP/SSE and YMM about eax, and nothing about edx at present.
*/
xsave_cntxt_size = ebx;
- xfeature_low = eax & XCNTXT_MASK;
- xfeature_high = 0;
- printk("%s: using cntxt_size: 0x%x and states: %08x:%08x\n",
- __func__, xsave_cntxt_size, xfeature_high, xfeature_low);
+ xfeature_mask = eax + ((u64)edx << 32);
+ xfeature_mask &= XCNTXT_MASK;
+ printk("%s: using cntxt_size: 0x%x and states: 0x%"PRIx64"\n",
+ __func__, xsave_cntxt_size, xfeature_mask);
}
else
{
BUG_ON(xsave_cntxt_size != ebx);
- BUG_ON(xfeature_low != (eax & XCNTXT_MASK));
+ BUG_ON(xfeature_mask != (xfeature_mask & XCNTXT_MASK));
}
}
@@ -202,11 +202,7 @@ void xsave_init_save_area(void *save_area)
{
memset(save_area, 0, xsave_cntxt_size);
- ((u16 *)save_area)[0] = 0x37f; /* FCW */
- ((u16 *)save_area)[2] = 0xffff; /* FTW */
((u32 *)save_area)[6] = 0x1f80; /* MXCSR */
-
- ((struct xsave_struct *)save_area)->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
}
/*
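
The i387.c change widens the supported-feature bookkeeping from a split u32 pair to a
single u64, filled from CPUID.0xD sub-leaf 0 (EAX holds bits 31:0, EDX bits 63:32) and
clipped to the features Xen handles via XCNTXT_MASK. One observable side effect: the
secondary-CPU BUG_ON now only re-checks the stored xfeature_mask against XCNTXT_MASK
rather than against this CPU's CPUID output, so it no longer verifies cross-CPU
consistency the way the removed xfeature_low check did. A minimal user-space sketch of
the same probe, using the compiler's cpuid helper instead of Xen's infrastructure
(leaf number matches the architecture; the mask value assumes XCNTXT_MASK covers
FP, SSE and YMM, as the hvm_cpuid() hunk implies):

    #include <stdint.h>
    #include <stdio.h>
    #include <cpuid.h>                /* GCC/clang __cpuid_count() */

    #define XSAVE_CPUID_LEAF 0xd
    #define XCNTXT_MASK      0x7ULL   /* FP | SSE | YMM */

    int main(void)
    {
        uint32_t eax, ebx, ecx, edx;
        uint64_t mask;

        __cpuid_count(XSAVE_CPUID_LEAF, 0, eax, ebx, ecx, edx);
        mask = (((uint64_t)edx << 32) | eax) & XCNTXT_MASK;

        /* EBX is the XSAVE area size for the currently enabled features. */
        printf("cntxt_size: %u bytes, states: 0x%llx\n",
               ebx, (unsigned long long)mask);
        return 0;
    }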
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 52c6c09e55..53ef98320f 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -56,7 +56,7 @@ struct hvm_vcpu {
* #NM handler, we XRSTOR the states we XSAVE-ed;
*/
void *xsave_area;
- uint64_t xfeature_mask;
+ uint64_t xcr0;
struct vlapic vlapic;
s64 cache_tsc_offset;
diff --git a/xen/include/asm-x86/i387.h b/xen/include/asm-x86/i387.h
index 39e0e7df56..ba365c0b3c 100644
--- a/xen/include/asm-x86/i387.h
+++ b/xen/include/asm-x86/i387.h
@@ -15,7 +15,7 @@
#include <asm/processor.h>
extern unsigned int xsave_cntxt_size;
-extern u32 xfeature_low, xfeature_high;
+extern u64 xfeature_mask;
extern void xsave_init(void);
extern void xsave_init_save_area(void *save_area);
@@ -49,45 +49,41 @@ struct xsave_struct
#define REX_PREFIX
#endif
-static inline void xsetbv(u32 index, u64 xfeature_mask)
+static inline void xsetbv(u32 index, u64 xfeatures)
{
- u32 hi = xfeature_mask >> 32;
- u32 lo = (u32)xfeature_mask;
+ u32 hi = xfeatures >> 32;
+ u32 lo = (u32)xfeatures;
asm volatile (".byte 0x0f,0x01,0xd1" :: "c" (index),
"a" (lo), "d" (hi));
}
-static inline void set_xcr0(u64 xfeature_mask)
+static inline void set_xcr0(u64 xfeatures)
{
- xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeature_mask);
+ xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures);
}
static inline void xsave(struct vcpu *v)
{
- u64 mask = v->arch.hvm_vcpu.xfeature_mask | XSTATE_FP_SSE;
- u32 lo = mask, hi = mask >> 32;
struct xsave_struct *ptr;
ptr =(struct xsave_struct *)v->arch.hvm_vcpu.xsave_area;
asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x27"
:
- : "a" (lo), "d" (hi), "D"(ptr)
+ : "a" (-1), "d" (-1), "D"(ptr)
: "memory");
}
static inline void xrstor(struct vcpu *v)
{
- u64 mask = v->arch.hvm_vcpu.xfeature_mask | XSTATE_FP_SSE;
- u32 lo = mask, hi = mask >> 32;
struct xsave_struct *ptr;
ptr =(struct xsave_struct *)v->arch.hvm_vcpu.xsave_area;
asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x2f"
:
- : "m" (*ptr), "a" (lo), "d" (hi), "D"(ptr));
+ : "m" (*ptr), "a" (-1), "d" (-1), "D"(ptr));
}
extern void init_fpu(void);
@@ -117,9 +113,9 @@ static inline void setup_fpu(struct vcpu *v)
if ( !v->fpu_initialised )
v->fpu_initialised = 1;
- set_xcr0(v->arch.hvm_vcpu.xfeature_mask | XSTATE_FP_SSE);
+ set_xcr0(v->arch.hvm_vcpu.xcr0 | XSTATE_FP_SSE);
xrstor(v);
- set_xcr0(v->arch.hvm_vcpu.xfeature_mask);
+ set_xcr0(v->arch.hvm_vcpu.xcr0);
}
else
{
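
Two details in the i387.h hunks are worth spelling out. First, xsave()/xrstor() now
pass all-ones in EDX:EAX: that operand is the requested-feature bitmap (RFBM), and the
hardware only saves or restores the intersection RFBM & XCR0, so XCR0 alone decides
which components are touched. Second, setup_fpu() therefore brackets the XRSTOR by
temporarily widening XCR0 to the guest's value plus FP/SSE, then dropping back to the
guest's own XCR0. A compile-only sketch of that sequence, with stubs standing in for
the privileged primitives (the real code executes XSETBV/XRSTOR in ring 0; the helper
names here are hypothetical):

    #include <stdint.h>

    #define XSTATE_FP_SSE 3ULL          /* x87 | SSE, as Xen defines it */

    /* Hypothetical stand-ins: the real primitives are ring-0 instructions. */
    static uint64_t xcr0_shadow;
    static void set_xcr0(uint64_t v) { xcr0_shadow = v; }
    static void xrstor_all(void) { /* XRSTOR with RFBM = ~0, masked by XCR0 */ }

    /* The setup_fpu() dance: make FP/SSE restorable, restore everything the
     * guest enabled, then return to the guest's own XCR0 view. */
    static void lazy_fpu_restore(uint64_t guest_xcr0)
    {
        set_xcr0(guest_xcr0 | XSTATE_FP_SSE);
        xrstor_all();
        set_xcr0(guest_xcr0);
    }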