about summary refs log tree commit diff stats
path: root/xen/arch/x86/xstate.c
diff options
context:
space:
mode:
Diffstat (limited to 'xen/arch/x86/xstate.c')
-rw-r--r--  xen/arch/x86/xstate.c  63
1 file changed, 51 insertions, 12 deletions
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 27a0eded4c..b0ac1ffdc7 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -21,7 +21,7 @@ bool_t __read_mostly cpu_has_xsaveopt;
* the supported and enabled features on the processor, including the
* XSAVE.HEADER. We only enable XCNTXT_MASK that we have known.
*/
-u32 xsave_cntxt_size;
+static u32 __read_mostly xsave_cntxt_size;
/* A 64-bit bitmask of the XSAVE/XRSTOR features supported by processor. */
u64 xfeature_mask;
@@ -212,13 +212,13 @@ void xrstor(struct vcpu *v, uint64_t mask)
bool_t xsave_enabled(const struct vcpu *v)
{
- if ( cpu_has_xsave )
- {
- ASSERT(xsave_cntxt_size >= XSTATE_AREA_MIN_SIZE);
- ASSERT(v->arch.xsave_area);
- }
+ if ( !cpu_has_xsave )
+ return 0;
- return cpu_has_xsave;
+ ASSERT(xsave_cntxt_size >= XSTATE_AREA_MIN_SIZE);
+ ASSERT(v->arch.xsave_area);
+
+ return !!v->arch.xcr0_accum;
}
int xstate_alloc_save_area(struct vcpu *v)
@@ -240,8 +240,8 @@ int xstate_alloc_save_area(struct vcpu *v)
save_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
v->arch.xsave_area = save_area;
- v->arch.xcr0 = XSTATE_FP_SSE;
- v->arch.xcr0_accum = XSTATE_FP_SSE;
+ v->arch.xcr0 = 0;
+ v->arch.xcr0_accum = 0;
return 0;
}
@@ -259,7 +259,11 @@ void xstate_init(bool_t bsp)
u64 feature_mask;
if ( boot_cpu_data.cpuid_level < XSTATE_CPUID )
+ {
+ BUG_ON(!bsp);
+ setup_clear_cpu_cap(X86_FEATURE_XSAVE);
return;
+ }
cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
@@ -279,7 +283,6 @@ void xstate_init(bool_t bsp)
set_in_cr4(X86_CR4_OSXSAVE);
if ( !set_xcr0(feature_mask) )
BUG();
- cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
if ( bsp )
{
@@ -288,14 +291,14 @@ void xstate_init(bool_t bsp)
* xsave_cntxt_size is the max size required by enabled features.
* We know FP/SSE and YMM about eax, and nothing about edx at present.
*/
- xsave_cntxt_size = ebx;
+ xsave_cntxt_size = xstate_ctxt_size(feature_mask);
printk("%s: using cntxt_size: 0x%x and states: 0x%"PRIx64"\n",
__func__, xsave_cntxt_size, xfeature_mask);
}
else
{
BUG_ON(xfeature_mask != feature_mask);
- BUG_ON(xsave_cntxt_size != ebx);
+ BUG_ON(xsave_cntxt_size != xstate_ctxt_size(feature_mask));
}
/* Check XSAVEOPT feature. */
@@ -306,6 +309,42 @@ void xstate_init(bool_t bsp)
BUG_ON(!cpu_has_xsaveopt != !(eax & XSTATE_FEATURE_XSAVEOPT));
}
+unsigned int xstate_ctxt_size(u64 xcr0)
+{
+ u32 ebx = 0;
+
+ if ( xcr0 )
+ {
+ u64 act_xcr0 = get_xcr0();
+ u32 eax, ecx, edx;
+ bool_t ok = set_xcr0(xcr0);
+
+ ASSERT(ok);
+ cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
+ ASSERT(ebx <= ecx);
+ ok = set_xcr0(act_xcr0);
+ ASSERT(ok);
+ }
+
+ return ebx;
+}
+
+int validate_xstate(u64 xcr0, u64 xcr0_accum, u64 xstate_bv, u64 xfeat_mask)
+{
+ if ( (xcr0_accum & ~xfeat_mask) ||
+ (xstate_bv & ~xcr0_accum) ||
+ (xcr0 & ~xcr0_accum) ||
+ !(xcr0 & XSTATE_FP) ||
+ ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) ||
+ ((xcr0_accum & XSTATE_YMM) && !(xcr0_accum & XSTATE_SSE)) )
+ return -EINVAL;
+
+ if ( xcr0_accum & ~xfeature_mask )
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
int handle_xsetbv(u32 index, u64 new_bv)
{
struct vcpu *curr = current;