author     Keir Fraser <keir@xen.org>  2010-11-03 08:15:20 +0000
committer  Keir Fraser <keir@xen.org>  2010-11-03 08:15:20 +0000
commit     2d741abb30af1a2abc45f1cba4c8e78e6d1b07d0 (patch)
tree       b1f3b5413d754a15302417d98c4b20167aad961b /xen/include/asm-x86/i387.h
parent     3b0246ea0347bacdfcb751a920899837f29e13d8 (diff)
x86: Xsave support for PV guests.
Signed-off-by: Shan Haitao <haitao.shan@intel.com>
Signed-off-by: Han Weidong <weidong.han@intel.com>
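
For context on the register this patch starts tracking per CPU: XCR0 selects which x87/SSE/AVX state components XSAVE and XRSTOR operate on. The standalone sketch below is illustrative only and is not code from this patch; it assumes a user-space build with GCC's <cpuid.h>, uses the xgetbv mnemonic instead of the ".byte" encoding the header carries for old assemblers, and simply reads and decodes XCR0.

/* Illustrative only, not code from this patch: read XCR0 with XGETBV and
 * decode its low feature bits.  This is the register that set_xcr0() and
 * get_xcr0() below manage, with a per-CPU software copy inside Xen. */
#include <stdint.h>
#include <stdio.h>
#include <cpuid.h>

#define XSTATE_FP   (1ULL << 0)   /* x87 state       */
#define XSTATE_SSE  (1ULL << 1)   /* SSE (XMM) state */
#define XSTATE_YMM  (1ULL << 2)   /* AVX (YMM) state */

static uint64_t read_xcr0(void)
{
    uint32_t lo, hi;

    /* xgetbv: ECX selects the extended control register, 0 is XCR0. */
    asm volatile ( "xgetbv" : "=a" (lo), "=d" (hi) : "c" (0) );
    return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
    unsigned int eax, ebx, ecx, edx;
    uint64_t xcr0;

    /* XGETBV faults unless the OS has set CR4.OSXSAVE (CPUID.1:ECX bit 27). */
    if ( !__get_cpuid(1, &eax, &ebx, &ecx, &edx) || !(ecx & (1u << 27)) )
    {
        puts("OSXSAVE not enabled; XGETBV would fault");
        return 1;
    }

    xcr0 = read_xcr0();
    printf("XCR0 = %#llx (FP:%d SSE:%d YMM:%d)\n",
           (unsigned long long)xcr0,
           !!(xcr0 & XSTATE_FP), !!(xcr0 & XSTATE_SSE), !!(xcr0 & XSTATE_YMM));
    return 0;
}

Inside Xen the same value is what set_xcr0() programs; get_xcr0(), added by this patch, returns the per-CPU cached copy so no XGETBV is needed.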
Diffstat (limited to 'xen/include/asm-x86/i387.h')
-rw-r--r--  xen/include/asm-x86/i387.h | 22
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/xen/include/asm-x86/i387.h b/xen/include/asm-x86/i387.h
index ba365c0b3c..ba773e99ed 100644
--- a/xen/include/asm-x86/i387.h
+++ b/xen/include/asm-x86/i387.h
@@ -49,6 +49,8 @@ struct xsave_struct
 #define REX_PREFIX
 #endif
 
+DECLARE_PER_CPU(uint64_t, xcr0);
+
 static inline void xsetbv(u32 index, u64 xfeatures)
 {
     u32 hi = xfeatures >> 32;
@@ -60,14 +62,20 @@ static inline void xsetbv(u32 index, u64 xfeatures)
 
 static inline void set_xcr0(u64 xfeatures)
 {
+    this_cpu(xcr0) = xfeatures;
     xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures);
 }
 
+static inline uint64_t get_xcr0(void)
+{
+    return this_cpu(xcr0);
+}
+
 static inline void xsave(struct vcpu *v)
 {
     struct xsave_struct *ptr;
 
-    ptr =(struct xsave_struct *)v->arch.hvm_vcpu.xsave_area;
+    ptr =(struct xsave_struct *)v->arch.xsave_area;
 
     asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x27"
         :
@@ -79,7 +87,7 @@ static inline void xrstor(struct vcpu *v)
 {
     struct xsave_struct *ptr;
 
-    ptr =(struct xsave_struct *)v->arch.hvm_vcpu.xsave_area;
+    ptr =(struct xsave_struct *)v->arch.xsave_area;
 
     asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x2f"
         :
@@ -108,14 +116,18 @@ static inline void setup_fpu(struct vcpu *v)
     if ( !v->fpu_dirtied )
     {
         v->fpu_dirtied = 1;
-        if ( cpu_has_xsave && is_hvm_vcpu(v) )
+        if ( cpu_has_xsave )
         {
             if ( !v->fpu_initialised )
                 v->fpu_initialised = 1;
-            set_xcr0(v->arch.hvm_vcpu.xcr0 | XSTATE_FP_SSE);
+            /* XCR0 normally holds whatever the guest OS last set.  Before
+             * the save/restore, switch to the accumulated mask of every
+             * feature the guest has ever enabled (xcr0_accum).
+             */
+            set_xcr0(v->arch.xcr0_accum);
             xrstor(v);
-            set_xcr0(v->arch.hvm_vcpu.xcr0);
+            set_xcr0(v->arch.xcr0);
         }
         else
         {
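
A footnote on the setup_fpu() hunk above: XSAVE and XRSTOR only touch state components enabled in XCR0 (intersected with the EDX:EAX request mask), which is why the restore is bracketed by set_xcr0(v->arch.xcr0_accum) and set_xcr0(v->arch.xcr0). The sketch below is illustrative only and is not Xen code: the struct layout, function names and use of the plain xsave/xrstor mnemonics are assumptions; it restricts itself to x87+SSE so a 576-byte area suffices, and it presumes an XSAVE-capable CPU with CR4.OSXSAVE already set by the OS (see the CPUID check in the earlier sketch).

/* Illustrative only, not Xen code: a minimal XSAVE/XRSTOR round trip
 * restricted to x87+SSE (XSTATE_FP_SSE), so the 512-byte legacy region
 * plus the 64-byte XSAVE header is a big enough buffer. */
#include <stdint.h>

#define XSTATE_FP_SSE 3u   /* x87 (bit 0) + SSE (bit 1), mirroring the macro above */

struct example_xsave_area {
    uint8_t legacy[512];   /* FXSAVE-compatible region */
    uint8_t header[64];    /* XSAVE header (XSTATE_BV, reserved bytes) */
} __attribute__((aligned(64)));          /* XSAVE requires 64-byte alignment */

static struct example_xsave_area area;   /* static storage, so the header starts zeroed */

static void example_save(void)
{
    /* EDX:EAX is the request mask; hardware intersects it with XCR0, so
     * only components currently enabled in XCR0 are written out. */
    asm volatile ( "xsave %0" : "+m" (area) : "a" (XSTATE_FP_SSE), "d" (0u) );
}

static void example_restore(void)
{
    /* Likewise, XRSTOR reloads only components enabled in XCR0 and in the
     * request mask; components outside the mask are left untouched. */
    asm volatile ( "xrstor %0" : : "m" (area), "a" (XSTATE_FP_SSE), "d" (0u) );
}

int main(void)
{
    example_save();
    example_restore();
    return 0;
}

Xen allocates its save areas using the size reported by CPUID leaf 0xD and its xsave()/xrstor() helpers pass an all-ones request mask, so the XCR0 value at the moment of the instruction bounds what is actually saved or restored; that is the property the xcr0_accum switch in setup_fpu() relies on.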