path: root/xen/include
author    Keir Fraser <keir@xen.org>    2010-11-03 08:15:20 +0000
committer Keir Fraser <keir@xen.org>    2010-11-03 08:15:20 +0000
commit    2d741abb30af1a2abc45f1cba4c8e78e6d1b07d0 (patch)
tree      b1f3b5413d754a15302417d98c4b20167aad961b /xen/include
parent    3b0246ea0347bacdfcb751a920899837f29e13d8 (diff)
download  xen-2d741abb30af1a2abc45f1cba4c8e78e6d1b07d0.tar.gz
          xen-2d741abb30af1a2abc45f1cba4c8e78e6d1b07d0.tar.bz2
          xen-2d741abb30af1a2abc45f1cba4c8e78e6d1b07d0.zip
x86: Xsave support for PV guests.
Signed-off-by: Shan Haitao <haitao.shan@intel.com>
Signed-off-by: Han Weidong <weidong.han@intel.com>
Diffstat (limited to 'xen/include')
-rw-r--r--  xen/include/asm-x86/domain.h     20
-rw-r--r--  xen/include/asm-x86/hvm/vcpu.h    9
-rw-r--r--  xen/include/asm-x86/i387.h       22
3 files changed, 36 insertions(+), 15 deletions(-)
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 7549126504..15b050ac0e 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -400,6 +400,23 @@ struct arch_vcpu
pagetable_t monitor_table; /* (MFN) hypervisor PT (for HVM) */
unsigned long cr3; /* (MA) value to install in HW CR3 */
+ /*
+ * The save area for Processor Extended States and the bitmask of the
+ * XSAVE/XRSTOR features. They are used in two places: 1) when a vcpu
+ * that has dirtied FPU/SSE state is scheduled out, we XSAVE the state
+ * into this area; 2) in the #NM handler, we XRSTOR what we XSAVE-ed.
+ */
+ void *xsave_area;
+ uint64_t xcr0;
+ /* Accumulated mask of all eXtended features the guest has ever
+ * enabled, used when Xen itself issues XSAVE/XRSTOR: we can never
+ * know whether the guest OS depends on a state component's contents
+ * being preserved while its feature flag is cleared (e.g. cleared
+ * only temporarily). A guest still cannot touch an eXtended state
+ * before it explicitly enables the corresponding feature via xcr0.
+ */
+ uint64_t xcr0_accum;
+
/* Current LDT details. */
unsigned long shadow_ldt_mapcnt;
spinlock_t shadow_ldt_lock;
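To make xcr0_accum concrete: a minimal sketch of how a guest write to XCR0 would feed both new fields. guest_set_xcr0() is a hypothetical helper for illustration only, not part of this patch; the real XSETBV handling lives elsewhere in the hypervisor.

/* Hypothetical helper: how a guest XCR0 write would update both fields. */
static void guest_set_xcr0(struct vcpu *v, uint64_t new_xcr0)
{
    v->arch.xcr0 = new_xcr0;         /* what the guest enables right now */
    v->arch.xcr0_accum |= new_xcr0;  /* remember every feature ever used */
    set_xcr0(new_xcr0);              /* program the hardware register */
}

Once a bit enters xcr0_accum it never leaves, so Xen's own XSAVE/XRSTOR always covers any component the guest may have touched.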
@@ -435,7 +452,8 @@ unsigned long pv_guest_cr4_fixup(const struct vcpu *, unsigned long guest_cr4);
#define pv_guest_cr4_to_real_cr4(v) \
(((v)->arch.guest_context.ctrlreg[4] \
| (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE)) \
- | ((v)->domain->arch.vtsc ? X86_CR4_TSD : 0)) \
+ | ((v)->domain->arch.vtsc ? X86_CR4_TSD : 0) \
+ | (cpu_has_xsave ? X86_CR4_OSXSAVE : 0)) \
& ~X86_CR4_DE)
#define real_cr4_to_pv_guest_cr4(c) \
((c) & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_TSD | X86_CR4_OSXSAVE))
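The round-trip of these two macros can be checked in isolation. The standalone program below mimics their bit logic; the CR4 bit positions come from the Intel SDM, and the guest CR4 value is made up for the example.

#include <stdint.h>
#include <stdio.h>

#define X86_CR4_TSD      0x00000004UL  /* time stamp disable */
#define X86_CR4_DE       0x00000008UL  /* debugging extensions */
#define X86_CR4_PSE      0x00000010UL  /* page size extensions */
#define X86_CR4_PAE      0x00000020UL  /* physical address extension */
#define X86_CR4_PGE      0x00000080UL  /* global pages */
#define X86_CR4_OSXSAVE  0x00040000UL  /* OS has enabled XSAVE */

int main(void)
{
    unsigned long guest_cr4 = X86_CR4_PAE;   /* made-up guest value */
    unsigned long mmu_cr4_features = X86_CR4_PGE | X86_CR4_PSE;
    int vtsc = 0, cpu_has_xsave = 1;

    /* pv_guest_cr4_to_real_cr4: force on what Xen needs, mask off DE. */
    unsigned long real = (guest_cr4
                          | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE))
                          | (vtsc ? X86_CR4_TSD : 0)
                          | (cpu_has_xsave ? X86_CR4_OSXSAVE : 0))
                         & ~X86_CR4_DE;

    /* real_cr4_to_pv_guest_cr4: hide the bits Xen owns from the guest. */
    unsigned long back = real & ~(X86_CR4_PGE | X86_CR4_PSE |
                                  X86_CR4_TSD | X86_CR4_OSXSAVE);

    printf("real=%#lx guest-visible=%#lx\n", real, back);
    return 0;
}

With OSXSAVE forced on whenever the host supports XSAVE, the xsave/xrstor paths below work no matter what the PV guest writes to its virtual CR4.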
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 53ef98320f..1d72ecfc60 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -49,15 +49,6 @@ struct hvm_vcpu {
*/
unsigned long hw_cr[5];
- /*
- * The save area for Processor Extended States and the bitmask of the
- * XSAVE/XRSTOR features. They are used by: 1) when a vcpu (which has
- * dirtied FPU/SSE) is scheduled out we XSAVE the states here; 2) in
- * #NM handler, we XRSTOR the states we XSAVE-ed;
- */
- void *xsave_area;
- uint64_t xcr0;
-
struct vlapic vlapic;
s64 cache_tsc_offset;
u64 guest_time;
diff --git a/xen/include/asm-x86/i387.h b/xen/include/asm-x86/i387.h
index ba365c0b3c..ba773e99ed 100644
--- a/xen/include/asm-x86/i387.h
+++ b/xen/include/asm-x86/i387.h
@@ -49,6 +49,8 @@ struct xsave_struct
#define REX_PREFIX
#endif
+DECLARE_PER_CPU(uint64_t, xcr0);
+
static inline void xsetbv(u32 index, u64 xfeatures)
{
u32 hi = xfeatures >> 32;
@@ -60,14 +62,20 @@ static inline void xsetbv(u32 index, u64 xfeatures)
static inline void set_xcr0(u64 xfeatures)
{
+ this_cpu(xcr0) = xfeatures;
xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures);
}
+static inline uint64_t get_xcr0(void)
+{
+ return this_cpu(xcr0);
+}
+
static inline void xsave(struct vcpu *v)
{
struct xsave_struct *ptr;
- ptr =(struct xsave_struct *)v->arch.hvm_vcpu.xsave_area;
+ ptr = (struct xsave_struct *)v->arch.xsave_area;
asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x27"
:
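The new get_xcr0() above reads the per-CPU shadow written by set_xcr0() rather than the hardware register. For comparison, the hardware read would be xgetbv; a sketch follows, with the opcode taken from the SDM (this helper is not part of the patch):

/* Hardware read of an extended control register. The per-CPU shadow
 * means this instruction never has to run on a hot path. */
static inline uint64_t xgetbv(uint32_t index)
{
    uint32_t lo, hi;

    asm volatile ( ".byte 0x0f,0x01,0xd0" /* xgetbv */
                   : "=a" (lo), "=d" (hi)
                   : "c" (index) );
    return lo | ((uint64_t)hi << 32);
}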
@@ -79,7 +87,7 @@ static inline void xrstor(struct vcpu *v)
{
struct xsave_struct *ptr;
- ptr =(struct xsave_struct *)v->arch.hvm_vcpu.xsave_area;
+ ptr = (struct xsave_struct *)v->arch.xsave_area;
asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x2f"
:
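Both routines hand-assemble their instruction for toolchains that predate the xsave/xrstor mnemonics. A sketch of the decoding, assuming REX_PREFIX expands to 0x48 (REX.W) on 64-bit builds and that the constraints elided above pin the pointer with "D":

/*
 * 0f ae /4 -> xsave mem    ModRM 0x27 = mod 00, reg /4, rm 111 (rdi)
 * 0f ae /5 -> xrstor mem   ModRM 0x2f = mod 00, reg /5, rm 111 (rdi)
 *
 * With REX.W the bytes spell xsave64/xrstor64 (%rdi): the save-area
 * pointer must sit in rdi, and edx:eax carry the feature mask. On a
 * new enough assembler the same store could be written directly as
 *   asm volatile ( "xsave %0" : "+m" (*ptr) : "a" (lo), "d" (hi) );
 */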
@@ -108,14 +116,18 @@ static inline void setup_fpu(struct vcpu *v)
if ( !v->fpu_dirtied )
{
v->fpu_dirtied = 1;
- if ( cpu_has_xsave && is_hvm_vcpu(v) )
+ if ( cpu_has_xsave )
{
if ( !v->fpu_initialised )
v->fpu_initialised = 1;
- set_xcr0(v->arch.hvm_vcpu.xcr0 | XSTATE_FP_SSE);
+ /* XCR0 normally holds whatever the guest OS has set. While Xen
+ * itself saves or restores state, we switch to the accumulated
+ * feature mask so that every component ever enabled is covered.
+ */
+ set_xcr0(v->arch.xcr0_accum);
xrstor(v);
- set_xcr0(v->arch.hvm_vcpu.xcr0);
+ set_xcr0(v->arch.xcr0);
}
else
{
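Taken together, the restore path above and its save-side mirror give the full lazy lifecycle. The context-switch save code lives outside xen/include and is not part of this diff, so step 2 below is an assumption that it mirrors setup_fpu():

/*
 * 1. Guest touches FPU/SIMD state -> #NM -> setup_fpu():
 *      set_xcr0(v->arch.xcr0_accum);  // widen mask to all features used
 *      xrstor(v);                     // reload everything ever enabled
 *      set_xcr0(v->arch.xcr0);        // back to the guest's own mask
 *
 * 2. A vcpu with fpu_dirtied is descheduled (assumed mirror image):
 *      set_xcr0(v->arch.xcr0_accum);
 *      xsave(v);                      // capture all used components
 *      set_xcr0(v->arch.xcr0);
 */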