aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKeir Fraser <keir@xen.org>2010-12-17 09:25:00 +0000
committerKeir Fraser <keir@xen.org>2010-12-17 09:25:00 +0000
commitee8ac49677904c14b69b7fe65d79b57af964edc5 (patch)
tree4f2cb3e3431c568770c57b27dff78d67ad03a2e6
parent5ec363dfe9654d3be6d3e9a0105ebe53577913fe (diff)
downloadxen-ee8ac49677904c14b69b7fe65d79b57af964edc5.tar.gz
xen-ee8ac49677904c14b69b7fe65d79b57af964edc5.tar.bz2
xen-ee8ac49677904c14b69b7fe65d79b57af964edc5.zip
x86:xsaveopt: Enable xsaveopt feature in Xen and guest
This patch uses "xsaveopt" instead of "xsave" if the feature is supported in hardware, to optimize task-switch performance in Xen. It also exposes the feature to guest VMs.

Signed-off-by: Zhang Fengzhe <fengzhe.zhang@intel.com>
-rw-r--r--xen/arch/x86/hvm/hvm.c3
-rw-r--r--xen/arch/x86/i387.c11
-rw-r--r--xen/include/asm-x86/i387.h14
3 files changed, 27 insertions, 1 deletion
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 9f5784216c..19c8ba0794 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2171,6 +2171,9 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
*ebx = XSTATE_YMM_OFFSET;
break;
case 1:
+ if ( cpu_has_xsaveopt )
+ *eax = XSAVEOPT;
+ break;
default:
break;
}
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 82dd88e920..1ddb2c7495 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -39,7 +39,10 @@ void save_init_fpu(struct vcpu *v)
* we set all accumulated feature mask before doing save/restore.
*/
set_xcr0(v->arch.xcr0_accum);
- xsave(v);
+ if ( cpu_has_xsaveopt )
+ xsaveopt(v);
+ else
+ xsave(v);
set_xcr0(v->arch.xcr0);
}
else if ( cpu_has_fxsr )
@@ -152,6 +155,8 @@ u64 xfeature_mask;
/* Cached xcr0 for fast read */
DEFINE_PER_CPU(uint64_t, xcr0);
+bool_t __read_mostly cpu_has_xsaveopt;
+
void xsave_init(void)
{
u32 eax, ebx, ecx, edx;
@@ -196,6 +201,10 @@ void xsave_init(void)
xfeature_mask &= XCNTXT_MASK;
printk("%s: using cntxt_size: 0x%x and states: 0x%"PRIx64"\n",
__func__, xsave_cntxt_size, xfeature_mask);
+
+ /* Check XSAVEOPT feature. */
+ cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
+ cpu_has_xsaveopt = !!(eax & XSAVEOPT);
}
else
{
diff --git a/xen/include/asm-x86/i387.h b/xen/include/asm-x86/i387.h
index 0e01d1604d..471747c7c4 100644
--- a/xen/include/asm-x86/i387.h
+++ b/xen/include/asm-x86/i387.h
@@ -16,6 +16,7 @@
extern unsigned int xsave_cntxt_size;
extern u64 xfeature_mask;
+extern bool_t cpu_has_xsaveopt;
void xsave_init(void);
int xsave_alloc_save_area(struct vcpu *v);
@@ -28,6 +29,7 @@ void xsave_free_save_area(struct vcpu *v);
#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
#define XSTATE_YMM_OFFSET (512 + 64)
#define XSTATE_YMM_SIZE 256
+#define XSAVEOPT (1 << 0)
struct xsave_struct
{
@@ -84,6 +86,18 @@ static inline void xsave(struct vcpu *v)
: "memory");
}
+static inline void xsaveopt(struct vcpu *v)
+{
+ struct xsave_struct *ptr;
+
+ ptr =(struct xsave_struct *)v->arch.xsave_area;
+
+ asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x37"
+ :
+ : "a" (-1), "d" (-1), "D"(ptr)
+ : "memory");
+}
+
static inline void xrstor(struct vcpu *v)
{
struct xsave_struct *ptr;