diff options
author | Wei Gang <gang.wei@intel.com> | 2011-02-21 09:11:57 +0000 |
---|---|---|
committer | Wei Gang <gang.wei@intel.com> | 2011-02-21 09:11:57 +0000 |
commit | 3af87a7ece7f27a537a006196873ca9fda696616 (patch) | |
tree | 5fd8c0199526ccb8651a76da2b334223b6e41b7d | |
parent | 0dba10b8e37f3f63ed9f5ed449d9e9e0c8c9f1aa (diff) | |
download | xen-3af87a7ece7f27a537a006196873ca9fda696616.tar.gz xen-3af87a7ece7f27a537a006196873ca9fda696616.tar.bz2 xen-3af87a7ece7f27a537a006196873ca9fda696616.zip |
x86: add strict sanity checks for XSAVE/XRSTOR
Replace most checks on cpu_has_xsave with checks on the new function
xsave_enabled(), and perform additional sanity checks in the new function.
Signed-off-by: Wei Gang <gang.wei@intel.com>
Signed-off-by: Keir Fraser <keir.xen@gmail.com>
-rw-r--r-- | xen/arch/x86/domain.c | 4 | ||||
-rw-r--r-- | xen/arch/x86/domctl.c | 2 | ||||
-rw-r--r-- | xen/arch/x86/hvm/hvm.c | 19 | ||||
-rw-r--r-- | xen/arch/x86/hvm/vmx/vmcs.c | 3 | ||||
-rw-r--r-- | xen/arch/x86/i387.c | 15 | ||||
-rw-r--r-- | xen/arch/x86/traps.c | 2 | ||||
-rw-r--r-- | xen/include/asm-x86/domain.h | 2 | ||||
-rw-r--r-- | xen/include/asm-x86/hvm/hvm.h | 4 | ||||
-rw-r--r-- | xen/include/asm-x86/i387.h | 7 |
9 files changed, 37 insertions, 21 deletions
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index b971df19b9..d432eb8990 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -628,7 +628,7 @@ unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long guest_cr4) hv_cr4_mask &= ~X86_CR4_DE; if ( cpu_has_fsgsbase && !is_pv_32bit_domain(v->domain) ) hv_cr4_mask &= ~X86_CR4_FSGSBASE; - if ( cpu_has_xsave ) + if ( xsave_enabled(v) ) hv_cr4_mask &= ~X86_CR4_OSXSAVE; if ( (guest_cr4 & hv_cr4_mask) != (hv_cr4 & hv_cr4_mask) ) @@ -1402,7 +1402,7 @@ static void __context_switch(void) memcpy(stack_regs, &n->arch.guest_context.user_regs, CTXT_SWITCH_STACK_BYTES); - if ( cpu_has_xsave && n->arch.xcr0 != get_xcr0() ) + if ( xsave_enabled(n) && n->arch.xcr0 != get_xcr0() ) set_xcr0(n->arch.xcr0); n->arch.ctxt_switch_to(n); } diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c index 6fb5504d78..3f91f7ac2e 100644 --- a/xen/arch/x86/domctl.c +++ b/xen/arch/x86/domctl.c @@ -1603,7 +1603,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c) #endif /* Fill legacy context from xsave area first */ - if ( cpu_has_xsave ) + if ( xsave_enabled(v) ) memcpy(v->arch.xsave_area, &v->arch.guest_context.fpu_ctxt, sizeof(v->arch.guest_context.fpu_ctxt)); diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index ec71dfcef3..11f9392d60 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -676,7 +676,7 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h) return -EINVAL; } - if ( ctxt.cr4 & HVM_CR4_GUEST_RESERVED_BITS ) + if ( ctxt.cr4 & HVM_CR4_GUEST_RESERVED_BITS(v) ) { gdprintk(XENLOG_ERR, "HVM restore: bad CR4 0x%"PRIx64"\n", ctxt.cr4); @@ -773,7 +773,7 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h) memcpy(&vc->fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs)); /* In case xsave-absent save file is restored on a xsave-capable host */ - if ( cpu_has_xsave ) + if ( xsave_enabled(v) ) { struct xsave_struct *xsave_area = 
v->arch.xsave_area; @@ -836,6 +836,8 @@ static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h) for_each_vcpu ( d, v ) { + if ( !xsave_enabled(v) ) + continue; if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, HVM_CPU_XSAVE_SIZE) ) return 1; ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur]; @@ -861,11 +863,6 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h) struct hvm_save_descriptor *desc; uint64_t _xfeature_mask; - /* fails since we can't restore an img saved on xsave-capable host */ -//XXX: - if ( !cpu_has_xsave ) - return -EINVAL; - /* Which vcpu is this? */ vcpuid = hvm_load_instance(h); if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL ) @@ -874,6 +871,10 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h) return -EINVAL; } + /* Fails since we can't restore an img saved on xsave-capable host. */ + if ( !xsave_enabled(v) ) + return -EINVAL; + /* Customized checking for entry since our entry is of variable length */ desc = (struct hvm_save_descriptor *)&h->data[h->cur]; if ( sizeof (*desc) > h->size - h->cur) @@ -1453,7 +1454,7 @@ int hvm_set_cr4(unsigned long value) struct vcpu *v = current; unsigned long old_cr; - if ( value & HVM_CR4_GUEST_RESERVED_BITS ) + if ( value & HVM_CR4_GUEST_RESERVED_BITS(v) ) { HVM_DBG_LOG(DBG_LEVEL_1, "Guest attempts to set reserved bit in CR4: %lx", @@ -2208,7 +2209,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx, __clear_bit(X86_FEATURE_APIC & 31, edx); /* Fix up OSXSAVE. */ - if ( cpu_has_xsave ) + if ( xsave_enabled(v) ) *ecx |= (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSXSAVE) ? bitmaskof(X86_FEATURE_OSXSAVE) : 0; break; diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c index 6f7fc1a8cc..18e7055157 100644 --- a/xen/arch/x86/hvm/vmx/vmcs.c +++ b/xen/arch/x86/hvm/vmx/vmcs.c @@ -760,7 +760,8 @@ static int construct_vmcs(struct vcpu *v) /* Host control registers. 
*/ v->arch.hvm_vmx.host_cr0 = read_cr0() | X86_CR0_TS; __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0); - __vmwrite(HOST_CR4, mmu_cr4_features | (cpu_has_xsave ? X86_CR4_OSXSAVE : 0)); + __vmwrite(HOST_CR4, + mmu_cr4_features | (xsave_enabled(v) ? X86_CR4_OSXSAVE : 0)); /* Host CS:RIP. */ __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS); diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c index 5c96da0456..7d0a9fa3f5 100644 --- a/xen/arch/x86/i387.c +++ b/xen/arch/x86/i387.c @@ -69,7 +69,7 @@ void setup_fpu(struct vcpu *v) if ( v->fpu_dirtied ) return; - if ( cpu_has_xsave ) + if ( xsave_enabled(v) ) { /* * XCR0 normally represents what guest OS set. In case of Xen itself, @@ -116,7 +116,7 @@ void save_init_fpu(struct vcpu *v) if ( cr0 & X86_CR0_TS ) clts(); - if ( cpu_has_xsave ) + if ( xsave_enabled(v) ) { /* XCR0 normally represents what guest OS set. In case of Xen itself, * we set all accumulated feature mask before doing save/restore. @@ -316,6 +316,17 @@ void xsave_free_save_area(struct vcpu *v) v->arch.xsave_area = NULL; } +bool_t xsave_enabled(const struct vcpu *v) +{ + if ( cpu_has_xsave ) + { + ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE); + ASSERT(v->arch.xsave_area); + } + + return cpu_has_xsave; +} + /* * Local variables: * mode: C diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c index 81141fd4f1..2908b61ca3 100644 --- a/xen/arch/x86/traps.c +++ b/xen/arch/x86/traps.c @@ -771,7 +771,7 @@ static void pv_cpuid(struct cpu_user_regs *regs) __clear_bit(X86_FEATURE_XTPR % 32, &c); __clear_bit(X86_FEATURE_PDCM % 32, &c); __clear_bit(X86_FEATURE_DCA % 32, &c); - if ( !cpu_has_xsave ) + if ( !xsave_enabled(current) ) { __clear_bit(X86_FEATURE_XSAVE % 32, &c); __clear_bit(X86_FEATURE_AVX % 32, &c); diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h index eaba32d1be..8056559308 100644 --- a/xen/include/asm-x86/domain.h +++ b/xen/include/asm-x86/domain.h @@ -464,7 +464,7 @@ unsigned long pv_guest_cr4_fixup(const struct vcpu *, 
unsigned long guest_cr4); (((v)->arch.guest_context.ctrlreg[4] \ | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE)) \ | ((v)->domain->arch.vtsc ? X86_CR4_TSD : 0) \ - | ((cpu_has_xsave)? X86_CR4_OSXSAVE : 0)) \ + | ((xsave_enabled(v))? X86_CR4_OSXSAVE : 0)) \ & ~X86_CR4_DE) #define real_cr4_to_pv_guest_cr4(c) \ ((c) & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_TSD | X86_CR4_OSXSAVE)) diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h index ca122c1ab6..46baf2c894 100644 --- a/xen/include/asm-x86/hvm/hvm.h +++ b/xen/include/asm-x86/hvm/hvm.h @@ -285,13 +285,13 @@ static inline int hvm_do_pmu_interrupt(struct cpu_user_regs *regs) (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE)) /* These bits in CR4 cannot be set by the guest. */ -#define HVM_CR4_GUEST_RESERVED_BITS \ +#define HVM_CR4_GUEST_RESERVED_BITS(_v) \ (~((unsigned long) \ (X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | \ X86_CR4_DE | X86_CR4_PSE | X86_CR4_PAE | \ X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE | \ X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT | \ - (cpu_has_xsave ? X86_CR4_OSXSAVE : 0)))) + (xsave_enabled(_v) ? X86_CR4_OSXSAVE : 0)))) /* These exceptions must always be intercepted. */ #define HVM_TRAP_MASK ((1U << TRAP_machine_check) | (1U << TRAP_invalid_op)) diff --git a/xen/include/asm-x86/i387.h b/xen/include/asm-x86/i387.h index 2cbbbc40e9..8e10d927e1 100644 --- a/xen/include/asm-x86/i387.h +++ b/xen/include/asm-x86/i387.h @@ -11,8 +11,10 @@ #ifndef __ASM_I386_I387_H #define __ASM_I386_I387_H -#include <xen/sched.h> -#include <asm/processor.h> +#include <xen/types.h> +#include <xen/percpu.h> + +struct vcpu; extern unsigned int xsave_cntxt_size; extern u64 xfeature_mask; @@ -20,6 +22,7 @@ extern u64 xfeature_mask; void xsave_init(void); int xsave_alloc_save_area(struct vcpu *v); void xsave_free_save_area(struct vcpu *v); +bool_t xsave_enabled(const struct vcpu *v); #define XSAVE_AREA_MIN_SIZE (512 + 64) /* FP/SSE + XSAVE.HEADER */ #define XSTATE_FP (1ULL << 0) |