diff options
author | Keir Fraser <keir.fraser@citrix.com> | 2010-06-15 11:31:43 +0100 |
---|---|---|
committer | Keir Fraser <keir.fraser@citrix.com> | 2010-06-15 11:31:43 +0100 |
commit | 53f673e609772ba2cc9b20a06cdb9cb8b55161b0 (patch) | |
tree | 9c64ff58eea8658a2f0b7f30b4561ae8da4cc771 | |
parent | 34707a374ecd1c505e92e281b1a7c71f831e9d23 (diff) | |
download | xen-53f673e609772ba2cc9b20a06cdb9cb8b55161b0.tar.gz xen-53f673e609772ba2cc9b20a06cdb9cb8b55161b0.tar.bz2 xen-53f673e609772ba2cc9b20a06cdb9cb8b55161b0.zip |
VMX: enforce INVVPID checking
Escalate to a wider-context invalidation when the requested invalidation
context is not supported by the CPU.
Signed-off-by: Xin Li <xin.li@intel.com>
xen-unstable changeset: 21598:a822156d9fbb
xen-unstable date: Fri Jun 11 09:34:58 2010 +0100
-rw-r--r-- | xen/arch/x86/hvm/vmx/vmcs.c | 9 | ||||
-rw-r--r-- | xen/include/asm-x86/hvm/vmx/vmcs.h | 6 | ||||
-rw-r--r-- | xen/include/asm-x86/hvm/vmx/vmx.h | 39 |
3 files changed, 50 insertions, 4 deletions
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c index 7229ac0341..11e7226b04 100644 --- a/xen/arch/x86/hvm/vmx/vmcs.c +++ b/xen/arch/x86/hvm/vmx/vmcs.c @@ -193,6 +193,15 @@ static void vmx_init_vmcs_config(void) !(_vmx_ept_vpid_cap & VMX_EPT_WALK_LENGTH_4_SUPPORTED) || !(_vmx_ept_vpid_cap & VMX_EPT_INVEPT_ALL_CONTEXT) ) _vmx_secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; + + /* + * the CPU must support INVVPID all context invalidation, because we + * will use it as final resort if other types are not supported. + * + * Or we just don't use VPID. + */ + if ( !(_vmx_ept_vpid_cap & VMX_VPID_INVVPID_ALL_CONTEXT) ) + _vmx_secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; } if ( _vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT ) diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h index 5d8ed18c53..5dbf628384 100644 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h @@ -190,6 +190,12 @@ extern bool_t cpu_has_vmx_ins_outs_instr_info; #define VMX_EPT_INVEPT_SINGLE_CONTEXT 0x02000000 #define VMX_EPT_INVEPT_ALL_CONTEXT 0x04000000 +#define VMX_VPID_INVVPID_INSTRUCTION 0x100000000ULL +#define VMX_VPID_INVVPID_INDIVIDUAL_ADDR 0x10000000000ULL +#define VMX_VPID_INVVPID_SINGLE_CONTEXT 0x20000000000ULL +#define VMX_VPID_INVVPID_ALL_CONTEXT 0x40000000000ULL +#define VMX_VPID_INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL 0x80000000000ULL + #define cpu_has_wbinvd_exiting \ (vmx_secondary_exec_control & SECONDARY_EXEC_WBINVD_EXITING) #define cpu_has_vmx_virtualize_apic_accesses \ diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h index 9e3125a4a1..04bef433b3 100644 --- a/xen/include/asm-x86/hvm/vmx/vmx.h +++ b/xen/include/asm-x86/hvm/vmx/vmx.h @@ -200,6 +200,18 @@ extern u64 vmx_ept_vpid_cap; #define INVEPT_SINGLE_CONTEXT 1 #define INVEPT_ALL_CONTEXT 2 +#define cpu_has_vmx_vpid_invvpid_individual_addr \ + (vmx_ept_vpid_cap & 
VMX_VPID_INVVPID_INDIVIDUAL_ADDR) +#define cpu_has_vmx_vpid_invvpid_single_context \ + (vmx_ept_vpid_cap & VMX_VPID_INVVPID_SINGLE_CONTEXT) +#define cpu_has_vmx_vpid_invvpid_single_context_retaining_global \ + (vmx_ept_vpid_cap & VMX_VPID_INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL) + +#define INVVPID_INDIVIDUAL_ADDR 0 +#define INVVPID_SINGLE_CONTEXT 1 +#define INVVPID_ALL_CONTEXT 2 +#define INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL 3 + static inline void __vmptrld(u64 addr) { asm volatile ( VMPTRLD_OPCODE @@ -305,7 +317,7 @@ static inline void __invept(int type, u64 eptp, u64 gpa) : "memory" ); } -static inline void __invvpid(int ext, u16 vpid, u64 gva) +static inline void __invvpid(int type, u16 vpid, u64 gva) { struct { u64 vpid:16; @@ -322,7 +334,7 @@ static inline void __invvpid(int ext, u16 vpid, u64 gva) " "__FIXUP_WORD" 1b,2b\n" ".previous" : - : "a" (&operand), "c" (ext) + : "a" (&operand), "c" (type) : "memory" ); } @@ -335,12 +347,31 @@ void ept_sync_domain(struct domain *d); static inline void vpid_sync_vcpu_gva(struct vcpu *v, unsigned long gva) { - __invvpid(0, v->arch.hvm_vcpu.asid, (u64)gva); + int type = INVVPID_INDIVIDUAL_ADDR; + + /* + * If individual address invalidation is not supported, we escalate to + * use single context invalidation. + */ + if ( likely(cpu_has_vmx_vpid_invvpid_individual_addr) ) + goto execute_invvpid; + + type = INVVPID_SINGLE_CONTEXT; + + /* + * If single context invalidation is not supported, we escalate to + * use all context invalidation. + */ + if ( !cpu_has_vmx_vpid_invvpid_single_context ) + type = INVVPID_ALL_CONTEXT; + +execute_invvpid: + __invvpid(type, v->arch.hvm_vcpu.asid, (u64)gva); } static inline void vpid_sync_all(void) { - __invvpid(2, 0, 0); + __invvpid(INVVPID_ALL_CONTEXT, 0, 0); } static inline void __vmxoff(void) |