diff options
author | Keir Fraser <keir.fraser@citrix.com> | 2010-06-15 11:30:57 +0100 |
---|---|---|
committer | Keir Fraser <keir.fraser@citrix.com> | 2010-06-15 11:30:57 +0100 |
commit | 45a21ce9eef6b9a7fce5a7bcf34d3f2d26aa84b4 (patch) | |
tree | 46e63177a7e055227d0fcf99db4f19cccab08911 | |
parent | e05e559c568156d82d4f11c60194a54c216ee8b6 (diff) | |
download | xen-45a21ce9eef6b9a7fce5a7bcf34d3f2d26aa84b4.tar.gz xen-45a21ce9eef6b9a7fce5a7bcf34d3f2d26aa84b4.tar.bz2 xen-45a21ce9eef6b9a7fce5a7bcf34d3f2d26aa84b4.zip |
VMX: enforce invept checking
Escalate to use all context invalidation if single context
invalidation is not supported.
Signed-off-by: Xin Li <xin.li@intel.com>
xen-unstable changeset: 21592:9db8fc1ca2ef
xen-unstable date: Thu Jun 10 17:30:23 2010 +0100
-rw-r--r-- | xen/arch/x86/hvm/vmx/vmcs.c | 8 | ||||
-rw-r--r-- | xen/arch/x86/hvm/vmx/vmx.c | 4 | ||||
-rw-r--r-- | xen/include/asm-x86/hvm/vmx/vmcs.h | 9 | ||||
-rw-r--r-- | xen/include/asm-x86/hvm/vmx/vmx.h | 25 |
4 files changed, 33 insertions(+), 13 deletions(-)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c index 940dc4e79e..7229ac0341 100644 --- a/xen/arch/x86/hvm/vmx/vmcs.c +++ b/xen/arch/x86/hvm/vmx/vmcs.c @@ -184,11 +184,14 @@ static void vmx_init_vmcs_config(void) * ept paging structures memory type to WB; * 2) the CPU must support the EPT page-walk length of 4 according to * Intel SDM 25.2.2. + * 3) the CPU must support INVEPT all context invalidation, because we + * will use it as final resort if other types are not supported. * * Or we just don't use EPT. */ if ( !(_vmx_ept_vpid_cap & VMX_EPT_MEMORY_TYPE_WB) || - !(_vmx_ept_vpid_cap & VMX_EPT_WALK_LENGTH_4_SUPPORTED) ) + !(_vmx_ept_vpid_cap & VMX_EPT_WALK_LENGTH_4_SUPPORTED) || + !(_vmx_ept_vpid_cap & VMX_EPT_INVEPT_ALL_CONTEXT) ) _vmx_secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; } @@ -437,7 +440,8 @@ int vmx_cpu_up(void) hvm_asid_init(cpu_has_vmx_vpid ? (1u << VMCS_VPID_WIDTH) : 0); - ept_sync_all(); + if ( cpu_has_vmx_ept ) + ept_sync_all(); if ( cpu_has_vmx_vpid ) vpid_sync_all(); diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index 1472cb3f01..8b06f6b20e 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -688,7 +688,7 @@ static void vmx_ctxt_switch_to(struct vcpu *v) /* Test-and-test-and-set this CPU in the EPT-is-synced mask. 
*/ if ( !cpu_isset(cpu, d->arch.hvm_domain.vmx.ept_synced) && !cpu_test_and_set(cpu, d->arch.hvm_domain.vmx.ept_synced) ) - __invept(1, d->arch.hvm_domain.vmx.ept_control.eptp, 0); + __invept(INVEPT_SINGLE_CONTEXT, ept_get_eptp(d), 0); } vmx_restore_guest_msrs(v); @@ -1220,7 +1220,7 @@ static void vmx_update_guest_efer(struct vcpu *v) static void __ept_sync_domain(void *info) { struct domain *d = info; - __invept(1, d->arch.hvm_domain.vmx.ept_control.eptp, 0); + __invept(INVEPT_SINGLE_CONTEXT, ept_get_eptp(d), 0); } void ept_sync_domain(struct domain *d) diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h index 9b6699d2e0..5d8ed18c53 100644 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h @@ -70,8 +70,12 @@ struct vmx_domain { cpumask_t ept_synced; }; -#define ept_get_wl(d) \ +#define ept_get_wl(d) \ ((d)->arch.hvm_domain.vmx.ept_control.ept_wl) +#define ept_get_asr(d) \ + ((d)->arch.hvm_domain.vmx.ept_control.asr) +#define ept_get_eptp(d) \ + ((d)->arch.hvm_domain.vmx.ept_control.eptp) struct arch_vmx_struct { /* Virtual address of VMCS. 
*/ @@ -182,6 +186,9 @@ extern bool_t cpu_has_vmx_ins_outs_instr_info; #define VMX_EPT_MEMORY_TYPE_UC 0x00000100 #define VMX_EPT_MEMORY_TYPE_WB 0x00004000 #define VMX_EPT_SUPERPAGE_2MB 0x00010000 +#define VMX_EPT_INVEPT_INSTRUCTION 0x00100000 +#define VMX_EPT_INVEPT_SINGLE_CONTEXT 0x02000000 +#define VMX_EPT_INVEPT_ALL_CONTEXT 0x04000000 #define cpu_has_wbinvd_exiting \ (vmx_secondary_exec_control & SECONDARY_EXEC_WBINVD_EXITING) diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h index 073ba60454..dfad9a0ffc 100644 --- a/xen/include/asm-x86/hvm/vmx/vmx.h +++ b/xen/include/asm-x86/hvm/vmx/vmx.h @@ -43,9 +43,9 @@ typedef union { u64 epte; } ept_entry_t; -#define EPT_TABLE_ORDER 9 +#define EPT_TABLE_ORDER 9 #define EPTE_SUPER_PAGE_MASK 0x80 -#define EPTE_MFN_MASK 0x1fffffffffff000 +#define EPTE_MFN_MASK 0xffffffffff000ULL #define EPTE_AVAIL1_MASK 0xF00 #define EPTE_EMT_MASK 0x38 #define EPTE_IGMT_MASK 0x40 @@ -194,7 +194,11 @@ extern u64 vmx_ept_vpid_cap; (vmx_ept_vpid_cap & VMX_EPT_MEMORY_TYPE_WB) #define cpu_has_vmx_ept_2mb \ (vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_2MB) +#define cpu_has_vmx_ept_invept_single_context \ + (vmx_ept_vpid_cap & VMX_EPT_INVEPT_SINGLE_CONTEXT) +#define INVEPT_SINGLE_CONTEXT 1 +#define INVEPT_ALL_CONTEXT 2 static inline void __vmptrld(u64 addr) { @@ -278,18 +282,26 @@ static inline void __vm_clear_bit(unsigned long field, unsigned int bit) __vmwrite(field, __vmread(field) & ~(1UL << bit)); } -static inline void __invept(int ext, u64 eptp, u64 gpa) +static inline void __invept(int type, u64 eptp, u64 gpa) { struct { u64 eptp, gpa; } operand = {eptp, gpa}; + /* + * If single context invalidation is not supported, we escalate to + * use all context invalidation. 
+ */ + if ( (type == INVEPT_SINGLE_CONTEXT) && + !cpu_has_vmx_ept_invept_single_context ) + type = INVEPT_ALL_CONTEXT; + asm volatile ( INVEPT_OPCODE MODRM_EAX_08 /* CF==1 or ZF==1 --> crash (ud2) */ "ja 1f ; ud2 ; 1:\n" : - : "a" (&operand), "c" (ext) + : "a" (&operand), "c" (type) : "memory" ); } @@ -316,10 +328,7 @@ static inline void __invvpid(int ext, u16 vpid, u64 gva) static inline void ept_sync_all(void) { - if ( !current->domain->arch.hvm_domain.hap_enabled ) - return; - - __invept(2, 0, 0); + __invept(INVEPT_ALL_CONTEXT, 0, 0); } void ept_sync_domain(struct domain *d); |