aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDongxiao Xu <dongxiao.xu@intel.com>2013-01-25 10:18:40 +0100
committerDongxiao Xu <dongxiao.xu@intel.com>2013-01-25 10:18:40 +0100
commit8916ad7199bf671bacd1a6574b294831ec09c8cf (patch)
treea4723df25d42ed1e9fe96497bb7ffbf5b4ad92ab
parent806c9ba5830cf6b2ea2ab6d1605ef7351e62edfa (diff)
downloadxen-8916ad7199bf671bacd1a6574b294831ec09c8cf.tar.gz
xen-8916ad7199bf671bacd1a6574b294831ec09c8cf.tar.bz2
xen-8916ad7199bf671bacd1a6574b294831ec09c8cf.zip
nested vmx: use VMREAD/VMWRITE to construct vVMCS if enabled VMCS shadowing
Before the VMCS shadowing feature, we use memory operation to build up the virtual VMCS. This does work since this virtual VMCS will never be loaded into real hardware. However after we introduce the VMCS shadowing feature, this VMCS will be loaded into hardware, which requires all fields in the VMCS accessed by VMREAD/VMWRITE. Besides, the virtual VMCS revision identifier should also meet the hardware's requirement, instead of using a faked one. Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com> Acked-by: Eddie Dong <eddie.dong@intel.com> Committed-by: Jan Beulich <jbeulich@suse.com>
-rw-r--r--xen/arch/x86/hvm/vmx/vmcs.c29
-rw-r--r--xen/arch/x86/hvm/vmx/vvmx.c20
-rw-r--r--xen/include/asm-x86/hvm/vmx/vmcs.h5
-rw-r--r--xen/include/asm-x86/hvm/vmx/vvmx.h16
4 files changed, 62 insertions, 8 deletions
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index de22e032df..82a8d913c3 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -725,6 +725,35 @@ void vmx_vmcs_switch(struct vmcs_struct *from, struct vmcs_struct *to)
spin_unlock(&vmx->vmcs_lock);
}
+void virtual_vmcs_enter(void *vvmcs)
+{
+ __vmptrld(pfn_to_paddr(domain_page_map_to_mfn(vvmcs)));
+}
+
+void virtual_vmcs_exit(void *vvmcs)
+{
+ __vmpclear(pfn_to_paddr(domain_page_map_to_mfn(vvmcs)));
+ __vmptrld(virt_to_maddr(this_cpu(current_vmcs)));
+}
+
+u64 virtual_vmcs_vmread(void *vvmcs, u32 vmcs_encoding)
+{
+ u64 res;
+
+ virtual_vmcs_enter(vvmcs);
+ res = __vmread(vmcs_encoding);
+ virtual_vmcs_exit(vvmcs);
+
+ return res;
+}
+
+void virtual_vmcs_vmwrite(void *vvmcs, u32 vmcs_encoding, u64 val)
+{
+ virtual_vmcs_enter(vvmcs);
+ __vmwrite(vmcs_encoding, val);
+ virtual_vmcs_exit(vvmcs);
+}
+
static int construct_vmcs(struct vcpu *v)
{
struct domain *d = v->domain;
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 4ecc286a63..18d801f02d 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -175,7 +175,7 @@ static int vvmcs_offset(u32 width, u32 type, u32 index)
return offset;
}
-u64 __get_vvmcs(void *vvmcs, u32 vmcs_encoding)
+u64 __get_vvmcs_virtual(void *vvmcs, u32 vmcs_encoding)
{
union vmcs_encoding enc;
u64 *content = (u64 *) vvmcs;
@@ -205,7 +205,12 @@ u64 __get_vvmcs(void *vvmcs, u32 vmcs_encoding)
return res;
}
-void __set_vvmcs(void *vvmcs, u32 vmcs_encoding, u64 val)
+u64 __get_vvmcs_real(void *vvmcs, u32 vmcs_encoding)
+{
+ return virtual_vmcs_vmread(vvmcs, vmcs_encoding);
+}
+
+void __set_vvmcs_virtual(void *vvmcs, u32 vmcs_encoding, u64 val)
{
union vmcs_encoding enc;
u64 *content = (u64 *) vvmcs;
@@ -241,6 +246,11 @@ void __set_vvmcs(void *vvmcs, u32 vmcs_encoding, u64 val)
content[offset] = res;
}
+void __set_vvmcs_real(void *vvmcs, u32 vmcs_encoding, u64 val)
+{
+ virtual_vmcs_vmwrite(vvmcs, vmcs_encoding, val);
+}
+
static unsigned long reg_read(struct cpu_user_regs *regs,
enum vmx_regs_enc index)
{
@@ -1567,10 +1577,11 @@ int nvmx_handle_invvpid(struct cpu_user_regs *regs)
*/
int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
{
+ struct vcpu *v = current;
u64 data = 0, host_data = 0;
int r = 1;
- if ( !nestedhvm_enabled(current->domain) )
+ if ( !nestedhvm_enabled(v->domain) )
return 0;
rdmsrl(msr, host_data);
@@ -1580,7 +1591,8 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
*/
switch (msr) {
case MSR_IA32_VMX_BASIC:
- data = (host_data & (~0ul << 32)) | VVMCS_REVISION;
+ data = (host_data & (~0ul << 32)) |
+ ((v->arch.hvm_vmx.vmcs)->vmcs_revision_id);
break;
case MSR_IA32_VMX_PINBASED_CTLS:
case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 9ff741f42f..652dc21e1c 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -244,6 +244,7 @@ extern bool_t cpu_has_vmx_ins_outs_instr_info;
(vmx_secondary_exec_control & SECONDARY_EXEC_APIC_REGISTER_VIRT)
#define cpu_has_vmx_virtual_intr_delivery \
(vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
+#define cpu_has_vmx_vmcs_shadowing 0
/* GUEST_INTERRUPTIBILITY_INFO flags. */
#define VMX_INTR_SHADOW_STI 0x00000001
@@ -436,6 +437,10 @@ void vmx_vmcs_switch(struct vmcs_struct *from, struct vmcs_struct *to);
void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
int vmx_check_msr_bitmap(unsigned long *msr_bitmap, u32 msr, int access_type);
+void virtual_vmcs_enter(void *vvmcs);
+void virtual_vmcs_exit(void *vvmcs);
+u64 virtual_vmcs_vmread(void *vvmcs, u32 vmcs_encoding);
+void virtual_vmcs_vmwrite(void *vvmcs, u32 vmcs_encoding, u64 val);
#endif /* ASM_X86_HVM_VMX_VMCS_H__ */
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index 89e839f9d6..73a67cc2c8 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -152,8 +152,6 @@ nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
*
*/
-#define VVMCS_REVISION 0x40000001u
-
struct vvmcs_header {
u32 revision;
u32 abort;
@@ -185,8 +183,18 @@ enum vvmcs_encoding_type {
VVMCS_TYPE_HSTATE,
};
-u64 __get_vvmcs(void *vvmcs, u32 vmcs_encoding);
-void __set_vvmcs(void *vvmcs, u32 vmcs_encoding, u64 val);
+u64 __get_vvmcs_virtual(void *vvmcs, u32 vmcs_encoding);
+u64 __get_vvmcs_real(void *vvmcs, u32 vmcs_encoding);
+void __set_vvmcs_virtual(void *vvmcs, u32 vmcs_encoding, u64 val);
+void __set_vvmcs_real(void *vvmcs, u32 vmcs_encoding, u64 val);
+
+#define __get_vvmcs(_vvmcs, _vmcs_encoding) \
+ (cpu_has_vmx_vmcs_shadowing ? __get_vvmcs_real(_vvmcs, _vmcs_encoding) \
+ : __get_vvmcs_virtual(_vvmcs, _vmcs_encoding))
+
+#define __set_vvmcs(_vvmcs, _vmcs_encoding, _val) \
+ (cpu_has_vmx_vmcs_shadowing ? __set_vvmcs_real(_vvmcs, _vmcs_encoding, _val) \
+ : __set_vvmcs_virtual(_vvmcs, _vmcs_encoding, _val))
uint64_t get_shadow_eptp(struct vcpu *v);