author     Keir Fraser <keir@xen.org>  2011-02-06 17:39:10 +0000
committer  Keir Fraser <keir@xen.org>  2011-02-06 17:39:10 +0000
commit     82b701cd7d819cff61fe9eefcc83e95b9c6592e9 (patch)
tree       722a9ac2face2580890794c579bea6949404ca7d
parent     70de4016b4fba4de5b02827a80c59a8f1671c0bf (diff)
hvm amd: Fix 32bit guest VM save/restore issues associated with SYSENTER MSRs
This patch turns on SYSENTER MSR interception for 32bit guest VMs on
AMD CPUs. With it, the hvm_svm.guest_sysenter_xx fields always contain
the canonical version of the SYSENTER MSRs and are used in guest
save/restore. The data fields in the VMCB save area are updated as
necessary.

Reported-by: James Harper <james.harper@bendigoit.com.au>
Signed-off-by: Wei Huang <wei.huang2@amd.com>
xen-unstable changeset: 22873:186162762071
xen-unstable date: Sun Feb 06 17:03:09 2011 +0000
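As an aside, a minimal self-contained model of the pattern the patch adopts
may help: the hypervisor keeps a canonical copy of each SYSENTER MSR for
save/restore and mirrors every update into the VMCB save area. The names
below (vmcb_state, vcpu_msrs, write_sysenter_cs) are illustrative
stand-ins, not Xen's definitions.

    #include <stdint.h>

    /* Canonical hypervisor-side copy (used by guest save/restore) and
     * the hardware-visible VMCB save-area copy, modeled as two structs. */
    struct vmcb_state { uint64_t sysenter_cs, sysenter_esp, sysenter_eip; };
    struct vcpu_msrs  { uint64_t sysenter_cs, sysenter_esp, sysenter_eip; };

    static void write_sysenter_cs(struct vmcb_state *vmcb,
                                  struct vcpu_msrs *canon, uint64_t val)
    {
        /* One chained store feeds both views, as in the diff below,
         * so the two copies can never diverge. */
        vmcb->sysenter_cs = canon->sysenter_cs = val;
    }

    static uint64_t save_sysenter_cs(const struct vcpu_msrs *canon)
    {
        /* Save/restore consults only the canonical copy. */
        return canon->sysenter_cs;
    }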
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c  41
1 file changed, 26 insertions(+), 15 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index a77639f064..f175216a28 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -251,10 +251,11 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
     hvm_update_guest_cr(v, 2);
     hvm_update_guest_cr(v, 4);
 
-    v->arch.hvm_svm.guest_sysenter_cs = c->sysenter_cs;
-    v->arch.hvm_svm.guest_sysenter_esp = c->sysenter_esp;
-    v->arch.hvm_svm.guest_sysenter_eip = c->sysenter_eip;
-
+    /* Load sysenter MSRs into both VMCB save area and VCPU fields. */
+    vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = c->sysenter_cs;
+    vmcb->sysenter_esp = v->arch.hvm_svm.guest_sysenter_esp = c->sysenter_esp;
+    vmcb->sysenter_eip = v->arch.hvm_svm.guest_sysenter_eip = c->sysenter_eip;
+
     if ( paging_mode_hap(v->domain) )
     {
         vmcb->np_enable = 1;
@@ -449,14 +450,6 @@ static void svm_update_guest_efer(struct vcpu *v)
     vmcb->efer = (v->arch.hvm_vcpu.guest_efer | EFER_SVME) & ~EFER_LME;
     if ( lma )
         vmcb->efer |= EFER_LME;
-
-    /*
-     * In legacy mode (EFER.LMA=0) we natively support SYSENTER/SYSEXIT with
-     * no need for MSR intercepts. When EFER.LMA=1 we must trap and emulate.
-     */
-    svm_intercept_msr(v, MSR_IA32_SYSENTER_CS, lma);
-    svm_intercept_msr(v, MSR_IA32_SYSENTER_ESP, lma);
-    svm_intercept_msr(v, MSR_IA32_SYSENTER_EIP, lma);
 }
 
 static void svm_sync_vmcb(struct vcpu *v)
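The removed comment and calls above used to disable the SYSENTER MSR
intercepts whenever the guest ran in legacy mode; with them gone, the
intercepts stay enabled unconditionally, which is what keeps the canonical
copies authoritative. For background, here is a sketch of how such a toggle
typically manipulates an SVM MSR permission map, following the AMD APM
layout (two consecutive intercept bits per MSR, read then write; MSRs
0x0-0x1fff in the first 2KB region). It is an illustration, not Xen's
svm_intercept_msr().

    #include <stdint.h>

    #define MSR_IA32_SYSENTER_CS  0x00000174
    #define MSR_IA32_SYSENTER_ESP 0x00000175
    #define MSR_IA32_SYSENTER_EIP 0x00000176

    /* Set or clear both intercept bits (read and write) for one MSR in
     * the MSR permission map. Ranges other than 0x0-0x1fff are omitted
     * from this sketch. */
    static void msrpm_toggle(uint8_t *msrpm, uint32_t msr, int intercept)
    {
        unsigned int bit;

        if ( msr > 0x1fff )
            return;

        bit = msr * 2;                 /* even bit: read, odd bit: write */
        if ( intercept )
            msrpm[bit / 8] |= (uint8_t)(3u << (bit % 8));
        else
            msrpm[bit / 8] &= (uint8_t)~(3u << (bit % 8));
    }

After the patch, the equivalent of msrpm_toggle(..., 1) is in effect for
all three SYSENTER MSRs regardless of EFER.LMA.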
@@ -1099,6 +1092,21 @@ static int svm_msr_write_intercept(struct cpu_user_regs *regs)
     u32 ecx = regs->ecx;
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    int sync = 0;
+
+    switch ( ecx )
+    {
+    case MSR_IA32_SYSENTER_CS:
+    case MSR_IA32_SYSENTER_ESP:
+    case MSR_IA32_SYSENTER_EIP:
+        sync = 1;
+        break;
+    default:
+        break;
+    }
+
+    if ( sync )
+        svm_sync_vmcb(v);
 
     msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
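The svm_sync_vmcb() call inserted above (and the svm_vmload() in the final
hunk) bracket the MSR update because the SYSENTER fields live in the part
of the VMCB save area that the VMSAVE/VMLOAD instructions shuttle between
the VMCB and hardware registers, so the in-memory VMCB copy may be stale
when the intercept fires. A hedged sketch of such wrappers follows; per the
AMD APM both instructions take the VMCB's physical address in rAX, but
these exact bodies are an assumption, not Xen's svm_vmsave()/svm_vmload().

    #include <stdint.h>

    /* VMSAVE stores the hardware copy of state such as
     * SYSENTER_CS/ESP/EIP into the VMCB; VMLOAD loads it back. */
    static inline void vmsave_pa(uint64_t vmcb_pa)
    {
        asm volatile ( "vmsave" : : "a" (vmcb_pa) : "memory" );
    }

    static inline void vmload_pa(uint64_t vmcb_pa)
    {
        asm volatile ( "vmload" : : "a" (vmcb_pa) : "memory" );
    }

Syncing before the update pulls the current hardware values into the VMCB;
reloading afterwards pushes the updated values back, so the chained stores
in the next hunk land in state the hardware will actually use.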
@@ -1110,13 +1118,13 @@ static int svm_msr_write_intercept(struct cpu_user_regs *regs)
         goto gpf;
 
     case MSR_IA32_SYSENTER_CS:
-        v->arch.hvm_svm.guest_sysenter_cs = msr_content;
+        vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = msr_content;
         break;
     case MSR_IA32_SYSENTER_ESP:
-        v->arch.hvm_svm.guest_sysenter_esp = msr_content;
+        vmcb->sysenter_esp = v->arch.hvm_svm.guest_sysenter_esp = msr_content;
         break;
     case MSR_IA32_SYSENTER_EIP:
-        v->arch.hvm_svm.guest_sysenter_eip = msr_content;
+        vmcb->sysenter_eip = v->arch.hvm_svm.guest_sysenter_eip = msr_content;
         break;
 
     case MSR_IA32_DEBUGCTLMSR:
@@ -1164,6 +1172,9 @@ static int svm_msr_write_intercept(struct cpu_user_regs *regs)
         break;
     }
 
+    if ( sync )
+        svm_vmload(vmcb);
+
     return X86EMUL_OKAY;
 
  gpf: