aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--xen/arch/x86/hvm/hvm.c61
-rw-r--r--xen/arch/x86/hvm/svm/svm.c76
-rw-r--r--xen/arch/x86/hvm/vmx/vmx.c61
-rw-r--r--xen/include/asm-x86/hvm/support.h1
4 files changed, 80 insertions, 119 deletions
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 447b158a27..2e2d6c7096 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -337,6 +337,34 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
if ( hvm_load_entry(CPU, h, &ctxt) != 0 )
return -EINVAL;
+ /* Sanity check some control registers. */
+ if ( (ctxt.cr0 & HVM_CR0_GUEST_RESERVED_BITS) ||
+ !(ctxt.cr0 & X86_CR0_ET) ||
+ ((ctxt.cr0 & (X86_CR0_PE|X86_CR0_PG)) == X86_CR0_PG) )
+ {
+ gdprintk(XENLOG_ERR, "HVM restore: bad CR0 0x%"PRIx64"\n",
+                 ctxt.cr0);
+ return -EINVAL;
+ }
+
+ if ( ctxt.cr4 & HVM_CR4_GUEST_RESERVED_BITS )
+ {
+ gdprintk(XENLOG_ERR, "HVM restore: bad CR4 0x%"PRIx64"\n",
+                 ctxt.cr4);
+ return -EINVAL;
+ }
+
+ if ( (ctxt.msr_efer & ~(EFER_LME | EFER_NX | EFER_SCE)) ||
+ ((sizeof(long) != 8) && (ctxt.msr_efer & EFER_LME)) ||
+ (!cpu_has_nx && (ctxt.msr_efer & EFER_NX)) ||
+ (!cpu_has_syscall && (ctxt.msr_efer & EFER_SCE)) ||
+ ((ctxt.msr_efer & (EFER_LME|EFER_LMA)) == EFER_LMA) )
+ {
+ gdprintk(XENLOG_ERR, "HVM restore: bad EFER 0x%"PRIx64"\n",
+ ctxt.msr_efer);
+ return -EINVAL;
+ }
+
/* Architecture-specific vmcs/vmcb bits */
if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
return -EINVAL;
@@ -532,6 +560,39 @@ void hvm_triple_fault(void)
domain_shutdown(v->domain, SHUTDOWN_reboot);
}
+int hvm_set_efer(uint64_t value)
+{
+ struct vcpu *v = current;
+
+ value &= ~EFER_LMA;
+
+ if ( (value & ~(EFER_LME | EFER_NX | EFER_SCE)) ||
+ ((sizeof(long) != 8) && (value & EFER_LME)) ||
+ (!cpu_has_nx && (value & EFER_NX)) ||
+ (!cpu_has_syscall && (value & EFER_SCE)) )
+ {
+ gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
+ "EFER: %"PRIx64"\n", value);
+ hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ return 0;
+ }
+
+ if ( ((value ^ v->arch.hvm_vcpu.guest_efer) & EFER_LME) &&
+ hvm_paging_enabled(v) )
+ {
+ gdprintk(XENLOG_WARNING,
+ "Trying to change EFER.LME with paging enabled\n");
+ hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ return 0;
+ }
+
+ value |= v->arch.hvm_vcpu.guest_efer & EFER_LMA;
+ v->arch.hvm_vcpu.guest_efer = value;
+ hvm_update_guest_efer(v);
+
+ return 1;
+}
+
int hvm_set_cr0(unsigned long value)
{
struct vcpu *v = current;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index d6db5bf028..90b1eba7cc 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -69,6 +69,8 @@ static void *hsa[NR_CPUS] __read_mostly;
/* vmcb used for extended host state */
static void *root_vmcb[NR_CPUS] __read_mostly;
+static void svm_update_guest_efer(struct vcpu *v);
+
static void inline __update_guest_eip(
struct cpu_user_regs *regs, int inst_len)
{
@@ -103,22 +105,10 @@ static void svm_cpu_down(void)
write_efer(read_efer() & ~EFER_SVME);
}
-static int svm_lme_is_set(struct vcpu *v)
-{
-#ifdef __x86_64__
- u64 guest_efer = v->arch.hvm_vcpu.guest_efer;
- return guest_efer & EFER_LME;
-#else
- return 0;
-#endif
-}
-
static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
{
u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
u32 ecx = regs->ecx;
- struct vcpu *v = current;
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
HVM_DBG_LOG(DBG_LEVEL_0, "msr %x msr_content %"PRIx64,
ecx, msr_content);
@@ -126,47 +116,8 @@ static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
switch ( ecx )
{
case MSR_EFER:
- /* Offending reserved bit will cause #GP. */
-#ifdef __x86_64__
- if ( (msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE)) ||
-#else
- if ( (msr_content & ~(EFER_NX | EFER_SCE)) ||
-#endif
- (!cpu_has_nx && (msr_content & EFER_NX)) ||
- (!cpu_has_syscall && (msr_content & EFER_SCE)) )
- {
- gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
- "EFER: %"PRIx64"\n", msr_content);
- goto gp_fault;
- }
-
- if ( (msr_content & EFER_LME) && !svm_lme_is_set(v) )
- {
- /* EFER.LME transition from 0 to 1. */
- if ( hvm_paging_enabled(v) ||
- !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
- {
- gdprintk(XENLOG_WARNING, "Trying to set LME bit when "
- "in paging mode or PAE bit is not set\n");
- goto gp_fault;
- }
- }
- else if ( !(msr_content & EFER_LME) && svm_lme_is_set(v) )
- {
- /* EFER.LME transistion from 1 to 0. */
- if ( hvm_paging_enabled(v) )
- {
- gdprintk(XENLOG_WARNING,
- "Trying to clear EFER.LME while paging enabled\n");
- goto gp_fault;
- }
- }
-
- v->arch.hvm_vcpu.guest_efer = msr_content;
- vmcb->efer = msr_content | EFER_SVME;
- if ( !hvm_paging_enabled(v) )
- vmcb->efer &= ~(EFER_LME | EFER_LMA);
-
+ if ( !hvm_set_efer(msr_content) )
+ return HNDL_exception_raised;
break;
case MSR_K8_MC4_MISC: /* Threshold register */
@@ -182,10 +133,6 @@ static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
}
return HNDL_done;
-
- gp_fault:
- svm_inject_exception(v, TRAP_gp_fault, 1, 0);
- return HNDL_exception_raised;
}
@@ -449,11 +396,7 @@ static void svm_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
vmcb->cstar = data->msr_cstar;
vmcb->sfmask = data->msr_syscall_mask;
v->arch.hvm_vcpu.guest_efer = data->msr_efer;
- vmcb->efer = data->msr_efer | EFER_SVME;
- /* VMCB's EFER.LME isn't set unless we're actually in long mode
- * (see long_mode_do_msr_write()) */
- if ( !(vmcb->efer & EFER_LMA) )
- vmcb->efer &= ~EFER_LME;
+ svm_update_guest_efer(v);
hvm_set_guest_time(v, data->tsc);
}
@@ -543,14 +486,11 @@ static void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
static void svm_update_guest_efer(struct vcpu *v)
{
-#ifdef __x86_64__
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- if ( v->arch.hvm_vcpu.guest_efer & EFER_LMA )
- vmcb->efer |= EFER_LME | EFER_LMA;
- else
- vmcb->efer &= ~(EFER_LME | EFER_LMA);
-#endif
+ vmcb->efer = (v->arch.hvm_vcpu.guest_efer | EFER_SVME) & ~EFER_LME;
+ if ( vmcb->efer & EFER_LMA )
+ vmcb->efer |= EFER_LME;
}
static void svm_flush_guest_tlbs(void)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index b47abdad8c..0a18ee2631 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -199,42 +199,8 @@ static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
switch ( ecx )
{
case MSR_EFER:
- /* offending reserved bit will cause #GP */
- if ( (msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE)) ||
- (!cpu_has_nx && (msr_content & EFER_NX)) ||
- (!cpu_has_syscall && (msr_content & EFER_SCE)) )
- {
- gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
- "EFER: %"PRIx64"\n", msr_content);
- goto gp_fault;
- }
-
- if ( (msr_content & EFER_LME)
- && !(v->arch.hvm_vcpu.guest_efer & EFER_LME) )
- {
- if ( unlikely(hvm_paging_enabled(v)) )
- {
- gdprintk(XENLOG_WARNING,
- "Trying to set EFER.LME with paging enabled\n");
- goto gp_fault;
- }
- }
- else if ( !(msr_content & EFER_LME)
- && (v->arch.hvm_vcpu.guest_efer & EFER_LME) )
- {
- if ( unlikely(hvm_paging_enabled(v)) )
- {
- gdprintk(XENLOG_WARNING,
- "Trying to clear EFER.LME with paging enabled\n");
- goto gp_fault;
- }
- }
-
- if ( (msr_content ^ v->arch.hvm_vcpu.guest_efer) & (EFER_NX|EFER_SCE) )
- write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
- (msr_content & (EFER_NX|EFER_SCE)));
-
- v->arch.hvm_vcpu.guest_efer = msr_content;
+ if ( !hvm_set_efer(msr_content) )
+ goto exception_raised;
break;
case MSR_FS_BASE:
@@ -285,6 +251,7 @@ static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
HVM_DBG_LOG(DBG_LEVEL_0, "Not cano address of msr write %x", ecx);
gp_fault:
vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+ exception_raised:
return HNDL_exception_raised;
}
@@ -380,7 +347,8 @@ static enum handler_return long_mode_do_msr_read(struct cpu_user_regs *regs)
u64 msr_content = 0;
struct vcpu *v = current;
- switch ( regs->ecx ) {
+ switch ( regs->ecx )
+ {
case MSR_EFER:
msr_content = v->arch.hvm_vcpu.guest_efer;
break;
@@ -398,25 +366,12 @@ static enum handler_return long_mode_do_msr_read(struct cpu_user_regs *regs)
static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
{
u64 msr_content = regs->eax | ((u64)regs->edx << 32);
- struct vcpu *v = current;
switch ( regs->ecx )
{
case MSR_EFER:
- /* offending reserved bit will cause #GP */
- if ( (msr_content & ~EFER_NX) ||
- (!cpu_has_nx && (msr_content & EFER_NX)) )
- {
- gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
- "EFER: %"PRIx64"\n", msr_content);
- vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+ if ( !hvm_set_efer(msr_content) )
return HNDL_exception_raised;
- }
-
- if ( (msr_content ^ v->arch.hvm_vcpu.guest_efer) & EFER_NX )
- write_efer((read_efer() & ~EFER_NX) | (msr_content & EFER_NX));
-
- v->arch.hvm_vcpu.guest_efer = msr_content;
break;
default:
@@ -1096,6 +1051,10 @@ static void vmx_update_guest_efer(struct vcpu *v)
vmx_vmcs_exit(v);
#endif
+
+ if ( v == current )
+ write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
+ (v->arch.hvm_vcpu.guest_efer & (EFER_NX|EFER_SCE)));
}
static void vmx_flush_guest_tlbs(void)
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 33288e3a7d..62fd6fa04e 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -234,6 +234,7 @@ int hvm_do_hypercall(struct cpu_user_regs *pregs);
void hvm_hlt(unsigned long rflags);
void hvm_triple_fault(void);
+int hvm_set_efer(uint64_t value);
int hvm_set_cr0(unsigned long value);
int hvm_set_cr3(unsigned long value);
int hvm_set_cr4(unsigned long value);