author    Keir Fraser <keir.fraser@citrix.com>  2008-02-22 18:32:41 +0000
committer Keir Fraser <keir.fraser@citrix.com>  2008-02-22 18:32:41 +0000
commit    2ea2f369eb3aae4a9bfaa86ce51c31a5258ee20f (patch)
tree      6a872eaf316eb132693dcefac373fd7a9fd668b4 /xen/arch/x86/hvm
parent    15adfd0426ab8db33c0a26022b8440b836cc1cd4 (diff)
hvm: More emulation changes: push some of the realmode or HVM-emulate
specific stuff into core x86_emulate().

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
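The matching x86_emulate changes (xen/arch/x86/x86_emulate.c and its header)
fall outside this diffstat. From the consumers below, which read
hvmemul_ctxt->ctxt.retire.flags.mov_ss, .sti and .hlt and replace the old
flag_word with explicit exn_* fields, the retire-flags side of
struct x86_emulate_ctxt plausibly looks like the following sketch:

/* Sketch only: inferred from the consumers in this diff, not the verbatim
 * header change (xen/include/asm-x86/x86_emulate.h is not in this
 * diffstat). x86_emulate() sets these bits as a side effect of the
 * instruction it retires; the caller acts on them afterwards. */
struct x86_emulate_ctxt {
    /* ... existing in/out fields (regs, addr_size, sp_size, ...) ... */
    union {
        struct {
            uint8_t hlt:1;    /* instruction was HLT */
            uint8_t mov_ss:1; /* instruction requests MOV-SS shadow */
            uint8_t sti:1;    /* instruction requests STI shadow */
        } flags;
        uint8_t byte;
    } retire;
};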
Diffstat (limited to 'xen/arch/x86/hvm')
-rw-r--r--  xen/arch/x86/hvm/emulate.c           |  76
-rw-r--r--  xen/arch/x86/hvm/hvm.c               |  20
-rw-r--r--  xen/arch/x86/hvm/io.c                |   4
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c           |  28
-rw-r--r--  xen/arch/x86/hvm/vmx/intr.c          |   4
-rw-r--r--  xen/arch/x86/hvm/vmx/realmode.c      | 121
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c           |  33
-rw-r--r--  xen/arch/x86/hvm/vmx/x86_32/exits.S  |   9
-rw-r--r--  xen/arch/x86/hvm/vmx/x86_64/exits.S  |   9

9 files changed, 133 insertions(+), 171 deletions(-)
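The old interrupt_blocked vendor callback gives way to a get/set pair for the
interrupt shadow. The hvm.h header change is likewise outside this diffstat;
the declarations below are a sketch whose signatures are taken from the SVM
and VMX implementations in this diff, and the HVM_INTR_SHADOW_* bit values
are an assumption based on vmx_get_interrupt_shadow() returning the VMCS
interruptibility-state field untranslated:

/* Sketch: new members of struct hvm_function_table (hvm.h not shown). */
unsigned int (*get_interrupt_shadow)(struct vcpu *v);
void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow);

/* Assumed bit layout, matching VMX GUEST_INTERRUPTIBILITY_INFO so the
 * VMX hooks can pass the field through without translation. */
#define HVM_INTR_SHADOW_STI    0x00000001
#define HVM_INTR_SHADOW_MOV_SS 0x00000002
#define HVM_INTR_SHADOW_NMI    0x00000008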
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index f30c6fed1c..7ba9b703de 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -124,8 +124,9 @@ static int hvmemul_virtual_to_linear(
if ( !okay )
{
- hvmemul_ctxt->flags.exn_pending = 1;
+ hvmemul_ctxt->exn_pending = 1;
hvmemul_ctxt->exn_vector = TRAP_gp_fault;
+ hvmemul_ctxt->exn_error_code = 0;
hvmemul_ctxt->exn_insn_len = 0;
return X86EMUL_EXCEPTION;
}
@@ -439,9 +440,6 @@ static int hvmemul_write_segment(
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
- if ( seg == x86_seg_ss )
- hvmemul_ctxt->flags.mov_ss = 1;
-
memcpy(sreg, reg, sizeof(struct segment_register));
__set_bit(seg, &hvmemul_ctxt->seg_reg_dirty);
@@ -571,17 +569,6 @@ static int hvmemul_write_msr(
return hvm_funcs.msr_write_intercept(&_regs);
}
-static int hvmemul_write_rflags(
- unsigned long val,
- struct x86_emulate_ctxt *ctxt)
-{
- struct hvm_emulate_ctxt *hvmemul_ctxt =
- container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- if ( (val & X86_EFLAGS_IF) && !(ctxt->regs->eflags & X86_EFLAGS_IF) )
- hvmemul_ctxt->flags.sti = 1;
- return X86EMUL_OKAY;
-}
-
static int hvmemul_wbinvd(
struct x86_emulate_ctxt *ctxt)
{
@@ -600,28 +587,17 @@ static int hvmemul_cpuid(
return X86EMUL_OKAY;
}
-static int hvmemul_hlt(
- struct x86_emulate_ctxt *ctxt)
-{
- struct hvm_emulate_ctxt *hvmemul_ctxt =
- container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- hvmemul_ctxt->flags.hlt = 1;
- return X86EMUL_OKAY;
-}
-
static int hvmemul_inject_hw_exception(
uint8_t vector,
- uint16_t error_code,
+ int32_t error_code,
struct x86_emulate_ctxt *ctxt)
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- if ( error_code != 0 )
- return X86EMUL_UNHANDLEABLE;
-
- hvmemul_ctxt->flags.exn_pending = 1;
+ hvmemul_ctxt->exn_pending = 1;
hvmemul_ctxt->exn_vector = vector;
+ hvmemul_ctxt->exn_error_code = error_code;
hvmemul_ctxt->exn_insn_len = 0;
return X86EMUL_OKAY;
@@ -635,8 +611,9 @@ static int hvmemul_inject_sw_interrupt(
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- hvmemul_ctxt->flags.exn_pending = 1;
+ hvmemul_ctxt->exn_pending = 1;
hvmemul_ctxt->exn_vector = vector;
+ hvmemul_ctxt->exn_error_code = -1;
hvmemul_ctxt->exn_insn_len = insn_len;
return X86EMUL_OKAY;
@@ -684,10 +661,8 @@ static struct x86_emulate_ops hvm_emulate_ops = {
.write_cr = hvmemul_write_cr,
.read_msr = hvmemul_read_msr,
.write_msr = hvmemul_write_msr,
- .write_rflags = hvmemul_write_rflags,
.wbinvd = hvmemul_wbinvd,
.cpuid = hvmemul_cpuid,
- .hlt = hvmemul_hlt,
.inject_hw_exception = hvmemul_inject_hw_exception,
.inject_sw_interrupt = hvmemul_inject_sw_interrupt,
.load_fpu_ctxt = hvmemul_load_fpu_ctxt,
@@ -698,7 +673,9 @@ int hvm_emulate_one(
struct hvm_emulate_ctxt *hvmemul_ctxt)
{
struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
+ uint32_t new_intr_shadow;
unsigned long addr;
+ int rc;
hvmemul_ctxt->ctxt.addr_size =
hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
@@ -715,15 +692,46 @@ int hvm_emulate_one(
hvmemul_ctxt->insn_buf, addr, sizeof(hvmemul_ctxt->insn_buf)))
? sizeof(hvmemul_ctxt->insn_buf) : 0;
- hvmemul_ctxt->flag_word = 0;
+ hvmemul_ctxt->exn_pending = 0;
+
+ rc = x86_emulate(&hvmemul_ctxt->ctxt, &hvm_emulate_ops);
+ if ( rc != X86EMUL_OKAY )
+ return rc;
- return x86_emulate(&hvmemul_ctxt->ctxt, &hvm_emulate_ops);
+ new_intr_shadow = hvmemul_ctxt->intr_shadow;
+
+ /* MOV-SS instruction toggles MOV-SS shadow, else we just clear it. */
+ if ( hvmemul_ctxt->ctxt.retire.flags.mov_ss )
+ new_intr_shadow ^= HVM_INTR_SHADOW_MOV_SS;
+ else
+ new_intr_shadow &= ~HVM_INTR_SHADOW_MOV_SS;
+
+ /* STI instruction toggles STI shadow, else we just clear it. */
+ if ( hvmemul_ctxt->ctxt.retire.flags.sti )
+ new_intr_shadow ^= HVM_INTR_SHADOW_STI;
+ else
+ new_intr_shadow &= ~HVM_INTR_SHADOW_STI;
+
+ if ( hvmemul_ctxt->intr_shadow != new_intr_shadow )
+ {
+ hvmemul_ctxt->intr_shadow = new_intr_shadow;
+ hvm_funcs.set_interrupt_shadow(current, new_intr_shadow);
+ }
+
+ if ( hvmemul_ctxt->ctxt.retire.flags.hlt &&
+ !hvm_local_events_need_delivery(current) )
+ {
+ hvm_hlt(regs->eflags);
+ }
+
+ return X86EMUL_OKAY;
}
void hvm_emulate_prepare(
struct hvm_emulate_ctxt *hvmemul_ctxt,
struct cpu_user_regs *regs)
{
+ hvmemul_ctxt->intr_shadow = hvm_funcs.get_interrupt_shadow(current);
hvmemul_ctxt->ctxt.regs = regs;
hvmemul_ctxt->ctxt.force_writeback = 1;
hvmemul_ctxt->seg_reg_accessed = 0;
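With interrupt-shadow tracking and HLT retirement now folded into
hvm_emulate_one(), a caller only has to prepare the context, dispatch on the
return code, and write back. A minimal sketch of that pattern, modelled on
the handle_mmio() hunk in io.c below:

struct hvm_emulate_ctxt ctxt;

hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());
switch ( hvm_emulate_one(&ctxt) )
{
case X86EMUL_EXCEPTION:
    /* Exceptions now carry an error code (-1 means none). */
    if ( ctxt.exn_pending )
        hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
    break;
default:
    break;
}
hvm_emulate_writeback(&ctxt);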
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 82dc934c3f..954f48e58c 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1640,12 +1640,22 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack)
{
- enum hvm_intblk r;
+ unsigned long intr_shadow;
+
ASSERT(v == current);
- r = hvm_funcs.interrupt_blocked(v, intack);
- if ( r != hvm_intblk_none )
- return r;
+ if ( (intack.source != hvm_intsrc_nmi) &&
+ !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
+ return hvm_intblk_rflags_ie;
+
+ intr_shadow = hvm_funcs.get_interrupt_shadow(v);
+
+ if ( intr_shadow & (HVM_INTR_SHADOW_STI|HVM_INTR_SHADOW_MOV_SS) )
+ return hvm_intblk_shadow;
+
+ if ( intack.source == hvm_intsrc_nmi )
+ return ((intr_shadow & HVM_INTR_SHADOW_NMI) ?
+ hvm_intblk_nmi_iret : hvm_intblk_none);
if ( intack.source == hvm_intsrc_lapic )
{
@@ -1654,7 +1664,7 @@ enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack)
return hvm_intblk_tpr;
}
- return r;
+ return hvm_intblk_none;
}
static long hvm_grant_table_op(
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 48415c7690..83d73a0e9e 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -262,8 +262,8 @@ int handle_mmio(void)
ctxt.insn_buf[4], ctxt.insn_buf[5]);
return 0;
case X86EMUL_EXCEPTION:
- if ( ctxt.flags.exn_pending )
- hvm_inject_exception(ctxt.exn_vector, 0, 0);
+ if ( ctxt.exn_pending )
+ hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
break;
default:
break;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index fe4fd7e544..f6254e1949 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -366,24 +366,17 @@ static void svm_fpu_leave(struct vcpu *v)
}
}
-static enum hvm_intblk svm_interrupt_blocked(
- struct vcpu *v, struct hvm_intack intack)
+static unsigned int svm_get_interrupt_shadow(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ return (vmcb->interrupt_shadow ? HVM_INTR_SHADOW_MOV_SS : 0);
+}
- if ( vmcb->interrupt_shadow )
- return hvm_intblk_shadow;
-
- if ( intack.source == hvm_intsrc_nmi )
- return hvm_intblk_none;
-
- ASSERT((intack.source == hvm_intsrc_pic) ||
- (intack.source == hvm_intsrc_lapic));
-
- if ( !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
- return hvm_intblk_rflags_ie;
-
- return hvm_intblk_none;
+static void svm_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
+{
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ vmcb->interrupt_shadow = !!(intr_shadow &
+ (HVM_INTR_SHADOW_MOV_SS|HVM_INTR_SHADOW_STI));
}
static int svm_guest_x86_mode(struct vcpu *v)
@@ -779,7 +772,8 @@ static struct hvm_function_table svm_function_table = {
.vcpu_destroy = svm_vcpu_destroy,
.save_cpu_ctxt = svm_save_vmcb_ctxt,
.load_cpu_ctxt = svm_load_vmcb_ctxt,
- .interrupt_blocked = svm_interrupt_blocked,
+ .get_interrupt_shadow = svm_get_interrupt_shadow,
+ .set_interrupt_shadow = svm_set_interrupt_shadow,
.guest_x86_mode = svm_guest_x86_mode,
.get_segment_register = svm_get_segment_register,
.set_segment_register = svm_set_segment_register,
@@ -1176,7 +1170,7 @@ static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
/* Check for pending exception or new interrupt. */
if ( vmcb->eventinj.fields.v ||
((intack.source != hvm_intsrc_none) &&
- !svm_interrupt_blocked(current, intack)) )
+ !hvm_interrupt_blocked(current, intack)) )
{
HVMTRACE_1D(HLT, curr, /*int pending=*/ 1);
return;
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index da39d85e77..a212310d75 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -65,10 +65,6 @@
* Injecting a virtual NMI sets the NMI-blocking interruptibility flag only
* if the 'virtual NMIs' control is set. Injecting *any* kind of event clears
* the STI- and MOV-SS-blocking interruptibility-state flags.
- *
- * If MOV/POP SS is executed while MOV-SS-blocking is in effect, the effect
- * is cleared. If STI is executed while MOV-SS- or STI-blocking is in effect,
- * the effect is cleared. (i.e., MOV-SS-blocking 'dominates' STI-blocking).
*/
static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 9ab137970f..8667588883 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -21,25 +21,20 @@
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
-struct realmode_emulate_ctxt {
- struct hvm_emulate_ctxt hvm;
- uint32_t intr_shadow;
-};
-
static void realmode_deliver_exception(
unsigned int vector,
unsigned int insn_len,
- struct realmode_emulate_ctxt *rm_ctxt)
+ struct hvm_emulate_ctxt *hvmemul_ctxt)
{
struct segment_register *idtr, *csr;
- struct cpu_user_regs *regs = rm_ctxt->hvm.ctxt.regs;
+ struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
uint32_t cs_eip, pstk;
uint16_t frame[3];
unsigned int last_byte;
- idtr = hvmemul_get_seg_reg(x86_seg_idtr, &rm_ctxt->hvm);
- csr = hvmemul_get_seg_reg(x86_seg_cs, &rm_ctxt->hvm);
- __set_bit(x86_seg_cs, &rm_ctxt->hvm.seg_reg_dirty);
+ idtr = hvmemul_get_seg_reg(x86_seg_idtr, hvmemul_ctxt);
+ csr = hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
+ __set_bit(x86_seg_cs, &hvmemul_ctxt->seg_reg_dirty);
again:
last_byte = (vector * 4) + 3;
@@ -74,7 +69,7 @@ static void realmode_deliver_exception(
frame[1] = csr->sel;
frame[2] = regs->eflags & ~X86_EFLAGS_RF;
- if ( rm_ctxt->hvm.ctxt.addr_size == 32 )
+ if ( hvmemul_ctxt->ctxt.addr_size == 32 )
{
regs->esp -= 6;
pstk = regs->esp;
@@ -86,7 +81,7 @@ static void realmode_deliver_exception(
regs->esp |= pstk;
}
- pstk += hvmemul_get_seg_reg(x86_seg_ss, &rm_ctxt->hvm)->base;
+ pstk += hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt)->base;
(void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame));
csr->sel = cs_eip >> 16;
@@ -95,41 +90,42 @@ static void realmode_deliver_exception(
regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);
/* Exception delivery clears STI and MOV-SS blocking. */
- if ( rm_ctxt->intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
+ if ( hvmemul_ctxt->intr_shadow &
+ (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
{
- rm_ctxt->intr_shadow &= ~(VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS);
- __vmwrite(GUEST_INTERRUPTIBILITY_INFO, rm_ctxt->intr_shadow);
+ hvmemul_ctxt->intr_shadow &=
+ ~(VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS);
+ __vmwrite(GUEST_INTERRUPTIBILITY_INFO, hvmemul_ctxt->intr_shadow);
}
}
-static void realmode_emulate_one(struct realmode_emulate_ctxt *rm_ctxt)
+static void realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
{
- struct cpu_user_regs *regs = rm_ctxt->hvm.ctxt.regs;
struct vcpu *curr = current;
unsigned long seg_reg_dirty;
- uint32_t new_intr_shadow, intr_info;
+ uint32_t intr_info;
int rc;
- seg_reg_dirty = rm_ctxt->hvm.seg_reg_dirty;
- rm_ctxt->hvm.seg_reg_dirty = 0;
+ seg_reg_dirty = hvmemul_ctxt->seg_reg_dirty;
+ hvmemul_ctxt->seg_reg_dirty = 0;
- rc = hvm_emulate_one(&rm_ctxt->hvm);
+ rc = hvm_emulate_one(hvmemul_ctxt);
- if ( test_bit(x86_seg_cs, &rm_ctxt->hvm.seg_reg_dirty) )
+ if ( test_bit(x86_seg_cs, &hvmemul_ctxt->seg_reg_dirty) )
{
curr->arch.hvm_vmx.vmxemul &= ~VMXEMUL_BAD_CS;
- if ( hvmemul_get_seg_reg(x86_seg_cs, &rm_ctxt->hvm)->sel & 3 )
+ if ( hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt)->sel & 3 )
curr->arch.hvm_vmx.vmxemul |= VMXEMUL_BAD_CS;
}
- if ( test_bit(x86_seg_ss, &rm_ctxt->hvm.seg_reg_dirty) )
+ if ( test_bit(x86_seg_ss, &hvmemul_ctxt->seg_reg_dirty) )
{
curr->arch.hvm_vmx.vmxemul &= ~VMXEMUL_BAD_SS;
- if ( hvmemul_get_seg_reg(x86_seg_ss, &rm_ctxt->hvm)->sel & 3 )
+ if ( hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt)->sel & 3 )
curr->arch.hvm_vmx.vmxemul |= VMXEMUL_BAD_SS;
}
- rm_ctxt->hvm.seg_reg_dirty |= seg_reg_dirty;
+ hvmemul_ctxt->seg_reg_dirty |= seg_reg_dirty;
if ( rc == X86EMUL_UNHANDLEABLE )
{
@@ -137,33 +133,9 @@ static void realmode_emulate_one(struct realmode_emulate_ctxt *rm_ctxt)
goto fail;
}
- if ( rc == X86EMUL_RETRY )
- return;
-
- new_intr_shadow = rm_ctxt->intr_shadow;
-
- /* MOV-SS instruction toggles MOV-SS shadow, else we just clear it. */
- if ( rm_ctxt->hvm.flags.mov_ss )
- new_intr_shadow ^= VMX_INTR_SHADOW_MOV_SS;
- else
- new_intr_shadow &= ~VMX_INTR_SHADOW_MOV_SS;
-
- /* STI instruction toggles STI shadow, else we just clear it. */
- if ( rm_ctxt->hvm.flags.sti )
- new_intr_shadow ^= VMX_INTR_SHADOW_STI;
- else
- new_intr_shadow &= ~VMX_INTR_SHADOW_STI;
-
- /* Update interrupt shadow information in VMCS only if it changes. */
- if ( rm_ctxt->intr_shadow != new_intr_shadow )
- {
- rm_ctxt->intr_shadow = new_intr_shadow;
- __vmwrite(GUEST_INTERRUPTIBILITY_INFO, rm_ctxt->intr_shadow);
- }
-
if ( rc == X86EMUL_EXCEPTION )
{
- if ( !rm_ctxt->hvm.flags.exn_pending )
+ if ( !hvmemul_ctxt->exn_pending )
{
intr_info = __vmread(VM_ENTRY_INTR_INFO);
__vmwrite(VM_ENTRY_INTR_INFO, 0);
@@ -172,23 +144,21 @@ static void realmode_emulate_one(struct realmode_emulate_ctxt *rm_ctxt)
gdprintk(XENLOG_ERR, "Exception pending but no info.\n");
goto fail;
}
- rm_ctxt->hvm.exn_vector = (uint8_t)intr_info;
- rm_ctxt->hvm.exn_insn_len = 0;
+ hvmemul_ctxt->exn_vector = (uint8_t)intr_info;
+ hvmemul_ctxt->exn_insn_len = 0;
}
if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
{
gdprintk(XENLOG_ERR, "Exception %02x in protected mode.\n",
- rm_ctxt->hvm.exn_vector);
+ hvmemul_ctxt->exn_vector);
goto fail;
}
realmode_deliver_exception(
- rm_ctxt->hvm.exn_vector, rm_ctxt->hvm.exn_insn_len, rm_ctxt);
- }
- else if ( rm_ctxt->hvm.flags.hlt && !hvm_local_events_need_delivery(curr) )
- {
- hvm_hlt(regs->eflags);
+ hvmemul_ctxt->exn_vector,
+ hvmemul_ctxt->exn_insn_len,
+ hvmemul_ctxt);
}
return;
@@ -197,18 +167,18 @@ static void realmode_emulate_one(struct realmode_emulate_ctxt *rm_ctxt)
gdprintk(XENLOG_ERR,
"Real-mode emulation failed @ %04x:%08lx: "
"%02x %02x %02x %02x %02x %02x\n",
- hvmemul_get_seg_reg(x86_seg_cs, &rm_ctxt->hvm)->sel,
- rm_ctxt->hvm.insn_buf_eip,
- rm_ctxt->hvm.insn_buf[0], rm_ctxt->hvm.insn_buf[1],
- rm_ctxt->hvm.insn_buf[2], rm_ctxt->hvm.insn_buf[3],
- rm_ctxt->hvm.insn_buf[4], rm_ctxt->hvm.insn_buf[5]);
+ hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt)->sel,
+ hvmemul_ctxt->insn_buf_eip,
+ hvmemul_ctxt->insn_buf[0], hvmemul_ctxt->insn_buf[1],
+ hvmemul_ctxt->insn_buf[2], hvmemul_ctxt->insn_buf[3],
+ hvmemul_ctxt->insn_buf[4], hvmemul_ctxt->insn_buf[5]);
domain_crash_synchronous();
}
void vmx_realmode(struct cpu_user_regs *regs)
{
struct vcpu *curr = current;
- struct realmode_emulate_ctxt rm_ctxt;
+ struct hvm_emulate_ctxt hvmemul_ctxt;
struct segment_register *sreg;
unsigned long intr_info;
unsigned int emulations = 0;
@@ -218,17 +188,16 @@ void vmx_realmode(struct cpu_user_regs *regs)
if ( intr_info & INTR_INFO_VALID_MASK )
__vmwrite(VM_ENTRY_INTR_INFO, 0);
- hvm_emulate_prepare(&rm_ctxt.hvm, regs);
- rm_ctxt.intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
+ hvm_emulate_prepare(&hvmemul_ctxt, regs);
if ( curr->arch.hvm_vcpu.io_completed )
- realmode_emulate_one(&rm_ctxt);
+ realmode_emulate_one(&hvmemul_ctxt);
/* Only deliver interrupts into emulated real mode. */
if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
(intr_info & INTR_INFO_VALID_MASK) )
{
- realmode_deliver_exception((uint8_t)intr_info, 0, &rm_ctxt);
+ realmode_deliver_exception((uint8_t)intr_info, 0, &hvmemul_ctxt);
intr_info = 0;
}
@@ -245,7 +214,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
!(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
hvm_local_events_need_delivery(curr) )
break;
- realmode_emulate_one(&rm_ctxt);
+ realmode_emulate_one(&hvmemul_ctxt);
}
if ( !curr->arch.hvm_vmx.vmxemul )
@@ -255,20 +224,20 @@ void vmx_realmode(struct cpu_user_regs *regs)
* At this point CS.RPL == SS.RPL == CS.DPL == SS.DPL == 0. For
* DS, ES, FS and GS the most uninvasive trick is to set DPL == RPL.
*/
- sreg = hvmemul_get_seg_reg(x86_seg_ds, &rm_ctxt.hvm);
+ sreg = hvmemul_get_seg_reg(x86_seg_ds, &hvmemul_ctxt);
sreg->attr.fields.dpl = sreg->sel & 3;
- sreg = hvmemul_get_seg_reg(x86_seg_es, &rm_ctxt.hvm);
+ sreg = hvmemul_get_seg_reg(x86_seg_es, &hvmemul_ctxt);
sreg->attr.fields.dpl = sreg->sel & 3;
- sreg = hvmemul_get_seg_reg(x86_seg_fs, &rm_ctxt.hvm);
+ sreg = hvmemul_get_seg_reg(x86_seg_fs, &hvmemul_ctxt);
sreg->attr.fields.dpl = sreg->sel & 3;
- sreg = hvmemul_get_seg_reg(x86_seg_gs, &rm_ctxt.hvm);
+ sreg = hvmemul_get_seg_reg(x86_seg_gs, &hvmemul_ctxt);
sreg->attr.fields.dpl = sreg->sel & 3;
- rm_ctxt.hvm.seg_reg_dirty |=
+ hvmemul_ctxt.seg_reg_dirty |=
(1ul << x86_seg_ds) | (1ul << x86_seg_es) |
(1ul << x86_seg_fs) | (1ul << x86_seg_gs);
}
- hvm_emulate_writeback(&rm_ctxt.hvm);
+ hvm_emulate_writeback(&hvmemul_ctxt);
/* Re-instate VM_ENTRY_INTR_INFO if we did not discharge it. */
if ( intr_info & INTR_INFO_VALID_MASK )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index d99a24ccdf..bcb371ca55 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -890,32 +890,14 @@ static void vmx_init_hypercall_page(struct domain *d, void *hypercall_page)
*(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
}
-static enum hvm_intblk vmx_interrupt_blocked(
- struct vcpu *v, struct hvm_intack intack)
+static unsigned int vmx_get_interrupt_shadow(struct vcpu *v)
{
- unsigned long intr_shadow;
-
- /*
- * Test EFLAGS.IF first. It is often the most likely reason for interrupt
- * blockage, and is the cheapest to test (because no VMREAD is required).
- */
- if ( (intack.source != hvm_intsrc_nmi) &&
- !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
- return hvm_intblk_rflags_ie;
-
- intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
-
- if ( intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
- return hvm_intblk_shadow;
-
- if ( intack.source == hvm_intsrc_nmi )
- return ((intr_shadow & VMX_INTR_SHADOW_NMI) ?
- hvm_intblk_nmi_iret : hvm_intblk_none);
-
- ASSERT((intack.source == hvm_intsrc_pic) ||
- (intack.source == hvm_intsrc_lapic));
+ return __vmread(GUEST_INTERRUPTIBILITY_INFO);
+}
- return hvm_intblk_none;
+static void vmx_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
+{
+ __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
}
static void vmx_update_host_cr3(struct vcpu *v)
@@ -1038,7 +1020,8 @@ static struct hvm_function_table vmx_function_table = {
.vcpu_destroy = vmx_vcpu_destroy,
.save_cpu_ctxt = vmx_save_vmcs_ctxt,
.load_cpu_ctxt = vmx_load_vmcs_ctxt,
- .interrupt_blocked = vmx_interrupt_blocked,
+ .get_interrupt_shadow = vmx_get_interrupt_shadow,
+ .set_interrupt_shadow = vmx_set_interrupt_shadow,
.guest_x86_mode = vmx_guest_x86_mode,
.get_segment_register = vmx_get_segment_register,
.set_segment_register = vmx_set_segment_register,
diff --git a/xen/arch/x86/hvm/vmx/x86_32/exits.S b/xen/arch/x86/hvm/vmx/x86_32/exits.S
index 56f241755d..70cd1dae36 100644
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S
+++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S
@@ -89,7 +89,7 @@ ENTRY(vmx_asm_vmexit_handler)
ALIGN
vmx_process_softirqs:
- sti
+ sti
call do_softirq
jmp vmx_asm_do_vmentry
@@ -104,6 +104,10 @@ ENTRY(vmx_asm_do_vmentry)
jnz vmx_process_softirqs
call vmx_intr_assist
+
+ testb $0xff,VCPU_vmx_emul(%ebx)
+ jnz vmx_goto_realmode
+
movl VCPU_hvm_guest_cr2(%ebx),%eax
movl %eax,%cr2
call vmx_trace_vmentry
@@ -115,9 +119,6 @@ ENTRY(vmx_asm_do_vmentry)
movl $GUEST_RFLAGS,%eax
VMWRITE(UREGS_eflags)
- testb $0xff,VCPU_vmx_emul(%ebx)
- jnz vmx_goto_realmode
-
cmpb $0,VCPU_vmx_launched(%ebx)
je vmx_launch
diff --git a/xen/arch/x86/hvm/vmx/x86_64/exits.S b/xen/arch/x86/hvm/vmx/x86_64/exits.S
index 738642f733..fda4f179b1 100644
--- a/xen/arch/x86/hvm/vmx/x86_64/exits.S
+++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S
@@ -105,7 +105,7 @@ ENTRY(vmx_asm_vmexit_handler)
ALIGN
vmx_process_softirqs:
- sti
+ sti
call do_softirq
jmp vmx_asm_do_vmentry
@@ -121,6 +121,10 @@ ENTRY(vmx_asm_do_vmentry)
jnz vmx_process_softirqs
call vmx_intr_assist
+
+ testb $0xff,VCPU_vmx_emul(%rbx)
+ jnz vmx_goto_realmode
+
movq VCPU_hvm_guest_cr2(%rbx),%rax
movq %rax,%cr2
call vmx_trace_vmentry
@@ -134,9 +138,6 @@ ENTRY(vmx_asm_do_vmentry)
movl $GUEST_RFLAGS,%eax
VMWRITE(UREGS_eflags)
- testb $0xff,VCPU_vmx_emul(%rbx)
- jnz vmx_goto_realmode
-
cmpb $0,VCPU_vmx_launched(%rbx)
je vmx_launch