author    Keir Fraser <keir.fraser@citrix.com>  2008-02-22 18:32:41 +0000
committer Keir Fraser <keir.fraser@citrix.com>  2008-02-22 18:32:41 +0000
commit    2ea2f369eb3aae4a9bfaa86ce51c31a5258ee20f (patch)
tree      6a872eaf316eb132693dcefac373fd7a9fd668b4 /xen/arch
parent    15adfd0426ab8db33c0a26022b8440b836cc1cd4 (diff)
hvm: More emulation changes: push some of the realmode or HVM-emulate
specific stuff into core x86_emulate().

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Diffstat (limited to 'xen/arch')
-rw-r--r--  xen/arch/x86/hvm/emulate.c           |  76
-rw-r--r--  xen/arch/x86/hvm/hvm.c               |  20
-rw-r--r--  xen/arch/x86/hvm/io.c                |   4
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c           |  28
-rw-r--r--  xen/arch/x86/hvm/vmx/intr.c          |   4
-rw-r--r--  xen/arch/x86/hvm/vmx/realmode.c      | 121
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c           |  33
-rw-r--r--  xen/arch/x86/hvm/vmx/x86_32/exits.S  |   9
-rw-r--r--  xen/arch/x86/hvm/vmx/x86_64/exits.S  |   9
-rw-r--r--  xen/arch/x86/x86_emulate.c           | 161
10 files changed, 213 insertions(+), 252 deletions(-)
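
The heart of this change is a small "retire" status block that x86_emulate()
now fills in for its caller, replacing the per-instruction callbacks
(write_rflags, hlt) it previously invoked. The header hunk lives under
xen/include and is outside this diffstat, but judging from the uses below
(ctxt->retire.byte, ctxt->retire.flags.mov_ss/sti/hlt) the new
x86_emulate_ctxt field plausibly looks like this sketch (layout assumed;
names taken from the patch):

    /* Retirement state reported by x86_emulate() to its caller. */
    union {
        struct {
            /* Insn retired as MOV/POP %ss: caller toggles MOV-SS shadow. */
            uint8_t mov_ss:1;
            /* STI retired with EFLAGS.IF previously clear: arm STI shadow. */
            uint8_t sti:1;
            /* HLT retired: caller may put the vCPU to sleep. */
            uint8_t hlt:1;
        } flags;
        uint8_t byte; /* zeroed in one go at the top of x86_emulate() */
    } retire;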
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index f30c6fed1c..7ba9b703de 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -124,8 +124,9 @@ static int hvmemul_virtual_to_linear(
if ( !okay )
{
- hvmemul_ctxt->flags.exn_pending = 1;
+ hvmemul_ctxt->exn_pending = 1;
hvmemul_ctxt->exn_vector = TRAP_gp_fault;
+ hvmemul_ctxt->exn_error_code = 0;
hvmemul_ctxt->exn_insn_len = 0;
return X86EMUL_EXCEPTION;
}
@@ -439,9 +440,6 @@ static int hvmemul_write_segment(
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
- if ( seg == x86_seg_ss )
- hvmemul_ctxt->flags.mov_ss = 1;
-
memcpy(sreg, reg, sizeof(struct segment_register));
__set_bit(seg, &hvmemul_ctxt->seg_reg_dirty);
@@ -571,17 +569,6 @@ static int hvmemul_write_msr(
return hvm_funcs.msr_write_intercept(&_regs);
}
-static int hvmemul_write_rflags(
- unsigned long val,
- struct x86_emulate_ctxt *ctxt)
-{
- struct hvm_emulate_ctxt *hvmemul_ctxt =
- container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- if ( (val & X86_EFLAGS_IF) && !(ctxt->regs->eflags & X86_EFLAGS_IF) )
- hvmemul_ctxt->flags.sti = 1;
- return X86EMUL_OKAY;
-}
-
static int hvmemul_wbinvd(
struct x86_emulate_ctxt *ctxt)
{
@@ -600,28 +587,17 @@ static int hvmemul_cpuid(
return X86EMUL_OKAY;
}
-static int hvmemul_hlt(
- struct x86_emulate_ctxt *ctxt)
-{
- struct hvm_emulate_ctxt *hvmemul_ctxt =
- container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- hvmemul_ctxt->flags.hlt = 1;
- return X86EMUL_OKAY;
-}
-
static int hvmemul_inject_hw_exception(
uint8_t vector,
- uint16_t error_code,
+ int32_t error_code,
struct x86_emulate_ctxt *ctxt)
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- if ( error_code != 0 )
- return X86EMUL_UNHANDLEABLE;
-
- hvmemul_ctxt->flags.exn_pending = 1;
+ hvmemul_ctxt->exn_pending = 1;
hvmemul_ctxt->exn_vector = vector;
+ hvmemul_ctxt->exn_error_code = error_code;
hvmemul_ctxt->exn_insn_len = 0;
return X86EMUL_OKAY;
@@ -635,8 +611,9 @@ static int hvmemul_inject_sw_interrupt(
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- hvmemul_ctxt->flags.exn_pending = 1;
+ hvmemul_ctxt->exn_pending = 1;
hvmemul_ctxt->exn_vector = vector;
+ hvmemul_ctxt->exn_error_code = -1;
hvmemul_ctxt->exn_insn_len = insn_len;
return X86EMUL_OKAY;
@@ -684,10 +661,8 @@ static struct x86_emulate_ops hvm_emulate_ops = {
.write_cr = hvmemul_write_cr,
.read_msr = hvmemul_read_msr,
.write_msr = hvmemul_write_msr,
- .write_rflags = hvmemul_write_rflags,
.wbinvd = hvmemul_wbinvd,
.cpuid = hvmemul_cpuid,
- .hlt = hvmemul_hlt,
.inject_hw_exception = hvmemul_inject_hw_exception,
.inject_sw_interrupt = hvmemul_inject_sw_interrupt,
.load_fpu_ctxt = hvmemul_load_fpu_ctxt,
@@ -698,7 +673,9 @@ int hvm_emulate_one(
struct hvm_emulate_ctxt *hvmemul_ctxt)
{
struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
+ uint32_t new_intr_shadow;
unsigned long addr;
+ int rc;
hvmemul_ctxt->ctxt.addr_size =
hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
@@ -715,15 +692,46 @@ int hvm_emulate_one(
hvmemul_ctxt->insn_buf, addr, sizeof(hvmemul_ctxt->insn_buf)))
? sizeof(hvmemul_ctxt->insn_buf) : 0;
- hvmemul_ctxt->flag_word = 0;
+ hvmemul_ctxt->exn_pending = 0;
+
+ rc = x86_emulate(&hvmemul_ctxt->ctxt, &hvm_emulate_ops);
+ if ( rc != X86EMUL_OKAY )
+ return rc;
- return x86_emulate(&hvmemul_ctxt->ctxt, &hvm_emulate_ops);
+ new_intr_shadow = hvmemul_ctxt->intr_shadow;
+
+ /* MOV-SS instruction toggles MOV-SS shadow, else we just clear it. */
+ if ( hvmemul_ctxt->ctxt.retire.flags.mov_ss )
+ new_intr_shadow ^= HVM_INTR_SHADOW_MOV_SS;
+ else
+ new_intr_shadow &= ~HVM_INTR_SHADOW_MOV_SS;
+
+ /* STI instruction toggles STI shadow, else we just clear it. */
+ if ( hvmemul_ctxt->ctxt.retire.flags.sti )
+ new_intr_shadow ^= HVM_INTR_SHADOW_STI;
+ else
+ new_intr_shadow &= ~HVM_INTR_SHADOW_STI;
+
+ if ( hvmemul_ctxt->intr_shadow != new_intr_shadow )
+ {
+ hvmemul_ctxt->intr_shadow = new_intr_shadow;
+ hvm_funcs.set_interrupt_shadow(current, new_intr_shadow);
+ }
+
+ if ( hvmemul_ctxt->ctxt.retire.flags.hlt &&
+ !hvm_local_events_need_delivery(current) )
+ {
+ hvm_hlt(regs->eflags);
+ }
+
+ return X86EMUL_OKAY;
}
void hvm_emulate_prepare(
struct hvm_emulate_ctxt *hvmemul_ctxt,
struct cpu_user_regs *regs)
{
+ hvmemul_ctxt->intr_shadow = hvm_funcs.get_interrupt_shadow(current);
hvmemul_ctxt->ctxt.regs = regs;
hvmemul_ctxt->ctxt.force_writeback = 1;
hvmemul_ctxt->seg_reg_accessed = 0;
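
With the interrupt-shadow and HLT bookkeeping folded into hvm_emulate_one(),
every caller now drives the emulator through the same
prepare/emulate/writeback sequence. A minimal sketch of that pattern,
assembled from the handle_mmio() and vmx_realmode() code in this patch
(error paths trimmed):

    struct hvm_emulate_ctxt ctxt;

    hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());

    switch ( hvm_emulate_one(&ctxt) )
    {
    case X86EMUL_EXCEPTION:
        /* Emulation raised a fault: reflect it into the guest. */
        if ( ctxt.exn_pending )
            hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
        break;
    default:
        break;
    }

    hvm_emulate_writeback(&ctxt); /* commit dirtied segment registers */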
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 82dc934c3f..954f48e58c 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1640,12 +1640,22 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack)
{
- enum hvm_intblk r;
+ unsigned long intr_shadow;
+
ASSERT(v == current);
- r = hvm_funcs.interrupt_blocked(v, intack);
- if ( r != hvm_intblk_none )
- return r;
+ if ( (intack.source != hvm_intsrc_nmi) &&
+ !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
+ return hvm_intblk_rflags_ie;
+
+ intr_shadow = hvm_funcs.get_interrupt_shadow(v);
+
+ if ( intr_shadow & (HVM_INTR_SHADOW_STI|HVM_INTR_SHADOW_MOV_SS) )
+ return hvm_intblk_shadow;
+
+ if ( intack.source == hvm_intsrc_nmi )
+ return ((intr_shadow & HVM_INTR_SHADOW_NMI) ?
+ hvm_intblk_nmi_iret : hvm_intblk_none);
if ( intack.source == hvm_intsrc_lapic )
{
@@ -1654,7 +1664,7 @@ enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack)
return hvm_intblk_tpr;
}
- return r;
+ return hvm_intblk_none;
}
static long hvm_grant_table_op(
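
The per-vendor interrupt_blocked hook disappears in favour of a get/set
pair, with the blocking policy itself centralised in hvm_interrupt_blocked()
above. The hvm_function_table declaration is under xen/include and not in
this slice, but from the SVM and VMX implementations below the two new
entries presumably have these shapes:

    unsigned int (*get_interrupt_shadow)(struct vcpu *v);
    void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow);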
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 48415c7690..83d73a0e9e 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -262,8 +262,8 @@ int handle_mmio(void)
ctxt.insn_buf[4], ctxt.insn_buf[5]);
return 0;
case X86EMUL_EXCEPTION:
- if ( ctxt.flags.exn_pending )
- hvm_inject_exception(ctxt.exn_vector, 0, 0);
+ if ( ctxt.exn_pending )
+ hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
break;
default:
break;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index fe4fd7e544..f6254e1949 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -366,24 +366,17 @@ static void svm_fpu_leave(struct vcpu *v)
}
}
-static enum hvm_intblk svm_interrupt_blocked(
- struct vcpu *v, struct hvm_intack intack)
+static unsigned int svm_get_interrupt_shadow(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ return (vmcb->interrupt_shadow ? HVM_INTR_SHADOW_MOV_SS : 0);
+}
- if ( vmcb->interrupt_shadow )
- return hvm_intblk_shadow;
-
- if ( intack.source == hvm_intsrc_nmi )
- return hvm_intblk_none;
-
- ASSERT((intack.source == hvm_intsrc_pic) ||
- (intack.source == hvm_intsrc_lapic));
-
- if ( !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
- return hvm_intblk_rflags_ie;
-
- return hvm_intblk_none;
+static void svm_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
+{
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ vmcb->interrupt_shadow = !!(intr_shadow &
+ (HVM_INTR_SHADOW_MOV_SS|HVM_INTR_SHADOW_STI));
}
static int svm_guest_x86_mode(struct vcpu *v)
@@ -779,7 +772,8 @@ static struct hvm_function_table svm_function_table = {
.vcpu_destroy = svm_vcpu_destroy,
.save_cpu_ctxt = svm_save_vmcb_ctxt,
.load_cpu_ctxt = svm_load_vmcb_ctxt,
- .interrupt_blocked = svm_interrupt_blocked,
+ .get_interrupt_shadow = svm_get_interrupt_shadow,
+ .set_interrupt_shadow = svm_set_interrupt_shadow,
.guest_x86_mode = svm_guest_x86_mode,
.get_segment_register = svm_get_segment_register,
.set_segment_register = svm_set_segment_register,
@@ -1176,7 +1170,7 @@ static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
/* Check for pending exception or new interrupt. */
if ( vmcb->eventinj.fields.v ||
((intack.source != hvm_intsrc_none) &&
- !svm_interrupt_blocked(current, intack)) )
+ !hvm_interrupt_blocked(current, intack)) )
{
HVMTRACE_1D(HLT, curr, /*int pending=*/ 1);
return;
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index da39d85e77..a212310d75 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -65,10 +65,6 @@
* Injecting a virtual NMI sets the NMI-blocking interruptibility flag only
* if the 'virtual NMIs' control is set. Injecting *any* kind of event clears
* the STI- and MOV-SS-blocking interruptibility-state flags.
- *
- * If MOV/POP SS is executed while MOV-SS-blocking is in effect, the effect
- * is cleared. If STI is executed while MOV-SS- or STI-blocking is in effect,
- * the effect is cleared. (i.e., MOV-SS-blocking 'dominates' STI-blocking).
*/
static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
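
The paragraph deleted above is not lost information: its toggle semantics
are now encoded by the XOR updates in hvm_emulate_one() (see the emulate.c
hunk earlier). An illustrative trace of that logic, not code from the patch:

    /*
     * shadow before | insn retired  | shadow after
     * --------------+---------------+----------------------------------
     * (none)        | mov %eax,%ss  | MOV_SS  (blocking armed)
     * MOV_SS        | mov %eax,%ss  | (none)  (toggle clears it)
     * MOV_SS or STI | anything else | (none)  (any other insn clears it)
     */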
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 9ab137970f..8667588883 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -21,25 +21,20 @@
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
-struct realmode_emulate_ctxt {
- struct hvm_emulate_ctxt hvm;
- uint32_t intr_shadow;
-};
-
static void realmode_deliver_exception(
unsigned int vector,
unsigned int insn_len,
- struct realmode_emulate_ctxt *rm_ctxt)
+ struct hvm_emulate_ctxt *hvmemul_ctxt)
{
struct segment_register *idtr, *csr;
- struct cpu_user_regs *regs = rm_ctxt->hvm.ctxt.regs;
+ struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
uint32_t cs_eip, pstk;
uint16_t frame[3];
unsigned int last_byte;
- idtr = hvmemul_get_seg_reg(x86_seg_idtr, &rm_ctxt->hvm);
- csr = hvmemul_get_seg_reg(x86_seg_cs, &rm_ctxt->hvm);
- __set_bit(x86_seg_cs, &rm_ctxt->hvm.seg_reg_dirty);
+ idtr = hvmemul_get_seg_reg(x86_seg_idtr, hvmemul_ctxt);
+ csr = hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
+ __set_bit(x86_seg_cs, &hvmemul_ctxt->seg_reg_dirty);
again:
last_byte = (vector * 4) + 3;
@@ -74,7 +69,7 @@ static void realmode_deliver_exception(
frame[1] = csr->sel;
frame[2] = regs->eflags & ~X86_EFLAGS_RF;
- if ( rm_ctxt->hvm.ctxt.addr_size == 32 )
+ if ( hvmemul_ctxt->ctxt.addr_size == 32 )
{
regs->esp -= 6;
pstk = regs->esp;
@@ -86,7 +81,7 @@ static void realmode_deliver_exception(
regs->esp |= pstk;
}
- pstk += hvmemul_get_seg_reg(x86_seg_ss, &rm_ctxt->hvm)->base;
+ pstk += hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt)->base;
(void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame));
csr->sel = cs_eip >> 16;
@@ -95,41 +90,42 @@ static void realmode_deliver_exception(
regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);
/* Exception delivery clears STI and MOV-SS blocking. */
- if ( rm_ctxt->intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
+ if ( hvmemul_ctxt->intr_shadow &
+ (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
{
- rm_ctxt->intr_shadow &= ~(VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS);
- __vmwrite(GUEST_INTERRUPTIBILITY_INFO, rm_ctxt->intr_shadow);
+ hvmemul_ctxt->intr_shadow &=
+ ~(VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS);
+ __vmwrite(GUEST_INTERRUPTIBILITY_INFO, hvmemul_ctxt->intr_shadow);
}
}
-static void realmode_emulate_one(struct realmode_emulate_ctxt *rm_ctxt)
+static void realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
{
- struct cpu_user_regs *regs = rm_ctxt->hvm.ctxt.regs;
struct vcpu *curr = current;
unsigned long seg_reg_dirty;
- uint32_t new_intr_shadow, intr_info;
+ uint32_t intr_info;
int rc;
- seg_reg_dirty = rm_ctxt->hvm.seg_reg_dirty;
- rm_ctxt->hvm.seg_reg_dirty = 0;
+ seg_reg_dirty = hvmemul_ctxt->seg_reg_dirty;
+ hvmemul_ctxt->seg_reg_dirty = 0;
- rc = hvm_emulate_one(&rm_ctxt->hvm);
+ rc = hvm_emulate_one(hvmemul_ctxt);
- if ( test_bit(x86_seg_cs, &rm_ctxt->hvm.seg_reg_dirty) )
+ if ( test_bit(x86_seg_cs, &hvmemul_ctxt->seg_reg_dirty) )
{
curr->arch.hvm_vmx.vmxemul &= ~VMXEMUL_BAD_CS;
- if ( hvmemul_get_seg_reg(x86_seg_cs, &rm_ctxt->hvm)->sel & 3 )
+ if ( hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt)->sel & 3 )
curr->arch.hvm_vmx.vmxemul |= VMXEMUL_BAD_CS;
}
- if ( test_bit(x86_seg_ss, &rm_ctxt->hvm.seg_reg_dirty) )
+ if ( test_bit(x86_seg_ss, &hvmemul_ctxt->seg_reg_dirty) )
{
curr->arch.hvm_vmx.vmxemul &= ~VMXEMUL_BAD_SS;
- if ( hvmemul_get_seg_reg(x86_seg_ss, &rm_ctxt->hvm)->sel & 3 )
+ if ( hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt)->sel & 3 )
curr->arch.hvm_vmx.vmxemul |= VMXEMUL_BAD_SS;
}
- rm_ctxt->hvm.seg_reg_dirty |= seg_reg_dirty;
+ hvmemul_ctxt->seg_reg_dirty |= seg_reg_dirty;
if ( rc == X86EMUL_UNHANDLEABLE )
{
@@ -137,33 +133,9 @@ static void realmode_emulate_one(struct realmode_emulate_ctxt *rm_ctxt)
goto fail;
}
- if ( rc == X86EMUL_RETRY )
- return;
-
- new_intr_shadow = rm_ctxt->intr_shadow;
-
- /* MOV-SS instruction toggles MOV-SS shadow, else we just clear it. */
- if ( rm_ctxt->hvm.flags.mov_ss )
- new_intr_shadow ^= VMX_INTR_SHADOW_MOV_SS;
- else
- new_intr_shadow &= ~VMX_INTR_SHADOW_MOV_SS;
-
- /* STI instruction toggles STI shadow, else we just clear it. */
- if ( rm_ctxt->hvm.flags.sti )
- new_intr_shadow ^= VMX_INTR_SHADOW_STI;
- else
- new_intr_shadow &= ~VMX_INTR_SHADOW_STI;
-
- /* Update interrupt shadow information in VMCS only if it changes. */
- if ( rm_ctxt->intr_shadow != new_intr_shadow )
- {
- rm_ctxt->intr_shadow = new_intr_shadow;
- __vmwrite(GUEST_INTERRUPTIBILITY_INFO, rm_ctxt->intr_shadow);
- }
-
if ( rc == X86EMUL_EXCEPTION )
{
- if ( !rm_ctxt->hvm.flags.exn_pending )
+ if ( !hvmemul_ctxt->exn_pending )
{
intr_info = __vmread(VM_ENTRY_INTR_INFO);
__vmwrite(VM_ENTRY_INTR_INFO, 0);
@@ -172,23 +144,21 @@ static void realmode_emulate_one(struct realmode_emulate_ctxt *rm_ctxt)
gdprintk(XENLOG_ERR, "Exception pending but no info.\n");
goto fail;
}
- rm_ctxt->hvm.exn_vector = (uint8_t)intr_info;
- rm_ctxt->hvm.exn_insn_len = 0;
+ hvmemul_ctxt->exn_vector = (uint8_t)intr_info;
+ hvmemul_ctxt->exn_insn_len = 0;
}
if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
{
gdprintk(XENLOG_ERR, "Exception %02x in protected mode.\n",
- rm_ctxt->hvm.exn_vector);
+ hvmemul_ctxt->exn_vector);
goto fail;
}
realmode_deliver_exception(
- rm_ctxt->hvm.exn_vector, rm_ctxt->hvm.exn_insn_len, rm_ctxt);
- }
- else if ( rm_ctxt->hvm.flags.hlt && !hvm_local_events_need_delivery(curr) )
- {
- hvm_hlt(regs->eflags);
+ hvmemul_ctxt->exn_vector,
+ hvmemul_ctxt->exn_insn_len,
+ hvmemul_ctxt);
}
return;
@@ -197,18 +167,18 @@ static void realmode_emulate_one(struct realmode_emulate_ctxt *rm_ctxt)
gdprintk(XENLOG_ERR,
"Real-mode emulation failed @ %04x:%08lx: "
"%02x %02x %02x %02x %02x %02x\n",
- hvmemul_get_seg_reg(x86_seg_cs, &rm_ctxt->hvm)->sel,
- rm_ctxt->hvm.insn_buf_eip,
- rm_ctxt->hvm.insn_buf[0], rm_ctxt->hvm.insn_buf[1],
- rm_ctxt->hvm.insn_buf[2], rm_ctxt->hvm.insn_buf[3],
- rm_ctxt->hvm.insn_buf[4], rm_ctxt->hvm.insn_buf[5]);
+ hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt)->sel,
+ hvmemul_ctxt->insn_buf_eip,
+ hvmemul_ctxt->insn_buf[0], hvmemul_ctxt->insn_buf[1],
+ hvmemul_ctxt->insn_buf[2], hvmemul_ctxt->insn_buf[3],
+ hvmemul_ctxt->insn_buf[4], hvmemul_ctxt->insn_buf[5]);
domain_crash_synchronous();
}
void vmx_realmode(struct cpu_user_regs *regs)
{
struct vcpu *curr = current;
- struct realmode_emulate_ctxt rm_ctxt;
+ struct hvm_emulate_ctxt hvmemul_ctxt;
struct segment_register *sreg;
unsigned long intr_info;
unsigned int emulations = 0;
@@ -218,17 +188,16 @@ void vmx_realmode(struct cpu_user_regs *regs)
if ( intr_info & INTR_INFO_VALID_MASK )
__vmwrite(VM_ENTRY_INTR_INFO, 0);
- hvm_emulate_prepare(&rm_ctxt.hvm, regs);
- rm_ctxt.intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
+ hvm_emulate_prepare(&hvmemul_ctxt, regs);
if ( curr->arch.hvm_vcpu.io_completed )
- realmode_emulate_one(&rm_ctxt);
+ realmode_emulate_one(&hvmemul_ctxt);
/* Only deliver interrupts into emulated real mode. */
if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
(intr_info & INTR_INFO_VALID_MASK) )
{
- realmode_deliver_exception((uint8_t)intr_info, 0, &rm_ctxt);
+ realmode_deliver_exception((uint8_t)intr_info, 0, &hvmemul_ctxt);
intr_info = 0;
}
@@ -245,7 +214,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
!(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
hvm_local_events_need_delivery(curr) )
break;
- realmode_emulate_one(&rm_ctxt);
+ realmode_emulate_one(&hvmemul_ctxt);
}
if ( !curr->arch.hvm_vmx.vmxemul )
@@ -255,20 +224,20 @@ void vmx_realmode(struct cpu_user_regs *regs)
* At this point CS.RPL == SS.RPL == CS.DPL == SS.DPL == 0. For
* DS, ES, FS and GS the most uninvasive trick is to set DPL == RPL.
*/
- sreg = hvmemul_get_seg_reg(x86_seg_ds, &rm_ctxt.hvm);
+ sreg = hvmemul_get_seg_reg(x86_seg_ds, &hvmemul_ctxt);
sreg->attr.fields.dpl = sreg->sel & 3;
- sreg = hvmemul_get_seg_reg(x86_seg_es, &rm_ctxt.hvm);
+ sreg = hvmemul_get_seg_reg(x86_seg_es, &hvmemul_ctxt);
sreg->attr.fields.dpl = sreg->sel & 3;
- sreg = hvmemul_get_seg_reg(x86_seg_fs, &rm_ctxt.hvm);
+ sreg = hvmemul_get_seg_reg(x86_seg_fs, &hvmemul_ctxt);
sreg->attr.fields.dpl = sreg->sel & 3;
- sreg = hvmemul_get_seg_reg(x86_seg_gs, &rm_ctxt.hvm);
+ sreg = hvmemul_get_seg_reg(x86_seg_gs, &hvmemul_ctxt);
sreg->attr.fields.dpl = sreg->sel & 3;
- rm_ctxt.hvm.seg_reg_dirty |=
+ hvmemul_ctxt.seg_reg_dirty |=
(1ul << x86_seg_ds) | (1ul << x86_seg_es) |
(1ul << x86_seg_fs) | (1ul << x86_seg_gs);
}
- hvm_emulate_writeback(&rm_ctxt.hvm);
+ hvm_emulate_writeback(&hvmemul_ctxt);
/* Re-instate VM_ENTRY_INTR_INFO if we did not discharge it. */
if ( intr_info & INTR_INFO_VALID_MASK )
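
Folding struct realmode_emulate_ctxt away means struct hvm_emulate_ctxt must
now carry the interruptibility and exception state itself. Again the header
is outside this diffstat; inferred from the accesses in this file and in
emulate.c, the relevant fields are presumably along these lines (types
assumed from usage):

    struct hvm_emulate_ctxt {
        struct x86_emulate_ctxt ctxt;
        /* ... insn buffer and segment-register caches ... */
        uint32_t intr_shadow;     /* cached by hvm_emulate_prepare() */
        uint8_t  exn_pending;
        uint8_t  exn_vector;
        uint8_t  exn_insn_len;
        int32_t  exn_error_code;  /* -1 == no error code */
    };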
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index d99a24ccdf..bcb371ca55 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -890,32 +890,14 @@ static void vmx_init_hypercall_page(struct domain *d, void *hypercall_page)
*(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
}
-static enum hvm_intblk vmx_interrupt_blocked(
- struct vcpu *v, struct hvm_intack intack)
+static unsigned int vmx_get_interrupt_shadow(struct vcpu *v)
{
- unsigned long intr_shadow;
-
- /*
- * Test EFLAGS.IF first. It is often the most likely reason for interrupt
- * blockage, and is the cheapest to test (because no VMREAD is required).
- */
- if ( (intack.source != hvm_intsrc_nmi) &&
- !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
- return hvm_intblk_rflags_ie;
-
- intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
-
- if ( intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
- return hvm_intblk_shadow;
-
- if ( intack.source == hvm_intsrc_nmi )
- return ((intr_shadow & VMX_INTR_SHADOW_NMI) ?
- hvm_intblk_nmi_iret : hvm_intblk_none);
-
- ASSERT((intack.source == hvm_intsrc_pic) ||
- (intack.source == hvm_intsrc_lapic));
+ return __vmread(GUEST_INTERRUPTIBILITY_INFO);
+}
- return hvm_intblk_none;
+static void vmx_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
+{
+ __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
}
static void vmx_update_host_cr3(struct vcpu *v)
@@ -1038,7 +1020,8 @@ static struct hvm_function_table vmx_function_table = {
.vcpu_destroy = vmx_vcpu_destroy,
.save_cpu_ctxt = vmx_save_vmcs_ctxt,
.load_cpu_ctxt = vmx_load_vmcs_ctxt,
- .interrupt_blocked = vmx_interrupt_blocked,
+ .get_interrupt_shadow = vmx_get_interrupt_shadow,
+ .set_interrupt_shadow = vmx_set_interrupt_shadow,
.guest_x86_mode = vmx_guest_x86_mode,
.get_segment_register = vmx_get_segment_register,
.set_segment_register = vmx_set_segment_register,
diff --git a/xen/arch/x86/hvm/vmx/x86_32/exits.S b/xen/arch/x86/hvm/vmx/x86_32/exits.S
index 56f241755d..70cd1dae36 100644
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S
+++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S
@@ -89,7 +89,7 @@ ENTRY(vmx_asm_vmexit_handler)
ALIGN
vmx_process_softirqs:
- sti
+ sti
call do_softirq
jmp vmx_asm_do_vmentry
@@ -104,6 +104,10 @@ ENTRY(vmx_asm_do_vmentry)
jnz vmx_process_softirqs
call vmx_intr_assist
+
+ testb $0xff,VCPU_vmx_emul(%ebx)
+ jnz vmx_goto_realmode
+
movl VCPU_hvm_guest_cr2(%ebx),%eax
movl %eax,%cr2
call vmx_trace_vmentry
@@ -115,9 +119,6 @@ ENTRY(vmx_asm_do_vmentry)
movl $GUEST_RFLAGS,%eax
VMWRITE(UREGS_eflags)
- testb $0xff,VCPU_vmx_emul(%ebx)
- jnz vmx_goto_realmode
-
cmpb $0,VCPU_vmx_launched(%ebx)
je vmx_launch
diff --git a/xen/arch/x86/hvm/vmx/x86_64/exits.S b/xen/arch/x86/hvm/vmx/x86_64/exits.S
index 738642f733..fda4f179b1 100644
--- a/xen/arch/x86/hvm/vmx/x86_64/exits.S
+++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S
@@ -105,7 +105,7 @@ ENTRY(vmx_asm_vmexit_handler)
ALIGN
vmx_process_softirqs:
- sti
+ sti
call do_softirq
jmp vmx_asm_do_vmentry
@@ -121,6 +121,10 @@ ENTRY(vmx_asm_do_vmentry)
jnz vmx_process_softirqs
call vmx_intr_assist
+
+ testb $0xff,VCPU_vmx_emul(%rbx)
+ jnz vmx_goto_realmode
+
movq VCPU_hvm_guest_cr2(%rbx),%rax
movq %rax,%cr2
call vmx_trace_vmentry
@@ -134,9 +138,6 @@ ENTRY(vmx_asm_do_vmentry)
movl $GUEST_RFLAGS,%eax
VMWRITE(UREGS_eflags)
- testb $0xff,VCPU_vmx_emul(%rbx)
- jnz vmx_goto_realmode
-
cmpb $0,VCPU_vmx_launched(%rbx)
je vmx_launch
diff --git a/xen/arch/x86/x86_emulate.c b/xen/arch/x86/x86_emulate.c
index 62a8495d0e..fb9f16df12 100644
--- a/xen/arch/x86/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate.c
@@ -482,7 +482,7 @@ do{ asm volatile ( \
if ( !mode_64bit() ) _eip = (uint32_t)_eip; /* ignore upper dword */ \
_regs.eip += (_size); /* real hardware doesn't truncate */ \
generate_exception_if((uint8_t)(_regs.eip - ctxt->regs->eip) > 15, \
- EXC_GP); \
+ EXC_GP, 0); \
rc = ops->insn_fetch(x86_seg_cs, _eip, &_x, (_size), ctxt); \
if ( rc ) goto done; \
_x; \
@@ -505,12 +505,12 @@ do { \
if ( rc ) goto done; \
} while (0)
-#define generate_exception_if(p, e) \
-({ if ( (p) ) { \
- fail_if(ops->inject_hw_exception == NULL); \
- rc = ops->inject_hw_exception(e, 0, ctxt) ? : X86EMUL_EXCEPTION; \
- goto done; \
- } \
+#define generate_exception_if(p, e, ec) \
+({ if ( (p) ) { \
+ fail_if(ops->inject_hw_exception == NULL); \
+ rc = ops->inject_hw_exception(e, ec, ctxt) ? : X86EMUL_EXCEPTION; \
+ goto done; \
+ } \
})
/*
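
Call sites adopt a simple convention for the macro's new third argument:
faults that push an error code pass it explicitly, while exceptions without
one pass -1, which inject_hw_exception() implementations treat as "nothing
to push". Two representative uses from later in this file:

    generate_exception_if(!mode_ring0(), EXC_GP, 0);      /* #GP(0) */
    generate_exception_if(ea.type != OP_MEM, EXC_UD, -1); /* #UD, no code */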
@@ -1023,6 +1023,8 @@ x86_emulate(
ea.mem.seg = x86_seg_ds;
ea.mem.off = 0;
+ ctxt->retire.byte = 0;
+
op_bytes = def_op_bytes = ad_bytes = def_ad_bytes = ctxt->addr_size/8;
if ( op_bytes == 8 )
{
@@ -1105,7 +1107,7 @@ x86_emulate(
}
/* Lock prefix is allowed only on RMW instructions. */
- generate_exception_if((d & Mov) && lock_prefix, EXC_GP);
+ generate_exception_if((d & Mov) && lock_prefix, EXC_GP, 0);
/* ModRM and SIB bytes. */
if ( d & ModRM )
@@ -1393,7 +1395,7 @@ x86_emulate(
}
/* LOCK prefix allowed only on instructions with memory destination. */
- generate_exception_if(lock_prefix && (dst.type != OP_MEM), EXC_GP);
+ generate_exception_if(lock_prefix && (dst.type != OP_MEM), EXC_GP, 0);
if ( twobyte )
goto twobyte_insn;
@@ -1459,14 +1461,15 @@ x86_emulate(
case 0x62: /* bound */ {
unsigned long src_val2;
int lb, ub, idx;
- generate_exception_if(mode_64bit() || (src.type != OP_MEM), EXC_UD);
+ generate_exception_if(mode_64bit() || (src.type != OP_MEM),
+ EXC_UD, -1);
if ( (rc = ops->read(src.mem.seg, src.mem.off + op_bytes,
&src_val2, op_bytes, ctxt)) )
goto done;
ub = (op_bytes == 2) ? (int16_t)src_val2 : (int32_t)src_val2;
lb = (op_bytes == 2) ? (int16_t)src.val : (int32_t)src.val;
idx = (op_bytes == 2) ? (int16_t)dst.val : (int32_t)dst.val;
- generate_exception_if((idx < lb) || (idx > ub), EXC_BR);
+ generate_exception_if((idx < lb) || (idx > ub), EXC_BR, -1);
dst.type = OP_NONE;
break;
}
@@ -1493,7 +1496,7 @@ x86_emulate(
dst.val = (dst.val & ~3) | (src_val & 3);
else
dst.type = OP_NONE;
- generate_exception_if(in_realmode(ctxt, ops), EXC_UD);
+ generate_exception_if(in_realmode(ctxt, ops), EXC_UD, -1);
}
break;
@@ -1534,7 +1537,7 @@ x86_emulate(
}
case 0x82: /* Grp1 (x86/32 only) */
- generate_exception_if(mode_64bit(), EXC_UD);
+ generate_exception_if(mode_64bit(), EXC_UD, -1);
case 0x80: case 0x81: case 0x83: /* Grp1 */
switch ( modrm_reg & 7 )
{
@@ -1571,7 +1574,7 @@ x86_emulate(
break;
case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */
- generate_exception_if((modrm_reg & 7) != 0, EXC_UD);
+ generate_exception_if((modrm_reg & 7) != 0, EXC_UD, -1);
case 0x88 ... 0x8b: /* mov */
dst.val = src.val;
break;
@@ -1579,7 +1582,7 @@ x86_emulate(
case 0x8c: /* mov Sreg,r/m */ {
struct segment_register reg;
enum x86_segment seg = decode_segment(modrm_reg);
- generate_exception_if(seg == decode_segment_failed, EXC_UD);
+ generate_exception_if(seg == decode_segment_failed, EXC_UD, -1);
fail_if(ops->read_segment == NULL);
if ( (rc = ops->read_segment(seg, &reg, ctxt)) != 0 )
goto done;
@@ -1591,9 +1594,11 @@ x86_emulate(
case 0x8e: /* mov r/m,Sreg */ {
enum x86_segment seg = decode_segment(modrm_reg);
- generate_exception_if(seg == decode_segment_failed, EXC_UD);
+ generate_exception_if(seg == decode_segment_failed, EXC_UD, -1);
if ( (rc = load_seg(seg, (uint16_t)src.val, ctxt, ops)) != 0 )
goto done;
+ if ( seg == x86_seg_ss )
+ ctxt->retire.flags.mov_ss = 1;
dst.type = OP_NONE;
break;
}
@@ -1603,7 +1608,7 @@ x86_emulate(
break;
case 0x8f: /* pop (sole member of Grp1a) */
- generate_exception_if((modrm_reg & 7) != 0, EXC_UD);
+ generate_exception_if((modrm_reg & 7) != 0, EXC_UD, -1);
/* 64-bit mode: POP defaults to a 64-bit operand. */
if ( mode_64bit() && (dst.bytes == 4) )
dst.bytes = 8;
@@ -1659,7 +1664,7 @@ x86_emulate(
unsigned long sel;
dst.val = x86_seg_es;
les: /* dst.val identifies the segment */
- generate_exception_if(src.type != OP_MEM, EXC_UD);
+ generate_exception_if(src.type != OP_MEM, EXC_UD, -1);
if ( (rc = ops->read(src.mem.seg, src.mem.off + src.bytes,
&sel, 2, ctxt)) != 0 )
goto done;
@@ -1797,7 +1802,7 @@ x86_emulate(
v = (uint8_t)src.val;
generate_exception_if(
div_dbl(u, v) || ((uint8_t)u[0] != (uint16_t)u[0]),
- EXC_DE);
+ EXC_DE, -1);
dst.val = (uint8_t)u[0];
((uint8_t *)&_regs.eax)[1] = u[1];
break;
@@ -1807,7 +1812,7 @@ x86_emulate(
v = (uint16_t)src.val;
generate_exception_if(
div_dbl(u, v) || ((uint16_t)u[0] != (uint32_t)u[0]),
- EXC_DE);
+ EXC_DE, -1);
dst.val = (uint16_t)u[0];
*(uint16_t *)&_regs.edx = u[1];
break;
@@ -1818,7 +1823,7 @@ x86_emulate(
v = (uint32_t)src.val;
generate_exception_if(
div_dbl(u, v) || ((uint32_t)u[0] != u[0]),
- EXC_DE);
+ EXC_DE, -1);
dst.val = (uint32_t)u[0];
_regs.edx = (uint32_t)u[1];
break;
@@ -1827,7 +1832,7 @@ x86_emulate(
u[0] = _regs.eax;
u[1] = _regs.edx;
v = src.val;
- generate_exception_if(div_dbl(u, v), EXC_DE);
+ generate_exception_if(div_dbl(u, v), EXC_DE, -1);
dst.val = u[0];
_regs.edx = u[1];
break;
@@ -1847,7 +1852,7 @@ x86_emulate(
v = (int8_t)src.val;
generate_exception_if(
idiv_dbl(u, v) || ((int8_t)u[0] != (int16_t)u[0]),
- EXC_DE);
+ EXC_DE, -1);
dst.val = (int8_t)u[0];
((int8_t *)&_regs.eax)[1] = u[1];
break;
@@ -1857,7 +1862,7 @@ x86_emulate(
v = (int16_t)src.val;
generate_exception_if(
idiv_dbl(u, v) || ((int16_t)u[0] != (int32_t)u[0]),
- EXC_DE);
+ EXC_DE, -1);
dst.val = (int16_t)u[0];
*(int16_t *)&_regs.edx = u[1];
break;
@@ -1868,7 +1873,7 @@ x86_emulate(
v = (int32_t)src.val;
generate_exception_if(
idiv_dbl(u, v) || ((int32_t)u[0] != u[0]),
- EXC_DE);
+ EXC_DE, -1);
dst.val = (int32_t)u[0];
_regs.edx = (uint32_t)u[1];
break;
@@ -1877,7 +1882,7 @@ x86_emulate(
u[0] = _regs.eax;
u[1] = _regs.edx;
v = src.val;
- generate_exception_if(idiv_dbl(u, v), EXC_DE);
+ generate_exception_if(idiv_dbl(u, v), EXC_DE, -1);
dst.val = u[0];
_regs.edx = u[1];
break;
@@ -1890,7 +1895,7 @@ x86_emulate(
break;
case 0xfe: /* Grp4 */
- generate_exception_if((modrm_reg & 7) >= 2, EXC_UD);
+ generate_exception_if((modrm_reg & 7) >= 2, EXC_UD, -1);
case 0xff: /* Grp5 */
switch ( modrm_reg & 7 )
{
@@ -1921,7 +1926,7 @@ x86_emulate(
case 5: /* jmp (far, absolute indirect) */ {
unsigned long sel;
- generate_exception_if(dst.type != OP_MEM, EXC_UD);
+ generate_exception_if(dst.type != OP_MEM, EXC_UD, -1);
if ( (rc = ops->read(dst.mem.seg, dst.mem.off+dst.bytes,
&sel, 2, ctxt)) )
@@ -1963,7 +1968,7 @@ x86_emulate(
dst.type = OP_NONE;
break;
case 7:
- generate_exception_if(1, EXC_UD);
+ generate_exception_if(1, EXC_UD, -1);
default:
goto cannot_emulate;
}
@@ -2003,11 +2008,9 @@ x86_emulate(
/* Commit shadow register state. */
_regs.eflags &= ~EFLG_RF;
*ctxt->regs = _regs;
-
- if ( (_regs.eflags & EFLG_TF) &&
- (rc == X86EMUL_OKAY) &&
+ if ( (_regs.eflags & EFLG_TF) && (rc == X86EMUL_OKAY) &&
(ops->inject_hw_exception != NULL) )
- rc = ops->inject_hw_exception(EXC_DB, 0, ctxt) ? : X86EMUL_EXCEPTION;
+ rc = ops->inject_hw_exception(EXC_DB, -1, ctxt) ? : X86EMUL_EXCEPTION;
done:
return rc;
@@ -2022,7 +2025,7 @@ x86_emulate(
generate_exception_if(lock_prefix &&
((b < 0x20) || (b > 0x23)) && /* MOV CRn/DRn */
(b != 0xc7), /* CMPXCHG{8,16}B */
- EXC_GP);
+ EXC_GP, 0);
if ( twobyte )
goto twobyte_special_insn;
@@ -2069,6 +2072,7 @@ x86_emulate(
case 0x17: /* pop %%ss */
src.val = x86_seg_ss;
+ ctxt->retire.flags.mov_ss = 1;
goto pop_seg;
case 0x1e: /* push %%ds */
@@ -2082,7 +2086,7 @@ x86_emulate(
case 0x27: /* daa */ {
uint8_t al = _regs.eax;
unsigned long eflags = _regs.eflags;
- generate_exception_if(mode_64bit(), EXC_UD);
+ generate_exception_if(mode_64bit(), EXC_UD, -1);
_regs.eflags &= ~(EFLG_CF|EFLG_AF);
if ( ((al & 0x0f) > 9) || (eflags & EFLG_AF) )
{
@@ -2104,7 +2108,7 @@ x86_emulate(
case 0x2f: /* das */ {
uint8_t al = _regs.eax;
unsigned long eflags = _regs.eflags;
- generate_exception_if(mode_64bit(), EXC_UD);
+ generate_exception_if(mode_64bit(), EXC_UD, -1);
_regs.eflags &= ~(EFLG_CF|EFLG_AF);
if ( ((al & 0x0f) > 9) || (eflags & EFLG_AF) )
{
@@ -2127,7 +2131,7 @@ x86_emulate(
case 0x37: /* aaa */
case 0x3f: /* aas */
- generate_exception_if(mode_64bit(), EXC_UD);
+ generate_exception_if(mode_64bit(), EXC_UD, -1);
_regs.eflags &= ~EFLG_CF;
if ( ((uint8_t)_regs.eax > 9) || (_regs.eflags & EFLG_AF) )
{
@@ -2171,7 +2175,7 @@ x86_emulate(
unsigned long regs[] = {
_regs.eax, _regs.ecx, _regs.edx, _regs.ebx,
_regs.esp, _regs.ebp, _regs.esi, _regs.edi };
- generate_exception_if(mode_64bit(), EXC_UD);
+ generate_exception_if(mode_64bit(), EXC_UD, -1);
for ( i = 0; i < 8; i++ )
if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
regs[i], op_bytes, ctxt)) != 0 )
@@ -2186,7 +2190,7 @@ x86_emulate(
(unsigned long *)&_regs.ebp, (unsigned long *)&dummy_esp,
(unsigned long *)&_regs.ebx, (unsigned long *)&_regs.edx,
(unsigned long *)&_regs.ecx, (unsigned long *)&_regs.eax };
- generate_exception_if(mode_64bit(), EXC_UD);
+ generate_exception_if(mode_64bit(), EXC_UD, -1);
for ( i = 0; i < 8; i++ )
{
if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
@@ -2224,7 +2228,7 @@ x86_emulate(
case 0x6c ... 0x6d: /* ins %dx,%es:%edi */ {
unsigned long nr_reps = get_rep_prefix();
- generate_exception_if(!mode_iopl(), EXC_GP);
+ generate_exception_if(!mode_iopl(), EXC_GP, 0);
dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
dst.mem.seg = x86_seg_es;
dst.mem.off = truncate_ea(_regs.edi);
@@ -2254,7 +2258,7 @@ x86_emulate(
case 0x6e ... 0x6f: /* outs %esi,%dx */ {
unsigned long nr_reps = get_rep_prefix();
- generate_exception_if(!mode_iopl(), EXC_GP);
+ generate_exception_if(!mode_iopl(), EXC_GP, 0);
dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
if ( (nr_reps > 1) && (ops->rep_outs != NULL) &&
((rc = ops->rep_outs(ea.mem.seg, truncate_ea(_regs.esi),
@@ -2333,7 +2337,7 @@ x86_emulate(
uint32_t eip;
fail_if(ops->read_segment == NULL);
- generate_exception_if(mode_64bit(), EXC_UD);
+ generate_exception_if(mode_64bit(), EXC_UD, -1);
eip = insn_fetch_bytes(op_bytes);
sel = insn_fetch_type(uint16_t);
@@ -2359,7 +2363,6 @@ x86_emulate(
uint32_t mask = EFLG_VIP | EFLG_VIF | EFLG_VM;
if ( !mode_iopl() )
mask |= EFLG_IOPL;
- fail_if(ops->write_rflags == NULL);
/* 64-bit mode: POP defaults to a 64-bit operand. */
if ( mode_64bit() && (op_bytes == 4) )
op_bytes = 8;
@@ -2371,8 +2374,6 @@ x86_emulate(
dst.val &= 0x257fd5;
_regs.eflags &= mask;
_regs.eflags |= (uint32_t)(dst.val & ~mask) | 0x02;
- if ( (rc = ops->write_rflags(_regs.eflags, ctxt)) != 0 )
- goto done;
break;
}
@@ -2597,7 +2598,7 @@ x86_emulate(
goto done;
case 0xce: /* into */
- generate_exception_if(mode_64bit(), EXC_UD);
+ generate_exception_if(mode_64bit(), EXC_UD, -1);
if ( !(_regs.eflags & EFLG_OF) )
break;
src.val = EXC_OF;
@@ -2609,7 +2610,6 @@ x86_emulate(
if ( !mode_iopl() )
mask |= EFLG_IOPL;
fail_if(!in_realmode(ctxt, ops));
- fail_if(ops->write_rflags == NULL);
if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
&eip, op_bytes, ctxt)) ||
(rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
@@ -2622,8 +2622,6 @@ x86_emulate(
eflags &= 0x257fd5;
_regs.eflags &= mask;
_regs.eflags |= (uint32_t)(eflags & ~mask) | 0x02;
- if ( (rc = ops->write_rflags(_regs.eflags, ctxt)) != 0 )
- goto done;
_regs.eip = eip;
if ( (rc = load_seg(x86_seg_cs, (uint16_t)cs, ctxt, ops)) != 0 )
goto done;
@@ -2633,8 +2631,8 @@ x86_emulate(
case 0xd4: /* aam */ {
unsigned int base = insn_fetch_type(uint8_t);
uint8_t al = _regs.eax;
- generate_exception_if(mode_64bit(), EXC_UD);
- generate_exception_if(base == 0, EXC_DE);
+ generate_exception_if(mode_64bit(), EXC_UD, -1);
+ generate_exception_if(base == 0, EXC_DE, -1);
*(uint16_t *)&_regs.eax = ((al / base) << 8) | (al % base);
_regs.eflags &= ~(EFLG_SF|EFLG_ZF|EFLG_PF);
_regs.eflags |= ((uint8_t)_regs.eax == 0) ? EFLG_ZF : 0;
@@ -2646,7 +2644,7 @@ x86_emulate(
case 0xd5: /* aad */ {
unsigned int base = insn_fetch_type(uint8_t);
uint16_t ax = _regs.eax;
- generate_exception_if(mode_64bit(), EXC_UD);
+ generate_exception_if(mode_64bit(), EXC_UD, -1);
*(uint16_t *)&_regs.eax = (uint8_t)(ax + ((ax >> 8) * base));
_regs.eflags &= ~(EFLG_SF|EFLG_ZF|EFLG_PF);
_regs.eflags |= ((uint8_t)_regs.eax == 0) ? EFLG_ZF : 0;
@@ -2656,7 +2654,7 @@ x86_emulate(
}
case 0xd6: /* salc */
- generate_exception_if(mode_64bit(), EXC_UD);
+ generate_exception_if(mode_64bit(), EXC_UD, -1);
*(uint8_t *)&_regs.eax = (_regs.eflags & EFLG_CF) ? 0xff : 0x00;
break;
@@ -2743,7 +2741,7 @@ x86_emulate(
unsigned int port = ((b < 0xe8)
? insn_fetch_type(uint8_t)
: (uint16_t)_regs.edx);
- generate_exception_if(!mode_iopl(), EXC_GP);
+ generate_exception_if(!mode_iopl(), EXC_GP, 0);
op_bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
if ( b & 2 )
{
@@ -2787,7 +2785,7 @@ x86_emulate(
case 0xea: /* jmp (far, absolute) */ {
uint16_t sel;
uint32_t eip;
- generate_exception_if(mode_64bit(), EXC_UD);
+ generate_exception_if(mode_64bit(), EXC_UD, -1);
eip = insn_fetch_bytes(op_bytes);
sel = insn_fetch_type(uint16_t);
if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
@@ -2807,9 +2805,7 @@ x86_emulate(
goto swint;
case 0xf4: /* hlt */
- fail_if(ops->hlt == NULL);
- if ( (rc = ops->hlt(ctxt)) != 0 )
- goto done;
+ ctxt->retire.flags.hlt = 1;
break;
case 0xf5: /* cmc */
@@ -2825,14 +2821,17 @@ x86_emulate(
break;
case 0xfa: /* cli */
- case 0xfb: /* sti */
- generate_exception_if(!mode_iopl(), EXC_GP);
- fail_if(ops->write_rflags == NULL);
+ generate_exception_if(!mode_iopl(), EXC_GP, 0);
_regs.eflags &= ~EFLG_IF;
- if ( b == 0xfb ) /* sti */
+ break;
+
+ case 0xfb: /* sti */
+ generate_exception_if(!mode_iopl(), EXC_GP, 0);
+ if ( !(_regs.eflags & EFLG_IF) )
+ {
_regs.eflags |= EFLG_IF;
- if ( (rc = ops->write_rflags(_regs.eflags, ctxt)) != 0 )
- goto done;
+ ctxt->retire.flags.sti = 1;
+ }
break;
case 0xfc: /* cld */
@@ -3001,7 +3000,7 @@ x86_emulate(
case 5: goto bts;
case 6: goto btr;
case 7: goto btc;
- default: generate_exception_if(1, EXC_UD);
+ default: generate_exception_if(1, EXC_UD, -1);
}
break;
@@ -3038,8 +3037,8 @@ x86_emulate(
if ( modrm == 0xdf ) /* invlpga */
{
- generate_exception_if(in_realmode(ctxt, ops), EXC_UD);
- generate_exception_if(!mode_ring0(), EXC_GP);
+ generate_exception_if(in_realmode(ctxt, ops), EXC_UD, -1);
+ generate_exception_if(!mode_ring0(), EXC_GP, 0);
fail_if(ops->invlpg == NULL);
if ( (rc = ops->invlpg(x86_seg_none, truncate_ea(_regs.eax),
ctxt)) )
@@ -3051,7 +3050,7 @@ x86_emulate(
{
case 0: /* sgdt */
case 1: /* sidt */
- generate_exception_if(ea.type != OP_MEM, EXC_UD);
+ generate_exception_if(ea.type != OP_MEM, EXC_UD, -1);
fail_if(ops->read_segment == NULL);
if ( (rc = ops->read_segment((modrm_reg & 1) ?
x86_seg_idtr : x86_seg_gdtr,
@@ -3067,7 +3066,7 @@ x86_emulate(
break;
case 2: /* lgdt */
case 3: /* lidt */
- generate_exception_if(ea.type != OP_MEM, EXC_UD);
+ generate_exception_if(ea.type != OP_MEM, EXC_UD, -1);
fail_if(ops->write_segment == NULL);
memset(&reg, 0, sizeof(reg));
if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0,
@@ -3108,8 +3107,8 @@ x86_emulate(
goto done;
break;
case 7: /* invlpg */
- generate_exception_if(!mode_ring0(), EXC_GP);
- generate_exception_if(ea.type != OP_MEM, EXC_UD);
+ generate_exception_if(!mode_ring0(), EXC_GP, 0);
+ generate_exception_if(ea.type != OP_MEM, EXC_UD, -1);
fail_if(ops->invlpg == NULL);
if ( (rc = ops->invlpg(ea.mem.seg, ea.mem.off, ctxt)) )
goto done;
@@ -3121,7 +3120,7 @@ x86_emulate(
}
case 0x06: /* clts */
- generate_exception_if(!mode_ring0(), EXC_GP);
+ generate_exception_if(!mode_ring0(), EXC_GP, 0);
fail_if((ops->read_cr == NULL) || (ops->write_cr == NULL));
if ( (rc = ops->read_cr(0, &dst.val, ctxt)) ||
(rc = ops->write_cr(0, dst.val&~8, ctxt)) )
@@ -3130,7 +3129,7 @@ x86_emulate(
case 0x08: /* invd */
case 0x09: /* wbinvd */
- generate_exception_if(!mode_ring0(), EXC_GP);
+ generate_exception_if(!mode_ring0(), EXC_GP, 0);
fail_if(ops->wbinvd == NULL);
if ( (rc = ops->wbinvd(ctxt)) != 0 )
goto done;
@@ -3145,7 +3144,7 @@ x86_emulate(
case 0x21: /* mov dr,reg */
case 0x22: /* mov reg,cr */
case 0x23: /* mov reg,dr */
- generate_exception_if(!mode_ring0(), EXC_GP);
+ generate_exception_if(!mode_ring0(), EXC_GP, 0);
modrm_rm |= (rex_prefix & 1) << 3;
modrm_reg |= lock_prefix << 3;
if ( b & 2 )
@@ -3182,7 +3181,7 @@ x86_emulate(
case 0x30: /* wrmsr */ {
uint64_t val = ((uint64_t)_regs.edx << 32) | (uint32_t)_regs.eax;
- generate_exception_if(!mode_ring0(), EXC_GP);
+ generate_exception_if(!mode_ring0(), EXC_GP, 0);
fail_if(ops->write_msr == NULL);
if ( (rc = ops->write_msr((uint32_t)_regs.ecx, val, ctxt)) != 0 )
goto done;
@@ -3195,7 +3194,7 @@ x86_emulate(
fail_if(ops->read_cr == NULL);
if ( (rc = ops->read_cr(4, &cr4, ctxt)) )
goto done;
- generate_exception_if((cr4 & CR4_TSD) && !mode_ring0(), EXC_GP);
+ generate_exception_if((cr4 & CR4_TSD) && !mode_ring0(), EXC_GP, 0);
fail_if(ops->read_msr == NULL);
if ( (rc = ops->read_msr(MSR_TSC, &val, ctxt)) != 0 )
goto done;
@@ -3206,7 +3205,7 @@ x86_emulate(
case 0x32: /* rdmsr */ {
uint64_t val;
- generate_exception_if(!mode_ring0(), EXC_GP);
+ generate_exception_if(!mode_ring0(), EXC_GP, 0);
fail_if(ops->read_msr == NULL);
if ( (rc = ops->read_msr((uint32_t)_regs.ecx, &val, ctxt)) != 0 )
goto done;
@@ -3255,8 +3254,8 @@ x86_emulate(
#if defined(__i386__)
{
unsigned long old_lo, old_hi;
- generate_exception_if((modrm_reg & 7) != 1, EXC_UD);
- generate_exception_if(ea.type != OP_MEM, EXC_UD);
+ generate_exception_if((modrm_reg & 7) != 1, EXC_UD, -1);
+ generate_exception_if(ea.type != OP_MEM, EXC_UD, -1);
if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0, &old_lo, 4, ctxt)) ||
(rc = ops->read(ea.mem.seg, ea.mem.off+4, &old_hi, 4, ctxt)) )
goto done;
@@ -3283,8 +3282,8 @@ x86_emulate(
#elif defined(__x86_64__)
{
unsigned long old, new;
- generate_exception_if((modrm_reg & 7) != 1, EXC_UD);
- generate_exception_if(ea.type != OP_MEM, EXC_UD);
+ generate_exception_if((modrm_reg & 7) != 1, EXC_UD, -1);
+ generate_exception_if(ea.type != OP_MEM, EXC_UD, -1);
if ( (rc = ops->read(ea.mem.seg, ea.mem.off, &old, 8, ctxt)) != 0 )
goto done;
if ( ((uint32_t)(old>>0) != (uint32_t)_regs.eax) ||