-rw-r--r--  xen/arch/x86/hvm/svm/svm.c           |   2
-rw-r--r--  xen/arch/x86/hvm/vmx/realmode.c      | 126
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c           |   4
-rw-r--r--  xen/arch/x86/hvm/vmx/x86_32/exits.S  |   4
-rw-r--r--  xen/arch/x86/hvm/vmx/x86_64/exits.S  |   4
-rw-r--r--  xen/arch/x86/mm/shadow/common.c      |  22
-rw-r--r--  xen/arch/x86/x86_32/asm-offsets.c    |   2
-rw-r--r--  xen/arch/x86/x86_64/asm-offsets.c    |   2
-rw-r--r--  xen/arch/x86/x86_emulate.c           | 167
-rw-r--r--  xen/include/asm-x86/hvm/vmx/vmcs.h   |  10
-rw-r--r--  xen/include/asm-x86/x86_emulate.h    |  12
11 files changed, 302 insertions, 53 deletions
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index ff7d5be231..5a94de6d96 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -578,8 +578,8 @@ static unsigned long svm_get_segment_base(struct vcpu *v, enum x86_segment seg)
case x86_seg_gdtr: return vmcb->gdtr.base;
case x86_seg_idtr: return vmcb->idtr.base;
case x86_seg_ldtr: svm_sync_vmcb(v); return vmcb->ldtr.base;
+ default: BUG();
}
- BUG();
return 0;
}
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 9c92ec1df5..74838d0680 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -118,6 +118,18 @@ static void realmode_deliver_exception(
}
}
+static uint32_t virtual_to_linear(
+ enum x86_segment seg,
+ uint32_t offset,
+ struct realmode_emulate_ctxt *rm_ctxt)
+{
+ uint32_t addr = offset;
+ if ( seg == x86_seg_none )
+ return addr;
+ ASSERT(is_x86_user_segment(seg));
+ return addr + rm_ctxt->seg_reg[seg].base;
+}
+
static int
realmode_read(
enum x86_segment seg,
@@ -127,14 +139,17 @@ realmode_read(
enum hvm_access_type access_type,
struct realmode_emulate_ctxt *rm_ctxt)
{
- uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;
+ uint32_t addr = virtual_to_linear(seg, offset, rm_ctxt);
*val = 0;
- if ( hvm_copy_from_guest_phys(val, addr, bytes) )
+ if ( hvm_copy_from_guest_virt_nofault(val, addr, bytes) )
{
struct vcpu *curr = current;
+ if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
+ return X86EMUL_UNHANDLEABLE;
+
if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
return X86EMUL_UNHANDLEABLE;
@@ -202,12 +217,15 @@ realmode_emulate_write(
{
struct realmode_emulate_ctxt *rm_ctxt =
container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
- uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;
+ uint32_t addr = virtual_to_linear(seg, offset, rm_ctxt);
- if ( hvm_copy_to_guest_phys(addr, &val, bytes) )
+ if ( hvm_copy_to_guest_virt_nofault(addr, &val, bytes) )
{
struct vcpu *curr = current;
+ if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
+ return X86EMUL_UNHANDLEABLE;
+
if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
return X86EMUL_UNHANDLEABLE;
@@ -244,7 +262,10 @@ realmode_rep_ins(
struct realmode_emulate_ctxt *rm_ctxt =
container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
struct vcpu *curr = current;
- uint32_t paddr = rm_ctxt->seg_reg[dst_seg].base + dst_offset;
+ uint32_t paddr = virtual_to_linear(dst_seg, dst_offset, rm_ctxt);
+
+ if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
+ return X86EMUL_UNHANDLEABLE;
if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
return X86EMUL_UNHANDLEABLE;
@@ -277,7 +298,10 @@ realmode_rep_outs(
struct realmode_emulate_ctxt *rm_ctxt =
container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
struct vcpu *curr = current;
- uint32_t paddr = rm_ctxt->seg_reg[src_seg].base + src_offset;
+ uint32_t paddr = virtual_to_linear(src_seg, src_offset, rm_ctxt);
+
+ if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
+ return X86EMUL_UNHANDLEABLE;
if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
return X86EMUL_UNHANDLEABLE;
@@ -310,9 +334,29 @@ realmode_write_segment(
{
struct realmode_emulate_ctxt *rm_ctxt =
container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
- memcpy(&rm_ctxt->seg_reg[seg], reg, sizeof(struct segment_register));
+ struct vcpu *curr = current;
+
+ if ( seg == x86_seg_cs )
+ {
+ if ( reg->attr.fields.dpl != 0 )
+ return X86EMUL_UNHANDLEABLE;
+ curr->arch.hvm_vmx.vmxemul &= ~VMXEMUL_BAD_CS;
+ if ( reg->sel & 3 )
+ curr->arch.hvm_vmx.vmxemul |= VMXEMUL_BAD_CS;
+ }
+
if ( seg == x86_seg_ss )
+ {
+ if ( reg->attr.fields.dpl != 0 )
+ return X86EMUL_UNHANDLEABLE;
+ curr->arch.hvm_vmx.vmxemul &= ~VMXEMUL_BAD_SS;
+ if ( reg->sel & 3 )
+ curr->arch.hvm_vmx.vmxemul |= VMXEMUL_BAD_SS;
rm_ctxt->flags.mov_ss = 1;
+ }
+
+ memcpy(&rm_ctxt->seg_reg[seg], reg, sizeof(struct segment_register));
+
return X86EMUL_OKAY;
}
@@ -336,7 +380,7 @@ realmode_read_io(
if ( !curr->arch.hvm_vmx.real_mode_io_completed )
return X86EMUL_RETRY;
-
+
*val = curr->arch.hvm_vmx.real_mode_io_data;
curr->arch.hvm_vmx.real_mode_io_completed = 0;
@@ -506,11 +550,19 @@ static int realmode_hlt(
static int realmode_inject_hw_exception(
uint8_t vector,
+ uint16_t error_code,
struct x86_emulate_ctxt *ctxt)
{
struct realmode_emulate_ctxt *rm_ctxt =
container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
+ /* We don't emulate protected-mode exception delivery. */
+ if ( current->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
+ return X86EMUL_UNHANDLEABLE;
+
+ if ( error_code != 0 )
+ return X86EMUL_UNHANDLEABLE;
+
rm_ctxt->exn_vector = vector;
rm_ctxt->exn_insn_len = 0;
@@ -525,6 +577,10 @@ static int realmode_inject_sw_interrupt(
struct realmode_emulate_ctxt *rm_ctxt =
container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
+ /* We don't emulate protected-mode exception delivery. */
+ if ( current->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
+ return X86EMUL_UNHANDLEABLE;
+
rm_ctxt->exn_vector = vector;
rm_ctxt->exn_insn_len = insn_len;
@@ -568,12 +624,22 @@ static void realmode_emulate_one(struct realmode_emulate_ctxt *rm_ctxt)
struct vcpu *curr = current;
u32 new_intr_shadow;
int rc, io_completed;
-
- rm_ctxt->insn_buf_eip = regs->eip;
- (void)hvm_copy_from_guest_phys(
- rm_ctxt->insn_buf,
- (uint32_t)(rm_ctxt->seg_reg[x86_seg_cs].base + regs->eip),
- sizeof(rm_ctxt->insn_buf));
+ unsigned long addr;
+
+ rm_ctxt->ctxt.addr_size =
+ rm_ctxt->seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
+ rm_ctxt->ctxt.sp_size =
+ rm_ctxt->seg_reg[x86_seg_ss].attr.fields.db ? 32 : 16;
+
+ rm_ctxt->insn_buf_eip = (uint32_t)regs->eip;
+ addr = virtual_to_linear(x86_seg_cs, regs->eip, rm_ctxt);
+ if ( hvm_fetch_from_guest_virt_nofault(rm_ctxt->insn_buf, addr,
+ sizeof(rm_ctxt->insn_buf))
+ != HVMCOPY_okay )
+ {
+ gdprintk(XENLOG_ERR, "Failed to pre-fetch instruction bytes.\n");
+ goto fail;
+ }
rm_ctxt->flag_word = 0;
@@ -670,39 +736,35 @@ void vmx_realmode(struct cpu_user_regs *regs)
for ( i = 0; i < 10; i++ )
hvm_get_segment_register(curr, i, &rm_ctxt.seg_reg[i]);
- rm_ctxt.ctxt.addr_size =
- rm_ctxt.seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
- rm_ctxt.ctxt.sp_size =
- rm_ctxt.seg_reg[x86_seg_ss].attr.fields.db ? 32 : 16;
-
rm_ctxt.intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
if ( curr->arch.hvm_vmx.real_mode_io_in_progress ||
curr->arch.hvm_vmx.real_mode_io_completed )
realmode_emulate_one(&rm_ctxt);
- if ( intr_info & INTR_INFO_VALID_MASK )
+ /* Only deliver interrupts into emulated real mode. */
+ if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
+ (intr_info & INTR_INFO_VALID_MASK) )
{
realmode_deliver_exception((uint8_t)intr_info, 0, &rm_ctxt);
__vmwrite(VM_ENTRY_INTR_INFO, 0);
}
- while ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
+ while ( curr->arch.hvm_vmx.vmxemul &&
!softirq_pending(smp_processor_id()) &&
- !hvm_local_events_need_delivery(curr) &&
- !curr->arch.hvm_vmx.real_mode_io_in_progress )
+ !curr->arch.hvm_vmx.real_mode_io_in_progress &&
+ /* Check for pending interrupts only in proper real mode. */
+ ((curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
+ !hvm_local_events_need_delivery(curr)) )
realmode_emulate_one(&rm_ctxt);
- /*
- * Cannot enter protected mode with bogus selector RPLs and DPLs. Hence we
- * fix up as best we can, even though this deviates from native execution
- */
- if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
+ if ( !curr->arch.hvm_vmx.vmxemul )
{
- /* CS.RPL == SS.RPL == SS.DPL == 0. */
- rm_ctxt.seg_reg[x86_seg_cs].sel &= ~3;
- rm_ctxt.seg_reg[x86_seg_ss].sel &= ~3;
- /* DS,ES,FS,GS: The most uninvasive trick is to set DPL == RPL. */
+ /*
+ * Cannot enter protected mode with bogus selector RPLs and DPLs.
+ * At this point CS.RPL == SS.RPL == CS.DPL == SS.DPL == 0. For
+ * DS, ES, FS and GS the most uninvasive trick is to set DPL == RPL.
+ */
rm_ctxt.seg_reg[x86_seg_ds].attr.fields.dpl =
rm_ctxt.seg_reg[x86_seg_ds].sel & 3;
rm_ctxt.seg_reg[x86_seg_es].attr.fields.dpl =
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 11d8550eca..09500b2dc1 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1061,6 +1061,10 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
vmx_fpu_enter(v);
}
+ v->arch.hvm_vmx.vmxemul &= ~VMXEMUL_REALMODE;
+ if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
+ v->arch.hvm_vmx.vmxemul |= VMXEMUL_REALMODE;
+
v->arch.hvm_vcpu.hw_cr[0] =
v->arch.hvm_vcpu.guest_cr[0] | hw_cr0_mask;
__vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
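The virtual_to_linear() helper added to realmode.c above simply folds the cached segment base into the offset (or passes the offset through unchanged for the x86_seg_none dummy). A standalone illustration of the underlying real-mode arithmetic follows; it is not part of the patch and the function name and values are illustrative. In plain real mode the cached base is the selector shifted left by four.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: plain real-mode translation, base = selector << 4. */
static uint32_t real_mode_linear(uint16_t sel, uint16_t offset)
{
    return ((uint32_t)sel << 4) + offset;
}

int main(void)
{
    /* The reset vector 0xF000:0xFFF0 maps to linear address 0xFFFF0. */
    assert(real_mode_linear(0xf000, 0xfff0) == 0xffff0);
    printf("%#x\n", real_mode_linear(0xf000, 0xfff0));
    return 0;
}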
diff --git a/xen/arch/x86/hvm/vmx/x86_32/exits.S b/xen/arch/x86/hvm/vmx/x86_32/exits.S
index 09202eaed3..860ad8cd86 100644
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S
+++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S
@@ -116,8 +116,8 @@ ENTRY(vmx_asm_do_vmentry)
VMWRITE(UREGS_eflags)
#ifndef VMXASSIST
- testb $X86_CR0_PE,VCPU_hvm_guest_cr0(%ebx)
- jz vmx_goto_realmode
+ testb $0xff,VCPU_vmx_emul(%ebx)
+ jnz vmx_goto_realmode
#endif
cmpb $0,VCPU_vmx_launched(%ebx)
diff --git a/xen/arch/x86/hvm/vmx/x86_64/exits.S b/xen/arch/x86/hvm/vmx/x86_64/exits.S
index 0adb337681..f6541b7685 100644
--- a/xen/arch/x86/hvm/vmx/x86_64/exits.S
+++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S
@@ -135,8 +135,8 @@ ENTRY(vmx_asm_do_vmentry)
VMWRITE(UREGS_eflags)
#ifndef VMXASSIST
- testb $X86_CR0_PE,VCPU_hvm_guest_cr0(%rbx)
- jz vmx_goto_realmode
+ testb $0xff,VCPU_vmx_emul(%rbx)
+ jnz vmx_goto_realmode
#endif
cmpb $0,VCPU_vmx_launched(%rbx)
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 7dc79c2e04..f7c73e008b 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -176,6 +176,8 @@ hvm_emulate_read(enum x86_segment seg,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
{
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
return hvm_read(seg, offset, val, bytes, hvm_access_read,
container_of(ctxt, struct sh_emulate_ctxt, ctxt));
}
@@ -191,6 +193,8 @@ hvm_emulate_insn_fetch(enum x86_segment seg,
container_of(ctxt, struct sh_emulate_ctxt, ctxt);
unsigned int insn_off = offset - sh_ctxt->insn_buf_eip;
+ ASSERT(seg == x86_seg_cs);
+
/* Fall back if requested bytes are not in the prefetch cache. */
if ( unlikely((insn_off + bytes) > sh_ctxt->insn_buf_bytes) )
return hvm_read(seg, offset, val, bytes,
@@ -215,6 +219,9 @@ hvm_emulate_write(enum x86_segment seg,
unsigned long addr;
int rc;
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
+
/* How many emulations could we save if we unshadowed on stack writes? */
if ( seg == x86_seg_ss )
perfc_incr(shadow_fault_emulate_stack);
@@ -242,6 +249,9 @@ hvm_emulate_cmpxchg(enum x86_segment seg,
unsigned long addr;
int rc;
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
+
rc = hvm_translate_linear_addr(
seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
if ( rc )
@@ -266,6 +276,9 @@ hvm_emulate_cmpxchg8b(enum x86_segment seg,
unsigned long addr;
int rc;
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
+
rc = hvm_translate_linear_addr(
seg, offset, 8, hvm_access_write, sh_ctxt, &addr);
if ( rc )
@@ -292,6 +305,9 @@ pv_emulate_read(enum x86_segment seg,
{
unsigned int rc;
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
+
*val = 0;
if ( (rc = copy_from_user((void *)val, (void *)offset, bytes)) != 0 )
{
@@ -312,6 +328,8 @@ pv_emulate_write(enum x86_segment seg,
struct sh_emulate_ctxt *sh_ctxt =
container_of(ctxt, struct sh_emulate_ctxt, ctxt);
struct vcpu *v = current;
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
return v->arch.paging.mode->shadow.x86_emulate_write(
v, offset, &val, bytes, sh_ctxt);
}
@@ -327,6 +345,8 @@ pv_emulate_cmpxchg(enum x86_segment seg,
struct sh_emulate_ctxt *sh_ctxt =
container_of(ctxt, struct sh_emulate_ctxt, ctxt);
struct vcpu *v = current;
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
return v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
v, offset, old, new, bytes, sh_ctxt);
}
@@ -343,6 +363,8 @@ pv_emulate_cmpxchg8b(enum x86_segment seg,
struct sh_emulate_ctxt *sh_ctxt =
container_of(ctxt, struct sh_emulate_ctxt, ctxt);
struct vcpu *v = current;
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
return v->arch.paging.mode->shadow.x86_emulate_cmpxchg8b(
v, offset, old_lo, old_hi, new_lo, new_hi, sh_ctxt);
}
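The is_x86_user_segment() checks added throughout this file rely on the segment enumeration extended in x86_emulate.h further down: the user-visible segment registers occupy the low end of the enum, so a single unsigned comparison rejects tr/ldtr/gdtr/idtr and the new x86_seg_none dummy. A standalone sketch follows; it is not part of the patch, and the ordering of the six user segments shown here is illustrative.

#include <assert.h>

enum x86_segment {
    /* User-visible segment registers first (ordering illustrative). */
    x86_seg_cs, x86_seg_ss, x86_seg_ds, x86_seg_es, x86_seg_fs, x86_seg_gs,
    /* System segments and the linear-addressing dummy follow. */
    x86_seg_tr, x86_seg_ldtr, x86_seg_gdtr, x86_seg_idtr, x86_seg_none
};

#define is_x86_user_segment(seg) ((unsigned)(seg) <= x86_seg_gs)

int main(void)
{
    assert(is_x86_user_segment(x86_seg_cs));
    assert(is_x86_user_segment(x86_seg_gs));
    assert(!is_x86_user_segment(x86_seg_ldtr));
    assert(!is_x86_user_segment(x86_seg_none));
    return 0;
}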
diff --git a/xen/arch/x86/x86_32/asm-offsets.c b/xen/arch/x86/x86_32/asm-offsets.c
index 2e08ccf2fd..ad8d1a2e6f 100644
--- a/xen/arch/x86/x86_32/asm-offsets.c
+++ b/xen/arch/x86/x86_32/asm-offsets.c
@@ -84,7 +84,7 @@ void __dummy__(void)
BLANK();
OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
- OFFSET(VCPU_hvm_guest_cr0, struct vcpu, arch.hvm_vcpu.guest_cr[0]);
+ OFFSET(VCPU_vmx_emul, struct vcpu, arch.hvm_vmx.vmxemul);
OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
BLANK();
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index 8a7c55f951..54a27b2ae1 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -103,7 +103,7 @@ void __dummy__(void)
BLANK();
OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
- OFFSET(VCPU_hvm_guest_cr0, struct vcpu, arch.hvm_vcpu.guest_cr[0]);
+ OFFSET(VCPU_vmx_emul, struct vcpu, arch.hvm_vmx.vmxemul);
OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
BLANK();
diff --git a/xen/arch/x86/x86_emulate.c b/xen/arch/x86/x86_emulate.c
index e559e3c91a..bf867a7b56 100644
--- a/xen/arch/x86/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate.c
@@ -303,7 +303,11 @@ struct operand {
#define EXC_OF 4
#define EXC_BR 5
#define EXC_UD 6
+#define EXC_TS 10
+#define EXC_NP 11
+#define EXC_SS 12
#define EXC_GP 13
+#define EXC_PF 14
/*
* Instruction emulation:
@@ -500,12 +504,12 @@ do { \
if ( rc ) goto done; \
} while (0)
-#define generate_exception_if(p, e) \
-({ if ( (p) ) { \
- fail_if(ops->inject_hw_exception == NULL); \
- rc = ops->inject_hw_exception(e, ctxt) ? : X86EMUL_EXCEPTION; \
- goto done; \
- } \
+#define generate_exception_if(p, e) \
+({ if ( (p) ) { \
+ fail_if(ops->inject_hw_exception == NULL); \
+ rc = ops->inject_hw_exception(e, 0, ctxt) ? : X86EMUL_EXCEPTION; \
+ goto done; \
+ } \
})
/*
@@ -774,7 +778,7 @@ in_realmode(
}
static int
-load_seg(
+realmode_load_seg(
enum x86_segment seg,
uint16_t sel,
struct x86_emulate_ctxt *ctxt,
@@ -783,11 +787,6 @@ load_seg(
struct segment_register reg;
int rc;
- if ( !in_realmode(ctxt, ops) ||
- (ops->read_segment == NULL) ||
- (ops->write_segment == NULL) )
- return X86EMUL_UNHANDLEABLE;
-
if ( (rc = ops->read_segment(seg, &reg, ctxt)) != 0 )
return rc;
@@ -797,6 +796,148 @@ load_seg(
return ops->write_segment(seg, &reg, ctxt);
}
+static int
+protmode_load_seg(
+ enum x86_segment seg,
+ uint16_t sel,
+ struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops)
+{
+ struct segment_register desctab, cs, segr;
+ struct { uint32_t a, b; } desc;
+ unsigned long val;
+ uint8_t dpl, rpl, cpl;
+ int rc, fault_type = EXC_TS;
+
+ /* NULL selector? */
+ if ( (sel & 0xfffc) == 0 )
+ {
+ if ( (seg == x86_seg_cs) || (seg == x86_seg_ss) )
+ goto raise_exn;
+ memset(&segr, 0, sizeof(segr));
+ return ops->write_segment(seg, &segr, ctxt);
+ }
+
+ /* LDT descriptor must be in the GDT. */
+ if ( (seg == x86_seg_ldtr) && (sel & 4) )
+ goto raise_exn;
+
+ if ( (rc = ops->read_segment(x86_seg_cs, &cs, ctxt)) ||
+ (rc = ops->read_segment((sel & 4) ? x86_seg_ldtr : x86_seg_gdtr,
+ &desctab, ctxt)) )
+ return rc;
+
+ /* Check against descriptor table limit. */
+ if ( ((sel & 0xfff8) + 7) > desctab.limit )
+ goto raise_exn;
+
+ do {
+ if ( (rc = ops->read(x86_seg_none, desctab.base + (sel & 0xfff8),
+ &val, 4, ctxt)) )
+ return rc;
+ desc.a = val;
+ if ( (rc = ops->read(x86_seg_none, desctab.base + (sel & 0xfff8) + 4,
+ &val, 4, ctxt)) )
+ return rc;
+ desc.b = val;
+
+ /* Segment present in memory? */
+ if ( !(desc.b & (1u<<15)) )
+ {
+ fault_type = EXC_NP;
+ goto raise_exn;
+ }
+
+ /* LDT descriptor is a system segment. All others are code/data. */
+ if ( (desc.b & (1u<<12)) == ((seg == x86_seg_ldtr) << 12) )
+ goto raise_exn;
+
+ dpl = (desc.b >> 13) & 3;
+ rpl = sel & 3;
+ cpl = cs.sel & 3;
+
+ switch ( seg )
+ {
+ case x86_seg_cs:
+ /* Code segment? */
+ if ( !(desc.b & (1u<<11)) )
+ goto raise_exn;
+ /* Non-conforming segment: check DPL against RPL. */
+ if ( ((desc.b & (6u<<9)) != (6u<<9)) && (dpl != rpl) )
+ goto raise_exn;
+ break;
+ case x86_seg_ss:
+ /* Writable data segment? */
+ if ( (desc.b & (5u<<9)) != (1u<<9) )
+ goto raise_exn;
+ if ( (dpl != cpl) || (dpl != rpl) )
+ goto raise_exn;
+ break;
+ case x86_seg_ldtr:
+ /* LDT system segment? */
+ if ( (desc.b & (15u<<8)) != (2u<<8) )
+ goto raise_exn;
+ goto skip_accessed_flag;
+ default:
+ /* Readable code or data segment? */
+ if ( (desc.b & (5u<<9)) == (4u<<9) )
+ goto raise_exn;
+ /* Non-conforming segment: check DPL against RPL and CPL. */
+ if ( ((desc.b & (6u<<9)) != (6u<<9)) && ((dpl < cpl) || (dpl < rpl)) )
+ goto raise_exn;
+ break;
+ }
+
+ /* Ensure Accessed flag is set. */
+ rc = ((desc.b & 0x100) ? X86EMUL_OKAY :
+ ops->cmpxchg(
+ x86_seg_none, desctab.base + (sel & 0xfff8) + 4, desc.b,
+ desc.b | 0x100, 4, ctxt));
+ } while ( rc == X86EMUL_CMPXCHG_FAILED );
+
+ if ( rc )
+ return rc;
+
+ /* Force the Accessed flag in our local copy. */
+ desc.b |= 0x100;
+
+ skip_accessed_flag:
+ segr.base = (((desc.b << 0) & 0xff000000u) |
+ ((desc.b << 16) & 0x00ff0000u) |
+ ((desc.a >> 16) & 0x0000ffffu));
+ segr.attr.bytes = (((desc.b >> 8) & 0x00ffu) |
+ ((desc.b >> 12) & 0x0f00u));
+ segr.limit = (desc.b & 0x000f0000u) | (desc.a & 0x0000ffffu);
+ if ( segr.attr.fields.g )
+ segr.limit = (segr.limit << 12) | 0xfffu;
+ segr.sel = sel;
+ return ops->write_segment(seg, &segr, ctxt);
+
+ raise_exn:
+ if ( ops->inject_hw_exception == NULL )
+ return X86EMUL_UNHANDLEABLE;
+ if ( (rc = ops->inject_hw_exception(fault_type, sel & 0xfffc, ctxt)) )
+ return rc;
+ return X86EMUL_EXCEPTION;
+}
+
+static int
+load_seg(
+ enum x86_segment seg,
+ uint16_t sel,
+ struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops)
+{
+ if ( (ops->read_segment == NULL) ||
+ (ops->write_segment == NULL) )
+ return X86EMUL_UNHANDLEABLE;
+
+ if ( in_realmode(ctxt, ops) )
+ return realmode_load_seg(seg, sel, ctxt, ops);
+
+ return protmode_load_seg(seg, sel, ctxt, ops);
+}
+
void *
decode_register(
uint8_t modrm_reg, struct cpu_user_regs *regs, int highbyte_regs)
@@ -1858,7 +1999,7 @@ x86_emulate(
if ( (_regs.eflags & EFLG_TF) &&
(rc == X86EMUL_OKAY) &&
(ops->inject_hw_exception != NULL) )
- rc = ops->inject_hw_exception(EXC_DB, ctxt) ? : X86EMUL_EXCEPTION;
+ rc = ops->inject_hw_exception(EXC_DB, 0, ctxt) ? : X86EMUL_EXCEPTION;
done:
return rc;
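A standalone worked example of the descriptor unpacking performed by protmode_load_seg() above; it is not part of the patch and the struct and function names are illustrative. The 8-byte GDT/LDT entry is read as two 32-bit words, and base, limit and attributes are reassembled from bits scattered across both. The test value 0x00cf9a000000ffff is a flat 4 GiB ring-0 code segment.

#include <assert.h>
#include <stdint.h>

struct desc_words { uint32_t a, b; };  /* a = low word, b = high word */

static void unpack_descriptor(struct desc_words d, uint32_t *base,
                              uint32_t *limit, uint16_t *attr)
{
    *base  = ((d.b <<  0) & 0xff000000u) |  /* base[31:24] */
             ((d.b << 16) & 0x00ff0000u) |  /* base[23:16] */
             ((d.a >> 16) & 0x0000ffffu);   /* base[15:0]  */
    *attr  = ((d.b >>  8) & 0x00ffu) |      /* type, S, DPL, P */
             ((d.b >> 12) & 0x0f00u);       /* AVL, L, D/B, G  */
    *limit = (d.b & 0x000f0000u) | (d.a & 0x0000ffffu);
    if ( *attr & 0x800 )                    /* G: 4kB granularity */
        *limit = (*limit << 12) | 0xfffu;
}

int main(void)
{
    struct desc_words d = { .a = 0x0000ffffu, .b = 0x00cf9a00u };
    uint32_t base, limit;
    uint16_t attr;

    unpack_descriptor(d, &base, &limit, &attr);
    assert((base == 0) && (limit == 0xffffffffu) && (attr == 0xc9a));
    return 0;
}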
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 7f8080739c..b3f7ed0730 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -95,10 +95,20 @@ struct arch_vmx_struct {
unsigned long host_cr0;
#ifdef VMXASSIST
+
unsigned long vmxassist_enabled:1;
unsigned long irqbase_mode:1;
unsigned char pm_irqbase[2];
+
#else
+
+ /* Are we emulating rather than VMENTERing? */
+#define VMXEMUL_REALMODE 1 /* Yes, because CR0.PE == 0 */
+#define VMXEMUL_BAD_CS 2 /* Yes, because CS.RPL != CPL */
+#define VMXEMUL_BAD_SS 4 /* Yes, because SS.RPL != CPL */
+ uint8_t vmxemul;
+
+ /* I/O request in flight to device model. */
bool_t real_mode_io_in_progress;
bool_t real_mode_io_completed;
unsigned long real_mode_io_data;
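The three VMXEMUL_* flags defined here are what the rewritten vmentry test in exits.S consults ("testb $0xff,VCPU_vmx_emul; jnz vmx_goto_realmode"): emulation continues while any flag is set. The patch updates the flags at three sites (vmx_update_guest_cr() for CR0.PE, realmode_write_segment() for CS and SS); the sketch below is not part of the patch and its helper is hypothetical, collapsing those sites into one function purely to show the combined condition.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define VMXEMUL_REALMODE 1  /* CR0.PE == 0 */
#define VMXEMUL_BAD_CS   2  /* CS.RPL != CPL */
#define VMXEMUL_BAD_SS   4  /* SS.RPL != CPL */

/* Hypothetical helper: recomputes all three flags in one place. */
static uint8_t recompute_vmxemul(bool cr0_pe, uint16_t cs_sel, uint16_t ss_sel)
{
    uint8_t vmxemul = 0;

    if ( !cr0_pe )
        vmxemul |= VMXEMUL_REALMODE;  /* set/cleared in vmx_update_guest_cr() */
    if ( cs_sel & 3 )                 /* RPL != 0; emulated guest runs at CPL 0 */
        vmxemul |= VMXEMUL_BAD_CS;    /* set/cleared in realmode_write_segment() */
    if ( ss_sel & 3 )
        vmxemul |= VMXEMUL_BAD_SS;
    return vmxemul;
}

int main(void)
{
    /* Real mode: must emulate. */
    assert(recompute_vmxemul(false, 0xf000, 0x0000) != 0);
    /* Protected mode, clean RPLs: VMENTER is allowed again. */
    assert(recompute_vmxemul(true, 0x0008, 0x0010) == 0);
    /* Protected mode but CS.RPL != 0: keep emulating until CS is reloaded. */
    assert(recompute_vmxemul(true, 0x000b, 0x0010) != 0);
    return 0;
}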
diff --git a/xen/include/asm-x86/x86_emulate.h b/xen/include/asm-x86/x86_emulate.h
index 17a76f936a..53d12f52be 100644
--- a/xen/include/asm-x86/x86_emulate.h
+++ b/xen/include/asm-x86/x86_emulate.h
@@ -39,9 +39,18 @@ enum x86_segment {
x86_seg_tr,
x86_seg_ldtr,
x86_seg_gdtr,
- x86_seg_idtr
+ x86_seg_idtr,
+ /*
+ * Dummy: used to emulate direct processor accesses to management
+ * structures (TSS, GDT, LDT, IDT, etc.) which use linear addressing
+ * (no segment component) and bypass usual segment- and page-level
+ * protection checks.
+ */
+ x86_seg_none
};
+#define is_x86_user_segment(seg) ((unsigned)(seg) <= x86_seg_gs)
+
/*
* Attribute for segment selector. This is a copy of bit 40:47 & 52:55 of the
* segment descriptor. It happens to match the format of an AMD SVM VMCB.
@@ -333,6 +342,7 @@ struct x86_emulate_ops
/* inject_hw_exception */
int (*inject_hw_exception)(
uint8_t vector,
+ uint16_t error_code,
struct x86_emulate_ctxt *ctxt);
/* inject_sw_interrupt */