author    Hollis Blanchard <hollisb@us.ibm.com>    2006-11-29 14:16:36 -0600
committer Hollis Blanchard <hollisb@us.ibm.com>    2006-11-29 14:16:36 -0600
commit    ab26a6a563a0acb589af87a8e063c0e171d75665 (patch)
tree      71a432bde5d016e928ab3ad7860fca01312ec787 /tools/firmware/vmxassist
parent    d3be8a6ca1aa9312cc01e780a2fea56ab8ec12b4 (diff)
parent    1c804664cf63f0c2e80d0420e52d5f82c3956685 (diff)
Merge with xen-unstable.hg.
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Diffstat (limited to 'tools/firmware/vmxassist')
-rw-r--r--  tools/firmware/vmxassist/Makefile   |   9
-rw-r--r--  tools/firmware/vmxassist/head.S     |   2
-rw-r--r--  tools/firmware/vmxassist/machine.h  |   1
-rw-r--r--  tools/firmware/vmxassist/setup.c    |   3
-rw-r--r--  tools/firmware/vmxassist/trap.S     |   2
-rw-r--r--  tools/firmware/vmxassist/util.c     |  38
-rw-r--r--  tools/firmware/vmxassist/util.h     |   1
-rw-r--r--  tools/firmware/vmxassist/vm86.c     | 274
-rw-r--r--  tools/firmware/vmxassist/vm86.h     |  10
9 files changed, 244 insertions(+), 96 deletions(-)
diff --git a/tools/firmware/vmxassist/Makefile b/tools/firmware/vmxassist/Makefile
index 08fea45781..70f7a0e0a3 100644
--- a/tools/firmware/vmxassist/Makefile
+++ b/tools/firmware/vmxassist/Makefile
@@ -32,14 +32,13 @@ DEFINES=-DDEBUG -DTEXTADDR=$(TEXTADDR)
XENINC=-I$(XEN_ROOT)/tools/libxc
# Disable PIE/SSP if GCC supports them. They can break us.
-CFLAGS += $(call test-gcc-flag,$(CC),-nopie)
-CFLAGS += $(call test-gcc-flag,$(CC),-fno-stack-protector)
-CFLAGS += $(call test-gcc-flag,$(CC),-fno-stack-protector-all)
+CFLAGS += $(call cc-option,$(CC),-nopie,)
+CFLAGS += $(call cc-option,$(CC),-fno-stack-protector,)
+CFLAGS += $(call cc-option,$(CC),-fno-stack-protector-all,)
CPP = cpp -P
OBJCOPY = objcopy -p -O binary -R .note -R .comment -R .bss -S --gap-fill=0
CFLAGS += $(DEFINES) -I. $(XENINC) -fno-builtin -O2 -msoft-float
-LDFLAGS = -m elf_i386
OBJECTS = head.o trap.o vm86.o setup.o util.o
@@ -48,7 +47,7 @@ all: vmxassist.bin
vmxassist.bin: vmxassist.ld $(OBJECTS)
$(CPP) $(DEFINES) vmxassist.ld > vmxassist.tmp
- $(LD) -o vmxassist $(LDFLAGS) -nostdlib --fatal-warnings -N -T vmxassist.tmp $(OBJECTS)
+ $(LD) -o vmxassist $(LDFLAGS_DIRECT) -nostdlib --fatal-warnings -N -T vmxassist.tmp $(OBJECTS)
nm -n vmxassist > vmxassist.sym
$(OBJCOPY) vmxassist vmxassist.tmp
dd if=vmxassist.tmp of=vmxassist.bin ibs=512 conv=sync
diff --git a/tools/firmware/vmxassist/head.S b/tools/firmware/vmxassist/head.S
index b183fac54e..a4cb614c68 100644
--- a/tools/firmware/vmxassist/head.S
+++ b/tools/firmware/vmxassist/head.S
@@ -59,7 +59,7 @@ _start16:
/* go to protected mode */
movl %cr0, %eax
- orl $CR0_PE, %eax
+ orl $(CR0_PE), %eax
movl %eax, %cr0
data32 ljmp $0x08, $1f
diff --git a/tools/firmware/vmxassist/machine.h b/tools/firmware/vmxassist/machine.h
index 82fa12965d..0ea2adfa84 100644
--- a/tools/firmware/vmxassist/machine.h
+++ b/tools/firmware/vmxassist/machine.h
@@ -36,6 +36,7 @@
#define CR4_VME (1 << 0)
#define CR4_PVI (1 << 1)
#define CR4_PSE (1 << 4)
+#define CR4_PAE (1 << 5)
#define EFLAGS_ZF (1 << 6)
#define EFLAGS_TF (1 << 8)
diff --git a/tools/firmware/vmxassist/setup.c b/tools/firmware/vmxassist/setup.c
index 07ef70a1df..4f82ca0d9e 100644
--- a/tools/firmware/vmxassist/setup.c
+++ b/tools/firmware/vmxassist/setup.c
@@ -53,13 +53,10 @@ unsigned pgd[NR_PGD] __attribute__ ((aligned(PGSIZE))) = { 0 };
struct e820entry e820map[] = {
{ 0x0000000000000000ULL, 0x000000000009F800ULL, E820_RAM },
{ 0x000000000009F800ULL, 0x0000000000000800ULL, E820_RESERVED },
- { 0x00000000000A0000ULL, 0x0000000000020000ULL, E820_IO },
{ 0x00000000000C0000ULL, 0x0000000000040000ULL, E820_RESERVED },
{ 0x0000000000100000ULL, 0x0000000000000000ULL, E820_RAM },
- { 0x0000000000000000ULL, 0x0000000000001000ULL, E820_SHARED_PAGE },
{ 0x0000000000000000ULL, 0x0000000000003000ULL, E820_NVS },
{ 0x0000000000003000ULL, 0x000000000000A000ULL, E820_ACPI },
- { 0x00000000FEC00000ULL, 0x0000000001400000ULL, E820_IO },
};
#endif /* TEST */
diff --git a/tools/firmware/vmxassist/trap.S b/tools/firmware/vmxassist/trap.S
index 468da0a5db..30e87adb85 100644
--- a/tools/firmware/vmxassist/trap.S
+++ b/tools/firmware/vmxassist/trap.S
@@ -106,7 +106,7 @@ common_trap: /* common trap handler */
pushl %es
pushal
- movl $DATA_SELECTOR, %eax /* make sure these are sane */
+ movl $(DATA_SELECTOR), %eax /* make sure these are sane */
movl %eax, %ds
movl %eax, %es
movl %eax, %fs
diff --git a/tools/firmware/vmxassist/util.c b/tools/firmware/vmxassist/util.c
index 0181fe702c..6ae4b25f79 100644
--- a/tools/firmware/vmxassist/util.c
+++ b/tools/firmware/vmxassist/util.c
@@ -29,6 +29,31 @@ static void putchar(int);
static char *printnum(char *, unsigned long, int);
static void _doprint(void (*)(int), char const *, va_list);
+void
+cpuid_addr_value(uint64_t addr, uint64_t *value)
+{
+ uint32_t addr_low = (uint32_t)addr;
+ uint32_t addr_high = (uint32_t)(addr >> 32);
+ uint32_t value_low, value_high;
+ static unsigned int addr_leaf;
+
+ if (!addr_leaf) {
+ unsigned int eax, ebx, ecx, edx;
+ __asm__ __volatile__(
+ "cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "0" (0x40000000));
+ addr_leaf = eax + 1;
+ }
+
+ __asm__ __volatile__(
+ "cpuid"
+ : "=c" (value_low), "=d" (value_high)
+ : "a" (addr_leaf), "0" (addr_low), "1" (addr_high)
+ : "ebx");
+
+ *value = (uint64_t)value_high << 32 | value_low;
+}
void
dump_regs(struct regs *regs)
@@ -37,14 +62,15 @@ dump_regs(struct regs *regs)
regs->eax, regs->ecx, regs->edx, regs->ebx);
printf("esp %8x ebp %8x esi %8x edi %8x\n",
regs->esp, regs->ebp, regs->esi, regs->edi);
- printf("eip %8x eflags %8x cs %8x ds %8x\n",
- regs->eip, regs->eflags, regs->cs, regs->ds);
- printf("es %8x fs %8x uss %8x uesp %8x\n",
- regs->es, regs->fs, regs->uss, regs->uesp);
+ printf("es %8x ds %8x fs %8x gs %8x\n",
+ regs->es, regs->ds, regs->fs, regs->gs);
+ printf("trapno %8x errno %8x\n", regs->trapno, regs->errno);
+ printf("eip %8x cs %8x eflags %8x\n",
+ regs->eip, regs->cs, regs->eflags);
+ printf("uesp %8x uss %8x \n",
+ regs->uesp, regs->uss);
printf("ves %8x vds %8x vfs %8x vgs %8x\n",
regs->ves, regs->vds, regs->vfs, regs->vgs);
- if (regs->trapno != -1 || regs->errno != -1)
- printf("trapno %8x errno %8x\n", regs->trapno, regs->errno);
printf("cr0 %8lx cr2 %8x cr3 %8lx cr4 %8lx\n",
(long)oldctx.cr0, get_cr2(),
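
The new cpuid_addr_value() in util.c wraps a vmxassist-specific convention: it queries CPUID leaf 0x40000000 for the hypervisor's top leaf, then invokes the next leaf with a guest-physical address in ECX:EDX and gets the 64-bit value stored at that address back in ECX:EDX. This lets 32-bit vmxassist read page-table and descriptor-table entries that sit above 4G, where it has no 1:1 mapping. A minimal usage sketch, built around a hypothetical read_gdt_entry() helper that factors out the pattern repeated later in address() and load_seg() (the helper name is illustrative, not part of the patch):

#include <stdint.h>

extern void cpuid_addr_value(uint64_t addr, uint64_t *value);  /* declared in util.h */

/* Hypothetical helper: fetch the 8-byte GDT descriptor for selector 'sel'.
 * Below 4G the descriptor is read through vmxassist's 1:1 mapping; above
 * 4G there is no mapping, so the CPUID-based accessor is used instead. */
static uint64_t
read_gdt_entry(uint64_t gdt_phys_base, unsigned sel)
{
	uint64_t entry;

	if (gdt_phys_base != (uint32_t)gdt_phys_base)
		cpuid_addr_value(gdt_phys_base + 8 * (sel >> 3), &entry);
	else
		entry = ((uint64_t *)(long)gdt_phys_base)[sel >> 3];

	return entry;
}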
diff --git a/tools/firmware/vmxassist/util.h b/tools/firmware/vmxassist/util.h
index b2ace92b8f..c426f4e846 100644
--- a/tools/firmware/vmxassist/util.h
+++ b/tools/firmware/vmxassist/util.h
@@ -31,6 +31,7 @@
struct vmx_assist_context;
+extern void cpuid_addr_value(uint64_t addr, uint64_t *value);
extern void hexdump(unsigned char *, int);
extern void dump_regs(struct regs *);
extern void dump_vmx_context(struct vmx_assist_context *);
diff --git a/tools/firmware/vmxassist/vm86.c b/tools/firmware/vmxassist/vm86.c
index 52a8ae236d..5bae53ed6d 100644
--- a/tools/firmware/vmxassist/vm86.c
+++ b/tools/firmware/vmxassist/vm86.c
@@ -52,34 +52,78 @@ char *states[] = {
static char *rnames[] = { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di" };
#endif /* DEBUG */
+#define PDE_PS (1 << 7)
#define PT_ENTRY_PRESENT 0x1
-static unsigned
-guest_linear_to_real(unsigned long base, unsigned off)
+/* We only support access to <=4G physical memory due to 1:1 mapping */
+static uint64_t
+guest_linear_to_phys(uint32_t base)
{
- unsigned int gcr3 = oldctx.cr3;
- unsigned int l1_mfn;
- unsigned int l0_mfn;
+ uint32_t gcr3 = oldctx.cr3;
+ uint64_t l2_mfn;
+ uint64_t l1_mfn;
+ uint64_t l0_mfn;
if (!(oldctx.cr0 & CR0_PG))
- return base + off;
+ return base;
+
+ if (!(oldctx.cr4 & CR4_PAE)) {
+ l1_mfn = ((uint32_t *)(long)gcr3)[(base >> 22) & 0x3ff];
+ if (!(l1_mfn & PT_ENTRY_PRESENT))
+ panic("l2 entry not present\n");
+
+ if ((oldctx.cr4 & CR4_PSE) && (l1_mfn & PDE_PS)) {
+ l0_mfn = l1_mfn & 0xffc00000;
+ return l0_mfn + (base & 0x3fffff);
+ }
+
+ l1_mfn &= 0xfffff000;
+
+ l0_mfn = ((uint32_t *)(long)l1_mfn)[(base >> 12) & 0x3ff];
+ if (!(l0_mfn & PT_ENTRY_PRESENT))
+ panic("l1 entry not present\n");
+ l0_mfn &= 0xfffff000;
+
+ return l0_mfn + (base & 0xfff);
+ } else {
+ l2_mfn = ((uint64_t *)(long)gcr3)[(base >> 30) & 0x3];
+ if (!(l2_mfn & PT_ENTRY_PRESENT))
+ panic("l3 entry not present\n");
+ l2_mfn &= 0xffffff000ULL;
+
+ if (l2_mfn & 0xf00000000ULL) {
+ printf("l2 page above 4G\n");
+ cpuid_addr_value(l2_mfn + 8 * ((base >> 21) & 0x1ff), &l1_mfn);
+ } else
+ l1_mfn = ((uint64_t *)(long)l2_mfn)[(base >> 21) & 0x1ff];
+ if (!(l1_mfn & PT_ENTRY_PRESENT))
+ panic("l2 entry not present\n");
+
+ if (l1_mfn & PDE_PS) { /* CR4.PSE is ignored in PAE mode */
+ l0_mfn = l1_mfn & 0xfffe00000ULL;
+ return l0_mfn + (base & 0x1fffff);
+ }
+
+ l1_mfn &= 0xffffff000ULL;
- l1_mfn = ((unsigned int *)gcr3)[(base >> 22) & 0x3ff ];
- if (!(l1_mfn & PT_ENTRY_PRESENT))
- panic("l2 entry not present\n");
- l1_mfn = l1_mfn & 0xfffff000 ;
+ if (l1_mfn & 0xf00000000ULL) {
+ printf("l1 page above 4G\n");
+ cpuid_addr_value(l1_mfn + 8 * ((base >> 12) & 0x1ff), &l0_mfn);
+ } else
+ l0_mfn = ((uint64_t *)(long)l1_mfn)[(base >> 12) & 0x1ff];
+ if (!(l0_mfn & PT_ENTRY_PRESENT))
+ panic("l1 entry not present\n");
- l0_mfn = ((unsigned int *)l1_mfn)[(base >> 12) & 0x3ff];
- if (!(l0_mfn & PT_ENTRY_PRESENT))
- panic("l1 entry not present\n");
- l0_mfn = l0_mfn & 0xfffff000;
+ l0_mfn &= 0xffffff000ULL;
- return l0_mfn + off + (base & 0xfff);
+ return l0_mfn + (base & 0xfff);
+ }
}
static unsigned
address(struct regs *regs, unsigned seg, unsigned off)
{
+ uint64_t gdt_phys_base;
unsigned long long entry;
unsigned seg_base, seg_limit;
unsigned entry_low, entry_high;
@@ -95,7 +139,13 @@ address(struct regs *regs, unsigned seg, unsigned off)
(mode == VM86_REAL_TO_PROTECTED && regs->cs == seg))
return ((seg & 0xFFFF) << 4) + off;
- entry = ((unsigned long long *) guest_linear_to_real(oldctx.gdtr_base, 0))[seg >> 3];
+ gdt_phys_base = guest_linear_to_phys(oldctx.gdtr_base);
+ if (gdt_phys_base != (uint32_t)gdt_phys_base) {
+ printf("gdt base address above 4G\n");
+ cpuid_addr_value(gdt_phys_base + 8 * (seg >> 3), &entry);
+ } else
+ entry = ((unsigned long long *)(long)gdt_phys_base)[seg >> 3];
+
entry_high = entry >> 32;
entry_low = entry & 0xFFFFFFFF;
@@ -763,12 +813,63 @@ pop(struct regs *regs, unsigned prefix, unsigned opc)
return 1;
}
+static int
+mov_to_seg(struct regs *regs, unsigned prefix, unsigned opc)
+{
+ unsigned modrm = fetch8(regs);
+
+ /* Only need to emulate segment loads in real->protected mode. */
+ if (mode != VM86_REAL_TO_PROTECTED)
+ return 0;
+
+ /* Register source only. */
+ if ((modrm & 0xC0) != 0xC0)
+ goto fail;
+
+ switch ((modrm & 0x38) >> 3) {
+ case 0: /* es */
+ regs->ves = getreg16(regs, modrm);
+ saved_rm_regs.ves = 0;
+ oldctx.es_sel = regs->ves;
+ return 1;
+
+ /* case 1: cs */
+
+ case 2: /* ss */
+ regs->uss = getreg16(regs, modrm);
+ saved_rm_regs.uss = 0;
+ oldctx.ss_sel = regs->uss;
+ return 1;
+ case 3: /* ds */
+ regs->vds = getreg16(regs, modrm);
+ saved_rm_regs.vds = 0;
+ oldctx.ds_sel = regs->vds;
+ return 1;
+ case 4: /* fs */
+ regs->vfs = getreg16(regs, modrm);
+ saved_rm_regs.vfs = 0;
+ oldctx.fs_sel = regs->vfs;
+ return 1;
+ case 5: /* gs */
+ regs->vgs = getreg16(regs, modrm);
+ saved_rm_regs.vgs = 0;
+ oldctx.gs_sel = regs->vgs;
+ return 1;
+ }
+
+ fail:
+ printf("%s:%d: missed opcode %02x %02x\n",
+ __FUNCTION__, __LINE__, opc, modrm);
+ return 0;
+}
+
/*
* Emulate a segment load in protected mode
*/
static int
load_seg(unsigned long sel, uint32_t *base, uint32_t *limit, union vmcs_arbytes *arbytes)
{
+ uint64_t gdt_phys_base;
unsigned long long entry;
/* protected mode: use seg as index into gdt */
@@ -780,7 +881,12 @@ load_seg(unsigned long sel, uint32_t *base, uint32_t *limit, union vmcs_arbytes
return 1;
}
- entry = ((unsigned long long *) guest_linear_to_real(oldctx.gdtr_base, 0))[sel >> 3];
+ gdt_phys_base = guest_linear_to_phys(oldctx.gdtr_base);
+ if (gdt_phys_base != (uint32_t)gdt_phys_base) {
+ printf("gdt base address above 4G\n");
+ cpuid_addr_value(gdt_phys_base + 8 * (sel >> 3), &entry);
+ } else
+ entry = ((unsigned long long *)(long)gdt_phys_base)[sel >> 3];
/* Check the P bit first */
if (!((entry >> (15+32)) & 0x1) && sel != 0)
@@ -811,6 +917,18 @@ load_seg(unsigned long sel, uint32_t *base, uint32_t *limit, union vmcs_arbytes
}
/*
+ * Emulate a protected mode segment load, falling back to clearing it if
+ * the descriptor was invalid.
+ */
+static void
+load_or_clear_seg(unsigned long sel, uint32_t *base, uint32_t *limit, union vmcs_arbytes *arbytes)
+{
+ if (!load_seg(sel, base, limit, arbytes))
+ load_seg(0, base, limit, arbytes);
+}
+
+
+/*
* Transition to protected mode
*/
static void
@@ -822,63 +940,22 @@ protected_mode(struct regs *regs)
oldctx.esp = regs->uesp;
oldctx.eflags = regs->eflags;
- memset(&saved_rm_regs, 0, sizeof(struct regs));
-
/* reload all segment registers */
if (!load_seg(regs->cs, &oldctx.cs_base,
&oldctx.cs_limit, &oldctx.cs_arbytes))
panic("Invalid %%cs=0x%x for protected mode\n", regs->cs);
oldctx.cs_sel = regs->cs;
- if (load_seg(regs->ves, &oldctx.es_base,
- &oldctx.es_limit, &oldctx.es_arbytes))
- oldctx.es_sel = regs->ves;
- else {
- load_seg(0, &oldctx.es_base,
- &oldctx.es_limit, &oldctx.es_arbytes);
- oldctx.es_sel = 0;
- saved_rm_regs.ves = regs->ves;
- }
-
- if (load_seg(regs->uss, &oldctx.ss_base,
- &oldctx.ss_limit, &oldctx.ss_arbytes))
- oldctx.ss_sel = regs->uss;
- else {
- load_seg(0, &oldctx.ss_base,
- &oldctx.ss_limit, &oldctx.ss_arbytes);
- oldctx.ss_sel = 0;
- saved_rm_regs.uss = regs->uss;
- }
-
- if (load_seg(regs->vds, &oldctx.ds_base,
- &oldctx.ds_limit, &oldctx.ds_arbytes))
- oldctx.ds_sel = regs->vds;
- else {
- load_seg(0, &oldctx.ds_base,
- &oldctx.ds_limit, &oldctx.ds_arbytes);
- oldctx.ds_sel = 0;
- saved_rm_regs.vds = regs->vds;
- }
-
- if (load_seg(regs->vfs, &oldctx.fs_base,
- &oldctx.fs_limit, &oldctx.fs_arbytes))
- oldctx.fs_sel = regs->vfs;
- else {
- load_seg(0, &oldctx.fs_base,
- &oldctx.fs_limit, &oldctx.fs_arbytes);
- oldctx.fs_sel = 0;
- saved_rm_regs.vfs = regs->vfs;
- }
-
- if (load_seg(regs->vgs, &oldctx.gs_base,
- &oldctx.gs_limit, &oldctx.gs_arbytes))
- oldctx.gs_sel = regs->vgs;
- else {
- load_seg(0, &oldctx.gs_base,
- &oldctx.gs_limit, &oldctx.gs_arbytes);
- oldctx.gs_sel = 0;
- saved_rm_regs.vgs = regs->vgs;
- }
+ load_or_clear_seg(oldctx.es_sel, &oldctx.es_base,
+ &oldctx.es_limit, &oldctx.es_arbytes);
+ load_or_clear_seg(oldctx.ss_sel, &oldctx.ss_base,
+ &oldctx.ss_limit, &oldctx.ss_arbytes);
+ load_or_clear_seg(oldctx.ds_sel, &oldctx.ds_base,
+ &oldctx.ds_limit, &oldctx.ds_arbytes);
+ load_or_clear_seg(oldctx.fs_sel, &oldctx.fs_base,
+ &oldctx.fs_limit, &oldctx.fs_arbytes);
+ load_or_clear_seg(oldctx.gs_sel, &oldctx.gs_base,
+ &oldctx.gs_limit, &oldctx.gs_arbytes);
/* initialize jump environment to warp back to protected mode */
regs->cs = CODE_SELECTOR;
@@ -966,6 +1043,16 @@ set_mode(struct regs *regs, enum vm86_mode newmode)
case VM86_REAL_TO_PROTECTED:
if (mode == VM86_REAL) {
regs->eflags |= EFLAGS_TF;
+ saved_rm_regs.vds = regs->vds;
+ saved_rm_regs.ves = regs->ves;
+ saved_rm_regs.vfs = regs->vfs;
+ saved_rm_regs.vgs = regs->vgs;
+ saved_rm_regs.uss = regs->uss;
+ oldctx.ds_sel = 0;
+ oldctx.es_sel = 0;
+ oldctx.fs_sel = 0;
+ oldctx.gs_sel = 0;
+ oldctx.ss_sel = 0;
break;
} else if (mode == VM86_REAL_TO_PROTECTED) {
break;
@@ -1194,6 +1281,18 @@ pushrm(struct regs *regs, int prefix, unsigned modrm)
enum { OPC_INVALID, OPC_EMULATED };
+#define rdmsr(msr,val1,val2) \
+ __asm__ __volatile__( \
+ "rdmsr" \
+ : "=a" (val1), "=d" (val2) \
+ : "c" (msr))
+
+#define wrmsr(msr,val1,val2) \
+ __asm__ __volatile__( \
+ "wrmsr" \
+ : /* no outputs */ \
+ : "c" (msr), "a" (val1), "d" (val2))
+
/*
* Emulate a single instruction, including all its prefixes. We only implement
* a small subset of the opcodes, and not all opcodes are implemented for each
@@ -1208,12 +1307,14 @@ opcode(struct regs *regs)
for (;;) {
switch ((opc = fetch8(regs))) {
- case 0x07:
- if (prefix & DATA32)
- regs->ves = pop32(regs);
- else
- regs->ves = pop16(regs);
+ case 0x07: /* pop %es */
+ regs->ves = (prefix & DATA32) ?
+ pop32(regs) : pop16(regs);
TRACE((regs, regs->eip - eip, "pop %%es"));
+ if (mode == VM86_REAL_TO_PROTECTED) {
+ saved_rm_regs.ves = 0;
+ oldctx.es_sel = regs->ves;
+ }
return OPC_EMULATED;
case 0x0F: /* two byte opcode */
@@ -1252,11 +1353,27 @@ opcode(struct regs *regs)
if (!movcr(regs, prefix, opc))
goto invalid;
return OPC_EMULATED;
+ case 0x30: /* WRMSR */
+ wrmsr(regs->ecx, regs->eax, regs->edx);
+ return OPC_EMULATED;
+ case 0x32: /* RDMSR */
+ rdmsr(regs->ecx, regs->eax, regs->edx);
+ return OPC_EMULATED;
default:
goto invalid;
}
goto invalid;
+ case 0x1F: /* pop %ds */
+ regs->vds = (prefix & DATA32) ?
+ pop32(regs) : pop16(regs);
+ TRACE((regs, regs->eip - eip, "pop %%ds"));
+ if (mode == VM86_REAL_TO_PROTECTED) {
+ saved_rm_regs.vds = 0;
+ oldctx.ds_sel = regs->vds;
+ }
+ return OPC_EMULATED;
+
case 0x26:
TRACE((regs, regs->eip - eip, "%%es:"));
prefix |= SEG_ES;
@@ -1343,6 +1460,11 @@ opcode(struct regs *regs)
goto invalid;
return OPC_EMULATED;
+ case 0x8E: /* mov r16, sreg */
+ if (!mov_to_seg(regs, prefix, opc))
+ goto invalid;
+ return OPC_EMULATED;
+
case 0x8F: /* addr32 pop r/m16 */
if ((prefix & ADDR32) == 0)
goto invalid;
@@ -1376,12 +1498,14 @@ opcode(struct regs *regs)
{
int addr, data;
int seg = segment(prefix, regs, regs->vds);
+ int offset = prefix & ADDR32? fetch32(regs) : fetch16(regs);
+
if (prefix & DATA32) {
- addr = address(regs, seg, fetch32(regs));
+ addr = address(regs, seg, offset);
data = read32(addr);
setreg32(regs, 0, data);
} else {
- addr = address(regs, seg, fetch16(regs));
+ addr = address(regs, seg, offset);
data = read16(addr);
setreg16(regs, 0, data);
}
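
The rewritten guest_linear_to_phys() above is the standard two-level (non-PAE) and three-level (PAE) x86 page walk performed through the guest's own tables. For readers following the masks, here is a minimal reference sketch of the index and offset arithmetic the function relies on; the helper names are ad hoc, purely for illustration:

#include <stdint.h>

/* Non-PAE, 2-level paging: 10-bit PDE index, 10-bit PTE index, 12-bit offset. */
static inline uint32_t pde_index_32(uint32_t va)  { return (va >> 22) & 0x3ff; }
static inline uint32_t pte_index_32(uint32_t va)  { return (va >> 12) & 0x3ff; }
static inline uint32_t off_4k(uint32_t va)        { return va & 0xfff; }
static inline uint32_t off_4m(uint32_t va)        { return va & 0x3fffff; }  /* CR4.PSE large page */

/* PAE, 3-level paging: 2-bit PDPT index, 9-bit PDE index, 9-bit PTE index. */
static inline uint32_t pdpt_index_pae(uint32_t va) { return (va >> 30) & 0x3; }
static inline uint32_t pde_index_pae(uint32_t va)  { return (va >> 21) & 0x1ff; }
static inline uint32_t pte_index_pae(uint32_t va)  { return (va >> 12) & 0x1ff; }
static inline uint32_t off_2m(uint32_t va)         { return va & 0x1fffff; } /* PDE.PS large page */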
diff --git a/tools/firmware/vmxassist/vm86.h b/tools/firmware/vmxassist/vm86.h
index 0c04dc6e73..4c6609daf0 100644
--- a/tools/firmware/vmxassist/vm86.h
+++ b/tools/firmware/vmxassist/vm86.h
@@ -33,11 +33,11 @@
#ifndef __ASSEMBLY__
struct regs {
- unsigned edi, esi, ebp, esp, ebx, edx, ecx, eax;
- unsigned ds, es, fs, gs;
- unsigned trapno, errno;
- unsigned eip, cs, eflags, uesp, uss;
- unsigned ves, vds, vfs, vgs;
+ unsigned edi, esi, ebp, esp, ebx, edx, ecx, eax;
+ unsigned es, ds, fs, gs;
+ unsigned trapno, errno;
+ unsigned eip, cs, eflags, uesp, uss;
+ unsigned ves, vds, vfs, vgs;
};
enum vm86_mode {
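
The vm86.h hunk swaps ds and es in struct regs so the field order tracks the trap frame built by trap.S, where %es is pushed immediately before pushal (visible in the trap.S hunk above) and therefore sits directly above the eight general registers. A build-time layout check along these lines, shown only as an illustrative sketch with the struct copied from the patch:

#include <stddef.h>

/* struct regs as it reads after this patch (vm86.h). */
struct regs {
	unsigned edi, esi, ebp, esp, ebx, edx, ecx, eax;
	unsigned es, ds, fs, gs;
	unsigned trapno, errno;
	unsigned eip, cs, eflags, uesp, uss;
	unsigned ves, vds, vfs, vgs;
};

/* Negative-size arrays make the build fail if the layout drifts: the pushal
 * block (edi at the lowest address, eax at the top) comes first, with es
 * directly above it. */
typedef char assert_eax_off[(offsetof(struct regs, eax) ==  7 * sizeof(unsigned)) ? 1 : -1];
typedef char assert_es_off [(offsetof(struct regs, es)  ==  8 * sizeof(unsigned)) ? 1 : -1];
typedef char assert_ds_off [(offsetof(struct regs, ds)  ==  9 * sizeof(unsigned)) ? 1 : -1];
typedef char assert_gs_off [(offsetof(struct regs, gs)  == 11 * sizeof(unsigned)) ? 1 : -1];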