author    kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2006-11-30 17:07:26 +0000
committer kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2006-11-30 17:07:26 +0000
commit    cffd8b0db89e6cead31090adf0c96d7d84ee6769 (patch)
tree      4c31a18eeec9555c4b9a13d10463bad100da9ecd
parent    c04d106a58f584781760e4c828401dfcb0d81580 (diff)
[XEN] Proper segmentation emulation added to HVM emulated PTE writes.
Signed-off-by: Keir Fraser <keir@xensource.com>
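In outline, the patch below (a) replaces the HVM-private `enum segment` with the emulator's generic `enum x86_segment`, (b) hoists SVM's segment-descriptor layout into a generic `struct segment_register` in hvm.h, and (c) adds two `hvm_funcs` hooks, `get_segment_register` and `inject_exception`, which the shadow code uses to apply real segmentation checks to emulated PTE writes. The standalone sketch here mirrors that hook pattern only; the `struct vcpu` argument is dropped for brevity, and the stub backend values are invented for illustration — it is not Xen code.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Mirrors the enum added to xen/include/asm-x86/x86_emulate.h. */
enum x86_segment {
    x86_seg_cs, x86_seg_ss, x86_seg_ds, x86_seg_es, x86_seg_fs, x86_seg_gs,
    x86_seg_tr, x86_seg_ldtr, x86_seg_gdtr, x86_seg_idtr
};

/* Condensed stand-in for struct segment_register from hvm.h. */
struct segment_register {
    uint16_t sel;
    uint16_t attr;      /* 12-bit VMCB-style attribute format */
    uint32_t limit;
    uint64_t base;
};

/* The two hooks this patch adds to struct hvm_function_table. */
struct hvm_function_table {
    void (*get_segment_register)(enum x86_segment seg,
                                 struct segment_register *reg);
    void (*inject_exception)(unsigned int trapnr, int errcode);
};

/* Hypothetical stub standing in for the SVM/VMX implementations. */
static void stub_get_segment_register(enum x86_segment seg,
                                      struct segment_register *reg)
{
    memset(reg, 0, sizeof(*reg));
    reg->limit = 0xfffff;      /* invented flat-segment values */
    reg->attr  = 0x0c93;
}

static void stub_inject_exception(unsigned int trapnr, int errcode)
{
    printf("inject exception %u, error code %d\n", trapnr, errcode);
}

static struct hvm_function_table hvm_funcs = {
    .get_segment_register = stub_get_segment_register,
    .inject_exception     = stub_inject_exception,
};

int main(void)
{
    struct segment_register ds;
    hvm_funcs.get_segment_register(x86_seg_ds, &ds);
    printf("ds: attr=%#x limit=%#x base=%#llx\n",
           (unsigned)ds.attr, (unsigned)ds.limit,
           (unsigned long long)ds.base);
    hvm_funcs.inject_exception(13 /* #GP */, 0);
    return 0;
}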
-rw-r--r--  xen/arch/x86/hvm/platform.c        |  18
-rw-r--r--  xen/arch/x86/hvm/svm/emulate.c     |   4
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c         |  98
-rw-r--r--  xen/arch/x86/hvm/svm/vmcb.c        |  22
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c         | 134
-rw-r--r--  xen/arch/x86/mm.c                  |   8
-rw-r--r--  xen/arch/x86/mm/shadow/common.c    | 104
-rw-r--r--  xen/arch/x86/x86_emulate.c         |  32
-rw-r--r--  xen/include/asm-x86/hvm/hvm.h      |  66
-rw-r--r--  xen/include/asm-x86/hvm/svm/vmcb.h |  50
-rw-r--r--  xen/include/asm-x86/x86_emulate.h  |  36
11 files changed, 395 insertions(+), 177 deletions(-)
diff --git a/xen/arch/x86/hvm/platform.c b/xen/arch/x86/hvm/platform.c
index 46eab76175..a822fc093f 100644
--- a/xen/arch/x86/hvm/platform.c
+++ b/xen/arch/x86/hvm/platform.c
@@ -920,7 +920,7 @@ void handle_mmio(unsigned long gpa)
df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;
mode = hvm_guest_x86_mode(v);
- inst_addr = hvm_get_segment_base(v, seg_cs) + regs->eip;
+ inst_addr = hvm_get_segment_base(v, x86_seg_cs) + regs->eip;
inst_len = hvm_instruction_length(inst_addr, mode);
if ( inst_len <= 0 )
{
@@ -964,10 +964,10 @@ void handle_mmio(unsigned long gpa)
addr = regs->edi;
if ( ad_size == WORD )
addr &= 0xFFFF;
- addr += hvm_get_segment_base(v, seg_es);
+ addr += hvm_get_segment_base(v, x86_seg_es);
if ( addr == gpa )
{
- enum segment seg;
+ enum x86_segment seg;
dir = IOREQ_WRITE;
addr = regs->esi;
@@ -975,13 +975,13 @@ void handle_mmio(unsigned long gpa)
addr &= 0xFFFF;
switch ( seg_sel )
{
- case 0x26: seg = seg_es; break;
- case 0x2e: seg = seg_cs; break;
- case 0x36: seg = seg_ss; break;
+ case 0x26: seg = x86_seg_es; break;
+ case 0x2e: seg = x86_seg_cs; break;
+ case 0x36: seg = x86_seg_ss; break;
case 0:
- case 0x3e: seg = seg_ds; break;
- case 0x64: seg = seg_fs; break;
- case 0x65: seg = seg_gs; break;
+ case 0x3e: seg = x86_seg_ds; break;
+ case 0x64: seg = x86_seg_fs; break;
+ case 0x65: seg = x86_seg_gs; break;
default: domain_crash_synchronous();
}
addr += hvm_get_segment_base(v, seg);
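The switch in the hunk above maps x86 segment-override prefix bytes onto the new enum; the same mapping reappears in the vmx.c hunk further down. A self-contained sketch of that decode, assuming the enum layout introduced later in this patch (note the hypervisor crashes the domain on an unknown prefix, whereas this sketch simply falls back to DS):

#include <stdio.h>

enum x86_segment { x86_seg_cs, x86_seg_ss, x86_seg_ds,
                   x86_seg_es, x86_seg_fs, x86_seg_gs };

/* Decode an x86 segment-override prefix byte; the default segment is DS. */
static enum x86_segment prefix_to_segment(unsigned char prefix)
{
    switch ( prefix )
    {
    case 0x26: return x86_seg_es;
    case 0x2e: return x86_seg_cs;
    case 0x36: return x86_seg_ss;
    case 0x64: return x86_seg_fs;
    case 0x65: return x86_seg_gs;
    case 0x3e:
    default:   return x86_seg_ds;  /* Xen instead crashes the domain here */
    }
}

int main(void)
{
    printf("prefix 0x64 -> %d (x86_seg_fs = %d)\n",
           prefix_to_segment(0x64), x86_seg_fs);
    return 0;
}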
diff --git a/xen/arch/x86/hvm/svm/emulate.c b/xen/arch/x86/hvm/svm/emulate.c
index 504bafe7db..c0ea001c82 100644
--- a/xen/arch/x86/hvm/svm/emulate.c
+++ b/xen/arch/x86/hvm/svm/emulate.c
@@ -209,7 +209,7 @@ unsigned long get_effective_addr_modrm64(struct vmcb_struct *vmcb,
#if __x86_64__
/* 64-bit mode */
- if (vmcb->cs.attributes.fields.l && (vmcb->efer & EFER_LMA))
+ if (vmcb->cs.attr.fields.l && (vmcb->efer & EFER_LMA))
return vmcb->rip + inst_len + *size + disp;
#endif
return disp;
@@ -334,7 +334,7 @@ unsigned long svm_rip2pointer(struct vmcb_struct *vmcb)
* no matter what kind of addressing is used.
*/
unsigned long p = vmcb->cs.base + vmcb->rip;
- if (!(vmcb->cs.attributes.fields.l && vmcb->efer & EFER_LMA))
+ if (!(vmcb->cs.attr.fields.l && vmcb->efer & EFER_LMA))
return (u32)p; /* mask to 32 bits */
/* NB. Should mask to 16 bits if in real mode or 16-bit protected mode. */
return p;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index a9cc9e9325..9c5393f5f5 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -476,13 +476,13 @@ static int svm_guest_x86_mode(struct vcpu *v)
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
if ( vmcb->efer & EFER_LMA )
- return (vmcb->cs.attributes.fields.l ?
+ return (vmcb->cs.attr.fields.l ?
X86EMUL_MODE_PROT64 : X86EMUL_MODE_PROT32);
if ( svm_realmode(v) )
return X86EMUL_MODE_REAL;
- return (vmcb->cs.attributes.fields.db ?
+ return (vmcb->cs.attr.fields.db ?
X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16);
}
@@ -509,31 +509,51 @@ unsigned long svm_get_ctrl_reg(struct vcpu *v, unsigned int num)
return 0; /* dummy */
}
-static unsigned long svm_get_segment_base(struct vcpu *v, enum segment seg)
+static unsigned long svm_get_segment_base(struct vcpu *v, enum x86_segment seg)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
int long_mode = 0;
#ifdef __x86_64__
- long_mode = vmcb->cs.attributes.fields.l && (vmcb->efer & EFER_LMA);
+ long_mode = vmcb->cs.attr.fields.l && (vmcb->efer & EFER_LMA);
#endif
switch ( seg )
{
- case seg_cs: return long_mode ? 0 : vmcb->cs.base;
- case seg_ds: return long_mode ? 0 : vmcb->ds.base;
- case seg_es: return long_mode ? 0 : vmcb->es.base;
- case seg_fs: return vmcb->fs.base;
- case seg_gs: return vmcb->gs.base;
- case seg_ss: return long_mode ? 0 : vmcb->ss.base;
- case seg_tr: return vmcb->tr.base;
- case seg_gdtr: return vmcb->gdtr.base;
- case seg_idtr: return vmcb->idtr.base;
- case seg_ldtr: return vmcb->ldtr.base;
+ case x86_seg_cs: return long_mode ? 0 : vmcb->cs.base;
+ case x86_seg_ds: return long_mode ? 0 : vmcb->ds.base;
+ case x86_seg_es: return long_mode ? 0 : vmcb->es.base;
+ case x86_seg_fs: return vmcb->fs.base;
+ case x86_seg_gs: return vmcb->gs.base;
+ case x86_seg_ss: return long_mode ? 0 : vmcb->ss.base;
+ case x86_seg_tr: return vmcb->tr.base;
+ case x86_seg_gdtr: return vmcb->gdtr.base;
+ case x86_seg_idtr: return vmcb->idtr.base;
+ case x86_seg_ldtr: return vmcb->ldtr.base;
}
BUG();
return 0;
}
+static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
+ struct segment_register *reg)
+{
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ switch ( seg )
+ {
+ case x86_seg_cs: memcpy(reg, &vmcb->cs, sizeof(*reg)); break;
+ case x86_seg_ds: memcpy(reg, &vmcb->ds, sizeof(*reg)); break;
+ case x86_seg_es: memcpy(reg, &vmcb->es, sizeof(*reg)); break;
+ case x86_seg_fs: memcpy(reg, &vmcb->fs, sizeof(*reg)); break;
+ case x86_seg_gs: memcpy(reg, &vmcb->gs, sizeof(*reg)); break;
+ case x86_seg_ss: memcpy(reg, &vmcb->ss, sizeof(*reg)); break;
+ case x86_seg_tr: memcpy(reg, &vmcb->tr, sizeof(*reg)); break;
+ case x86_seg_gdtr: memcpy(reg, &vmcb->gdtr, sizeof(*reg)); break;
+ case x86_seg_idtr: memcpy(reg, &vmcb->idtr, sizeof(*reg)); break;
+ case x86_seg_ldtr: memcpy(reg, &vmcb->ldtr, sizeof(*reg)); break;
+ default: BUG();
+ }
+}
+
/* Make sure that xen intercepts any FP accesses from current */
static void svm_stts(struct vcpu *v)
{
@@ -785,6 +805,11 @@ static void svm_vcpu_destroy(struct vcpu *v)
svm_destroy_vmcb(v);
}
+static void svm_hvm_inject_exception(unsigned int trapnr, int errcode)
+{
+ svm_inject_exception(current, trapnr, (errcode != -1), errcode);
+}
+
int start_svm(void)
{
u32 eax, ecx, edx;
@@ -844,12 +869,15 @@ int start_svm(void)
hvm_funcs.guest_x86_mode = svm_guest_x86_mode;
hvm_funcs.get_guest_ctrl_reg = svm_get_ctrl_reg;
hvm_funcs.get_segment_base = svm_get_segment_base;
+ hvm_funcs.get_segment_register = svm_get_segment_register;
hvm_funcs.update_host_cr3 = svm_update_host_cr3;
hvm_funcs.stts = svm_stts;
hvm_funcs.set_tsc_offset = svm_set_tsc_offset;
+ hvm_funcs.inject_exception = svm_hvm_inject_exception;
+
hvm_funcs.init_ap_context = svm_init_ap_context;
hvm_funcs.init_hypercall_page = svm_init_hypercall_page;
@@ -1154,7 +1182,7 @@ static void svm_dr_access(struct vcpu *v, struct cpu_user_regs *regs)
static void svm_get_prefix_info(
struct vmcb_struct *vmcb,
- unsigned int dir, segment_selector_t **seg, unsigned int *asize)
+ unsigned int dir, svm_segment_register_t **seg, unsigned int *asize)
{
unsigned char inst[MAX_INST_LEN];
int i;
@@ -1235,18 +1263,18 @@ static inline int svm_get_io_address(
unsigned long reg;
unsigned int asize, isize;
int long_mode = 0;
- segment_selector_t *seg = NULL;
+ svm_segment_register_t *seg = NULL;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
#ifdef __x86_64__
/* If we're in long mode, we shouldn't check the segment presence & limit */
- long_mode = vmcb->cs.attributes.fields.l && vmcb->efer & EFER_LMA;
+ long_mode = vmcb->cs.attr.fields.l && vmcb->efer & EFER_LMA;
#endif
- /* d field of cs.attributes is 1 for 32-bit, 0 for 16 or 64 bit.
+ /* d field of cs.attr is 1 for 32-bit, 0 for 16 or 64 bit.
* l field combined with EFER_LMA -> longmode says whether it's 16 or 64 bit.
*/
- asize = (long_mode)?64:((vmcb->cs.attributes.fields.db)?32:16);
+ asize = (long_mode)?64:((vmcb->cs.attr.fields.db)?32:16);
/* The ins/outs instructions are single byte, so if we have got more
@@ -1266,7 +1294,7 @@ static inline int svm_get_io_address(
reg = regs->esi;
if (!seg) /* If no prefix, use DS. */
seg = &vmcb->ds;
- if (!long_mode && (seg->attributes.fields.type & 0xa) == 0x8) {
+ if (!long_mode && (seg->attr.fields.type & 0xa) == 0x8) {
svm_inject_exception(v, TRAP_gp_fault, 1, 0);
return 0;
}
@@ -1275,14 +1303,14 @@ static inline int svm_get_io_address(
{
reg = regs->edi;
seg = &vmcb->es; /* Note: This is ALWAYS ES. */
- if (!long_mode && (seg->attributes.fields.type & 0xa) != 0x2) {
+ if (!long_mode && (seg->attr.fields.type & 0xa) != 0x2) {
svm_inject_exception(v, TRAP_gp_fault, 1, 0);
return 0;
}
}
/* If the segment isn't present, give GP fault! */
- if (!long_mode && !seg->attributes.fields.p)
+ if (!long_mode && !seg->attr.fields.p)
{
svm_inject_exception(v, TRAP_gp_fault, 1, 0);
return 0;
@@ -1305,7 +1333,7 @@ static inline int svm_get_io_address(
{
ASSERT(*addr == (u32)*addr);
if ((u32)(*addr + size - 1) < (u32)*addr ||
- (seg->attributes.fields.type & 0xc) != 0x4 ?
+ (seg->attr.fields.type & 0xc) != 0x4 ?
*addr + size - 1 > seg->limit :
*addr <= seg->limit)
{
@@ -1318,9 +1346,9 @@ static inline int svm_get_io_address(
occur. Note that the checking is not necessary for page granular
segments as transfers crossing page boundaries will be broken up
anyway. */
- if (!seg->attributes.fields.g && *count > 1)
+ if (!seg->attr.fields.g && *count > 1)
{
- if ((seg->attributes.fields.type & 0xc) != 0x4)
+ if ((seg->attr.fields.type & 0xc) != 0x4)
{
/* expand-up */
if (!(regs->eflags & EF_DF))
@@ -2154,52 +2182,52 @@ static int svm_do_vmmcall_reset_to_realmode(struct vcpu *v,
/* setup the segment registers and all their hidden states */
vmcb->cs.sel = 0xF000;
- vmcb->cs.attributes.bytes = 0x089b;
+ vmcb->cs.attr.bytes = 0x089b;
vmcb->cs.limit = 0xffff;
vmcb->cs.base = 0x000F0000;
vmcb->ss.sel = 0x00;
- vmcb->ss.attributes.bytes = 0x0893;
+ vmcb->ss.attr.bytes = 0x0893;
vmcb->ss.limit = 0xffff;
vmcb->ss.base = 0x00;
vmcb->ds.sel = 0x00;
- vmcb->ds.attributes.bytes = 0x0893;
+ vmcb->ds.attr.bytes = 0x0893;
vmcb->ds.limit = 0xffff;
vmcb->ds.base = 0x00;
vmcb->es.sel = 0x00;
- vmcb->es.attributes.bytes = 0x0893;
+ vmcb->es.attr.bytes = 0x0893;
vmcb->es.limit = 0xffff;
vmcb->es.base = 0x00;
vmcb->fs.sel = 0x00;
- vmcb->fs.attributes.bytes = 0x0893;
+ vmcb->fs.attr.bytes = 0x0893;
vmcb->fs.limit = 0xffff;
vmcb->fs.base = 0x00;
vmcb->gs.sel = 0x00;
- vmcb->gs.attributes.bytes = 0x0893;
+ vmcb->gs.attr.bytes = 0x0893;
vmcb->gs.limit = 0xffff;
vmcb->gs.base = 0x00;
vmcb->ldtr.sel = 0x00;
- vmcb->ldtr.attributes.bytes = 0x0000;
+ vmcb->ldtr.attr.bytes = 0x0000;
vmcb->ldtr.limit = 0x0;
vmcb->ldtr.base = 0x00;
vmcb->gdtr.sel = 0x00;
- vmcb->gdtr.attributes.bytes = 0x0000;
+ vmcb->gdtr.attr.bytes = 0x0000;
vmcb->gdtr.limit = 0x0;
vmcb->gdtr.base = 0x00;
vmcb->tr.sel = 0;
- vmcb->tr.attributes.bytes = 0;
+ vmcb->tr.attr.bytes = 0;
vmcb->tr.limit = 0x0;
vmcb->tr.base = 0;
vmcb->idtr.sel = 0x00;
- vmcb->idtr.attributes.bytes = 0x0000;
+ vmcb->idtr.attr.bytes = 0x0000;
vmcb->idtr.limit = 0x3ff;
vmcb->idtr.base = 0x00;
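The reset-to-realmode constants above (0x089b for CS, 0x0893 for the data segments) pack descriptor bits 40-43 (type), 44 (S), 45-46 (DPL), 47 (P), 52 (AVL), 53 (L), 54 (D/B) and 55 (G) into the 12-bit layout that segment_attributes_t defines later in this patch. A standalone decode using the same bitfield union, assuming GCC's LSB-first bitfield layout on x86 (as the hypervisor itself does):

#include <stdio.h>
#include <stdint.h>

/* Copied from the segment_attributes_t union added to hvm.h below. */
typedef union segment_attributes {
    uint16_t bytes;
    struct {
        uint16_t type:4;  /* descriptor bits 40-43 */
        uint16_t s:1;     /* bit 44: code/data (1) vs system (0) */
        uint16_t dpl:2;   /* bits 45-46 */
        uint16_t p:1;     /* bit 47: present */
        uint16_t avl:1;   /* bit 52 */
        uint16_t l:1;     /* bit 53: 64-bit code */
        uint16_t db:1;    /* bit 54: default operand size */
        uint16_t g:1;     /* bit 55: 4K granularity */
    } fields;
} segment_attributes_t;

static void decode(const char *name, uint16_t bytes)
{
    segment_attributes_t a = { .bytes = bytes };
    printf("%s=0x%04x: type=0x%x s=%u dpl=%u p=%u avl=%u l=%u db=%u g=%u\n",
           name, bytes, a.fields.type, a.fields.s, a.fields.dpl,
           a.fields.p, a.fields.avl, a.fields.l, a.fields.db, a.fields.g);
}

int main(void)
{
    decode("cs", 0x089b);  /* CS value from the reset path above */
    decode("ss", 0x0893);  /* data-segment value from the reset path above */
    return 0;
}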
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index f00372440e..b32db1b62c 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -90,7 +90,7 @@ static int construct_vmcb(struct vcpu *v)
{
struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
struct vmcb_struct *vmcb = arch_svm->vmcb;
- segment_attributes_t attrib;
+ svm_segment_attributes_t attrib;
/* Always flush the TLB on VMRUN. */
vmcb->tlb_control = 1;
@@ -166,13 +166,13 @@ static int construct_vmcb(struct vcpu *v)
attrib.fields.p = 1; /* segment present */
attrib.fields.db = 1; /* 32-bit */
attrib.fields.g = 1; /* 4K pages in limit */
- vmcb->es.attributes = attrib;
- vmcb->ss.attributes = attrib;
- vmcb->ds.attributes = attrib;
- vmcb->fs.attributes = attrib;
- vmcb->gs.attributes = attrib;
+ vmcb->es.attr = attrib;
+ vmcb->ss.attr = attrib;
+ vmcb->ds.attr = attrib;
+ vmcb->fs.attr = attrib;
+ vmcb->gs.attr = attrib;
attrib.fields.type = 0xb; /* type=0xb -> executable/readable, accessed */
- vmcb->cs.attributes = attrib;
+ vmcb->cs.attr = attrib;
/* Guest IDT. */
vmcb->idtr.base = 0;
@@ -186,11 +186,11 @@ static int construct_vmcb(struct vcpu *v)
vmcb->ldtr.sel = 0;
vmcb->ldtr.base = 0;
vmcb->ldtr.limit = 0;
- vmcb->ldtr.attributes.bytes = 0;
+ vmcb->ldtr.attr.bytes = 0;
/* Guest TSS. */
attrib.fields.type = 0xb; /* 32-bit TSS (busy) */
- vmcb->tr.attributes = attrib;
+ vmcb->tr.attr = attrib;
vmcb->tr.base = 0;
vmcb->tr.limit = 0xff;
@@ -278,10 +278,10 @@ void svm_do_launch(struct vcpu *v)
v->arch.schedule_tail = arch_svm_do_resume;
}
-static void svm_dump_sel(char *name, segment_selector_t *s)
+static void svm_dump_sel(char *name, svm_segment_register_t *s)
{
printk("%s: sel=0x%04x, attr=0x%04x, limit=0x%08x, base=0x%016llx\n",
- name, s->sel, s->attributes.bytes, s->limit,
+ name, s->sel, s->attr.bytes, s->limit,
(unsigned long long)s->base);
}
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 7ccd218f53..6a1f70a79d 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -501,7 +501,7 @@ static unsigned long vmx_get_ctrl_reg(struct vcpu *v, unsigned int num)
return 0; /* dummy */
}
-static unsigned long vmx_get_segment_base(struct vcpu *v, enum segment seg)
+static unsigned long vmx_get_segment_base(struct vcpu *v, enum x86_segment seg)
{
unsigned long base = 0;
int long_mode = 0;
@@ -516,22 +516,94 @@ static unsigned long vmx_get_segment_base(struct vcpu *v, enum segment seg)
switch ( seg )
{
- case seg_cs: if ( !long_mode ) base = __vmread(GUEST_CS_BASE); break;
- case seg_ds: if ( !long_mode ) base = __vmread(GUEST_DS_BASE); break;
- case seg_es: if ( !long_mode ) base = __vmread(GUEST_ES_BASE); break;
- case seg_fs: base = __vmread(GUEST_FS_BASE); break;
- case seg_gs: base = __vmread(GUEST_GS_BASE); break;
- case seg_ss: if ( !long_mode ) base = __vmread(GUEST_SS_BASE); break;
- case seg_tr: base = __vmread(GUEST_TR_BASE); break;
- case seg_gdtr: base = __vmread(GUEST_GDTR_BASE); break;
- case seg_idtr: base = __vmread(GUEST_IDTR_BASE); break;
- case seg_ldtr: base = __vmread(GUEST_LDTR_BASE); break;
+ case x86_seg_cs: if ( !long_mode ) base = __vmread(GUEST_CS_BASE); break;
+ case x86_seg_ds: if ( !long_mode ) base = __vmread(GUEST_DS_BASE); break;
+ case x86_seg_es: if ( !long_mode ) base = __vmread(GUEST_ES_BASE); break;
+ case x86_seg_fs: base = __vmread(GUEST_FS_BASE); break;
+ case x86_seg_gs: base = __vmread(GUEST_GS_BASE); break;
+ case x86_seg_ss: if ( !long_mode ) base = __vmread(GUEST_SS_BASE); break;
+ case x86_seg_tr: base = __vmread(GUEST_TR_BASE); break;
+ case x86_seg_gdtr: base = __vmread(GUEST_GDTR_BASE); break;
+ case x86_seg_idtr: base = __vmread(GUEST_IDTR_BASE); break;
+ case x86_seg_ldtr: base = __vmread(GUEST_LDTR_BASE); break;
default: BUG(); break;
}
return base;
}
+static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
+ struct segment_register *reg)
+{
+ u16 attr = 0;
+
+ ASSERT(v == current);
+
+ switch ( seg )
+ {
+ case x86_seg_cs:
+ reg->sel = __vmread(GUEST_CS_SELECTOR);
+ reg->limit = __vmread(GUEST_CS_LIMIT);
+ reg->base = __vmread(GUEST_CS_BASE);
+ attr = __vmread(GUEST_CS_AR_BYTES);
+ break;
+ case x86_seg_ds:
+ reg->sel = __vmread(GUEST_DS_SELECTOR);
+ reg->limit = __vmread(GUEST_DS_LIMIT);
+ reg->base = __vmread(GUEST_DS_BASE);
+ attr = __vmread(GUEST_DS_AR_BYTES);
+ break;
+ case x86_seg_es:
+ reg->sel = __vmread(GUEST_ES_SELECTOR);
+ reg->limit = __vmread(GUEST_ES_LIMIT);
+ reg->base = __vmread(GUEST_ES_BASE);
+ attr = __vmread(GUEST_ES_AR_BYTES);
+ break;
+ case x86_seg_fs:
+ reg->sel = __vmread(GUEST_FS_SELECTOR);
+ reg->limit = __vmread(GUEST_FS_LIMIT);
+ reg->base = __vmread(GUEST_FS_BASE);
+ attr = __vmread(GUEST_FS_AR_BYTES);
+ break;
+ case x86_seg_gs:
+ reg->sel = __vmread(GUEST_GS_SELECTOR);
+ reg->limit = __vmread(GUEST_GS_LIMIT);
+ reg->base = __vmread(GUEST_GS_BASE);
+ attr = __vmread(GUEST_GS_AR_BYTES);
+ break;
+ case x86_seg_ss:
+ reg->sel = __vmread(GUEST_SS_SELECTOR);
+ reg->limit = __vmread(GUEST_SS_LIMIT);
+ reg->base = __vmread(GUEST_SS_BASE);
+ attr = __vmread(GUEST_SS_AR_BYTES);
+ break;
+ case x86_seg_tr:
+ reg->sel = __vmread(GUEST_TR_SELECTOR);
+ reg->limit = __vmread(GUEST_TR_LIMIT);
+ reg->base = __vmread(GUEST_TR_BASE);
+ attr = __vmread(GUEST_TR_AR_BYTES);
+ break;
+ case x86_seg_gdtr:
+ reg->limit = __vmread(GUEST_GDTR_LIMIT);
+ reg->base = __vmread(GUEST_GDTR_BASE);
+ break;
+ case x86_seg_idtr:
+ reg->limit = __vmread(GUEST_IDTR_LIMIT);
+ reg->base = __vmread(GUEST_IDTR_BASE);
+ break;
+ case x86_seg_ldtr:
+ reg->sel = __vmread(GUEST_LDTR_SELECTOR);
+ reg->limit = __vmread(GUEST_LDTR_LIMIT);
+ reg->base = __vmread(GUEST_LDTR_BASE);
+ attr = __vmread(GUEST_LDTR_AR_BYTES);
+ break;
+ default:
+ BUG();
+ }
+
+ reg->attr.bytes = (attr & 0xff) | ((attr >> 4) & 0xf00);
+}
+
/* Make sure that xen intercepts any FP accesses from current */
static void vmx_stts(struct vcpu *v)
{
@@ -630,6 +702,11 @@ static int vmx_pae_enabled(struct vcpu *v)
return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
}
+static void vmx_inject_exception(unsigned int trapnr, int errcode)
+{
+ vmx_inject_hw_exception(current, trapnr, errcode);
+}
+
/* Setup HVM interfaces */
static void vmx_setup_hvm_funcs(void)
{
@@ -650,12 +727,15 @@ static void vmx_setup_hvm_funcs(void)
hvm_funcs.guest_x86_mode = vmx_guest_x86_mode;
hvm_funcs.get_guest_ctrl_reg = vmx_get_ctrl_reg;
hvm_funcs.get_segment_base = vmx_get_segment_base;
+ hvm_funcs.get_segment_register = vmx_get_segment_register;
hvm_funcs.update_host_cr3 = vmx_update_host_cr3;
hvm_funcs.stts = vmx_stts;
hvm_funcs.set_tsc_offset = vmx_set_tsc_offset;
+ hvm_funcs.inject_exception = vmx_inject_exception;
+
hvm_funcs.init_ap_context = vmx_init_ap_context;
hvm_funcs.init_hypercall_page = vmx_init_hypercall_page;
@@ -962,14 +1042,14 @@ static void vmx_do_invlpg(unsigned long va)
static int vmx_check_descriptor(int long_mode, unsigned long eip, int inst_len,
- enum segment seg, unsigned long *base,
+ enum x86_segment seg, unsigned long *base,
u32 *limit, u32 *ar_bytes)
{
enum vmcs_field ar_field, base_field, limit_field;
*base = 0;
*limit = 0;
- if ( seg != seg_es )
+ if ( seg != x86_seg_es )
{
unsigned char inst[MAX_INST_LEN];
int i;
@@ -999,22 +1079,22 @@ static int vmx_check_descriptor(int long_mode, unsigned long eip, int inst_len,
#endif
continue;
case 0x2e: /* CS */
- seg = seg_cs;
+ seg = x86_seg_cs;
continue;
case 0x36: /* SS */
- seg = seg_ss;
+ seg = x86_seg_ss;
continue;
case 0x26: /* ES */
- seg = seg_es;
+ seg = x86_seg_es;
continue;
case 0x64: /* FS */
- seg = seg_fs;
+ seg = x86_seg_fs;
continue;
case 0x65: /* GS */
- seg = seg_gs;
+ seg = x86_seg_gs;
continue;
case 0x3e: /* DS */
- seg = seg_ds;
+ seg = x86_seg_ds;
continue;
}
}
@@ -1022,32 +1102,32 @@ static int vmx_check_descriptor(int long_mode, unsigned long eip, int inst_len,
switch ( seg )
{
- case seg_cs:
+ case x86_seg_cs:
ar_field = GUEST_CS_AR_BYTES;
base_field = GUEST_CS_BASE;
limit_field = GUEST_CS_LIMIT;
break;
- case seg_ds:
+ case x86_seg_ds:
ar_field = GUEST_DS_AR_BYTES;
base_field = GUEST_DS_BASE;
limit_field = GUEST_DS_LIMIT;
break;
- case seg_es:
+ case x86_seg_es:
ar_field = GUEST_ES_AR_BYTES;
base_field = GUEST_ES_BASE;
limit_field = GUEST_ES_LIMIT;
break;
- case seg_fs:
+ case x86_seg_fs:
ar_field = GUEST_FS_AR_BYTES;
base_field = GUEST_FS_BASE;
limit_field = GUEST_FS_LIMIT;
break;
- case seg_gs:
+ case x86_seg_gs:
ar_field = GUEST_GS_AR_BYTES;
base_field = GUEST_GS_BASE;
limit_field = GUEST_GS_LIMIT;
break;
- case seg_ss:
+ case x86_seg_ss:
ar_field = GUEST_SS_AR_BYTES;
base_field = GUEST_SS_BASE;
limit_field = GUEST_SS_LIMIT;
@@ -1057,7 +1137,7 @@ static int vmx_check_descriptor(int long_mode, unsigned long eip, int inst_len,
return 0;
}
- if ( !long_mode || seg == seg_fs || seg == seg_gs )
+ if ( !long_mode || seg == x86_seg_fs || seg == x86_seg_gs )
{
*base = __vmread(base_field);
*limit = __vmread(limit_field);
@@ -1127,7 +1207,7 @@ static void vmx_io_instruction(unsigned long exit_qualification,
* selector is null.
*/
if ( !vmx_check_descriptor(long_mode, regs->eip, inst_len,
- dir == IOREQ_WRITE ? seg_ds : seg_es,
+ dir==IOREQ_WRITE ? x86_seg_ds : x86_seg_es,
&base, &limit, &ar_bytes) ) {
if ( !long_mode ) {
vmx_inject_hw_exception(current, TRAP_gp_fault, 0);
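vmx_get_segment_register above has to convert a VMX access-rights word into the contiguous 12-bit VMCB-style attr format now used generically: VMX keeps the descriptor attribute bits in bits 0-7 and 12-15 of the AR field, with bits 8-11 unused, so the expression `(attr & 0xff) | ((attr >> 4) & 0xf00)` squeezes out the gap. A standalone check of that packing (the sample AR value is invented for illustration):

#include <stdio.h>
#include <stdint.h>

/* Pack a VMX access-rights word (attribute bits at 0-7 and 12-15,
 * unused gap at 8-11) into the contiguous 12-bit VMCB attr format. */
static uint16_t vmx_ar_to_attr(uint32_t ar)
{
    return (ar & 0xff) | ((ar >> 4) & 0xf00);
}

int main(void)
{
    /* 0xc09b: present, DPL 0, code segment, with D/B and G set. */
    uint32_t ar = 0xc09b;
    printf("ar=%#x -> attr=%#x\n",
           (unsigned)ar, (unsigned)vmx_ar_to_attr(ar));  /* 0xc9b */
    return 0;
}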
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 24a43663ae..47fb500fc6 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3040,7 +3040,7 @@ struct ptwr_emulate_ctxt {
};
static int ptwr_emulated_read(
- unsigned int seg,
+ enum x86_segment seg,
unsigned long offset,
unsigned long *val,
unsigned int bytes,
@@ -3183,7 +3183,7 @@ static int ptwr_emulated_update(
}
static int ptwr_emulated_write(
- unsigned int seg,
+ enum x86_segment seg,
unsigned long offset,
unsigned long val,
unsigned int bytes,
@@ -3195,7 +3195,7 @@ static int ptwr_emulated_write(
}
static int ptwr_emulated_cmpxchg(
- unsigned int seg,
+ enum x86_segment seg,
unsigned long offset,
unsigned long old,
unsigned long new,
@@ -3208,7 +3208,7 @@ static int ptwr_emulated_cmpxchg(
}
static int ptwr_emulated_cmpxchg8b(
- unsigned int seg,
+ enum x86_segment seg,
unsigned long offset,
unsigned long old,
unsigned long old_hi,
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 539f81f947..8bf43133e4 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -69,14 +69,87 @@ int _shadow_mode_refcounts(struct domain *d)
/* x86 emulator support for the shadow code
*/
+static int hvm_translate_linear_addr(
+ enum x86_segment seg,
+ unsigned long offset,
+ unsigned int bytes,
+ unsigned int is_write,
+ unsigned long *paddr)
+{
+ struct segment_register creg, dreg;
+ unsigned long limit, addr = offset;
+ uint32_t last_byte;
+
+ hvm_get_segment_register(current, x86_seg_cs, &creg);
+ hvm_get_segment_register(current, seg, &dreg);
+
+ if ( !creg.attr.fields.l || !hvm_long_mode_enabled(current) )
+ {
+ /*
+ * COMPATIBILITY MODE: Apply segment checks and add base.
+ */
+
+ /* If this is a store, is the segment a writable data segment? */
+ if ( is_write && ((dreg.attr.fields.type & 0xa) != 0x2) )
+ goto gpf;
+
+ /* Calculate the segment limit, including granularity flag. */
+ limit = dreg.limit;
+ if ( dreg.attr.fields.g )
+ limit = (limit << 12) | 0xfff;
+
+ last_byte = offset + bytes - 1;
+
+ /* Is this a grows-down data segment? Special limit check if so. */
+ if ( (dreg.attr.fields.type & 0xc) == 0x4 )
+ {
+ /* Is upper limit 0xFFFF or 0xFFFFFFFF? */
+ if ( !dreg.attr.fields.db )
+ last_byte = (uint16_t)last_byte;
+
+ /* Check first byte and last byte against respective bounds. */
+ if ( (offset <= limit) || (last_byte < offset) )
+ goto gpf;
+ }
+ else if ( (last_byte > limit) || (last_byte < offset) )
+ goto gpf; /* last byte is beyond limit or wraps 0xFFFFFFFF */
+
+ /*
+ * Hardware truncates to 32 bits in compatibility mode.
+ * It does not truncate to 16 bits in 16-bit address-size mode.
+ */
+ addr = (uint32_t)(addr + dreg.base);
+ }
+ else if ( (seg == x86_seg_fs) || (seg == x86_seg_gs) )
+ {
+ /*
+ * LONG MODE: FS and GS add a segment base.
+ */
+ addr += dreg.base;
+ }
+
+ *paddr = addr;
+ return 0;
+
+ gpf:
+ /* Inject #GP(0). */
+ hvm_inject_exception(TRAP_gp_fault, 0);
+ return X86EMUL_PROPAGATE_FAULT;
+}
+
static int
-sh_x86_emulate_read(unsigned int seg,
+sh_x86_emulate_read(enum x86_segment seg,
unsigned long offset,
unsigned long *val,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
{
- unsigned long addr = offset;
+ unsigned long addr;
+ int rc;
+
+ rc = hvm_translate_linear_addr(seg, offset, bytes, 0, &addr);
+ if ( rc )
+ return rc;
*val = 0;
// XXX -- this is WRONG.
@@ -102,14 +175,19 @@ sh_x86_emulate_read(unsigned int seg,
}
static int
-sh_x86_emulate_write(unsigned int seg,
+sh_x86_emulate_write(enum x86_segment seg,
unsigned long offset,
unsigned long val,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
{
struct vcpu *v = current;
- unsigned long addr = offset;
+ unsigned long addr;
+ int rc;
+
+ rc = hvm_translate_linear_addr(seg, offset, bytes, 1, &addr);
+ if ( rc )
+ return rc;
#if 0
SHADOW_PRINTK("d=%u v=%u a=%#lx v=%#lx bytes=%u\n",
@@ -119,7 +197,7 @@ sh_x86_emulate_write(unsigned int seg,
}
static int
-sh_x86_emulate_cmpxchg(unsigned int seg,
+sh_x86_emulate_cmpxchg(enum x86_segment seg,
unsigned long offset,
unsigned long old,
unsigned long new,
@@ -127,7 +205,12 @@ sh_x86_emulate_cmpxchg(unsigned int seg,
struct x86_emulate_ctxt *ctxt)
{
struct vcpu *v = current;
- unsigned long addr = offset;
+ unsigned long addr;
+ int rc;
+
+ rc = hvm_translate_linear_addr(seg, offset, bytes, 1, &addr);
+ if ( rc )
+ return rc;
#if 0
SHADOW_PRINTK("d=%u v=%u a=%#lx o?=%#lx n:=%#lx bytes=%u\n",
@@ -138,7 +221,7 @@ sh_x86_emulate_cmpxchg(unsigned int seg,
}
static int
-sh_x86_emulate_cmpxchg8b(unsigned int seg,
+sh_x86_emulate_cmpxchg8b(enum x86_segment seg,
unsigned long offset,
unsigned long old_lo,
unsigned long old_hi,
@@ -147,7 +230,12 @@ sh_x86_emulate_cmpxchg8b(unsigned int seg,
struct x86_emulate_ctxt *ctxt)
{
struct vcpu *v = current;
- unsigned long addr = offset;
+ unsigned long addr;
+ int rc;
+
+ rc = hvm_translate_linear_addr(seg, offset, 8, 1, &addr);
+ if ( rc )
+ return rc;
#if 0
SHADOW_PRINTK("d=%u v=%u a=%#lx o?=%#lx:%lx n:=%#lx:%lx\n",
diff --git a/xen/arch/x86/x86_emulate.c b/xen/arch/x86/x86_emulate.c
index 18921846ee..555655f83a 100644
--- a/xen/arch/x86/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate.c
@@ -199,8 +199,8 @@ struct operand {
/* OP_REG: Pointer to register field. */
unsigned long *reg;
/* OP_MEM: Segment and offset. */
- unsigned int mem_seg;
- unsigned long mem_off;
+ enum x86_segment mem_seg;
+ unsigned long mem_off;
};
/* EFLAGS bit definitions. */
@@ -375,7 +375,7 @@ do{ __asm__ __volatile__ ( \
/* Fetch next part of the instruction being emulated. */
#define _insn_fetch(_size) \
({ unsigned long _x; \
- rc = ops->read(_regs.cs, _regs.eip, &_x, (_size), ctxt); \
+ rc = ops->read(x86_seg_cs, _regs.eip, &_x, (_size), ctxt); \
if ( rc != 0 ) \
goto done; \
_regs.eip += (_size); \
@@ -452,7 +452,7 @@ dump_instr(
dprintf("Instr:");
for ( i = 0; i < 16; i++, eip++ )
{
- if ( ops->read(ctxt->regs->cs, eip, &x, 1, ctxt) != 0 )
+ if ( ops->read(x86_seg_cs, eip, &x, 1, ctxt) != 0 )
printk(" ??");
else
printk(" %02x", (uint8_t)x);
@@ -476,8 +476,8 @@ x86_emulate_memop(
struct operand src, dst;
int mode = ctxt->mode;
- unsigned int ea_seg = X86_SEG_DS;
- unsigned long ea_off = 0;
+ enum x86_segment ea_seg = x86_seg_ds;
+ unsigned long ea_off = 0;
switch ( mode )
{
@@ -513,22 +513,22 @@ x86_emulate_memop(
ad_bytes ^= 6; /* switch between 2/4 bytes */
break;
case 0x2e: /* CS override */
- ea_seg = X86_SEG_CS;
+ ea_seg = x86_seg_cs;
break;
case 0x3e: /* DS override */
- ea_seg = X86_SEG_DS;
+ ea_seg = x86_seg_ds;
break;
case 0x26: /* ES override */
- ea_seg = X86_SEG_ES;
+ ea_seg = x86_seg_es;
break;
case 0x64: /* FS override */
- ea_seg = X86_SEG_FS;
+ ea_seg = x86_seg_fs;
break;
case 0x65: /* GS override */
- ea_seg = X86_SEG_GS;
+ ea_seg = x86_seg_gs;
break;
case 0x36: /* SS override */
- ea_seg = X86_SEG_SS;
+ ea_seg = x86_seg_ss;
break;
case 0xf0: /* LOCK */
lock_prefix = 1;
@@ -860,7 +860,7 @@ x86_emulate_memop(
/* 64-bit mode: POP always pops a 64-bit operand. */
if ( mode == X86EMUL_MODE_PROT64 )
dst.bytes = 8;
- if ( (rc = ops->read(X86_SEG_SS, truncate_ea(_regs.esp),
+ if ( (rc = ops->read(x86_seg_ss, truncate_ea(_regs.esp),
&dst.val, dst.bytes, ctxt)) != 0 )
goto done;
register_address_increment(_regs.esp, dst.bytes);
@@ -942,7 +942,7 @@ x86_emulate_memop(
goto done;
}
register_address_increment(_regs.esp, -dst.bytes);
- if ( (rc = ops->write(X86_SEG_SS, truncate_ea(_regs.esp),
+ if ( (rc = ops->write(x86_seg_ss, truncate_ea(_regs.esp),
dst.val, dst.bytes, ctxt)) != 0 )
goto done;
dst.val = dst.orig_val; /* skanky: disable writeback */
@@ -1024,7 +1024,7 @@ x86_emulate_memop(
case 0xa4 ... 0xa5: /* movs */
dst.type = OP_MEM;
dst.bytes = (d & ByteOp) ? 1 : op_bytes;
- dst.mem_seg = X86_SEG_ES;
+ dst.mem_seg = x86_seg_es;
dst.mem_off = truncate_ea(_regs.edi);
if ( (rc = ops->read(ea_seg, truncate_ea(_regs.esi),
&dst.val, dst.bytes, ctxt)) != 0 )
@@ -1037,7 +1037,7 @@ x86_emulate_memop(
case 0xaa ... 0xab: /* stos */
dst.type = OP_MEM;
dst.bytes = (d & ByteOp) ? 1 : op_bytes;
- dst.mem_seg = X86_SEG_ES;
+ dst.mem_seg = x86_seg_es;
dst.mem_off = truncate_ea(_regs.edi);
dst.val = _regs.eax;
register_address_increment(
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 83ebf48805..aafeecf3df 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -17,28 +17,47 @@
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
+
#ifndef __ASM_X86_HVM_HVM_H__
#define __ASM_X86_HVM_HVM_H__
-enum segment {
- seg_cs,
- seg_ss,
- seg_ds,
- seg_es,
- seg_fs,
- seg_gs,
- seg_tr,
- seg_ldtr,
- seg_gdtr,
- seg_idtr
-};
+#include <asm/x86_emulate.h>
+
+/*
+ * Attribute for segment selector. This is a copy of bit 40:47 & 52:55 of the
+ * segment descriptor. It happens to match the format of an AMD SVM VMCB.
+ */
+typedef union segment_attributes {
+ u16 bytes;
+ struct
+ {
+ u16 type:4; /* 0; Bit 40-43 */
+ u16 s: 1; /* 4; Bit 44 */
+ u16 dpl: 2; /* 5; Bit 45-46 */
+ u16 p: 1; /* 7; Bit 47 */
+ u16 avl: 1; /* 8; Bit 52 */
+ u16 l: 1; /* 9; Bit 53 */
+ u16 db: 1; /* 10; Bit 54 */
+ u16 g: 1; /* 11; Bit 55 */
+ } fields;
+} __attribute__ ((packed)) segment_attributes_t;
+
+/*
+ * Full state of a segment register (visible and hidden portions).
+ * Again, this happens to match the format of an AMD SVM VMCB.
+ */
+typedef struct segment_register {
+ u16 sel;
+ segment_attributes_t attr;
+ u32 limit;
+ u64 base;
+} __attribute__ ((packed)) segment_register_t;
/*
* The hardware virtual machine (HVM) interface abstracts away from the
* x86/x86_64 CPU virtualization assist specifics. Currently this interface
* supports Intel's VT-x and AMD's SVM extensions.
*/
-
struct hvm_function_table {
/*
* Disable HVM functionality
@@ -74,7 +93,9 @@ struct hvm_function_table {
int (*pae_enabled)(struct vcpu *v);
int (*guest_x86_mode)(struct vcpu *v);
unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
- unsigned long (*get_segment_base)(struct vcpu *v, enum segment seg);
+ unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
+ void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
+ struct segment_register *reg);
/*
* Re-set the value of CR3 that Xen runs on when handling VM exits
@@ -89,6 +110,8 @@ struct hvm_function_table {
void (*stts)(struct vcpu *v);
void (*set_tsc_offset)(struct vcpu *v, u64 offset);
+ void (*inject_exception)(unsigned int trapnr, int errcode);
+
void (*init_ap_context)(struct vcpu_guest_context *ctxt,
int vcpuid, int trampoline_vector);
@@ -172,11 +195,18 @@ hvm_get_guest_ctrl_reg(struct vcpu *v, unsigned int num)
}
static inline unsigned long
-hvm_get_segment_base(struct vcpu *v, enum segment seg)
+hvm_get_segment_base(struct vcpu *v, enum x86_segment seg)
{
return hvm_funcs.get_segment_base(v, seg);
}
+static inline void
+hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
+ struct segment_register *reg)
+{
+ hvm_funcs.get_segment_register(v, seg, reg);
+}
+
void hvm_stts(struct vcpu *v);
void hvm_set_guest_time(struct vcpu *v, u64 gtime);
void hvm_freeze_time(struct vcpu *v);
@@ -190,6 +220,12 @@ hvm_init_ap_context(struct vcpu_guest_context *ctxt,
return hvm_funcs.init_ap_context(ctxt, vcpuid, trampoline_vector);
}
+static inline void
+hvm_inject_exception(unsigned int trapnr, int errcode)
+{
+ hvm_funcs.inject_exception(trapnr, errcode);
+}
+
int hvm_bringup_ap(int vcpuid, int trampoline_vector);
#endif /* __ASM_X86_HVM_HVM_H__ */
diff --git a/xen/include/asm-x86/hvm/svm/vmcb.h b/xen/include/asm-x86/hvm/svm/vmcb.h
index 2d6a4ed0f3..81d41ea688 100644
--- a/xen/include/asm-x86/hvm/svm/vmcb.h
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h
@@ -311,33 +311,9 @@ enum {
SVM_CPU_STATE_ASSIST_ENABLED,
};
-/*
- * Attribute for segment selector. This is a copy of bit 40:47 & 52:55 of the
- * segment descriptor.
- */
-typedef union
-{
- u16 bytes;
- struct
- {
- u16 type:4; /* 0; Bit 40-43 */
- u16 s: 1; /* 4; Bit 44 */
- u16 dpl: 2; /* 5; Bit 45-46 */
- u16 p: 1; /* 7; Bit 47 */
- u16 avl: 1; /* 8; Bit 52 */
- u16 l: 1; /* 9; Bit 53 */
- u16 db: 1; /* 10; Bit 54 */
- u16 g: 1; /* 11; Bit 55 */
- } fields;
-} __attribute__ ((packed)) segment_attributes_t;
-
-typedef struct
-{
- u16 sel;
- segment_attributes_t attributes;
- u32 limit;
- u64 base;
-} __attribute__ ((packed)) segment_selector_t;
+/* Definitions of segment state are borrowed by the generic HVM code. */
+typedef segment_attributes_t svm_segment_attributes_t;
+typedef segment_register_t svm_segment_register_t;
typedef union
{
@@ -426,16 +402,16 @@ struct vmcb_struct {
u64 h_cr3; /* offset 0xB0 */
u64 res09[105]; /* offset 0xB8 pad to save area */
- segment_selector_t es; /* offset 1024 */
- segment_selector_t cs;
- segment_selector_t ss;
- segment_selector_t ds;
- segment_selector_t fs;
- segment_selector_t gs;
- segment_selector_t gdtr;
- segment_selector_t ldtr;
- segment_selector_t idtr;
- segment_selector_t tr;
+ svm_segment_register_t es; /* offset 1024 */
+ svm_segment_register_t cs;
+ svm_segment_register_t ss;
+ svm_segment_register_t ds;
+ svm_segment_register_t fs;
+ svm_segment_register_t gs;
+ svm_segment_register_t gdtr;
+ svm_segment_register_t ldtr;
+ svm_segment_register_t idtr;
+ svm_segment_register_t tr;
u64 res10[5];
u8 res11[3];
u8 cpl;
diff --git a/xen/include/asm-x86/x86_emulate.h b/xen/include/asm-x86/x86_emulate.h
index 4bc49c5487..ef23291cdc 100644
--- a/xen/include/asm-x86/x86_emulate.h
+++ b/xen/include/asm-x86/x86_emulate.h
@@ -11,16 +11,26 @@
struct x86_emulate_ctxt;
-#define X86_SEG_CS 0
-#define X86_SEG_SS 1
-#define X86_SEG_DS 2
-#define X86_SEG_ES 3
-#define X86_SEG_FS 4
-#define X86_SEG_GS 5
+/*
+ * Comprehensive enumeration of x86 segment registers. Note that the system
+ * registers (TR, LDTR, GDTR, IDTR) are never referenced by the emulator.
+ */
+enum x86_segment {
+ /* General purpose. */
+ x86_seg_cs,
+ x86_seg_ss,
+ x86_seg_ds,
+ x86_seg_es,
+ x86_seg_fs,
+ x86_seg_gs,
+ /* System. */
+ x86_seg_tr,
+ x86_seg_ldtr,
+ x86_seg_gdtr,
+ x86_seg_idtr
+};
/*
- * x86_emulate_ops:
- *
* These operations represent the instruction emulator's interface to memory.
*
* NOTES:
@@ -45,7 +55,7 @@ struct x86_emulate_ops
{
/*
* All functions:
- * @seg: [IN ] Segment being dereferenced (specified as X86_SEG_??).
+ * @seg: [IN ] Segment being dereferenced (specified as x86_seg_??).
* @offset [IN ] Offset within segment.
*/
@@ -55,7 +65,7 @@ struct x86_emulate_ops
* @bytes: [IN ] Number of bytes to read from memory.
*/
int (*read)(
- unsigned int seg,
+ enum x86_segment seg,
unsigned long offset,
unsigned long *val,
unsigned int bytes,
@@ -67,7 +77,7 @@ struct x86_emulate_ops
* @bytes: [IN ] Number of bytes to write to memory.
*/
int (*write)(
- unsigned int seg,
+ enum x86_segment seg,
unsigned long offset,
unsigned long val,
unsigned int bytes,
@@ -80,7 +90,7 @@ struct x86_emulate_ops
* @bytes: [IN ] Number of bytes to access using CMPXCHG.
*/
int (*cmpxchg)(
- unsigned int seg,
+ enum x86_segment seg,
unsigned long offset,
unsigned long old,
unsigned long new,
@@ -98,7 +108,7 @@ struct x86_emulate_ops
* to defining a function that always returns X86EMUL_UNHANDLEABLE.
*/
int (*cmpxchg8b)(
- unsigned int seg,
+ enum x86_segment seg,
unsigned long offset,
unsigned long old_lo,
unsigned long old_hi,