about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Keir Fraser <keir.fraser@citrix.com>  2007-12-27 10:41:43 +0000
committer Keir Fraser <keir.fraser@citrix.com>  2007-12-27 10:41:43 +0000
commit    3658ceb342ae70e307509ad7554b24e8d9cd2571 (patch)
tree      bcef3daed10a522ddde763c8d86aeb22c56a1e12
parent    d22f3583407347c84e88fd81351e74f04e9c1743 (diff)
download  xen-3658ceb342ae70e307509ad7554b24e8d9cd2571.tar.gz
          xen-3658ceb342ae70e307509ad7554b24e8d9cd2571.tar.bz2
          xen-3658ceb342ae70e307509ad7554b24e8d9cd2571.zip
hvm: Cannot use ring_3() macro on HVM guests. It does not work because
the CS field is not saved/restored and also because CS.RPL does not
always equal the DPL (e.g., when executing in real mode). Instead we
must interrogate SS.DPL, or CPL directly (SVM supports this).

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
-rw-r--r--  xen/arch/x86/hvm/hvm.c            9
-rw-r--r--  xen/arch/x86/hvm/instrlen.c       6
-rw-r--r--  xen/arch/x86/hvm/platform.c       4
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c        2
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c        2
-rw-r--r--  xen/arch/x86/mm/shadow/common.c   6
-rw-r--r--  xen/arch/x86/mm/shadow/multi.c   12
-rw-r--r--  xen/arch/x86/mm/shadow/private.h  3
8 files changed, 28 insertions, 16 deletions
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index f0575afdab..bce02f0c05 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1272,15 +1272,18 @@ void hvm_task_switch(
static int __hvm_copy(void *buf, paddr_t addr, int size, int dir,
int virt, int fetch)
{
+ struct segment_register sreg;
unsigned long gfn, mfn;
p2m_type_t p2mt;
char *p;
int count, todo;
uint32_t pfec = PFEC_page_present;
+ hvm_get_segment_register(current, x86_seg_ss, &sreg);
+
if ( dir )
pfec |= PFEC_write_access;
- if ( ring_3(guest_cpu_user_regs()) )
+ if ( sreg.attr.fields.dpl == 3 )
pfec |= PFEC_user_mode;
if ( fetch )
pfec |= PFEC_insn_fetch;
@@ -1514,6 +1517,7 @@ static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
int hvm_do_hypercall(struct cpu_user_regs *regs)
{
+ struct segment_register sreg;
int flush, mode = hvm_guest_x86_mode(current);
uint32_t eax = regs->eax;
@@ -1524,7 +1528,8 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
#endif
case 4:
case 2:
- if ( unlikely(ring_3(regs)) )
+ hvm_get_segment_register(current, x86_seg_ss, &sreg);
+ if ( unlikely(sreg.attr.fields.dpl == 3) )
{
default:
regs->eax = -EPERM;
diff --git a/xen/arch/x86/hvm/instrlen.c b/xen/arch/x86/hvm/instrlen.c
index d258e744dc..7e6353d2e9 100644
--- a/xen/arch/x86/hvm/instrlen.c
+++ b/xen/arch/x86/hvm/instrlen.c
@@ -192,15 +192,15 @@ static uint8_t twobyte_table[256] = {
return -1; \
if ( inst_copy_from_guest(&_x, pc, 1) != 1 ) { \
unsigned long err; \
- struct segment_register cs; \
+ struct segment_register ss; \
gdprintk(XENLOG_WARNING, \
"Cannot read from address %lx (eip %lx, mode %d)\n", \
pc, org_pc, address_bytes); \
err = 0; /* Must be not-present: we don't enforce reserved bits */ \
if ( hvm_nx_enabled(current) ) \
err |= PFEC_insn_fetch; \
- hvm_get_segment_register(current, x86_seg_cs, &cs); \
- if ( cs.attr.fields.dpl != 0 ) \
+ hvm_get_segment_register(current, x86_seg_ss, &ss); \
+ if ( ss.attr.fields.dpl == 3 ) \
err |= PFEC_user_mode; \
hvm_inject_exception(TRAP_page_fault, err, pc); \
return -1; \
diff --git a/xen/arch/x86/hvm/platform.c b/xen/arch/x86/hvm/platform.c
index 66c30d74fd..c210fa5d71 100644
--- a/xen/arch/x86/hvm/platform.c
+++ b/xen/arch/x86/hvm/platform.c
@@ -1074,6 +1074,7 @@ void handle_mmio(paddr_t gpa)
case INSTR_MOVS:
{
+ struct segment_register sreg;
unsigned long count = GET_REPEAT_COUNT();
int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
unsigned long addr, gfn;
@@ -1089,7 +1090,8 @@ void handle_mmio(paddr_t gpa)
addr &= 0xFFFF;
addr += hvm_get_segment_base(v, x86_seg_es);
pfec = PFEC_page_present | PFEC_write_access;
- if ( ring_3(regs) )
+ hvm_get_segment_register(v, x86_seg_ss, &sreg);
+ if ( sreg.attr.fields.dpl == 3 )
pfec |= PFEC_user_mode;
gfn = paging_gva_to_gfn(v, addr, &pfec);
paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index ca935e1458..ca70979edb 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1440,7 +1440,7 @@ static void svm_io_instruction(struct vcpu *v)
pfec = PFEC_page_present;
if ( dir == IOREQ_READ ) /* Read from PIO --> write to RAM */
pfec |= PFEC_write_access;
- if ( ring_3(regs) )
+ if ( vmcb->cpl == 3 )
pfec |= PFEC_user_mode;
gfn = paging_gva_to_gfn(v, addr, &pfec);
if ( gfn == INVALID_GFN )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index ee608600a3..d1a58991d6 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1754,7 +1754,7 @@ static void vmx_do_str_pio(unsigned long exit_qualification,
pfec = PFEC_page_present;
if ( dir == IOREQ_READ ) /* Read from PIO --> write to RAM */
pfec |= PFEC_write_access;
- if ( ring_3(regs) )
+ if ( ((__vmread(GUEST_SS_AR_BYTES) >> 5) & 3) == 3 )
pfec |= PFEC_user_mode;
gfn = paging_gva_to_gfn(current, addr, &pfec);
if ( gfn == INVALID_GFN )
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 3221517579..191e87feb3 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -101,7 +101,7 @@ int _shadow_mode_refcounts(struct domain *d)
/* x86 emulator support for the shadow code
*/
-static struct segment_register *hvm_get_seg_reg(
+struct segment_register *hvm_get_seg_reg(
enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt)
{
struct segment_register *seg_reg = &sh_ctxt->seg_reg[seg];
@@ -141,6 +141,7 @@ hvm_read(enum x86_segment seg,
enum hvm_access_type access_type,
struct sh_emulate_ctxt *sh_ctxt)
{
+ struct segment_register *sreg;
unsigned long addr;
int rc, errcode;
@@ -163,7 +164,8 @@ hvm_read(enum x86_segment seg,
* was mapped here. This should never happen: we're here because
* of a write fault at the end of the instruction we're emulating. */
SHADOW_PRINTK("read failed to va %#lx\n", addr);
- errcode = ring_3(sh_ctxt->ctxt.regs) ? PFEC_user_mode : 0;
+ sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
+ errcode = (sreg->attr.fields.dpl == 3) ? PFEC_user_mode : 0;
if ( access_type == hvm_access_insn_fetch )
errcode |= PFEC_insn_fetch;
hvm_inject_exception(TRAP_page_fault, errcode, addr + bytes - rc);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index fe74ec566f..a7cef75b01 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -4018,16 +4018,18 @@ static mfn_t emulate_gva_to_mfn(struct vcpu *v,
/* Check that the user is allowed to perform this write.
* Returns a mapped pointer to write to, or NULL for error. */
-static void * emulate_map_dest(struct vcpu *v,
- unsigned long vaddr,
- u32 bytes,
- struct sh_emulate_ctxt *sh_ctxt)
+static void *emulate_map_dest(struct vcpu *v,
+ unsigned long vaddr,
+ u32 bytes,
+ struct sh_emulate_ctxt *sh_ctxt)
{
+ struct segment_register *sreg;
unsigned long offset;
void *map = NULL;
/* We don't emulate user-mode writes to page tables */
- if ( ring_3(sh_ctxt->ctxt.regs) )
+ sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
+ if ( sreg->attr.fields.dpl == 3 )
return NULL;
sh_ctxt->mfn1 = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index 541177d2de..72a41d9a74 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -680,7 +680,8 @@ struct x86_emulate_ops *shadow_init_emulation(
struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
void shadow_continue_emulation(
struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
-
+struct segment_register *hvm_get_seg_reg(
+ enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt);
#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
/**************************************************************************/