author     Keir Fraser <keir.fraser@citrix.com>  2007-12-27 12:00:30 +0000
committer  Keir Fraser <keir.fraser@citrix.com>  2007-12-27 12:00:30 +0000
commit     5e1b25eac4788497e8f8d6117794ae4803d3a4f5 (patch)
tree       141eb0687bdcd856bab4f1e2cf4c0b5b95e68be0
parent     3658ceb342ae70e307509ad7554b24e8d9cd2571 (diff)
hvm: For functions which translate virtual addresses to machine addresses,
page faults should only be raised when the gva->gfn translation fails.
These should be distinguished from gfn->mfn translation failures.

The main effect of this is to change the behaviour of functions derived
from __hvm_copy(), which now returns a three-way enumeration, and also can
automatically inject #PF when the gva->gfn translation fails.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
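As a reading aid (not part of the commit): a minimal sketch of how a caller is expected to handle the new three-way result. The wrapper emulate_read_example() and its includes are hypothetical; the enum values, hvm_copy_from_guest_virt(), and the X86EMUL_* codes are taken from the patch below, which applies the same pattern in hvm_read() in xen/arch/x86/mm/shadow/common.c.

/*
 * Hypothetical caller of the new virtual-address copy interface.
 * Assumes the Xen-internal declarations added by this patch to
 * xen/include/asm-x86/hvm/support.h.
 */
#include <asm/hvm/support.h>   /* enum hvm_copy_result, hvm_copy_from_guest_virt() */
#include <asm/x86_emulate.h>   /* X86EMUL_* return codes */

static int emulate_read_example(unsigned long vaddr, void *buf, int bytes)
{
    switch ( hvm_copy_from_guest_virt(buf, vaddr, bytes) )
    {
    case HVMCOPY_okay:
        return X86EMUL_OKAY;
    case HVMCOPY_bad_gva_to_gfn:
        /* gva->gfn failed: #PF was already injected by __hvm_copy(). */
        return X86EMUL_EXCEPTION;
    case HVMCOPY_bad_gfn_to_mfn:
    default:
        /* gfn->mfn failed: range is not ordinary RAM (e.g. MMIO). */
        return X86EMUL_UNHANDLEABLE;
    }
}

Callers that must not inject a fault (e.g. instruction fetch for debug or emulation restart paths) use the *_nofault variants instead, which return HVMCOPY_bad_gva_to_gfn without queueing an exception.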
-rw-r--r--  xen/arch/x86/hvm/hvm.c             |  75
-rw-r--r--  xen/arch/x86/hvm/io.c              |  36
-rw-r--r--  xen/arch/x86/hvm/platform.c        |  27
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c         |  20
-rw-r--r--  xen/arch/x86/hvm/vmx/realmode.c    |  23
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c         |  17
-rw-r--r--  xen/arch/x86/mm/shadow/common.c    |  27
-rw-r--r--  xen/arch/x86/mm/shadow/multi.c     |  36
-rw-r--r--  xen/include/asm-x86/hvm/support.h  |  49
9 files changed, 164 insertions(+), 146 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index bce02f0c05..c812982140 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1251,7 +1251,7 @@ void hvm_task_switch(
if ( hvm_virtual_to_linear_addr(x86_seg_ss, &reg, regs->esp,
4, hvm_access_write, 32,
&linear_addr) )
- hvm_copy_to_guest_virt(linear_addr, &errcode, 4);
+ hvm_copy_to_guest_virt_nofault(linear_addr, &errcode, 4);
}
out:
@@ -1269,24 +1269,26 @@ void hvm_task_switch(
* @fetch = copy is an instruction fetch?
* Returns number of bytes failed to copy (0 == complete success).
*/
-static int __hvm_copy(void *buf, paddr_t addr, int size, int dir,
- int virt, int fetch)
+static enum hvm_copy_result __hvm_copy(
+ void *buf, paddr_t addr, int size, int dir, int virt, int fetch)
{
- struct segment_register sreg;
unsigned long gfn, mfn;
p2m_type_t p2mt;
char *p;
int count, todo;
uint32_t pfec = PFEC_page_present;
- hvm_get_segment_register(current, x86_seg_ss, &sreg);
-
- if ( dir )
- pfec |= PFEC_write_access;
- if ( sreg.attr.fields.dpl == 3 )
- pfec |= PFEC_user_mode;
- if ( fetch )
- pfec |= PFEC_insn_fetch;
+ if ( virt )
+ {
+ struct segment_register sreg;
+ hvm_get_segment_register(current, x86_seg_ss, &sreg);
+ if ( sreg.attr.fields.dpl == 3 )
+ pfec |= PFEC_user_mode;
+ if ( dir )
+ pfec |= PFEC_write_access;
+ if ( fetch )
+ pfec |= PFEC_insn_fetch;
+ }
todo = size;
while ( todo > 0 )
@@ -1294,14 +1296,24 @@ static int __hvm_copy(void *buf, paddr_t addr, int size, int dir,
count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);
if ( virt )
+ {
gfn = paging_gva_to_gfn(current, addr, &pfec);
+ if ( gfn == INVALID_GFN )
+ {
+ if ( virt == 2 ) /* 2 means generate a fault */
+ hvm_inject_exception(TRAP_page_fault, pfec, addr);
+ return HVMCOPY_bad_gva_to_gfn;
+ }
+ }
else
+ {
gfn = addr >> PAGE_SHIFT;
-
+ }
+
mfn = mfn_x(gfn_to_mfn_current(gfn, &p2mt));
if ( !p2m_is_ram(p2mt) )
- return todo;
+ return HVMCOPY_bad_gfn_to_mfn;
ASSERT(mfn_valid(mfn));
p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
@@ -1321,30 +1333,53 @@ static int __hvm_copy(void *buf, paddr_t addr, int size, int dir,
todo -= count;
}
- return 0;
+ return HVMCOPY_okay;
}
-int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size)
+enum hvm_copy_result hvm_copy_to_guest_phys(
+ paddr_t paddr, void *buf, int size)
{
return __hvm_copy(buf, paddr, size, 1, 0, 0);
}
-int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size)
+enum hvm_copy_result hvm_copy_from_guest_phys(
+ void *buf, paddr_t paddr, int size)
{
return __hvm_copy(buf, paddr, size, 0, 0, 0);
}
-int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size)
+enum hvm_copy_result hvm_copy_to_guest_virt(
+ unsigned long vaddr, void *buf, int size)
+{
+ return __hvm_copy(buf, vaddr, size, 1, 2, 0);
+}
+
+enum hvm_copy_result hvm_copy_from_guest_virt(
+ void *buf, unsigned long vaddr, int size)
+{
+ return __hvm_copy(buf, vaddr, size, 0, 2, 0);
+}
+
+enum hvm_copy_result hvm_fetch_from_guest_virt(
+ void *buf, unsigned long vaddr, int size)
+{
+ return __hvm_copy(buf, vaddr, size, 0, 2, hvm_nx_enabled(current));
+}
+
+enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
+ unsigned long vaddr, void *buf, int size)
{
return __hvm_copy(buf, vaddr, size, 1, 1, 0);
}
-int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size)
+enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
+ void *buf, unsigned long vaddr, int size)
{
return __hvm_copy(buf, vaddr, size, 0, 1, 0);
}
-int hvm_fetch_from_guest_virt(void *buf, unsigned long vaddr, int size)
+enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
+ void *buf, unsigned long vaddr, int size)
{
return __hvm_copy(buf, vaddr, size, 0, 1, hvm_nx_enabled(current));
}
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index f522d60378..178e6891c8 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -435,17 +435,8 @@ static void hvm_pio_assist(struct cpu_user_regs *regs, ioreq_t *p,
if ( hvm_paging_enabled(current) )
{
int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
- if ( rv != 0 )
- {
- /* Failed on the page-spanning copy. Inject PF into
- * the guest for the address where we failed. */
- addr += p->size - rv;
- gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side "
- "of a page-spanning PIO: va=%#lx\n", addr);
- hvm_inject_exception(TRAP_page_fault,
- PFEC_write_access, addr);
- return;
- }
+ if ( rv == HVMCOPY_bad_gva_to_gfn )
+ return; /* exception already injected */
}
else
(void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
@@ -569,17 +560,8 @@ static void hvm_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
if (hvm_paging_enabled(current))
{
int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
- if ( rv != 0 )
- {
- /* Failed on the page-spanning copy. Inject PF into
- * the guest for the address where we failed. */
- addr += p->size - rv;
- gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side of "
- "a page-spanning MMIO: va=%#lx\n", addr);
- hvm_inject_exception(TRAP_page_fault,
- PFEC_write_access, addr);
- return;
- }
+ if ( rv == HVMCOPY_bad_gva_to_gfn )
+ return; /* exception already injected */
}
else
(void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
@@ -812,14 +794,8 @@ static void hvm_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
{
unsigned long addr = mmio_opp->addr;
int rv = hvm_copy_to_guest_virt(addr, &p->data, size);
- if ( rv != 0 )
- {
- addr += p->size - rv;
- gdprintk(XENLOG_DEBUG, "Pagefault emulating PUSH from MMIO:"
- " va=%#lx\n", addr);
- hvm_inject_exception(TRAP_page_fault, PFEC_write_access, addr);
- return;
- }
+ if ( rv == HVMCOPY_bad_gva_to_gfn )
+ return; /* exception already injected */
}
break;
}
diff --git a/xen/arch/x86/hvm/platform.c b/xen/arch/x86/hvm/platform.c
index c210fa5d71..273a2ed08a 100644
--- a/xen/arch/x86/hvm/platform.c
+++ b/xen/arch/x86/hvm/platform.c
@@ -829,11 +829,12 @@ static int mmio_decode(int address_bytes, unsigned char *opcode,
}
}
-int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
+int inst_copy_from_guest(
+ unsigned char *buf, unsigned long guest_eip, int inst_len)
{
if ( inst_len > MAX_INST_LEN || inst_len <= 0 )
return 0;
- if ( hvm_fetch_from_guest_virt(buf, guest_eip, inst_len) )
+ if ( hvm_fetch_from_guest_virt_nofault(buf, guest_eip, inst_len) )
return 0;
return inst_len;
}
@@ -1150,21 +1151,11 @@ void handle_mmio(paddr_t gpa)
if ( hvm_paging_enabled(v) )
{
int rv = hvm_copy_from_guest_virt(&value, addr, size);
- if ( rv != 0 )
- {
- /* Failed on the page-spanning copy. Inject PF into
- * the guest for the address where we failed */
- regs->eip -= inst_len; /* do not advance %eip */
- /* Must set CR2 at the failing address */
- addr += size - rv;
- gdprintk(XENLOG_DEBUG, "Pagefault on non-io side of a "
- "page-spanning MMIO: va=%#lx\n", addr);
- hvm_inject_exception(TRAP_page_fault, 0, addr);
- return;
- }
+ if ( rv == HVMCOPY_bad_gva_to_gfn )
+ return; /* exception already injected */
}
else
- (void) hvm_copy_from_guest_phys(&value, addr, size);
+ (void)hvm_copy_from_guest_phys(&value, addr, size);
} else /* dir != IOREQ_WRITE */
/* Remember where to write the result, as a *VA*.
* Must be a VA so we can handle the page overlap
@@ -1325,7 +1316,8 @@ unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len)
return 0;
}
- return hvm_copy_to_guest_virt((unsigned long)to, (void *)from, len);
+ return hvm_copy_to_guest_virt_nofault(
+ (unsigned long)to, (void *)from, len);
}
unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
@@ -1336,7 +1328,8 @@ unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
return 0;
}
- return hvm_copy_from_guest_virt(to, (unsigned long)from, len);
+ return hvm_copy_from_guest_virt_nofault(
+ to, (unsigned long)from, len);
}
/*
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index ca70979edb..3b331ffe67 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1468,20 +1468,13 @@ static void svm_io_instruction(struct vcpu *v)
if ( hvm_paging_enabled(current) )
{
int rv = hvm_copy_from_guest_virt(&value, addr, size);
- if ( rv != 0 )
- {
- /* Failed on the page-spanning copy. Inject PF into
- * the guest for the address where we failed. */
- addr += size - rv;
- gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
- "of a page-spanning PIO: va=%#lx\n", addr);
- svm_inject_exception(TRAP_page_fault, 0, addr);
- return;
- }
+ if ( rv == HVMCOPY_bad_gva_to_gfn )
+ return; /* exception already injected */
}
else
- (void) hvm_copy_from_guest_phys(&value, addr, size);
- } else /* dir != IOREQ_WRITE */
+ (void)hvm_copy_from_guest_phys(&value, addr, size);
+ }
+ else /* dir != IOREQ_WRITE */
/* Remember where to write the result, as a *VA*.
* Must be a VA so we can handle the page overlap
* correctly in hvm_pio_assist() */
@@ -1705,7 +1698,8 @@ static void svm_cr_access(
offset = ( addr_size == 4 ) ? offset : ( offset & 0xFFFF );
addr = hvm_get_segment_base(v, seg);
addr += offset;
- hvm_copy_to_guest_virt(addr,&value,2);
+ result = (hvm_copy_to_guest_virt(addr, &value, 2)
+ != HVMCOPY_bad_gva_to_gfn);
}
else
{
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 0ca7f5b0b3..a7f767926e 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -119,22 +119,13 @@ realmode_read(
struct realmode_emulate_ctxt *rm_ctxt)
{
uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;
- int todo;
*val = 0;
- todo = hvm_copy_from_guest_phys(val, addr, bytes);
- if ( todo )
+ if ( hvm_copy_from_guest_phys(val, addr, bytes) )
{
struct vcpu *curr = current;
- if ( todo != bytes )
- {
- gdprintk(XENLOG_WARNING, "RM: Partial read at %08x (%d/%d)\n",
- addr, todo, bytes);
- return X86EMUL_UNHANDLEABLE;
- }
-
if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
return X86EMUL_UNHANDLEABLE;
@@ -203,21 +194,11 @@ realmode_emulate_write(
struct realmode_emulate_ctxt *rm_ctxt =
container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;
- int todo;
-
- todo = hvm_copy_to_guest_phys(addr, &val, bytes);
- if ( todo )
+ if ( hvm_copy_to_guest_phys(addr, &val, bytes) )
{
struct vcpu *curr = current;
- if ( todo != bytes )
- {
- gdprintk(XENLOG_WARNING, "RM: Partial write at %08x (%d/%d)\n",
- addr, todo, bytes);
- return X86EMUL_UNHANDLEABLE;
- }
-
if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
return X86EMUL_UNHANDLEABLE;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index d1a58991d6..ddf60430d3 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1629,20 +1629,13 @@ static void vmx_send_str_pio(struct cpu_user_regs *regs,
if ( hvm_paging_enabled(current) )
{
int rv = hvm_copy_from_guest_virt(&value, addr, size);
- if ( rv != 0 )
- {
- /* Failed on the page-spanning copy. Inject PF into
- * the guest for the address where we failed. */
- addr += size - rv;
- gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
- "of a page-spanning PIO: va=%#lx\n", addr);
- vmx_inject_exception(TRAP_page_fault, 0, addr);
- return;
- }
+ if ( rv == HVMCOPY_bad_gva_to_gfn )
+ return; /* exception already injected */
}
else
- (void) hvm_copy_from_guest_phys(&value, addr, size);
- } else /* dir != IOREQ_WRITE */
+ (void)hvm_copy_from_guest_phys(&value, addr, size);
+ }
+ else /* dir != IOREQ_WRITE */
/* Remember where to write the result, as a *VA*.
* Must be a VA so we can handle the page overlap
* correctly in hvm_pio_assist() */
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 191e87feb3..7dc79c2e04 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -141,9 +141,8 @@ hvm_read(enum x86_segment seg,
enum hvm_access_type access_type,
struct sh_emulate_ctxt *sh_ctxt)
{
- struct segment_register *sreg;
unsigned long addr;
- int rc, errcode;
+ int rc;
rc = hvm_translate_linear_addr(
seg, offset, bytes, access_type, sh_ctxt, &addr);
@@ -157,19 +156,17 @@ hvm_read(enum x86_segment seg,
else
rc = hvm_copy_from_guest_virt(val, addr, bytes);
- if ( rc == 0 )
+ switch ( rc )
+ {
+ case HVMCOPY_okay:
return X86EMUL_OKAY;
+ case HVMCOPY_bad_gva_to_gfn:
+ return X86EMUL_EXCEPTION;
+ default:
+ break;
+ }
- /* If we got here, there was nothing mapped here, or a bad GFN
- * was mapped here. This should never happen: we're here because
- * of a write fault at the end of the instruction we're emulating. */
- SHADOW_PRINTK("read failed to va %#lx\n", addr);
- sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
- errcode = (sreg->attr.fields.dpl == 3) ? PFEC_user_mode : 0;
- if ( access_type == hvm_access_insn_fetch )
- errcode |= PFEC_insn_fetch;
- hvm_inject_exception(TRAP_page_fault, errcode, addr + bytes - rc);
- return X86EMUL_EXCEPTION;
+ return X86EMUL_UNHANDLEABLE;
}
static int
@@ -399,7 +396,7 @@ struct x86_emulate_ops *shadow_init_emulation(
(!hvm_translate_linear_addr(
x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
hvm_access_insn_fetch, sh_ctxt, &addr) &&
- !hvm_fetch_from_guest_virt(
+ !hvm_fetch_from_guest_virt_nofault(
sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
? sizeof(sh_ctxt->insn_buf) : 0;
@@ -427,7 +424,7 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
(!hvm_translate_linear_addr(
x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
hvm_access_insn_fetch, sh_ctxt, &addr) &&
- !hvm_fetch_from_guest_virt(
+ !hvm_fetch_from_guest_virt_nofault(
sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
? sizeof(sh_ctxt->insn_buf) : 0;
sh_ctxt->insn_buf_eip = regs->eip;
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index a7cef75b01..4777e54080 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3984,6 +3984,8 @@ int sh_remove_l3_shadow(struct vcpu *v, mfn_t sl4mfn, mfn_t sl3mfn)
/* Handling HVM guest writes to pagetables */
/* Translate a VA to an MFN, injecting a page-fault if we fail */
+#define BAD_GVA_TO_GFN (~0UL)
+#define BAD_GFN_TO_MFN (~1UL)
static mfn_t emulate_gva_to_mfn(struct vcpu *v,
unsigned long vaddr,
struct sh_emulate_ctxt *sh_ctxt)
@@ -4001,7 +4003,7 @@ static mfn_t emulate_gva_to_mfn(struct vcpu *v,
hvm_inject_exception(TRAP_page_fault, pfec, vaddr);
else
propagate_page_fault(vaddr, pfec);
- return _mfn(INVALID_MFN);
+ return _mfn(BAD_GVA_TO_GFN);
}
/* Translate the GFN to an MFN */
@@ -4013,11 +4015,14 @@ static mfn_t emulate_gva_to_mfn(struct vcpu *v,
return mfn;
}
- return _mfn(INVALID_MFN);
+ return _mfn(BAD_GFN_TO_MFN);
}
/* Check that the user is allowed to perform this write.
* Returns a mapped pointer to write to, or NULL for error. */
+#define MAPPING_UNHANDLEABLE ((void *)0)
+#define MAPPING_EXCEPTION ((void *)1)
+#define emulate_map_dest_failed(rc) ((unsigned long)(rc) <= 1)
static void *emulate_map_dest(struct vcpu *v,
unsigned long vaddr,
u32 bytes,
@@ -4030,11 +4035,12 @@ static void *emulate_map_dest(struct vcpu *v,
/* We don't emulate user-mode writes to page tables */
sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
if ( sreg->attr.fields.dpl == 3 )
- return NULL;
+ return MAPPING_UNHANDLEABLE;
sh_ctxt->mfn1 = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
if ( !mfn_valid(sh_ctxt->mfn1) )
- return NULL;
+ return ((mfn_x(sh_ctxt->mfn1) == BAD_GVA_TO_GFN) ?
+ MAPPING_EXCEPTION : MAPPING_UNHANDLEABLE);
/* Unaligned writes mean probably this isn't a pagetable */
if ( vaddr & (bytes - 1) )
@@ -4051,13 +4057,14 @@ static void *emulate_map_dest(struct vcpu *v,
/* Cross-page emulated writes are only supported for HVM guests;
* PV guests ought to know better */
if ( !is_hvm_vcpu(v) )
- return NULL;
+ return MAPPING_UNHANDLEABLE;
/* This write crosses a page boundary. Translate the second page */
sh_ctxt->mfn2 = emulate_gva_to_mfn(v, (vaddr + bytes - 1) & PAGE_MASK,
sh_ctxt);
if ( !mfn_valid(sh_ctxt->mfn2) )
- return NULL;
+ return ((mfn_x(sh_ctxt->mfn1) == BAD_GVA_TO_GFN) ?
+ MAPPING_EXCEPTION : MAPPING_UNHANDLEABLE);
/* Cross-page writes mean probably not a pagetable */
sh_remove_shadows(v, sh_ctxt->mfn2, 0, 0 /* Slow, can fail */ );
@@ -4075,7 +4082,7 @@ static void *emulate_map_dest(struct vcpu *v,
flush_tlb_local();
map += (vaddr & ~PAGE_MASK);
}
-
+
#if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY)
/* Remember if the bottom bit was clear, so we can choose not to run
* the change through the verify code if it's still clear afterwards */
@@ -4172,10 +4179,11 @@ sh_x86_emulate_write(struct vcpu *v, unsigned long vaddr, void *src,
shadow_lock(v->domain);
addr = emulate_map_dest(v, vaddr, bytes, sh_ctxt);
- if ( addr == NULL )
+ if ( emulate_map_dest_failed(addr) )
{
shadow_unlock(v->domain);
- return X86EMUL_EXCEPTION;
+ return ((addr == MAPPING_EXCEPTION) ?
+ X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
}
memcpy(addr, src, bytes);
@@ -4202,10 +4210,11 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, unsigned long vaddr,
shadow_lock(v->domain);
addr = emulate_map_dest(v, vaddr, bytes, sh_ctxt);
- if ( addr == NULL )
+ if ( emulate_map_dest_failed(addr) )
{
shadow_unlock(v->domain);
- return X86EMUL_EXCEPTION;
+ return ((addr == MAPPING_EXCEPTION) ?
+ X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
}
switch ( bytes )
@@ -4249,10 +4258,11 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v, unsigned long vaddr,
shadow_lock(v->domain);
addr = emulate_map_dest(v, vaddr, 8, sh_ctxt);
- if ( addr == NULL )
+ if ( emulate_map_dest_failed(addr) )
{
shadow_unlock(v->domain);
- return X86EMUL_EXCEPTION;
+ return ((addr == MAPPING_EXCEPTION) ?
+ X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
}
old = (((u64) old_hi) << 32) | (u64) old_lo;
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index f251c881fa..48338f454b 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -82,11 +82,50 @@ extern char hvm_io_bitmap[];
void hvm_enable(struct hvm_function_table *);
-int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size);
-int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size);
-int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size);
-int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size);
-int hvm_fetch_from_guest_virt(void *buf, unsigned long vaddr, int size);
+enum hvm_copy_result {
+ HVMCOPY_okay = 0,
+ HVMCOPY_bad_gva_to_gfn,
+ HVMCOPY_bad_gfn_to_mfn
+};
+
+/*
+ * Copy to/from a guest physical address.
+ * Returns HVMCOPY_okay, else HVMCOPY_bad_gfn_to_mfn if the given physical
+ * address range does not map entirely onto ordinary machine memory.
+ */
+enum hvm_copy_result hvm_copy_to_guest_phys(
+ paddr_t paddr, void *buf, int size);
+enum hvm_copy_result hvm_copy_from_guest_phys(
+ void *buf, paddr_t paddr, int size);
+
+/*
+ * Copy to/from a guest virtual address.
+ * Returns:
+ * HVMCOPY_okay: Copy was entirely successful.
+ * HVMCOPY_bad_gfn_to_mfn: Some guest physical address did not map to
+ * ordinary machine memory.
+ * HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
+ * mapping to a guest physical address. In this case
+ * a page fault exception is automatically queued
+ * for injection into the current HVM VCPU.
+ */
+enum hvm_copy_result hvm_copy_to_guest_virt(
+ unsigned long vaddr, void *buf, int size);
+enum hvm_copy_result hvm_copy_from_guest_virt(
+ void *buf, unsigned long vaddr, int size);
+enum hvm_copy_result hvm_fetch_from_guest_virt(
+ void *buf, unsigned long vaddr, int size);
+
+/*
+ * As above (copy to/from a guest virtual address), but no fault is generated
+ * when HVMCOPY_bad_gva_to_gfn is returned.
+ */
+enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
+ unsigned long vaddr, void *buf, int size);
+enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
+ void *buf, unsigned long vaddr, int size);
+enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
+ void *buf, unsigned long vaddr, int size);
void hvm_print_line(struct vcpu *v, const char c);
void hlt_timer_fn(void *data);