author    Keir Fraser <keir.fraser@citrix.com>  2007-12-27 12:00:30 +0000
committer Keir Fraser <keir.fraser@citrix.com>  2007-12-27 12:00:30 +0000
commit    5e1b25eac4788497e8f8d6117794ae4803d3a4f5
tree      141eb0687bdcd856bab4f1e2cf4c0b5b95e68be0
parent    3658ceb342ae70e307509ad7554b24e8d9cd2571
hvm: For functions which translate virtual addresses to machine addresses,
page faults should only be raised when the gva->gfn translation fails. These
failures should be distinguished from gfn->mfn translation failures.

The main effect of this is to change the behaviour of functions derived from
__hvm_copy(), which now returns a three-way enumeration, and which can also
automatically inject #PF when the gva->gfn translation fails.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
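A minimal caller-side sketch of the new three-way handling (the helper name
and error mapping here are hypothetical, assuming only the declarations added
by this patch):

    /* Hypothetical caller: read a guest dword during emulation.
     * On HVMCOPY_bad_gva_to_gfn a #PF has already been queued for
     * injection, so the caller only needs to abort the operation. */
    static int emul_read_guest_u32(unsigned long vaddr, uint32_t *val)
    {
        switch ( hvm_copy_from_guest_virt(val, vaddr, sizeof(*val)) )
        {
        case HVMCOPY_okay:
            return 0;
        case HVMCOPY_bad_gva_to_gfn:
            return -EFAULT; /* guest will observe the injected #PF */
        case HVMCOPY_bad_gfn_to_mfn:
        default:
            return -EIO;    /* range not backed by ordinary RAM */
        }
    }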
Diffstat (limited to 'xen/include/asm-x86/hvm/support.h')
-rw-r--r--  xen/include/asm-x86/hvm/support.h  49
1 file changed, 44 insertions(+), 5 deletions(-)
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index f251c881fa..48338f454b 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -82,11 +82,50 @@ extern char hvm_io_bitmap[];
void hvm_enable(struct hvm_function_table *);
-int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size);
-int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size);
-int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size);
-int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size);
-int hvm_fetch_from_guest_virt(void *buf, unsigned long vaddr, int size);
+enum hvm_copy_result {
+ HVMCOPY_okay = 0,
+ HVMCOPY_bad_gva_to_gfn,
+ HVMCOPY_bad_gfn_to_mfn
+};
+
+/*
+ * Copy to/from a guest physical address.
+ * Returns HVMCOPY_okay, else HVMCOPY_bad_gfn_to_mfn if the given physical
+ * address range does not map entirely onto ordinary machine memory.
+ */
+enum hvm_copy_result hvm_copy_to_guest_phys(
+ paddr_t paddr, void *buf, int size);
+enum hvm_copy_result hvm_copy_from_guest_phys(
+ void *buf, paddr_t paddr, int size);
+
+/*
+ * Copy to/from a guest virtual address.
+ * Returns:
+ * HVMCOPY_okay: Copy was entirely successful.
+ * HVMCOPY_bad_gfn_to_mfn: Some guest physical address did not map to
+ * ordinary machine memory.
+ * HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
+ * mapping to a guest physical address. In this case
+ * a page fault exception is automatically queued
+ * for injection into the current HVM VCPU.
+ */
+enum hvm_copy_result hvm_copy_to_guest_virt(
+ unsigned long vaddr, void *buf, int size);
+enum hvm_copy_result hvm_copy_from_guest_virt(
+ void *buf, unsigned long vaddr, int size);
+enum hvm_copy_result hvm_fetch_from_guest_virt(
+ void *buf, unsigned long vaddr, int size);
+
+/*
+ * As above (copy to/from a guest virtual address), but no fault is generated
+ * when HVMCOPY_bad_gva_to_gfn is returned.
+ */
+enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
+ unsigned long vaddr, void *buf, int size);
+enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
+ void *buf, unsigned long vaddr, int size);
+enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
+ void *buf, unsigned long vaddr, int size);
void hvm_print_line(struct vcpu *v, const char c);
void hlt_timer_fn(void *data);
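As a usage note on the _nofault variants above, a hedged sketch (the helper
is hypothetical, not part of this patch) of where they fit: probing whether a
guest virtual address is currently accessible without disturbing the guest:

    /* Hypothetical probe: returns non-zero if @vaddr is readable now.
     * Uses the _nofault variant so a failed gva->gfn translation does
     * not inject a spurious #PF into the guest. */
    static int guest_vaddr_readable(unsigned long vaddr)
    {
        uint8_t byte;
        return hvm_copy_from_guest_virt_nofault(&byte, vaddr, 1) == HVMCOPY_okay;
    }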