aboutsummaryrefslogtreecommitdiffstats
path: root/xen/arch/x86/hvm/emulate.c
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2009-07-13 12:35:34 +0100
committerKeir Fraser <keir.fraser@citrix.com>2009-07-13 12:35:34 +0100
commitb37d30c8fc5226203f5bb71fe79789cd8545f09b (patch)
tree029563b737d32391316a9bb7e4fd4fadd66c97fb /xen/arch/x86/hvm/emulate.c
parent1c4e651d3f00df7e81f8dbbd123b07835d6c9ebc (diff)
downloadxen-b37d30c8fc5226203f5bb71fe79789cd8545f09b.tar.gz
xen-b37d30c8fc5226203f5bb71fe79789cd8545f09b.tar.bz2
xen-b37d30c8fc5226203f5bb71fe79789cd8545f09b.zip
Mapping grant references into HVM guests, take 2
After some discussion, here's a second version of the patch I posted a couple of weeks back to map grant references into HVM guests. As before, this is done by modifying the P2M map, but this time there's no new hypercall to do it. Instead, the existing GNTTABOP_map is overloaded to perform a P2M mapping if called from a shadow mode translate guest. This matches the IA64 API. Signed-off-by: Steven Smith <steven.smith@citrix.com> Acked-by: Tim Deegan <tim.deegan@citrix.com> CC: Bhaskar Jayaraman <Bhaskar.Jayaraman@lsi.com>
Diffstat (limited to 'xen/arch/x86/hvm/emulate.c')
-rw-r--r--xen/arch/x86/hvm/emulate.c29
1 file changed, 18 insertions, 11 deletions
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index ad5ec35a24..a8c6d1a5e3 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -408,20 +408,23 @@ static int __hvmemul_read(
rc = ((access_type == hvm_access_insn_fetch) ?
hvm_fetch_from_guest_virt(p_data, addr, bytes, pfec) :
hvm_copy_from_guest_virt(p_data, addr, bytes, pfec));
- if ( rc == HVMCOPY_bad_gva_to_gfn )
- return X86EMUL_EXCEPTION;
- if ( rc == HVMCOPY_bad_gfn_to_mfn )
+ switch ( rc )
{
+ case HVMCOPY_bad_gva_to_gfn:
+ return X86EMUL_EXCEPTION;
+ case HVMCOPY_unhandleable:
+ return X86EMUL_UNHANDLEABLE;
+ case HVMCOPY_bad_gfn_to_mfn:
if ( access_type == hvm_access_insn_fetch )
return X86EMUL_UNHANDLEABLE;
-
rc = hvmemul_linear_to_phys(
addr, &gpa, bytes, &reps, pfec, hvmemul_ctxt);
if ( rc != X86EMUL_OKAY )
return rc;
-
return hvmemul_do_mmio(gpa, &reps, bytes, 0, IOREQ_READ, 0, p_data);
+ default:
+ break;
}
return X86EMUL_OKAY;
@@ -496,18 +499,22 @@ static int hvmemul_write(
pfec |= PFEC_user_mode;
rc = hvm_copy_to_guest_virt(addr, p_data, bytes, pfec);
- if ( rc == HVMCOPY_bad_gva_to_gfn )
- return X86EMUL_EXCEPTION;
- if ( rc == HVMCOPY_bad_gfn_to_mfn )
+ switch ( rc )
{
+ case HVMCOPY_bad_gva_to_gfn:
+ return X86EMUL_EXCEPTION;
+ case HVMCOPY_unhandleable:
+ return X86EMUL_UNHANDLEABLE;
+ case HVMCOPY_bad_gfn_to_mfn:
rc = hvmemul_linear_to_phys(
addr, &gpa, bytes, &reps, pfec, hvmemul_ctxt);
if ( rc != X86EMUL_OKAY )
return rc;
-
return hvmemul_do_mmio(gpa, &reps, bytes, 0,
IOREQ_WRITE, 0, p_data);
+ default:
+ break;
}
return X86EMUL_OKAY;
@@ -636,12 +643,12 @@ static int hvmemul_rep_movs(
return rc;
(void)gfn_to_mfn_current(sgpa >> PAGE_SHIFT, &p2mt);
- if ( !p2m_is_ram(p2mt) )
+ if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
return hvmemul_do_mmio(
sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL);
(void)gfn_to_mfn_current(dgpa >> PAGE_SHIFT, &p2mt);
- if ( !p2m_is_ram(p2mt) )
+ if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
return hvmemul_do_mmio(
dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL);