author     Andres Lagar-Cavilla <andres@lagarcavilla.org>  2011-11-11 18:11:34 +0000
committer  Andres Lagar-Cavilla <andres@lagarcavilla.org>  2011-11-11 18:11:34 +0000
commit     51032ca058e43fbd37ea1f7c7c003496f6451340 (patch)
tree       179fda9e3eba652562ca7aa3e6139852cbe3ed23 /xen/arch/x86/hvm/emulate.c
parent     e0594d9bc1c7996840f421a4084830d5a296e51b (diff)
Modify naming of queries into the p2m
Callers of lookups into the p2m code are now variants of get_gfn, and all callers need to call put_gfn. The code behind it is a no-op at the moment, but will change to proper locking in a later patch. This patch does not change functionality: it only changes naming and adds the put_gfn's. set_p2m_entry retains its name because it is always called with the p2m_lock held.

This patch is humongous, unfortunately, given the dozens of call sites involved. After this patch, anyone using the old-style gfn_to_mfn will no longer compile. This is on purpose: adapt to the new API.

Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
Acked-by: Tim Deegan <tim@xen.org>
Committed-by: Keir Fraser <keir@xen.org>
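To make the convention concrete, here is a minimal sketch of a caller under the new API. This is illustrative code, not part of the patch: the function name, return codes, and comments are invented, and only get_gfn, put_gfn, p2m_mem_paging_populate, and the p2m type check are taken from the patch itself. The key point is that every get_gfn must be balanced by a put_gfn on every exit path, since the lookup will eventually take a lock:

    /* Hypothetical caller sketching the get_gfn/put_gfn pairing.
     * example_touch_gfn and its return codes are invented for
     * illustration; the API calls mirror those used in this patch. */
    static int example_touch_gfn(struct domain *d, unsigned long gfn)
    {
        p2m_type_t p2mt;
        mfn_t mfn = get_gfn(d, gfn, &p2mt);   /* was gfn_to_mfn() */

        if ( p2m_is_paging(p2mt) )
        {
            p2m_mem_paging_populate(d, gfn);
            put_gfn(d, gfn);         /* drop the ref before retrying */
            return -EAGAIN;
        }

        /* ... use mfn; a later patch makes this a locked section ... */

        put_gfn(d, gfn);             /* balance the get_gfn above */
        return 0;
    }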
Diffstat (limited to 'xen/arch/x86/hvm/emulate.c')
-rw-r--r--  xen/arch/x86/hvm/emulate.c  29
1 file changed, 26 insertions, 3 deletions
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index c9b3214679..0d6967a21c 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -63,14 +63,18 @@ static int hvmemul_do_io(
     int rc;
 
     /* Check for paged out page */
-    ram_mfn = gfn_to_mfn_unshare(curr->domain, ram_gfn, &p2mt);
+    ram_mfn = get_gfn_unshare(curr->domain, ram_gfn, &p2mt);
     if ( p2m_is_paging(p2mt) )
     {
         p2m_mem_paging_populate(curr->domain, ram_gfn);
+        put_gfn(curr->domain, ram_gfn);
         return X86EMUL_RETRY;
     }
     if ( p2m_is_shared(p2mt) )
+    {
+        put_gfn(curr->domain, ram_gfn);
         return X86EMUL_RETRY;
+    }
 
     /*
      * Weird-sized accesses have undefined behaviour: we discard writes
@@ -82,6 +86,7 @@ static int hvmemul_do_io(
         ASSERT(p_data != NULL); /* cannot happen with a REP prefix */
         if ( dir == IOREQ_READ )
             memset(p_data, ~0, size);
+        put_gfn(curr->domain, ram_gfn);
         return X86EMUL_UNHANDLEABLE;
     }
 
@@ -101,7 +106,10 @@ static int hvmemul_do_io(
             paddr_t pa = vio->mmio_large_write_pa;
             unsigned int bytes = vio->mmio_large_write_bytes;
             if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
+            {
+                put_gfn(curr->domain, ram_gfn);
                 return X86EMUL_OKAY;
+            }
         }
         else
         {
@@ -111,6 +119,7 @@ static int hvmemul_do_io(
             {
                 memcpy(p_data, &vio->mmio_large_read[addr - pa],
                        size);
+                put_gfn(curr->domain, ram_gfn);
                 return X86EMUL_OKAY;
             }
         }
@@ -123,15 +132,22 @@ static int hvmemul_do_io(
     case HVMIO_completed:
         vio->io_state = HVMIO_none;
         if ( p_data == NULL )
+        {
+            put_gfn(curr->domain, ram_gfn);
             return X86EMUL_UNHANDLEABLE;
+        }
         goto finish_access;
     case HVMIO_dispatched:
         /* May have to wait for previous cycle of a multi-write to complete. */
         if ( is_mmio && !value_is_ptr && (dir == IOREQ_WRITE) &&
              (addr == (vio->mmio_large_write_pa +
                        vio->mmio_large_write_bytes)) )
+        {
+            put_gfn(curr->domain, ram_gfn);
             return X86EMUL_RETRY;
+        }
     default:
+        put_gfn(curr->domain, ram_gfn);
         return X86EMUL_UNHANDLEABLE;
     }
 
@@ -139,6 +155,7 @@ static int hvmemul_do_io(
     {
         gdprintk(XENLOG_WARNING, "WARNING: io already pending (%d)?\n",
                  p->state);
+        put_gfn(curr->domain, ram_gfn);
         return X86EMUL_UNHANDLEABLE;
     }
 
@@ -189,7 +206,10 @@ static int hvmemul_do_io(
     }
 
     if ( rc != X86EMUL_OKAY )
+    {
+        put_gfn(curr->domain, ram_gfn);
         return rc;
+    }
 
  finish_access:
     if ( p_data != NULL )
@@ -223,6 +243,7 @@ static int hvmemul_do_io(
         }
     }
 
+    put_gfn(curr->domain, ram_gfn);
     return X86EMUL_OKAY;
 }
 
@@ -671,12 +692,14 @@ static int hvmemul_rep_movs(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    (void)gfn_to_mfn(current->domain, sgpa >> PAGE_SHIFT, &p2mt);
+    /* Unlocked works here because we get_gfn for real in whatever
+     * we call later. */
+    (void)get_gfn_unlocked(current->domain, sgpa >> PAGE_SHIFT, &p2mt);
     if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
         return hvmemul_do_mmio(
             sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL);
 
-    (void)gfn_to_mfn(current->domain, dgpa >> PAGE_SHIFT, &p2mt);
+    (void)get_gfn_unlocked(current->domain, dgpa >> PAGE_SHIFT, &p2mt);
     if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
         return hvmemul_do_mmio(
             dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL);
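To spell out the comment in the last hunk: the unlocked query is safe precisely because it is only used to classify the address, never to hold a mapping. A minimal sketch of that pattern, assuming the invented helper name is_mmio_gpa and the same call signatures as the hunk above:

    /* Illustrative only, not from the patch.  get_gfn_unlocked takes
     * no p2m reference, so no put_gfn is needed on any path here; the
     * code that actually maps the page later performs its own
     * get_gfn/put_gfn pair, which is why "unlocked works here". */
    static bool_t is_mmio_gpa(struct domain *d, paddr_t gpa)
    {
        p2m_type_t p2mt;

        (void)get_gfn_unlocked(d, gpa >> PAGE_SHIFT, &p2mt);
        return !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt);
    }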