author     Andres Lagar-Cavilla <andres@lagarcavilla.org>   2012-01-26 13:21:27 +0000
committer  Andres Lagar-Cavilla <andres@lagarcavilla.org>   2012-01-26 13:21:27 +0000
commit     be1ff5e8f0f71f0927ec2681ec7f51ac2efe50d9 (patch)
tree       2471304b8998d8daca6ac50fd5bcfc7564d06378 /xen
parent     91279d9f230d7314e212a0e3aa0cee874edb6a3d (diff)
x86/mm: clean use of p2m unlocked queries
Limit such queries only to p2m_query types. This is more compatible with the
name and intended semantics: perform only a lookup, and explicitly in an
unlocked way.

Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
Acked-by: Tim Deegan <tim@xen.org>
Committed-by: Tim Deegan <tim@xen.org>
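
For context, the distinction the patch enforces can be sketched as follows. This is
an illustrative example only (a hypothetical helper, not part of the patch), assuming
the get_gfn()/put_gfn() and get_gfn_query_unlocked() accessors used in the hunks below:

    /* Illustrative sketch, not part of this patch: a hypothetical caller
     * showing the two usage patterns the patch keeps apart. */
    static void p2m_lookup_example(struct domain *d, unsigned long gfn)
    {
        p2m_type_t p2mt;
        mfn_t mfn;

        /* Unlocked query: peek at the current p2m type only -- no population,
         * no unsharing, and no reference to drop afterwards. */
        (void)get_gfn_query_unlocked(d, gfn, &p2mt);

        /* Full lookup: the entry is held until the matching put_gfn(). */
        mfn = get_gfn(d, gfn, &p2mt);
        if ( p2m_is_ram(p2mt) )
        {
            /* ... mfn may safely be used while the gfn is held ... */
        }
        put_gfn(d, gfn);
        (void)mfn;
    }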
Diffstat (limited to 'xen')
-rw-r--r--  xen/arch/x86/hvm/emulate.c     | 35
-rw-r--r--  xen/arch/x86/hvm/hvm.c         |  5
-rw-r--r--  xen/arch/x86/hvm/stdvga.c      | 12
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c     |  2
-rw-r--r--  xen/arch/x86/mm/mem_sharing.c  |  2
5 files changed, 44 insertions(+), 12 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 15f413c576..53cddbecf4 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -660,7 +660,7 @@ static int hvmemul_rep_movs(
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- unsigned long saddr, daddr, bytes;
+ unsigned long saddr, daddr, bytes, sgfn, dgfn;
paddr_t sgpa, dgpa;
uint32_t pfec = PFEC_page_present;
p2m_type_t p2mt;
@@ -693,17 +693,28 @@ static int hvmemul_rep_movs(
if ( rc != X86EMUL_OKAY )
return rc;
- /* Unlocked works here because we get_gfn for real in whatever
- * we call later. */
- (void)get_gfn_unlocked(current->domain, sgpa >> PAGE_SHIFT, &p2mt);
+ /* XXX In a fine-grained p2m locking scenario, we need to sort these
+ * get_gfn's, or else we might deadlock */
+ sgfn = sgpa >> PAGE_SHIFT;
+ (void)get_gfn(current->domain, sgfn, &p2mt);
if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
- return hvmemul_do_mmio(
+ {
+ rc = hvmemul_do_mmio(
sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL);
+ put_gfn(current->domain, sgfn);
+ return rc;
+ }
- (void)get_gfn_unlocked(current->domain, dgpa >> PAGE_SHIFT, &p2mt);
+ dgfn = dgpa >> PAGE_SHIFT;
+ (void)get_gfn(current->domain, dgfn, &p2mt);
if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
- return hvmemul_do_mmio(
+ {
+ rc = hvmemul_do_mmio(
dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL);
+ put_gfn(current->domain, sgfn);
+ put_gfn(current->domain, dgfn);
+ return rc;
+ }
/* RAM-to-RAM copy: emulate as equivalent of memmove(dgpa, sgpa, bytes). */
bytes = *reps * bytes_per_rep;
@@ -718,7 +729,11 @@ static int hvmemul_rep_movs(
* can be emulated by a source-to-buffer-to-destination block copy.
*/
if ( ((dgpa + bytes_per_rep) > sgpa) && (dgpa < (sgpa + bytes)) )
+ {
+ put_gfn(current->domain, sgfn);
+ put_gfn(current->domain, dgfn);
return X86EMUL_UNHANDLEABLE;
+ }
/* Adjust destination address for reverse copy. */
if ( df )
@@ -727,7 +742,11 @@ static int hvmemul_rep_movs(
/* Allocate temporary buffer. Fall back to slow emulation if this fails. */
buf = xmalloc_bytes(bytes);
if ( buf == NULL )
+ {
+ put_gfn(current->domain, sgfn);
+ put_gfn(current->domain, dgfn);
return X86EMUL_UNHANDLEABLE;
+ }
/*
* We do a modicum of checking here, just for paranoia's sake and to
@@ -738,6 +757,8 @@ static int hvmemul_rep_movs(
rc = hvm_copy_to_guest_phys(dgpa, buf, bytes);
xfree(buf);
+ put_gfn(current->domain, sgfn);
+ put_gfn(current->domain, dgfn);
if ( rc == HVMCOPY_gfn_paged_out )
return X86EMUL_RETRY;
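
The XXX note in the hunk above is about lock ordering: under fine-grained p2m locking,
taking the source and destination gfns in an arbitrary order could deadlock against
another vcpu taking the same pair the other way round. One conventional remedy is to
acquire the lower-numbered gfn first; the helper below is a hypothetical sketch of that
idea, illustrative only and not part of this patch:

    /* Illustrative sketch only: acquire two gfns in a fixed (ascending) order
     * so that two CPUs grabbing the same pair cannot deadlock. */
    static void get_gfn_pair_ordered(struct domain *d,
                                     unsigned long sgfn, unsigned long dgfn,
                                     p2m_type_t *sp2mt, p2m_type_t *dp2mt)
    {
        if ( sgfn == dgfn )
        {
            /* Single entry: take (and later drop) only one reference. */
            (void)get_gfn(d, sgfn, sp2mt);
            *dp2mt = *sp2mt;
        }
        else if ( sgfn < dgfn )
        {
            (void)get_gfn(d, sgfn, sp2mt);
            (void)get_gfn(d, dgfn, dp2mt);
        }
        else
        {
            (void)get_gfn(d, dgfn, dp2mt);
            (void)get_gfn(d, sgfn, sp2mt);
        }
    }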
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 8847b499ec..f07b5939de 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3975,7 +3975,10 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
rc = -EINVAL;
if ( is_hvm_domain(d) )
{
- get_gfn_unshare_unlocked(d, a.pfn, &t);
+ /* Use get_gfn query as we are interested in the current
+ * type, not in allocating or unsharing. That'll happen
+ * on access. */
+ get_gfn_query_unlocked(d, a.pfn, &t);
if ( p2m_is_mmio(t) )
a.mem_type = HVMMEM_mmio_dm;
else if ( p2m_is_readonly(t) )
diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index c9ae70d111..ddbd9970f3 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -482,15 +482,19 @@ static int mmio_move(struct hvm_hw_stdvga *s, ioreq_t *p)
if ( hvm_copy_to_guest_phys(data, &tmp, p->size) !=
HVMCOPY_okay )
{
- (void)get_gfn_unlocked(d, data >> PAGE_SHIFT, &p2mt);
+ (void)get_gfn(d, data >> PAGE_SHIFT, &p2mt);
/*
* The only case we handle is vga_mem <-> vga_mem.
* Anything else disables caching and leaves it to qemu-dm.
*/
if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
+ {
+ put_gfn(d, data >> PAGE_SHIFT);
return 0;
+ }
stdvga_mem_write(data, tmp, p->size);
+ put_gfn(d, data >> PAGE_SHIFT);
}
data += sign * p->size;
addr += sign * p->size;
@@ -504,11 +508,15 @@ static int mmio_move(struct hvm_hw_stdvga *s, ioreq_t *p)
if ( hvm_copy_from_guest_phys(&tmp, data, p->size) !=
HVMCOPY_okay )
{
- (void)get_gfn_unlocked(d, data >> PAGE_SHIFT, &p2mt);
+ (void)get_gfn(d, data >> PAGE_SHIFT, &p2mt);
if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
+ {
+ put_gfn(d, data >> PAGE_SHIFT);
return 0;
+ }
tmp = stdvga_mem_read(data, p->size);
+ put_gfn(d, data >> PAGE_SHIFT);
}
stdvga_mem_write(addr, tmp, p->size);
data += sign * p->size;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index d2bb64a6f2..1e20863098 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2112,7 +2112,7 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
return;
/* Everything else is an error. */
- mfn = get_gfn_guest_unlocked(d, gfn, &p2mt);
+ mfn = get_gfn_query_unlocked(d, gfn, &p2mt);
gdprintk(XENLOG_ERR, "EPT violation %#lx (%c%c%c/%c%c%c), "
"gpa %#"PRIpaddr", mfn %#lx, type %i.\n",
qualification,
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 05840a7261..ed2bc063ff 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -421,7 +421,7 @@ int mem_sharing_debug_gfn(struct domain *d, unsigned long gfn)
p2m_type_t p2mt;
mfn_t mfn;
- mfn = get_gfn_unlocked(d, gfn, &p2mt);
+ mfn = get_gfn_query_unlocked(d, gfn, &p2mt);
gdprintk(XENLOG_DEBUG, "Debug for domain=%d, gfn=%lx, ",
d->domain_id,