aboutsummaryrefslogtreecommitdiffstats
path: root/xen
diff options
context:
space:
mode:
authorAndres Lagar-Cavilla <andres@lagarcavilla.org>2012-02-02 12:22:34 +0000
committerAndres Lagar-Cavilla <andres@lagarcavilla.org>2012-02-02 12:22:34 +0000
commit4b9686b36f778d0c97fc64fad9592a03207aabc8 (patch)
tree0a548d3af943b12938a3598b1e62d91dccb6f90a /xen
parent6623226237f1c2db19ed5b91b6dca4d1f8a6f8d7 (diff)
downloadxen-4b9686b36f778d0c97fc64fad9592a03207aabc8.tar.gz
xen-4b9686b36f778d0c97fc64fad9592a03207aabc8.tar.bz2
xen-4b9686b36f778d0c97fc64fad9592a03207aabc8.zip
x86/mm: Reorder locks used by shadow code in anticipation of synchronized p2m lookups
Currently, mm-locks.h enforces a strict ordering between locks in the mm layer lest there be an inversion in the order locks are taken and thus the risk of deadlock. Once p2m lookups become synchronized, get_gfn* calls take the p2m lock, and a new set of inversions arises. Reorder some of the locks in the shadow code so that even in this case no deadlocks happen. After this, synchronized p2m lookups are in principle ready to be enabled in shadow mode. Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org> Acked-by: Tim Deegan <tim@xen.org> Committed-by: Tim Deegan <tim@xen.org>
Diffstat (limited to 'xen')
-rw-r--r--xen/arch/x86/mm/shadow/common.c3
-rw-r--r--xen/arch/x86/mm/shadow/multi.c18
2 files changed, 12 insertions, 9 deletions
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index b1a1e1d634..1f506365ac 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3609,6 +3609,8 @@ int shadow_track_dirty_vram(struct domain *d,
|| end_pfn >= p2m->max_mapped_pfn)
return -EINVAL;
+ /* We perform p2m lookups, so lock the p2m upfront to avoid deadlock */
+ p2m_lock(p2m_get_hostp2m(d));
paging_lock(d);
if ( dirty_vram && (!nr ||
@@ -3782,6 +3784,7 @@ out_dirty_vram:
out:
paging_unlock(d);
+ p2m_unlock(p2m_get_hostp2m(d));
return rc;
}
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index a2436563d2..83a581cc00 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2444,7 +2444,7 @@ static int validate_gl1e(struct vcpu *v, void *new_ge, mfn_t sl1mfn, void *se)
perfc_incr(shadow_validate_gl1e_calls);
gfn = guest_l1e_get_gfn(new_gl1e);
- gmfn = get_gfn_query(v->domain, gfn, &p2mt);
+ gmfn = get_gfn_query_unlocked(v->domain, gfn_x(gfn), &p2mt);
l1e_propagate_from_guest(v, new_gl1e, gmfn, &new_sl1e, ft_prefetch, p2mt);
result |= shadow_set_l1e(v, sl1p, new_sl1e, p2mt, sl1mfn);
@@ -2466,7 +2466,6 @@ static int validate_gl1e(struct vcpu *v, void *new_ge, mfn_t sl1mfn, void *se)
}
#endif /* OOS */
- put_gfn(v->domain, gfn_x(gfn));
return result;
}
@@ -4715,8 +4714,6 @@ static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
unsigned long l3gfn;
mfn_t l3mfn;
- paging_lock(v->domain);
-
gcr3 = (v->arch.hvm_vcpu.guest_cr[3]);
/* fast path: the pagetable belongs to the current context */
if ( gcr3 == gpa )
@@ -4728,8 +4725,11 @@ static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
{
printk(XENLOG_DEBUG "sh_pagetable_dying: gpa not valid %"PRIpaddr"\n",
gpa);
- goto out;
+ goto out_put_gfn;
}
+
+ paging_lock(v->domain);
+
if ( !fast_path )
{
gl3pa = sh_map_domain_page(l3mfn);
@@ -4770,11 +4770,11 @@ static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
v->arch.paging.shadow.pagetable_dying = 1;
-out:
if ( !fast_path )
unmap_domain_page(gl3pa);
- put_gfn(v->domain, l3gfn);
paging_unlock(v->domain);
+out_put_gfn:
+ put_gfn(v->domain, l3gfn);
}
#else
static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
@@ -4782,15 +4782,14 @@ static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
mfn_t smfn, gmfn;
p2m_type_t p2mt;
+ gmfn = get_gfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
paging_lock(v->domain);
- gmfn = get_gfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
#if GUEST_PAGING_LEVELS == 2
smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l2_32_shadow);
#else
smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l4_64_shadow);
#endif
- put_gfn(v->domain, gpa >> PAGE_SHIFT);
if ( mfn_valid(smfn) )
{
@@ -4808,6 +4807,7 @@ static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
v->arch.paging.shadow.pagetable_dying = 1;
paging_unlock(v->domain);
+ put_gfn(v->domain, gpa >> PAGE_SHIFT);
}
#endif