aboutsummaryrefslogtreecommitdiffstats
path: root/xen
diff options
context:
space:
mode:
authorAndres Lagar-Cavilla <andres@lagarcavilla.org>2012-01-26 12:46:26 +0000
committerAndres Lagar-Cavilla <andres@lagarcavilla.org>2012-01-26 12:46:26 +0000
commit8ed0c272481880b75442e62aad75c30ecfa798ce (patch)
treeeca536fc2cf4240c4bfdf7250ea24e545ab22eb6 /xen
parent6a11f31bd53381b925e67413e8230af8979e3807 (diff)
downloadxen-8ed0c272481880b75442e62aad75c30ecfa798ce.tar.gz
xen-8ed0c272481880b75442e62aad75c30ecfa798ce.tar.bz2
xen-8ed0c272481880b75442e62aad75c30ecfa798ce.zip
x86/mm: Enforce lock ordering for sharing page locks
Use the ordering constructs in mm-locks.h to enforce an order for the p2m and page locks in the sharing code. Applies to either the global sharing lock (in audit mode) or the per page locks. Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org> Signed-off-by: Adin Scannell <adin@scannell.ca> Acked-by: Tim Deegan <tim@xen.org> Committed-by: Tim Deegan <tim@xen.org>
Diffstat (limited to 'xen')
-rw-r--r--xen/arch/x86/mm/mem_sharing.c16
-rw-r--r--xen/arch/x86/mm/mm-locks.h18
-rw-r--r--xen/include/asm-x86/mm.h3
3 files changed, 35 insertions, 2 deletions
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index a9ed1aa012..237b5509f8 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -37,6 +37,13 @@
static shr_handle_t next_handle = 1;
+typedef struct pg_lock_data {
+ int mm_unlock_level;
+ unsigned short recurse_count;
+} pg_lock_data_t;
+
+DEFINE_PER_CPU(pg_lock_data_t, __pld);
+
#if MEM_SHARING_AUDIT
static mm_lock_t shr_lock;
@@ -85,16 +92,25 @@ static inline int mem_sharing_page_lock(struct page_info *pg)
static inline int mem_sharing_page_lock(struct page_info *pg)
{
int rc;
+ pg_lock_data_t *pld = &(this_cpu(__pld));
+
+ page_sharing_mm_pre_lock();
rc = page_lock(pg);
if ( rc )
{
preempt_disable();
+ page_sharing_mm_post_lock(&pld->mm_unlock_level,
+ &pld->recurse_count);
}
return rc;
}
static inline void mem_sharing_page_unlock(struct page_info *pg)
{
+ pg_lock_data_t *pld = &(this_cpu(__pld));
+
+ page_sharing_mm_unlock(pld->mm_unlock_level,
+ &pld->recurse_count);
preempt_enable();
page_unlock(pg);
}
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index eaafbe1086..836bfecc7d 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -156,7 +156,23 @@ declare_mm_lock(shr)
#else
-/* We use an efficient per-page lock when AUDIT is not enabled. */
+/* Sharing per page lock
+ *
+ * This is an external lock, not represented by an mm_lock_t. The memory
+ * sharing lock uses it to protect addition and removal of (gfn,domain)
+ * tuples to a shared page. We enforce order here against the p2m lock,
+ * which is taken after the page_lock to change the gfn's p2m entry.
+ *
+ * Note that in sharing audit mode, we use the global page lock above,
+ * instead.
+ *
+ * The lock is recursive because during share we lock two pages. */
+
+declare_mm_order_constraint(per_page_sharing)
+#define page_sharing_mm_pre_lock() mm_enforce_order_lock_pre_per_page_sharing()
+#define page_sharing_mm_post_lock(l, r) \
+ mm_enforce_order_lock_post_per_page_sharing((l), (r))
+#define page_sharing_mm_unlock(l, r) mm_enforce_order_unlock((l), (r))
#endif /* MEM_SHARING_AUDIT */
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index c8d5d66e4f..3c2090f035 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -351,7 +351,8 @@ void clear_superpage_mark(struct page_info *page);
* backing. Nesting may happen when sharing (and locking) two pages -- deadlock
* is avoided by locking pages in increasing order.
* Memory sharing may take the p2m_lock within a page_lock/unlock
- * critical section.
+ * critical section. We enforce ordering between page_lock and p2m_lock using an
+ * mm-locks.h construct.
*
* These two users (pte serialization and memory sharing) do not collide, since
* sharing is only supported for hvm guests, which do not perform pv pte updates.