author     Keir Fraser <keir.fraser@citrix.com>    2010-09-13 17:08:31 +0100
committer  Keir Fraser <keir.fraser@citrix.com>    2010-09-13 17:08:31 +0100
commit     8b8b7e6c90d7af1c29567d1cb2bbc70492b58bbf (patch)
tree       8f074c8bbea1e141b9bf9950f3f2bfa805b9c7d1 /xen/common/page_alloc.c
parent     8462487d489956d294c453a33ff922e17f6753fc (diff)
page_alloc: Hold heap_lock while adjusting page states to/from PGC_state_free.
This avoids races with buddy-merging logic in free_heap_pages().

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
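To illustrate the locking pattern the patch establishes, here is a minimal standalone sketch. It is not the Xen code itself: it uses a pthread mutex in place of Xen's heap_lock spinlock, and the struct, constant, and function names below are invented for illustration. The point it shows is that page-state transitions to and from the free state now happen with the heap lock held, so the buddy-merging scan in the free path cannot observe a page whose state is still being rewritten by the allocator.

#include <pthread.h>
#include <stdint.h>

/* Illustrative stand-ins for Xen's PGC_state_* page states. */
#define PGC_STATE_FREE   0u
#define PGC_STATE_INUSE  1u

struct page_info_sketch {
    uint32_t count_info;                 /* carries the page-state bits */
};

/* Stand-in for Xen's heap_lock spinlock. */
static pthread_mutex_t heap_lock_sketch = PTHREAD_MUTEX_INITIALIZER;

/* Allocation path: flip pages out of the free state before dropping the
 * lock.  Pre-patch, the unlock happened before this loop, leaving a window
 * in which a concurrent free could buddy-merge against half-updated pages. */
void alloc_pages_sketch(struct page_info_sketch *pg, unsigned int order)
{
    pthread_mutex_lock(&heap_lock_sketch);
    for ( unsigned int i = 0; i < (1u << order); i++ )
        pg[i].count_info = PGC_STATE_INUSE;
    pthread_mutex_unlock(&heap_lock_sketch);
}

/* Free path: take the lock before marking pages free, so the state flip and
 * the subsequent buddy merging form one critical section. */
void free_pages_sketch(struct page_info_sketch *pg, unsigned int order)
{
    pthread_mutex_lock(&heap_lock_sketch);
    for ( unsigned int i = 0; i < (1u << order); i++ )
        pg[i].count_info = PGC_STATE_FREE;
    /* ... buddy merging would inspect neighbouring pages' state here ... */
    pthread_mutex_unlock(&heap_lock_sketch);
}

The trade-off is a slightly longer critical section on both paths, in exchange for closing the window in which free_heap_pages() could merge against a buddy whose PGC_state bits were still being updated outside the lock.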
Diffstat (limited to 'xen/common/page_alloc.c')
-rw-r--r--  xen/common/page_alloc.c  |  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index ed2dc1d599..257f6bcf91 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -415,8 +415,6 @@ static struct page_info *alloc_heap_pages(
     if ( d != NULL )
         d->last_alloc_node = node;
 
-    spin_unlock(&heap_lock);
-
     cpus_clear(mask);
 
     for ( i = 0; i < (1 << order); i++ )
@@ -438,6 +436,8 @@ static struct page_info *alloc_heap_pages(
         page_set_owner(&pg[i], NULL);
     }
 
+    spin_unlock(&heap_lock);
+
     if ( unlikely(!cpus_empty(mask)) )
     {
         perfc_incr(need_flush_tlb_flush);
@@ -533,6 +533,8 @@ static void free_heap_pages(
     ASSERT(order <= MAX_ORDER);
     ASSERT(node >= 0);
 
+    spin_lock(&heap_lock);
+
     for ( i = 0; i < (1 << order); i++ )
     {
         /*
@@ -560,8 +562,6 @@ static void free_heap_pages(
             pg[i].tlbflush_timestamp = tlbflush_current_time();
     }
 
-    spin_lock(&heap_lock);
-
     avail[node][zone] += 1 << order;
     total_avail_pages += 1 << order;