aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2008-08-11 11:18:50 +0100
committerKeir Fraser <keir.fraser@citrix.com>2008-08-11 11:18:50 +0100
commitb87e597231d4d625ee570003cb18efad5c0ae05c (patch)
treec8707af851d4fc11c6a6ac944c666090294d9a10
parentc3eade240902c2a420d202b5d9cd8434ef2e4b37 (diff)
downloadxen-b87e597231d4d625ee570003cb18efad5c0ae05c.tar.gz
xen-b87e597231d4d625ee570003cb18efad5c0ae05c.tar.bz2
xen-b87e597231d4d625ee570003cb18efad5c0ae05c.zip
Serialize scrubbing pages
I noticed that scrubbing pages in parallel is not efficient due to spinlock contention (and possibly memory bandwidth?). In fact, heap_lock becomes a bottleneck: in my investigation, it often takes 1 millisecond just to acquire the lock on ia64 with 8 CPUs. It's very wasteful. Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
-rw-r--r--xen/common/page_alloc.c13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 997b178d7c..b202570a0f 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -951,10 +951,15 @@ static void page_scrub_softirq(void)
int i;
s_time_t start = NOW();
+ /* free_heap_pages() does not parallelise well. Serialise this function. */
+ if ( !spin_trylock(&page_scrub_lock) )
+ {
+ set_timer(&this_cpu(page_scrub_timer), NOW() + MILLISECS(1));
+ return;
+ }
+
/* Aim to do 1ms of work every 10ms. */
do {
- spin_lock(&page_scrub_lock);
-
if ( unlikely((ent = page_scrub_list.next) == &page_scrub_list) )
{
spin_unlock(&page_scrub_lock);
@@ -974,8 +979,6 @@ static void page_scrub_softirq(void)
page_scrub_list.next = ent->next;
scrub_pages -= (i+1);
- spin_unlock(&page_scrub_lock);
-
/* Working backwards, scrub each page in turn. */
while ( ent != &page_scrub_list )
{
@@ -988,6 +991,8 @@ static void page_scrub_softirq(void)
}
} while ( (NOW() - start) < MILLISECS(1) );
+ spin_unlock(&page_scrub_lock);
+
set_timer(&this_cpu(page_scrub_timer), NOW() + MILLISECS(10));
}