author    Keir Fraser <keir.fraser@citrix.com>  2009-12-08 07:55:21 +0000
committer Keir Fraser <keir.fraser@citrix.com>  2009-12-08 07:55:21 +0000
commit    272c176b86a1e442f9ec0bddf44034fbe0aa26c1
tree      f78ef071bf278cd240965169e8c468fca8796ac0 /xen/common/page_alloc.c
parent    b9c20c78789f94b3b550d0a9d694662cba5fa794
Track free pages live rather than count pages in all nodes/zones
Fixing a livelock in tmem that occurs only when the system is totally out of memory requires a way to determine, at fairly high frequency, whether all zones in all nodes are empty. To avoid walking all the zones in all the nodes on each check, I'd like a fast way to determine whether "free_pages" is zero. This patch tracks the sum of the free pages across all nodes/zones. Since the value is, I believe, modified only while heap_lock is held, it need not be atomic, though I don't know this for sure. I suspect this will also be useful in other future memory-utilization code, e.g. page sharing.

This has had limited testing, though I did drive free memory down to zero and back up a few times with debug on, and no asserts were triggered.

Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
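For readers skimming the patch, a minimal standalone sketch of the accounting pattern described above may help: one signed counter, kept in step with the per-node/per-zone counts and updated only while the allocator's lock is held, so no atomic operations are needed and an out-of-memory check becomes a single read. The helper names below are illustrative, not Xen's:

    /* Sketch of the patch's pattern, under the stated locking assumption:
     * the counter is touched only while the heap lock is held. It is
     * signed so the assertion can catch underflow (diverged accounting). */
    #include <assert.h>

    static long total_avail_pages;  /* running sum of free pages, all nodes/zones */

    /* Allocation path: caller holds the heap lock. */
    static void account_alloc(unsigned long request)
    {
        total_avail_pages -= request;
        assert(total_avail_pages >= 0);  /* underflow => counts have diverged */
    }

    /* Free path: caller holds the heap lock; 2^order pages are returned. */
    static void account_free(unsigned int order)
    {
        total_avail_pages += 1UL << order;
    }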
Diffstat (limited to 'xen/common/page_alloc.c')
-rw-r--r--  xen/common/page_alloc.c | 11 +++++++++++
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index c9392a6655..cb2d99cae5 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -222,6 +222,7 @@ static heap_by_zone_and_order_t *_heap[MAX_NUMNODES];
 #define heap(node, zone, order) ((*_heap[node])[zone][order])
 
 static unsigned long *avail[MAX_NUMNODES];
+static long total_avail_pages;
 
 static DEFINE_SPINLOCK(heap_lock);
@@ -350,6 +351,8 @@ static struct page_info *alloc_heap_pages(
     ASSERT(avail[node][zone] >= request);
     avail[node][zone] -= request;
+    total_avail_pages -= request;
+    ASSERT(total_avail_pages >= 0);
 
     spin_unlock(&heap_lock);
@@ -445,6 +448,8 @@ static int reserve_offlined_page(struct page_info *head)
             continue;
 
         avail[node][zone]--;
+        total_avail_pages--;
+        ASSERT(total_avail_pages >= 0);
 
         page_list_add_tail(cur_head,
                            test_bit(_PGC_broken, &cur_head->count_info) ?
@@ -497,6 +502,7 @@ static void free_heap_pages(
     spin_lock(&heap_lock);
 
     avail[node][zone] += 1 << order;
+    total_avail_pages += 1 << order;
 
     /* Merge chunks as far as possible. */
     while ( order < MAX_ORDER )
@@ -834,6 +840,11 @@ static unsigned long avail_heap_pages(
     return free_pages;
 }
 
+unsigned long total_free_pages(void)
+{
+    return total_avail_pages;
+}
+
 void __init end_boot_allocator(void)
 {
     unsigned int i;
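With the counter in place, the high-frequency "is memory exhausted" test that motivated the patch becomes a constant-time read instead of a walk over every node and zone. A hypothetical caller, not part of this patch (only total_free_pages() comes from it; the wrapper name is invented for illustration):

    /* Hypothetical exhaustion poll of the kind the tmem livelock fix needs;
     * only total_free_pages() is provided by this patch. */
    static int host_memory_exhausted(void)
    {
        return total_free_pages() == 0;  /* O(1): no node/zone walk */
    }

Reading the counter without taking heap_lock is racy, but for a heuristic poll a momentarily stale value is acceptable and avoids lock traffic at high frequency.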