author    Keir Fraser <keir.fraser@citrix.com>    2009-10-21 09:21:01 +0100
committer Keir Fraser <keir.fraser@citrix.com>    2009-10-21 09:21:01 +0100
commit    ba0d6cfc6b6cdf9e58ab5c625452ab9c4852764b (patch)
tree      12876a39452addbf79dd700062a4f4877a459a13 /xen/common/xmalloc_tlsf.c
parent    2a03afa74dd5960c83fd02fa726cb3b408cfd772 (diff)
xmalloc_tlsf: Fall back to xmalloc_whole_pages() if xmem_pool_alloc() fails.
This was happening for xmalloc request sizes between 3921 and 3951 bytes.
The reason is that xmem_pool_alloc() may add extra padding to the requested
size, making the total block size greater than a page. Rather than add yet
more smarts about TLSF to _xmalloc(), we just dumbly attempt any request
smaller than a page via xmem_pool_alloc() first, then fall back on
xmalloc_whole_pages() if this fails.

Based on bug diagnosis and initial patch by John Byrne <john.l.byrne@hp.com>

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
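For illustration, here is the patched allocation path in isolation: a
minimal restatement of the hunks below, with the alignment-padding
handling omitted. xmem_pool_alloc(), xmalloc_whole_pages() and xenpool
are the helpers and pool already defined in this file; the wrapper name
alloc_path is made up for the sketch.

    /* Sketch of the patched _xmalloc() flow, not the full function. */
    static void *alloc_path(unsigned long size)
    {
        void *p = NULL;

        /* Optimistically hand any sub-page request to the TLSF pool;
         * if internal padding pushes the block past a page, the pool
         * simply fails and returns NULL. */
        if ( size < PAGE_SIZE )
            p = xmem_pool_alloc(size, xenpool);

        /* Pool failures and page-or-larger requests take whole pages. */
        if ( p == NULL )
            p = xmalloc_whole_pages(size);

        return p;
    }

The point of the try-then-fall-back shape is that _xmalloc() no longer
has to predict how much padding TLSF will add: the pool itself is the
authority on whether a padded request still fits.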
Diffstat (limited to 'xen/common/xmalloc_tlsf.c')
-rw-r--r--  xen/common/xmalloc_tlsf.c | 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/xen/common/xmalloc_tlsf.c b/xen/common/xmalloc_tlsf.c
index 3f85389e23..6be78e1b98 100644
--- a/xen/common/xmalloc_tlsf.c
+++ b/xen/common/xmalloc_tlsf.c
@@ -553,7 +553,7 @@ static void tlsf_init(void)
 
 void *_xmalloc(unsigned long size, unsigned long align)
 {
-    void *p;
+    void *p = NULL;
     u32 pad;
 
     ASSERT(!in_irq());
@@ -566,10 +566,10 @@ void *_xmalloc(unsigned long size, unsigned long align)
     if ( !xenpool )
         tlsf_init();
 
-    if ( size >= (PAGE_SIZE - (2*BHDR_OVERHEAD)) )
-        p = xmalloc_whole_pages(size);
-    else
+    if ( size < PAGE_SIZE )
         p = xmem_pool_alloc(size, xenpool);
+    if ( p == NULL )
+        p = xmalloc_whole_pages(size);
 
     /* Add alignment padding. */
     if ( (pad = -(long)p & (align - 1)) != 0 )
@@ -603,7 +603,7 @@ void xfree(void *p)
         ASSERT(!(b->size & 1));
     }
 
-    if ( b->size >= (PAGE_SIZE - (2*BHDR_OVERHEAD)) )
+    if ( b->size >= PAGE_SIZE )
         free_xenheap_pages((void *)b, get_order_from_bytes(b->size));
     else
         xmem_pool_free(p, xenpool);
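For context on why the simplified test in xfree() is safe: the
whole-page path stamps a size of at least a full page into the block
header, while pool blocks are always smaller than a page, so one
comparison routes the free correctly. A rough sketch of that invariant,
assuming a bhdr-style header as used elsewhere in this file (the exact
field names and the alloc_xenheap_pages() call shape here are
assumptions, not the file's verbatim code):

    /* Sketch: whole-page allocations record their rounded size in the
     * leading block header, so xfree() can route on b->size alone. */
    static void *whole_pages_sketch(unsigned long size)
    {
        unsigned int order = get_order_from_bytes(size + BHDR_OVERHEAD);
        struct bhdr *b = alloc_xenheap_pages(order, 0);

        if ( b == NULL )
            return NULL;

        b->size = PAGE_SIZE << order;   /* always >= PAGE_SIZE */
        return (void *)b->ptr.buffer;   /* payload follows the header */
    }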