author     Jan Beulich <jbeulich@suse.com>  2011-10-13 10:03:43 +0200
committer  Jan Beulich <jbeulich@suse.com>  2011-10-13 10:03:43 +0200
commit     b829a0ff5794ee5b0f96a0c872f6a4ed7b1007c7 (patch)
tree       0f86021d74a5fc61fc0badd2c142d64cb90ba396 /xen/common/xmalloc_tlsf.c
parent     ddbb942359ebec0d96a73a90495e0b29a19266f9 (diff)
xmalloc: return unused full pages on multi-page allocations
Certain (boot time) allocations are relatively large (particularly when building with a high NR_CPUS), but can also happen to be pretty far from a power-of-two size. Utilize the page allocator's capability (which Linux's lacks) of returning space from higher-order allocations in smaller pieces, and hand back the unused parts immediately.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
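For illustration, the following standalone sketch (not Xen code) walks through the allocation-side arithmetic introduced below: the request is rounded up to whole pages, and each set page-bit of the first unused offset selects one power-of-two chunk to return, smallest first. PAGE_SHIFT, get_order() and the sample size are local assumptions made for the demo; in Xen, p is an absolute address, but since the allocation is aligned to (PAGE_SIZE << pageorder) its low bits equal the offset used here.

/*
 * Standalone sketch of the trailing-page return in xmalloc_whole_pages().
 * PAGE_SHIFT, PAGE_SIZE and get_order() are local stand-ins for Xen's
 * definitions; the 64-byte slack in 'size' loosely models BHDR_OVERHEAD.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Smallest order such that (PAGE_SIZE << order) covers 'bytes'. */
static unsigned int get_order(unsigned long bytes)
{
    unsigned int order = 0;

    while ( (PAGE_SIZE << order) < bytes )
        ++order;
    return order;
}

int main(void)
{
    unsigned long size = 6 * PAGE_SIZE - 64;            /* sample request */
    unsigned int i, order = get_order(size);
    unsigned long used = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
    unsigned long p = used;              /* offset of the first unused byte */

    printf("order-%u allocation (%lu pages), %lu pages used\n",
           order, 1UL << order, used >> PAGE_SHIFT);

    /* Return the unused tail in power-of-two chunks, smallest first. */
    for ( i = 0; i < order; ++i )
        if ( p & (PAGE_SIZE << i) )
        {
            printf("return %lu page(s) at page offset %lu\n",
                   1UL << i, p >> PAGE_SHIFT);
            p += PAGE_SIZE << i;
        }

    return 0;
}

With a request just under 6 pages, an order-3 (8-page) allocation keeps pages 0-5, and the sketch reports one 2-page chunk at page offset 6 being handed back.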
Diffstat (limited to 'xen/common/xmalloc_tlsf.c')
-rw-r--r--  xen/common/xmalloc_tlsf.c  27
1 file changed, 24 insertions(+), 3 deletions(-)
diff --git a/xen/common/xmalloc_tlsf.c b/xen/common/xmalloc_tlsf.c
index 8a82355e09..3235af117c 100644
--- a/xen/common/xmalloc_tlsf.c
+++ b/xen/common/xmalloc_tlsf.c
@@ -527,13 +527,21 @@ static void xmalloc_pool_put(void *p)
 static void *xmalloc_whole_pages(unsigned long size)
 {
     struct bhdr *b;
-    unsigned int pageorder = get_order_from_bytes(size + BHDR_OVERHEAD);
+    unsigned int i, pageorder = get_order_from_bytes(size + BHDR_OVERHEAD);
+    char *p;
 
     b = alloc_xenheap_pages(pageorder, 0);
     if ( b == NULL )
         return NULL;
 
-    b->size = (1 << (pageorder + PAGE_SHIFT));
+    b->size = PAGE_ALIGN(size + BHDR_OVERHEAD);
+    for ( p = (char *)b + b->size, i = 0; i < pageorder; ++i )
+        if ( (unsigned long)p & (PAGE_SIZE << i) )
+        {
+            free_xenheap_pages(p, i);
+            p += PAGE_SIZE << i;
+        }
+
     return (void *)b->ptr.buffer;
 }
 
@@ -611,7 +619,20 @@ void xfree(void *p)
     }
 
     if ( b->size >= PAGE_SIZE )
-        free_xenheap_pages((void *)b, get_order_from_bytes(b->size));
+    {
+        unsigned int i, order = get_order_from_bytes(b->size);
+
+        BUG_ON((unsigned long)b & ((PAGE_SIZE << order) - 1));
+        for ( i = 0; ; ++i )
+        {
+            if ( !(b->size & (PAGE_SIZE << i)) )
+                continue;
+            b->size -= PAGE_SIZE << i;
+            free_xenheap_pages((void *)b + b->size, i);
+            if ( i + 1 >= order )
+                break;
+        }
+    }
     else
         xmem_pool_free(p, xenpool);
 }