author    Keir Fraser <keir.fraser@citrix.com>    2008-10-16 11:09:50 +0100
committer Keir Fraser <keir.fraser@citrix.com>    2008-10-16 11:09:50 +0100
commit    9736b76d829b2dba07e9941978bdf132fa118e18 (patch)
tree      9b4537fff9c068bb123d1c8fe606cce22e1e0c86 /xen/common
parent    261aa81d0588107b3ce9fdac2d18b164b0359a26 (diff)
xmalloc: use tlsf algorithm
This patch replaces the Xen xmalloc engine with TLSF, an allocation engine that is both more space-efficient and time-bounded, especially for allocation sizes between PAGE_SIZE/2 and PAGE_SIZE.

The file xmalloc.c is deprecated but not yet deleted. A simple change in common/Makefile will switch back to the legacy xmalloc/xfree if needed for testing.

Code adapted from Nitin Gupta's tlsf-kmod, rev 229, found here:
http://code.google.com/p/compcache/source/browse/trunk/sub-projects/allocators/tlsf-kmod
with description and performance details here:
http://code.google.com/p/compcache/wiki/TLSFAllocator
(new Xen code uses 4K=PAGE_SIZE for the region size)

For detailed info on tlsf, see: http://rtportal.upv.es/rtmalloc/

Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
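For readers new to TLSF: free blocks live in a two-level matrix of segregated lists, indexed by a power-of-two first level (fl) and a linear second level (sl), which is what makes the alloc/free paths O(1). The standalone sketch below (not part of the patch) reproduces the size-to-(fl, sl) mapping the new code uses; tlsf_fls() is a portable stand-in for Xen's fls(), and the constants mirror those in xmalloc_tlsf.c:

#include <stdio.h>

#define MAX_LOG2_SLI 5
#define MAX_SLI      (1 << MAX_LOG2_SLI)   /* 32 second-level lists */
#define FLI_OFFSET   6
#define SMALL_BLOCK  128

/* Portable stand-in for Xen's fls(): 1-based index of the most
 * significant set bit, e.g. tlsf_fls(204) == 8. */
static int tlsf_fls(unsigned long v)
{
    int r = 0;
    while (v) { v >>= 1; r++; }
    return r;
}

/* Same computation as MAPPING_INSERT() in the patch below. */
static void mapping(size_t r, int *fl, int *sl)
{
    if (r < SMALL_BLOCK) {
        *fl = 0;
        *sl = r / (SMALL_BLOCK / MAX_SLI);  /* 4-byte granularity */
    } else {
        *fl = tlsf_fls(r) - 1;
        *sl = (r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
        *fl -= FLI_OFFSET;
    }
}

int main(void)
{
    size_t sizes[] = { 64, 201, 1000, 2048 };
    int i, fl, sl;

    for (i = 0; i < 4; i++) {
        mapping(sizes[i], &fl, &sl);
        printf("size %4lu -> (fl=%d, sl=%d)\n",
               (unsigned long)sizes[i], fl, sl);
    }
    return 0;
}

Within one (fl, sl) class, block sizes differ by at most one second-level step, i.e. 1/MAX_SLI of the class size, so internal fragmentation above SMALL_BLOCK is bounded at roughly 3%.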
Diffstat (limited to 'xen/common')
-rw-r--r--   xen/common/Makefile          2
-rw-r--r--   xen/common/xmalloc_tlsf.c  655
2 files changed, 656 insertions, 1 deletion
diff --git a/xen/common/Makefile b/xen/common/Makefile
index e190a55014..5190d789dc 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -25,7 +25,7 @@ obj-y += timer.o
obj-y += trace.o
obj-y += version.o
obj-y += vsprintf.o
-obj-y += xmalloc.o
+obj-y += xmalloc_tlsf.o
obj-y += rcupdate.o
obj-$(perfc) += perfc.o
diff --git a/xen/common/xmalloc_tlsf.c b/xen/common/xmalloc_tlsf.c
new file mode 100644
index 0000000000..8c8fb2d393
--- /dev/null
+++ b/xen/common/xmalloc_tlsf.c
@@ -0,0 +1,655 @@
+/*
+ * Two Levels Segregate Fit memory allocator (TLSF)
+ * Version 2.3.2
+ *
+ * Written by Miguel Masmano Tello <mimastel@doctor.upv.es>
+ *
+ * Thanks to Ismael Ripoll for his suggestions and reviews
+ *
+ * Copyright (C) 2007, 2006, 2005, 2004
+ *
+ * This code is released using a dual license strategy: GPL/LGPL
+ * You can choose the licence that better fits your requirements.
+ *
+ * Released under the terms of the GNU General Public License Version 2.0
+ * Released under the terms of the GNU Lesser General Public License
+ * Version 2.1
+ *
+ * This is a kernel port of the TLSF allocator.
+ * Original code can be found at: http://rtportal.upv.es/rtmalloc/
+ * Adapted for Linux by Nitin Gupta (nitingupta910@gmail.com)
+ * (http://code.google.com/p/compcache/source/browse/trunk/sub-projects
+ * /allocators/tlsf-kmod, r229, dated Aug 27, 2008)
+ * Adapted for Xen by Dan Magenheimer (dan.magenheimer@oracle.com)
+ */
+
+#include <xen/config.h>
+#include <xen/irq.h>
+#include <xen/mm.h>
+#include <asm/time.h>
+
+#define MAX_POOL_NAME_LEN 16
+
+typedef void *(get_memory)(size_t bytes);
+typedef void (put_memory)(void *ptr);
+
+/* Some IMPORTANT TLSF parameters */
+#define MEM_ALIGN (sizeof(void *) * 2)
+#define MEM_ALIGN_MASK (~(MEM_ALIGN - 1))
+
+#define MAX_FLI (30)
+#define MAX_LOG2_SLI (5)
+#define MAX_SLI (1 << MAX_LOG2_SLI)
+
+#define FLI_OFFSET (6)
+/* The TLSF structure only manages blocks bigger than 128 bytes */
+#define SMALL_BLOCK (128)
+#define REAL_FLI (MAX_FLI - FLI_OFFSET)
+#define MIN_BLOCK_SIZE (sizeof(struct free_ptr))
+#define BHDR_OVERHEAD (sizeof(struct bhdr) - MIN_BLOCK_SIZE)
+
+#define PTR_MASK (sizeof(void *) - 1)
+#define BLOCK_SIZE_MASK (0xFFFFFFFF - PTR_MASK)
+
+#define GET_NEXT_BLOCK(addr, r) ((struct bhdr *) \
+ ((char *)(addr) + (r)))
+#define ROUNDUP_SIZE(r) (((r) + MEM_ALIGN - 1) & MEM_ALIGN_MASK)
+#define ROUNDDOWN_SIZE(r) ((r) & MEM_ALIGN_MASK)
+#define ROUNDUP_PAGE(r) (((r) + PAGE_SIZE - 1) & PAGE_MASK)
+
+#define BLOCK_STATE (0x1)
+#define PREV_STATE (0x2)
+
+/* bit 0 of the block size */
+#define FREE_BLOCK (0x1)
+#define USED_BLOCK (0x0)
+
+/* bit 1 of the block size */
+#define PREV_FREE (0x2)
+#define PREV_USED (0x0)
+
+static spinlock_t pool_list_lock;
+static struct list_head pool_list_head;
+
+struct free_ptr {
+ struct bhdr *prev;
+ struct bhdr *next;
+};
+
+struct bhdr {
+ /* All blocks in a region are linked in order of physical address */
+ struct bhdr *prev_hdr;
+ /*
+ * The size is stored in bytes
+ * bit 0: block is free, if set
+ * bit 1: previous block is free, if set
+ */
+ u32 size;
+ /* Free blocks in individual freelists are linked */
+ union {
+ struct free_ptr free_ptr;
+ u8 buffer[sizeof(struct free_ptr)];
+ } ptr;
+};
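+/*
+ * Sizing example (assuming a typical LP64 build; not spelled out in
+ * the patch): struct free_ptr is two 8-byte pointers, so
+ * MIN_BLOCK_SIZE == 16; prev_hdr (8) + size (4) + padding (4) + the
+ * union (16) give sizeof(struct bhdr) == 32, and therefore
+ * BHDR_OVERHEAD == 32 - 16 == 16 bytes of header per in-use block.
+ */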
+
+struct pool {
+ /* First level bitmap (REAL_FLI bits) */
+ u32 fl_bitmap;
+
+ /* Second level bitmap */
+ u32 sl_bitmap[REAL_FLI];
+
+ /* Free lists */
+ struct bhdr *matrix[REAL_FLI][MAX_SLI];
+
+ spinlock_t lock;
+
+ size_t init_size;
+ size_t max_size;
+ size_t grow_size;
+
+ /* Basic stats */
+ size_t used_size;
+ size_t num_regions;
+
+ /* User provided functions for expanding/shrinking pool */
+ get_memory *get_mem;
+ put_memory *put_mem;
+
+ struct list_head list;
+
+ void *init_region;
+ char name[MAX_POOL_NAME_LEN];
+};
+
+/*
+ * Helping functions
+ */
+
+/**
+ * Returns indexes (fl, sl) of the list used to serve a request of
+ * size r. Also rounds the requested size (*r) up to the next list
+ * boundary, so that any block on the returned list is large enough.
+ */
+static inline void MAPPING_SEARCH(size_t *r, int *fl, int *sl)
+{
+ int t;
+
+ if ( *r < SMALL_BLOCK )
+ {
+ *fl = 0;
+ *sl = *r / (SMALL_BLOCK / MAX_SLI);
+ }
+ else
+ {
+ t = (1 << (fls(*r) - 1 - MAX_LOG2_SLI)) - 1;
+ *r = *r + t;
+ *fl = fls(*r) - 1;
+ *sl = (*r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
+ *fl -= FLI_OFFSET;
+        /* fl is always positive here, since *r >= SMALL_BLOCK */
+ *r &= ~t;
+ }
+}
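+/*
+ * Worked example (illustrative): for a request of *r == 201,
+ * t == (1 << (8 - 1 - 5)) - 1 == 3, so *r is first rounded up to 204;
+ * then fl == fls(204) - 1 == 7 and sl == (204 >> 2) - 32 == 19, and
+ * subtracting FLI_OFFSET yields (fl, sl) == (1, 19), with *r left at
+ * 204, the size class actually served.
+ */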
+
+/**
+ * Returns indexes (fl, sl) of the list to which a free block of size
+ * r belongs; used when inserting a block into the matrix. Unlike
+ * MAPPING_SEARCH(), it does not round the size up.
+ */
+static inline void MAPPING_INSERT(size_t r, int *fl, int *sl)
+{
+ if ( r < SMALL_BLOCK )
+ {
+ *fl = 0;
+ *sl = r / (SMALL_BLOCK / MAX_SLI);
+ }
+ else
+ {
+ *fl = fls(r) - 1;
+ *sl = (r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
+ *fl -= FLI_OFFSET;
+ }
+}
+
+/**
+ * Returns the first block from a list holding blocks larger than or
+ * equal to the size class indexed by (fl, sl), updating (fl, sl) to
+ * the list actually used
+ */
+static inline struct bhdr *FIND_SUITABLE_BLOCK(struct pool *p, int *fl,
+ int *sl)
+{
+ u32 tmp = p->sl_bitmap[*fl] & (~0 << *sl);
+ struct bhdr *b = NULL;
+
+ if ( tmp )
+ {
+ *sl = ffs(tmp) - 1;
+ b = p->matrix[*fl][*sl];
+ }
+ else
+ {
+ *fl = ffs(p->fl_bitmap & (~0 << (*fl + 1))) - 1;
+ if ( likely(*fl > 0) )
+ {
+ *sl = ffs(p->sl_bitmap[*fl]) - 1;
+ b = p->matrix[*fl][*sl];
+ }
+ }
+
+ return b;
+}
+
+/**
+ * Remove first free block(b) from free list with indexes (fl, sl).
+ */
+static inline void EXTRACT_BLOCK_HDR(struct bhdr *b, struct pool *p, int fl,
+ int sl)
+{
+ p->matrix[fl][sl] = b->ptr.free_ptr.next;
+ if ( p->matrix[fl][sl] )
+ {
+ p->matrix[fl][sl]->ptr.free_ptr.prev = NULL;
+ }
+ else
+ {
+ clear_bit(sl, &p->sl_bitmap[fl]);
+ if ( !p->sl_bitmap[fl] )
+ clear_bit(fl, &p->fl_bitmap);
+ }
+ b->ptr.free_ptr = (struct free_ptr) {NULL, NULL};
+}
+
+/**
+ * Removes block(b) from free list with indexes (fl, sl)
+ */
+static inline void EXTRACT_BLOCK(struct bhdr *b, struct pool *p, int fl,
+ int sl)
+{
+ if ( b->ptr.free_ptr.next )
+ b->ptr.free_ptr.next->ptr.free_ptr.prev =
+ b->ptr.free_ptr.prev;
+ if ( b->ptr.free_ptr.prev )
+ b->ptr.free_ptr.prev->ptr.free_ptr.next =
+ b->ptr.free_ptr.next;
+ if ( p->matrix[fl][sl] == b )
+ {
+ p->matrix[fl][sl] = b->ptr.free_ptr.next;
+ if ( !p->matrix[fl][sl] )
+ {
+ clear_bit(sl, &p->sl_bitmap[fl]);
+ if ( !p->sl_bitmap[fl] )
+                clear_bit(fl, &p->fl_bitmap);
+ }
+ }
+ b->ptr.free_ptr = (struct free_ptr) {NULL, NULL};
+}
+
+/**
+ * Insert block(b) in free list with indexes (fl, sl)
+ */
+static inline void INSERT_BLOCK(struct bhdr *b, struct pool *p, int fl, int sl)
+{
+ b->ptr.free_ptr = (struct free_ptr) {NULL, p->matrix[fl][sl]};
+ if ( p->matrix[fl][sl] )
+ p->matrix[fl][sl]->ptr.free_ptr.prev = b;
+ p->matrix[fl][sl] = b;
+ set_bit(sl, &p->sl_bitmap[fl]);
+ set_bit(fl, &p->fl_bitmap);
+}
+
+/**
+ * A region is a virtually contiguous chunk of memory, and a pool is
+ * a collection of such regions
+ */
+static inline void ADD_REGION(void *region, size_t region_size,
+ struct pool *pool)
+{
+ int fl, sl;
+ struct bhdr *b, *lb;
+
+ b = (struct bhdr *)(region);
+ b->prev_hdr = NULL;
+ b->size = ROUNDDOWN_SIZE(region_size - 2 * BHDR_OVERHEAD)
+ | FREE_BLOCK | PREV_USED;
+ MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl);
+ INSERT_BLOCK(b, pool, fl, sl);
+ /* The sentinel block: allows us to know when we're in the last block */
+ lb = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
+ lb->prev_hdr = b;
+ lb->size = 0 | USED_BLOCK | PREV_FREE;
+ pool->used_size += BHDR_OVERHEAD; /* only sentinel block is "used" */
+ pool->num_regions++;
+}
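+/*
+ * Resulting layout of a fresh PAGE_SIZE region (illustrative):
+ *
+ *   [bhdr b: FREE, size = PAGE_SIZE - 2*BHDR_OVERHEAD] ...free space...
+ *   [bhdr lb: USED sentinel, size = 0, prev_hdr = b]
+ *
+ * so one region can serve a single allocation of at most
+ * PAGE_SIZE - 2*BHDR_OVERHEAD bytes.
+ */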
+
+/*
+ * Allocator code start
+ */
+
+/**
+ * tlsf_create_memory_pool - create dynamic memory pool
+ * @name: name of the pool
+ * @get_mem: callback function used to expand pool
+ * @put_mem: callback function used to shrink pool
+ * @init_size: initial pool size (in bytes)
+ * @max_size: maximum pool size (in bytes) - set to 0 for no limit
+ * @grow_size: amount of memory (in bytes) added to pool whenever required
+ *
+ * All size values are rounded up to the next page boundary.
+ */
+static void *tlsf_create_memory_pool(const char *name,
+ get_memory get_mem,
+ put_memory put_mem,
+ size_t init_size,
+ size_t max_size,
+ size_t grow_size)
+{
+ struct pool *pool;
+ void *region;
+ int pool_bytes, pool_order;
+
+ BUG_ON(max_size && (max_size < init_size));
+
+ pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
+ pool_order = get_order_from_bytes(pool_bytes);
+
+ pool = (void *)alloc_xenheap_pages(pool_order);
+ if ( pool == NULL )
+ return NULL;
+ memset(pool, 0, pool_bytes);
+
+ /* Round to next page boundary */
+ init_size = ROUNDUP_PAGE(init_size);
+ max_size = ROUNDUP_PAGE(max_size);
+ grow_size = ROUNDUP_PAGE(grow_size);
+
+ /* pool global overhead not included in used size */
+ pool->used_size = 0;
+
+ pool->init_size = init_size;
+ pool->max_size = max_size;
+ pool->grow_size = grow_size;
+ pool->get_mem = get_mem;
+ pool->put_mem = put_mem;
+ pool->name[0] = '\0'; /* ignore name for now */
+ region = get_mem(init_size);
+ if ( region == NULL )
+ goto out_region;
+ ADD_REGION(region, init_size, pool);
+ pool->init_region = region;
+
+ spin_lock_init(&pool->lock);
+
+ spin_lock(&pool_list_lock);
+ list_add_tail(&pool->list, &pool_list_head);
+ spin_unlock(&pool_list_lock);
+
+ return pool;
+
+ out_region:
+ free_xenheap_pages(pool, pool_order);
+ return NULL;
+}
+
+#if 0
+
+/**
+ * tlsf_get_used_size - get memory currently used by given pool
+ *
+ * Used memory includes stored data + metadata + internal fragmentation
+ */
+static size_t tlsf_get_used_size(void *mem_pool)
+{
+ struct pool *pool = (struct pool *)mem_pool;
+ return pool->used_size;
+}
+
+/**
+ * tlsf_get_total_size - get total memory currently allocated for given pool
+ *
+ * This is the total memory currently allocated for this pool which includes
+ * used size + free size.
+ *
+ * (Total - Used) is a good indicator of the allocator's memory efficiency.
+ */
+static size_t tlsf_get_total_size(void *mem_pool)
+{
+ size_t total;
+ struct pool *pool = (struct pool *)mem_pool;
+ total = ROUNDUP_SIZE(sizeof(*pool))
+ + pool->init_size
+ + (pool->num_regions - 1) * pool->grow_size;
+ return total;
+}
+
+/**
+ * tlsf_destroy_memory_pool - cleanup given pool
+ * @mem_pool: Pool to be destroyed
+ *
+ * Data structures associated with the pool are freed. All memory
+ * allocated from the pool must be freed before destroying it.
+ */
+static void tlsf_destroy_memory_pool(void *mem_pool)
+{
+ struct pool *pool;
+
+ if ( mem_pool == NULL )
+ return;
+
+ pool = (struct pool *)mem_pool;
+
+ /* User is destroying without ever allocating from this pool */
+ if ( tlsf_get_used_size(pool) == BHDR_OVERHEAD )
+ {
+ pool->put_mem(pool->init_region);
+ pool->used_size -= BHDR_OVERHEAD;
+ }
+
+ /* Check for memory leaks in this pool */
+ if ( tlsf_get_used_size(pool) )
+ printk("memory leak in pool: %s (%p). "
+ "%lu bytes still in use.\n",
+ pool->name, pool, (long)tlsf_get_used_size(pool));
+
+ spin_lock(&pool_list_lock);
+ list_del_init(&pool->list);
+ spin_unlock(&pool_list_lock);
+ pool->put_mem(pool);
+}
+
+#endif
+
+/**
+ * tlsf_malloc - allocate memory from given pool
+ * @size: no. of bytes
+ * @mem_pool: pool to allocate from
+ */
+static void *tlsf_malloc(size_t size, void *mem_pool)
+{
+ struct pool *pool = (struct pool *)mem_pool;
+ struct bhdr *b, *b2, *next_b, *region;
+ int fl, sl;
+ size_t tmp_size;
+
+ size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);
+ /* Rounding up the requested size and calculating fl and sl */
+
+ spin_lock(&pool->lock);
+ retry_find:
+ MAPPING_SEARCH(&size, &fl, &sl);
+
+ /* Searching a free block */
+ if ( !(b = FIND_SUITABLE_BLOCK(pool, &fl, &sl)) )
+ {
+ /* Not found */
+ if ( size > (pool->grow_size - 2 * BHDR_OVERHEAD) )
+ goto out_locked;
+ if ( pool->max_size && (pool->init_size +
+ pool->num_regions * pool->grow_size
+ > pool->max_size) )
+ goto out_locked;
+ spin_unlock(&pool->lock);
+ if ( (region = pool->get_mem(pool->grow_size)) == NULL )
+ goto out;
+ spin_lock(&pool->lock);
+ ADD_REGION(region, pool->grow_size, pool);
+ goto retry_find;
+ }
+ EXTRACT_BLOCK_HDR(b, pool, fl, sl);
+
+ /*-- found: */
+ next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
+ /* Should the block be split? */
+ tmp_size = (b->size & BLOCK_SIZE_MASK) - size;
+ if ( tmp_size >= sizeof(struct bhdr) )
+ {
+ tmp_size -= BHDR_OVERHEAD;
+ b2 = GET_NEXT_BLOCK(b->ptr.buffer, size);
+
+ b2->size = tmp_size | FREE_BLOCK | PREV_USED;
+ b2->prev_hdr = b;
+
+ next_b->prev_hdr = b2;
+
+ MAPPING_INSERT(tmp_size, &fl, &sl);
+ INSERT_BLOCK(b2, pool, fl, sl);
+
+ b->size = size | (b->size & PREV_STATE);
+ }
+ else
+ {
+ next_b->size &= (~PREV_FREE);
+ b->size &= (~FREE_BLOCK); /* Now it's used */
+ }
+
+ pool->used_size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
+
+ spin_unlock(&pool->lock);
+ return (void *)b->ptr.buffer;
+
+ /* Failed alloc */
+ out_locked:
+ spin_unlock(&pool->lock);
+
+ out:
+ return NULL;
+}
+
+/**
+ * tlsf_free - free memory from given pool
+ * @ptr: address of memory to be freed
+ * @mem_pool: pool to free from
+ */
+static void tlsf_free(void *ptr, void *mem_pool)
+{
+ struct pool *pool = (struct pool *)mem_pool;
+ struct bhdr *b, *tmp_b;
+ int fl = 0, sl = 0;
+
+ if ( unlikely(ptr == NULL) )
+ return;
+
+ b = (struct bhdr *)((char *) ptr - BHDR_OVERHEAD);
+
+ spin_lock(&pool->lock);
+ b->size |= FREE_BLOCK;
+ pool->used_size -= (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
+ b->ptr.free_ptr = (struct free_ptr) { NULL, NULL};
+ tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
+ if ( tmp_b->size & FREE_BLOCK )
+ {
+ MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl);
+ EXTRACT_BLOCK(tmp_b, pool, fl, sl);
+ b->size += (tmp_b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
+ }
+ if ( b->size & PREV_FREE )
+ {
+ tmp_b = b->prev_hdr;
+ MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl);
+ EXTRACT_BLOCK(tmp_b, pool, fl, sl);
+ tmp_b->size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
+ b = tmp_b;
+ }
+ tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
+ tmp_b->prev_hdr = b;
+
+ MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl);
+
+ if ( (b->prev_hdr == NULL) && ((tmp_b->size & BLOCK_SIZE_MASK) == 0) )
+ {
+ pool->put_mem(b);
+ pool->num_regions--;
+ pool->used_size -= BHDR_OVERHEAD; /* sentinel block header */
+ goto out;
+ }
+
+ INSERT_BLOCK(b, pool, fl, sl);
+
+ tmp_b->size |= PREV_FREE;
+ tmp_b->prev_hdr = b;
+ out:
+ spin_unlock(&pool->lock);
+}
+
+/*
+ * Glue for xmalloc().
+ */
+
+static struct pool *xenpool;
+
+static void *tlsf_get_xenheap_page(size_t size)
+{
+ ASSERT(size == PAGE_SIZE);
+ return alloc_xenheap_pages(0);
+}
+
+static void tlsf_put_xenheap_page(void *p)
+{
+    free_xenheap_pages(p, 0);
+}
+
+static void *tlsf_xenheap_malloc_whole_pages(size_t size)
+{
+ struct bhdr *b;
+ unsigned int pageorder = get_order_from_bytes(size + BHDR_OVERHEAD);
+
+ b = alloc_xenheap_pages(pageorder);
+ if ( b == NULL )
+ return NULL;
+
+ b->size = (1 << (pageorder + PAGE_SHIFT));
+ return (void *)b->ptr.buffer;
+}
+
+static void tlsf_xenheap_init(void)
+{
+ INIT_LIST_HEAD(&pool_list_head);
+ spin_lock_init(&pool_list_lock);
+ xenpool = tlsf_create_memory_pool(
+ "", tlsf_get_xenheap_page,
+ tlsf_put_xenheap_page, PAGE_SIZE, 0, PAGE_SIZE);
+ BUG_ON(!xenpool);
+}
+
+/*
+ * xmalloc()
+ */
+
+void *_xmalloc(size_t size, size_t align)
+{
+ void *p;
+ u32 pad;
+
+ ASSERT(!in_irq());
+
+ ASSERT((align & (align - 1)) == 0);
+ if ( align < MEM_ALIGN )
+ align = MEM_ALIGN;
+ size += align - MEM_ALIGN;
+
+ if ( !xenpool )
+ tlsf_xenheap_init();
+
+ if ( size >= (PAGE_SIZE - (2*BHDR_OVERHEAD)) )
+ p = tlsf_xenheap_malloc_whole_pages(size);
+ else
+ p = tlsf_malloc(size, xenpool);
+
+ /* Add alignment padding. */
+ if ( (pad = -(long)p & (align - 1)) != 0 )
+ {
+ char *q = (char *)p + pad;
+ struct bhdr *b = (struct bhdr *)(q - BHDR_OVERHEAD);
+ ASSERT(q > (char *)p);
+ b->size = pad | 1;
+ p = q;
+ }
+
+ ASSERT(((unsigned long)p & (align - 1)) == 0);
+ return p;
+}
+
+void xfree(void *p)
+{
+ struct bhdr *b;
+
+ ASSERT(!in_irq());
+
+ if ( p == NULL )
+ return;
+
+ /* Strip alignment padding. */
+ b = (struct bhdr *)((char *) p - BHDR_OVERHEAD);
+ if ( b->size & 1 )
+ {
+ p = (char *)p - (b->size & ~1u);
+ b = (struct bhdr *)((char *)p - BHDR_OVERHEAD);
+ ASSERT(!(b->size & 1));
+ }
+
+ if ( b->size >= (PAGE_SIZE - (2*BHDR_OVERHEAD)) )
+ free_xenheap_pages((void *)b, get_order_from_bytes(b->size));
+ else
+ tlsf_free(p, xenpool);
+}
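A note on the alignment scheme at the end of the file: _xmalloc() over-allocates by align - MEM_ALIGN, bumps the returned pointer up to the requested alignment, and stores the pad size with bit 0 set where a block header's size field would sit; that is how xfree() finds its way back to the real allocation (a used block always has bit 0 of its size clear, so the marker is unambiguous). Below is a standalone sketch of the same bookkeeping over a static buffer; the flat 8-byte header offset and the names are simplifications, not Xen's exact struct layout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BHDR_OVERHEAD 8   /* simplified: header bytes below each payload */
#define MEM_ALIGN     16

static unsigned char heap[256] __attribute__((aligned(16)));

int main(void)
{
    uintptr_t align = 64;
    uint32_t pad, hdr;

    /* Pretend the allocator returned this payload pointer, with its
     * used-block header occupying the bytes just below it. */
    char *p = (char *)heap + BHDR_OVERHEAD;

    /* _xmalloc()'s padding step: bump p up to the requested alignment
     * and record the pad, with bit 0 set, where a header would sit. */
    pad = -(uintptr_t)p & (align - 1);
    if (pad) {
        char *q = p + pad;
        *(uint32_t *)(q - BHDR_OVERHEAD) = pad | 1;
        p = q;
    }
    assert(((uintptr_t)p & (align - 1)) == 0);

    /* xfree()'s unwind step: bit 0 set means "pad marker", so strip
     * the padding to recover the original payload pointer. */
    hdr = *(uint32_t *)(p - BHDR_OVERHEAD);
    if (hdr & 1)
        p -= hdr & ~1u;

    printf("recovered original payload: %s\n",
           (p == (char *)heap + BHDR_OVERHEAD) ? "yes" : "no");
    return 0;
}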