/*
 * Two Levels Segregate Fit memory allocator (TLSF)
 * Version 2.3.2
 *
 * Written by Miguel Masmano Tello <mimastel@doctor.upv.es>
 *
 * Thanks to Ismael Ripoll for his suggestions and reviews
 *
 * Copyright (C) 2007, 2006, 2005, 2004
 *
 * This code is released using a dual license strategy: GPL/LGPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of the GNU General Public License Version 2.0
 * Released under the terms of the GNU Lesser General Public License 
 * Version 2.1
 *
 * This is a kernel port of the TLSF allocator.
 * Original code can be found at: http://rtportal.upv.es/rtmalloc/
 * Adapted for Linux by Nitin Gupta (nitingupta910@gmail.com)
 * (http://code.google.com/p/compcache/source/browse/trunk/sub-projects
 *  /allocators/tlsf-kmod r229 dated Aug 27, 2008)
 * Adapted for Xen by Dan Magenheimer (dan.magenheimer@oracle.com)
 */

#include <xen/config.h>
#include <xen/irq.h>
#include <xen/mm.h>
#include <asm/time.h>

#define MAX_POOL_NAME_LEN       16

/* Some IMPORTANT TLSF parameters */
#define MEM_ALIGN       (sizeof(void *) * 2)
#define MEM_ALIGN_MASK  (~(MEM_ALIGN - 1))

#define MAX_FLI         (30)
#define MAX_LOG2_SLI    (5)
#define MAX_SLI         (1 << MAX_LOG2_SLI)

#define FLI_OFFSET      (6)
/* The TLSF structure only manages blocks bigger than 128 bytes */
#define SMALL_BLOCK     (128)
#define REAL_FLI        (MAX_FLI - FLI_OFFSET)
#define MIN_BLOCK_SIZE  (sizeof(struct free_ptr))
#define BHDR_OVERHEAD   (sizeof(struct bhdr) - MIN_BLOCK_SIZE)

#define PTR_MASK        (sizeof(void *) - 1)
#define BLOCK_SIZE_MASK (0xFFFFFFFF - PTR_MASK)

#define GET_NEXT_BLOCK(addr, r) ((struct bhdr *) \
                                ((char *)(addr) + (r)))
#define ROUNDUP_SIZE(r)         (((r) + MEM_ALIGN - 1) & MEM_ALIGN_MASK)
#define ROUNDDOWN_SIZE(r)       ((r) & MEM_ALIGN_MASK)
#define ROUNDUP_PAGE(r)         (((r) + PAGE_SIZE - 1) & PAGE_MASK)

#define BLOCK_STATE     (0x1)
#define PREV_STATE      (0x2)

/* bit 0 of the block size */
#define FREE_BLOCK      (0x1)
#define USED_BLOCK      (0x0)

/* bit 1 of the block size */
#define PREV_FREE       (0x2)
#define PREV_USED       (0x0)

static spinlock_t pool_list_lock;
static struct list_head pool_list_head;

struct free_ptr {
    struct bhdr *prev;
    struct bhdr *next;
};

struct bhdr {
    /* All blocks in a region are linked in order of physical address */
    struct bhdr *prev_hdr;
    /*
     * The size is stored in bytes
     *  bit 0: block is free, if set
     *  bit 1: previous block is free, if set
     */
    u32 size;
    /* Free blocks in individual freelists are linked */
    union {
        struct free_ptr free_ptr;
        u8 buffer[sizeof(struct free_ptr)];
    } ptr;
};
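
/*
 * Illustrative sketch of how a user pointer maps back to its header
 * (this mirrors what xmem_pool_free() does further below): the payload
 * starts at b->ptr.buffer, so the per-block overhead of a used block is
 * BHDR_OVERHEAD = sizeof(struct bhdr) - MIN_BLOCK_SIZE, and
 *
 *     struct bhdr *b = (struct bhdr *)((char *)ptr - BHDR_OVERHEAD);
 *     unsigned long payload = b->size & BLOCK_SIZE_MASK;
 *
 * recovers the header; FREE_BLOCK (bit 0) and PREV_FREE (bit 1) are the
 * state bits packed into the low bits of the size word.
 */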

struct xmem_pool {
    /* First level bitmap (REAL_FLI bits) */
    u32 fl_bitmap;

    /* Second level bitmap */
    u32 sl_bitmap[REAL_FLI];

    /* Free lists */
    struct bhdr *matrix[REAL_FLI][MAX_SLI];

    spinlock_t lock;

    unsigned long init_size;
    unsigned long max_size;
    unsigned long grow_size;

    /* Basic stats */
    unsigned long used_size;
    unsigned long num_regions;

    /* User provided functions for expanding/shrinking pool */
    xmem_pool_get_memory *get_mem;
    xmem_pool_put_memory *put_mem;

    struct list_head list;

    void *init_region;
    char name[MAX_POOL_NAME_LEN];
};

/*
 * Helping functions
 */

/**
 * Returns the indexes (fl, sl) used as a starting point to search for a
 * free block of size r. It also rounds up the requested size (r) to the
 * size of the next list.
 */
static inline void MAPPING_SEARCH(unsigned long *r, int *fl, int *sl)
{
    int t;

    if ( *r < SMALL_BLOCK )
    {
        *fl = 0;
        *sl = *r / (SMALL_BLOCK / MAX_SLI);
    }
    else
    {
        t = (1 << (fls(*r) - 1 - MAX_LOG2_SLI)) - 1;
        *r = *r + t;
        *fl = fls(*r) - 1;
        *sl = (*r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
        *fl -= FLI_OFFSET;
        /*if ((*fl -= FLI_OFFSET) < 0) // FL will be always >0!
         *fl = *sl = 0;
         */
        *r &= ~t;
    }
}
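
/*
 * Worked example (a sketch, assuming fls() is 1-based): for a request of
 * *r = 201 bytes, t = (1 << (fls(201) - 1 - MAX_LOG2_SLI)) - 1 = 3, so
 * *r is bumped to 204; then *fl = fls(204) - 1 = 7,
 * *sl = (204 >> (7 - 5)) - 32 = 19, and after subtracting FLI_OFFSET the
 * result is (fl, sl) = (1, 19) with the request rounded up to 204 bytes.
 */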

/**
 * Returns the indexes (fl, sl) of the free list that holds blocks of
 * size r.
 */
static inline void MAPPING_INSERT(unsigned long r, int *fl, int *sl)
{
    if ( r < SMALL_BLOCK )
    {
        *fl = 0;
        *sl = r / (SMALL_BLOCK / MAX_SLI);
    }
    else
    {
        *fl = fls(r) - 1;
        *sl = (r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
        *fl -= FLI_OFFSET;
    }
}
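
/*
 * Worked example for the small-block path (a sketch): with
 * SMALL_BLOCK = 128 and MAX_SLI = 32, a size of r = 50 maps to fl = 0
 * and sl = 50 / (128 / 32) = 12; sizes below SMALL_BLOCK are simply
 * spread linearly over the 32 second-level lists of row 0.
 */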

/**
 * Returns the first block from a free list that holds blocks larger than
 * or equal to the size indexed by (fl, sl)
 */
static inline struct bhdr *FIND_SUITABLE_BLOCK(struct xmem_pool *p, int *fl,
                                               int *sl)
{
    u32 tmp = p->sl_bitmap[*fl] & (~0 << *sl);
    struct bhdr *b = NULL;

    if ( tmp )
    {
        *sl = ffs(tmp) - 1;
        b = p->matrix[*fl][*sl];
    }
    else
    {
        *fl = ffs(p->fl_bitmap & (~0 << (*fl + 1))) - 1;
        if ( likely(*fl > 0) )
        {
            *sl = ffs(p->sl_bitmap[*fl]) - 1;
            b = p->matrix[*fl][*sl];
        }
    }

    return b;
}
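
/*
 * Worked example (a sketch): with *fl = 1, *sl = 20, sl_bitmap[1] == 0
 * and fl_bitmap == 0x9 (rows 0 and 3 non-empty), tmp is 0, so the search
 * moves to *fl = ffs(0x9 & (~0 << 2)) - 1 = 3, picks the lowest set bit
 * of sl_bitmap[3] as *sl and returns matrix[3][*sl]; a NULL return is
 * treated by xmem_pool_alloc() as "no suitable free block".
 */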

/**
 * Removes the first free block (b) from the free list with indexes (fl, sl).
 */
static inline void EXTRACT_BLOCK_HDR(struct bhdr *b, struct xmem_pool *p, int fl,
                                     int sl)
{
    p->matrix[fl][sl] = b->ptr.free_ptr.next;
    if ( p->matrix[fl][sl] )
    {
        p->matrix[fl][sl]->ptr.free_ptr.prev = NULL;
    }
    else
    {
        clear_bit(sl, &p->sl_bitmap[fl]);
        if ( !p->sl_bitmap[fl] )
            clear_bit(fl, &p->fl_bitmap);
    }
    b->ptr.free_ptr = (struct free_ptr) {NULL, NULL};
}

/**
 * Removes block (b) from the free list with indexes (fl, sl)
 */
static inline void EXTRACT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl,
                                 int sl)
{
    if ( b->ptr.free_ptr.next )
        b->ptr.free_ptr.next->ptr.free_ptr.prev =
            b->ptr.free_ptr.prev;
    if ( b->ptr.free_ptr.prev )
        b->ptr.free_ptr.prev->ptr.free_ptr.next =
            b->ptr.free_ptr.next;
    if ( p->matrix[fl][sl] == b )
    {
        p->matrix[fl][sl] = b->ptr.free_ptr.next;
        if ( !p->matrix[fl][sl] )
        {
            clear_bit(sl, &p->sl_bitmap[fl]);
            if ( !p->sl_bitmap[fl] )
                clear_bit(fl, &p->fl_bitmap);
        }
    }
    b->ptr.free_ptr = (struct free_ptr) {NULL, NULL};
}

/**
 * Inserts block (b) into the free list with indexes (fl, sl)
 */
static inline void INSERT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl, int sl)
{
    b->ptr.free_ptr = (struct free_ptr) {NULL, p->matrix[fl][sl]};
    if ( p->matrix[fl][sl] )
        p->matrix[fl][sl]->ptr.free_ptr.prev = b;
    p->matrix[fl][sl] = b;
    set_bit(sl, &p->sl_bitmap[fl]);
    set_bit(fl, &p->fl_bitmap);
}

/**
 * A region is a virtually contiguous chunk of memory, and a pool is a
 * collection of such regions
 */
static inline void ADD_REGION(void *region, unsigned long region_size,
                              struct xmem_pool *pool)
{
    int fl, sl;
    struct bhdr *b, *lb;

    b = (struct bhdr *)(region);
    b->prev_hdr = NULL;
    b->size = ROUNDDOWN_SIZE(region_size - 2 * BHDR_OVERHEAD)
        | FREE_BLOCK | PREV_USED;
    MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl);
    INSERT_BLOCK(b, pool, fl, sl);
    /* The sentinel block lets us know when we have reached the last block */
    lb = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    lb->prev_hdr = b;
    lb->size = 0 | USED_BLOCK | PREV_FREE;
    pool->used_size += BHDR_OVERHEAD; /* only sentinel block is "used" */
    pool->num_regions++;
}
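
/*
 * Illustrative layout of a freshly added region (a sketch):
 *
 *   [ bhdr b | free payload: ROUNDDOWN_SIZE(region_size - 2*BHDR_OVERHEAD)
 *     bytes  | sentinel bhdr lb, size 0 ]
 *
 * The first block is inserted into the free lists, only the sentinel's
 * header is accounted to used_size, and b and lb are linked through
 * prev_hdr so that freeing can coalesce with the physical neighbour.
 */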

/*
 * TLSF pool-based allocator start.
 */

struct xmem_pool *xmem_pool_create(
    const char *name,
    xmem_pool_get_memory get_mem,
    xmem_pool_put_memory put_mem,
    unsigned long init_size,
    unsigned long max_size,
    unsigned long grow_size)
{
    struct xmem_pool *pool;
    int pool_bytes, pool_order;

    BUG_ON(max_size && (max_size < init_size));

    pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
    pool_order = get_order_from_bytes(pool_bytes);

    pool = (void *)alloc_xenheap_pages(pool_order, 0);
    if ( pool == NULL )
        return NULL;
    memset(pool, 0, pool_bytes);

    /* Round to next page boundary */
    init_size = ROUNDUP_PAGE(init_size);
    max_size = ROUNDUP_PAGE(max_size);
    grow_size = ROUNDUP_PAGE(grow_size);

    /* pool global overhead not included in used size */
    pool->used_size = 0;

    pool->init_size = init_size;
    pool->max_size = max_size;
    pool->grow_size = grow_size;
    pool->get_mem = get_mem;
    pool->put_mem = put_mem;
    strlcpy(pool->name, name, sizeof(pool->name));

    /* Always obtain init_region lazily, to ensure it is get_mem'd
     * in the same "context" as all other regions */

    spin_lock_init(&pool->lock);

    spin_lock(&pool_list_lock);
    list_add_tail(&pool->list, &pool_list_head);
    spin_unlock(&pool_list_lock);

    return pool;
}
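
/*
 * Minimal usage sketch (illustrative only; my_get_pages()/my_put_pages()
 * are hypothetical callbacks that must hand out and take back regions of
 * exactly the requested size, as xmalloc_pool_get() and
 * xmalloc_pool_put() below do for PAGE_SIZE; a max_size of 0 lets the
 * pool grow without bound):
 *
 *     struct xmem_pool *p = xmem_pool_create("example", my_get_pages,
 *                                            my_put_pages, PAGE_SIZE,
 *                                            0, PAGE_SIZE);
 *     void *obj = p ? xmem_pool_alloc(64, p) : NULL;
 *     ...
 *     xmem_pool_free(obj, p);
 *     xmem_pool_destroy(p);
 */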

unsigned long xmem_pool_get_used_size(struct xmem_pool *pool)
{
    return pool->used_size;
}

unsigned long xmem_pool_get_total_size(struct xmem_pool *pool)
{
    unsigned long total;
    total = ROUNDUP_SIZE(sizeof(*pool))
        + pool->init_size
        + (pool->num_regions - 1) * pool->grow_size;
    return total;
}

void xmem_pool_destroy(struct xmem_pool *pool) 
{
    int pool_bytes, pool_order;

    if ( pool == NULL )
        return;

    /* User is destroying without ever allocating from this pool */
    if ( xmem_pool_get_used_size(pool) == BHDR_OVERHEAD )
    {
        ASSERT(!pool->init_region);
        pool->used_size -= BHDR_OVERHEAD;
    }

    /* Check for memory leaks in this pool */
    if ( xmem_pool_get_used_size(pool) )
        printk("memory leak in pool: %s (%p). "
               "%lu bytes still in use.\n",
               pool->name, pool, xmem_pool_get_used_size(pool));

    spin_lock(&pool_list_lock);
    list_del_init(&pool->list);
    spin_unlock(&pool_list_lock);

    pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
    pool_order = get_order_from_bytes(pool_bytes);
    free_xenheap_pages(pool, pool_order);
}

void *xmem_pool_alloc(unsigned long size, struct xmem_pool *pool)
{
    struct bhdr *b, *b2, *next_b, *region;
    int fl, sl;
    unsigned long tmp_size;

    if ( pool->init_region == NULL )
    {
        if ( (region = pool->get_mem(pool->init_size)) == NULL )
            goto out;
        ADD_REGION(region, pool->init_size, pool);
        pool->init_region = region;
    }

    size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);
    /* Rounding up the requested size and calculating fl and sl */

    spin_lock(&pool->lock);
 retry_find:
    MAPPING_SEARCH(&size, &fl, &sl);

    /* Search for a free block */
    if ( !(b = FIND_SUITABLE_BLOCK(pool, &fl, &sl)) )
    {
        /* Not found */
        if ( size > (pool->grow_size - 2 * BHDR_OVERHEAD) )
            goto out_locked;
        if ( pool->max_size && (pool->init_size +
                                pool->num_regions * pool->grow_size
                                > pool->max_size) )
            goto out_locked;
        spin_unlock(&pool->lock);
        if ( (region = pool->get_mem(pool->grow_size)) == NULL )
            goto out;
        spin_lock(&pool->lock);
        ADD_REGION(region, pool->grow_size, pool);
        goto retry_find;
    }
    EXTRACT_BLOCK_HDR(b, pool, fl, sl);

    /*-- found: */
    next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    /* Should the block be split? */
    tmp_size = (b->size & BLOCK_SIZE_MASK) - size;
    if ( tmp_size >= sizeof(struct bhdr) )
    {
        tmp_size -= BHDR_OVERHEAD;
        b2 = GET_NEXT_BLOCK(b->ptr.buffer, size);

        b2->size = tmp_size | FREE_BLOCK | PREV_USED;
        b2->prev_hdr = b;

        next_b->prev_hdr = b2;

        MAPPING_INSERT(tmp_size, &fl, &sl);
        INSERT_BLOCK(b2, pool, fl, sl);

        b->size = size | (b->size & PREV_STATE);
    }
    else
    {
        next_b->size &= (~PREV_FREE);
        b->size &= (~FREE_BLOCK); /* Now it's used */
    }

    pool->used_size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;

    spin_unlock(&pool->lock);
    return (void *)b->ptr.buffer;

    /* Failed alloc */
 out_locked:
    spin_unlock(&pool->lock);

 out:
    return NULL;
}

void xmem_pool_free(void *ptr, struct xmem_pool *pool)
{
    struct bhdr *b, *tmp_b;
    int fl = 0, sl = 0;

    if ( unlikely(ptr == NULL) )
        return;

    b = (struct bhdr *)((char *) ptr - BHDR_OVERHEAD);

    spin_lock(&pool->lock);
    b->size |= FREE_BLOCK;
    pool->used_size -= (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
    b->ptr.free_ptr = (struct free_ptr) { NULL, NULL};
    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    if ( tmp_b->size & FREE_BLOCK )
    {
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, pool, fl, sl);
        b->size += (tmp_b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
    }
    if ( b->size & PREV_FREE )
    {
        tmp_b = b->prev_hdr;
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, pool, fl, sl);
        tmp_b->size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
        b = tmp_b;
    }
    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    tmp_b->prev_hdr = b;

    MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl);

    if ( (b->prev_hdr == NULL) && ((tmp_b->size & BLOCK_SIZE_MASK) == 0) )
    {
        pool->put_mem(b);
        pool->num_regions--;
        pool->used_size -= BHDR_OVERHEAD; /* sentinel block header */
        goto out;
    }

    INSERT_BLOCK(b, pool, fl, sl);

    tmp_b->size |= PREV_FREE;
    tmp_b->prev_hdr = b;
 out:
    spin_unlock(&pool->lock);
}

int xmem_pool_maxalloc(struct xmem_pool *pool)
{
    return pool->grow_size - (2 * BHDR_OVERHEAD);
}

/*
 * Glue for xmalloc().
 */

static struct xmem_pool *xenpool;

static void *xmalloc_pool_get(unsigned long size)
{
    ASSERT(size == PAGE_SIZE);
    return alloc_xenheap_page();
}

static void xmalloc_pool_put(void *p)
{
    free_xenheap_page(p);
}

static void *xmalloc_whole_pages(unsigned long size)
{
    struct bhdr *b;
    unsigned int i, pageorder = get_order_from_bytes(size + BHDR_OVERHEAD);
    char *p;

    b = alloc_xenheap_pages(pageorder, 0);
    if ( b == NULL )
        return NULL;

    b->size = PAGE_ALIGN(size + BHDR_OVERHEAD);
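
    /*
     * The chunk above spans 2^pageorder naturally aligned pages, but only
     * the first b->size (page-aligned) bytes are kept.  Hand the unused
     * tail back in maximal power-of-two pieces: whenever bit
     * (PAGE_SIZE << i) is set in the current offset, an order-i chunk
     * starting at p is naturally aligned (all lower bits have already
     * been cleared) and can be freed.
     */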
    for ( p = (char *)b + b->size, i = 0; i < pageorder; ++i )
        if ( (unsigned long)p & (PAGE_SIZE << i) )
        {
            free_xenheap_pages(p, i);
            p += PAGE_SIZE << i;
        }

    return (void *)b->ptr.buffer;
}

static void tlsf_init(void)
{
    INIT_LIST_HEAD(&pool_list_head);
    spin_lock_init(&pool_list_lock);
    xenpool = xmem_pool_create(
        "xmalloc", xmalloc_pool_get, xmalloc_pool_put,
        PAGE_SIZE, 0, PAGE_SIZE);
    BUG_ON(!xenpool);
}

/*
 * xmalloc()
 */

void *_xmalloc(unsigned long size, unsigned long align)
{
    void *p = NULL;
    u32 pad;

    ASSERT(!in_irq());

    ASSERT((align & (align - 1)) == 0);
    if ( align < MEM_ALIGN )
        align = MEM_ALIGN;
    size += align - MEM_ALIGN;

    if ( !xenpool )
        tlsf_init();

    if ( size < PAGE_SIZE )
        p = xmem_pool_alloc(size, xenpool);
    if ( p == NULL )
        p = xmalloc_whole_pages(size);

    /* Add alignment padding. */
    if ( (pad = -(long)p & (align - 1)) != 0 )
    {
        char *q = (char *)p + pad;
        struct bhdr *b = (struct bhdr *)(q - BHDR_OVERHEAD);
        ASSERT(q > (char *)p);
        b->size = pad | 1;
        p = q;
    }

    ASSERT(((unsigned long)p & (align - 1)) == 0);
    return p;
}
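
/*
 * Worked example of the padding scheme above (a sketch): for align = 64
 * and an underlying allocation p that is 16 bytes past a 64-byte
 * boundary, pad = -(long)p & 63 = 48, so the returned pointer
 * q = p + 48 is 64-byte aligned and the bhdr written just below q
 * records 48 | 1; xfree() sees bit 0 set, subtracts those 48 bytes of
 * padding to recover p, and frees the real block.
 */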

void *_xzalloc(unsigned long size, unsigned long align)
{
    void *p = _xmalloc(size, align);

    return p ? memset(p, 0, size) : p;
}

void xfree(void *p)
{
    struct bhdr *b;

    if ( p == NULL )
        return;

    ASSERT(!in_irq());

    /* Strip alignment padding. */
    b = (struct bhdr *)((char *) p - BHDR_OVERHEAD);
    if ( b->size & 1 )
    {
        p = (char *)p - (b->size & ~1u);
        b = (struct bhdr *)((char *)p - BHDR_OVERHEAD);
        ASSERT(!(b->size & 1));
    }

    if ( b->size >= PAGE_SIZE )
    {
        unsigned int i, order = get_order_from_bytes(b->size);

        BUG_ON((unsigned long)b & ((PAGE_SIZE << order) - 1));
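
        /*
         * Whole-page allocations kept only b->size bytes of the original
         * order-"order" chunk (xmalloc_whole_pages() already returned the
         * tail), so free exactly that region: each set bit
         * (PAGE_SIZE << i) of b->size is peeled off as a naturally
         * aligned order-i chunk, working from the end of the region back
         * towards b.
         */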
        for ( i = 0; ; ++i )
        {
            if ( !(b->size & (PAGE_SIZE << i)) )
                continue;
            b->size -= PAGE_SIZE << i;
            free_xenheap_pages((void *)b + b->size, i);
            if ( i + 1 >= order )
                break;
        }
    }
    else
        xmem_pool_free(p, xenpool);
}