aboutsummaryrefslogtreecommitdiffstats
path: root/libs
diff options
context:
space:
mode:
authorJim Lawson <ucbjrl@berkeley.edu>2018-08-28 08:03:40 -0700
committerGitHub <noreply@github.com>2018-08-28 08:03:40 -0700
commit036e3f9c1b62f42812cd3bdd933af7d6a8e2209c (patch)
treee93f96e0ecaf9e760936b4f475176a790811ab9c /libs
parent604b5d4e2026ffcb726ed65e4edbc6dbcec5f64f (diff)
parentcf2ea218998ed3bbe6ca079d38cfe415324a6ddd (diff)
downloadyosys-036e3f9c1b62f42812cd3bdd933af7d6a8e2209c.tar.gz
yosys-036e3f9c1b62f42812cd3bdd933af7d6a8e2209c.tar.bz2
yosys-036e3f9c1b62f42812cd3bdd933af7d6a8e2209c.zip
Merge pull request #4 from YosysHQ/master
merge with YosysHQ master
Diffstat (limited to 'libs')
0 files changed, 0 insertions, 0 deletions
a id='n89' href='#n89'>89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299
/* 
 ****************************************************************************
 * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
 ****************************************************************************
 *
 *        File: xmalloc.c
 *      Author: Grzegorz Milos (gm281@cam.ac.uk)
 *              Samuel Thibault (samuel.thibault@eu.citrix.com)
 *     Changes: 
 *              
 *        Date: Aug 2005
 *              Jan 2008
 * 
 * Environment: Xen Minimal OS
 * Description: simple memory allocator
 *
 ****************************************************************************
 * Simple allocator for Mini-os.  If larger than a page, simply use the
 * page-order allocator.
 *
 * Copy of the allocator for Xen by Rusty Russell:
 * Copyright (C) 2005 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <os.h>
#include <mm.h>
#include <types.h>
#include <lib.h>
#include <list.h>
#include <xmalloc.h>

#ifndef HAVE_LIBC
static LIST_HEAD(freelist);
/* static spinlock_t freelist_lock = SPIN_LOCK_UNLOCKED; */

/*
 * Header placed at the start of every block managed by this allocator.
 * While the block is free it is linked into 'freelist'; while allocated,
 * both list pointers are set to NULL so xfree() can detect double frees.
 */
struct xmalloc_hdr
{
    /* Total including this hdr, unused padding and second hdr. */
    size_t size;
    /* Free-list linkage; NULL/NULL while the block is allocated. */
    struct list_head freelist;
} __cacheline_aligned;

/* Unused padding data between the two hdrs. */

/*
 * Written immediately before the pointer returned to the caller.
 * xfree() reads hdr_size to walk back from the data pointer to the
 * xmalloc_hdr that precedes it (possibly with alignment padding between).
 */
struct xmalloc_pad
{
    /* Size including both hdrs. */
    size_t hdr_size;
};

/*
 * Reserve 'size' bytes of the 'block'-byte region starting at hdr.
 * If the unused tail is large enough to be tracked, it is placed back
 * on the free list as its own block; otherwise the whole region stays
 * with this allocation.  hdr->size is updated to the amount kept.
 */
static void maybe_split(struct xmalloc_hdr *hdr, size_t size, size_t block)
{
    size_t remainder = block - size;
    size_t min_block =
        2 * (sizeof(struct xmalloc_hdr) + sizeof(struct xmalloc_pad));

    if ( remainder < min_block )
    {
        /* Tail too small to carry its own headers: keep it here. */
        size = block;
    }
    else
    {
        struct xmalloc_hdr *tail =
            (struct xmalloc_hdr *)((unsigned long)hdr + size);
        tail->size = remainder;
        /* spin_lock_irqsave(&freelist_lock, flags); */
        list_add(&tail->freelist, &freelist);
        /* spin_unlock_irqrestore(&freelist_lock, flags); */
    }

    hdr->size = size;
    /* Poison list pointers so xfree() can spot a double free. */
    hdr->freelist.next = hdr->freelist.prev = NULL;
}

/*
 * Allocate one fresh page, reserve the first 'size' bytes of it and
 * free-list any usable remainder.  Returns the page's header, or NULL
 * if the page allocator is out of memory.
 */
static struct xmalloc_hdr *xmalloc_new_page(size_t size)
{
    struct xmalloc_hdr *page_hdr;
    /* unsigned long flags; */

    page_hdr = (struct xmalloc_hdr *)alloc_page();
    if ( page_hdr != NULL )
        maybe_split(page_hdr, size, PAGE_SIZE);

    return page_hdr;
}

/* Return size, increased to alignment with align (align must be a
 * power of two). */
static inline size_t align_up(size_t size, size_t align)
{
    size_t mask = align - 1;
    return (size + mask) & ~mask;
}

/* Big object?  Just use the page allocator.
 *
 * Returns a pointer to 'size' usable bytes aligned to 'align', or NULL
 * on allocation failure.  The whole power-of-two page span is recorded
 * in the header so xfree() can hand it straight back to free_pages().
 */
static void *xmalloc_whole_pages(size_t size, size_t align)
{
    struct xmalloc_hdr *hdr;
    struct xmalloc_pad *pad;
    unsigned int pageorder;
    void *ret;
    /* Room for headers */
    size_t hdr_size = sizeof(struct xmalloc_hdr) + sizeof(struct xmalloc_pad);
    /* Align for actual beginning of data */
    hdr_size = align_up(hdr_size, align);

    pageorder = get_order(hdr_size + size);

    hdr = (struct xmalloc_hdr *)alloc_pages(pageorder);
    if ( hdr == NULL )
        return NULL;

    /* Shift in size_t: '1 << (pageorder + PAGE_SHIFT)' is evaluated in
     * (signed) int, which is undefined once the shift reaches the sign
     * bit and truncates the result on 64-bit size_t. */
    hdr->size = (size_t)1 << (pageorder + PAGE_SHIFT);
    /* Debugging aid: xfree() checks these to catch double frees. */
    hdr->freelist.next = hdr->freelist.prev = NULL;

    /* Data begins hdr_size bytes in; the pad immediately before it
     * records how far back the header lives. */
    ret = (char *)hdr + hdr_size;
    pad = (struct xmalloc_pad *)ret - 1;
    pad->hdr_size = hdr_size;
    return ret;
}

/*
 * Allocate 'size' bytes whose data pointer is aligned to 'align' (a
 * power of two).  Small requests are carved out of free-list blocks;
 * requests that (with header overhead) reach a page go straight to the
 * page allocator.  Returns NULL on out-of-memory; release with xfree().
 *
 * Fix vs. original: if the free-list search misses and
 * xmalloc_new_page() fails, the original computed data_begin from a
 * NULL hdr and wrote pad->hdr_size through it — a NULL-page
 * dereference.  We now return NULL on that path.
 */
void *_xmalloc(size_t size, size_t align)
{
    struct xmalloc_hdr *i, *hdr = NULL;
    uintptr_t data_begin;
    size_t hdr_size;
    /* unsigned long flags; */

    hdr_size = sizeof(struct xmalloc_hdr) + sizeof(struct xmalloc_pad);
    /* Align on headers requirements. */
    align = align_up(align, __alignof__(struct xmalloc_hdr));
    align = align_up(align, __alignof__(struct xmalloc_pad));

    /* For big allocs, give them whole pages. */
    if ( size + align_up(hdr_size, align) >= PAGE_SIZE )
        return xmalloc_whole_pages(size, align);

    /* Search free list for the first block that can hold the aligned data. */
    /* spin_lock_irqsave(&freelist_lock, flags); */
    list_for_each_entry( i, &freelist, freelist )
    {
        data_begin = align_up((uintptr_t)i + hdr_size, align);

        if ( data_begin + size > (uintptr_t)i + i->size )
            continue;

        list_del(&i->freelist);
        /* spin_unlock_irqrestore(&freelist_lock, flags); */

        /* Bytes between the block start and the header we will hand out
         * (alignment slack before the data). */
        uintptr_t size_before = (data_begin - hdr_size) - (uintptr_t)i;

        if (size_before >= 2 * hdr_size) {
            /* Worth splitting the beginning: keep the leading fragment
             * on the free list, allocate from the aligned remainder. */
            struct xmalloc_hdr *new_i = (void*)(data_begin - hdr_size);
            new_i->size = i->size - size_before;
            i->size = size_before;
            /* spin_lock_irqsave(&freelist_lock, flags); */
            list_add(&i->freelist, &freelist);
            /* spin_unlock_irqrestore(&freelist_lock, flags); */
            i = new_i;
        }
        /* Return any unused tail of the block to the free list. */
        maybe_split(i, (data_begin + size) - (uintptr_t)i, i->size);
        hdr = i;
        break;
    }

    if (!hdr) {
        /* spin_unlock_irqrestore(&freelist_lock, flags); */

        /* Alloc a new page and return from that. */
        hdr = xmalloc_new_page(align_up(hdr_size, align) + size);
        if ( hdr == NULL )
            return NULL;  /* OOM: must not compute data_begin from NULL */
        data_begin = (uintptr_t)hdr + align_up(hdr_size, align);
    }

    /* Stash the distance back to the header just before the data so
     * xfree() can recover it. */
    struct xmalloc_pad *pad = (struct xmalloc_pad *) data_begin - 1;
    pad->hdr_size = data_begin - (uintptr_t)hdr;
    BUG_ON(data_begin % align);
    return (void*)data_begin;
}

void xfree(const void *p)
{
    /* unsigned long flags; */
    struct xmalloc_hdr *i, *tmp, *hdr;
    struct xmalloc_pad *pad;

    if ( p == NULL )
        return;

    pad = (struct xmalloc_pad *)p - 1;
    hdr = (struct xmalloc_hdr *)((char *)p - pad->hdr_size);

    /* We know hdr will be on same page. */
    if(((long)p & PAGE_MASK) != ((long)hdr & PAGE_MASK))
    {
        printk("Header should be on the same page\n");
        *(int*)0=0;
    }

    /* Not previously freed. */
    if(hdr->freelist.next || hdr->freelist.prev)
    {
        printk("Should not be previously freed\n");
        *(int*)0=0;
    }

    /* Big allocs free directly. */
    if ( hdr->size >= PAGE_SIZE )
    {
        free_pages(hdr, get_order(hdr->size));
        return;
    }

    /* Merge with other free block, or put in list. */
    /* spin_lock_irqsave(&freelist_lock, flags); */
    list_for_each_entry_safe( i, tmp, &freelist, freelist )
    {
        unsigned long _i   = (unsigned long)i;