path: root/xen/common/xmalloc_tlsf.c
author    Keir Fraser <keir.fraser@citrix.com>    2009-05-26 11:05:04 +0100
committer Keir Fraser <keir.fraser@citrix.com>    2009-05-26 11:05:04 +0100
commit    6009f4ddb2cdb8555d2d5e030d351893e971b995 (patch)
tree      6f146a530b5065a1688aa456280f965e1751f2c8 /xen/common/xmalloc_tlsf.c
parent    ff811c2bc429a70798cf65913549c0ddaab70c3d (diff)
Transcendent memory ("tmem") for Xen.
Tmem, when called from a tmem-capable (paravirtualized) guest, makes use of otherwise unutilized ("fallow") memory to create and manage pools of pages that can be accessed from the guest either as "ephemeral" pages or as "persistent" pages. In either case, the pages are not directly addressable by the guest; they are only copied to and from the pools via the tmem interface. Ephemeral pages are a good place for a guest to put recently evicted clean pages that it might need again; these pages can be reclaimed synchronously by Xen for other guests or other uses. Persistent pages are a good place for a guest to put "swap" pages to avoid sending them to disk. These pages retain their data as long as the guest lives, but they count against the guest's memory allocation.

Tmem pages may optionally be compressed and, in certain cases, can be shared between guests. Tmem also handles concurrency well and provides limited QoS settings to combat malicious DoS attempts. Save/restore and live migration support is not yet provided.

Tmem is primarily targeted at an x86 64-bit hypervisor. On a 32-bit x86 hypervisor it has limited functionality and testing due to limitations of the xen heap. Nearly all of tmem is architecture-independent; three routines remain to be ported to ia64, after which it should work on that architecture as well. It is also structured to be portable to non-Xen environments.

Tmem is off by default (for now) and must be enabled with the "tmem" xen boot option (and it does nothing unless a tmem-capable guest is running). The "tmem_compress" boot option enables compression, which takes roughly 10x more CPU but approximately doubles the number of pages that can be stored. Tmem can be controlled via several "xm" commands, and many interesting tmem statistics can be obtained.

A README and internal specification will follow, but lots of useful prose about tmem, as well as Linux patches, can be found at http://oss.oracle.com/projects/tmem .

Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
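As a rough sketch of the put/get model described above, a guest-side consumer of ephemeral pages might look like the code below. All names here (struct tmem_ops, put_page, get_page, the pool/object/index parameters) are hypothetical placeholders for illustration, not the actual tmem hypercall ABI.

/*
 * Illustrative sketch only: a guest-side view of the ephemeral put/get
 * model described in the commit message.  The struct and function names
 * are hypothetical placeholders, not the real tmem interface.
 */
#include <stdint.h>
#include <stddef.h>

struct tmem_ops {
    /* Copy a page-sized buffer into a hypervisor-managed ephemeral pool;
     * the hypervisor may reclaim (drop) it at any time for other uses. */
    int (*put_page)(uint32_t pool_id, uint64_t obj_id, uint32_t index,
                    const void *page);
    /* Copy a previously put page back out; fails if it was reclaimed. */
    int (*get_page)(uint32_t pool_id, uint64_t obj_id, uint32_t index,
                    void *page);
};

/* Stage a clean page that the guest is about to evict from its page cache. */
static void stage_evicted_page(const struct tmem_ops *ops, uint32_t pool_id,
                               uint64_t inode, uint32_t pgoff,
                               const void *page)
{
    /* Ephemeral puts are only a hint; failure simply means a future miss. */
    (void)ops->put_page(pool_id, inode, pgoff, page);
}

/* On a page-cache miss, try tmem before falling back to disk I/O.
 * Returns 1 on a hit (page recovered without disk I/O), 0 on a miss. */
static int refill_from_tmem(const struct tmem_ops *ops, uint32_t pool_id,
                            uint64_t inode, uint32_t pgoff, void *page)
{
    return ops->get_page(pool_id, inode, pgoff, page) == 0;
}

The key property shown is that ephemeral puts may be silently dropped, whereas persistent pages (not shown) retain their data for the guest's lifetime at the cost of counting against its memory allocation.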
Diffstat (limited to 'xen/common/xmalloc_tlsf.c')
-rw-r--r--  xen/common/xmalloc_tlsf.c  |  35
1 file changed, 23 insertions(+), 12 deletions(-)
diff --git a/xen/common/xmalloc_tlsf.c b/xen/common/xmalloc_tlsf.c
index 7a476e8fb7..3f85389e23 100644
--- a/xen/common/xmalloc_tlsf.c
+++ b/xen/common/xmalloc_tlsf.c
@@ -292,7 +292,6 @@ struct xmem_pool *xmem_pool_create(
unsigned long grow_size)
{
struct xmem_pool *pool;
- void *region;
int pool_bytes, pool_order;
BUG_ON(max_size && (max_size < init_size));
@@ -319,11 +318,9 @@ struct xmem_pool *xmem_pool_create(
pool->get_mem = get_mem;
pool->put_mem = put_mem;
strlcpy(pool->name, name, sizeof(pool->name));
- region = get_mem(init_size);
- if ( region == NULL )
- goto out_region;
- ADD_REGION(region, init_size, pool);
- pool->init_region = region;
+
+ /* always obtain init_region lazily now to ensure it is get_mem'd
+ * in the same "context" as all other regions */
spin_lock_init(&pool->lock);
@@ -332,10 +329,6 @@ struct xmem_pool *xmem_pool_create(
spin_unlock(&pool_list_lock);
return pool;
-
- out_region:
- free_xenheap_pages(pool, pool_order);
- return NULL;
}
unsigned long xmem_pool_get_used_size(struct xmem_pool *pool)
@@ -354,13 +347,15 @@ unsigned long xmem_pool_get_total_size(struct xmem_pool *pool)
void xmem_pool_destroy(struct xmem_pool *pool)
{
+ int pool_bytes, pool_order;
+
if ( pool == NULL )
return;
/* User is destroying without ever allocating from this pool */
if ( xmem_pool_get_used_size(pool) == BHDR_OVERHEAD )
{
- pool->put_mem(pool->init_region);
+ ASSERT(!pool->init_region);
pool->used_size -= BHDR_OVERHEAD;
}
@@ -373,7 +368,10 @@ void xmem_pool_destroy(struct xmem_pool *pool)
spin_lock(&pool_list_lock);
list_del_init(&pool->list);
spin_unlock(&pool_list_lock);
- pool->put_mem(pool);
+
+ pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
+ pool_order = get_order_from_bytes(pool_bytes);
+ free_xenheap_pages(pool,pool_order);
}
void *xmem_pool_alloc(unsigned long size, struct xmem_pool *pool)
@@ -382,6 +380,14 @@ void *xmem_pool_alloc(unsigned long size, struct xmem_pool *pool)
int fl, sl;
unsigned long tmp_size;
+ if ( pool->init_region == NULL )
+ {
+ if ( (region = pool->get_mem(pool->init_size)) == NULL )
+ goto out;
+ ADD_REGION(region, pool->init_size, pool);
+ pool->init_region = region;
+ }
+
size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);
/* Rounding up the requested size and calculating fl and sl */
@@ -496,6 +502,11 @@ void xmem_pool_free(void *ptr, struct xmem_pool *pool)
spin_unlock(&pool->lock);
}
+int xmem_pool_maxalloc(struct xmem_pool *pool)
+{
+ return pool->grow_size - (2 * BHDR_OVERHEAD);
+}
+
/*
* Glue for xmalloc().
*/
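The xmalloc_tlsf.c hunks above stop calling get_mem() for the initial region in xmem_pool_create(); instead, xmem_pool_alloc() obtains init_region lazily on the first allocation, so it is get_mem'd in the same "context" as every later grow region and pool creation can no longer fail for lack of an initial region. A minimal standalone sketch of that lazy-first-region pattern, assuming a toy bump allocator and hypothetical names (struct my_pool, pool_alloc), is shown below; it is not the Xen TLSF code.

/*
 * Minimal sketch of lazy first-region allocation in a pool allocator,
 * mirroring the pattern introduced by the patch above.  Toy bump
 * allocator with hypothetical names, not the Xen TLSF implementation.
 */
#include <stdlib.h>
#include <stddef.h>

struct my_pool {
    unsigned char *init_region;     /* NULL until the first allocation */
    size_t init_size;               /* size handed to get_mem() */
    size_t used;                    /* bytes handed out so far */
    void *(*get_mem)(size_t bytes); /* backing allocator, e.g. malloc */
    void (*put_mem)(void *ptr);     /* backing release, e.g. free */
};

static void *pool_alloc(struct my_pool *p, size_t size)
{
    /* Obtain the initial region lazily, in the same context as any later
     * grow regions, so pool creation itself never has to call get_mem(). */
    if ( p->init_region == NULL )
    {
        p->init_region = p->get_mem(p->init_size);
        if ( p->init_region == NULL )
            return NULL;
        p->used = 0;
    }

    if ( p->used + size > p->init_size )
        return NULL;   /* a real pool would get_mem() another region here */

    void *block = p->init_region + p->used;
    p->used += size;
    return block;
}

int main(void)
{
    struct my_pool p = { .init_size = 4096,
                         .get_mem = malloc, .put_mem = free };
    void *a = pool_alloc(&p, 128);  /* first call triggers the lazy get_mem */
    void *b = pool_alloc(&p, 64);
    if ( p.init_region )
        p.put_mem(p.init_region);
    return (a && b) ? 0 : 1;
}

With this ordering, xmem_pool_destroy() in the patch can ASSERT that init_region is still NULL when nothing was ever allocated from the pool, and the pool structure itself is released with free_xenheap_pages() because it was never obtained through get_mem().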