path: root/xen/common/tmem.c
author    Keir Fraser <keir.fraser@citrix.com>    2010-02-10 09:18:43 +0000
committer Keir Fraser <keir.fraser@citrix.com>    2010-02-10 09:18:43 +0000
commit    b14339379a571840ce040fce563580c09fb0a1f5 (patch)
tree      556719b9ddc32870730ea8657fda6ddcfba67203 /xen/common/tmem.c
parent    948593955a433efb249034849be0313e34acfbfa (diff)
Fix domain reference leaks
Besides two unlikely/rarely hit ones in x86 code, the main offender was tmh_client_from_cli_id(), which didn't even have a counterpart (albeit it had a comment correctly saying that it causes d->refcnt to get incremented).

Unfortunately(?) this required a bit of code restructuring (as I needed to change the code anyway, I also fixed a couple of missing bounds checks which would sooner or later be reported as security vulnerabilities), so I would hope Dan could give it his blessing before it gets applied.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
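To illustrate the pattern the patch establishes (a sketch only, not code taken from the patch): tmh_client_from_cli_id() takes a reference on the client's domain, so each control-path handler must pair it with tmh_client_put() on every exit path, and must bounds-check the caller-supplied pool id before indexing client->pools[]. The wrapper below is hypothetical; only the tmh_* calls, the client_t type, and MAX_POOLS_PER_DOMAIN come from tmem.c.

/* Hypothetical example (not part of this patch) showing the
 * acquire/release discipline the change introduces. */
static int tmemc_example_op(cli_id_t cli_id, uint32_t pool_id)
{
    client_t *client = tmh_client_from_cli_id(cli_id); /* takes d->refcnt */
    int rc = -1;

    if ( client == NULL )
        return -1;                 /* nothing referenced, nothing to put */
    if ( pool_id >= MAX_POOLS_PER_DOMAIN || client->pools[pool_id] == NULL )
        goto out;                  /* bounds check before indexing pools[] */
    rc = 0;                        /* ... operate on client->pools[pool_id] ... */
 out:
    tmh_client_put(client->tmh);   /* drop the reference on every exit path */
    return rc;
}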
Diffstat (limited to 'xen/common/tmem.c')
-rw-r--r--  xen/common/tmem.c  | 164
1 file changed, 105 insertions, 59 deletions
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index eed3192efb..82075b6944 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -912,14 +912,14 @@ static client_t *client_create(cli_id_t cli_id)
return NULL;
}
memset(client,0,sizeof(client_t));
- if ( (client->tmh = tmh_client_init()) == NULL )
+ if ( (client->tmh = tmh_client_init(cli_id)) == NULL )
{
printk("failed... can't allocate host-dependent part of client\n");
if ( client )
tmh_free_infra(client);
return NULL;
}
- tmh_set_client_from_id(client,cli_id);
+ tmh_set_client_from_id(client, client->tmh, cli_id);
client->cli_id = cli_id;
#ifdef __i386__
client->compress = 0;
@@ -1528,7 +1528,7 @@ static NOINLINE int do_tmem_destroy_pool(uint32_t pool_id)
}
static NOINLINE int do_tmem_new_pool(cli_id_t this_cli_id,
- uint32_t this_pool_id, uint32_t flags,
+ uint32_t d_poolid, uint32_t flags,
uint64_t uuid_lo, uint64_t uuid_hi)
{
client_t *client;
@@ -1540,19 +1540,13 @@ static NOINLINE int do_tmem_new_pool(cli_id_t this_cli_id,
int specversion = (flags >> TMEM_POOL_VERSION_SHIFT)
& TMEM_POOL_VERSION_MASK;
pool_t *pool, *shpool;
- int s_poolid, d_poolid, first_unused_s_poolid;
+ int s_poolid, first_unused_s_poolid;
int i;
if ( this_cli_id == CLI_ID_NULL )
- {
- client = tmh_client_from_current();
cli_id = tmh_get_cli_id_from_current();
- } else {
- if ( (client = tmh_client_from_cli_id(this_cli_id)) == NULL)
- return -EPERM;
+ else
cli_id = this_cli_id;
- }
- ASSERT(client != NULL);
printk("tmem: allocating %s-%s tmem pool for %s=%d...",
persistent ? "persistent" : "ephemeral" ,
shared ? "shared" : "private", cli_id_str, cli_id);
@@ -1573,19 +1567,24 @@ static NOINLINE int do_tmem_new_pool(cli_id_t this_cli_id,
}
if ( this_cli_id != CLI_ID_NULL )
{
- d_poolid = this_pool_id;
- if ( client->pools[d_poolid] != NULL )
- return -EPERM;
- d_poolid = this_pool_id;
+ if ( (client = tmh_client_from_cli_id(this_cli_id)) == NULL
+ || d_poolid >= MAX_POOLS_PER_DOMAIN
+ || client->pools[d_poolid] != NULL )
+ goto fail;
}
- else for ( d_poolid = 0; d_poolid < MAX_POOLS_PER_DOMAIN; d_poolid++ )
- if ( client->pools[d_poolid] == NULL )
- break;
- if ( d_poolid >= MAX_POOLS_PER_DOMAIN )
+ else
{
- printk("failed... no more pool slots available for this %s\n",
- client_str);
- goto fail;
+ client = tmh_client_from_current();
+ ASSERT(client != NULL);
+ for ( d_poolid = 0; d_poolid < MAX_POOLS_PER_DOMAIN; d_poolid++ )
+ if ( client->pools[d_poolid] == NULL )
+ break;
+ if ( d_poolid >= MAX_POOLS_PER_DOMAIN )
+ {
+ printk("failed... no more pool slots available for this %s\n",
+ client_str);
+ goto fail;
+ }
}
if ( shared )
{
@@ -1618,6 +1617,8 @@ static NOINLINE int do_tmem_new_pool(cli_id_t this_cli_id,
client->pools[d_poolid] = global_shared_pools[s_poolid];
shared_pool_join(global_shared_pools[s_poolid], client);
pool_free(pool);
+ if ( this_cli_id != CLI_ID_NULL )
+ tmh_client_put(client->tmh);
return d_poolid;
}
}
@@ -1638,6 +1639,8 @@ static NOINLINE int do_tmem_new_pool(cli_id_t this_cli_id,
}
}
client->pools[d_poolid] = pool;
+ if ( this_cli_id != CLI_ID_NULL )
+ tmh_client_put(client->tmh);
list_add_tail(&pool->pool_list, &global_pool_list);
pool->pool_id = d_poolid;
pool->persistent = persistent;
@@ -1647,6 +1650,8 @@ static NOINLINE int do_tmem_new_pool(cli_id_t this_cli_id,

fail:
pool_free(pool);
+ if ( this_cli_id != CLI_ID_NULL )
+ tmh_client_put(client->tmh);
return -EPERM;
}
@@ -1672,6 +1677,7 @@ static int tmemc_freeze_pools(cli_id_t cli_id, int arg)
if ( (client = tmh_client_from_cli_id(cli_id)) == NULL)
return -1;
client_freeze(client,freeze);
+ tmh_client_put(client->tmh);
printk("tmem: all pools %s for %s=%d\n",s,cli_id_str,cli_id);
}
return 0;
@@ -1876,8 +1882,10 @@ static int tmemc_list(cli_id_t cli_id, tmem_cli_va_t buf, uint32_t len,
}
else if ( (client = tmh_client_from_cli_id(cli_id)) == NULL)
return -1;
- else
+ else {
off = tmemc_list_client(client, buf, 0, len, use_long);
+ tmh_client_put(client->tmh);
+ }
return 0;
}
@@ -1925,7 +1933,10 @@ static int tmemc_set_var(cli_id_t cli_id, uint32_t subop, uint32_t arg1)
else if ( (client = tmh_client_from_cli_id(cli_id)) == NULL)
return -1;
else
- tmemc_set_var_one(client, subop, arg1);
+ {
+ tmemc_set_var_one(client, subop, arg1);
+ tmh_client_put(client->tmh);
+ }
return 0;
}
@@ -1941,6 +1952,8 @@ static NOINLINE int tmemc_shared_pool_auth(cli_id_t cli_id, uint64_t uuid_lo,
return 1;
}
client = tmh_client_from_cli_id(cli_id);
+ if ( client == NULL )
+ return -EINVAL;
for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++)
{
if ( (client->shared_auth_uuid[i][0] == uuid_lo) &&
@@ -1949,6 +1962,7 @@ static NOINLINE int tmemc_shared_pool_auth(cli_id_t cli_id, uint64_t uuid_lo,
if ( auth == 0 )
client->shared_auth_uuid[i][0] =
client->shared_auth_uuid[i][1] = -1L;
+ tmh_client_put(client->tmh);
return 1;
}
if ( (auth == 1) && (client->shared_auth_uuid[i][0] == -1L) &&
@@ -1956,11 +1970,15 @@ static NOINLINE int tmemc_shared_pool_auth(cli_id_t cli_id, uint64_t uuid_lo,
free = i;
}
if ( auth == 0 )
+ {
+ tmh_client_put(client->tmh);
return 0;
+ }
if ( auth == 1 && free == -1 )
return -ENOMEM;
client->shared_auth_uuid[free][0] = uuid_lo;
client->shared_auth_uuid[free][1] = uuid_hi;
+ tmh_client_put(client->tmh);
return 1;
}
@@ -1968,10 +1986,12 @@ static NOINLINE int tmemc_save_subop(int cli_id, uint32_t pool_id,
uint32_t subop, tmem_cli_va_t buf, uint32_t arg1)
{
client_t *client = tmh_client_from_cli_id(cli_id);
- pool_t *pool = (client == NULL) ? NULL : client->pools[pool_id];
+ pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
+ ? NULL : client->pools[pool_id];
uint32_t p;
uint64_t *uuid;
pgp_t *pgp, *pgp2;
+ int rc = -1;
switch(subop)
{
@@ -1982,45 +2002,55 @@ static NOINLINE int tmemc_save_subop(int cli_id, uint32_t pool_id,
if ( client->pools[p] != NULL )
break;
if ( p == MAX_POOLS_PER_DOMAIN )
- return 0;
+ {
+ rc = 0;
+ break;
+ }
client->was_frozen = client->frozen;
client->frozen = 1;
if ( arg1 != 0 )
client->live_migrating = 1;
- return 1;
+ rc = 1;
+ break;
case TMEMC_RESTORE_BEGIN:
- ASSERT(client == NULL);
- if ( (client = client_create(cli_id)) == NULL )
- return -1;
- return 1;
+ if ( client == NULL && (client = client_create(cli_id)) != NULL )
+ return 1;
+ break;
case TMEMC_SAVE_GET_VERSION:
- return TMEM_SPEC_VERSION;
+ rc = TMEM_SPEC_VERSION;
+ break;
case TMEMC_SAVE_GET_MAXPOOLS:
- return MAX_POOLS_PER_DOMAIN;
+ rc = MAX_POOLS_PER_DOMAIN;
+ break;
case TMEMC_SAVE_GET_CLIENT_WEIGHT:
- return client->weight == -1 ? -2 : client->weight;
+ rc = client->weight == -1 ? -2 : client->weight;
+ break;
case TMEMC_SAVE_GET_CLIENT_CAP:
- return client->cap == -1 ? -2 : client->cap;
+ rc = client->cap == -1 ? -2 : client->cap;
+ break;
case TMEMC_SAVE_GET_CLIENT_FLAGS:
- return (client->compress ? TMEM_CLIENT_COMPRESS : 0 ) |
- (client->was_frozen ? TMEM_CLIENT_FROZEN : 0 );
+ rc = (client->compress ? TMEM_CLIENT_COMPRESS : 0 ) |
+ (client->was_frozen ? TMEM_CLIENT_FROZEN : 0 );
+ break;
case TMEMC_SAVE_GET_POOL_FLAGS:
if ( pool == NULL )
- return -1;
- return (pool->persistent ? TMEM_POOL_PERSIST : 0) |
- (pool->shared ? TMEM_POOL_SHARED : 0) |
- (pool->pageshift << TMEM_POOL_PAGESIZE_SHIFT);
+ break;
+ rc = (pool->persistent ? TMEM_POOL_PERSIST : 0) |
+ (pool->shared ? TMEM_POOL_SHARED : 0) |
+ (pool->pageshift << TMEM_POOL_PAGESIZE_SHIFT);
+ break;
case TMEMC_SAVE_GET_POOL_NPAGES:
if ( pool == NULL )
- return -1;
- return _atomic_read(pool->pgp_count);
+ break;
+ rc = _atomic_read(pool->pgp_count);
+ break;
case TMEMC_SAVE_GET_POOL_UUID:
if ( pool == NULL )
- return -1;
+ break;
uuid = (uint64_t *)buf.p;
*uuid++ = pool->uuid[0];
*uuid = pool->uuid[1];
- return 0;
+ rc = 0;
case TMEMC_SAVE_END:
client->live_migrating = 0;
if ( !list_empty(&client->persistent_invalidated_list) )
@@ -2028,27 +2058,34 @@ static NOINLINE int tmemc_save_subop(int cli_id, uint32_t pool_id,
&client->persistent_invalidated_list, client_inv_pages)
pgp_free_from_inv_list(client,pgp);
client->frozen = client->was_frozen;
- return 0;
+ rc = 0;
}
- return -1;
+ if ( client )
+ tmh_client_put(client->tmh);
+ return rc;
}

static NOINLINE int tmemc_save_get_next_page(int cli_id, int pool_id,
tmem_cli_va_t buf, uint32_t bufsize)
{
client_t *client = tmh_client_from_cli_id(cli_id);
- pool_t *pool = (client == NULL) ? NULL : client->pools[pool_id];
+ pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
+ ? NULL : client->pools[pool_id];
pgp_t *pgp;
int ret = 0;
struct tmem_handle *h;
unsigned int pagesize = 1 << (pool->pageshift+12);

- if ( pool == NULL )
- return -1;
- if ( is_ephemeral(pool) )
+ if ( pool == NULL || is_ephemeral(pool) )
+ {
+ tmh_client_put(client->tmh);
return -1;
+ }
if ( bufsize < pagesize + sizeof(struct tmem_handle) )
+ {
+ tmh_client_put(client->tmh);
return -ENOMEM;
+ }

tmem_spin_lock(&pers_lists_spinlock);
if ( list_empty(&pool->persistent_page_list) )
@@ -2080,6 +2117,7 @@ static NOINLINE int tmemc_save_get_next_page(int cli_id, int pool_id,

out:
tmem_spin_unlock(&pers_lists_spinlock);
+ tmh_client_put(client->tmh);
return ret;
}
@@ -2094,7 +2132,10 @@ static NOINLINE int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_t buf,
if ( client == NULL )
return 0;
if ( bufsize < sizeof(struct tmem_handle) )
+ {
+ tmh_client_put(client->tmh);
return 0;
+ }
tmem_spin_lock(&pers_lists_spinlock);
if ( list_empty(&client->persistent_invalidated_list) )
goto out;
@@ -2121,6 +2162,7 @@ static NOINLINE int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_t buf,
ret = 1;
out:
tmem_spin_unlock(&pers_lists_spinlock);
+ tmh_client_put(client->tmh);
return ret;
}
@@ -2128,22 +2170,26 @@ static int tmemc_restore_put_page(int cli_id, int pool_id, uint64_t oid,
uint32_t index, tmem_cli_va_t buf, uint32_t bufsize)
{
client_t *client = tmh_client_from_cli_id(cli_id);
- pool_t *pool = (client == NULL) ? NULL : client->pools[pool_id];
+ pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
+ ? NULL : client->pools[pool_id];
+ int rc = pool ? do_tmem_put(pool,oid,index,0,0,0,bufsize,buf.p) : -1;

- if ( pool == NULL )
- return -1;
- return do_tmem_put(pool,oid,index,0,0,0,bufsize,buf.p);
+ if ( client )
+ tmh_client_put(client->tmh);
+ return rc;
}

static int tmemc_restore_flush_page(int cli_id, int pool_id, uint64_t oid,
uint32_t index)
{
client_t *client = tmh_client_from_cli_id(cli_id);
- pool_t *pool = (client == NULL) ? NULL : client->pools[pool_id];
+ pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
+ ? NULL : client->pools[pool_id];
+ int rc = pool ? do_tmem_flush_page(pool, oid, index) : -1;

- if ( pool == NULL )
- return -1;
- return do_tmem_flush_page(pool, oid, index);
+ if ( client )
+ tmh_client_put(client->tmh);
+ return rc;
}

static NOINLINE int do_tmem_control(struct tmem_op *op)