aboutsummaryrefslogtreecommitdiffstats
path: root/tools/xenstore
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2008-06-10 13:49:54 +0100
committerKeir Fraser <keir.fraser@citrix.com>2008-06-10 13:49:54 +0100
commitdf8a7719e5f972641f6f6730e2bdc2ca5b584c37 (patch)
treeae79e411592cb4d146bbb65cdb3445dc6463b50e /tools/xenstore
parentdeb36502454d30eff815e09a0ade1c732366ff4c (diff)
downloadxen-df8a7719e5f972641f6f6730e2bdc2ca5b584c37.tar.gz
xen-df8a7719e5f972641f6f6730e2bdc2ca5b584c37.tar.bz2
xen-df8a7719e5f972641f6f6730e2bdc2ca5b584c37.zip
xenstore: cleanups
Attached patch uses calloc() for hash allocation. This makes sure the allocated memory is always initialized. Also clean up error handling a bit. On *BSD avoid conflicts with BSD list macros. Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
Diffstat (limited to 'tools/xenstore')
-rw-r--r--tools/xenstore/hashtable.c25
-rw-r--r--tools/xenstore/list.h4
2 files changed, 21 insertions, 8 deletions
diff --git a/tools/xenstore/hashtable.c b/tools/xenstore/hashtable.c
index c96fb675cd..58da7e6e7f 100644
--- a/tools/xenstore/hashtable.c
+++ b/tools/xenstore/hashtable.c
@@ -33,17 +33,22 @@ create_hashtable(unsigned int minsize,
{
struct hashtable *h;
unsigned int pindex, size = primes[0];
+
/* Check requested hashtable isn't too large */
if (minsize > (1u << 30)) return NULL;
+
/* Enforce size as prime */
for (pindex=0; pindex < prime_table_length; pindex++) {
if (primes[pindex] > minsize) { size = primes[pindex]; break; }
}
- h = (struct hashtable *)malloc(sizeof(struct hashtable));
- if (NULL == h) return NULL; /*oom*/
- h->table = (struct entry **)malloc(sizeof(struct entry*) * size);
- if (NULL == h->table) { free(h); return NULL; } /*oom*/
- memset(h->table, 0, size * sizeof(struct entry *));
+
+ h = (struct hashtable *)calloc(1, sizeof(struct hashtable));
+ if (NULL == h)
+ goto err0;
+ h->table = (struct entry **)calloc(size, sizeof(struct entry *));
+ if (NULL == h->table)
+ goto err1;
+
h->tablelength = size;
h->primeindex = pindex;
h->entrycount = 0;
@@ -51,6 +56,11 @@ create_hashtable(unsigned int minsize,
h->eqfn = eqf;
h->loadlimit = (unsigned int)(((uint64_t)size * max_load_factor) / 100);
return h;
+
+err0:
+ free(h);
+err1:
+ return NULL;
}
/*****************************************************************************/
@@ -80,10 +90,9 @@ hashtable_expand(struct hashtable *h)
if (h->primeindex == (prime_table_length - 1)) return 0;
newsize = primes[++(h->primeindex)];
- newtable = (struct entry **)malloc(sizeof(struct entry*) * newsize);
+ newtable = (struct entry **)calloc(newsize, sizeof(struct entry*));
if (NULL != newtable)
{
- memset(newtable, 0, newsize * sizeof(struct entry *));
/* This algorithm is not 'stable'. ie. it reverses the list
* when it transfers entries between the tables */
for (i = 0; i < h->tablelength; i++) {
@@ -149,7 +158,7 @@ hashtable_insert(struct hashtable *h, void *k, void *v)
* element may be ok. Next time we insert, we'll try expanding again.*/
hashtable_expand(h);
}
- e = (struct entry *)malloc(sizeof(struct entry));
+ e = (struct entry *)calloc(1, sizeof(struct entry));
if (NULL == e) { --(h->entrycount); return 0; } /*oom*/
e->h = hash(h,k);
index = indexFor(h->tablelength,e->h);
diff --git a/tools/xenstore/list.h b/tools/xenstore/list.h
index 9cd53413cf..b17d13e0ec 100644
--- a/tools/xenstore/list.h
+++ b/tools/xenstore/list.h
@@ -3,6 +3,10 @@
/* Taken from Linux kernel code, but de-kernelized for userspace. */
#include <stddef.h>
+#undef LIST_HEAD_INIT
+#undef LIST_HEAD
+#undef INIT_LIST_HEAD
+
/*
* These are non-NULL pointers that will result in page faults
* under normal circumstances, used to verify that nobody uses