aboutsummaryrefslogtreecommitdiffstats
path: root/tools/memshr
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2009-12-28 09:14:16 +0000
committerKeir Fraser <keir.fraser@citrix.com>2009-12-28 09:14:16 +0000
commitb24834b7d5692627c6beb406a81ada6bc64bc814 (patch)
tree77d42285950c4ea3a597a33a8a7fcf7fa90b6ec7 /tools/memshr
parentcc2a4ccbc5edfb70835e6ae04b5ce595d329470b (diff)
downloadxen-b24834b7d5692627c6beb406a81ada6bc64bc814.tar.gz
xen-b24834b7d5692627c6beb406a81ada6bc64bc814.tar.bz2
xen-b24834b7d5692627c6beb406a81ada6bc64bc814.zip
memshr: Build fixes
* Build memshr/xenpaging on x86/Linux only
* Remove dependency on GCC 4.1+ __sync_*() intrinsics.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Signed-off-by: KUWAMURA Shin'ya <kuwa@jp.fujitsu.com>
Diffstat (limited to 'tools/memshr')
-rw-r--r--tools/memshr/bidir-hash.c26
1 file changed, 21 insertions, 5 deletions
diff --git a/tools/memshr/bidir-hash.c b/tools/memshr/bidir-hash.c
index e421c221df..e3e0e15831 100644
--- a/tools/memshr/bidir-hash.c
+++ b/tools/memshr/bidir-hash.c
@@ -75,7 +75,7 @@ struct __hash
* *_tab, tab_size, size_idx, *_load
* (all writes with wrlock)
*/
- volatile uint32_t nr_ent; /* # entries held in hashtables */
+ uint32_t nr_ent; /* # entries held in hashtables */
struct bucket *key_tab; /* forward mapping hashtable */
struct bucket *value_tab; /* backward mapping hashtable */
struct bucket_lock *key_lock_tab; /* key table bucket locks */
@@ -100,6 +100,21 @@ int __hash_iterator(struct __hash *h,
void *d);
static void hash_resize(struct __hash *h);
+static inline void atomic_add(uint32_t *v, uint32_t i)
+{
+ asm volatile(
+ "lock ; addl %1,%0"
+ :"=m" (*(volatile uint32_t *)v)
+ :"ir" (i), "m" (*(volatile uint32_t *)v) );
+}
+
+static inline void atomic_sub(uint32_t *v, uint32_t i)
+{
+ asm volatile (
+ "lock ; subl %1,%0"
+ : "=m" (*(volatile uint32_t *)v)
+ : "ir" (i), "m" (*(volatile uint32_t *)v) );
+}
#ifdef BIDIR_USE_STDMALLOC
@@ -759,7 +774,6 @@ int __insert(struct __hash *h, __k_t k, __v_t v)
struct bucket *bk, *bv;
struct bucket_lock *bltk, *bltv;
-
/* Allocate new entry before any locks (in case it fails) */
entry = (struct hash_entry*)
alloc_entry(h, sizeof(struct hash_entry));
@@ -797,7 +811,7 @@ int __insert(struct __hash *h, __k_t k, __v_t v)
TWO_BUCKETS_LOCK_WRUNLOCK(h, bltk, k_idx, bltv, v_idx);
/* Book keeping */
- __sync_add_and_fetch(&h->nr_ent, 1);
+ atomic_add(&h->nr_ent, 1);
HASH_LOCK_RDUNLOCK(h);
@@ -931,7 +945,8 @@ found_again:
*pek = e->__prim_next;
*pev = e->__sec_next;
- nr_ent = __sync_sub_and_fetch(&h->nr_ent, 1);
+ atomic_sub(&h->nr_ent, 1);
+ nr_ent = h->nr_ent;
/* read min_load still under the hash lock! */
min_load = h->min_load;
@@ -1079,7 +1094,8 @@ found_again:
*pek = e->__prim_next;
*pev = e->__sec_next;
- nr_ent = __sync_sub_and_fetch(&h->nr_ent, 1);
+ atomic_sub(&h->nr_ent, 1);
+ nr_ent = h->nr_ent;
/* read min_load still under the hash lock! */
min_load = h->min_load;