about summary refs log tree commit diff stats
path: root/xen/common/spinlock.c
diff options
context:
space:
mode:
author    Keir Fraser <keir@xen.org> 2011-03-26 08:03:21 +0000
committer Keir Fraser <keir@xen.org> 2011-03-26 08:03:21 +0000
commit138a41150a2ded3f0c96a726ba4d28a30fe59b58 (patch)
tree89d45144e290cf764a43ae875e367942f2121972 /xen/common/spinlock.c
parent2a738c3fe110a880835450c48fd9c3b5912f31f3 (diff)
downloadxen-138a41150a2ded3f0c96a726ba4d28a30fe59b58.tar.gz
xen-138a41150a2ded3f0c96a726ba4d28a30fe59b58.tar.bz2
xen-138a41150a2ded3f0c96a726ba4d28a30fe59b58.zip
rwlock: Allow to scale to 2^31-1 readers on x86.
Also rework to match the 'trylock' style of raw function used for spinlocks.
Inspired by Jan Beulich's patch to do similar improved scaling.
Signed-off-by: Keir Fraser <keir@xen.org>
Diffstat (limited to 'xen/common/spinlock.c')
-rw-r--r--  xen/common/spinlock.c  |  53
1 files changed, 47 insertions, 6 deletions
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 2abf89e9b1..a76038c4f2 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -234,7 +234,11 @@ void _spin_unlock_recursive(spinlock_t *lock)
void _read_lock(rwlock_t *lock)
{
check_lock(&lock->debug);
- _raw_read_lock(&lock->raw);
+ while ( unlikely(!_raw_read_trylock(&lock->raw)) )
+ {
+ while ( likely(_raw_rw_is_write_locked(&lock->raw)) )
+ cpu_relax();
+ }
preempt_disable();
}
@@ -243,7 +247,13 @@ void _read_lock_irq(rwlock_t *lock)
ASSERT(local_irq_is_enabled());
local_irq_disable();
check_lock(&lock->debug);
- _raw_read_lock(&lock->raw);
+ while ( unlikely(!_raw_read_trylock(&lock->raw)) )
+ {
+ local_irq_enable();
+ while ( likely(_raw_rw_is_write_locked(&lock->raw)) )
+ cpu_relax();
+ local_irq_disable();
+ }
preempt_disable();
}
@@ -252,11 +262,26 @@ unsigned long _read_lock_irqsave(rwlock_t *lock)
unsigned long flags;
local_irq_save(flags);
check_lock(&lock->debug);
- _raw_read_lock(&lock->raw);
+ while ( unlikely(!_raw_read_trylock(&lock->raw)) )
+ {
+ local_irq_restore(flags);
+ while ( likely(_raw_rw_is_write_locked(&lock->raw)) )
+ cpu_relax();
+ local_irq_save(flags);
+ }
preempt_disable();
return flags;
}
+int _read_trylock(rwlock_t *lock)
+{
+ check_lock(&lock->debug);
+ if ( !_raw_read_trylock(&lock->raw) )
+ return 0;
+ preempt_disable();
+ return 1;
+}
+
void _read_unlock(rwlock_t *lock)
{
preempt_enable();
@@ -280,7 +305,11 @@ void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
void _write_lock(rwlock_t *lock)
{
check_lock(&lock->debug);
- _raw_write_lock(&lock->raw);
+ while ( unlikely(!_raw_write_trylock(&lock->raw)) )
+ {
+ while ( likely(_raw_rw_is_locked(&lock->raw)) )
+ cpu_relax();
+ }
preempt_disable();
}
@@ -289,7 +318,13 @@ void _write_lock_irq(rwlock_t *lock)
ASSERT(local_irq_is_enabled());
local_irq_disable();
check_lock(&lock->debug);
- _raw_write_lock(&lock->raw);
+ while ( unlikely(!_raw_write_trylock(&lock->raw)) )
+ {
+ local_irq_enable();
+ while ( likely(_raw_rw_is_locked(&lock->raw)) )
+ cpu_relax();
+ local_irq_disable();
+ }
preempt_disable();
}
@@ -298,7 +333,13 @@ unsigned long _write_lock_irqsave(rwlock_t *lock)
unsigned long flags;
local_irq_save(flags);
check_lock(&lock->debug);
- _raw_write_lock(&lock->raw);
+ while ( unlikely(!_raw_write_trylock(&lock->raw)) )
+ {
+ local_irq_restore(flags);
+ while ( likely(_raw_rw_is_locked(&lock->raw)) )
+ cpu_relax();
+ local_irq_save(flags);
+ }
preempt_disable();
return flags;
}