author     Keir Fraser <keir@xen.org>    2011-03-26 08:27:41 +0000
committer  Keir Fraser <keir@xen.org>    2011-03-26 08:27:41 +0000
commit     1b5efa212fcff6439d91cbc04cda7d57b498d582
tree       189c800e94c68107a395aa44f67a5617e1b8967c /xen/common/spinlock.c
parent     138a41150a2ded3f0c96a726ba4d28a30fe59b58
Remove spin_barrier_irq(). It is pointless.
Add a barrier-appropriate consistency check to spinlock.c, and add
code comments to explain why barrier operations are more relaxed than
lock-acquisition operations.
Signed-off-by: Keir Fraser <keir@xen.org>
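Why the wrapper is pointless: a spin barrier only waits until the lock is observed unlocked, it never acquires the lock, so masking interrupts around the wait changes nothing about its correctness. Below is a minimal stand-alone C11 sketch of that argument (an illustration only, not Xen code; `lock_held`, `irqs_enabled` and the `*_model()` names are invented here); the real `_spin_barrier()` and the removed `_spin_barrier_irq()` appear in the diff further down.

```c
/*
 * Hypothetical stand-alone model (not Xen code): shows that wrapping a
 * spin barrier in local_irq_save()/local_irq_restore() adds nothing,
 * because a barrier never acquires the lock in the first place.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool lock_held;        /* stand-in for lock->raw          */
static bool irqs_enabled = true;     /* stand-in for the local IRQ flag */

/* Barrier: wait until the lock is observed free; never take it. */
static void spin_barrier_model(void)
{
    while (atomic_load(&lock_held))
        ;                            /* the real code spins with mb()   */
    atomic_thread_fence(memory_order_seq_cst);
}

/* What the removed wrapper did: the identical wait, with IRQs masked. */
static void spin_barrier_irq_model(void)
{
    bool saved = irqs_enabled;       /* local_irq_save(flags)           */
    irqs_enabled = false;
    spin_barrier_model();
    irqs_enabled = saved;            /* local_irq_restore(flags)        */
}

int main(void)
{
    atomic_store(&lock_held, false); /* nothing is holding the lock     */
    spin_barrier_model();            /* fine, and IRQs stay enabled     */
    spin_barrier_irq_model();        /* same effect, plus a pointless IRQ mask */
    puts("both barriers returned");
    return 0;
}
```

If anything, the wrapper makes matters worse: spinning with IRQs disabled on a lock whose holders run with IRQs enabled is exactly the hazard the new check_barrier() is there to catch.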
Diffstat (limited to 'xen/common/spinlock.c')
 xen/common/spinlock.c (-rw-r--r--) | 49
 1 file changed, 39 insertions(+), 10 deletions(-)
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index a76038c4f2..6ca780bce6 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -23,6 +23,24 @@ static void check_lock(struct lock_debug *debug)
     /* A few places take liberties with this. */
     /* BUG_ON(in_irq() && !irq_safe); */
 
+    /*
+     * We partition locks into IRQ-safe (always held with IRQs disabled) and
+     * IRQ-unsafe (always held with IRQs enabled) types. The convention for
+     * every lock must be consistently observed else we can deadlock in
+     * IRQ-context rendezvous functions (a rendezvous which gets every CPU
+     * into IRQ context before any CPU is released from the rendezvous).
+     *
+     * If we can mix IRQ-disabled and IRQ-enabled callers, the following can
+     * happen:
+     *  * Lock is held by CPU A, with IRQs enabled
+     *  * CPU B is spinning on same lock, with IRQs disabled
+     *  * Rendezvous starts -- CPU A takes interrupt and enters rendezvous spin
+     *  * DEADLOCK -- CPU B will never enter rendezvous, CPU A will never exit
+     *    the rendezvous, and will hence never release the lock.
+     *
+     * To guard against this subtle bug we latch the IRQ safety of every
+     * spinlock in the system, on first use.
+     */
     if ( unlikely(debug->irq_safe != irq_safe) )
     {
         int seen = cmpxchg(&debug->irq_safe, -1, irq_safe);
@@ -30,6 +48,25 @@ static void check_lock(struct lock_debug *debug)
     }
 }
 
+static void check_barrier(struct lock_debug *debug)
+{
+    if ( unlikely(atomic_read(&spin_debug) <= 0) )
+        return;
+
+    /*
+     * For a barrier, we have a relaxed IRQ-safety-consistency check.
+     *
+     * It is always safe to spin at the barrier with IRQs enabled -- that does
+     * not prevent us from entering an IRQ-context rendezvous, and nor are
+     * we preventing anyone else from doing so (since we do not actually
+     * acquire the lock during a barrier operation).
+     *
+     * However, if we spin on an IRQ-unsafe lock with IRQs disabled then that
+     * is clearly wrong, for the same reason outlined in check_lock() above.
+     */
+    BUG_ON(!local_irq_is_enabled() && (debug->irq_safe == 0));
+}
+
 void spin_debug_enable(void)
 {
     atomic_inc(&spin_debug);
@@ -171,7 +208,7 @@ void _spin_barrier(spinlock_t *lock)
     s_time_t block = NOW();
     u64 loop = 0;
 
-    check_lock(&lock->debug);
+    check_barrier(&lock->debug);
     do { mb(); loop++;} while ( _raw_spin_is_locked(&lock->raw) );
     if (loop > 1)
     {
@@ -179,20 +216,12 @@ void _spin_barrier(spinlock_t *lock)
         lock->profile.block_cnt++;
     }
 #else
-    check_lock(&lock->debug);
+    check_barrier(&lock->debug);
     do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
 #endif
     mb();
 }
 
-void _spin_barrier_irq(spinlock_t *lock)
-{
-    unsigned long flags;
-    local_irq_save(flags);
-    _spin_barrier(lock);
-    local_irq_restore(flags);
-}
-
 int _spin_trylock_recursive(spinlock_t *lock)
 {
     int cpu = smp_processor_id();
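To make the two consistency checks concrete, here is a minimal single-threaded C model. It is a sketch under simplifying assumptions, not the Xen implementation: the real code latches `irq_safe` with `cmpxchg()` so that concurrent first uses race safely, gates both checks on the `spin_debug` atomic, and reports violations with `BUG_ON()` rather than `assert()`.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* -1 = not yet latched, 0 = seen with IRQs enabled, 1 = seen with IRQs disabled */
struct lock_debug { int irq_safe; };

static bool irqs_disabled;      /* stand-in for !local_irq_is_enabled() */

/* Acquisition-time check: latch the IRQ state on first use, then insist
 * that every later acquisition matches it. */
static void check_lock_model(struct lock_debug *d)
{
    int irq_safe = irqs_disabled ? 1 : 0;

    if (d->irq_safe == -1)      /* the real code latches with cmpxchg() */
        d->irq_safe = irq_safe;
    assert(d->irq_safe == irq_safe);
}

/* Barrier-time check: reject only the one dangerous combination, namely
 * spinning with IRQs disabled on a lock that is taken with IRQs enabled. */
static void check_barrier_model(struct lock_debug *d)
{
    assert(!(irqs_disabled && d->irq_safe == 0));
}

int main(void)
{
    struct lock_debug d = { .irq_safe = -1 };

    irqs_disabled = false;
    check_lock_model(&d);       /* latches the lock as IRQ-unsafe (0)      */
    check_barrier_model(&d);    /* fine: barrier spin with IRQs enabled    */

    irqs_disabled = true;
    /* check_lock_model(&d);    would trip: mixed IRQ-enabled/disabled use */
    /* check_barrier_model(&d); would trip: IRQs-off spin on unsafe lock   */

    puts("checks passed");
    return 0;
}
```

The acquisition-time check pins each lock to a single IRQ discipline on first use and then insists every later acquisition matches it, while the barrier-time check rejects only the one combination that can feed the rendezvous deadlock described in the comment above: spinning with IRQs disabled on a lock that is held with IRQs enabled.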