author     Keir Fraser <keir@xen.org>  2011-03-26 08:27:41 +0000
committer  Keir Fraser <keir@xen.org>  2011-03-26 08:27:41 +0000
commit     1b5efa212fcff6439d91cbc04cda7d57b498d582 (patch)
tree       189c800e94c68107a395aa44f67a5617e1b8967c
parent     138a41150a2ded3f0c96a726ba4d28a30fe59b58 (diff)
Remove spin_barrier_irq(). It is pointless.

Add a barrier-appropriate consistency check to spinlock.c, and add code
comments to explain why barrier operations are more relaxed than
lock-acquisition operations.

Signed-off-by: Keir Fraser <keir@xen.org>
-rw-r--r--  xen/common/event_channel.c   2
-rw-r--r--  xen/common/spinlock.c       49
-rw-r--r--  xen/include/xen/spinlock.h   2
3 files changed, 40 insertions, 13 deletions
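
The pattern spin_barrier() serves (per the "Ensure a lock is quiescent" comment in spinlock.h below) is a writer that updates shared state and then waits for any reader still inside the lock to drain, without ever taking the lock itself. A minimal illustrative sketch, not part of this patch; the lock, state and function names are hypothetical:

/* Hypothetical example of the quiescence pattern spin_barrier() supports. */
static DEFINE_SPINLOCK(example_lock);   /* hypothetical lock */
static int example_state;               /* hypothetical shared state */

void example_reader(void)
{
    unsigned long flags;
    int snapshot;

    spin_lock_irqsave(&example_lock, flags);    /* readers really acquire the lock */
    snapshot = example_state;                   /* consume the shared state */
    spin_unlock_irqrestore(&example_lock, flags);
    (void)snapshot;
}

void example_writer(void)
{
    example_state = 0;              /* publish the update first... */
    spin_barrier(&example_lock);    /* ...then wait until no reader still holds the lock */
}
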
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index c4308ed8f7..558fbb1256 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -417,7 +417,7 @@ static long __evtchn_close(struct domain *d1, int port1)
             if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
                 continue;
             v->virq_to_evtchn[chn1->u.virq] = 0;
-            spin_barrier_irq(&v->virq_lock);
+            spin_barrier(&v->virq_lock);
         }
         break;
 
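
The caller-side change above only swaps spin_barrier_irq() for spin_barrier(); the barrier never acquires the lock, it merely waits until the lock is observed unlocked, as the _spin_barrier() body in the spinlock.c hunks below shows. A simplified restatement of that loop (the non-LOCK_PROFILE path), for reference only:

/* Simplified restatement of _spin_barrier(): spin until the lock is seen unlocked. */
void barrier_sketch(spinlock_t *lock)
{
    /* Never takes the lock, so disabling IRQs around this wait buys nothing. */
    do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
    mb();    /* order the wait before whatever the caller does next */
}
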
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index a76038c4f2..6ca780bce6 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -23,6 +23,24 @@ static void check_lock(struct lock_debug *debug)
     /* A few places take liberties with this. */
     /* BUG_ON(in_irq() && !irq_safe); */
 
+    /*
+     * We partition locks into IRQ-safe (always held with IRQs disabled) and
+     * IRQ-unsafe (always held with IRQs enabled) types. The convention for
+     * every lock must be consistently observed else we can deadlock in
+     * IRQ-context rendezvous functions (a rendezvous which gets every CPU
+     * into IRQ context before any CPU is released from the rendezvous).
+     *
+     * If we can mix IRQ-disabled and IRQ-enabled callers, the following can
+     * happen:
+     *  * Lock is held by CPU A, with IRQs enabled
+     *  * CPU B is spinning on same lock, with IRQs disabled
+     *  * Rendezvous starts -- CPU A takes interrupt and enters rendezvous spin
+     *  * DEADLOCK -- CPU B will never enter rendezvous, CPU A will never exit
+     *    the rendezvous, and will hence never release the lock.
+     *
+     * To guard against this subtle bug we latch the IRQ safety of every
+     * spinlock in the system, on first use.
+     */
     if ( unlikely(debug->irq_safe != irq_safe) )
     {
         int seen = cmpxchg(&debug->irq_safe, -1, irq_safe);
@@ -30,6 +48,25 @@ static void check_lock(struct lock_debug *debug)
     }
 }
 
+static void check_barrier(struct lock_debug *debug)
+{
+    if ( unlikely(atomic_read(&spin_debug) <= 0) )
+        return;
+
+    /*
+     * For a barrier, we have a relaxed IRQ-safety-consistency check.
+     *
+     * It is always safe to spin at the barrier with IRQs enabled -- that does
+     * not prevent us from entering an IRQ-context rendezvous, and nor are
+     * we preventing anyone else from doing so (since we do not actually
+     * acquire the lock during a barrier operation).
+     *
+     * However, if we spin on an IRQ-unsafe lock with IRQs disabled then that
+     * is clearly wrong, for the same reason outlined in check_lock() above.
+     */
+    BUG_ON(!local_irq_is_enabled() && (debug->irq_safe == 0));
+}
+
 void spin_debug_enable(void)
 {
     atomic_inc(&spin_debug);
@@ -171,7 +208,7 @@ void _spin_barrier(spinlock_t *lock)
     s_time_t block = NOW();
     u64      loop = 0;
 
-    check_lock(&lock->debug);
+    check_barrier(&lock->debug);
     do { mb(); loop++;} while ( _raw_spin_is_locked(&lock->raw) );
     if (loop > 1)
     {
@@ -179,20 +216,12 @@ void _spin_barrier(spinlock_t *lock)
         lock->profile.block_cnt++;
     }
 #else
-    check_lock(&lock->debug);
+    check_barrier(&lock->debug);
     do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
 #endif
     mb();
 }
 
-void _spin_barrier_irq(spinlock_t *lock)
-{
-    unsigned long flags;
-    local_irq_save(flags);
-    _spin_barrier(lock);
-    local_irq_restore(flags);
-}
-
 int _spin_trylock_recursive(spinlock_t *lock)
 {
     int cpu = smp_processor_id();
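
Taken together, the two debug checks encode the rule the new comments describe: check_lock() latches, on first use, whether a lock's critical sections run with IRQs disabled and BUGs on any later mismatch, while check_barrier() only rejects the one combination that can deadlock a rendezvous, namely waiting with IRQs disabled on a lock whose holders keep IRQs enabled. A standalone model of that logic (not the Xen code itself; the struct and function names are illustrative):

/* Illustrative model: the latched check (check_lock) vs. the relaxed check (check_barrier). */
struct lock_debug_model {
    int irq_safe;    /* -1 = not yet latched, 1 = held with IRQs off, 0 = held with IRQs on */
};

static void model_check_lock(struct lock_debug_model *d, int irqs_off)
{
    /* Latch the IRQ discipline on first use; any later caller must match it. */
    int seen = cmpxchg(&d->irq_safe, -1, irqs_off);
    BUG_ON(seen == !irqs_off);
}

static void model_check_barrier(struct lock_debug_model *d, int irqs_off)
{
    /* Only waiting with IRQs off on an IRQ-unsafe lock can deadlock a rendezvous. */
    BUG_ON(irqs_off && (d->irq_safe == 0));
}
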
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 6ebe646c19..dd1550ca58 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -144,7 +144,6 @@ void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
 int _spin_is_locked(spinlock_t *lock);
 int _spin_trylock(spinlock_t *lock);
 void _spin_barrier(spinlock_t *lock);
-void _spin_barrier_irq(spinlock_t *lock);
 
 int _spin_trylock_recursive(spinlock_t *lock);
 void _spin_lock_recursive(spinlock_t *lock);
@@ -191,7 +190,6 @@ int _rw_is_write_locked(rwlock_t *lock);
 
 /* Ensure a lock is quiescent between two critical operations. */
 #define spin_barrier(l) _spin_barrier(l)
-#define spin_barrier_irq(l) _spin_barrier_irq(l)
 
 /*
  * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be