aboutsummaryrefslogtreecommitdiffstats
path: root/xen/common/spinlock.c
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2008-10-20 16:48:17 +0100
committerKeir Fraser <keir.fraser@citrix.com>2008-10-20 16:48:17 +0100
commit0b99d7118c4e4b79ff492b7458d7370479522aa4 (patch)
tree74b21d626a187577840bb58e12036d32ff41430e /xen/common/spinlock.c
parent8e75cae72a33c531e2ca39adf834fcad8fca2307 (diff)
downloadxen-0b99d7118c4e4b79ff492b7458d7370479522aa4.tar.gz
xen-0b99d7118c4e4b79ff492b7458d7370479522aa4.tar.bz2
xen-0b99d7118c4e4b79ff492b7458d7370479522aa4.zip
Clean up spinlock operations and compile as first-class functions.
This follows modern Linux, since apparently outlining spinlock operations does not slow down execution. The cleanups will also allow more convenient addition of diagnostic code. Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Diffstat (limited to 'xen/common/spinlock.c')
-rw-r--r--xen/common/spinlock.c147
1 file changed, 147 insertions, 0 deletions
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
new file mode 100644
index 0000000000..9e9369a432
--- /dev/null
+++ b/xen/common/spinlock.c
@@ -0,0 +1,147 @@
+#include <xen/config.h>
+#include <xen/smp.h>
+#include <xen/spinlock.h>
+
+/* Acquire @lock: spin on the arch-specific raw lock until taken. */
+void _spin_lock(spinlock_t *lock)
+{
+ _raw_spin_lock(&lock->raw);
+}
+
+/* Disable local interrupts, then acquire @lock. */
+void _spin_lock_irq(spinlock_t *lock)
+{
+ local_irq_disable();
+ _raw_spin_lock(&lock->raw);
+}
+
+/*
+ * Save the local interrupt state, disable interrupts, and acquire
+ * @lock. Returns the saved state for a later _spin_unlock_irqrestore().
+ */
+unsigned long _spin_lock_irqsave(spinlock_t *lock)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ _raw_spin_lock(&lock->raw);
+ return flags;
+}
+
+/* Release @lock. */
+void _spin_unlock(spinlock_t *lock)
+{
+ _raw_spin_unlock(&lock->raw);
+}
+
+/* Release @lock, then unconditionally re-enable local interrupts. */
+void _spin_unlock_irq(spinlock_t *lock)
+{
+ _raw_spin_unlock(&lock->raw);
+ local_irq_enable();
+}
+
+/* Release @lock, then restore the interrupt state saved in @flags. */
+void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+ _raw_spin_unlock(&lock->raw);
+ local_irq_restore(flags);
+}
+
+/* Return non-zero iff @lock is currently held (by any CPU). */
+int _spin_is_locked(spinlock_t *lock)
+{
+ return _raw_spin_is_locked(&lock->raw);
+}
+
+/* Try to acquire @lock without spinning. Returns non-zero on success. */
+int _spin_trylock(spinlock_t *lock)
+{
+ return _raw_spin_trylock(&lock->raw);
+}
+
+/*
+ * Wait until @lock is observed unlocked, without ever acquiring it:
+ * on return, any critical section that was in progress when we were
+ * called has completed. The memory barriers order our surrounding
+ * accesses against the spinning observation of the lock word.
+ */
+void _spin_barrier(spinlock_t *lock)
+{
+ do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
+ mb();
+}
+
+/*
+ * Acquire @lock, permitting nested acquisitions by the same CPU.
+ * The first acquisition takes the underlying lock and records the
+ * owner in recurse_cpu; re-entrant calls from that CPU merely bump
+ * recurse_cnt. NOTE(review): nesting depth is not bounds-checked.
+ */
+void _spin_lock_recursive(spinlock_t *lock)
+{
+ int cpu = smp_processor_id();
+ if ( likely(lock->recurse_cpu != cpu) )
+ {
+ spin_lock(lock);
+ lock->recurse_cpu = cpu;
+ }
+ lock->recurse_cnt++;
+}
+
+/*
+ * Drop one level of recursive ownership of @lock. The underlying
+ * lock is released, and recurse_cpu reset to -1 (no owner), only
+ * when the outermost acquisition unwinds (recurse_cnt hits zero).
+ */
+void _spin_unlock_recursive(spinlock_t *lock)
+{
+ if ( likely(--lock->recurse_cnt == 0) )
+ {
+ lock->recurse_cpu = -1;
+ spin_unlock(lock);
+ }
+}
+
+/* Acquire @lock for reading (shared with other readers). */
+void _read_lock(rwlock_t *lock)
+{
+ _raw_read_lock(&lock->raw);
+}
+
+/* Disable local interrupts, then acquire @lock for reading. */
+void _read_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ _raw_read_lock(&lock->raw);
+}
+
+/*
+ * Save and disable the local interrupt state, then acquire @lock for
+ * reading. Returns the saved state for _read_unlock_irqrestore().
+ */
+unsigned long _read_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ _raw_read_lock(&lock->raw);
+ return flags;
+}
+
+/* Release a read hold on @lock. */
+void _read_unlock(rwlock_t *lock)
+{
+ _raw_read_unlock(&lock->raw);
+}
+
+/* Release a read hold on @lock, then re-enable local interrupts. */
+void _read_unlock_irq(rwlock_t *lock)
+{
+ _raw_read_unlock(&lock->raw);
+ local_irq_enable();
+}
+
+/* Release a read hold on @lock, then restore the state in @flags. */
+void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ _raw_read_unlock(&lock->raw);
+ local_irq_restore(flags);
+}
+
+/* Acquire @lock for writing (exclusive access). */
+void _write_lock(rwlock_t *lock)
+{
+ _raw_write_lock(&lock->raw);
+}
+
+/* Disable local interrupts, then acquire @lock for writing. */
+void _write_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ _raw_write_lock(&lock->raw);
+}
+
+/*
+ * Save and disable the local interrupt state, then acquire @lock for
+ * writing. Returns the saved state for _write_unlock_irqrestore().
+ */
+unsigned long _write_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ _raw_write_lock(&lock->raw);
+ return flags;
+}
+
+/* Release the write hold on @lock. */
+void _write_unlock(rwlock_t *lock)
+{
+ _raw_write_unlock(&lock->raw);
+}
+
+/* Release the write hold on @lock, then re-enable local interrupts. */
+void _write_unlock_irq(rwlock_t *lock)
+{
+ _raw_write_unlock(&lock->raw);
+ local_irq_enable();
+}
+
+/* Release the write hold on @lock, then restore the state in @flags. */
+void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ _raw_write_unlock(&lock->raw);
+ local_irq_restore(flags);
+}