author    Keir Fraser <keir.fraser@citrix.com>  2008-10-20 16:48:17 +0100
committer Keir Fraser <keir.fraser@citrix.com>  2008-10-20 16:48:17 +0100
commit    0b99d7118c4e4b79ff492b7458d7370479522aa4 (patch)
tree      74b21d626a187577840bb58e12036d32ff41430e /xen
parent    8e75cae72a33c531e2ca39adf834fcad8fca2307 (diff)
Clean up spinlock operations and compile as first-class functions.

This follows modern Linux practice, since outlining spinlock operations
apparently does not slow down execution. The cleanups will also make it
more convenient to add diagnostic code.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
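The heart of the change is a new layering: each architecture now supplies
only a raw_spinlock_t and the _raw_spin_*() primitives, while the common
layer wraps them in spinlock_t and compiles the public operations out of
line in xen/common/spinlock.c. Below is a minimal C sketch of that layering,
using a toy GCC test-and-set lock in place of the real per-architecture
assembly (the toy locking logic here is illustrative, not from the patch):

    /* Arch layer: the raw primitive, normally hand-written assembly. */
    typedef struct { volatile int lock; } raw_spinlock_t;

    static inline void _raw_spin_lock(raw_spinlock_t *l)
    {
        while (__sync_lock_test_and_set(&l->lock, 1)) /* stand-in for asm */
            while (l->lock)
                ;                                     /* spin until free */
    }

    static inline void _raw_spin_unlock(raw_spinlock_t *l)
    {
        __sync_lock_release(&l->lock);
    }

    /* Common layer: wraps the raw lock and adds the recursion fields. */
    typedef struct {
        raw_spinlock_t raw;
        signed char    recurse_cpu;
        unsigned char  recurse_cnt;
    } spinlock_t;

    /* Out-of-line, first-class function (lives in xen/common/spinlock.c). */
    void _spin_lock(spinlock_t *lock)
    {
        _raw_spin_lock(&lock->raw);
    }

    /* Caller-facing macro from xen/include/xen/spinlock.h. */
    #define spin_lock(l) _spin_lock(l)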
Diffstat (limited to 'xen')
-rw-r--r--  xen/arch/x86/x86_64/mm.c                        |    2
-rw-r--r--  xen/common/Makefile                             |    1
-rw-r--r--  xen/common/spinlock.c                           |  147
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/spinlock.h   |   35
-rw-r--r--  xen/include/asm-ia64/xenspinlock.h              |   30
-rw-r--r--  xen/include/asm-x86/spinlock.h                  |   80
-rw-r--r--  xen/include/xen/spinlock.h                      |  156
7 files changed, 262 insertions(+), 189 deletions(-)
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 6903a227a3..49ee4565e0 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -252,8 +252,6 @@ void __init subarch_init_memory(void)
BUILD_BUG_ON(offsetof(struct page_info, u.inuse._domain) !=
(offsetof(struct page_info, count_info) + sizeof(u32)));
BUILD_BUG_ON((offsetof(struct page_info, count_info) & 7) != 0);
- BUILD_BUG_ON(sizeof(struct page_info) !=
- (32 + BITS_TO_LONGS(NR_CPUS)*sizeof(long)));
/* M2P table is mappable read-only by privileged domains. */
for ( v = RDWR_MPT_VIRT_START;
diff --git a/xen/common/Makefile b/xen/common/Makefile
index 5190d789dc..1c7edffcfc 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -16,6 +16,7 @@ obj-y += sched_sedf.o
obj-y += schedule.o
obj-y += shutdown.o
obj-y += softirq.o
+obj-y += spinlock.o
obj-y += stop_machine.o
obj-y += string.o
obj-y += symbols.o
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
new file mode 100644
index 0000000000..9e9369a432
--- /dev/null
+++ b/xen/common/spinlock.c
@@ -0,0 +1,147 @@
+#include <xen/config.h>
+#include <xen/smp.h>
+#include <xen/spinlock.h>
+
+void _spin_lock(spinlock_t *lock)
+{
+ _raw_spin_lock(&lock->raw);
+}
+
+void _spin_lock_irq(spinlock_t *lock)
+{
+ local_irq_disable();
+ _raw_spin_lock(&lock->raw);
+}
+
+unsigned long _spin_lock_irqsave(spinlock_t *lock)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ _raw_spin_lock(&lock->raw);
+ return flags;
+}
+
+void _spin_unlock(spinlock_t *lock)
+{
+ _raw_spin_unlock(&lock->raw);
+}
+
+void _spin_unlock_irq(spinlock_t *lock)
+{
+ _raw_spin_unlock(&lock->raw);
+ local_irq_enable();
+}
+
+void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+ _raw_spin_unlock(&lock->raw);
+ local_irq_restore(flags);
+}
+
+int _spin_is_locked(spinlock_t *lock)
+{
+ return _raw_spin_is_locked(&lock->raw);
+}
+
+int _spin_trylock(spinlock_t *lock)
+{
+ return _raw_spin_trylock(&lock->raw);
+}
+
+void _spin_barrier(spinlock_t *lock)
+{
+ do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
+ mb();
+}
+
+void _spin_lock_recursive(spinlock_t *lock)
+{
+ int cpu = smp_processor_id();
+ if ( likely(lock->recurse_cpu != cpu) )
+ {
+ spin_lock(lock);
+ lock->recurse_cpu = cpu;
+ }
+ lock->recurse_cnt++;
+}
+
+void _spin_unlock_recursive(spinlock_t *lock)
+{
+ if ( likely(--lock->recurse_cnt == 0) )
+ {
+ lock->recurse_cpu = -1;
+ spin_unlock(lock);
+ }
+}
+
+void _read_lock(rwlock_t *lock)
+{
+ _raw_read_lock(&lock->raw);
+}
+
+void _read_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ _raw_read_lock(&lock->raw);
+}
+
+unsigned long _read_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ _raw_read_lock(&lock->raw);
+ return flags;
+}
+
+void _read_unlock(rwlock_t *lock)
+{
+ _raw_read_unlock(&lock->raw);
+}
+
+void _read_unlock_irq(rwlock_t *lock)
+{
+ _raw_read_unlock(&lock->raw);
+ local_irq_enable();
+}
+
+void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ _raw_read_unlock(&lock->raw);
+ local_irq_restore(flags);
+}
+
+void _write_lock(rwlock_t *lock)
+{
+ _raw_write_lock(&lock->raw);
+}
+
+void _write_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ _raw_write_lock(&lock->raw);
+}
+
+unsigned long _write_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ _raw_write_lock(&lock->raw);
+ return flags;
+}
+
+void _write_unlock(rwlock_t *lock)
+{
+ _raw_write_unlock(&lock->raw);
+}
+
+void _write_unlock_irq(rwlock_t *lock)
+{
+ _raw_write_unlock(&lock->raw);
+ local_irq_enable();
+}
+
+void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ _raw_write_unlock(&lock->raw);
+ local_irq_restore(flags);
+}
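The wrappers above preserve the old caller-side idioms; only
spin_lock_irqsave() changes shape internally, now assigning the return value
of _spin_lock_irqsave() to the flags argument. A minimal caller-side sketch
(example_lock and the function around it are hypothetical, not from the
patch):

    static DEFINE_SPINLOCK(example_lock);        /* hypothetical lock */

    static void example_critical_section(void)
    {
        unsigned long flags;

        /* Expands to flags = _spin_lock_irqsave(&example_lock). */
        spin_lock_irqsave(&example_lock, flags);
        /* ... critical region with local IRQs disabled ... */
        spin_unlock_irqrestore(&example_lock, flags);
    }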
diff --git a/xen/include/asm-ia64/linux-xen/asm/spinlock.h b/xen/include/asm-ia64/linux-xen/asm/spinlock.h
index 412479e242..981b6f9d64 100644
--- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h
+++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h
@@ -27,25 +27,16 @@ typedef struct {
#ifdef DEBUG_SPINLOCK
void *locker;
#endif
-#ifdef XEN
- unsigned char recurse_cpu;
- unsigned char recurse_cnt;
-#endif
-} spinlock_t;
+} raw_spinlock_t;
#ifdef XEN
#ifdef DEBUG_SPINLOCK
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 0, NULL, -1, 0 }
+#define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 0, NULL }
#else
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 0, -1, 0 }
+#define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 0 }
#endif
-static inline void spin_lock_init(spinlock_t *lock)
-{
- *lock = ((spinlock_t)SPIN_LOCK_UNLOCKED);
-}
#else
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 0 }
-#define spin_lock_init(x) ((x)->lock = 0)
+#define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 0 }
#endif
#ifdef ASM_SUPPORTED
@@ -59,7 +50,7 @@ static inline void spin_lock_init(spinlock_t *lock)
#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
static inline void
-_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+_raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
{
register volatile unsigned int *ptr asm ("r31") = &lock->lock;
@@ -136,8 +127,8 @@ do { \
} while (0)
#endif /* !ASM_SUPPORTED */
-#define spin_is_locked(x) ((x)->lock != 0)
-#define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
+#define _raw_spin_is_locked(x) ((x)->lock != 0)
+#define _raw_spin_unlock(x) do { barrier(); (x)->lock = 0; } while (0)
#define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
@@ -147,16 +138,15 @@ typedef struct {
#ifdef CONFIG_PREEMPT
unsigned int break_lock;
#endif
-} rwlock_t;
-#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { 0, 0 }
+} raw_rwlock_t;
+#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { 0, 0 }
-#define rwlock_init(x) do { *(x) = (rwlock_t) RW_LOCK_UNLOCKED; } while(0)
#define read_can_lock(rw) (*(volatile int *)(rw) >= 0)
#define write_can_lock(rw) (*(volatile int *)(rw) == 0)
#define _raw_read_lock(rw) \
do { \
- rwlock_t *__read_lock_ptr = (rw); \
+ raw_rwlock_t *__read_lock_ptr = (rw); \
\
while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
@@ -167,7 +157,7 @@ do { \
#define _raw_read_unlock(rw) \
do { \
- rwlock_t *__read_lock_ptr = (rw); \
+ raw_rwlock_t *__read_lock_ptr = (rw); \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
} while (0)
@@ -230,7 +220,4 @@ do { \
clear_bit(31, (x)); \
})
-#ifdef XEN
-#include <asm/xenspinlock.h>
-#endif
#endif /* _ASM_IA64_SPINLOCK_H */
diff --git a/xen/include/asm-ia64/xenspinlock.h b/xen/include/asm-ia64/xenspinlock.h
deleted file mode 100644
index d383df4310..0000000000
--- a/xen/include/asm-ia64/xenspinlock.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef _ASM_IA64_XENSPINLOCK_H
-#define _ASM_IA64_XENSPINLOCK_H
-
-/*
- * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
- * reentered recursively on the same CPU. All critical regions that may form
- * part of a recursively-nested set must be protected by these forms. If there
- * are any critical regions that cannot form part of such a set, they can use
- * standard spin_[un]lock().
- */
-#define _raw_spin_lock_recursive(_lock) \
- do { \
- int cpu = smp_processor_id(); \
- if ( likely((_lock)->recurse_cpu != cpu) ) \
- { \
- spin_lock(_lock); \
- (_lock)->recurse_cpu = cpu; \
- } \
- (_lock)->recurse_cnt++; \
- } while ( 0 )
-
-#define _raw_spin_unlock_recursive(_lock) \
- do { \
- if ( likely(--(_lock)->recurse_cnt == 0) ) \
- { \
- (_lock)->recurse_cpu = -1; \
- spin_unlock(_lock); \
- } \
- } while ( 0 )
-#endif /* _ASM_IA64_XENSPINLOCK_H */
diff --git a/xen/include/asm-x86/spinlock.h b/xen/include/asm-x86/spinlock.h
index 550edcb4e5..7dc1da0bd8 100644
--- a/xen/include/asm-x86/spinlock.h
+++ b/xen/include/asm-x86/spinlock.h
@@ -8,18 +8,16 @@
typedef struct {
volatile s16 lock;
- s8 recurse_cpu;
- u8 recurse_cnt;
-} spinlock_t;
+} raw_spinlock_t;
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 1, -1, 0 }
+#define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 1 }
-#define spin_lock_init(x) do { *(x) = (spinlock_t) SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(x) (*(volatile char *)(&(x)->lock) <= 0)
+#define _raw_spin_is_locked(x) \
+ (*(volatile char *)(&(x)->lock) <= 0)
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void _raw_spin_lock(raw_spinlock_t *lock)
{
- __asm__ __volatile__ (
+ asm volatile (
"1: lock; decb %0 \n"
" js 2f \n"
".section .text.lock,\"ax\"\n"
@@ -31,81 +29,51 @@ static inline void _raw_spin_lock(spinlock_t *lock)
: "=m" (lock->lock) : : "memory" );
}
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void _raw_spin_unlock(raw_spinlock_t *lock)
{
- ASSERT(spin_is_locked(lock));
- __asm__ __volatile__ (
- "movb $1,%0"
+ ASSERT(_raw_spin_is_locked(lock));
+ asm volatile (
+ "movb $1,%0"
: "=m" (lock->lock) : : "memory" );
}
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int _raw_spin_trylock(raw_spinlock_t *lock)
{
char oldval;
- __asm__ __volatile__(
+ asm volatile (
"xchgb %b0,%1"
:"=q" (oldval), "=m" (lock->lock)
- :"0" (0) : "memory");
- return oldval > 0;
+ :"0" (0) : "memory" );
+ return (oldval > 0);
}
-/*
- * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
- * reentered recursively on the same CPU. All critical regions that may form
- * part of a recursively-nested set must be protected by these forms. If there
- * are any critical regions that cannot form part of such a set, they can use
- * standard spin_[un]lock().
- */
-#define _raw_spin_lock_recursive(_lock) \
- do { \
- int cpu = smp_processor_id(); \
- if ( likely((_lock)->recurse_cpu != cpu) ) \
- { \
- spin_lock(_lock); \
- (_lock)->recurse_cpu = cpu; \
- } \
- (_lock)->recurse_cnt++; \
- } while ( 0 )
-
-#define _raw_spin_unlock_recursive(_lock) \
- do { \
- if ( likely(--(_lock)->recurse_cnt == 0) ) \
- { \
- (_lock)->recurse_cpu = -1; \
- spin_unlock(_lock); \
- } \
- } while ( 0 )
-
-
typedef struct {
volatile unsigned int lock;
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { RW_LOCK_BIAS }
+} raw_rwlock_t;
-#define rwlock_init(x) do { *(x) = (rwlock_t) RW_LOCK_UNLOCKED; } while(0)
+#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { RW_LOCK_BIAS }
/*
* On x86, we implement read-write locks as a 32-bit counter
* with the high bit (sign) being the "contended" bit.
*/
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void _raw_read_lock(raw_rwlock_t *rw)
{
__build_read_lock(rw, "__read_lock_failed");
}
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void _raw_write_lock(raw_rwlock_t *rw)
{
__build_write_lock(rw, "__write_lock_failed");
}
-#define _raw_read_unlock(rw) \
- __asm__ __volatile__ ( \
- "lock ; incl %0" : \
+#define _raw_read_unlock(rw) \
+ asm volatile ( \
+ "lock ; incl %0" : \
"=m" ((rw)->lock) : : "memory" )
-#define _raw_write_unlock(rw) \
- __asm__ __volatile__ ( \
- "lock ; addl $" RW_LOCK_BIAS_STR ",%0" : \
+#define _raw_write_unlock(rw) \
+ asm volatile ( \
+ "lock ; addl $" RW_LOCK_BIAS_STR ",%0" : \
"=m" ((rw)->lock) : : "memory" )
#endif /* __ASM_SPINLOCK_H */
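The comment in the hunk above describes the classic x86 biased rwlock: the
counter starts at RW_LOCK_BIAS, each reader takes one unit, and a writer
takes the whole bias, so any holder drives the count away from the expected
value. A C11 sketch of that scheme (the sketch_* names are illustrative;
the real code uses __build_read_lock/__build_write_lock and inline
assembly):

    #include <stdatomic.h>

    #define RW_LOCK_BIAS 0x01000000

    typedef struct { atomic_int count; } sketch_rwlock_t; /* init: RW_LOCK_BIAS */

    static void sketch_read_lock(sketch_rwlock_t *rw)
    {
        /* fetch_sub returns the old value; old >= 1 means no writer held it. */
        while (atomic_fetch_sub(&rw->count, 1) < 1) {
            atomic_fetch_add(&rw->count, 1);          /* undo, then wait */
            while (atomic_load(&rw->count) < 1)
                ;
        }
    }

    static void sketch_read_unlock(sketch_rwlock_t *rw)
    {
        atomic_fetch_add(&rw->count, 1);              /* "lock ; incl" */
    }

    static void sketch_write_lock(sketch_rwlock_t *rw)
    {
        /* Old value == RW_LOCK_BIAS means no readers and no writer. */
        while (atomic_fetch_sub(&rw->count, RW_LOCK_BIAS) != RW_LOCK_BIAS) {
            atomic_fetch_add(&rw->count, RW_LOCK_BIAS);
            while (atomic_load(&rw->count) != RW_LOCK_BIAS)
                ;
        }
    }

    static void sketch_write_unlock(sketch_rwlock_t *rw)
    {
        atomic_fetch_add(&rw->count, RW_LOCK_BIAS);   /* "lock ; addl BIAS" */
    }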
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 298211cb2a..30ce057667 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -3,93 +3,95 @@
#include <xen/config.h>
#include <asm/system.h>
+#include <asm/spinlock.h>
-#define spin_lock_irqsave(lock, flags) \
- do { local_irq_save(flags); spin_lock(lock); } while ( 0 )
-#define spin_lock_irq(lock) \
- do { local_irq_disable(); spin_lock(lock); } while ( 0 )
+typedef struct {
+ raw_spinlock_t raw;
+ s8 recurse_cpu;
+ u8 recurse_cnt;
+} spinlock_t;
-#define read_lock_irqsave(lock, flags) \
- do { local_irq_save(flags); read_lock(lock); } while ( 0 )
-#define read_lock_irq(lock) \
- do { local_irq_disable(); read_lock(lock); } while ( 0 )
+#define SPIN_LOCK_UNLOCKED { _RAW_SPIN_LOCK_UNLOCKED, -1, 0 }
+#define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
+#define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
-#define write_lock_irqsave(lock, flags) \
- do { local_irq_save(flags); write_lock(lock); } while ( 0 )
-#define write_lock_irq(lock) \
- do { local_irq_disable(); write_lock(lock); } while ( 0 )
+typedef struct {
+ raw_rwlock_t raw;
+} rwlock_t;
-#define spin_unlock_irqrestore(lock, flags) \
- do { spin_unlock(lock); local_irq_restore(flags); } while ( 0 )
-#define spin_unlock_irq(lock) \
- do { spin_unlock(lock); local_irq_enable(); } while ( 0 )
+#define RW_LOCK_UNLOCKED { _RAW_RW_LOCK_UNLOCKED }
+#define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
+#define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)
-#define read_unlock_irqrestore(lock, flags) \
- do { read_unlock(lock); local_irq_restore(flags); } while ( 0 )
-#define read_unlock_irq(lock) \
- do { read_unlock(lock); local_irq_enable(); } while ( 0 )
+void _spin_lock(spinlock_t *lock);
+void _spin_lock_irq(spinlock_t *lock);
+unsigned long _spin_lock_irqsave(spinlock_t *lock);
-#define write_unlock_irqrestore(lock, flags) \
- do { write_unlock(lock); local_irq_restore(flags); } while ( 0 )
-#define write_unlock_irq(lock) \
- do { write_unlock(lock); local_irq_enable(); } while ( 0 )
+void _spin_unlock(spinlock_t *lock);
+void _spin_unlock_irq(spinlock_t *lock);
+void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
-#ifdef CONFIG_SMP
+int _spin_is_locked(spinlock_t *lock);
+int _spin_trylock(spinlock_t *lock);
+void _spin_barrier(spinlock_t *lock);
-#include <asm/spinlock.h>
+void _spin_lock_recursive(spinlock_t *lock);
+void _spin_unlock_recursive(spinlock_t *lock);
+
+void _read_lock(rwlock_t *lock);
+void _read_lock_irq(rwlock_t *lock);
+unsigned long _read_lock_irqsave(rwlock_t *lock);
+
+void _read_unlock(rwlock_t *lock);
+void _read_unlock_irq(rwlock_t *lock);
+void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
+
+void _write_lock(rwlock_t *lock);
+void _write_lock_irq(rwlock_t *lock);
+unsigned long _write_lock_irqsave(rwlock_t *lock);
+
+void _write_unlock(rwlock_t *lock);
+void _write_unlock_irq(rwlock_t *lock);
+void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
+
+#define spin_lock(l) _spin_lock(l)
+#define spin_lock_irq(l) _spin_lock_irq(l)
+#define spin_lock_irqsave(l, f) ((f) = _spin_lock_irqsave(l))
+
+#define spin_unlock(l) _spin_unlock(l)
+#define spin_unlock_irq(l) _spin_unlock_irq(l)
+#define spin_unlock_irqrestore(l, f) _spin_unlock_irqrestore(l, f)
-#else
-
-#if (__GNUC__ > 2)
-typedef struct { } spinlock_t;
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { }
-#else
-typedef struct { int gcc_is_buggy; } spinlock_t;
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 0 }
-#endif
-
-#define spin_lock_init(lock) do { } while(0)
-#define spin_is_locked(lock) (0)
-#define _raw_spin_lock(lock) (void)(lock)
-#define _raw_spin_trylock(lock) ({1; })
-#define _raw_spin_unlock(lock) do { } while(0)
-#define _raw_spin_lock_recursive(lock) do { } while(0)
-#define _raw_spin_unlock_recursive(lock) do { } while(0)
-
-#if (__GNUC__ > 2)
-typedef struct { } rwlock_t;
-#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { }
-#else
-typedef struct { int gcc_is_buggy; } rwlock_t;
-#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { 0 }
-#endif
-
-#define rwlock_init(lock) do { } while(0)
-#define _raw_read_lock(lock) (void)(lock) /* Not "unused variable". */
-#define _raw_read_unlock(lock) do { } while(0)
-#define _raw_write_lock(lock) (void)(lock) /* Not "unused variable". */
-#define _raw_write_unlock(lock) do { } while(0)
-
-#endif
-
-#define spin_lock(_lock) _raw_spin_lock(_lock)
-#define spin_trylock(_lock) _raw_spin_trylock(_lock)
-#define spin_unlock(_lock) _raw_spin_unlock(_lock)
-#define spin_lock_recursive(_lock) _raw_spin_lock_recursive(_lock)
-#define spin_unlock_recursive(_lock) _raw_spin_unlock_recursive(_lock)
-#define read_lock(_lock) _raw_read_lock(_lock)
-#define read_unlock(_lock) _raw_read_unlock(_lock)
-#define write_lock(_lock) _raw_write_lock(_lock)
-#define write_unlock(_lock) _raw_write_unlock(_lock)
+#define spin_is_locked(l) _raw_spin_is_locked(&(l)->raw)
+#define spin_trylock(l) _spin_trylock(l)
/* Ensure a lock is quiescent between two critical operations. */
-static inline void spin_barrier(spinlock_t *lock)
-{
- do { mb(); } while ( spin_is_locked(lock) );
- mb();
-}
-
-#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
-#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
+#define spin_barrier(l) _spin_barrier(l)
+
+/*
+ * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
+ * reentered recursively on the same CPU. All critical regions that may form
+ * part of a recursively-nested set must be protected by these forms. If there
+ * are any critical regions that cannot form part of such a set, they can use
+ * standard spin_[un]lock().
+ */
+#define spin_lock_recursive(l) _spin_lock_recursive(l)
+#define spin_unlock_recursive(l) _spin_unlock_recursive(l)
+
+#define read_lock(l) _read_lock(l)
+#define read_lock_irq(l) _read_lock_irq(l)
+#define read_lock_irqsave(l, f) ((f) = _read_lock_irqsave(l))
+
+#define read_unlock(l) _read_unlock(l)
+#define read_unlock_irq(l) _read_unlock_irq(l)
+#define read_unlock_irqrestore(l, f) _read_unlock_irqrestore(l, f)
+
+#define write_lock(l) _write_lock(l)
+#define write_lock_irq(l) _write_lock_irq(l)
+#define write_lock_irqsave(l, f) ((f) = _write_lock_irqsave(l))
+
+#define write_unlock(l) _write_unlock(l)
+#define write_unlock_irq(l) _write_unlock_irq(l)
+#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)
#endif /* __SPINLOCK_H__ */
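Finally, a usage note on the recursive forms retained above: as the comment
in this header says, every critical region that may nest must consistently
use the recursive forms. A sketch of safe nesting on one CPU (nest_lock,
outer and inner are hypothetical names):

    static DEFINE_SPINLOCK(nest_lock);        /* hypothetical lock */

    static void inner(void)
    {
        spin_lock_recursive(&nest_lock);      /* re-entry on this CPU: just recurse_cnt++ */
        /* ... */
        spin_unlock_recursive(&nest_lock);    /* releases only when the count drops to 0 */
    }

    static void outer(void)
    {
        spin_lock_recursive(&nest_lock);      /* first acquisition takes the real lock */
        inner();                              /* nested acquisition does not deadlock */
        spin_unlock_recursive(&nest_lock);
    }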