From 4fa75a7c3b58f11b028905f6a1d3af3c871b4b7a Mon Sep 17 00:00:00 2001
From: "kfraser@localhost.localdomain" <kfraser@localhost.localdomain>
Date: Fri, 17 Nov 2006 09:18:28 +0000
Subject: [MINIOS] Refactor spinlock header for multi-arch support.

I separated the spinlock parts specific to the x86 architecture and
moved them to include/x86/arch_spinlock.h. The common code is now in
include/spinlock.h.

Signed-off-by: Dietmar Hahn
---
 extras/mini-os/include/spinlock.h | 55 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+)
 create mode 100644 extras/mini-os/include/spinlock.h

diff --git a/extras/mini-os/include/spinlock.h b/extras/mini-os/include/spinlock.h
new file mode 100644
index 0000000000..ecfe73627e
--- /dev/null
+++ b/extras/mini-os/include/spinlock.h
@@ -0,0 +1,55 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <lib.h>
+
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ */
+
+typedef struct {
+        volatile unsigned int slock;
+} spinlock_t;
+
+
+#include "arch_spinlock.h"
+
+
+#define SPINLOCK_MAGIC          0xdead4ead
+
+#define SPIN_LOCK_UNLOCKED      ARCH_SPIN_LOCK_UNLOCKED
+
+#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define spin_is_locked(x)       arch_spin_is_locked(x)
+
+#define spin_unlock_wait(x)     do { barrier(); } while(spin_is_locked(x))
+
+
+#define _spin_trylock(lock)     ({_raw_spin_trylock(lock) ? \
+                                1 : ({ 0;});})
+
+#define _spin_lock(lock)        \
+do {                            \
+        _raw_spin_lock(lock);   \
+} while(0)
+
+#define _spin_unlock(lock)      \
+do {                            \
+        _raw_spin_unlock(lock); \
+} while (0)
+
+
+#define spin_lock(lock)         _spin_lock(lock)
+#define spin_unlock(lock)       _spin_unlock(lock)
+
+#define DEFINE_SPINLOCK(x)      spinlock_t x = SPIN_LOCK_UNLOCKED
+
+#endif
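
Note for other ports: the common header above relies only on a small
per-architecture contract, namely ARCH_SPIN_LOCK_UNLOCKED,
arch_spin_is_locked(), _raw_spin_trylock(), _raw_spin_lock() and
_raw_spin_unlock(), which this change supplies for x86 in
include/x86/arch_spinlock.h. The sketch below is not that x86 file; it is a
hypothetical arch_spinlock.h for some new port. The include-guard name and
the "0 means unlocked" encoding are arbitrary choices for the example, and
the GCC __sync builtins merely stand in for whatever atomic instructions a
real architecture would use.

/* Hypothetical arch_spinlock.h for a new mini-os port (illustration only).
 * It is included from spinlock.h after the spinlock_t typedef, so the type
 * is already visible here.  Encoding: slock == 0 unlocked, 1 locked. */
#ifndef __ARCH_ASM_SPINLOCK_H
#define __ARCH_ASM_SPINLOCK_H

#define ARCH_SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }

#define arch_spin_is_locked(x)  ((x)->slock != 0)

/* Returns non-zero if the lock was acquired. */
static inline int _raw_spin_trylock(spinlock_t *lock)
{
        /* Atomic exchange with acquire semantics: an old value of 0 means
         * the lock was free and now belongs to us. */
        return __sync_lock_test_and_set(&lock->slock, 1) == 0;
}

static inline void _raw_spin_lock(spinlock_t *lock)
{
        while (!_raw_spin_trylock(lock))
                ;       /* busy-wait until the holder releases the lock */
}

static inline void _raw_spin_unlock(spinlock_t *lock)
{
        __sync_lock_release(&lock->slock);      /* release barrier, store 0 */
}

#endif /* __ARCH_ASM_SPINLOCK_H */

With such a file in place, callers only ever see the portable interface from
spinlock.h: declare a lock with DEFINE_SPINLOCK() or set it up with
spin_lock_init(), then bracket the critical section with spin_lock() and
spin_unlock().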