author     kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-09-09 09:24:25 +0000
committer  kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-09-09 09:24:25 +0000
commit     cdb8b09f6b67b270b1c21f1a7f42d5e8a604caa8 (patch)
tree       414d05b171df34e0125f29731a8ba12e280297d9 /extras/mini-os/include/os.h
parent     c125eb9c047b908b2bb18e5cf4a88355a1526a25 (diff)
Xenbus implementation ported from Linux to Mini-os; simple thread support introduced to simplify the porting. The 64-bit version of Mini-os now compiles, but does not yet work because the pagetables and some bits of the scheduler have not been written.
Signed-off-by: Grzegorz Milos <gm281@cam.ac.uk>
Diffstat (limited to 'extras/mini-os/include/os.h')
-rw-r--r--  extras/mini-os/include/os.h  335
1 file changed, 246 insertions(+), 89 deletions(-)
diff --git a/extras/mini-os/include/os.h b/extras/mini-os/include/os.h
index 490abc9839..42882f364f 100644
--- a/extras/mini-os/include/os.h
+++ b/extras/mini-os/include/os.h
@@ -15,16 +15,17 @@
#define unlikely(x) __builtin_expect((x),0)
#define smp_processor_id() 0
-#define preempt_disable() ((void)0)
-#define preempt_enable() ((void)0)
-#define force_evtchn_callback() ((void)HYPERVISOR_xen_version(0))
#ifndef __ASSEMBLY__
#include <types.h>
+#include <hypervisor.h>
#endif
#include <xen/xen.h>
+
+#define force_evtchn_callback() ((void)HYPERVISOR_xen_version(0))
+
#define __KERNEL_CS FLAT_KERNEL_CS
#define __KERNEL_DS FLAT_KERNEL_DS
#define __KERNEL_SS FLAT_KERNEL_SS
@@ -54,8 +55,6 @@
/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__
-#define pt_regs xen_regs
-
void trap_init(void);
/*
@@ -69,10 +68,8 @@ void trap_init(void);
#define __cli() \
do { \
vcpu_info_t *_vcpu; \
- preempt_disable(); \
_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
_vcpu->evtchn_upcall_mask = 1; \
- preempt_enable_no_resched(); \
barrier(); \
} while (0)
@@ -80,13 +77,11 @@ do { \
do { \
vcpu_info_t *_vcpu; \
barrier(); \
- preempt_disable(); \
_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
_vcpu->evtchn_upcall_mask = 0; \
barrier(); /* unmask then check (avoid races) */ \
if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
force_evtchn_callback(); \
- preempt_enable(); \
} while (0)
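The preempt_disable()/preempt_enable() pairs are dropped from the masking macros because Mini-os's new threads are cooperative rather than preemptive, so only the event-channel upcall mask matters. A minimal sketch of how the resulting macros guard a critical section; the counter is illustrative and not part of the patch:

static unsigned long events_seen;      /* illustrative shared state */

void count_event(void)
{
    __cli();                 /* mask event upcalls on this vcpu */
    events_seen++;           /* critical section */
    __sti();                 /* unmask; a pending upcall is delivered
                                via force_evtchn_callback() */
}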
#define __save_flags(x) \
@@ -100,15 +95,12 @@ do { \
do { \
vcpu_info_t *_vcpu; \
barrier(); \
- preempt_disable(); \
_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
barrier(); /* unmask then check (avoid races) */ \
if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
force_evtchn_callback(); \
- preempt_enable(); \
- } else \
- preempt_enable_no_resched(); \
+ }\
} while (0)
#define safe_halt() ((void)0)
@@ -116,11 +108,9 @@ do { \
#define __save_and_cli(x) \
do { \
vcpu_info_t *_vcpu; \
- preempt_disable(); \
_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
(x) = _vcpu->evtchn_upcall_mask; \
_vcpu->evtchn_upcall_mask = 1; \
- preempt_enable_no_resched(); \
barrier(); \
} while (0)
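For code that may already run with events masked, the save/restore pattern is the usual idiom. A hedged sketch, assuming the __restore_flags() counterpart defined alongside __save_flags() in this header; the function name is illustrative:

void touch_shared_state(void)
{
    unsigned long flags;

    __save_and_cli(flags);   /* record the current mask, then mask */
    /* ... modify state shared with the upcall handler ... */
    __restore_flags(flags);  /* unmasks only if events were unmasked
                                on entry */
}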
@@ -136,6 +126,15 @@ do { \
/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")
+#if defined(__i386__)
+#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#define rmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#elif defined(__x86_64__)
+#define mb() __asm__ __volatile__ ("mfence":::"memory")
+#define rmb() __asm__ __volatile__ ("lfence":::"memory")
+#endif
+
+
#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)
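mb() and rmb() become architecture-specific here: i386 uses a locked add because MFENCE/LFENCE require SSE2, while x86_64 can rely on the fence instructions. A minimal producer/consumer ordering sketch with illustrative variables:

static int payload, ready;             /* illustrative flags */

void producer(void)
{
    payload = 42;
    mb();          /* order the store to payload before ready */
    ready = 1;
}

int consumer(void)
{
    while (!ready)
        ;
    rmb();         /* keep the read of payload after the read of ready */
    return payload;
}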
@@ -147,69 +146,71 @@ do { \
typedef struct { volatile int counter; } atomic_t;
-#define xchg(ptr,v) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+/************************** i386 *******************************/
+#if defined (__i386__)
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
- int size)
+#define __xg(x) ((struct __xchg_dummy *)(x))
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
- switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- }
- return x;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__ __volatile__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__ __volatile__("xchgl %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
}
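xchg on x86 is implicitly locked, which is enough to build a test-and-set lock. A hedged sketch; the lock word and function names are illustrative, not part of this header:

static volatile int lock_word;

static inline void spin_lock(void)
{
    while (xchg(&lock_word, 1) != 0)
        ;                    /* spin until we swap 1 in and see 0 back */
}

static inline void spin_unlock(void)
{
    xchg(&lock_word, 0);     /* release the lock */
}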
/**
* test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to clear
* @addr: Address to count from
*
- * This operation is atomic and cannot be reordered.
+ * This operation is atomic and cannot be reordered.
+ * It may be reordered on architectures other than x86.
* It also implies a memory barrier.
*/
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
- int oldbit;
+ int oldbit;
- __asm__ __volatile__( LOCK_PREFIX
- "btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
- :"Ir" (nr) : "memory");
- return oldbit;
+ __asm__ __volatile__( LOCK
+ "btrl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"Ir" (nr) : "memory");
+ return oldbit;
}
-static __inline__ int constant_test_bit(int nr, const volatile void * addr)
+static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
- return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+ return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}
-static __inline__ int variable_test_bit(int nr, volatile void * addr)
+static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
{
- int oldbit;
-
- __asm__ __volatile__(
- "btl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit)
- :"m" (ADDR),"Ir" (nr));
- return oldbit;
+ int oldbit;
+
+ __asm__ __volatile__(
+ "btl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit)
+ :"m" (ADDR),"Ir" (nr));
+ return oldbit;
}
#define test_bit(nr,addr) \
@@ -217,7 +218,6 @@ static __inline__ int variable_test_bit(int nr, volatile void * addr)
constant_test_bit((nr),(addr)) : \
variable_test_bit((nr),(addr)))
-
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
@@ -225,15 +225,20 @@ static __inline__ int variable_test_bit(int nr, volatile void * addr)
*
* This function is atomic and may not be reordered. See __set_bit()
* if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static __inline__ void set_bit(int nr, volatile void * addr)
+static inline void set_bit(int nr, volatile unsigned long * addr)
{
- __asm__ __volatile__( LOCK_PREFIX
- "btsl %1,%0"
- :"=m" (ADDR)
- :"Ir" (nr));
+ __asm__ __volatile__( LOCK
+ "btsl %1,%0"
+ :"=m" (ADDR)
+ :"Ir" (nr));
}
/**
@@ -246,43 +251,188 @@ static __inline__ void set_bit(int nr, volatile void * addr)
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
-static __inline__ void clear_bit(int nr, volatile void * addr)
+static inline void clear_bit(int nr, volatile unsigned long * addr)
{
- __asm__ __volatile__( LOCK_PREFIX
- "btrl %1,%0"
- :"=m" (ADDR)
- :"Ir" (nr));
+ __asm__ __volatile__( LOCK
+ "btrl %1,%0"
+ :"=m" (ADDR)
+ :"Ir" (nr));
}
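The LOCKed bit operations combine naturally: set_bit() publishes a flag and test_and_clear_bit() consumes it exactly once. A small sketch with an illustrative flag word:

static unsigned long flag_word;        /* illustrative */
#define FLAG_DIRTY 0

void mark_dirty(void)
{
    set_bit(FLAG_DIRTY, &flag_word);   /* atomic read-modify-write */
}

int consume_dirty(void)
{
    /* nonzero for exactly one caller per set_bit() */
    return test_and_clear_bit(FLAG_DIRTY, &flag_word);
}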
/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
- */
-static __inline__ void atomic_inc(atomic_t *v)
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
{
- __asm__ __volatile__(
- LOCK "incl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ __asm__("bsfl %1,%0"
+ :"=r" (word)
+ :"rm" (word));
+ return word;
}
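__ffs() is undefined on zero, so callers must test the word first; the usual pattern drains a pending-bits word bit by bit, much as an event-channel dispatcher would. An illustrative sketch (the handler table is not part of this header):

extern void (*handlers[])(unsigned long bit);  /* illustrative */

void dispatch_pending(unsigned long pending)
{
    while (pending != 0) {
        unsigned long bit = __ffs(pending);
        pending &= ~(1UL << bit);      /* clear before handling */
        handlers[bit](bit);
    }
}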
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. All bit operations return 0 if the bit
+ * was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+#define ADDR (*(volatile long *) addr)
+
#define rdtscll(val) \
__asm__ __volatile__("rdtsc" : "=A" (val))
+
+
+#elif defined(__x86_64__) /* ifdef __i386__ */
+/************************** x86_64 *******************************/
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+#define __xg(x) ((volatile long *)(x))
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__ __volatile__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__ __volatile__("xchgl %k0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 8:
+ __asm__ __volatile__("xchgq %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
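The x86_64 variant adds the 8-byte xchgq case, so whole pointers can be swapped atomically. A hedged sketch; the list type and names are illustrative:

struct item { struct item *next; };
static struct item *head;              /* illustrative list head */

struct item *steal_all(void)
{
    /* atomically detach the whole list from concurrent pushers */
    return xchg(&head, NULL);
}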
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__( LOCK_PREFIX
+ "btrl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"dIr" (nr) : "memory");
+ return oldbit;
+}
+
+static __inline__ int constant_test_bit(int nr, const volatile void * addr)
+{
+ return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int variable_test_bit(int nr, volatile const void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(
+ "btl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit)
+ :"m" (ADDR),"dIr" (nr));
+ return oldbit;
+}
+
+#define test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ constant_test_bit((nr),(addr)) : \
+ variable_test_bit((nr),(addr)))
+
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __inline__ void set_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__( LOCK_PREFIX
+ "btsl %1,%0"
+ :"=m" (ADDR)
+ :"dIr" (nr) : "memory");
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static __inline__ void clear_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__( LOCK_PREFIX
+ "btrl %1,%0"
+ :"=m" (ADDR)
+ :"dIr" (nr));
+}
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
static __inline__ unsigned long __ffs(unsigned long word)
{
- __asm__("bsfl %1,%0"
- :"=r" (word)
- :"rm" (word));
- return word;
+ __asm__("bsfq %1,%0"
+ :"=r" (word)
+ :"rm" (word));
+ return word;
}
#define ADDR (*(volatile long *) addr)
+#define rdtscll(val) do { \
+ unsigned int __a,__d; \
+ asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+ (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
+} while(0)
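On x86_64, RDTSC returns the counter split across EDX:EAX, hence the shift-and-or above. A coarse timing sketch; RDTSC is not a serializing instruction, so this only yields a rough cycle count:

unsigned long cycles_for(void (*fn)(void))
{
    unsigned long before, after;

    rdtscll(before);
    fn();
    rdtscll(after);
    return after - before;   /* raw, unserialized TSC delta */
}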
+
+
+#else /* ifdef __x86_64__ */
+#error "Unsupported architecture"
+#endif
+
+
+/********************* common i386 and x86_64 ****************************/
+
+
+
static __inline__ void synch_set_bit(int nr, volatile void * addr)
{
__asm__ __volatile__ (
@@ -306,6 +456,14 @@ static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
return oldbit;
}
+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+ __asm__ __volatile__ (
+ "lock btrl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+ return oldbit;
+}
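The synch_ variants hardcode the lock prefix (note LOCK_PREFIX is empty in this uniprocessor build) because they act on memory shared with Xen, which may update it concurrently. A hedged sketch of consuming a pending bit; the word is illustrative rather than the real shared_info:

static volatile unsigned long pending_word;    /* illustrative */

int take_event(unsigned int port)
{
    /* only one consumer observes the 1 -> 0 transition */
    return synch_test_and_clear_bit(port, &pending_word);
}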
static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
{
@@ -326,9 +484,8 @@ static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
(__builtin_constant_p(nr) ? \
synch_const_test_bit((nr),(addr)) : \
synch_var_test_bit((nr),(addr)))
-#endif /* !__ASSEMBLY__ */
-#define rdtsc(low,high) \
- __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+
+#endif /* not assembly */
#endif /* _OS_H_ */