author:    kfraser@localhost.localdomain <kfraser@localhost.localdomain> 2006-08-10 10:43:20 +0100
committer: kfraser@localhost.localdomain <kfraser@localhost.localdomain> 2006-08-10 10:43:20 +0100
commit:    6b11879d819e27d96163f12b40277589bf54be22 (patch)
tree:      98afff18f73693961d440a9559dc3c1a43575263 /extras/mini-os/include/x86
parent:    3f31a200de12122fdc0a4031aed3a381ea7d4ed2 (diff)
download:  xen-6b11879d819e27d96163f12b40277589bf54be22.tar.gz
           xen-6b11879d819e27d96163f12b40277589bf54be22.tar.bz2
           xen-6b11879d819e27d96163f12b40277589bf54be22.zip
[MINIOS] A first step in re-architecting mini-os for a port to ia64.
Create architecture-specific directories below mini-os for sources and below mini-os/include for headers.

Signed-off-by: Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
Diffstat (limited to 'extras/mini-os/include/x86')
-rw-r--r--  extras/mini-os/include/x86/os.h                       | 561
-rw-r--r--  extras/mini-os/include/x86/spinlock.h                 | 121
-rw-r--r--  extras/mini-os/include/x86/traps.h                    |  73
-rw-r--r--  extras/mini-os/include/x86/x86_32/hypercall-x86_32.h  | 326
-rw-r--r--  extras/mini-os/include/x86/x86_64/hypercall-x86_64.h  | 326
5 files changed, 1407 insertions, 0 deletions
diff --git a/extras/mini-os/include/x86/os.h b/extras/mini-os/include/x86/os.h
new file mode 100644
index 0000000000..2b6ed5512b
--- /dev/null
+++ b/extras/mini-os/include/x86/os.h
@@ -0,0 +1,561 @@
+/******************************************************************************
+ * os.h
+ *
+ * A random collection of macros and definitions.
+ */
+
+#ifndef _OS_H_
+#define _OS_H_
+
+#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
+#define __builtin_expect(x, expected_value) (x)
+#endif
+#define unlikely(x) __builtin_expect((x),0)
+
+#define smp_processor_id() 0
+
+
+#ifndef __ASSEMBLY__
+#include <types.h>
+#include <hypervisor.h>
+
+extern void do_exit(void);
+#define BUG do_exit
+
+#endif
+#include <xen/xen.h>
+
+
+#define force_evtchn_callback() ((void)HYPERVISOR_xen_version(0, 0))
+
+#define __KERNEL_CS FLAT_KERNEL_CS
+#define __KERNEL_DS FLAT_KERNEL_DS
+#define __KERNEL_SS FLAT_KERNEL_SS
+
+#define TRAP_divide_error 0
+#define TRAP_debug 1
+#define TRAP_nmi 2
+#define TRAP_int3 3
+#define TRAP_overflow 4
+#define TRAP_bounds 5
+#define TRAP_invalid_op 6
+#define TRAP_no_device 7
+#define TRAP_double_fault 8
+#define TRAP_copro_seg 9
+#define TRAP_invalid_tss 10
+#define TRAP_no_segment 11
+#define TRAP_stack_error 12
+#define TRAP_gp_fault 13
+#define TRAP_page_fault 14
+#define TRAP_spurious_int 15
+#define TRAP_copro_error 16
+#define TRAP_alignment_check 17
+#define TRAP_machine_check 18
+#define TRAP_simd_error 19
+#define TRAP_deferred_nmi 31
+
+/* Everything below this point is not included by assembler (.S) files. */
+#ifndef __ASSEMBLY__
+
+extern shared_info_t *HYPERVISOR_shared_info;
+
+void trap_init(void);
+
+
+
+/*
+ * The use of 'barrier' in the following reflects their use as local-lock
+ * operations. Reentrancy must be prevented (e.g., by __cli()) /before/ the
+ * following critical operations are executed. All critical operations must
+ * complete /before/ reentrancy is permitted (e.g., by __sti()). The Alpha
+ * architecture, for example, also requires these barriers.
+ */
+
+#define __cli() \
+do { \
+ vcpu_info_t *_vcpu; \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
+ _vcpu->evtchn_upcall_mask = 1; \
+ barrier(); \
+} while (0)
+
+#define __sti() \
+do { \
+ vcpu_info_t *_vcpu; \
+ barrier(); \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
+ _vcpu->evtchn_upcall_mask = 0; \
+ barrier(); /* unmask then check (avoid races) */ \
+ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
+ force_evtchn_callback(); \
+} while (0)
+
+#define __save_flags(x) \
+do { \
+ vcpu_info_t *_vcpu; \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
+ (x) = _vcpu->evtchn_upcall_mask; \
+} while (0)
+
+#define __restore_flags(x) \
+do { \
+ vcpu_info_t *_vcpu; \
+ barrier(); \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
+ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
+ barrier(); /* unmask then check (avoid races) */ \
+ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
+ force_evtchn_callback(); \
+ }\
+} while (0)
+
+#define safe_halt() ((void)0)
+
+#define __save_and_cli(x) \
+do { \
+ vcpu_info_t *_vcpu; \
+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
+ (x) = _vcpu->evtchn_upcall_mask; \
+ _vcpu->evtchn_upcall_mask = 1; \
+ barrier(); \
+} while (0)
+
+#define local_irq_save(x) __save_and_cli(x)
+#define local_irq_restore(x) __restore_flags(x)
+#define local_save_flags(x) __save_flags(x)
+#define local_irq_disable() __cli()
+#define local_irq_enable() __sti()
+
+#define irqs_disabled() \
+ HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].evtchn_upcall_mask
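
For illustration, a minimal sketch of the save/disable/restore pattern these macros support; the counter and function names are hypothetical, not part of the patch:

    /* Hypothetical example: protecting a shared counter against the event
     * upcall by masking delivery around the update. */
    static unsigned long event_count;

    static void count_event(void)
    {
        unsigned long flags;

        local_irq_save(flags);    /* mask upcalls and remember the old mask */
        event_count++;            /* critical section */
        local_irq_restore(flags); /* unmask; may force a pending callback */
    }
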
+
+/* This is a barrier for the compiler only, NOT the processor! */
+#define barrier() __asm__ __volatile__("": : :"memory")
+
+#if defined(__i386__)
+#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#define rmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#define wmb() __asm__ __volatile__ ("": : :"memory")
+#elif defined(__x86_64__)
+#define mb() __asm__ __volatile__ ("mfence":::"memory")
+#define rmb() __asm__ __volatile__ ("lfence":::"memory")
+#define wmb() __asm__ __volatile__ ("sfence" ::: "memory") /* From CONFIG_UNORDERED_IO (linux) */
+#endif
+
+
+#define LOCK_PREFIX ""
+#define LOCK ""
+#define ADDR (*(volatile long *) addr)
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } atomic_t;
+
+
+/************************** i386 *******************************/
+#if defined (__i386__)
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__ __volatile__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__ __volatile__("xchgl %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It may be reordered on architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__( LOCK
+ "btrl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"Ir" (nr) : "memory");
+ return oldbit;
+}
+
+static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+{
+ return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
+}
+
+static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(
+ "btl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit)
+ :"m" (ADDR),"Ir" (nr));
+ return oldbit;
+}
+
+#define test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ constant_test_bit((nr),(addr)) : \
+ variable_test_bit((nr),(addr)))
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(int nr, volatile unsigned long * addr)
+{
+ __asm__ __volatile__( LOCK
+ "btsl %1,%0"
+ :"=m" (ADDR)
+ :"Ir" (nr));
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void clear_bit(int nr, volatile unsigned long * addr)
+{
+ __asm__ __volatile__( LOCK
+ "btrl %1,%0"
+ :"=m" (ADDR)
+ :"Ir" (nr));
+}
+
+/**
+ * __ffs - find first set bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+ __asm__("bsfl %1,%0"
+ :"=r" (word)
+ :"rm" (word));
+ return word;
+}
+
+
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. All bit operations return 0 if the bit
+ * was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+#define ADDR (*(volatile long *) addr)
+
+#define rdtscll(val) \
+ __asm__ __volatile__("rdtsc" : "=A" (val))
+
+
+
+#elif defined(__x86_64__) /* ifdef __i386__ */
+/************************** x86_64 *******************************/
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+#define __xg(x) ((volatile long *)(x))
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__ __volatile__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__ __volatile__("xchgl %k0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 8:
+ __asm__ __volatile__("xchgq %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__( LOCK_PREFIX
+ "btrl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"dIr" (nr) : "memory");
+ return oldbit;
+}
+
+static __inline__ int constant_test_bit(int nr, const volatile void * addr)
+{
+ return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int variable_test_bit(int nr, volatile const void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(
+ "btl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit)
+ :"m" (ADDR),"dIr" (nr));
+ return oldbit;
+}
+
+#define test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ constant_test_bit((nr),(addr)) : \
+ variable_test_bit((nr),(addr)))
+
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __inline__ void set_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__( LOCK_PREFIX
+ "btsl %1,%0"
+ :"=m" (ADDR)
+ :"dIr" (nr) : "memory");
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static __inline__ void clear_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__( LOCK_PREFIX
+ "btrl %1,%0"
+ :"=m" (ADDR)
+ :"dIr" (nr));
+}
+
+/**
+ * __ffs - find first set bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __inline__ unsigned long __ffs(unsigned long word)
+{
+ __asm__("bsfq %1,%0"
+ :"=r" (word)
+ :"rm" (word));
+ return word;
+}
+
+#define ADDR (*(volatile long *) addr)
+
+#define rdtscll(val) do { \
+ unsigned int __a,__d; \
+ asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+ (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
+} while(0)
+
+#define wrmsr(msr,val1,val2) \
+ __asm__ __volatile__("wrmsr" \
+ : /* no outputs */ \
+ : "c" (msr), "a" (val1), "d" (val2))
+
+#define wrmsrl(msr,val) wrmsr(msr,(u32)((u64)(val)),((u64)(val))>>32)
+
+
+#else /* ifdef __x86_64__ */
+#error "Unsupported architecture"
+#endif
+
+
+/********************* common i386 and x86_64 ****************************/
+struct __synch_xchg_dummy { unsigned long a[100]; };
+#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
+
+#define synch_cmpxchg(ptr, old, new) \
+((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
+ (unsigned long)(old), \
+ (unsigned long)(new), \
+ sizeof(*(ptr))))
+
+static inline unsigned long __synch_cmpxchg(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("lock; cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__synch_xg(ptr)),
+ "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__("lock; cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__synch_xg(ptr)),
+ "0"(old)
+ : "memory");
+ return prev;
+#ifdef __x86_64__
+ case 4:
+ __asm__ __volatile__("lock; cmpxchgl %k1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__synch_xg(ptr)),
+ "0"(old)
+ : "memory");
+ return prev;
+ case 8:
+ __asm__ __volatile__("lock; cmpxchgq %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__synch_xg(ptr)),
+ "0"(old)
+ : "memory");
+ return prev;
+#else
+ case 4:
+ __asm__ __volatile__("lock; cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__synch_xg(ptr)),
+ "0"(old)
+ : "memory");
+ return prev;
+#endif
+ }
+ return old;
+}
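
A hedged sketch of the compare-exchange retry loop synch_cmpxchg() is built for, assuming a plain int counter (the helper name is hypothetical):

    /* Hypothetical example: lock-free increment built on synch_cmpxchg(). */
    static int synch_inc_return(volatile int *v)
    {
        int old, seen;

        do {
            old  = *v;                              /* snapshot the value */
            seen = synch_cmpxchg(v, old, old + 1);  /* try to install old+1 */
        } while (seen != old);                      /* lost a race: retry */
        return old + 1;
    }
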
+
+
+static __inline__ void synch_set_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__ (
+ "lock btsl %1,%0"
+ : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__ (
+ "lock btrl %1,%0"
+ : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+ __asm__ __volatile__ (
+ "lock btsl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+ return oldbit;
+}
+
+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+ __asm__ __volatile__ (
+ "lock btrl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+ return oldbit;
+}
+
+static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
+{
+ return ((1UL << (nr & 31)) &
+ (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+ __asm__ __volatile__ (
+ "btl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
+ return oldbit;
+}
+
+#define synch_test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ synch_const_test_bit((nr),(addr)) : \
+ synch_var_test_bit((nr),(addr)))
+
+
+
+#endif /* not assembly */
+#endif /* _OS_H_ */
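
The bit operations in os.h exist chiefly to scan and claim words of pending flags; a hedged sketch of that pattern follows, with a hypothetical pending word and handler:

    /* Hypothetical example: draining a word of pending flags with the
     * primitives from os.h.  handle_one() is a placeholder. */
    static unsigned long pending;               /* producers use set_bit() */
    static void handle_one(unsigned long bit);

    static void drain_pending(void)
    {
        unsigned long l;

        while ((l = pending) != 0) {
            unsigned long bit = __ffs(l);           /* lowest set bit */
            if (test_and_clear_bit(bit, &pending))  /* claim it atomically */
                handle_one(bit);
        }
    }
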
diff --git a/extras/mini-os/include/x86/spinlock.h b/extras/mini-os/include/x86/spinlock.h
new file mode 100644
index 0000000000..4274cd2869
--- /dev/null
+++ b/extras/mini-os/include/x86/spinlock.h
@@ -0,0 +1,121 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <lib.h>
+
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ */
+
+typedef struct {
+ volatile unsigned int slock;
+} spinlock_t;
+
+#define SPINLOCK_MAGIC 0xdead4ead
+
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
+
+#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions; fairness guarantees would have a cost.
+ */
+
+#define spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0)
+#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
+
+#define spin_lock_string \
+ "1:\n" \
+ LOCK \
+ "decb %0\n\t" \
+ "jns 3f\n" \
+ "2:\t" \
+ "rep;nop\n\t" \
+ "cmpb $0,%0\n\t" \
+ "jle 2b\n\t" \
+ "jmp 1b\n" \
+ "3:\n\t"
+
+#define spin_lock_string_flags \
+ "1:\n" \
+ LOCK \
+ "decb %0\n\t" \
+ "jns 4f\n\t" \
+ "2:\t" \
+ "testl $0x200, %1\n\t" \
+ "jz 3f\n\t" \
+ "#sti\n\t" \
+ "3:\t" \
+ "rep;nop\n\t" \
+ "cmpb $0, %0\n\t" \
+ "jle 3b\n\t" \
+ "#cli\n\t" \
+ "jmp 1b\n" \
+ "4:\n\t"
+
+/*
+ * This works, despite all the confusion, except on PPro SMP or when
+ * OOSTORE is in use (PPro errata 66, 92).
+ */
+
+#define spin_unlock_string \
+ "xchgb %b0, %1" \
+ :"=q" (oldval), "=m" (lock->slock) \
+ :"0" (oldval) : "memory"
+
+static inline void _raw_spin_unlock(spinlock_t *lock)
+{
+ char oldval = 1;
+ __asm__ __volatile__(
+ spin_unlock_string
+ );
+}
+
+static inline int _raw_spin_trylock(spinlock_t *lock)
+{
+ char oldval;
+ __asm__ __volatile__(
+ "xchgb %b0,%1\n"
+ :"=q" (oldval), "=m" (lock->slock)
+ :"0" (0) : "memory");
+ return oldval > 0;
+}
+
+static inline void _raw_spin_lock(spinlock_t *lock)
+{
+ __asm__ __volatile__(
+ spin_lock_string
+ :"=m" (lock->slock) : : "memory");
+}
+
+static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+{
+ __asm__ __volatile__(
+ spin_lock_string_flags
+ :"=m" (lock->slock) : "r" (flags) : "memory");
+}
+
+#define _spin_trylock(lock) ({_raw_spin_trylock(lock) ? \
+ 1 : ({ 0;});})
+
+#define _spin_lock(lock) \
+do { \
+ _raw_spin_lock(lock); \
+} while(0)
+
+#define _spin_unlock(lock) \
+do { \
+ _raw_spin_unlock(lock); \
+} while (0)
+
+
+#define spin_lock(lock) _spin_lock(lock)
+#define spin_unlock(lock) _spin_unlock(lock)
+
+#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
+
+#endif
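
Usage follows the Linux spinlock convention; a minimal sketch with a hypothetical singly-linked list:

    /* Hypothetical example: serialising updates to a shared list. */
    struct item { struct item *next; };
    static struct item *list_head;
    static DEFINE_SPINLOCK(list_lock);

    static void add_item(struct item *it)
    {
        spin_lock(&list_lock);      /* spins on 'lock decb' until acquired */
        it->next  = list_head;
        list_head = it;
        spin_unlock(&list_lock);    /* 'xchgb' stores 1 to release */
    }
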
diff --git a/extras/mini-os/include/x86/traps.h b/extras/mini-os/include/x86/traps.h
new file mode 100644
index 0000000000..2f54fd28f0
--- /dev/null
+++ b/extras/mini-os/include/x86/traps.h
@@ -0,0 +1,73 @@
+/*
+ ****************************************************************************
+ * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
+ ****************************************************************************
+ *
+ * File: traps.h
+ * Author: Grzegorz Milos (gm281@cam.ac.uk)
+ *
+ * Date: Jun 2005
+ *
+ * Environment: Xen Minimal OS
+ * Description: Deals with traps
+ *
+ ****************************************************************************
+ */
+
+#ifndef _TRAPS_H_
+#define _TRAPS_H_
+
+#ifdef __i386__
+struct pt_regs {
+ long ebx;
+ long ecx;
+ long edx;
+ long esi;
+ long edi;
+ long ebp;
+ long eax;
+ int xds;
+ int xes;
+ long orig_eax;
+ long eip;
+ int xcs;
+ long eflags;
+ long esp;
+ int xss;
+};
+#elif defined(__x86_64__)
+
+struct pt_regs {
+ unsigned long r15;
+ unsigned long r14;
+ unsigned long r13;
+ unsigned long r12;
+ unsigned long rbp;
+ unsigned long rbx;
+/* arguments: non-interrupt/non-tracing syscalls only save up to here */
+ unsigned long r11;
+ unsigned long r10;
+ unsigned long r9;
+ unsigned long r8;
+ unsigned long rax;
+ unsigned long rcx;
+ unsigned long rdx;
+ unsigned long rsi;
+ unsigned long rdi;
+ unsigned long orig_rax;
+/* end of arguments */
+/* cpu exception frame or undefined */
+ unsigned long rip;
+ unsigned long cs;
+ unsigned long eflags;
+ unsigned long rsp;
+ unsigned long ss;
+/* top of stack page */
+};
+
+
+#endif
+
+void dump_regs(struct pt_regs *regs);
+
+#endif /* _TRAPS_H_ */
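
dump_regs() is only declared here; as a hedged sketch, an x86_32 implementation might print the saved frame roughly as below (printk is assumed to come from the mini-os console code):

    /* Hypothetical sketch of an x86_32 dump_regs(); the real implementation
     * lives in the architecture-specific sources. */
    void dump_regs(struct pt_regs *regs)
    {
        printk("EIP:    %04x:[<%08lx>]\n", regs->xcs & 0xffff, regs->eip);
        printk("EFLAGS: %08lx\n", regs->eflags);
        printk("eax: %08lx  ebx: %08lx  ecx: %08lx  edx: %08lx\n",
               regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk("esi: %08lx  edi: %08lx  ebp: %08lx  esp: %08lx\n",
               regs->esi, regs->edi, regs->ebp, regs->esp);
    }
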
diff --git a/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h b/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h
new file mode 100644
index 0000000000..6556c4f7e2
--- /dev/null
+++ b/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h
@@ -0,0 +1,326 @@
+/******************************************************************************
+ * hypercall-x86_32.h
+ *
+ * Copied from XenLinux.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __HYPERCALL_X86_32_H__
+#define __HYPERCALL_X86_32_H__
+
+#include <xen/xen.h>
+#include <xen/sched.h>
+#include <xen/nmi.h>
+#include <mm.h>
+
+#define __STR(x) #x
+#define STR(x) __STR(x)
+
+extern char hypercall_page[PAGE_SIZE];
+
+#define _hypercall0(type, name) \
+({ \
+ long __res; \
+ asm volatile ( \
+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ : "=a" (__res) \
+ : \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall1(type, name, a1) \
+({ \
+ long __res, __ign1; \
+ asm volatile ( \
+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ : "=a" (__res), "=b" (__ign1) \
+ : "1" ((long)(a1)) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall2(type, name, a1, a2) \
+({ \
+ long __res, __ign1, __ign2; \
+ asm volatile ( \
+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \
+ : "1" ((long)(a1)), "2" ((long)(a2)) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall3(type, name, a1, a2, a3) \
+({ \
+ long __res, __ign1, __ign2, __ign3; \
+ asm volatile ( \
+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
+ "=d" (__ign3) \
+ : "1" ((long)(a1)), "2" ((long)(a2)), \
+ "3" ((long)(a3)) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall4(type, name, a1, a2, a3, a4) \
+({ \
+ long __res, __ign1, __ign2, __ign3, __ign4; \
+ asm volatile ( \
+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
+ "=d" (__ign3), "=S" (__ign4) \
+ : "1" ((long)(a1)), "2" ((long)(a2)), \
+ "3" ((long)(a3)), "4" ((long)(a4)) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
+({ \
+ long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \
+ asm volatile ( \
+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
+ "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \
+ : "1" ((long)(a1)), "2" ((long)(a2)), \
+ "3" ((long)(a3)), "4" ((long)(a4)), \
+ "5" ((long)(a5)) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+static inline int
+HYPERVISOR_set_trap_table(
+ trap_info_t *table)
+{
+ return _hypercall1(int, set_trap_table, table);
+}
+
+static inline int
+HYPERVISOR_mmu_update(
+ mmu_update_t *req, int count, int *success_count, domid_t domid)
+{
+ return _hypercall4(int, mmu_update, req, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_mmuext_op(
+ struct mmuext_op *op, int count, int *success_count, domid_t domid)
+{
+ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_set_gdt(
+ unsigned long *frame_list, int entries)
+{
+ return _hypercall2(int, set_gdt, frame_list, entries);
+}
+
+static inline int
+HYPERVISOR_stack_switch(
+ unsigned long ss, unsigned long esp)
+{
+ return _hypercall2(int, stack_switch, ss, esp);
+}
+
+static inline int
+HYPERVISOR_set_callbacks(
+ unsigned long event_selector, unsigned long event_address,
+ unsigned long failsafe_selector, unsigned long failsafe_address)
+{
+ return _hypercall4(int, set_callbacks,
+ event_selector, event_address,
+ failsafe_selector, failsafe_address);
+}
+
+static inline int
+HYPERVISOR_fpu_taskswitch(
+ int set)
+{
+ return _hypercall1(int, fpu_taskswitch, set);
+}
+
+static inline int
+HYPERVISOR_sched_op(
+ int cmd, unsigned long arg)
+{
+ return _hypercall2(int, sched_op, cmd, arg);
+}
+
+static inline long
+HYPERVISOR_set_timer_op(
+ u64 timeout)
+{
+ unsigned long timeout_hi = (unsigned long)(timeout>>32);
+ unsigned long timeout_lo = (unsigned long)timeout;
+ return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
+}
+
+static inline int
+HYPERVISOR_dom0_op(
+ dom0_op_t *dom0_op)
+{
+ dom0_op->interface_version = DOM0_INTERFACE_VERSION;
+ return _hypercall1(int, dom0_op, dom0_op);
+}
+
+static inline int
+HYPERVISOR_set_debugreg(
+ int reg, unsigned long value)
+{
+ return _hypercall2(int, set_debugreg, reg, value);
+}
+
+static inline unsigned long
+HYPERVISOR_get_debugreg(
+ int reg)
+{
+ return _hypercall1(unsigned long, get_debugreg, reg);
+}
+
+static inline int
+HYPERVISOR_update_descriptor(
+ u64 ma, u64 desc)
+{
+ return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
+}
+
+static inline int
+HYPERVISOR_memory_op(
+ unsigned int cmd, void *arg)
+{
+ return _hypercall2(int, memory_op, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_multicall(
+ void *call_list, int nr_calls)
+{
+ return _hypercall2(int, multicall, call_list, nr_calls);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping(
+ unsigned long va, pte_t new_val, unsigned long flags)
+{
+ unsigned long pte_hi = 0;
+#ifdef CONFIG_X86_PAE
+ pte_hi = new_val.pte_high;
+#endif
+ return _hypercall4(int, update_va_mapping, va,
+ new_val.pte_low, pte_hi, flags);
+}
+
+static inline int
+HYPERVISOR_event_channel_op(
+ void *op)
+{
+ return _hypercall1(int, event_channel_op, op);
+}
+
+static inline int
+HYPERVISOR_xen_version(
+ int cmd, void *arg)
+{
+ return _hypercall2(int, xen_version, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_console_io(
+ int cmd, int count, char *str)
+{
+ return _hypercall3(int, console_io, cmd, count, str);
+}
+
+static inline int
+HYPERVISOR_physdev_op(
+ void *physdev_op)
+{
+ return _hypercall1(int, physdev_op, physdev_op);
+}
+
+static inline int
+HYPERVISOR_grant_table_op(
+ unsigned int cmd, void *uop, unsigned int count)
+{
+ return _hypercall3(int, grant_table_op, cmd, uop, count);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping_otherdomain(
+ unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
+{
+ unsigned long pte_hi = 0;
+#ifdef CONFIG_X86_PAE
+ pte_hi = new_val.pte_high;
+#endif
+ return _hypercall5(int, update_va_mapping_otherdomain, va,
+ new_val.pte_low, pte_hi, flags, domid);
+}
+
+static inline int
+HYPERVISOR_vm_assist(
+ unsigned int cmd, unsigned int type)
+{
+ return _hypercall2(int, vm_assist, cmd, type);
+}
+
+static inline int
+HYPERVISOR_vcpu_op(
+ int cmd, int vcpuid, void *extra_args)
+{
+ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
+}
+
+static inline int
+HYPERVISOR_suspend(
+ unsigned long srec)
+{
+ return _hypercall3(int, sched_op, SCHEDOP_shutdown,
+ SHUTDOWN_suspend, srec);
+}
+
+static inline int
+HYPERVISOR_nmi_op(
+ unsigned long op,
+ unsigned long arg)
+{
+ return _hypercall2(int, nmi_op, op, arg);
+}
+
+#endif /* __HYPERCALL_X86_32_H__ */
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
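
To see one of these wrappers in action, a hedged usage sketch: CONSOLEIO_write comes from xen/xen.h, and strlen() is assumed to come from the mini-os library headers:

    /* Hypothetical example: emergency console output.  _hypercall3 places
     * cmd/count/str in %ebx/%ecx/%edx before calling into hypercall_page. */
    static void xen_console_puts(const char *s)
    {
        (void)HYPERVISOR_console_io(CONSOLEIO_write, strlen(s), (char *)s);
    }
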
diff --git a/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h b/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h
new file mode 100644
index 0000000000..6a68a10b02
--- /dev/null
+++ b/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h
@@ -0,0 +1,326 @@
+/******************************************************************************
+ * hypercall-x86_64.h
+ *
+ * Copied from XenLinux.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * 64-bit updates:
+ * Benjamin Liu <benjamin.liu@intel.com>
+ * Jun Nakajima <jun.nakajima@intel.com>
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __HYPERCALL_X86_64_H__
+#define __HYPERCALL_X86_64_H__
+
+#include <xen/xen.h>
+#include <xen/sched.h>
+#include <mm.h>
+
+#define __STR(x) #x
+#define STR(x) __STR(x)
+
+extern char hypercall_page[PAGE_SIZE];
+
+#define _hypercall0(type, name) \
+({ \
+ long __res; \
+ asm volatile ( \
+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ : "=a" (__res) \
+ : \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall1(type, name, a1) \
+({ \
+ long __res, __ign1; \
+ asm volatile ( \
+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ : "=a" (__res), "=D" (__ign1) \
+ : "1" ((long)(a1)) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall2(type, name, a1, a2) \
+({ \
+ long __res, __ign1, __ign2; \
+ asm volatile ( \
+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \
+ : "1" ((long)(a1)), "2" ((long)(a2)) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall3(type, name, a1, a2, a3) \
+({ \
+ long __res, __ign1, __ign2, __ign3; \
+ asm volatile ( \
+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
+ "=d" (__ign3) \
+ : "1" ((long)(a1)), "2" ((long)(a2)), \
+ "3" ((long)(a3)) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall4(type, name, a1, a2, a3, a4) \
+({ \
+ long __res, __ign1, __ign2, __ign3; \
+ asm volatile ( \
+ "movq %7,%%r10; " \
+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
+ "=d" (__ign3) \
+ : "1" ((long)(a1)), "2" ((long)(a2)), \
+ "3" ((long)(a3)), "g" ((long)(a4)) \
+ : "memory", "r10" ); \
+ (type)__res; \
+})
+
+#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
+({ \
+ long __res, __ign1, __ign2, __ign3; \
+ asm volatile ( \
+ "movq %7,%%r10; movq %8,%%r8; " \
+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
+ "=d" (__ign3) \
+ : "1" ((long)(a1)), "2" ((long)(a2)), \
+ "3" ((long)(a3)), "g" ((long)(a4)), \
+ "g" ((long)(a5)) \
+ : "memory", "r10", "r8" ); \
+ (type)__res; \
+})
+
+static inline int
+HYPERVISOR_set_trap_table(
+ trap_info_t *table)
+{
+ return _hypercall1(int, set_trap_table, table);
+}
+
+static inline int
+HYPERVISOR_mmu_update(
+ mmu_update_t *req, int count, int *success_count, domid_t domid)
+{
+ return _hypercall4(int, mmu_update, req, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_mmuext_op(
+ struct mmuext_op *op, int count, int *success_count, domid_t domid)
+{
+ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_set_gdt(
+ unsigned long *frame_list, int entries)
+{
+ return _hypercall2(int, set_gdt, frame_list, entries);
+}
+
+static inline int
+HYPERVISOR_stack_switch(
+ unsigned long ss, unsigned long esp)
+{
+ return _hypercall2(int, stack_switch, ss, esp);
+}
+
+static inline int
+HYPERVISOR_set_callbacks(
+ unsigned long event_address, unsigned long failsafe_address,
+ unsigned long syscall_address)
+{
+ return _hypercall3(int, set_callbacks,
+ event_address, failsafe_address, syscall_address);
+}
+
+static inline int
+HYPERVISOR_fpu_taskswitch(
+ int set)
+{
+ return _hypercall1(int, fpu_taskswitch, set);
+}
+
+static inline int
+HYPERVISOR_sched_op(
+ int cmd, unsigned long arg)
+{
+ return _hypercall2(int, sched_op, cmd, arg);
+}
+
+static inline long
+HYPERVISOR_set_timer_op(
+ u64 timeout)
+{
+ return _hypercall1(long, set_timer_op, timeout);
+}
+
+static inline int
+HYPERVISOR_dom0_op(
+ dom0_op_t *dom0_op)
+{
+ dom0_op->interface_version = DOM0_INTERFACE_VERSION;
+ return _hypercall1(int, dom0_op, dom0_op);
+}
+
+static inline int
+HYPERVISOR_set_debugreg(
+ int reg, unsigned long value)
+{
+ return _hypercall2(int, set_debugreg, reg, value);
+}
+
+static inline unsigned long
+HYPERVISOR_get_debugreg(
+ int reg)
+{
+ return _hypercall1(unsigned long, get_debugreg, reg);
+}
+
+static inline int
+HYPERVISOR_update_descriptor(
+ unsigned long ma, unsigned long word)
+{
+ return _hypercall2(int, update_descriptor, ma, word);
+}
+
+static inline int
+HYPERVISOR_memory_op(
+ unsigned int cmd, void *arg)
+{
+ return _hypercall2(int, memory_op, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_multicall(
+ void *call_list, int nr_calls)
+{
+ return _hypercall2(int, multicall, call_list, nr_calls);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping(
+ unsigned long va, pte_t new_val, unsigned long flags)
+{
+ return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
+}
+
+static inline int
+HYPERVISOR_event_channel_op(
+ void *op)
+{
+ return _hypercall1(int, event_channel_op, op);
+}
+
+static inline int
+HYPERVISOR_xen_version(
+ int cmd, void *arg)
+{
+ return _hypercall2(int, xen_version, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_console_io(
+ int cmd, int count, char *str)
+{
+ return _hypercall3(int, console_io, cmd, count, str);
+}
+
+static inline int
+HYPERVISOR_physdev_op(
+ void *physdev_op)
+{
+ return _hypercall1(int, physdev_op, physdev_op);
+}
+
+static inline int
+HYPERVISOR_grant_table_op(
+ unsigned int cmd, void *uop, unsigned int count)
+{
+ return _hypercall3(int, grant_table_op, cmd, uop, count);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping_otherdomain(
+ unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
+{
+ return _hypercall4(int, update_va_mapping_otherdomain, va,
+ new_val.pte, flags, domid);
+}
+
+static inline int
+HYPERVISOR_vm_assist(
+ unsigned int cmd, unsigned int type)
+{
+ return _hypercall2(int, vm_assist, cmd, type);
+}
+
+static inline int
+HYPERVISOR_vcpu_op(
+ int cmd, int vcpuid, void *extra_args)
+{
+ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
+}
+
+static inline int
+HYPERVISOR_set_segment_base(
+ int reg, unsigned long value)
+{
+ return _hypercall2(int, set_segment_base, reg, value);
+}
+
+static inline int
+HYPERVISOR_suspend(
+ unsigned long srec)
+{
+ return _hypercall3(int, sched_op, SCHEDOP_shutdown,
+ SHUTDOWN_suspend, srec);
+}
+
+static inline int
+HYPERVISOR_nmi_op(
+ unsigned long op,
+ unsigned long arg)
+{
+ return _hypercall2(int, nmi_op, op, arg);
+}
+
+#endif /* __HYPERCALL_X86_64_H__ */
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
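
Note the calling-convention difference visible in HYPERVISOR_set_timer_op(): on x86_32 the 64-bit timeout must be split into two 32-bit register arguments, while here it travels whole in a single register. A hedged usage sketch (the wrapper name is hypothetical):

    /* Hypothetical example: arming a one-shot timer one millisecond past a
     * caller-supplied Xen system time (nanoseconds). */
    static void arm_timer(u64 now_ns)
    {
        (void)HYPERVISOR_set_timer_op(now_ns + 1000000ULL);
    }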