author    kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-09-09 09:24:25 +0000
committer kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-09-09 09:24:25 +0000
commit    cdb8b09f6b67b270b1c21f1a7f42d5e8a604caa8 (patch)
tree      414d05b171df34e0125f29731a8ba12e280297d9 /extras/mini-os/include
parent    c125eb9c047b908b2bb18e5cf4a88355a1526a25 (diff)
Xenbus implementation ported from Linux to Mini-os; simple thread support introduced to simplify the porting. The 64-bit version of Mini-os now compiles, but does not work yet because the pagetables and some bits of the scheduler have not been written.

Signed-off-by: Grzegorz Milos <gm281@cam.ac.uk>
Diffstat (limited to 'extras/mini-os/include')
-rw-r--r--  extras/mini-os/include/ctype.h        79
-rw-r--r--  extras/mini-os/include/err.h          31
-rw-r--r--  extras/mini-os/include/errno-base.h   39
-rw-r--r--  extras/mini-os/include/errno.h       109
-rw-r--r--  extras/mini-os/include/events.h       11
-rw-r--r--  extras/mini-os/include/fcntl.h        89
-rw-r--r--  extras/mini-os/include/hypervisor.h  687
-rw-r--r--  extras/mini-os/include/lib.h          30
-rw-r--r--  extras/mini-os/include/mm.h           20
-rw-r--r--  extras/mini-os/include/os.h          335
-rw-r--r--  extras/mini-os/include/sched.h        38
-rw-r--r--  extras/mini-os/include/semaphore.h    46
-rw-r--r--  extras/mini-os/include/time.h          2
-rw-r--r--  extras/mini-os/include/traps.h        32
-rw-r--r--  extras/mini-os/include/types.h         8
-rw-r--r--  extras/mini-os/include/wait.h         91
-rw-r--r--  extras/mini-os/include/xenbus.h       89
17 files changed, 1553 insertions, 183 deletions
diff --git a/extras/mini-os/include/ctype.h b/extras/mini-os/include/ctype.h
new file mode 100644
index 0000000000..013948656a
--- /dev/null
+++ b/extras/mini-os/include/ctype.h
@@ -0,0 +1,79 @@
+#ifndef _CTYPE_H
+#define _CTYPE_H
+
+/*
+ * NOTE! This ctype does not handle EOF like the standard C
+ * library is required to.
+ */
+
+#define _U 0x01 /* upper */
+#define _L 0x02 /* lower */
+#define _D 0x04 /* digit */
+#define _C 0x08 /* cntrl */
+#define _P 0x10 /* punct */
+#define _S 0x20 /* white space (space/lf/tab) */
+#define _X 0x40 /* hex digit */
+#define _SP 0x80 /* hard space (0x20) */
+
+
+unsigned char _ctype[] = {
+_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
+_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
+_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
+_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
+_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
+_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
+_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
+_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
+_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
+_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
+_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
+_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
+_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
+_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
+
+#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
+
+#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0)
+#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0)
+#define iscntrl(c) ((__ismask(c)&(_C)) != 0)
+#define isdigit(c) ((__ismask(c)&(_D)) != 0)
+#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0)
+#define islower(c) ((__ismask(c)&(_L)) != 0)
+#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
+#define ispunct(c) ((__ismask(c)&(_P)) != 0)
+#define isspace(c) ((__ismask(c)&(_S)) != 0)
+#define isupper(c) ((__ismask(c)&(_U)) != 0)
+#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)
+
+#define isascii(c) (((unsigned char)(c))<=0x7f)
+#define toascii(c) (((unsigned char)(c))&0x7f)
+
+static inline unsigned char __tolower(unsigned char c)
+{
+ if (isupper(c))
+ c -= 'A'-'a';
+ return c;
+}
+
+static inline unsigned char __toupper(unsigned char c)
+{
+ if (islower(c))
+ c -= 'a'-'A';
+ return c;
+}
+
+#define tolower(c) __tolower(c)
+#define toupper(c) __toupper(c)
+
+#endif
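
The classification macros in this header are purely table-driven: each character indexes _ctype[] and the result is tested against the mode bits. As a minimal usage sketch (the helper name is illustrative, not part of this patch), assuming only the macros defined here:

    #include <ctype.h>

    /* Skip leading whitespace, then fold the first token to lower case
     * in place, using only the table-driven macros above. */
    static char *normalise_token(char *s)
    {
        char *p = s, *q;

        while (*p && isspace(*p))            /* _S bit lookup per character */
            p++;
        for (q = p; *q && !isspace(*q); q++)
            *q = tolower(*q);                /* conditional 'A'-'a' shift */
        return p;
    }
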
diff --git a/extras/mini-os/include/err.h b/extras/mini-os/include/err.h
new file mode 100644
index 0000000000..c329c57d88
--- /dev/null
+++ b/extras/mini-os/include/err.h
@@ -0,0 +1,31 @@
+#ifndef _ERR_H
+#define _ERR_H
+
+#include <errno.h>
+
+/*
+ * Kernel pointers have redundant information, so we can use a
+ * scheme where we can return either an error code or a dentry
+ * pointer with the same return value.
+ *
+ * This should be a per-architecture thing, to allow different
+ * error and pointer decisions.
+ */
+#define IS_ERR_VALUE(x) ((x) > (unsigned long)-1000L)
+
+static inline void *ERR_PTR(long error)
+{
+ return (void *) error;
+}
+
+static inline long PTR_ERR(const void *ptr)
+{
+ return (long) ptr;
+}
+
+static inline long IS_ERR(const void *ptr)
+{
+ return IS_ERR_VALUE((unsigned long)ptr);
+}
+
+#endif /* _ERR_H */
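
The scheme encodes a small negative errno value in the pointer itself, so a single return value can carry either a valid pointer or an error. A sketch of the intended calling convention (the function and path names are illustrative, not taken from this patch):

    #include <err.h>
    #include <lib.h>

    /* Illustrative lookup that reports failure through the returned pointer. */
    static void *lookup_node(const char *name)
    {
        if (name == NULL || *name == '\0')
            return ERR_PTR(-EINVAL);
        /* ... real lookup elided ... */
        return ERR_PTR(-ENOENT);
    }

    static void caller(void)
    {
        void *n = lookup_node("device/vif/0");
        if (IS_ERR(n))
            printk("lookup failed: %ld\n", PTR_ERR(n));
    }
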
diff --git a/extras/mini-os/include/errno-base.h b/extras/mini-os/include/errno-base.h
new file mode 100644
index 0000000000..036a0802a8
--- /dev/null
+++ b/extras/mini-os/include/errno-base.h
@@ -0,0 +1,39 @@
+#ifndef _ERRNO_BASE_H
+#define _ERRNO_BASE_H
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* I/O error */
+#define ENXIO 6 /* No such device or address */
+#define E2BIG 7 /* Argument list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file number */
+#define ECHILD 10 /* No child processes */
+#define EAGAIN 11 /* Try again */
+#define ENOMEM 12 /* Out of memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#define ENOTBLK 15 /* Block device required */
+#define EBUSY 16 /* Device or resource busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* No such device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* File table overflow */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Not a typewriter */
+#define ETXTBSY 26 /* Text file busy */
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only file system */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+#define EDOM 33 /* Math argument out of domain of func */
+#define ERANGE 34 /* Math result not representable */
+
+#endif
diff --git a/extras/mini-os/include/errno.h b/extras/mini-os/include/errno.h
new file mode 100644
index 0000000000..aa45040a27
--- /dev/null
+++ b/extras/mini-os/include/errno.h
@@ -0,0 +1,109 @@
+#ifndef _ERRNO_H
+#define _ERRNO_H
+
+#include <errno-base.h>
+
+#define EDEADLK 35 /* Resource deadlock would occur */
+#define ENAMETOOLONG 36 /* File name too long */
+#define ENOLCK 37 /* No record locks available */
+#define ENOSYS 38 /* Function not implemented */
+#define ENOTEMPTY 39 /* Directory not empty */
+#define ELOOP 40 /* Too many symbolic links encountered */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define ENOMSG 42 /* No message of desired type */
+#define EIDRM 43 /* Identifier removed */
+#define ECHRNG 44 /* Channel number out of range */
+#define EL2NSYNC 45 /* Level 2 not synchronized */
+#define EL3HLT 46 /* Level 3 halted */
+#define EL3RST 47 /* Level 3 reset */
+#define ELNRNG 48 /* Link number out of range */
+#define EUNATCH 49 /* Protocol driver not attached */
+#define ENOCSI 50 /* No CSI structure available */
+#define EL2HLT 51 /* Level 2 halted */
+#define EBADE 52 /* Invalid exchange */
+#define EBADR 53 /* Invalid request descriptor */
+#define EXFULL 54 /* Exchange full */
+#define ENOANO 55 /* No anode */
+#define EBADRQC 56 /* Invalid request code */
+#define EBADSLT 57 /* Invalid slot */
+
+#define EDEADLOCK EDEADLK
+
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EMULTIHOP 72 /* Multihop attempted */
+#define EDOTDOT 73 /* RFS specific error */
+#define EBADMSG 74 /* Not a data message */
+#define EOVERFLOW 75 /* Value too large for defined data type */
+#define ENOTUNIQ 76 /* Name not unique on network */
+#define EBADFD 77 /* File descriptor in bad state */
+#define EREMCHG 78 /* Remote address changed */
+#define ELIBACC 79 /* Can not access a needed shared library */
+#define ELIBBAD 80 /* Accessing a corrupted shared library */
+#define ELIBSCN 81 /* .lib section in a.out corrupted */
+#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define EILSEQ 84 /* Illegal byte sequence */
+#define ERESTART 85 /* Interrupted system call should be restarted */
+#define ESTRPIPE 86 /* Streams pipe error */
+#define EUSERS 87 /* Too many users */
+#define ENOTSOCK 88 /* Socket operation on non-socket */
+#define EDESTADDRREQ 89 /* Destination address required */
+#define EMSGSIZE 90 /* Message too long */
+#define EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 92 /* Protocol not available */
+#define EPROTONOSUPPORT 93 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 96 /* Protocol family not supported */
+#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define EADDRINUSE 98 /* Address already in use */
+#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define ENETDOWN 100 /* Network is down */
+#define ENETUNREACH 101 /* Network is unreachable */
+#define ENETRESET 102 /* Network dropped connection because of reset */
+#define ECONNABORTED 103 /* Software caused connection abort */
+#define ECONNRESET 104 /* Connection reset by peer */
+#define ENOBUFS 105 /* No buffer space available */
+#define EISCONN 106 /* Transport endpoint is already connected */
+#define ENOTCONN 107 /* Transport endpoint is not connected */
+#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define ETIMEDOUT 110 /* Connection timed out */
+#define ECONNREFUSED 111 /* Connection refused */
+#define EHOSTDOWN 112 /* Host is down */
+#define EHOSTUNREACH 113 /* No route to host */
+#define EALREADY 114 /* Operation already in progress */
+#define EINPROGRESS 115 /* Operation now in progress */
+#define ESTALE 116 /* Stale NFS file handle */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EDQUOT 122 /* Quota exceeded */
+
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+#define ECANCELED 125 /* Operation Canceled */
+#define ENOKEY 126 /* Required key not available */
+#define EKEYEXPIRED 127 /* Key has expired */
+#define EKEYREVOKED 128 /* Key has been revoked */
+#define EKEYREJECTED 129 /* Key was rejected by service */
+
+/* for robust mutexes */
+#define EOWNERDEAD 130 /* Owner died */
+#define ENOTRECOVERABLE 131 /* State not recoverable */
+
+#endif
diff --git a/extras/mini-os/include/events.h b/extras/mini-os/include/events.h
index 7825650c57..3fad1f376c 100644
--- a/extras/mini-os/include/events.h
+++ b/extras/mini-os/include/events.h
@@ -20,6 +20,7 @@
#define _EVENTS_H_
#include<traps.h>
+#include <xen/event_channel.h>
#define NR_EVS 1024
@@ -39,6 +40,16 @@ typedef struct _ev_action_t {
/* prototypes */
int do_event(u32 port, struct pt_regs *regs);
int bind_virq( u32 virq, void (*handler)(int, struct pt_regs *) );
+void bind_evtchn( u32 virq, void (*handler)(int, struct pt_regs *) );
void init_events(void);
+static inline int notify_via_evtchn(int port)
+{
+ evtchn_op_t op;
+ op.cmd = EVTCHNOP_send;
+ op.u.send.local_port = port;
+ return HYPERVISOR_event_channel_op(&op);
+}
+
+
#endif /* _EVENTS_H_ */
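
notify_via_evtchn() is a thin wrapper around the EVTCHNOP_send hypercall; the receiving side of a port is normally wired up first with bind_evtchn(). A minimal sketch of both ends in a driver (the handler body and port value are illustrative):

    #include <os.h>
    #include <events.h>

    static void my_handler(int port, struct pt_regs *regs)
    {
        /* process the notification for this port */
    }

    static void example_setup(u32 port)
    {
        bind_evtchn(port, my_handler);   /* route do_event() to my_handler */
        notify_via_evtchn(port);         /* kick the remote end once */
    }
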
diff --git a/extras/mini-os/include/fcntl.h b/extras/mini-os/include/fcntl.h
new file mode 100644
index 0000000000..64020795ab
--- /dev/null
+++ b/extras/mini-os/include/fcntl.h
@@ -0,0 +1,89 @@
+#ifndef _I386_FCNTL_H
+#define _I386_FCNTL_H
+
+/* open/fcntl - O_SYNC is only implemented on block devices and on files
+ located on an ext2 file system */
+#define O_ACCMODE 0003
+#define O_RDONLY 00
+#define O_WRONLY 01
+#define O_RDWR 02
+#define O_CREAT 0100 /* not fcntl */
+#define O_EXCL 0200 /* not fcntl */
+#define O_NOCTTY 0400 /* not fcntl */
+#define O_TRUNC 01000 /* not fcntl */
+#define O_APPEND 02000
+#define O_NONBLOCK 04000
+#define O_NDELAY O_NONBLOCK
+#define O_SYNC 010000
+#define FASYNC 020000 /* fcntl, for BSD compatibility */
+#define O_DIRECT 040000 /* direct disk access hint */
+#define O_LARGEFILE 0100000
+#define O_DIRECTORY 0200000 /* must be a directory */
+#define O_NOFOLLOW 0400000 /* don't follow links */
+#define O_NOATIME 01000000
+
+#define F_DUPFD 0 /* dup */
+#define F_GETFD 1 /* get close_on_exec */
+#define F_SETFD 2 /* set/clear close_on_exec */
+#define F_GETFL 3 /* get file->f_flags */
+#define F_SETFL 4 /* set file->f_flags */
+#define F_GETLK 5
+#define F_SETLK 6
+#define F_SETLKW 7
+
+#define F_SETOWN 8 /* for sockets. */
+#define F_GETOWN 9 /* for sockets. */
+#define F_SETSIG 10 /* for sockets. */
+#define F_GETSIG 11 /* for sockets. */
+
+#define F_GETLK64 12 /* using 'struct flock64' */
+#define F_SETLK64 13
+#define F_SETLKW64 14
+
+/* for F_[GET|SET]FL */
+#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
+
+/* for posix fcntl() and lockf() */
+#define F_RDLCK 0
+#define F_WRLCK 1
+#define F_UNLCK 2
+
+/* for old implementation of bsd flock () */
+#define F_EXLCK 4 /* or 3 */
+#define F_SHLCK 8 /* or 4 */
+
+/* for leases */
+#define F_INPROGRESS 16
+
+/* operations for bsd flock(), also used by the kernel implementation */
+#define LOCK_SH 1 /* shared lock */
+#define LOCK_EX 2 /* exclusive lock */
+#define LOCK_NB 4 /* or'd with one of the above to prevent
+ blocking */
+#define LOCK_UN 8 /* remove lock */
+
+#define LOCK_MAND 32 /* This is a mandatory flock */
+#define LOCK_READ 64 /* ... Which allows concurrent read operations */
+#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
+#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
+
+/*
+struct flock {
+ short l_type;
+ short l_whence;
+ off_t l_start;
+ off_t l_len;
+ pid_t l_pid;
+};
+
+struct flock64 {
+ short l_type;
+ short l_whence;
+ loff_t l_start;
+ loff_t l_len;
+ pid_t l_pid;
+};
+
+#define F_LINUX_SPECIFIC_BASE 1024
+*/
+#endif
diff --git a/extras/mini-os/include/hypervisor.h b/extras/mini-os/include/hypervisor.h
index acfec7cfb0..ed3cab6fe6 100644
--- a/extras/mini-os/include/hypervisor.h
+++ b/extras/mini-os/include/hypervisor.h
@@ -13,7 +13,6 @@
#define _HYPERVISOR_H_
#include <types.h>
-
#include <xen/xen.h>
#include <xen/io/domain_controller.h>
@@ -40,48 +39,43 @@ void clear_evtchn(u32 port);
/*
* Assembler stubs for hyper-calls.
*/
-
-#ifdef __i386__
-#define _a1 "b"
-#define _a2 "c"
-#define _a3 "d"
-#define _a4 "S"
-#else
-#define _a1 "D"
-#define _a2 "S"
-#define _a3 "d"
-#define _a4 "b"
-#endif
-
-static __inline__ int HYPERVISOR_event_channel_op(
- void *op)
+#if defined(__i386__)
+static inline int
+HYPERVISOR_set_trap_table(
+ trap_info_t *table)
{
int ret;
unsigned long ignore;
+
__asm__ __volatile__ (
TRAP_INSTR
: "=a" (ret), "=b" (ignore)
- : "0" (__HYPERVISOR_event_channel_op), "1" (op)
+ : "0" (__HYPERVISOR_set_trap_table), "1" (table)
: "memory" );
return ret;
}
-static __inline__ int HYPERVISOR_set_trap_table(trap_info_t *table)
+static inline int
+HYPERVISOR_mmu_update(
+ mmu_update_t *req, int count, int *success_count, domid_t domid)
{
int ret;
+ unsigned long ign1, ign2, ign3, ign4;
+
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_set_trap_table),
- _a1 (table) : "memory" );
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+ : "0" (__HYPERVISOR_mmu_update), "1" (req), "2" (count),
+ "3" (success_count), "4" (domid)
+ : "memory" );
return ret;
}
-static __inline__ int HYPERVISOR_mmu_update(mmu_update_t *req,
- int count,
- int *success_count,
- domid_t domid)
+static inline int
+HYPERVISOR_mmuext_op(
+ struct mmuext_op *op, int count, int *success_count, domid_t domid)
{
int ret;
unsigned long ign1, ign2, ign3, ign4;
@@ -89,18 +83,50 @@ static __inline__ int HYPERVISOR_mmu_update(mmu_update_t *req,
__asm__ __volatile__ (
TRAP_INSTR
: "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
- : "0" (__HYPERVISOR_mmu_update), "1" (req), "2" (count),
- "3" (success_count), "4" (domid)
- : "memory" );
+ : "0" (__HYPERVISOR_mmuext_op), "1" (op), "2" (count),
+ "3" (success_count), "4" (domid)
+ : "memory" );
return ret;
}
+static inline int
+HYPERVISOR_set_gdt(
+ unsigned long *frame_list, int entries)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_set_gdt), "1" (frame_list), "2" (entries)
+ : "memory" );
+
-static __inline__ int HYPERVISOR_mmuext_op(struct mmuext_op *op,
- int count,
- int *success_count,
- domid_t domid)
+ return ret;
+}
+
+static inline int
+HYPERVISOR_stack_switch(
+ unsigned long ss, unsigned long esp)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_stack_switch), "1" (ss), "2" (esp)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_callbacks(
+ unsigned long event_selector, unsigned long event_address,
+ unsigned long failsafe_selector, unsigned long failsafe_address)
{
int ret;
unsigned long ign1, ign2, ign3, ign4;
@@ -108,162 +134,653 @@ static __inline__ int HYPERVISOR_mmuext_op(struct mmuext_op *op,
__asm__ __volatile__ (
TRAP_INSTR
: "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
- : "0" (__HYPERVISOR_mmuext_op), "1" (op), "2" (count),
- "3" (success_count), "4" (domid)
+ : "0" (__HYPERVISOR_set_callbacks), "1" (event_selector),
+ "2" (event_address), "3" (failsafe_selector), "4" (failsafe_address)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_fpu_taskswitch(
+ int set)
+{
+ int ret;
+ unsigned long ign;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_fpu_taskswitch), "1" (set)
: "memory" );
return ret;
}
+static inline int
+HYPERVISOR_yield(
+ void)
+{
+ int ret;
+ unsigned long ign;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_yield)
+ : "memory", "ecx" );
+ return ret;
+}
-static __inline__ int HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
+static inline int
+HYPERVISOR_block(
+ void)
{
int ret;
+ unsigned long ign1;
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_set_gdt),
- _a1 (frame_list), _a2 (entries) : "memory" );
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_block)
+ : "memory", "ecx" );
+
+ return ret;
+}
+static inline int
+HYPERVISOR_shutdown(
+ void)
+{
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift))
+ : "memory", "ecx" );
return ret;
}
-static __inline__ int HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
+static inline int
+HYPERVISOR_reboot(
+ void)
{
int ret;
+ unsigned long ign1;
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_stack_switch),
- _a1 (ss), _a2 (esp) : "memory" );
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift))
+ : "memory", "ecx" );
return ret;
}
-#ifdef __i386__
-static __inline__ int HYPERVISOR_set_callbacks(
- unsigned long event_selector, unsigned long event_address,
- unsigned long failsafe_selector, unsigned long failsafe_address)
+static inline int
+HYPERVISOR_suspend(
+ unsigned long srec)
{
int ret;
+ unsigned long ign1, ign2;
+
+ /* NB. On suspend, control software expects a suspend record in %esi. */
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_set_callbacks),
- _a1 (event_selector), _a2 (event_address),
- _a3 (failsafe_selector), _a4 (failsafe_address) : "memory" );
+ : "=a" (ret), "=b" (ign1), "=S" (ign2)
+ : "0" (__HYPERVISOR_sched_op),
+ "b" (SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift)),
+ "S" (srec) : "memory", "ecx");
return ret;
}
-#else
-static __inline__ int HYPERVISOR_set_callbacks(
- unsigned long event_address,
- unsigned long failsafe_address,
- unsigned long syscall_address)
+
+static inline int
+HYPERVISOR_crash(
+ void)
{
int ret;
+ unsigned long ign1;
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_set_callbacks),
- _a1 (event_address), _a2 (failsafe_address),
- _a3 (syscall_address) : "memory" );
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_shutdown | (SHUTDOWN_crash << SCHEDOP_reasonshift))
+ : "memory", "ecx" );
+
+ return ret;
+}
+
+static inline long
+HYPERVISOR_set_timer_op(
+ u64 timeout)
+{
+ int ret;
+ unsigned long timeout_hi = (unsigned long)(timeout>>32);
+ unsigned long timeout_lo = (unsigned long)timeout;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_set_timer_op), "b" (timeout_lo), "c" (timeout_hi)
+ : "memory");
+
+ return ret;
+}
+
+#if 0
+static inline int
+HYPERVISOR_dom0_op(
+ dom0_op_t *dom0_op)
+{
+ int ret;
+ unsigned long ign1;
+
+ dom0_op->interface_version = DOM0_INTERFACE_VERSION;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_dom0_op), "1" (dom0_op)
+ : "memory");
return ret;
}
#endif
-static __inline__ int
-HYPERVISOR_fpu_taskswitch(
- int set)
+static inline int
+HYPERVISOR_set_debugreg(
+ int reg, unsigned long value)
{
int ret;
- unsigned long ign;
+ unsigned long ign1, ign2;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_set_debugreg), "1" (reg), "2" (value)
+ : "memory" );
+ return ret;
+}
+
+static inline unsigned long
+HYPERVISOR_get_debugreg(
+ int reg)
+{
+ unsigned long ret;
+ unsigned long ign;
__asm__ __volatile__ (
TRAP_INSTR
: "=a" (ret), "=b" (ign)
- : "0" (__HYPERVISOR_fpu_taskswitch), "1" (set)
+ : "0" (__HYPERVISOR_get_debugreg), "1" (reg)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_update_descriptor(
+ u64 ma, u64 desc)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3, ign4;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+ : "0" (__HYPERVISOR_update_descriptor),
+ "1" ((unsigned long)ma), "2" ((unsigned long)(ma>>32)),
+ "3" ((unsigned long)desc), "4" ((unsigned long)(desc>>32))
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_dom_mem_op(
+ unsigned int op, unsigned long *extent_list,
+ unsigned long nr_extents, unsigned int extent_order)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3, ign4, ign5;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4),
+ "=D" (ign5)
+ : "0" (__HYPERVISOR_dom_mem_op), "1" (op), "2" (extent_list),
+ "3" (nr_extents), "4" (extent_order), "5" (DOMID_SELF)
: "memory" );
return ret;
}
-static __inline__ int HYPERVISOR_yield(void)
+static inline int
+HYPERVISOR_multicall(
+ void *call_list, int nr_calls)
{
int ret;
+ unsigned long ign1, ign2;
+
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
- _a1 (SCHEDOP_yield) : "memory" );
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_multicall), "1" (call_list), "2" (nr_calls)
+ : "memory" );
return ret;
}
-static __inline__ int HYPERVISOR_block(void)
+static inline int
+HYPERVISOR_update_va_mapping(
+ unsigned long va, pte_t new_val, unsigned long flags)
{
int ret;
+ unsigned long ign1, ign2, ign3, ign4;
+
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
- _a1 (SCHEDOP_block) : "memory" );
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+ : "0" (__HYPERVISOR_update_va_mapping),
+ "1" (va), "2" ((new_val).pte_low),
+#ifdef CONFIG_X86_PAE
+ "3" ((new_val).pte_high),
+#else
+ "3" (0),
+#endif
+ "4" (flags)
+ : "memory" );
return ret;
}
-static __inline__ int HYPERVISOR_shutdown(void)
+static inline int
+HYPERVISOR_event_channel_op(
+ void *op)
{
int ret;
+ unsigned long ignore;
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
- _a1 (SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift))
- : "memory" );
+ : "=a" (ret), "=b" (ignore)
+ : "0" (__HYPERVISOR_event_channel_op), "1" (op)
+ : "memory" );
return ret;
}
-static __inline__ int HYPERVISOR_reboot(void)
+static inline int
+HYPERVISOR_xen_version(
+ int cmd)
{
int ret;
+ unsigned long ignore;
+
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
- _a1 (SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift))
- : "memory" );
+ : "=a" (ret), "=b" (ignore)
+ : "0" (__HYPERVISOR_xen_version), "1" (cmd)
+ : "memory" );
return ret;
}
-static __inline__ int HYPERVISOR_suspend(unsigned long srec)
+static inline int
+HYPERVISOR_console_io(
+ int cmd, int count, char *str)
{
int ret;
- /* NB. On suspend, control software expects a suspend record in %esi. */
+ unsigned long ign1, ign2, ign3;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_console_io), "1" (cmd), "2" (count), "3" (str)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_physdev_op(
+ void *physdev_op)
+{
+ int ret;
+ unsigned long ign;
+
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
- _a1 (SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift)),
- "S" (srec) : "memory" );
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_physdev_op), "1" (physdev_op)
+ : "memory" );
return ret;
}
-#ifdef __i386__
-static __inline__ long HYPERVISOR_set_timer_op( u64 timeout )
+static inline int
+HYPERVISOR_grant_table_op(
+ unsigned int cmd, void *uop, unsigned int count)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_grant_table_op), "1" (cmd), "2" (uop), "3" (count)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_update_va_mapping_otherdomain(
+ unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3, ign4, ign5;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3),
+ "=S" (ign4), "=D" (ign5)
+ : "0" (__HYPERVISOR_update_va_mapping_otherdomain),
+ "1" (va), "2" ((new_val).pte_low),
+#ifdef CONFIG_X86_PAE
+ "3" ((new_val).pte_high),
+#else
+ "3" (0),
+#endif
+ "4" (flags), "5" (domid) :
+ "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_vm_assist(
+ unsigned int cmd, unsigned int type)
{
int ret;
- unsigned long timeout_hi = (unsigned long)(timeout>>32);
- unsigned long timeout_lo = (unsigned long)timeout;
unsigned long ign1, ign2;
__asm__ __volatile__ (
TRAP_INSTR
: "=a" (ret), "=b" (ign1), "=c" (ign2)
- : "0" (__HYPERVISOR_set_timer_op), "b" (timeout_lo), "c" (timeout_hi)
+ : "0" (__HYPERVISOR_vm_assist), "1" (cmd), "2" (type)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_boot_vcpu(
+ unsigned long vcpu, vcpu_guest_context_t *ctxt)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_boot_vcpu), "1" (vcpu), "2" (ctxt)
: "memory");
return ret;
}
-#else
-static __inline__ long HYPERVISOR_set_timer_op( u64 timeout )
+
+static inline int
+HYPERVISOR_vcpu_down(
+ int vcpu)
+{
+ int ret;
+ unsigned long ign1;
+ /* Yes, I really do want to clobber edx here: when we resume a
+ vcpu after unpickling a multi-processor domain, it returns
+ here, but clobbers all of the call clobbered registers. */
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_vcpu_down | (vcpu << SCHEDOP_vcpushift))
+ : "memory", "ecx", "edx" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_vcpu_up(
+ int vcpu)
+{
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_vcpu_up | (vcpu << SCHEDOP_vcpushift))
+ : "memory", "ecx" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_vcpu_pickle(
+ int vcpu, vcpu_guest_context_t *ctxt)
+{
+ int ret;
+ unsigned long ign1, ign2;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_vcpu_pickle | (vcpu << SCHEDOP_vcpushift)),
+ "2" (ctxt)
+ : "memory" );
+
+ return ret;
+}
+#elif defined(__x86_64__)
+
+#define __syscall_clobber "r11","rcx","memory"
+
+/*
+ * Assembler stubs for hyper-calls.
+ */
+static inline int
+HYPERVISOR_set_trap_table(
+ trap_info_t *table)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_set_trap_table), "D" (table)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_mmu_update(
+ mmu_update_t *req, int count, int *success_count, domid_t domid)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ "movq %5, %%r10;" TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_mmu_update), "D" (req), "S" ((long)count),
+ "d" (success_count), "g" ((unsigned long)domid)
+ : __syscall_clobber, "r10" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_mmuext_op(
+ struct mmuext_op *op, int count, int *success_count, domid_t domid)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ "movq %5, %%r10;" TRAP_INSTR
+ : "=a" (ret)
+ : "0" (__HYPERVISOR_mmuext_op), "D" (op), "S" ((long)count),
+ "d" (success_count), "g" ((unsigned long)domid)
+ : __syscall_clobber, "r10" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_gdt(
+ unsigned long *frame_list, int entries)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_set_gdt), "D" (frame_list), "S" ((long)entries)
+ : __syscall_clobber );
+
+
+ return ret;
+}
+static inline int
+HYPERVISOR_stack_switch(
+ unsigned long ss, unsigned long esp)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_stack_switch), "D" (ss), "S" (esp)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_callbacks(
+ unsigned long event_address, unsigned long failsafe_address,
+ unsigned long syscall_address)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_set_callbacks), "D" (event_address),
+ "S" (failsafe_address), "d" (syscall_address)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_fpu_taskswitch(
+ int set)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" ((unsigned long)__HYPERVISOR_fpu_taskswitch),
+ "D" ((unsigned long) set) : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_yield(
+ void)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_sched_op), "D" ((unsigned long)SCHEDOP_yield)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_block(
+ void)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_sched_op), "D" ((unsigned long)SCHEDOP_block)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_shutdown(
+ void)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_sched_op),
+ "D" ((unsigned long)(SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift)))
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_reboot(
+ void)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_sched_op),
+ "D" ((unsigned long)(SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift)))
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_suspend(
+ unsigned long srec)
+{
+ int ret;
+
+ /* NB. On suspend, control software expects a suspend record in %esi. */
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret)
+ : "0" ((unsigned long)__HYPERVISOR_sched_op),
+ "D" ((unsigned long)(SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift))),
+ "S" (srec)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+/*
+ * We could pass the timeout value as a single argument to the hypercall,
+ * but that would break the common code.
+ */
+static inline long
+HYPERVISOR_set_timer_op(
+ u64 timeout)
{
int ret;
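
All of these stubs follow the same shape: the hypercall number goes in the accumulator, the arguments in the registers named by the constraint strings, and TRAP_INSTR enters the hypervisor. As a usage sketch (assuming system time is expressed in nanoseconds, the usual s_time_t convention), a caller could arm the one-shot timer and block until an event arrives:

    #include <os.h>
    #include <hypervisor.h>
    #include <time.h>

    /* Illustrative: sleep for roughly one second. */
    static void sleep_about_a_second(void)
    {
        u64 deadline = (u64)get_s_time() + 1000000000ULL;   /* now + 1s in ns */

        HYPERVISOR_set_timer_op(deadline);   /* arm the one-shot timer */
        HYPERVISOR_block();                  /* yield until an event fires */
    }
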
diff --git a/extras/mini-os/include/lib.h b/extras/mini-os/include/lib.h
index dcf5a84e08..f386cb8053 100644
--- a/extras/mini-os/include/lib.h
+++ b/extras/mini-os/include/lib.h
@@ -60,10 +60,22 @@
/* printing */
#define printk printf
#define kprintf printf
-int printf(const char *fmt, ...);
-int vprintf(const char *fmt, va_list ap);
-int sprintf(char *buf, const char *cfmt, ...);
-int vsprintf(char *buf, const char *cfmt, va_list ap);
+#define _p(_x) ((void *)(unsigned long)(_x))
+void printf(const char *fmt, ...);
+int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
+int snprintf(char * buf, size_t size, const char *fmt, ...);
+int scnprintf(char * buf, size_t size, const char *fmt, ...);
+int vsprintf(char *buf, const char *fmt, va_list args);
+int sprintf(char * buf, const char *fmt, ...);
+int vsscanf(const char * buf, const char * fmt, va_list args);
+int sscanf(const char * buf, const char * fmt, ...);
+
+long simple_strtol(const char *cp,char **endp,unsigned int base);
+unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base);
+long long simple_strtoll(const char *cp,char **endp,unsigned int base);
+unsigned long long simple_strtoull(const char *cp,char **endp,unsigned int base);
+
/* string and memory manipulation */
int memcmp(const void *cs, const void *ct, size_t count);
@@ -77,6 +89,16 @@ size_t strnlen(const char *s, size_t count);
size_t strlen(const char *s);
char *strchr(const char *s, int c);
char *strstr(const char *s1, const char *s2);
+char * strcat(char * dest, const char * src);
+
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+struct kvec {
+ void *iov_base;
+ size_t iov_len;
+};
+
#endif /* _LIB_H_ */
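
The newly exported snprintf/simple_strtoul family is what lets the xenbus code marshal values to and from the store as plain strings. A small sketch (key contents and buffer handling are illustrative):

    #include <lib.h>

    /* Parse a numeric string; base 0 accepts "10", "0x10" or "010". */
    static unsigned long parse_id(const char *value)
    {
        char *end;
        return simple_strtoul(value, &end, 0);
    }

    /* Format an id back into a caller-supplied buffer. */
    static int format_id(char *buf, size_t len, unsigned long id)
    {
        return snprintf(buf, len, "%lu", id);
    }
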
diff --git a/extras/mini-os/include/mm.h b/extras/mini-os/include/mm.h
index 27af789315..613fe29dfa 100644
--- a/extras/mini-os/include/mm.h
+++ b/extras/mini-os/include/mm.h
@@ -25,6 +25,15 @@
#ifndef _MM_H_
#define _MM_H_
+#ifdef __i386__
+#include <xen/arch-x86_32.h>
+#endif
+
+#ifdef __x86_64__
+#include <xen/arch-x86_64.h>
+#endif
+
+
#ifdef __x86_64__
#define L1_PAGETABLE_SHIFT 12
@@ -56,6 +65,8 @@
#define L1_PAGETABLE_ENTRIES 1024
#define L2_PAGETABLE_ENTRIES 1024
+
+#elif defined(__x86_64__)
#endif
/* Given a virtual address, get an entry offset into a page table. */
@@ -97,13 +108,15 @@
extern unsigned long *phys_to_machine_mapping;
#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
-#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
static __inline__ unsigned long phys_to_machine(unsigned long phys)
{
unsigned long machine = pfn_to_mfn(phys >> L1_PAGETABLE_SHIFT);
machine = (machine << L1_PAGETABLE_SHIFT) | (phys & ~PAGE_MASK);
return machine;
}
+
+
+#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
static __inline__ unsigned long machine_to_phys(unsigned long machine)
{
unsigned long phys = mfn_to_pfn(machine >> L1_PAGETABLE_SHIFT);
@@ -119,16 +132,15 @@ static __inline__ unsigned long machine_to_phys(unsigned long machine)
#define to_phys(x) ((unsigned long)(x)-VIRT_START)
#define to_virt(x) ((void *)((unsigned long)(x)+VIRT_START))
-#define __va to_virt
-#define __pa to_phys
#define virt_to_pfn(_virt) (PFN_DOWN(to_phys(_virt)))
+#define mach_to_virt(_mach) (to_virt(machine_to_phys(_mach)))
+#define mfn_to_virt(_mfn) (mach_to_virt(_mfn << PAGE_SHIFT))
void init_mm(void);
unsigned long alloc_pages(int order);
#define alloc_page() alloc_pages(0);
void free_pages(void *pointer, int order);
-//int is_mfn_mapped(unsigned long mfn);
static __inline__ int get_order(unsigned long size)
{
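
The pfn_to_mfn/mfn_to_pfn pair and the to_phys/to_virt macros compose into the usual paravirtual translations between virtual, guest-physical and machine addresses. A sketch of the direction a grant or MMU hypercall typically needs (the helper name is illustrative):

    #include <mm.h>

    /* Virtual address inside Mini-os -> machine address. */
    static unsigned long virt_to_mach(void *virt)
    {
        unsigned long phys = to_phys(virt);   /* strip the VIRT_START offset */
        return phys_to_machine(phys);         /* pfn -> mfn via the p2m table */
    }
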
diff --git a/extras/mini-os/include/os.h b/extras/mini-os/include/os.h
index 490abc9839..42882f364f 100644
--- a/extras/mini-os/include/os.h
+++ b/extras/mini-os/include/os.h
@@ -15,16 +15,17 @@
#define unlikely(x) __builtin_expect((x),0)
#define smp_processor_id() 0
-#define preempt_disable() ((void)0)
-#define preempt_enable() ((void)0)
-#define force_evtchn_callback() ((void)HYPERVISOR_xen_version(0))
#ifndef __ASSEMBLY__
#include <types.h>
+#include <hypervisor.h>
#endif
#include <xen/xen.h>
+
+#define force_evtchn_callback() ((void)HYPERVISOR_xen_version(0))
+
#define __KERNEL_CS FLAT_KERNEL_CS
#define __KERNEL_DS FLAT_KERNEL_DS
#define __KERNEL_SS FLAT_KERNEL_SS
@@ -54,8 +55,6 @@
/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__
-#define pt_regs xen_regs
-
void trap_init(void);
/*
@@ -69,10 +68,8 @@ void trap_init(void);
#define __cli() \
do { \
vcpu_info_t *_vcpu; \
- preempt_disable(); \
_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
_vcpu->evtchn_upcall_mask = 1; \
- preempt_enable_no_resched(); \
barrier(); \
} while (0)
@@ -80,13 +77,11 @@ do { \
do { \
vcpu_info_t *_vcpu; \
barrier(); \
- preempt_disable(); \
_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
_vcpu->evtchn_upcall_mask = 0; \
barrier(); /* unmask then check (avoid races) */ \
if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
force_evtchn_callback(); \
- preempt_enable(); \
} while (0)
#define __save_flags(x) \
@@ -100,15 +95,12 @@ do { \
do { \
vcpu_info_t *_vcpu; \
barrier(); \
- preempt_disable(); \
_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
barrier(); /* unmask then check (avoid races) */ \
if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
force_evtchn_callback(); \
- preempt_enable(); \
- } else \
- preempt_enable_no_resched(); \
+ }\
} while (0)
#define safe_halt() ((void)0)
@@ -116,11 +108,9 @@ do { \
#define __save_and_cli(x) \
do { \
vcpu_info_t *_vcpu; \
- preempt_disable(); \
_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
(x) = _vcpu->evtchn_upcall_mask; \
_vcpu->evtchn_upcall_mask = 1; \
- preempt_enable_no_resched(); \
barrier(); \
} while (0)
@@ -136,6 +126,15 @@ do { \
/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")
+#if defined(__i386__)
+#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#define rmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#elif defined(__x86_64__)
+#define mb() __asm__ __volatile__ ("mfence":::"memory")
+#define rmb() __asm__ __volatile__ ("lfence":::"memory")
+#endif
+
+
#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)
@@ -147,69 +146,71 @@ do { \
typedef struct { volatile int counter; } atomic_t;
-#define xchg(ptr,v) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+/************************** i386 *******************************/
+#if defined (__i386__)
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
- int size)
+#define __xg(x) ((struct __xchg_dummy *)(x))
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
- switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- }
- return x;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__ __volatile__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__ __volatile__("xchgl %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
}
/**
* test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to clear
* @addr: Address to count from
*
- * This operation is atomic and cannot be reordered.
+ * This operation is atomic and cannot be reordered.
+ * It can be reordered on architectures other than x86.
* It also implies a memory barrier.
*/
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
- int oldbit;
+ int oldbit;
- __asm__ __volatile__( LOCK_PREFIX
- "btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
- :"Ir" (nr) : "memory");
- return oldbit;
+ __asm__ __volatile__( LOCK
+ "btrl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"Ir" (nr) : "memory");
+ return oldbit;
}
-static __inline__ int constant_test_bit(int nr, const volatile void * addr)
+static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
- return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+ return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}
-static __inline__ int variable_test_bit(int nr, volatile void * addr)
+static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
{
- int oldbit;
-
- __asm__ __volatile__(
- "btl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit)
- :"m" (ADDR),"Ir" (nr));
- return oldbit;
+ int oldbit;
+
+ __asm__ __volatile__(
+ "btl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit)
+ :"m" (ADDR),"Ir" (nr));
+ return oldbit;
}
#define test_bit(nr,addr) \
@@ -217,7 +218,6 @@ static __inline__ int variable_test_bit(int nr, volatile void * addr)
constant_test_bit((nr),(addr)) : \
variable_test_bit((nr),(addr)))
-
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
@@ -225,15 +225,20 @@ static __inline__ int variable_test_bit(int nr, volatile void * addr)
*
* This function is atomic and may not be reordered. See __set_bit()
* if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static __inline__ void set_bit(int nr, volatile void * addr)
+static inline void set_bit(int nr, volatile unsigned long * addr)
{
- __asm__ __volatile__( LOCK_PREFIX
- "btsl %1,%0"
- :"=m" (ADDR)
- :"Ir" (nr));
+ __asm__ __volatile__( LOCK
+ "btsl %1,%0"
+ :"=m" (ADDR)
+ :"Ir" (nr));
}
/**
@@ -246,43 +251,188 @@ static __inline__ void set_bit(int nr, volatile void * addr)
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
-static __inline__ void clear_bit(int nr, volatile void * addr)
+static inline void clear_bit(int nr, volatile unsigned long * addr)
{
- __asm__ __volatile__( LOCK_PREFIX
- "btrl %1,%0"
- :"=m" (ADDR)
- :"Ir" (nr));
+ __asm__ __volatile__( LOCK
+ "btrl %1,%0"
+ :"=m" (ADDR)
+ :"Ir" (nr));
}
/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
- */
-static __inline__ void atomic_inc(atomic_t *v)
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
{
- __asm__ __volatile__(
- LOCK "incl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ __asm__("bsfl %1,%0"
+ :"=r" (word)
+ :"rm" (word));
+ return word;
}
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. All bit operations return 0 if the bit
+ * was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+#define ADDR (*(volatile long *) addr)
+
#define rdtscll(val) \
__asm__ __volatile__("rdtsc" : "=A" (val))
+
+
+#elif defined(__x86_64__) /* ifdef __i386__ */
+/************************** x86_64 *******************************/
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+#define __xg(x) ((volatile long *)(x))
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__ __volatile__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__ __volatile__("xchgl %k0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 8:
+ __asm__ __volatile__("xchgq %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__( LOCK_PREFIX
+ "btrl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"dIr" (nr) : "memory");
+ return oldbit;
+}
+
+static __inline__ int constant_test_bit(int nr, const volatile void * addr)
+{
+ return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int variable_test_bit(int nr, volatile const void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(
+ "btl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit)
+ :"m" (ADDR),"dIr" (nr));
+ return oldbit;
+}
+
+#define test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ constant_test_bit((nr),(addr)) : \
+ variable_test_bit((nr),(addr)))
+
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __inline__ void set_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__( LOCK_PREFIX
+ "btsl %1,%0"
+ :"=m" (ADDR)
+ :"dIr" (nr) : "memory");
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static __inline__ void clear_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__( LOCK_PREFIX
+ "btrl %1,%0"
+ :"=m" (ADDR)
+ :"dIr" (nr));
+}
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
static __inline__ unsigned long __ffs(unsigned long word)
{
- __asm__("bsfl %1,%0"
- :"=r" (word)
- :"rm" (word));
- return word;
+ __asm__("bsfq %1,%0"
+ :"=r" (word)
+ :"rm" (word));
+ return word;
}
#define ADDR (*(volatile long *) addr)
+#define rdtscll(val) do { \
+ unsigned int __a,__d; \
+ asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+ (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
+} while(0)
+
+
+#else /* ifdef __x86_64__ */
+#error "Unsupported architecture"
+#endif
+
+
+/********************* common i386 and x86_64 ****************************/
+
+
+
static __inline__ void synch_set_bit(int nr, volatile void * addr)
{
__asm__ __volatile__ (
@@ -306,6 +456,14 @@ static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
return oldbit;
}
+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+ __asm__ __volatile__ (
+ "lock btrl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+ return oldbit;
+}
static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
{
@@ -326,9 +484,8 @@ static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
(__builtin_constant_p(nr) ? \
synch_const_test_bit((nr),(addr)) : \
synch_var_test_bit((nr),(addr)))
-#endif /* !__ASSEMBLY__ */
-#define rdtsc(low,high) \
- __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+
+#endif /* not assembly */
#endif /* _OS_H_ */
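
The bit operations above come in two flavours: the plain ones for guest-private data, and the synch_* ones, which use an explicit lock prefix for fields shared with the hypervisor such as the event-channel masks. A small sketch of the plain flavour on a private bitmap (the names are illustrative):

    #include <os.h>

    static unsigned long pending[1];             /* one-word private bitmap */

    static void mark_pending(int nr)
    {
        set_bit(nr, pending);                    /* btsl on the bitmap word */
    }

    static int take_pending(int nr)
    {
        return test_and_clear_bit(nr, pending);  /* btrl + sbbl, returns old bit */
    }
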
diff --git a/extras/mini-os/include/sched.h b/extras/mini-os/include/sched.h
new file mode 100644
index 0000000000..d7b26289ff
--- /dev/null
+++ b/extras/mini-os/include/sched.h
@@ -0,0 +1,38 @@
+#ifndef __SCHED_H__
+#define __SCHED_H__
+
+#include <list.h>
+
+struct thread
+{
+ char *name;
+ char *stack;
+ unsigned long eps;
+ unsigned long eip;
+ struct list_head thread_list;
+ u32 flags;
+};
+
+
+
+void init_sched(void);
+void run_idle_thread(void);
+struct thread* create_thread(char *name, void (*function)(void *), void *data);
+void schedule(void);
+
+static inline struct thread* get_current(void)
+{
+ struct thread **current;
+#ifdef __i386__
+ __asm__("andl %%esp,%0; ":"=r" (current) : "r" (~8191UL));
+#endif
+ return *current;
+}
+
+#define current get_current()
+
+
+void wake(struct thread *thread);
+void block(struct thread *thread);
+
+#endif /* __SCHED_H__ */
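
create_thread(), schedule() and the wake()/block() pair are the whole of the new cooperative threading API; there is no preemption, so a long-running thread has to call schedule() itself. A sketch of spawning a worker (the body is illustrative):

    #include <os.h>
    #include <sched.h>
    #include <lib.h>

    static void worker(void *data)
    {
        printk("worker started, arg %p\n", data);
        for (;;) {
            /* ... do one unit of work ... */
            schedule();          /* cooperatively yield the CPU */
        }
    }

    static void start_worker(void)
    {
        create_thread("worker", worker, NULL);
    }
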
diff --git a/extras/mini-os/include/semaphore.h b/extras/mini-os/include/semaphore.h
new file mode 100644
index 0000000000..d30c81e674
--- /dev/null
+++ b/extras/mini-os/include/semaphore.h
@@ -0,0 +1,46 @@
+#ifndef _SEMAPHORE_H_
+#define _SEMAPHORE_H_
+
+#include <wait.h>
+
+/*
+ * The semaphore implementation in Mini-os is simple: because there are
+ * no preemptive threads, atomicity is guaranteed.
+ */
+
+struct semaphore
+{
+ int count;
+ struct wait_queue_head wait;
+};
+
+
+#define __SEMAPHORE_INITIALIZER(name, n) \
+{ \
+ .count = n, \
+ .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
+}
+
+#define __MUTEX_INITIALIZER(name) \
+ __SEMAPHORE_INITIALIZER(name,1)
+
+#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
+
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
+
+#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
+
+static inline void down(struct semaphore *sem)
+{
+ wait_event(sem->wait, sem->count > 0);
+ sem->count--;
+}
+
+static inline void up(struct semaphore *sem)
+{
+ sem->count++;
+ wake_up(&sem->wait);
+}
+
+#endif /* _SEMAPHORE_H_ */
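
Because threads are not preempted, down() and up() need no atomic instructions; blocking is delegated to the wait queue. A usage sketch with the DECLARE_MUTEX helper (the lock name and critical section are illustrative):

    #include <semaphore.h>

    static DECLARE_MUTEX(xb_lock);   /* semaphore initialised to 1 */

    static void critical(void)
    {
        down(&xb_lock);   /* waits on xb_lock.wait until count > 0 */
        /* ... work serialised against other cooperative threads ... */
        up(&xb_lock);     /* wakes any thread queued on xb_lock.wait */
    }
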
diff --git a/extras/mini-os/include/time.h b/extras/mini-os/include/time.h
index 067b8b36a1..aacf1d17e7 100644
--- a/extras/mini-os/include/time.h
+++ b/extras/mini-os/include/time.h
@@ -58,6 +58,6 @@ void init_time(void);
s_time_t get_s_time(void);
s_time_t get_v_time(void);
void gettimeofday(struct timeval *tv);
-void block(u32 millisecs);
+void block_domain(u32 millisecs);
#endif /* _TIME_H_ */
diff --git a/extras/mini-os/include/traps.h b/extras/mini-os/include/traps.h
index 258ae819f9..2f54fd28f0 100644
--- a/extras/mini-os/include/traps.h
+++ b/extras/mini-os/include/traps.h
@@ -17,6 +17,7 @@
#ifndef _TRAPS_H_
#define _TRAPS_H_
+#ifdef __i386__
struct pt_regs {
long ebx;
long ecx;
@@ -34,7 +35,38 @@ struct pt_regs {
long esp;
int xss;
};
+#elif __x86_64__
+struct pt_regs {
+ unsigned long r15;
+ unsigned long r14;
+ unsigned long r13;
+ unsigned long r12;
+ unsigned long rbp;
+ unsigned long rbx;
+/* arguments: non-interrupt/non-tracing syscalls only save up to here */
+ unsigned long r11;
+ unsigned long r10;
+ unsigned long r9;
+ unsigned long r8;
+ unsigned long rax;
+ unsigned long rcx;
+ unsigned long rdx;
+ unsigned long rsi;
+ unsigned long rdi;
+ unsigned long orig_rax;
+/* end of arguments */
+/* cpu exception frame or undefined */
+ unsigned long rip;
+ unsigned long cs;
+ unsigned long eflags;
+ unsigned long rsp;
+ unsigned long ss;
+/* top of stack page */
+};
+
+
+#endif
void dump_regs(struct pt_regs *regs);
diff --git a/extras/mini-os/include/types.h b/extras/mini-os/include/types.h
index c87ae161b8..602a080152 100644
--- a/extras/mini-os/include/types.h
+++ b/extras/mini-os/include/types.h
@@ -44,11 +44,19 @@ typedef unsigned long u_long;
typedef long long quad_t;
typedef unsigned long long u_quad_t;
typedef unsigned int uintptr_t;
+
+typedef struct { unsigned long pte_low; } pte_t;
#elif defined(__x86_64__)
typedef long quad_t;
typedef unsigned long u_quad_t;
typedef unsigned long uintptr_t;
+
+typedef struct { unsigned long pte; } pte_t;
#endif
+
+
+
+#define INT_MAX ((int)(~0U>>1))
#define UINT_MAX (~0U)
#endif /* _TYPES_H_ */
diff --git a/extras/mini-os/include/wait.h b/extras/mini-os/include/wait.h
new file mode 100644
index 0000000000..4c23855620
--- /dev/null
+++ b/extras/mini-os/include/wait.h
@@ -0,0 +1,91 @@
+#ifndef __WAIT_H__
+#define __WAIT_H__
+
+#include <sched.h>
+#include <list.h>
+#include <lib.h>
+#include <os.h>
+
+struct wait_queue
+{
+ struct thread *thread;
+ struct list_head thread_list;
+};
+
+struct wait_queue_head
+{
+ /* TODO - lock required? */
+ struct list_head thread_list;
+};
+
+#define DECLARE_WAIT_QUEUE_HEAD(name) \
+ struct wait_queue_head name = \
+ { .thread_list = { &(name).thread_list, &(name).thread_list} }
+
+#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
+ .thread_list = { &(name).thread_list, &(name).thread_list } }
+
+
+#define DEFINE_WAIT(name) \
+struct wait_queue name = { \
+ .thread = current, \
+ .thread_list = LIST_HEAD_INIT((name).thread_list), \
+}
+
+
+
+static inline void init_waitqueue_entry(struct wait_queue *q, struct thread *thread)
+{
+ q->thread = thread;
+}
+
+
+static inline void add_wait_queue(struct wait_queue_head *h, struct wait_queue *q)
+{
+ if (list_empty(&q->thread_list))
+ list_add(&q->thread_list, &h->thread_list);
+}
+
+static inline void remove_wait_queue(struct wait_queue *q)
+{
+ list_del(&q->thread_list);
+}
+
+static inline void wake_up(struct wait_queue_head *head)
+{
+ struct list_head *tmp, *next;
+ list_for_each_safe(tmp, next, &head->thread_list)
+ {
+ struct wait_queue *curr;
+ curr = list_entry(tmp, struct wait_queue, thread_list);
+ wake(curr->thread);
+ }
+}
+
+#define wait_event(wq, condition) do{ \
+ unsigned long flags; \
+ if(condition) \
+ break; \
+ DEFINE_WAIT(__wait); \
+ for(;;) \
+ { \
+ /* protect the list */ \
+ local_irq_save(flags); \
+ add_wait_queue(&wq, &__wait); \
+ block(current); \
+ local_irq_restore(flags); \
+ if(condition) \
+ break; \
+ schedule(); \
+ } \
+ local_irq_save(flags); \
+ /* need to wake up */ \
+ wake(current); \
+ remove_wait_queue(&__wait); \
+ local_irq_restore(flags); \
+} while(0)
+
+
+
+
+#endif /* __WAIT_H__ */
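
wait_event() puts the current thread on the queue and blocks it until the condition holds, re-testing the condition after every wake-up; wake_up() marks every queued thread runnable. A producer/consumer sketch (the flag and queue names are illustrative):

    #include <wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(reply_wq);
    static volatile int reply_ready;      /* condition tested by the sleeper */

    static void consumer(void)
    {
        wait_event(reply_wq, reply_ready != 0);   /* sleeps via block()/schedule() */
        /* ... consume the reply ... */
    }

    static void producer(void)
    {
        reply_ready = 1;
        wake_up(&reply_wq);               /* marks the sleeping consumer runnable */
    }
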
diff --git a/extras/mini-os/include/xenbus.h b/extras/mini-os/include/xenbus.h
new file mode 100644
index 0000000000..9acd17f03f
--- /dev/null
+++ b/extras/mini-os/include/xenbus.h
@@ -0,0 +1,89 @@
+/******************************************************************************
+ * xenbus.h
+ *
+ * Talks to Xen Store to figure out what devices we have.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _ASM_XEN_XENBUS_H
+#define _ASM_XEN_XENBUS_H
+
+
+/* Caller must hold this lock to call these functions: it's also held
+ * across watch callbacks. */
+// TODO
+//extern struct semaphore xenbus_lock;
+
+char **xenbus_directory(const char *dir, const char *node, unsigned int *num);
+void *xenbus_read(const char *dir, const char *node, unsigned int *len);
+int xenbus_write(const char *dir, const char *node,
+ const char *string, int createflags);
+int xenbus_mkdir(const char *dir, const char *node);
+int xenbus_exists(const char *dir, const char *node);
+int xenbus_rm(const char *dir, const char *node);
+int xenbus_transaction_start(const char *subtree);
+int xenbus_transaction_end(int abort);
+
+/* Single read and scanf: returns -errno or num scanned if > 0. */
+int xenbus_scanf(const char *dir, const char *node, const char *fmt, ...)
+ __attribute__((format(scanf, 3, 4)));
+
+/* Single printf and write: returns -errno or 0. */
+int xenbus_printf(const char *dir, const char *node, const char *fmt, ...)
+ __attribute__((format(printf, 3, 4)));
+
+/* Generic read function: NULL-terminated triples of name,
+ * sprintf-style type string, and pointer. Returns 0 or errno.*/
+int xenbus_gather(const char *dir, ...);
+
+/* Register callback to watch this node. */
+struct xenbus_watch
+{
+ struct list_head list;
+ char *node;
+ void (*callback)(struct xenbus_watch *, const char *node);
+};
+
+int register_xenbus_watch(struct xenbus_watch *watch);
+void unregister_xenbus_watch(struct xenbus_watch *watch);
+void reregister_xenbus_watches(void);
+
+/* Called from xen core code. */
+void xenbus_suspend(void);
+void xenbus_resume(void);
+
+#define XENBUS_IS_ERR_READ(str) ({ \
+ if (!IS_ERR(str) && strlen(str) == 0) { \
+ kfree(str); \
+ str = ERR_PTR(-ERANGE); \
+ } \
+ IS_ERR(str); \
+})
+
+#define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
+
+int xs_init(void);
+
+#endif /* _ASM_XEN_XENBUS_H */
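
A typical consumer of this interface reads a key, registers a watch and reacts in the callback. A sketch of that pattern (the store path, key name and callback body are illustrative, not taken from this patch):

    #include <list.h>
    #include <lib.h>
    #include <xenbus.h>

    static void vif_changed(struct xenbus_watch *w, const char *node)
    {
        printk("xenbus node %s changed\n", node);
    }

    static struct xenbus_watch vif_watch = {
        .node     = "device/vif/0",
        .callback = vif_changed,
    };

    static void probe_vif(void)
    {
        int backend_id;

        if (xenbus_scanf("device/vif/0", "backend-id", "%d", &backend_id) > 0)
            printk("vif/0 backend domain: %d\n", backend_id);

        register_xenbus_watch(&vif_watch);
    }
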